{
"source": "joegomes/protein-transformer",
"score": 2
}
#### File: protein-transformer/protein_transformer/losses.py
```python
import numpy as np
import prody as pr
import torch
import wandb
import protein_transformer.protein.Structure
from protein_transformer.protein.Sequence import VOCAB
from protein_transformer.protein.Structure import NUM_PREDICTED_ANGLES, \
NUM_PREDICTED_COORDS, SC_ANGLES_START_POS
from .protein.structure_utils import get_backbone_from_full_coords
def combine_drmsd_mse(d, mse, w=.5, lndrmsd_norm=0.02, mse_norm=0.01, log=True):
"""
    Returns a combination of DRMSD and MSE loss. Each term is first normalized
    to a common scale, then combined as w * drmsd + (1 - w) * mse.
"""
d = w * (d / lndrmsd_norm)
mse = (1 - w) * (mse / mse_norm)
if log: wandb.log({"MSE Weight": mse, "DRMSD Weight": d}, commit=False)
return d + mse
def inverse_trig_transform(t):
"""
    Given a (BATCH x L x NUM_PREDICTED_ANGLES * 2) tensor of sin/cos values,
    returns a (BATCH x L x NUM_PREDICTED_ANGLES) tensor of angles recovered
    with atan2.
"""
t = t.view(t.shape[0], -1, NUM_PREDICTED_ANGLES, 2)
t_cos = t[:, :, :, 0]
t_sin = t[:, :, :, 1]
t = torch.atan2(t_sin, t_cos)
return t
def remove_sos_eos_from_input(input_seq):
"""
Given a sequence of integers that may be surrounded with EOS/SOS characters,
returns the sequence without those characters.
"""
start_idx = 1 if input_seq[0] == VOCAB.sos_id else 0
end_idx = -1 if input_seq[-1] == VOCAB.eos_id else None
return input_seq[start_idx : end_idx]
def drmsd_work(pred_ang, true_crd, input_seq, return_rmsd, do_backward=True, backbone_only=False):
"""
A version of drmsd loss meant to be used in parallel. Operates on a tuple
of predicted angles, coordinates, and sequence. Works for 1 protein at a
time.
"""
# Move numpy arrays to torch tensors
pred_ang, true_crd, input_seq = torch.tensor(pred_ang), torch.tensor(true_crd), torch.tensor(input_seq)
# Record leaf-node pointer to access gradients at end
pred_ang.requires_grad_()
starting_ang = pred_ang
# Remove batch-level masking
batch_mask = input_seq.ne(VOCAB.pad_id)
input_seq = input_seq[batch_mask]
true_crd = true_crd[:input_seq.shape[0] * NUM_PREDICTED_COORDS]
# Compute coordinates
pred_crd = angles_to_coords(pred_ang, input_seq)
if backbone_only:
pred_crd = get_backbone_from_full_coords(pred_crd)
true_crd = get_backbone_from_full_coords(true_crd)
# Remove coordinate-level masking for missing atoms
true_crd_non_nan = torch.isnan(true_crd).eq(0)
pred_crds_masked = pred_crd[true_crd_non_nan].reshape(-1, 3)
true_crds_masked = true_crd[true_crd_non_nan].reshape(-1, 3)
# Compute drmsd between existing atoms only
loss = drmsd(pred_crds_masked, true_crds_masked)
l_normed = loss / pred_crds_masked.shape[0]
# Repeat above for bb only
pred_crd_bb = get_backbone_from_full_coords(pred_crd)
true_crd_bb = get_backbone_from_full_coords(true_crd)
true_crd_bb_non_nan = torch.isnan(true_crd_bb).eq(0)
pred_crd_bb_masked = pred_crd_bb[true_crd_bb_non_nan].reshape(-1, 3)
true_crd_bb_masked = true_crd_bb[true_crd_bb_non_nan].reshape(-1, 3)
bb_loss = drmsd(pred_crd_bb_masked, true_crd_bb_masked)
bb_loss_normed = bb_loss / pred_crd_bb_masked.shape[0]
if do_backward:
l_normed.backward()
if return_rmsd:
return starting_ang.grad, loss.item(), l_normed.item(), bb_loss.item(), bb_loss_normed.item(), \
rmsd(pred_crds_masked.data.numpy(), true_crds_masked.data.numpy())
else:
return starting_ang.grad, loss.item(), l_normed.item(), bb_loss.item(), bb_loss_normed.item()
def angles_to_coords(angles, seq, remove_batch_padding=False):
"""
Convert torsional angles to coordinates.
"""
pred_ang, input_seq = angles, seq
if remove_batch_padding:
# Remove batch-level masking
batch_mask = input_seq.ne(VOCAB.pad_id)
input_seq = input_seq[batch_mask]
# Remove SOS and EOS characters if present
input_seq = remove_sos_eos_from_input(input_seq)
pred_ang = pred_ang[:input_seq.shape[0]]
# Generate coordinates
return protein_transformer.protein.Structure.generate_coords(pred_ang, input_seq, torch.device("cpu"))
def parallel_coords_only(ang, seq):
coords = angles_to_coords(ang, seq)
return coords
def drmsd_work_wrapper(ang_crd_seq_retrmsd_doback_bbonly):
"""
Unpacks arguments for the drmsd_work function. Useful for Pool.map().
Parameters
----------
ang_crd_seq_retrmsd_doback_bbonly : tuple
"""
ang, crd, seq, return_rmsd, do_backward, backbone_only = ang_crd_seq_retrmsd_doback_bbonly
return drmsd_work(ang, crd, seq, return_rmsd, do_backward, backbone_only)
def compute_batch_drmsd(pred_angs, true_crds, input_seqs, device=torch.device("cpu"), return_rmsd=False,
do_backward=False, retain_graph=False, pool=None, backbone_only=False):
"""
Calculate DRMSD loss by first generating predicted coordinates from
angles. Then, predicted coordinates are compared with the true coordinate
tensor provided to the function.
"""
pred_angs, true_crds, input_seqs = pred_angs.to(device), true_crds.to(device), input_seqs.to(device)
pred_angs = inverse_trig_transform(pred_angs)
# Compute drmsd in parallel over the batch
if pool is not None:
results = pool.map(drmsd_work_wrapper, zip(pred_angs.detach().numpy(), true_crds.detach().numpy(),
input_seqs.detach().numpy(), [return_rmsd]*pred_angs.shape[0],
[do_backward]*pred_angs.shape[0], [backbone_only]*pred_angs.shape[0]))
else:
results = (drmsd_work(ang.detach(), crd.detach(), seq.detach(), return_rmsd, do_backward, backbone_only)
for ang, crd, seq in zip(pred_angs, true_crds, input_seqs))
# Unpack the multiprocessing results
grads, losses, ln_losses, bb_losses, bb_ln_losses, rmsds = [], [], [], [], [], []
for r in results:
if len(r) == 6:
grad, l, ln, bb_l, bb_ln, rmsd_val = r
rmsds.append(rmsd_val)
else:
grad, l, ln, bb_l, bb_ln = r
grads.append(grad)
losses.append(l)
ln_losses.append(ln)
bb_losses.append(bb_l)
bb_ln_losses.append(bb_ln)
if do_backward:
pred_angs.backward(gradient=torch.stack(grads), retain_graph=retain_graph)
if return_rmsd:
return np.mean(losses), np.mean(ln_losses), np.mean(bb_losses), np.mean(bb_ln_losses), np.mean(rmsds)
else:
return np.mean(losses), np.mean(ln_losses), np.mean(bb_losses), np.mean(bb_ln_losses)
def mse_over_angles(pred, true, bb_only=False, sc_only=False):
"""Returns the mean squared error between two tensor batches.
Given a predicted angle tensor and a true angle tensor (batch-padded with
zeros, and missing-item-padded with nans), this function first removes
batch then item padding before using torch's built-in MSE loss function.
Args:
        pred, true (torch.Tensor): 3-dimensional tensors (batch x length x angles)
Returns:
MSE loss between true and pred.
"""
assert len(pred.shape) == 3, "This function must operate on a batch of angles."
# Slice off appropriate angles for evaluation, depending on whether or not
# the input is in sin/cos terms, or radians
if bb_only and pred.shape[-1] == NUM_PREDICTED_ANGLES * 2:
pred = pred[:,:,:SC_ANGLES_START_POS*2]
true = true[:,:,:SC_ANGLES_START_POS*2]
elif bb_only and pred.shape[-1] == NUM_PREDICTED_ANGLES:
pred = pred[:,:,:SC_ANGLES_START_POS]
true = true[:,:,:SC_ANGLES_START_POS]
elif sc_only and pred.shape[-1] == NUM_PREDICTED_ANGLES * 2:
pred = pred[:,:,SC_ANGLES_START_POS * 2:]
true = true[:,:,SC_ANGLES_START_POS * 2:]
elif sc_only and pred.shape[-1] == NUM_PREDICTED_ANGLES:
pred = pred[:, :, SC_ANGLES_START_POS:]
true = true[:, :, SC_ANGLES_START_POS:]
    elif bb_only or sc_only:
        # A backbone/side-chain slice was requested, but the tensor's last
        # dimension does not match a recognized angle layout.
        print(pred.shape)
        raise Exception("Unknown angle tensor shape.")
# Remove batch padding
ang_non_zero = true.ne(0).any(dim=2)
tgt_ang_non_zero = true[ang_non_zero]
# Remove missing angles
ang_non_nans = torch.isnan(tgt_ang_non_zero).eq(0)
return torch.nn.functional.mse_loss(pred[ang_non_zero][ang_non_nans], true[ang_non_zero][ang_non_nans])
def mse_over_angles_numpy(pred, true):
""" Numpy version of mse_over_angles.
Given a predicted angle tensor and a true angle tensor (batch-padded with
zeros, and missing-item-padded with nans), this function first removes
batch then item padding before using torch's built-in MSE loss function.
Args:
        pred, true (np.ndarray): 3-dimensional arrays (batch x length x angles)
Returns:
MSE loss between true and pred.
"""
return mse_over_angles(torch.tensor(pred), torch.tensor(true)).numpy()
def pairwise_internal_dist(x):
""" Returns all pairwise distances between points in a coordinate tensor.
An implementation of cdist (pairwise distances between sets of vectors)
from user jacobrgardner on github. Not implemented for batches.
https://github.com/pytorch/pytorch/issues/15253
Args:
x (torch.Tensor): coordinate tensor with shape (L x 3)
Returns:
res (torch.Tensor): a distance tensor comparing all (L x L) pairs of
points
"""
x1, x2 = x, x
assert len(x1.shape) == 2, "Pairwise internal distance method is not " \
"implemented for batches."
x1_norm = x1.pow(2).sum(dim=-1, keepdim=True) # TODO: experiment with alternative to pow, remove duplicated norm
res = torch.addmm(x1_norm.transpose(-2, -1), x1, x2.transpose(-2, -1), alpha=-2).add_(x1_norm)
res = res.clamp_min_(1e-30).sqrt_()
return res
def drmsd(a, b):
""" Returns distance root-mean-squared-deviation between tensors a and b.
Given 2 coordinate tensors, returns the dRMSD between them. Both
tensors must be the exact same shape. It works by creating a mask of the
upper-triangular indices of the pairwise distance matrix (excluding the
diagonal). Then, the resulting values are compared with Pytorch's MSE loss.
Args:
a, b (torch.Tensor): coordinate tensor with shape (L x 3).
Returns:
res (torch.Tensor): DRMSD between a and b.
"""
a_ = pairwise_internal_dist(a)
b_ = pairwise_internal_dist(b)
i = torch.triu_indices(a_.shape[0], a_.shape[1], offset=1)
mse = torch.nn.functional.mse_loss(a_[i[0], i[1]].float(), b_[i[0], i[1]].float())
res = torch.sqrt(mse)
return res
def rmsd(a, b):
"""
Returns the RMSD between two sets of coordinates.
"""
t = pr.calcTransformation(a, b)
return pr.calcRMSD(t.apply(a), b)
```
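Because `drmsd` compares internal pairwise distances rather than raw coordinates, it is invariant to rigid translation (and rotation) of one structure relative to the other. A minimal sketch, assuming the file above is installed as `protein_transformer.losses`:

```python
import torch
from protein_transformer.losses import drmsd, pairwise_internal_dist

a = torch.tensor([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [1.0, 1.0, 0.0]])
b = a + torch.tensor([5.0, -2.0, 3.0])  # same shape, rigidly translated

print(pairwise_internal_dist(a))  # 3 x 3 matrix of inter-point distances
print(drmsd(a, b).item())         # ~0.0: internal distances are unchanged
```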
#### File: protein_transformer/models/ModelPrediction.py
```python
from protein_transformer.losses import inverse_trig_transform
from protein_transformer.losses import angles_to_coords
class ModelPrediction(object):
""" Represents a prediction from a model, can be transformed as needed. """
def __init__(self, input_sequence, raw_model_output, modality="sincos"):
self.input_sequence = input_sequence
self.raw_model_output = raw_model_output
        self.modality = modality
self.data = raw_model_output
def to_radians(self):
""" Modifies data to angles. Returns data. """
if self.modality == "radians":
return self.data
elif self.modality == "sincos":
self.data = inverse_trig_transform(self.data)
self.modality = "radians"
return self.data
else:
raise NotImplementedError
def to_coordinates(self):
""" Modifies data to coordinates. Returns data. """
if self.modality == "coords":
return self.data
else:
self.to_radians()
            self.data = angles_to_coords(self.data, self.input_sequence)
self.modality = "coords"
return self.data
```
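A short usage sketch of the modality flow above. The shapes follow `inverse_trig_transform`'s expectations; the tensors here are random stand-ins, and the imports assume the package is installed:

```python
import torch
from protein_transformer.protein.Structure import NUM_PREDICTED_ANGLES
from protein_transformer.models.ModelPrediction import ModelPrediction

raw = torch.rand(1, 10, NUM_PREDICTED_ANGLES * 2)  # (batch, length, sin/cos pairs)
seq = torch.randint(0, 20, (10,))                  # integer-encoded residues
pred = ModelPrediction(seq, raw)                   # starts in "sincos" modality
angles = pred.to_radians()                         # (1, 10, NUM_PREDICTED_ANGLES)
print(angles.shape, pred.modality)
```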
#### File: models/transformer/Encoder.py
```python
import torch
from .Attention import MultiHeadedAttention
from .Sublayers import PositionwiseFeedForward, PositionalEncoding, \
SublayerConnection, Embeddings
class Encoder(torch.nn.Module):
"""
Transformer encoder model.
"""
def __init__(self, din, dm, dff, n_heads, n_enc_layers, max_seq_len, dropout):
super(Encoder, self).__init__()
self.din = din
self.dm = dm
self.dff = dff
self.n_heads = n_heads
self.n_enc_layers = n_enc_layers
self.max_seq_len = max_seq_len
self.emb_dropout = torch.nn.Dropout(dropout)
self.input_embedding = Embeddings(self.din, self.dm)
self.positional_enc = PositionalEncoding(dm, dropout, max_seq_len)
self.enc_layers = torch.nn.ModuleList([EncoderLayer(dm, dff, n_heads, dropout) for _ in range(self.n_enc_layers)])
def forward(self, src_seq, src_mask):
enc_output = self.input_embedding(src_seq)
enc_output = self.emb_dropout(enc_output + self.positional_enc(enc_output))
for enc_layer in self.enc_layers:
enc_output = enc_layer(enc_output, src_mask)
return enc_output
class EncoderLayer(torch.nn.Module):
"""
Transformer encoder layer.
"""
def __init__(self, dm, dff, n_heads, dropout):
super(EncoderLayer, self).__init__()
self.dm = dm
self.dff = dff
self.n_heads = n_heads
self.self_attn = MultiHeadedAttention(dm, n_heads)
self.pwff = PositionwiseFeedForward(dm, dff, dropout)
self.sublayer_connections = torch.nn.ModuleList([SublayerConnection(dm, dropout) for _ in range(2)])
def forward(self, enc_input, enc_input_mask):
enc_output = self.sublayer_connections[0](enc_input, lambda x: self.self_attn(x, x, x, mask=enc_input_mask))
enc_output = self.sublayer_connections[1](enc_output, self.pwff)
return enc_output
```
#### File: models/transformer/Sublayers.py
```python
import numpy as np
import torch
class SublayerConnection(torch.nn.Module):
"""
Does residual + layer norm of input. Modular design inspired from Harvard
NLP.
http://nlp.seas.harvard.edu/2018/04/03/attention.html#encoder-and-decoder-stacks
"""
def __init__(self, size, dropout=0.1):
super(SublayerConnection, self).__init__()
self.norm = torch.nn.LayerNorm(size)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, layer_input, layer):
return layer_input + self.dropout(layer(self.norm(layer_input)))
class PositionwiseFeedForward(torch.nn.Module):
"""
Position-wise Feed Forward network sublayer for the Transformer model.
"""
def __init__(self, dm, dh, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.dm = dm
self.dh = dh
self.layer1 = torch.nn.Linear(dm, dh)
self.layer2 = torch.nn.Linear(dh, dm)
self.relu = torch.nn.ReLU()
self.dropout = torch.nn.Dropout(dropout)
def forward(self, input_seq):
return self.layer2(self.dropout(self.relu(self.layer1(input_seq))))
class PositionalEncoding(torch.nn.Module):
"""
Positional encoding layer for the Transformer model.
    From the Annotated Transformer (Harvard NLP),
https://github.com/harvardnlp/annotated-transformer/blob/master/The%20Annotated%20Transformer.ipynb
"""
def __init__(self, dm, dropout, max_seq_len):
super(PositionalEncoding, self).__init__()
self.dropout = torch.nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_seq_len, dm)
position = torch.arange(0., max_seq_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0., dm, 2) *
-(np.log(10000.0) / dm))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + torch.autograd.Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
class Embeddings(torch.nn.Module):
def __init__(self, vocab, d_model):
super(Embeddings, self).__init__()
self.emb = torch.nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, x):
return self.emb(x) * np.sqrt(self.d_model)
if __name__ == "__main__":
seq = torch.ones(8, 7, 64)
    penc = PositionalEncoding(64, dropout=0.1, max_seq_len=300)
print(penc(seq).shape)
print(penc(seq))
```
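The buffer built in `PositionalEncoding` is the standard sinusoidal encoding, PE[pos, 2i] = sin(pos / 10000^(2i/d_model)) and PE[pos, 2i+1] = cos(...), computed via exponentials of logs for numerical convenience. A small, self-contained check of that equivalence:

```python
import numpy as np
import torch

dm, max_len = 8, 4
pe = torch.zeros(max_len, dm)
position = torch.arange(0., max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0., dm, 2) * -(np.log(10000.0) / dm))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)

pos, i = 3, 1
expected = torch.sin(torch.tensor(pos / 10000 ** (2 * i / dm)))
assert torch.isclose(pe[pos, 2 * i], expected)
```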
#### File: protein-transformer/scripts/create_development_datasets.py
```python
import datetime
import sys
import torch
from glob import glob
from protein_transformer.dataset import VALID_SPLITS
import os
def make_dev_dataset(data, dev_ids):
# Initialize empty dictionary
new_data = {"train": {"ang": [], "ids": [], "crd": [], "seq": []},
"test": {"ang": [], "ids": [], "crd": [], "seq": []},
"pnids": {}}
d = {f"valid-{x}": {"ang": [], "ids": [], "crd": [], "seq": []} for x in VALID_SPLITS}
new_data.update(d)
# Add each id to every subset in the new dataset dictionary
completed = 0
for did in dev_ids:
try:
target_subdict, target_idx = data["pnids"][did]["subset"], data["pnids"][did]["idx"]
except KeyError:
print(f"\t{did} not found in processed data.")
continue
for subdict in ["train", "test"] + [f"valid-{split}" for split in VALID_SPLITS]:
new_data[subdict]["seq"].append(data[target_subdict]["seq"][target_idx])
new_data[subdict]["ang"].append(data[target_subdict]["ang"][target_idx])
new_data[subdict]["crd"].append(data[target_subdict]["crd"][target_idx])
new_data[subdict]["ids"].append(did)
new_data["pnids"][did] = {"idx": len(new_data["train"]["seq"]) -1, "subset": "train"}
completed += 1
# Copy any remaining data from the original dictionary
other_items = {k: v for k, v in data.items() if k not in ["train", "test"] + [f"valid-{split}" for split in VALID_SPLITS]}
new_data.update(other_items)
new_data["date"] = datetime.datetime.now().strftime("%I:%M%p on %B %d, %Y")
print(f"\t{completed} completed.")
return new_data
if __name__ == "__main__":
_, dataset = sys.argv
data = torch.load(dataset)
dev_datasets = glob("../data/development/*.txt")
for dev_dataset_file in dev_datasets:
with open(dev_dataset_file, "r") as f:
dev_dataset_ids = f.read().splitlines()
print(f"Processing {len(dev_dataset_ids)} ProteinNet IDs from {os.path.basename(dev_dataset_file)}.")
new_dataset = make_dev_dataset(data, dev_dataset_ids)
torch.save(new_dataset, dev_dataset_file.replace(".txt", ".pt"))
```
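For reference, a miniature, hypothetical illustration of the dictionary layout `make_dev_dataset` expects: per-subset lists of angles, coordinates, sequences and IDs, plus a `pnids` index mapping each ProteinNet ID to its subset and position. This assumes the script above is importable so `make_dev_dataset` is in scope; real datasets are torch-saved dictionaries with many more entries.

```python
fake_data = {
    "train": {"ang": [[0.1, 0.2]], "ids": ["1ABC_1_A"],
              "crd": [[0.0, 0.0, 0.0]], "seq": [[7, 3, 12]]},
    "test": {"ang": [], "ids": [], "crd": [], "seq": []},
    "pnids": {"1ABC_1_A": {"subset": "train", "idx": 0}},
}
dev = make_dev_dataset(fake_data, ["1ABC_1_A"])
# The single entry is copied into train, test, and every validation split.
print(dev["train"]["ids"], dev["test"]["ids"])
```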
#### File: protein-transformer/scripts/proteinnet_to_data.py
```python
import torch
import argparse
import re
import prody as pr
ASTRAL_FILE = "/home/jok120/proteinnet/data/dir.cla.scope.2.07-stable.txt"
def get_pdbid_from_astral_db(domain):
"""
    Given an ASTRAL domain name, search the module-level ASTRAL parseable file
    data and return the (pdbid, description) associated with it.
    """
pattern = domain + r"\s+(?P<pdbid>\S{4})\s+(?P<desc>\S+)"
m = re.search(pattern, ASTRAL_FILE_DATA)
print(domain, m.group('pdbid'), m.group('desc'))
return m.group('pdbid'), m.group('desc')
def main():
d = torch.load(args.input_pn_dict)
for pnid, data in d.items():
try:
pdb_id, model_id, chain_id = pnid.split("_")
except ValueError:
print(pnid)
continue
print(pdb_id, model_id, chain_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parses the ProteinNet dictionary for PDB IDs so they may be "
"downloaded and processed for the all-atom ProteinTransformer.")
parser.add_argument('input_pn_dict', type=str, help='Path to PN-parsed dictionary file')
parser.add_argument("--pdb_dir", default="/home/jok120/pdb/", type=str, help="Path for ProDy-downloaded PDB files.")
args = parser.parse_args()
with open(ASTRAL_FILE, "r") as f:
ASTRAL_FILE_DATA = f.read()
pr.pathPDBFolder(args.pdb_dir)
main()
```
#### File: scripts/tests/align_dataset_to_proteinnet_test.py
```python
import sys
sys.path.append("scripts")
from align_dataset_to_proteinnet import *
# from ..align_dataset_to_proteinnet import *
import pytest
global_aligner = init_aligner()
@pytest.fixture
def aligner():
return global_aligner
@pytest.mark.parametrize("target, mobile, mask",[
("AAAAAAAAGAPAAAAAAA", "AAAAAAAAAAAAAAA", "++++++++---+++++++"),
("STARTAAAAAAAAAGAPAAAAAA", "AAAAAAAAAAAAAAA", "-----+++++++++---++++++"),
("STARTAAAAAAAGAAAAPAAAAAAAAAEND", "AAAAAAAAAAAAAAAA", '-----+++++++------+++++++++---')
])
def test_get_mask_from_alignment(target, mobile, mask):
a = init_aligner()
a1 = a.align(target, mobile)[0]
computed_mask = get_mask_from_alignment(a1)
assert mask == computed_mask
@pytest.mark.parametrize("pn_seq, my_seq, pn_mask", [
("AAAAAAAAGAPAAAAAAA", "AAAAAAAAAAAAAAA", "++++++++---+++++++"),
("STARTAAAAAAAAAGAPAAAAAA", "AAAAAAAAAAAAAAA", "-----+++++++++---++++++"),
("STARTAAAAAAAGAAAAPAAAAAAAAAEND", "AAAAAAAAAAAAAAAA", '-----+++++++------+++++++++---')
])
def test_can_be_directly_merged(aligner, pn_seq, my_seq, pn_mask):
assert can_be_directly_merged(aligner, pn_seq, my_seq, pn_mask)[0]
@pytest.mark.parametrize("pn_seq, my_seq, pn_mask", [
("AAAAAAAAGAPAAAAAAA", "AAAAAAAAAAAAAAAA", "++++++++---+++++++"),
("STARTAAAAAAAAAGAPAAAAAA", "AAAAAAAAAAAAAAA", "-----+++++++++---+++++-"),
("STARTAAAAAAAGAAAAPAAAAAAAAAEND", "AAAAAAAAAAAAAAAA", '-----+++++++--+---+++++++++---')
])
def test_not_can_be_directly_merged(aligner, pn_seq, my_seq, pn_mask):
assert not can_be_directly_merged(aligner, pn_seq, my_seq, pn_mask)[0]
```
{
"source": "joegotflow83/guitar_tabs",
"score": 3
}
#### File: guitar_tabs/main/views.py
```python
from django.views.generic import TemplateView
from bs4 import BeautifulSoup
import requests
class Search(TemplateView):
"""Page to display search for guitar tabs"""
template_name = 'main/index.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
search = self.request.GET.get('search', False)
contents = requests.get("http://www.guitartabs.cc/search.php?song={}".format(search)).content
soup = BeautifulSoup(contents, "html.parser").find(class_='tabslist')
context['songs'] = soup.prettify()
return context
class Tabs(TemplateView):
"""Display tabs from song"""
template_name = 'main/tabs.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
song = requests.get("http://www.guitartabs.cc/{}".format(self.kwargs['url'])).content
context['song'] = song
soup = BeautifulSoup(song, "html.parser")
context['tabs'] = [tab.prettify() for tab in soup.find_all("pre")]
return context
```
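For context, a hypothetical `urls.py` showing how these two views could be wired up; the route patterns and names below are illustrative, not taken from the repository. The `Tabs` view relies on the captured `url` kwarg to build the guitartabs.cc request.

```python
from django.conf.urls import url

from main.views import Search, Tabs

urlpatterns = [
    url(r'^$', Search.as_view(), name='search'),
    url(r'^tab/(?P<url>.+)$', Tabs.as_view(), name='tabs'),
]
```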
{
"source": "joe-graham/asadbg",
"score": 3
}
#### File: joe-graham/asadbg/curr_thread.py
```python
try:
import gdb
except ImportError:
print("[curr_thread] Not running inside of GDB, exiting...")
exit()
import sys
def get_inferior():
try:
if len(gdb.inferiors()) == 0:
print("No gdb inferior could be found.")
return -1
else:
inferior = gdb.inferiors()[0]
return inferior
except AttributeError:
print("This gdb's python support is too old.")
exit()
def find_current_thread():
inf = get_inferior()
dat = gdb.execute("info threads", False, True)
lines = dat.split("\n")
    for L in lines:
        elts = L.split()
        # "info threads" output can include blank lines; skip them before
        # checking for the "*" marker on the current thread.
        if not elts:
            continue
        if elts[0] == "*":
            print("thread %s" % elts[1])
            break
print("Finding current thread...")
find_current_thread()
```
#### File: joe-graham/asadbg/find_lina_pid.py
```python
try:
import gdb
except ImportError:
print("[find_lina_pid] Not running inside of GDB, exiting...")
exit()
import sys
def get_inferior():
try:
if len(gdb.inferiors()) == 0:
print("No gdb inferior could be found.")
return -1
else:
inferior = gdb.inferiors()[0]
return inferior
except AttributeError:
print("This gdb's python support is too old.")
exit()
def find_lina_pid():
inf = get_inferior()
pid = 0
found = 0
maxpids = 0x300 # usually lina is around 500-530
while pid < maxpids:
#print("%d" % pid)
pid += 1
dat = gdb.execute("info proc cmdline %d" % pid, False, True)
#if "unable to open" not in dat:
# print(dat)
found = dat.find("lina'")
if found != -1:
idx1 = len("process ")
idx2 = dat.find("\n")
pid = dat[idx1:idx2]
print("%d" % int(pid))
break
print("Finding lina PID:")
find_lina_pid()
print("You can use the following to see the lina mapping: info proc mappings <pid>")
```
#### File: joe-graham/asadbg/ignore_errors.py
```python
import gdb  # explicit import; GDB also injects this module when sourcing scripts

class IgnoreErrorsCommand (gdb.Command):
"""Execute a single command, ignoring all errors.
Only one-line commands are supported.
This is primarily useful in scripts."""
def __init__ (self):
super (IgnoreErrorsCommand, self).__init__ ("ignore-errors",
gdb.COMMAND_OBSCURE,
# FIXME...
gdb.COMPLETE_COMMAND)
def invoke (self, arg, from_tty):
try:
gdb.execute (arg, from_tty)
except:
pass
IgnoreErrorsCommand ()
```
{
"source": "Joegratz/mqtt-home-automation",
"score": 3
}
#### File: Joegratz/mqtt-home-automation/powermate2mqtt.py
```python
import paho.mqtt.client as mqtt
from powermate import Powermate, PowermateDelegate
import time
import json
# Change the following line to the Bluetooth address of your PowerMate.
# To find the address, run: sudo hcitool lescan
POWERMATE_ADDRESS = '00:12:92:08:2B:59'
class PrintEvents(PowermateDelegate):
def __init__(self, addr, mqttClient):
self.addr = addr
self.mqttClient = mqttClient
def on_connect(self):
self.mqttClient.publish('powermateBluetooth/status', json.dumps({'type': 'connected', 'address': self.addr}))
def on_disconnect(self):
self.mqttClient.publish('powermateBluetooth/status', json.dumps({'type': 'disconnected', 'address': self.addr}))
def on_battery_report(self, val):
self.mqttClient.publish('powermateBluetooth/status', json.dumps({'type': 'batteryPercentage', 'value': val}))
def on_press(self):
self.mqttClient.publish('powermateBluetooth/interaction', json.dumps({'type': 'buttonDown'}))
def on_long_press(self, t):
self.mqttClient.publish('powermateBluetooth/interaction', json.dumps({'type': 'buttonUp', 'pressDuration': t}))
def on_clockwise(self):
self.mqttClient.publish('powermateBluetooth/interaction', json.dumps({'type': 'turn', 'direction': 'clockwise', 'buttonPressed': False}))
def on_counterclockwise(self):
self.mqttClient.publish('powermateBluetooth/interaction', json.dumps({'type': 'turn', 'direction': 'counterclockwise', 'buttonPressed': False}))
def on_press_clockwise(self):
self.mqttClient.publish('powermateBluetooth/interaction', json.dumps({'type': 'turn', 'direction': 'clockwise', 'buttonPressed': True}))
def on_press_counterclockwise(self):
self.mqttClient.publish('powermateBluetooth/interaction', json.dumps({'type': 'turn', 'direction': 'counterclockwise', 'buttonPressed': True}))
mqttClient = mqtt.Client()
mqttClient.connect('127.0.0.1')
mqttClient.loop_start()
p = Powermate(POWERMATE_ADDRESS, PrintEvents(POWERMATE_ADDRESS, mqttClient))
while True:
time.sleep(5)
p.stop()
```
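A small companion sketch (not part of the repository) that subscribes to the topics published above, useful for watching the knob events arrive on the local broker:

```python
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    print(msg.topic, msg.payload.decode())

listener = mqtt.Client()
listener.on_message = on_message
listener.connect('127.0.0.1')
listener.subscribe('powermateBluetooth/#')  # status and interaction topics
listener.loop_forever()
```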
{
"source": "joe-greenawalt/skulpt",
"score": 2
}
#### File: skulpt/doc/simple.py
```python
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
import os
from django.utils import simplejson
from google.appengine.ext import db
class MainPage(webapp.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
path = os.path.join(os.path.dirname(__file__), 'index.html')
self.response.out.write(open(path).read())
class TurtlePage(webapp.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
path = os.path.join(os.path.dirname(__file__), 'turtle.html')
self.response.out.write(open(path).read())
class IdePage(webapp.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
path = os.path.join(os.path.dirname(__file__), 'ide/index.html')
self.response.out.write(open(path).read())
class TestResult(db.Model):
browsername = db.StringProperty()
browserversion = db.StringProperty()
browseros = db.StringProperty()
version = db.StringProperty()
rc = db.StringProperty()
results = db.TextProperty()
date = db.DateTimeProperty(auto_now_add=True)
class TestResults(webapp.RequestHandler):
def post(self):
data = simplejson.loads(self.request.body)
tr = TestResult()
tr.browsername = str(data['browsername'])
tr.browserversion = str(data['browserversion'])
tr.browseros = str(data['browseros'])
tr.version = str(data['version'])
tr.rc = str(data['rc'])
tr.results = str(data['results'])
tr.put()
self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(simplejson.dumps({'result': 'ok'}))
application = webapp.WSGIApplication(
[('/', MainPage),
('/testresults', TestResults),
('/turtle', TurtlePage),
('/ide', IdePage)
],
debug=False)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
```
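A client-side sketch (not from the repository) of the JSON document the `/testresults` handler reads; the host, port, and field values are illustrative.

```python
import requests

payload = {
    "browsername": "Firefox",
    "browserversion": "3.6",
    "browseros": "Linux",
    "version": "0.1",
    "rc": "rc1",
    "results": "...",
}
resp = requests.post("http://localhost:8080/testresults", json=payload)
print(resp.text)
```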
{
"source": "joegreene/deap-practice",
"score": 3
}
#### File: deap-practice/src/make_algebraic.py
```python
def add(a: str, b: str) -> str:
'''Converts function with the format 'add(a, b)' to 'a + b'.'''
return f'({a} + {b})'
def neg(a: str) -> str:
'''Converts function with the format 'neg(a)' to '-a'.'''
return f'-{a}'
def sub(a: str, b: str) -> str:
'''Converts function with the format 'sub(a, b)' to 'a - b'.'''
return f'({a} - {b})'
def div(a: str, b: str) -> str:
'''Converts function with the format 'div(a, b)' to 'a / b'.'''
return f'({a} / {b})'
def mul(a: str, b: str) -> str:
'''Converts function with the format 'mul(a, b)' to 'a * b'.'''
return f'({a} * {b})'
def make_algebraic(candidate: 'deap.creator.Individual') -> str:
'''Converts a candidate function (generated by deap.algorithms.eaSimple) to its algebraic form.
This works with a little bit of eval trickery and recursion. For example, given the following
winner candidate:
add(mul(mul(x, x), x), 2)
make_algebraic takes a depth-first approach and evaluates the expression as follows:
        add('mul(mul(x, x), x)', 2)
          > mul('mul(x, x)', 'x')
            > mul('x', 'x') = '(x * x)'
          > mul('(x * x)', 'x') = '((x * x) * x)'
        add('((x * x) * x)', 2) = '(((x * x) * x) + 2)'
    End result:
        '(((x * x) * x) + 2)'
'''
x = 'x' # pylint: disable=unused-variable
return eval(str(candidate))
```
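A quick usage sketch; the import path is an assumption, and the string below stands in for `str(candidate)` of a DEAP individual:

```python
from make_algebraic import make_algebraic  # hypothetical module path

winner = "add(mul(mul(x, x), x), 2)"
print(make_algebraic(winner))  # -> (((x * x) * x) + 2)
```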
{
"source": "JoeGreiner/MorphologicalOperatorsDemo",
"score": 4
}
#### File: MorphologicalOperatorsDemo/InteractiveBreak_Filtering/showAnswerFiltering.py
```python
from IPython.display import display
from ipywidgets import widgets
button = widgets.Button(description="Click for answer")
output = widgets.Output()
display(button, output)
already_clicked = False
def on_button_clicked(b):
global already_clicked, button
if not already_clicked:
already_clicked = True
button.description = 'Answer:'
with output:
print('We added Salt & Pepper noise (minimum value – pepper – black pixels;'
                  ' maximum value – salt – white pixels).\n\n'
'The median filter will sort the elements within the mask'
' and then return the center value of the sorted elements.\n\n'
'Therefore, by construction, it is unlikely that the median filter chooses a noisy pixel,\nas noisy pixels'
' are likely at the very begin/end of the sorted elements; and not a center pixel.')
button.on_click(on_button_clicked)
```
#### File: InteractiveBreak_MorphologicalOps/helpers/morphology.py
```python
import numpy as np
from IPython.display import display  # display() is used by the widget classes below
from ipywidgets import widgets
import matplotlib.pyplot as plt
from itk import (binary_dilate_image_filter, binary_morphological_closing_image_filter,
                 binary_morphological_opening_image_filter, binary_erode_image_filter,
                 GetArrayFromImage, GetImageFromArray, label_image_to_shape_label_map_filter)
class OpenCloseWidget():
def __init__(self):
self.image = np.zeros((50,50), dtype=np.uint8)
self.image[13, 13] = 255
self.image[20:30, 20:23] = 255
self.image[30, 21] = 255
self.image[31:40, 20:23] = 255
self.image_copy = self.image
reset_button = widgets.Button(description='Reset Image')
open_button = widgets.Button(description='Open')
close_button = widgets.Button(description='Close')
reset_button.on_click(self.reset)
open_button.on_click(self.opening)
close_button.on_click(self.closing)
display(widgets.HBox([open_button, close_button, reset_button]))
self.fig = plt.figure(figsize=(5,5))
self.img_obj = plt.imshow(self.image, origin='lower')
plt.clim((0,255))
def opening(self, event):
itk_image = GetImageFromArray(self.image)
self.image = GetArrayFromImage(binary_morphological_opening_image_filter(itk_image))
self.img_obj.set_data(self.image)
def closing(self, event):
itk_image = GetImageFromArray(self.image)
self.image = GetArrayFromImage(binary_morphological_closing_image_filter(itk_image))
self.img_obj.set_data(self.image)
def reset(self, event):
self.image = self.image_copy
self.img_obj.set_data(self.image_copy)
class DilateErodeWidget():
def __init__(self):
self.image = np.zeros((50,50), dtype=np.uint8)
self.image[13, 13] = 255
self.image[20:30, 20:23] = 255
self.image[30, 21] = 255
self.image[31:40, 20:23] = 255
self.image_copy = self.image
reset_button = widgets.Button(description='Reset Image')
dilate_button = widgets.Button(description='Dilate')
erode_button = widgets.Button(description='Erode')
dilate_button.on_click(self.dilate)
erode_button.on_click(self.erode)
reset_button.on_click(self.reset)
display(widgets.HBox([dilate_button, erode_button, reset_button]))
self.fig = plt.figure(figsize=(5,5))
self.img_obj = plt.imshow(self.image, origin='lower')
plt.clim((0,255))
def dilate(self, event):
itk_image = GetImageFromArray(self.image)
self.image = GetArrayFromImage(binary_dilate_image_filter(itk_image))
self.img_obj.set_data(self.image)
def erode(self, event):
itk_image = GetImageFromArray(self.image)
self.image = GetArrayFromImage(binary_erode_image_filter(itk_image))
self.img_obj.set_data(self.image)
def reset(self, event):
self.image = self.image_copy
self.img_obj.set_data(self.image_copy)
class Drawer():
def __init__(self, paint_width=1, paint_value = 255, erase_value=0):
self.drawing = False
self.paint_width = paint_width
self.paint_value = paint_value
self.erase_value = erase_value
self.image = self.create_image()
dilate_button = widgets.Button(description='Dilate')
erode_button = widgets.Button(description='Erode')
open_button = widgets.Button(description='Open')
close_button = widgets.Button(description='Close')
reset_button = widgets.Button(description='Reset Image')
dilate_button.on_click(self.dilate)
erode_button.on_click(self.erode)
open_button.on_click(self.opening)
close_button.on_click(self.closing)
reset_button.on_click(self.reset)
display(widgets.HBox([dilate_button,erode_button, open_button, close_button, reset_button]))
self.fig = plt.figure(figsize=(5,5))
self.img_obj = plt.imshow(self.image, origin='lower')
plt.clim((0,255))
plt.show()
self.fig.canvas.mpl_connect('button_press_event', self.onclick)
self.fig.canvas.mpl_connect('button_release_event', self.onrelease)
self.fig.canvas.mpl_connect('motion_notify_event', self.onmove)
def create_image(self):
return np.zeros((100,100), dtype=np.uint8)
def dilate(self, event):
itk_image = GetImageFromArray(self.image)
self.image = GetArrayFromImage(binary_dilate_image_filter(itk_image))
self.img_obj.set_data(self.image)
def erode(self, event):
itk_image = GetImageFromArray(self.image)
self.image = GetArrayFromImage(binary_erode_image_filter(itk_image))
self.img_obj.set_data(self.image)
def reset(self, event):
self.image = self.create_image()
self.img_obj.set_data(self.image)
def opening(self, event):
itk_image = GetImageFromArray(self.image)
self.image = GetArrayFromImage(binary_morphological_opening_image_filter(itk_image))
self.img_obj.set_data(self.image)
def closing(self, event):
itk_image = GetImageFromArray(self.image)
self.image = GetArrayFromImage(binary_morphological_closing_image_filter(itk_image))
self.img_obj.set_data(self.image)
def onclick(self, event):
self.drawing = True
if event.button == 1:
self.draw_point(int(event.xdata), int(event.ydata), self.paint_value)
elif event.button == 3:
self.draw_point(int(event.xdata), int(event.ydata), self.erase_value)
def onmove(self, event):
if event.button == 1:
self.draw_point(int(event.xdata), int(event.ydata), self.paint_value)
elif event.button == 3:
self.draw_point(int(event.xdata), int(event.ydata), self.erase_value)
def draw_point(self, ix, iy, value):
if self.drawing == True:
if self.paint_width == 0:
self.image[iy, ix] = value
self.img_obj._A.data[iy, ix] = value
else:
self.image[iy-self.paint_width : iy+self.paint_width,
ix-self.paint_width : ix+self.paint_width] = value
self.img_obj._A.data[iy-self.paint_width : iy+self.paint_width,
ix-self.paint_width : ix+self.paint_width] = value
plt.draw()
def onrelease(self, event):
self.drawing=False
```
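Typical notebook usage, as a sketch: the import path assumes the notebook sits next to the `helpers` package, and an interactive matplotlib backend (e.g. `%matplotlib widget`) must be active for the buttons and drawing canvas to respond.

```python
from helpers.morphology import OpenCloseWidget, Drawer  # path is an assumption

OpenCloseWidget()               # Open / Close / Reset buttons on a fixed test image
drawer = Drawer(paint_width=2)  # left-click paints, right-click erases
```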
{
"source": "joegrover/exercism",
"score": 3
}
#### File: joegrover/exercism/mock_requests.py
```python
import requests
import pandas
import random
sv_df = pandas.read_csv("sample_variables.csv")
host = "https://jgrover.hiring.ipums.org"
usa_endpoint = host + "/publish_usa"
nhgis_endpoint = host + "/publish_nhgis"
samples = set(sv_df["sample_name"].to_list())
variables = set(sv_df["variable_mnemonic"].to_list())
data_tables = set(["1790_cPop.dt1", "1790_cPop.dt8", "1800_cPop.dt1", "1800_cPop.dt9"])
users = ["glip", "glop", "rocko", "heffer", "Filburt"]
def mock_usa_request(repeats=2):
    # random.sample() no longer accepts sets (support was removed in Python
    # 3.11), so convert to a list before sampling.
    k_s = random.randint(1, len(samples))
    s = random.sample(list(samples), k=k_s)
    k_v = random.randint(1, len(variables))
    v = random.sample(list(variables), k=k_v)
u = random.choice(users)
params = {
"samples[]": s,
"variables[]": v,
"user": u,
"submit": "Submit",
}
# XXX Need to repeat here until I figure out the whole client-only
# -gets-every-other-message RabbitMQ issue.
for _ in range(repeats):
resp = requests.get(usa_endpoint, params=params)
print(resp)
return resp
def mock_nhgis_request(repeats=1):
k_d = random.randint(1, len(data_tables))
    d = random.sample(list(data_tables), k=k_d)  # list() for Python 3.11+ compatibility
u = random.choice(users)
params = {
"data_tables[]": d,
"user": u,
"submit": "Submit",
}
# XXX Need to repeat here until I figure out the whole client-only
# -gets-every-other-message RabbitMQ issue.
for _ in range(repeats):
resp = requests.get(nhgis_endpoint, params=params)
print(resp)
return resp
```
{
"source": "JoeGruffins/tinydecred",
"score": 3
}
#### File: decred/util/encode.py
```python
import struct
from decred import DecredError
NONE = "None".encode()
def filterNone(b):
"""
If the provided argument is None, return the special None indicator bytes.
Otherwise, the argument is returned directly.
Args:
b (bytes-like or None): The bytes to filter.
Returns:
bytes-like
"""
if b is None:
return NONE
return b
def extractNone(b):
"""
If the provided bytes are the special None indicator, return None, else
return the bytes.
Args:
b (bytes-like): The bytes to filter.
Returns:
(bytes-like or None)
"""
if b == NONE:
return None
return b
def intToBytes(i, signed=False):
"""
Encodes an integer to bytes.
Args:
i (int): The integer.
signed (bool): Whether to encode as a signed integer.
Returns:
bytearray: The encoded integer.
"""
length = ((i + ((i * signed) < 0)).bit_length() + 7 + signed) // 8
return bytearray(i.to_bytes(length, byteorder="big", signed=signed))
def intFromBytes(b, signed=False):
"""
Decodes an integer from bytes.
Args:
b (bytes-like): The encoded integer.
signed (bool): Whether to decode as a signed integer.
Returns:
int: The decoded integer.
"""
return int.from_bytes(b, "big", signed=signed)
def floatToBytes(flt):
"""
Encodes a float to bytes.
Args:
flt (float): The float to encode.
Returns:
bytearray: The encoded float.
"""
return bytearray(struct.pack("d", flt))
def floatFromBytes(b):
"""
Decode a float from bytes.
Args:
b (bytes-like): The float bytes to decode.
Returns:
float: The decoded float.
"""
return struct.unpack("d", b)[0]
def boolToBytes(v):
"""
Encode the boolean value as a byte.
Args:
v (bool): A boolean to encode.
Returns:
int: A byte.
"""
return 0x01 if v else 0x00
def boolFromBytes(b):
"""
Decode the byte as True if 0x01, else False.
Args:
b (bytes-like): A length-1 byte buffer with the encoded boolean.
Returns:
bool: The decoded value.
"""
return b == 0x01
def decodeBA(b, copy=False):
"""
Decode into a bytearray.
Args:
b (str, bytes-like, ByteArray, int, list(int)): The value to decode to
a bytearray. Strings are interpreted as hexadecimal. Integers are
minimally encoded to an unsigned integer.
Returns:
bytearray: The decoded bytes.
"""
if isinstance(b, ByteArray):
return bytearray(b.b) if copy else b.b
if isinstance(b, bytearray):
return bytearray(b) if copy else b
if isinstance(b, bytes):
return bytearray(b)
if isinstance(b, int):
return intToBytes(b) if b else bytearray([0])
if isinstance(b, str):
return bytearray.fromhex(b)
if hasattr(b, "__iter__"):
return bytearray(b)
raise TypeError("decodeBA: unknown type %s" % type(b))
class ByteArray:
"""
ByteArray is a bytearray manager. It implements a subset of bytearray's
bitwise operators and provides some convenience decodings on the fly, so
operations work with various types of input. Since bytearrays are mutable,
ByteArray can also zero the internal value without relying on garbage
    collection. An important difference between ByteArray and bytearray is that
    an integer argument to the ByteArray constructor results in the shortest
    possible byte representation of the integer, whereas for bytearray an int
    argument results in a zero-valued bytearray of that length. To get a
zero-valued or zero-padded ByteArray of length n, use the `length` keyword
argument.
"""
def __init__(self, b=b"", copy=True, length=None):
"""
Set copy to False if you want to share the memory with another
bytearray/ByteArray. If the type of b is not bytearray or ByteArray,
copy has no effect.
"""
if length:
self.b = decodeBA(ByteArray(bytearray(length)) | b, copy=False)
else:
self.b = decodeBA(b, copy=copy)
@staticmethod
def unblob(b):
"""Satisfies the encode.Blobber API"""
return ByteArray(b)
@staticmethod
def blob(ba):
"""Satisfies the encode.Blobber API"""
return ba.b
def comp(self, a):
"""
comp gets the underlying bytearray and length of both this ByteArray
and a.
Args:
a (ByteArray): The other ByteArray.
Returns:
bytearray: This ByteArray's bytearray.
int: This ByteArray's length.
bytearray: The other ByteArray's bytearray.
int: The other ByteArray's length.
"""
a = decodeBA(a)
aLen, bLen = len(a), len(self.b)
if aLen > bLen:
raise DecredError("decode: invalid length %i > %i" % (aLen, bLen))
return a, aLen, self.b, bLen
def __lt__(self, a):
return bytearray.__lt__(self.b, decodeBA(a))
def __le__(self, a):
return bytearray.__le__(self.b, decodeBA(a))
def __eq__(self, a):
try:
return bytearray.__eq__(self.b, decodeBA(a))
except Exception:
return False
def __ne__(self, a):
try:
return bytearray.__ne__(self.b, decodeBA(a))
except Exception:
return True
def __ge__(self, a):
return bytearray.__ge__(self.b, decodeBA(a))
def __gt__(self, a):
return bytearray.__gt__(self.b, decodeBA(a))
def __repr__(self):
return "ByteArray(" + self.hex() + ")"
def __len__(self):
return len(self.b)
def __and__(self, a):
a, aLen, b, bLen = self.comp(a)
b = ByteArray(b)
for i in range(bLen):
b[bLen - i - 1] &= a[aLen - i - 1] if i < aLen else 0
return b
def __iand__(self, a):
a, aLen, b, bLen = self.comp(a)
for i in range(bLen):
b[bLen - i - 1] &= a[aLen - i - 1] if i < aLen else 0
return self
def __or__(self, a):
a, aLen, b, bLen = self.comp(a)
b = ByteArray(b)
for i in range(bLen):
b[bLen - i - 1] |= a[aLen - i - 1] if i < aLen else 0
return b
def __ior__(self, a):
a, aLen, b, bLen = self.comp(a)
for i in range(bLen):
b[bLen - i - 1] |= a[aLen - i - 1] if i < aLen else 0
return self
def __add__(self, a):
return self.__iadd__(a)
def __iadd__(self, a):
"""append the bytes and return a new ByteArray"""
a = decodeBA(a)
return ByteArray(self.b + a)
def __getitem__(self, k):
if isinstance(k, slice):
return ByteArray(self.b[k.start : k.stop : k.step], copy=False)
return self.b[k]
def __setitem__(self, i, v):
v = decodeBA(v, copy=False)
if i + len(v) > len(self.b):
raise DecredError("source bytes too long")
for j in range(len(v)):
self.b[i + j] = v[j]
def __reversed__(self):
return ByteArray(bytearray(reversed(self.b)))
def __hash__(self):
"""Enables ByteArray to be a dict key."""
return hash(bytes(self.b))
def hex(self):
"""
A hexadecimal string representation of the bytes.
Returns:
str: The hex bytes.
"""
return self.b.hex()
def rhex(self):
"""
A reversed hexadecimal string representation of the bytes.
Returns:
str: The hex bytes.
"""
return self.__reversed__().hex()
def zero(self):
"""
Sets the bytes of the underlying bytearray to zero. The benefit of
zeroing is that the info is destroyed immediately, rather than relying
on the garbage collector.
"""
for i in range(len(self.b)):
self.b[i] = 0
def iszero(self):
"""
True if all bytes are zero.
"""
return all((v == 0 for v in self.b))
def iseven(self):
"""
True if empty or if last byte is zero.
"""
l = len(self.b)
return l == 0 or self.b[l - 1] == 0
def int(self):
"""The bytes as an integer."""
return intFromBytes(self.b)
def bytes(self):
"""The bytes as Python `bytes`."""
return bytes(self.b)
def unLittle(self):
"""A copy of the ByteArray, reversed."""
return self.littleEndian()
def littleEndian(self):
"""A copy of the ByteArray, reversed."""
return ByteArray(reversed(self.b))
def copy(self):
"""A copy of the ByteArray."""
return ByteArray(self.b)
def pop(self, n):
"""
Remove n bytes from the beginning of the ByteArray, returning the bytes.
"""
b = self[:n]
self.b = self.b[n:]
return b
def rba(*a, **k):
"""
Reversed ByteArray. All args and kwargs are passed to the ByteArray
constructor.
"""
return reversed(ByteArray(*a, **k))
class BuildyBytes(ByteArray):
"""
The BuildyBytes class is used to construct (optionally versioned) linearly-
encoded 2-D byte arrays.
"""
def __init__(self, version=None):
"""
Constructor for a BuildyBytes.
Args:
            version (int): optional. The version to encode. Default encodes no
version byte.
"""
if version == 0:
version = [0x00]
if version is None:
version = []
super().__init__(version)
def addData(self, d):
"""
addData adds the data to the BuildyBytes. self is returned to enable
chaining. The data has hard-coded length limit of uint16_max = 65535
bytes.
"""
d = decodeBA(d)
lenBytes = intToBytes(len(d))
bLen = len(lenBytes)
if bLen > 2:
raise DecredError("cannot push data longer than 65535")
if bLen == 2:
lBytes = bytearray((0xFF, lenBytes[0], lenBytes[1]))
elif bLen == 1:
lBytes = lenBytes
elif bLen == 0:
lBytes = bytearray((0x00,))
self.b += lBytes + d
return self
def extractPushes(b):
"""
Parses the linearly-encoded 2D byte array into a list of byte arrays.
Args:
b (bytes-like): The linearly encoded 2-D byte array.
Returns:
list(bytes-like): The 2-D byte array.
"""
pushes = []
while True:
if len(b) == 0:
break
bLen = b[0]
b = b[1:]
if bLen == 255:
if len(b) < 2:
raise DecredError("2 bytes not available for uint16 data length")
bLen = intFromBytes(b[:2])
b = b[2:]
if len(b) < bLen:
raise DecredError("data too short for pop of %d bytes" % bLen)
pushes.append(b[:bLen])
b = b[bLen:]
return pushes
def decodeBlob(b):
"""
decodeBlob decodes a versioned blob into its version and the pushes extracted
from its data.
Args:
b (bytes-like): The bytes to decode.
Returns:
int: The blob version (the version passed to BuildyBytes).
list(bytes-like): The data pushes.
"""
if len(b) == 0:
raise DecredError("zero length blob not allowed")
return b[0], extractPushes(b[1:])
def unblobStrList(b):
"""
Decode a list of strings from the bytes.
Args:
        b (bytes-like): The encoded list.
Returns:
list(str): The decoded list.
"""
return [s.decode("utf-8") for s in extractPushes(b)]
def blobStrList(strs):
"""
    Encode a list of strings as bytes.
    Args:
        strs (list(str)): The strings to encode.
Returns:
bytearray: The encoded list.
"""
b = BuildyBytes()
for s in strs:
b.addData(s.encode("utf-8"))
return b.b
def unblobCheck(class_name, version, pushes, check_data):
"""
Check version and pushes to unblob.
Args:
        class_name (str): The class name that will appear in error messages.
        version (int): The version number that will be checked.
        pushes (int): The number of pushes that will be checked.
        check_data (dict): Keys are version numbers, values are the number of
            expected pushes.
Raises:
NotImplementedError if version is not in check_data keys.
DecredError if pushes is not the value in check_data keyed by version.
"""
if version not in check_data.keys():
raise NotImplementedError(f"{class_name}: unsupported version {version}")
expected_pushes = check_data[version]
if pushes != expected_pushes:
raise DecredError(
f"{class_name}: expected {expected_pushes} pushes, got {pushes}"
)
```
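A round-trip sketch of the versioned blob format defined above (length-prefixed pushes behind a one-byte version), assuming the `decred` package is installed so the import resolves:

```python
from decred.util.encode import BuildyBytes, ByteArray, decodeBlob

blob = BuildyBytes(0).addData(ByteArray("0102")).addData(5).addData("beef").b
version, pushes = decodeBlob(blob)
print(version)                    # 0
print([p.hex() for p in pushes])  # ['0102', '05', 'beef']
```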
#### File: decred/wallet/accounts.py
```python
from decred import DecredError
from decred.crypto import crypto
from decred.util import chains, encode, helpers
EXTERNAL_BRANCH = 0
INTERNAL_BRANCH = 1
ACCOUNT_GAP_LIMIT = 10
DEFAULT_ACCOUNT_NAME = "default"
log = helpers.getLogger("ACCTS")
def checkBranchKeys(acctKey):
"""
    Raise an exception if the seed cannot produce valid branch keys.
checkBranchKeys ensures deriving the extended keys for the internal and
external branches given an account key does not result in an invalid child
error which means the chosen seed is not usable. This conforms to the
hierarchy described by BIP0044 so long as the account key is already derived
accordingly.
In particular this is the hierarchical deterministic extended key path:
m/44'/<coin type>'/<account>'/<branch>
The branch is 0 for external addresses and 1 for internal addresses.
Args:
acctKey (crypto.ExtendedKey): An account's extended key.
"""
# Derive the external branch as the first child of the account key.
acctKey.child(EXTERNAL_BRANCH)
# Derive the interal branch as the second child of the account key.
acctKey.child(INTERNAL_BRANCH)
class AccountManager:
"""
The AccountManager provides generation, organization, and other management
of Accounts.
"""
def __init__(
self, coinType, coinKeyEnc, netName, db=None, signals=None,
):
"""
Args:
coinType (int): The BIP-0044 coin type.
coinKeyEnc (ByteArray): The encrypted, serialized extended key.
netName (string): Network name. "mainnet", "testnet", etc.
db (database.Bucket): optional. The database bucket. If specified,
the db will be loaded.
signals (Signal): optional. The UI callbacks. Should be
included if db is specified.
"""
# The crypto keys are used to decrypt the other keys.
self.coinType = coinType
self.coinKeyEnc = coinKeyEnc
        # The network name and its parameters.
self.netName = netName
self.netParams = chains.NetworkParams[self.coinType][self.netName]
self.watchingOnly = False
self.node = None
self.acctDB = None
self.signals = None
self.accounts = {}
if db is not None:
self.load(db, signals)
@staticmethod
def blob(bal):
"""Satisfies the encode.Blobber API"""
return (
encode.BuildyBytes(0)
.addData(bal.coinType)
.addData(bal.coinKeyEnc)
.addData(bal.netName.encode("utf-8"))
.addData(encode.boolToBytes(bal.watchingOnly))
.b
)
@staticmethod
def unblob(b):
"""Satisfies the encode.Blobber API"""
ver, d = encode.decodeBlob(b)
encode.unblobCheck("AccountManager", ver, len(d), {0: 4})
am = AccountManager(
coinType=encode.intFromBytes(d[0]),
coinKeyEnc=encode.ByteArray(d[1]),
netName=d[2].decode("utf-8"),
)
am.watchingOnly = encode.boolFromBytes(d[3])
return am
def serialize(self):
"""
Serialize the AccountManager.
Returns:
ByteArray: The serialized AccountManager.
"""
return encode.ByteArray(AccountManager.blob(self))
def load(self, db, signals):
"""
Set up the database and set the UI signals.
Args:
db (database.Bucket): The database bucket.
signals (Signal): The UI signals.
"""
blobber = chains.AccountConstructors[self.coinType]
self.acctDB = db.child("accts", datatypes=("INTEGER", "BLOB"), blobber=blobber)
self.signals = signals
blockchain = chains.chain(self.coinType)
for idx, acct in self.acctDB.items():
self.accounts[idx] = acct
            db = self.dbForAcctIdx(idx)
            acct.load(db, blockchain, self.signals)
def setNode(self, node):
"""
Set the dcrd connection for the account.
Args:
node (LocalNode): A connected LocalNode.
"""
self.node = node
for acct in self.accounts.values():
acct.setNode(node)
def coinKey(self, cryptoKey):
"""
Decrypt the coin-type extended key.
Args:
cryptoKey (crypto.SecretKey): The master encryption key.
Returns:
ByteArray:
"""
return crypto.decodeExtendedKey(self.netParams, cryptoKey, self.coinKeyEnc)
def dbForAcctIdx(self, idx):
"""
Get the database bucket for the specified index.
Args:
idx (int): The account index.
Returns:
database.Bucket: The account bucket.
"""
return self.acctDB.child(str(idx), table=False)
def addAccount(self, cryptoKey, acctName):
"""
Add a new account and return its index.
Args:
cryptoKey (ByteArray): The master encoding key.
acctName: A name for the account.
Returns:
Account: The account.
"""
idx = len(self.acctDB)
coinExtKey = self.coinKey(cryptoKey)
db = self.dbForAcctIdx(idx)
acct = createAccount(
cryptoKey,
coinExtKey,
self.coinType,
idx,
self.netName,
acctName,
db,
self.signals,
)
blockchain = chains.chain(self.coinType)
acct.load(self.dbForAcctIdx(idx), blockchain, self.signals)
self.acctDB[idx] = acct
self.accounts[idx] = acct
return acct
def saveAccount(self, idx):
"""
Save the account at the specified index.
Args:
idx: The account index.
"""
self.acctDB[idx] = self.accounts[idx]
def account(self, idx):
"""
Get the account at the provided index.
Args:
idx (int): The account index.
Returns:
Account: The account at idx.
"""
return self.accounts[idx]
def listAccounts(self):
"""
Get a list of accounts in order of their account index. The index of the
of the account in the returned list is its BIP-44 account index.
Returns:
list(dcr.Account): All known accounts.
"""
sortedAccts = sorted(self.accounts.items(), key=lambda pair: pair[0])
if len(sortedAccts) != sortedAccts[-1][0] + 1:
raise DecredError(
"account index mismatch. expected last index {} got {}".format(
len(sortedAccts) - 1, sortedAccts[-1][0]
)
)
return [a for _, a in sortedAccts]
def openAccount(self, idx, cryptoKey):
"""
Open an account.
Args:
            idx (int): The account index, which is its position in the
                accounts list.
cryptoKey (ByteArray): The master encoding key.
Returns:
Account: The open account.
"""
acct = self.accounts[idx]
acct.unlock(cryptoKey)
return acct
def discover(self, cryptoKey):
"""
Discover accounts up to the account gap limit. If an account is
discovered, all accounts up to and including the discovered account's
index will be created.
Args:
cryptoKey (ByteArray): The master encoding key.
"""
coinExtKey = self.coinKey(cryptoKey)
blockchain = chains.chain(self.coinType)
lastSeenIdx = len(self.acctDB) - 1
idx = lastSeenIdx + 1
acctConstructor = chains.AccountConstructors[self.coinType]
while True:
acctKeyPriv = coinExtKey.deriveAccountKey(idx)
acctKeyPub = acctKeyPriv.neuter()
if acctConstructor.txsExistForKey(acctKeyPub, blockchain):
# Add accounts up to the newly seen index.
log.info(f"account discovered at index {idx}")
while len(self.accounts) <= idx:
self.addAccount(cryptoKey, f"Account {len(self.accounts)}")
lastSeenIdx = idx
idx += 1
if idx - lastSeenIdx > ACCOUNT_GAP_LIMIT:
break
def createNewAccountManager(root, cryptoKey, coinType, netParams, db):
"""
Create a new account manager and a set of BIP0044 keys for creating
accounts. The zeroth account is created for the provided network parameters.
    Args:
        root (crypto.ExtendedKey): The wallet key.
        cryptoKey (crypto.SecretKey): The master encryption key.
        coinType: The coin type, passed through chains.parseCoinType.
        netParams (module): Network parameters.
        db (database.Bucket): The database bucket for the account manager.
Returns:
AccountManager: An initialized account manager.
"""
coinKey = root.deriveCoinTypeKey(netParams)
coinKeyEnc = crypto.encrypt(cryptoKey, coinKey.serialize())
manager = AccountManager(
coinType=chains.parseCoinType(coinType),
coinKeyEnc=coinKeyEnc,
netName=netParams.Name,
db=db,
)
manager.addAccount(cryptoKey, DEFAULT_ACCOUNT_NAME)
return manager
def createAccount(
cryptoKey, coinExtKey, coinType, acctIdx, netName, acctName, db, signals
):
    # Derive the account key for the requested account index according to BIP0044.
acctKeyPriv = coinExtKey.deriveAccountKey(acctIdx)
# Ensure the branch keys can be derived for the provided seed according
# to BIP0044.
checkBranchKeys(acctKeyPriv)
acctKeyPub = acctKeyPriv.neuter()
pubKeyEncrypted = crypto.encrypt(cryptoKey, acctKeyPub.serialize())
privKeyEncrypted = crypto.encrypt(cryptoKey, acctKeyPriv.serialize())
constructor = chains.AccountConstructors[coinType]
blockchain = chains.chain(coinType)
account = constructor(
acctIdx,
pubKeyEncrypted,
privKeyEncrypted,
acctName,
netName,
db,
blockchain,
signals,
)
# Open the account.
account.unlock(cryptoKey)
# Create the first payment address.
account.generateGapAddresses()
# Close the account to zero the key.
account.lock()
return account
```
#### File: decred/examples/send_testnet.py
```python
from getpass import getpass
from decred.wallet.wallet import SimpleWallet
# Testnet return address for faucet.decred.org.
TESTNET_ADDRESS = "TsfDLrRkk9ciUuwfp2b8PawwnukYD7yAjGd"
def main():
value = int(1 * 1e8) # 1 DCR, atoms
password = getpass()
walletDir = "wallets"
try:
print("Opening and synchronizing wallet")
wallet = SimpleWallet(walletDir, password, "testnet")
except Exception as e:
print("Failed to open wallet with provided password: %s" % e)
exit()
try:
# Send some DCR.
tx = wallet.sendToAddress(value, TESTNET_ADDRESS)
# Print the transaction ID and a dcrdata link.
print("Transaction ID: %s" % tx.id())
print("See transaction at https://testnet.dcrdata.org/tx/%s" % tx.id())
except Exception as e:
print("Failed to send transaction: %s" % e)
finally:
wallet.close()
if __name__ == "__main__":
main()
```
#### File: unit/crypto/test_gcs.py
```python
import random
import pytest
from decred import DecredError
from decred.crypto import gcs, rando
from decred.util.encode import ByteArray, rba
def test_BitReader():
"""
Ensure that the bit reader and all associated methods work as
expected including expected errors and corner cases at byte boundaries.
"""
br = gcs.BitReader(ByteArray(int("11101111", 2)))
with pytest.raises(DecredError):
br.readNBits(-1)
with pytest.raises(DecredError):
br.readNBits(65)
"""
Test parameters
name: test description
bytes: bytes to use as the bitstream
perReaderTests: tests to run against same reader
name (str): test description
doUnary (bool): whether or not to perform a unary read
wantUnary (int): expected number of consecutive ones
unaryErr (Exception): expected error on unary read
nValBits (int): number of bits to read from bitstream as uint64
wantVal (int): expected value from nValBits read
bitsErr (Exception): expected error on bits read
"""
tests = [
dict(
name="unary read on empty bytes error",
b=ByteArray(""),
perReaderTests=[
dict(
name="unary read",
doUnary=True,
wantUnary=0,
unaryErr=gcs.EncodingError,
)
],
),
dict(
name="0 bits read on empty bytes (no error)",
b=ByteArray(""),
perReaderTests=[dict(name="0 bit read", nValBits=0, wantVal=0,)],
),
dict(
name="1 bit read on empty bytes error",
b=ByteArray(""),
perReaderTests=[
dict(name="1 bit read", nValBits=1, bitsErr=gcs.EncodingError,)
],
),
dict(
name="9 bit read on single byte error (straddle byte boundary)",
b=ByteArray("0f"),
perReaderTests=[
dict(name="9 bit read", nValBits=9, bitsErr=gcs.EncodingError,)
],
),
dict(
name="16 bit read on single byte error (byte boundary)",
b=ByteArray("0f"),
perReaderTests=[
dict(name="16 bit read", nValBits=16, bitsErr=gcs.EncodingError,)
],
),
dict(
name="0 bits followed by 8 bits ",
b=ByteArray("ff"),
perReaderTests=[
dict(name="0 bit read", nValBits=0, wantVal=0,),
dict(name="8 bit read", nValBits=8, wantVal=0xFF,),
],
),
dict(
name="unary 1",
b=ByteArray("80"),
perReaderTests=[dict(name="first unary read", doUnary=True, wantUnary=1,)],
),
dict(
name="unary 2",
b=ByteArray("c0"),
perReaderTests=[dict(name="first unary read", doUnary=True, wantUnary=2,)],
),
dict(
name="unary 9 (more than one byte)",
b=ByteArray("ff80"),
perReaderTests=[dict(name="first unary read", doUnary=True, wantUnary=9,)],
),
dict(
name="unary 0, 1 bit read",
b=ByteArray("40"),
perReaderTests=[
dict(name="unary read", doUnary=True, wantUnary=0,),
dict(name="1 bit read", nValBits=1, wantVal=1,),
],
),
dict(
name="unary 0, 8 bits read (straddle byte)",
b=ByteArray("5a80"),
perReaderTests=[
dict(name="unary read", doUnary=True, wantUnary=0,),
dict(name="8 bit read", nValBits=8, wantVal=0xB5,),
],
),
dict(
name="unary 0, 15 bits read (byte boundary)",
b=ByteArray("5ac5"),
perReaderTests=[
dict(name="unary read", doUnary=True, wantUnary=0,),
dict(name="15 bit read", nValBits=15, wantVal=0x5AC5,),
],
),
dict(
name="unary 0, 16 bits read (straddle 2nd byte boundary)",
b=ByteArray("5ac580"),
perReaderTests=[
dict(name="unary read", doUnary=True, wantUnary=0,),
dict(name="16 bit read", nValBits=16, wantVal=0xB58B,),
],
),
dict(
name="unary 3, 15 bits read, unary 2",
b=ByteArray("eac518"),
perReaderTests=[
dict(name="first unary read", doUnary=True, wantUnary=3,),
dict(name="15 bit read", nValBits=15, wantVal=0x5628,),
dict(name="second unary read", doUnary=True, wantUnary=2,),
],
),
]
for test in tests:
# Parse the specified bytes to read and create a bitstream reader from
# them.
r = gcs.BitReader(test["b"])
for prTest in test.get("perReaderTests", []):
testTag = test["name"] + ": " + prTest["name"]
# Read unary and ensure expected result if requested.
if prTest.get("doUnary"):
unaryErr = prTest.get("unaryErr")
if unaryErr:
with pytest.raises(unaryErr):
r.readUnary()
continue
try:
gotUnary = r.readUnary()
except gcs.EncodingError:
break
assert gotUnary == prTest.get("wantUnary", 0), testTag
# Read specified number of bits as uint64 and ensure expected
# result.
bitsErr = prTest.get("bitsErr")
if bitsErr:
with pytest.raises(bitsErr):
r.readNBits(prTest.get("nValBits", 0))
continue
try:
gotVal = r.readNBits(prTest.get("nValBits", 0))
except gcs.EncodingError:
break
assert gotVal == prTest.get("wantVal", 0), testTag
def test_filter():
"""
Ensure that the filters and all associated methods work as expected by using
various known parameters and contents along with random keys for matching
purposes.
"""
# Use a random key for each test instance and log it if the tests fail.
randKey = rando.newKey()[: gcs.KeySize]
fixedKey = ByteArray(length=16)
# Test some error paths.
f = gcs.FilterV2.deserialize(
ByteArray(
"1189af70ad5baf9da83c64e99b18e96a06cd7295a58b324e81f09c85d093f1e33dcd6f40f18cfcbe2aeb771d8390"
)
)
member = ByteArray("Alex".encode())
with pytest.raises(DecredError):
f.match(key=ByteArray(length=17), data=ByteArray(0x0A0B))
# random entry doesn't match.
assert not f.match(key=fixedKey, data=ByteArray("0a"))
assert not f.matchAny(key=fixedKey, data=[ByteArray("0a")])
# Filter of all FF gives encoding error, which returns False.
f.filterData = ByteArray(0xFF)
assert not f.match(key=fixedKey, data=member)
assert not f.matchAny(key=fixedKey, data=[member])
# fmt: off
# contents1 defines a set of known elements for use in the tests below.
contents1 = [
ByteArray(s.encode()) for s in
("Alex", "Bob", "Charlie", "Dick", "Ed", "Frank", "George", "Harry",
"Ilya", "John", "Kevin", "Larry", "Michael", "Nate", "Owen", "Paul",
"Quentin")
]
# contents2 defines a separate set of known elements for use in the tests
# below.
contents2 = [
ByteArray(s.encode()) for s in
("Alice", "Betty", "Charmaine", "Donna", "Edith", "Faina", "Georgia",
"Hannah", "Ilsbeth", "Jennifer", "Kayla", "Lena", "Michelle", "Natalie",
"Ophelia", "Peggy", "Queenie")
]
# fmt: on
tests = [
dict(
name="v2 empty filter",
matchKey=randKey,
contents=[],
wantMatches=[],
fixedKey=fixedKey,
wantBytes=ByteArray(),
wantHash=rba(length=32),
),
dict(
name="v2 filter single nil item produces empty filter",
matchKey=randKey,
contents=[ByteArray()],
wantMatches=[],
fixedKey=fixedKey,
wantBytes=bytearray(),
wantHash=rba(length=32),
),
dict(
name="v2 filter contents1 with nil item with B=19, M=784931",
matchKey=randKey,
contents=[ByteArray()] + contents1,
wantMatches=contents1,
fixedKey=fixedKey,
wantBytes=ByteArray(
"1189af70ad5baf9da83c64e99b18e96a06cd7295a58b324e81f09c85d093f1e33dcd6f40f18cfcbe2aeb771d8390"
),
wantHash=rba(
"b616838c6090d3e732e775cc2f336ce0b836895f3e0f22d6c3ee4485a6ea5018"
),
),
dict(
name="v2 filter contents1 with B=19, M=784931",
matchKey=randKey,
contents=contents1,
wantMatches=contents1,
fixedKey=fixedKey,
wantBytes=ByteArray(
"1189af70ad5baf9da83c64e99b18e96a06cd7295a58b324e81f09c85d093f1e33dcd6f40f18cfcbe2aeb771d8390"
),
wantHash=rba(
"b616838c6090d3e732e775cc2f336ce0b836895f3e0f22d6c3ee4485a6ea5018"
),
),
dict(
name="v2 filter contents2 with B=19, M=784931",
matchKey=randKey,
contents=contents2,
wantMatches=contents2,
fixedKey=fixedKey,
wantBytes=ByteArray(
"118d4be5372d2f4731c7e1681aefd23028be12306b4d90701a46b472ee80ad60f9fa86c4d6430cfb495ced604362"
),
wantHash=rba(
"f3028f42909209120c8bf649fbbc5a70fb907d8997a02c2c1f2eef0e6402cb15"
),
),
]
for test in tests:
# Create a filter with the match key for all tests not related to
# testing serialization.
f = gcs.FilterV2.deserialize(test["wantBytes"])
wantN = len(test["contents"]) - sum(1 for d in test["contents"] if len(d) == 0)
assert f.n == wantN, test["name"]
# Ensure empty data never matches.
assert not f.match(test["matchKey"], ByteArray())
assert not f.matchAny(test["matchKey"], []), test["name"]
assert not f.matchAny(test["matchKey"], [ByteArray()]), test["name"]
# Ensure empty filter never matches data.
if len(test["contents"]) == 0:
wantMiss = "test".encode()
assert not f.match(test["matchKey"], wantMiss), test["name"]
assert not f.matchAny(test["matchKey"], [wantMiss]), test["name"]
# Ensure all of the expected matches occur individually.
for wantMatch in test["wantMatches"]:
assert f.match(test["fixedKey"], wantMatch), test["name"]
# Ensure a subset of the expected matches works in various orders when
# matching any.
if len(test["wantMatches"]) > 0:
# Create set of data to attempt to match such that only the final
# item is an element in the filter.
matches = []
for data in test["wantMatches"]:
mutated = ByteArray(data)
mutated[0] ^= 0x55
matches.append(mutated)
matches[-1] = test["wantMatches"][-1]
assert f.matchAny(test["fixedKey"], matches), test["name"]
# Fisher-Yates shuffle the match set and test for matches again.
for i in range(len(matches)):
# Pick a number between current index and the end.
j = random.randint(0, len(matches) - i - 1) + i
matches[i], matches[j] = matches[j], matches[i]
assert f.matchAny(test["fixedKey"], matches), test["name"]
assert f.hash() == test["wantHash"], test["name"]
```
#### File: unit/dcr/test_vsp_unit.py
```python
import time
import pytest
from decred import DecredError
from decred.dcr import vsp
from decred.dcr.nets import mainnet
from decred.util import encode
def test_result_is_success():
# (res, isSuccess)
tests = [
(dict(status="success"), True),
(dict(status="fail"), False),
(dict(), False),
("success", False),
("abcd", False),
("", False),
(0, False),
(True, False),
(None, False),
]
for res, isSuccess in tests:
assert vsp.resultIsSuccess(res) == isSuccess
purchaseInfo = {
"PoolAddress": "TsbyH2p611jSWnvUAq3erSsRYnCxBg3nT2S",
"PoolFees": 0.5,
"Script": "512103af3c24d005ca8b755e7167617f3a5b4c60a65f8318a7fcd1b0cacb1ab"
"d2a97fc21027b81bc16954e28adb832248140eb58bedb6078ae5f4dabf21fde5a8ab7135c"
"b652ae",
"TicketAddress": "Tcbvn2hiEAXBDwUPDLDG2SxF9iANMKhdVev",
"VoteBits": 5,
"VoteBitsVersion": 0,
}
def assertPiIsEqual(pi):
assert pi.poolAddress == purchaseInfo["PoolAddress"]
assert pi.poolFees == purchaseInfo["PoolFees"]
assert pi.script == purchaseInfo["Script"]
assert pi.ticketAddress == purchaseInfo["TicketAddress"]
assert pi.voteBits == purchaseInfo["VoteBits"]
assert pi.voteBitsVersion == purchaseInfo["VoteBitsVersion"]
def test_purchase_info_parse():
now = int(time.time())
pi = vsp.PurchaseInfo.parse(purchaseInfo)
assertPiIsEqual(pi)
assert isinstance(pi.unixTimestamp, int) and pi.unixTimestamp >= now
def test_purchase_info_blobbing():
pi = vsp.PurchaseInfo.parse(purchaseInfo)
b = vsp.PurchaseInfo.blob(pi)
assert isinstance(b, bytearray)
rePi = vsp.PurchaseInfo.unblob(b)
assertPiIsEqual(rePi)
ts = rePi.unixTimestamp
assert isinstance(ts, int) and ts == pi.unixTimestamp
# bad version
bCopy = encode.ByteArray(b, copy=True)
bCopy[0] = 255
with pytest.raises(NotImplementedError):
vsp.PurchaseInfo.unblob(bCopy.bytes())
# too long
bCopy = encode.ByteArray(b, copy=True)
bCopy += b"\x00"
with pytest.raises(DecredError):
vsp.PurchaseInfo.unblob(bCopy.bytes())
poolStats = {
"AllMempoolTix": 12,
"APIVersionsSupported": [1, 2],
"BlockHeight": 368781,
"Difficulty": 88.50820708,
"Expired": 3,
"Immature": 0,
"Live": 28,
"Missed": 349,
"OwnMempoolTix": 0,
"PoolSize": 5759,
"ProportionLive": 0.004861955200555652,
"ProportionMissed": 0.3216589861751152,
"Revoked": 349,
"TotalSubsidy": 293.10719669,
"Voted": 736,
"Network": "testnet3",
"PoolEmail": "<EMAIL>",
"PoolFees": 0.5,
"PoolStatus": "Open",
"UserCount": 44,
"UserCountActive": 34,
"Version": "1.6.0-pre",
}
def test_pool_stats():
ps = vsp.PoolStats(poolStats)
assert ps.allMempoolTix == poolStats["AllMempoolTix"]
assert ps.apiVersionsSupported == poolStats["APIVersionsSupported"]
assert ps.blockHeight == poolStats["BlockHeight"]
assert ps.difficulty == poolStats["Difficulty"]
assert ps.expired == poolStats["Expired"]
assert ps.immature == poolStats["Immature"]
assert ps.live == poolStats["Live"]
assert ps.missed == poolStats["Missed"]
assert ps.ownMempoolTix == poolStats["OwnMempoolTix"]
assert ps.poolSize == poolStats["PoolSize"]
assert ps.proportionLive == poolStats["ProportionLive"]
assert ps.proportionMissed == poolStats["ProportionMissed"]
assert ps.revoked == poolStats["Revoked"]
assert ps.totalSubsidy == poolStats["TotalSubsidy"]
assert ps.voted == poolStats["Voted"]
assert ps.network == poolStats["Network"]
assert ps.poolEmail == poolStats["PoolEmail"]
assert ps.poolFees == poolStats["PoolFees"]
assert ps.poolStatus == poolStats["PoolStatus"]
assert ps.userCount == poolStats["UserCount"]
assert ps.userCountActive == poolStats["UserCountActive"]
assert ps.version == poolStats["Version"]
now = int(time.time())
votingServiceProvider = {
"url": "https://www.dcrstakedinner.com/",
"apiKey": (
"<KEY>"
"<KEY>"
"XMiOjQ2fQ.PEb000_TjQuBYxjRdh-VOaXMdV2GUw3_ZyIyp_tfpFE"
),
"netName": "testnet3",
"purchaseInfo": vsp.PurchaseInfo.parse(purchaseInfo),
}
def assertVspIsEqual(pool):
assert pool.url == votingServiceProvider["url"]
assert pool.apiKey == votingServiceProvider["apiKey"]
assert pool.netParams.Name == votingServiceProvider["netName"]
assertPiIsEqual(pool.purchaseInfo)
def test_vsp_init():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
assertVspIsEqual(pool)
ts = pool.purchaseInfo.unixTimestamp
assert isinstance(ts, int) and ts >= now
def test_vsp_blobbing():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
b = vsp.VotingServiceProvider.blob(pool)
assert isinstance(b, bytearray)
rePool = vsp.VotingServiceProvider.unblob(b)
assertVspIsEqual(rePool)
ts = rePool.purchaseInfo.unixTimestamp
assert isinstance(ts, int) and ts == pool.purchaseInfo.unixTimestamp
# bad version
bCopy = encode.ByteArray(b, copy=True)
bCopy[0] = 255
with pytest.raises(NotImplementedError):
vsp.VotingServiceProvider.unblob(bCopy.bytes())
# too long
bCopy = encode.ByteArray(b, copy=True)
bCopy += b"\x00"
with pytest.raises(DecredError):
vsp.VotingServiceProvider.unblob(bCopy.bytes())
def test_vsp_serialize():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
b = vsp.VotingServiceProvider.blob(pool)
assert pool.serialize() == encode.ByteArray(b)
vspProviders = {
"Staked": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://decred.staked.us",
"Launched": 1543433400,
"LastUpdated": 1582020568,
"Immature": 0,
"Live": 141,
"Voted": 2730,
"Missed": 10,
"PoolFees": 5,
"ProportionLive": 0.0034847511245118877,
"ProportionMissed": 0.0036496350364963502,
"UserCount": 229,
"UserCountActive": 106,
"Version": "1.4.0-pre+dev",
},
"Golf": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://stakepool.dcrstats.com",
"Launched": 1464167340,
"LastUpdated": 1582020568,
"Immature": 21,
"Live": 768,
"Voted": 148202,
"Missed": 154,
"PoolFees": 5,
"ProportionLive": 0.01898077208244773,
"ProportionMissed": 0,
"UserCount": 6005,
"UserCountActive": 2751,
"Version": "1.5.0-pre",
},
"Hotel": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://stake.decredbrasil.com",
"Launched": 1464463860,
"LastUpdated": 1582020568,
"Immature": 41,
"Live": 607,
"Voted": 48135,
"Missed": 49,
"PoolFees": 5,
"ProportionLive": 0.015002842383647644,
"ProportionMissed": 0.0010169350821849577,
"UserCount": 1607,
"UserCountActive": 968,
"Version": "1.5.0",
},
"November": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://decred.raqamiya.net",
"Launched": 1513878600,
"LastUpdated": 1582020568,
"Immature": 5,
"Live": 334,
"Voted": 15720,
"Missed": 50,
"PoolFees": 1,
"ProportionLive": 0.008255270767937913,
"ProportionMissed": 0.0031705770450221942,
"UserCount": 261,
"UserCountActive": 114,
"Version": "1.5.0-pre",
},
"Ray": {
"APIEnabled": True,
"APIVersionsSupported": [1, 2],
"Network": "mainnet",
"URL": "https://dcrpos.idcray.com",
"Launched": 1518446640,
"LastUpdated": 1582020569,
"Immature": 50,
"Live": 1108,
"Voted": 36974,
"Missed": 298,
"PoolFees": 2,
"ProportionLive": 0.027385748535554512,
"ProportionMissed": 0.007995277956643057,
"UserCount": 137,
"UserCountActive": 70,
"Version": "1.4.0-pre+dev",
},
}
def test_vsp_providers(http_get_post):
http_get_post("https://api.decred.org/?c=gsd", vspProviders)
providers = vsp.VotingServiceProvider.providers(mainnet)
assert len(providers) == 5
def test_vsp_api_path():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
path = pool.apiPath("stakeinfo")
assert path == "https://www.dcrstakedinner.com/api/v2/stakeinfo"
def test_vsp_headers():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
headers = pool.headers()
assert headers == {"Authorization": "Bearer " + votingServiceProvider["apiKey"]}
def test_vsp_validate():
pool = vsp.VotingServiceProvider(**votingServiceProvider)
# correct address
addr = "<KEY>"
pool.validate(addr)
# valid but wrong address
addr = "<KEY>"
with pytest.raises(DecredError):
pool.validate(addr)
# invalid address
addr = "ASDF"
with pytest.raises(DecredError):
pool.validate(addr)
# no address
addr = ""
with pytest.raises(DecredError):
pool.validate(addr)
def test_vsp_authorize(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": purchaseInfo}
addressNotSet = {
"status": "error",
"code": 9,
"message": "no address submitted",
}
# ok
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), success)
pool.authorize(addr)
# address not submitted
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), addressNotSet)
http_get_post(pool.apiPath("getpurchaseinfo"), success)
http_get_post((pool.apiPath("address"), repr({"UserPubKeyAddr": addr})), success)
pool.authorize(addr)
# other error
systemErr = {"status": "error", "code": 14, "message": "system error"}
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), systemErr)
with pytest.raises(DecredError):
pool.authorize(addr)
# wrong address
addr = "<KEY>"
http_get_post(pool.apiPath("getpurchaseinfo"), systemErr)
with pytest.raises(DecredError):
pool.authorize(addr)
def test_vsp_get_purchase_info(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": purchaseInfo}
addressNotSet = {
"status": "error",
"code": 9,
"message": "no address submitted",
}
# ok
http_get_post(pool.apiPath("getpurchaseinfo"), success)
pool.getPurchaseInfo()
assert not pool.err
# error
http_get_post(pool.apiPath("getpurchaseinfo"), addressNotSet)
with pytest.raises(DecredError):
pool.getPurchaseInfo()
assert pool.err
def test_vsp_update_purchase_info(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": purchaseInfo}
# updated
pool.purchaseInfo.unixTimestamp = 0
http_get_post(pool.apiPath("getpurchaseinfo"), success)
pool.updatePurchaseInfo()
assert pool.purchaseInfo.unixTimestamp != 0
# not updated
    # within the update threshold
before = int(time.time() - vsp.PURCHASE_INFO_LIFE / 2)
pool.purchaseInfo.unixTimestamp = before
pool.updatePurchaseInfo()
assert pool.purchaseInfo.unixTimestamp == before
def test_vsp_get_stats(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": poolStats}
# ok
http_get_post(pool.apiPath("stats"), success)
pool.getStats()
# pool error
systemErr = {"status": "error", "code": 14, "message": "system error"}
http_get_post(pool.apiPath("stats"), systemErr)
with pytest.raises(DecredError):
pool.getStats()
def test_vsp_set_vote_bits(http_get_post):
pool = vsp.VotingServiceProvider(**votingServiceProvider)
success = {"status": "success", "data": "ok"}
# votebits are 5
assert pool.purchaseInfo.voteBits == 5
# ok
http_get_post((pool.apiPath("voting"), repr({"VoteBits": 7})), success)
pool.setVoteBits(7)
# set to 7
assert pool.purchaseInfo.voteBits == 7
# pool error
systemErr = {"status": "error", "code": 14, "message": "system error"}
http_get_post((pool.apiPath("voting"), repr({"VoteBits": 3})), systemErr)
with pytest.raises(DecredError):
pool.setVoteBits(3)
# no change
assert pool.purchaseInfo.voteBits == 7
```
#### File: tests/unit/test_config.py
```python
import logging
import sys
from tinywallet.config import CmdArgs
def test_CmdArgs():
sys.argv = ["cmd", "--simnet"]
cfg = CmdArgs()
assert cfg.netParams.Name == "simnet"
sys.argv = ["cmd", "--testnet", "--loglevel", "debug"]
cfg = CmdArgs()
assert cfg.netParams.Name == "testnet3"
assert cfg.logLevel == logging.DEBUG
sys.argv = ["cmd", "--loglevel", "A:Warning,B:deBug,C:Critical,D:0"]
cfg = CmdArgs()
assert len(cfg.moduleLevels) == 4
assert cfg.moduleLevels["A"] == logging.WARNING
assert cfg.moduleLevels["B"] == logging.DEBUG
assert cfg.moduleLevels["C"] == logging.CRITICAL
assert cfg.moduleLevels["D"] == logging.NOTSET
``` |
{
"source": "joe-habel/Chrome-Global-Media-Key-Hog-Fix",
"score": 3
} |
#### File: joe-habel/Chrome-Global-Media-Key-Hog-Fix/installer.py
```python
import os
import subprocess
from glob import glob
def env_error():
with open('errors.txt', 'w') as log:
        log.write('Local App Data environment variable not found.\n'
                  'Please place your Plexamp install directory in the config.txt file.')
def plexamp_dir_error():
with open('errors.txt', 'w') as log:
        log.write('Plexamp directory not found.\n'
                  'Please update your Plexamp directory in the config.txt file.')
def default_plexamp_dir():
    # os.environ.get() returns None instead of raising KeyError when the
    # variable is missing, so check the result explicitly.
    local_app_data = os.environ.get('LOCALAPPDATA')
    if local_app_data is None:
        env_error()
        return False
plexamp_dir = os.path.join(local_app_data,'Programs','plexamp')
if not os.path.exists(plexamp_dir):
return False
return plexamp_dir
def check_dir(path):
if not os.path.exists(path):
plexamp_dir_error()
return False
return True
def plexamp_config():
    if not os.path.exists('config.txt'):
        with open('config.txt', 'w') as config:
config.write('Chrome=\n')
config.write('Plexamp=\n')
with open('config.txt', 'r') as config:
paths = config.readlines()
plexamp_dir = ''
chrome_dir = ''
for path in paths:
if 'Plexamp' in path:
plexamp_dir = path[path.find('=')+1:].strip()
if 'Chrome' in path:
chrome_dir = path[path.find('=')+1:].strip()
with open('config.txt', 'w') as config:
config.write('Chrome=%s\n'%chrome_dir)
config.write('Plexamp=%s\n'%plexamp_dir)
return plexamp_dir
def main():
plexamp_dir = plexamp_config()
    if not plexamp_dir:
        plexamp_dir = default_plexamp_dir()
    if not plexamp_dir:
        plexamp_dir_error()
        return
    plexamp_dir = os.path.join(plexamp_dir, 'Plexamp.exe')
    if not check_dir(plexamp_dir):
        return
current_path = os.path.abspath(os.getcwd())
if len(glob(os.path.join(current_path,'*.py'))) > 0:
fix_keys = os.path.join(current_path,'update_chrome_files.py')
with open('launch.bat', 'w') as launcher:
launcher.write('python %s start\n'%fix_keys)
launcher.write('start /b /wait "" %s\n'%plexamp_dir)
launcher.write('python %s close'%fix_keys)
else:
fix_keys = os.path.join(current_path,'update_chrome_files.exe')
with open('launch.bat', 'w') as launcher:
launcher.write('start /b /wait %s start\n'%fix_keys)
launcher.write('start /b /wait "" %s\n'%plexamp_dir)
launcher.write('start /b /wait %s close'%fix_keys)
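    # The VBS wrapper below runs launch.bat through "cmd /c" with window style 0,
    # so the console window stays hidden while Plexamp is running.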
with open('Plexamp (Media Key Fix).vbs', 'w') as vbs:
vbs.write('Set oShell = CreateObject ("Wscript.Shell")\n')
vbs.write('Dim StrArgs\n')
bat = os.path.join(current_path,'launch.bat')
vbs.write('StrArgs = "cmd /c %s"\n'%bat)
vbs.write('oShell.Run strArgs, 0, false')
shortcut_link = os.path.join(current_path,'Plexamp (Media Key Fix).vbs')
with open('make_shortcut.vbs', 'w') as short:
short.write('Set WshShell = CreateObject("Wscript.shell")\n')
short.write('strDesktop = WshShell.SpecialFolders("Desktop")\n')
short.write('set oMyShortcut = WshShell.CreateShortcut(strDesktop & "\\Plexamp (Media Key Fix).lnk")\n')
short.write('oMyShortcut.WindowStyle = 1\n')
short.write('oMyShortcut.IconLocation = "%s,0"\n'%plexamp_dir)
short.write('oMyShortcut.TargetPath = "%s"\n'%shortcut_link)
short.write('oMyShortcut.Description = "Shortcut to the Streamkeys Plexamp Fix"\n')
short.write('oMyShortcut.WorkingDirectory = "%s"\n'%current_path)
short.write('oMyShortcut.Save')
subprocess.call('cmd /c make_shortcut.vbs')
os.remove('make_shortcut.vbs')
if __name__ == '__main__':
main()
``` |
{
"source": "joe-habel/JobSearch",
"score": 3
} |
#### File: JobSearch/queries/indeed.py
```python
from .query import Query, QueryArgument
what = QueryArgument('q', str, disp_name='What')
where = QueryArgument('l', str, required=True, disp_name='Where')
radius = QueryArgument('radius', int, choices=(0, 5, 10, 15, 25, 50, 100), disp_name="Miles away")
min_salary = QueryArgument('q', int, fmt="${}", disp_name="Minimum Salary")
company = QueryArgument('rbc', str, requires='jcid', disp_name="Company Name")
company_id = QueryArgument('jcid', str, requires='rbc', disp_name="Company id")
job_type = QueryArgument('jt', str, choices=('fulltime', 'parttime', 'contract', 'internship', 'temporary', 'commission'), disp_name="Job Type")
experience = QueryArgument('explvl', str, choices=('entry_level', 'mid_level', 'senior_level'), disp_name="Experience Level")
start = QueryArgument('start', int, required=True, value=0, disp_name="start")
class SimpleIndeedQuery(Query):
_kwargs = {
'what' : what,
'where' : where,
'radius' : radius,
'min_salary' : min_salary,
'company' : company,
'company_id' : company_id,
'job_type' : job_type,
'experience' : experience,
'start' : start
}
def __init__(self, **kwargs):
super(SimpleIndeedQuery, self).__init__('https://indeed.com/jobs', **SimpleIndeedQuery._kwargs)
self._init_kwargs(**kwargs)
def _init_kwargs(self, **kwargs):
for key, val in kwargs.items():
if key in SimpleIndeedQuery._kwargs:
self._args[key].value = val
else:
raise NotImplementedError("The following keyword is not valid for the SimpleIndeedQuery: %s"%key)
#Search Strings
all_of_these_words = QueryArgument('as_and', str, disp_name='All of these words')
exact_phrase = QueryArgument('as_phr', str, disp_name='Exact phrase')
any_of_these_words = QueryArgument('as_any', str, disp_name="Any of these words")
none_of_these_words = QueryArgument('as_not', str, disp_name="None of these words")
title_words = QueryArgument('as_ttl', str, disp_name="These words in title")
from_company = QueryArgument('as_cmp', str, disp_name="From this company")
from_this_job_site = QueryArgument('as_src', str, disp_name="From this job site")
#Ad Origin
posted_to = QueryArgument('st', str, choices=('jobsite', 'employer'), disp_name="Posted to")
hired_by = QueryArgument('sr', str, choices=('directhire',), disp_name="Who handles hiring")
#Sort and paging
sort_by = QueryArgument('sort', str, choices=('date',), disp_name="Sort by")
limit = QueryArgument('limit', int, choices=(10, 20, 30, 50), disp_name="Per Page")
#Age
from_age = QueryArgument('fromage', choices=('last', 1, 3, 7, 15, 'any'), disp_name="Max days old")
#Magic Args
psf = QueryArgument('psf', str, required=True, value='advsrch', mutable=False, requires='from')
searched_from = QueryArgument('from', str, required=True, value='advancedsearch', mutable=False, requires='psf')
class AdvancedIndeedQuery(Query):
_kwargs = {
'where' : where,
'radius' : radius,
'min_salary' : min_salary,
'job_type' : job_type,
'experience' : experience,
'start' : start,
'all_words' : all_of_these_words,
'exact_phrase' : exact_phrase,
'any_words' : any_of_these_words,
'none_words' : none_of_these_words,
'title_words' : title_words,
'from_company' : from_company,
'from_job_site' : from_this_job_site,
'posted_to' : posted_to,
'hired_by' : hired_by,
'sort_by' : sort_by,
'limit' : limit,
'age' : from_age,
'psf' : psf,
'searched_from': searched_from
}
def __init__(self, **kwargs):
super(AdvancedIndeedQuery, self).__init__('https://indeed.com/jobs', **AdvancedIndeedQuery._kwargs)
self._init_kwargs(**kwargs)
def _init_kwargs(self, **kwargs):
for key, val in kwargs.items():
if key in AdvancedIndeedQuery._kwargs:
self._args[key].value = val
else:
raise NotImplementedError("The following keyword is not valid for the AdvancedIndeedQuery: %s"%key)
``` |
{
"source": "joe-habel/SampleRHOptionsFinder",
"score": 3
} |
#### File: joe-habel/SampleRHOptionsFinder/utilities.py
```python
import os
from datetime import timedelta
import yaml
import pandas as pd
PARENT_PATH = os.path.dirname(os.path.abspath(__file__))
def get_symbols():
"""
Get a series of listed symbols.
Returns
-------
pandas.Series
"""
return pd.read_csv(os.path.join(PARENT_PATH, 'data/symbols.csv'), sep='|')['Symbol']
def get_listings():
"""
Get a dataframe of listed symbols and security names
Returns
-------
pandas.DataFrame
"""
return pd.read_csv(os.path.join(PARENT_PATH, 'data/symbols.csv'), sep='|')[['Symbol', 'Security Name']]
def robinhood_login():
"""
Log into robinhood via robin_stocks and the credentials file.
"""
import robin_stocks
credentials = read_login_file()
robin_stocks.login(credentials['username'], credentials['password'])
def read_login_file():
"""
Parse the credentials file into username and password.
Returns
-------
dict
"""
with open('.robinhood_login', 'r') as login_file:
credentials = yaml.safe_load(login_file)
return credentials
def query_date_to_display(date):
"""
date: str
"""
yyyy, mm, dd = date.split('-')
return '-'.join((mm, dd, yyyy))
def display_date_to_query(date):
"""
date: str
"""
mm, dd, yyyy = date.split('-')
return '-'.join((yyyy, mm, dd))
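# Round-trip examples for the two date helpers above:
#   query_date_to_display('2024-05-01') -> '05-01-2024'
#   display_date_to_query('05-01-2024') -> '2024-05-01'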
def date_range(start_date, end_date):
"""
Parameters
----------
start_date: datetime.date
end_date: datetime.date
"""
for n in range(int((end_date - start_date).days)):
yield (start_date + timedelta(n)).strftime('%Y-%m-%d')
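# Example: date_range(date(2024, 5, 1), date(2024, 5, 4)) yields '2024-05-01',
# '2024-05-02', and '2024-05-03'; the end date itself is excluded.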
``` |
{
"source": "joehakimrahme/DungeonCrawler",
"score": 2
} |
#### File: DungeonCrawler/dungeoncrawler/battle.py
```python
import random
import re
from dungeoncrawler import utils
class BadComboInputError(Exception):
pass
class Battle(object):
def __init__(self, yourteam, enemyteam, intro="", outro="", logo=""):
self.yourteam = yourteam
self.enemyteam = enemyteam
self.intro = intro
self.status = []
self.turn = 1 # start counting from 1. Like normal people!
self.outro_win = outro
self.logo = logo
self.outro_loss = """
@@@@@@@@
@@@@@@@@@@@@
/@@@@@@@@@* @@@@@@@@@@@@@@@@@@
/@@@@@@@@@@@@@@@@@ @@@@@ @@@@@
@@@@@@@@@@@@@@@@@@@@ @@@@@ @@@@@
*@@@@@@@@@@@@@@@@@@@@@@ @@@@@ @@@@@
@@@@@@@@@@@@( #@@@@@@@ @@@@@ @@ @@ @@@@@
,@@@@@@@ @@@@@ @@ @@@@@@@@ @@@@@@@@ @@
@@& @@@@@ @@ @@@@@@@@ @@@@@@@@ @@
@@@@@ @@@@@ @@ @@ @@@@@
@@@@@ @@@@@@@ @@ @@@@@@@
@@@@@ @@@@@@@@@@@@ @@@@@@@@@@
#@@@@@@@ @@@@@@@ @@@@@@@
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@ @@@@@@@@@@@@@@@@@@@@@@
@@@@@ @@@@@@@@@@@@@@@@@@@@@@
@@@@@ @@@@@@@@@@@@@@@@@@@@@@
@@@@@ @@@@@@@@@@@@@@@@@@@@@@
@@@@@ @@@@@@@@@@@@@@@@@@
@@@@@ @@@@@@@@@@@@@@@@@@
@@@@@ @@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@ &@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
@@@@@@@. @@ @@@@@ @@@@@@@@ @@@@@ @@ @@
&@@ @@@ @@@ @@ @@@ @@@
You dead. You lose. Try to do better next time.
"""
def outro(self, cond):
if cond:
return self.outro_win
return self.outro_loss
def rivals(self, hero):
if hero in self.yourteam:
return self.enemyteam
return self.yourteam
def allies(self, hero):
if hero in self.yourteam:
return self.yourteam
return self.enemyteam
def weighted_shuffle(self, combo):
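        # Build turn order by repeatedly drawing from a pool in which each
        # combatant appears SPD**2 times, so a unit with twice the SPD is four
        # times as likely to win any given draw; each drawn unit is removed
        # from the remaining pool before the next draw.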
s = []
while combo:
t = []
for h in combo:
for _ in range(int(h.SPD**2)):
t.append(h)
random.shuffle(t)
_next = t[0]
s.append(_next)
while _next in combo:
combo.remove(_next)
return s
def check_for_win(self):
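        # Returns False when the player's team is wiped out, True when no
        # enemies remain alive, and None (implicitly) while the battle is
        # still undecided.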
if all(h.hp <= 0 for h in self.yourteam):
return False
if not self.enemyteam or all(h.hp <= 0 for h in self.enemyteam):
return True
def generate_choices(self, world):
choices = {}
for h in self.yourteam + self.enemyteam:
choices[h.name] = h.choice(world)
return choices
def display_choice(self, choices):
print("-" * 90)
print("%s %-24s %-22s %-19s %-s" % (
"#", "NAME", "HIT POINTS", "MANA POINTS", "CHOICE")
)
print()
for i, h in enumerate(self.yourteam):
if h.hp:
_c = choices[h.name]
if h.mp_ratio == 100:
_c = utils.color(str(_c), 'bold')
print(
"%d %-22s %s %s" %
(i + 1, utils.color(h.name, 'bold'),
h.bars(), _c)
)
print()
for m in self.enemyteam:
print(
"%-15s %9s %s" % (
m.name, "/".join((str(int(m.hp)), str(int(m.maxHP)))),
m.hp_bars()))
print("-" * 90)
print()
def generate_combo(self):
while True:
try:
cmd = input("[%s] Pick your combo> " % self.turn)
cmd = cmd.strip()
if cmd == "q":
break # need to do this!
if not re.match(r"^[0-9]{1,3}$", cmd):
print(
"Malformed input. Please input a proper combo string. "
"Examples: '124', '423', '21'.")
raise BadComboInputError
combo = []
for i in cmd:
idx = int(i)
if idx <= len(self.yourteam):
selected = self.yourteam[idx - 1]
if selected.hp <= 0:
print("Can't select hero %s" % selected)
raise BadComboInputError
combo.append(self.yourteam[idx - 1])
except BadComboInputError:
continue
combo += self.enemyteam
combo = self.weighted_shuffle(combo)
print()
return combo
def execute_step(self, combo, choices):
for h in combo:
if h.hp:
choices[h.name].effect(combo)
# execute end-of-step
        # Iterate over a copy so removing dead mobs doesn't skip elements.
        for mob in self.enemyteam[:]:
            if mob.hp <= 0:
                self.enemyteam.remove(mob)
def battle_loop(self):
# Execute start-of-battle
while True:
win = self.check_for_win()
if win is not None:
break
choices = self.generate_choices(self)
self.display_choice(choices)
combo = self.generate_combo()
self.execute_step(combo, choices)
for h in self.yourteam:
h.mp += 20
# execute end-of-turn
self.turn += 1
print(self.outro(win))
input()
return win
def display_stats(heroes):
def print_bar(stat, ratio):
print("%3s: %s" % (
stat, utils.bars(size=20, color_fill="white", ratio=ratio * 100)))
_maxHP = max((h.maxHP for h in heroes))
_maxMP = max((h.maxMP for h in heroes))
_maxATK = max((h.ATK for h in heroes))
_maxMAG = max((h.MAG for h in heroes))
_maxDEF = max((h.DEF for h in heroes))
_maxSPR = max((h.SPR for h in heroes))
_maxSPD = max((h.SPD for h in heroes))
for h in heroes:
print()
print(h.name)
print_bar('HP', h.maxHP / _maxHP)
print_bar('MP', h.maxMP / _maxMP)
print_bar('ATK', h.ATK / _maxATK)
print_bar('MAG', h.MAG / _maxMAG)
print_bar('DEF', h.DEF / _maxDEF)
print_bar('SPR', h.SPR / _maxSPR)
print_bar('SPD', h.SPD / _maxSPD)
```
#### File: DungeonCrawler/dungeoncrawler/skills.py
```python
import abc
import random
from dungeoncrawler import utils
class Ability(abc.ABC):
mp_cost = 100
name = "Ability"
def __init__(self, world, caster):
self.world = world
self.caster = caster
def __repr__(self):
return self.name
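    # Template shared by every skill: effect() asks targets() for the victims
    # or beneficiaries, pays the MP cost, applies single_effect() to each
    # target, and logs the result via log_skill(); subclasses override
    # targets()/single_effect() plus the optional opening/closing word hooks.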
def predicate(self):
return self.caster.mp == self.caster.maxMP
def effect(self, combo):
self.opening_words()
_targets = self.targets(combo)
if _targets:
self.caster.mp -= self.mp_cost
for _t in _targets:
_dmg, _old, _new = self.single_effect(_t)
self.log_skill(_dmg, _t, _old, _new)
self.closing_words()
def targets(self, combo):
raise NotImplementedError
def single_effect(self, target):
raise NotImplementedError
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type(
"%s: [%s] on %s dealing %s dmg"
". (%d -> %d)\n" % (
utils.bold(utils.color_green(self.caster.name)),
utils.color_yellow(str(self)),
utils.bold(target.name),
utils.bold(str(amount)),
old_value, new_value))
def opening_words(self):
pass
def closing_words(self):
pass
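    # Damage helpers: offensive damage scales with the square of the attacking
    # stat divided by the matching defensive stat, while healing scales
    # linearly with the caster's SPR.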
def physical_damage(self, source, target, multiplier):
return int((source.ATK**2) / target.DEF * multiplier)
def healing_damage(self, source, multiplier):
return int(source.SPR / 1.3 * multiplier)
def hybrid_damage(self, source, target, multiplier):
_a = self.physical_damage(source, target, multiplier)
_b = self.magical_damage(source, target, multiplier)
return int((_a + _b) / 2)
def magical_damage(self, source, target, multiplier):
return int(source.MAG**2 / target.SPR * multiplier)
class ATTACK(Ability):
mp_cost = 0
name = "ATTACK"
def predicate(self):
return True
def targets(self, combo):
_targets = []
_rivals_set = set(self.world.rivals(self.caster))
_alive_in_combo_set = {h for h in combo if h.hp}
_combo_targets = _rivals_set & _alive_in_combo_set
if _combo_targets:
sorted_hp = sorted(_combo_targets, key=lambda x: x.hp)
if len(sorted_hp) > 1:
_targets.append(random.choice(sorted_hp[:2]))
if len(sorted_hp) == 1:
_targets.append(sorted_hp[0])
return _targets
def single_effect(self, target):
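        # Basic attacks also drive the MP economy: the defender gains up to
        # 60 MP and the attacker up to 30 MP, each at 30% of the damage dealt.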
dmg = self.physical_damage(self.caster, target, 1)
_oldt = target.hp
target.hp -= dmg
mp_received = 0.3 * dmg
target.mp += mp_received if mp_received < 60 else 60
self.caster.mp += mp_received if mp_received < 30 else 30
return (dmg, _oldt, target.hp)
def log_skill(self, amount, target, old_value, new_value):
if self.caster in self.world.yourteam:
color = utils.color_green
else:
color = utils.color_red
description_low = [
"punches", "kicks", "throws a pebble at"
]
description_mid = [
"lands a blow on", "hits", "swings at"
]
description_high = [
"lands a powerful blow on", "injures", "maims"
]
_ratio = amount / old_value
if _ratio < 0.1:
description = description_low
elif 0.1 <= _ratio < 0.25:
description = description_mid
else:
description = description_high
utils.slow_type("%s: %s %s dealing %s dmg. (%d -> %d)\n" % (
utils.bold(color(self.caster.name)),
random.choice(description),
utils.bold(target.name), utils.bold(str(amount)),
old_value, new_value))
class WellIntentionedWish(Ability):
mp_cost = 70
name = "Wishful Intention"
def targets(self, combo):
_targets = []
for unit in combo:
if unit in self.world.yourteam and 0 < unit.hp_ratio < 100:
_targets.append(unit)
if not _targets:
for unit in self.world.yourteam:
if unit.hp_ratio < 100:
_targets.append(unit)
if not _targets:
_targets.append(self.caster)
return [min(_targets, key=lambda x: x.hp_ratio)]
def single_effect(self, target):
dmg = self.healing_damage(self.caster, 3)
_oldt = target.hp
target.hp += dmg
return (dmg, _oldt, target.hp)
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type("%s: [%s] on %s for %s HP. (%d -> %d)\n" % (
utils.bold(utils.color_green(self.caster.name)),
utils.color_yellow(str(self)),
utils.bold(target.name),
utils.bold(str(amount)), old_value, new_value))
class WellIntentionedWish2(WellIntentionedWish):
mp_cost = 100
def targets(self, combo):
_target = []
for unit in combo:
if unit in self.world.yourteam and 0 < unit.hp_ratio:
_target.append(unit)
return _target
class ThousandFists(Ability):
mp_cost = 120
name = "A Thousand Fists"
def targets(self, combo):
        # Use a list, not a generator: an empty generator is still truthy, and
        # min() would fail when no enemies are left alive.
        _targets = [h for h in self.world.enemyteam if h.hp]
        if _targets:
            sorted_hp_def = min(_targets,
                                key=lambda x: x.hp_ratio / (5 * x.DEF))
            return [sorted_hp_def]
def single_effect(self, target):
dmg = self.physical_damage(self.caster, target, 3)
_oldt = target.hp
target.hp -= dmg
return (dmg, _oldt, target.hp)
class SilentPrayer(Ability):
mp_cost = 70
name = "<NAME>"
def targets(self, combo):
_targets = []
for hero in self.world.yourteam:
if hero.hp and hero in combo and hero.mp_ratio < 100:
_targets.append(hero)
if not _targets:
_targets.append(self.caster)
return [min(_targets, key=lambda x: x.mp_ratio)]
def single_effect(self, target):
_oldt = target.mp
target.mp += target.maxMP
return (target.mp - _oldt, _oldt, target.mp)
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type(
"%s: [%s] on %s restoring full MP.\n" % (
utils.bold(utils.color_green(self.caster.name)),
utils.color_yellow(str(self)), utils.bold(target.name)))
class NovaBlast(Ability):
mp_cost = 120
name = "<NAME>"
def opening_words(self):
print(r"""
/$$$$$$ /$$ /$$ /$$$$$$$ /$$$$$$$$ /$$$$$$$ /$$ /$$ /$$$$$$ /$$ /$$ /$$$$$$
/$$__ $$| $$ | $$| $$__ $$| $$_____/| $$__ $$| $$$ | $$ /$$__ $$| $$ | $$ /$$__ $$
| $$ \__/| $$ | $$| $$ \ $$| $$ | $$ \ $$| $$$$| $$| $$ \ $$| $$ | $$| $$ \ $$
| $$$$$$ | $$ | $$| $$$$$$$/| $$$$$ | $$$$$$$/| $$ $$ $$| $$ | $$| $$ / $$/| $$$$$$$$
\____ $$| $$ | $$| $$____/ | $$__/ | $$__ $$| $$ $$$$| $$ | $$ \ $$ $$/ | $$__ $$
/$$ \ $$| $$ | $$| $$ | $$ | $$ \ $$| $$\ $$$| $$ | $$ \ $$$/ | $$ | $$
| $$$$$$/| $$$$$$/| $$ | $$$$$$$$| $$ | $$| $$ \ $$| $$$$$$/ \ $/ | $$ | $$
\______/ \______/ |__/ |________/|__/ |__/|__/ \__/ \______/ \_/ |__/ |__/
""") # noqa: E501
def targets(self, combo):
return self.world.enemyteam
def single_effect(self, target):
dmg = self.magical_damage(self.caster, target, 3.5)
_oldt = target.hp
target.hp -= dmg
return (dmg, _oldt, target.hp)
class Focus(Ability):
mp_cost = 60
name = "Sharp Focus"
def targets(self, combo):
return [self.caster]
def single_effect(self, target):
target.MAG *= 1.5
target.SPD *= 1.5
return ('', '', '')
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type("%s: [%s] on self increasing MAG/SPD.\n" % (
utils.bold(utils.color_green(self.caster.name)),
utils.color_yellow(str(self))))
class BurstingQi(Ability):
mp_cost = 80
name = "Burning Qi"
def targets(self, combo):
return [self.caster]
def single_effect(self, target):
target.ATK *= 1.2
target.DEF *= 1.2
return ('', '', '')
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type(
"%s: [%s] on self increasing ATK/DEF.\n" % (
utils.bold(utils.color_green(self.caster.name)),
utils.color_yellow(str(self))))
class CuriousBox(Ability):
mp_cost = 10
name = "Curious Box"
def targets(self, combo):
_targets = []
while True:
_enemies = [m for m in self.world.enemyteam if m.hp]
if not _enemies:
break
_targets.append(random.choice(_enemies))
if any((m.hp for m in self.world.enemyteam)):
if random.random() > 0.6:
break
return _targets
def single_effect(self, target):
_mult = int((random.random() * random.random() * 3) + 1.3)
dmg = self.hybrid_damage(self.caster, target, _mult)
_oldt = target.hp
target.hp -= dmg
return (dmg, _oldt, target.hp)
class BootyTrap(Ability):
mp_cost = 100
name = "Booty Trap"
def targets(self, combo):
eligible = [m for m in self.world.enemyteam if m.hp]
if eligible:
return [max(eligible, key=lambda x: x.DEF)]
def single_effect(self, target):
dmg = self.hybrid_damage(self.caster, target, 1.5)
_oldt = target.hp
target.hp -= dmg
target.DEF *= 0.7
target.SPR *= 0.7
return (dmg, _oldt, target.hp)
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type(
"%s: [%s] on %s dealing %s dmg. (%d -> %d)\n" % (
utils.bold(utils.color_green(self.caster.name)),
utils.color_yellow(str(self)),
utils.bold(target.name), utils.bold(str(amount)),
old_value, target.hp))
utils.slow_type(
"%s: [%s] on %s decreasing DEF/SPR.\n" % (
utils.bold(utils.color_green(self.caster.name)),
utils.color_yellow(str(self)), target.name))
class ChivalrousProtection(Ability):
mp_cost = 100
name = "Chivalrous Protection"
def targets(self, combo):
return [m for m in self.world.yourteam if m.hp]
def single_effect(self, target):
target.DEF *= 1.8
target.SPR *= 1.8
return ('', '', '')
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type(
"%s: [%s] on %s increasing DEF/SPR.\n" % (
utils.bold(utils.color_green(self.caster.name)),
utils.color_yellow(str(self)), target.name))
class RighteousInspiration(Ability):
mp_cost = 100
name = "Righteous Inspiration"
def targets(self, combo):
return [h for h in self.world.yourteam if (h.mp * h.hp)]
def single_effect(self, target):
target.mp *= 3
return ('', '', '')
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type(
"%s: [%s] on %s restoring MP.\n" % (
utils.bold(utils.color_green(self.caster.name)),
utils.color_yellow(str(self)),
utils.bold(target.name)))
class BubblyPickMeUp(Ability):
name = "Bubbly Pick-me-up"
def predicate(self):
return any((mob.hp_ratio < 60 for mob in self.world.enemyteam))
def targets(self, combo):
return [m for m in self.world.enemyteam if m.hp]
def single_effect(self, target):
dmg = self.healing_damage(self.caster, 1)
_oldt = target.hp
target.hp += dmg
return (dmg, _oldt, target.hp)
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type(
"%s: [%s] on %s healing for %s. (%d -> %d)\n" % (
utils.bold(utils.color_red(self.caster.name)),
utils.color_yellow(str(self)),
utils.bold(target.name),
utils.bold(str(amount)), old_value, new_value))
class TemporaryInsanity(Ability):
name = "Temporary Insanity"
def predicate(self):
return True
def targets(self, combo):
return self.world.enemyteam
def single_effect(self, target):
target.ATK *= 1.35
return ('', '', '')
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type("%s: [%s] on %s increases ATK.\n" % (
utils.bold(utils.color_red(self.caster.name)),
utils.color_yellow(str(self)),
utils.bold(target.name)))
class AngryOwner(Ability):
name = "<NAME>"
def predicate(self):
if len(self.world.enemyteam) == 1:
return True
def opening_words(self):
utils.slow_type("The Owner's here, and he's not happy!!!\n")
def closing_words(self):
self.caster.ATK *= 1.35
utils.slow_type(
"%s: [%s] on himself increasing ATK.\n" % (
utils.bold(utils.color_red(self.caster.name)),
utils.color_yellow(str(self))))
def targets(self, combo):
        return [h for h in self.world.yourteam if h.hp]
def single_effect(self, target):
dmg = self.hybrid_damage(self.caster, target, 3.5)
_oldt = target.hp
target.hp -= dmg
return (dmg, _oldt, target.hp)
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type(
"%s: [%s] on %s dealing %s dmg"
". (%d -> %d)\n" % (
utils.bold(utils.color_red(self.caster.name)),
utils.color_yellow(str(self)),
utils.bold(target.name),
utils.bold(str(amount)),
old_value, new_value))
class LieDownAndBleed(Ability):
name = "Lie Down and Bleed"
def predicate(self):
return random.random() <= 0.1
def targets(self, combo):
_t = []
for h in combo:
if h in self.world.yourteam and h.hp:
_t.append(h)
if not _t:
for h in self.world.yourteam:
if h.hp:
_t.append(h)
return [max(_t, key=lambda x: x.hp_ratio)]
def single_effect(self, target):
dmg = self.physical_damage(self.caster, target, 3.5)
_oldt = target.hp
target.hp -= dmg
return (dmg, _oldt, target.hp)
class ClashingAndSlashing(Ability):
name = "Clashing and Slashing"
def predicate(self):
return True
def targets(self, combo):
_t = []
for h in combo:
if h in self.world.yourteam and h.hp:
_t.append(h)
return _t
def single_effect(self, target):
return ATTACK(self.world, self.caster).single_effect(target)
class FullOfZeal(Ability):
name = "Full of Zeal"
def predicate(self):
return random.random() <= 0.3
def targets(self, combo):
_t = []
for h in combo:
if h in self.world.yourteam and h.hp:
_t.append(h)
if not _t:
_t = self.world.yourteam
return [random.choice(_t)]
def single_effect(self, target):
dmg = self.physical_damage(self.caster, target, 0.7)
_oldt = target.hp
target.hp -= dmg
self.caster.SPD *= 1.3
return (dmg, _oldt, target.hp)
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type(
"%s: [%s] on %s dealing %s dmg. (%d -> %d)\n" % (
utils.bold(utils.color_red(self.caster.name)),
utils.color_yellow(str(self)),
utils.bold(target.name),
utils.bold(str(amount)),
old_value, new_value))
utils.slow_type(
"%s: [%s] increases self SPD.\n" % (
utils.bold(utils.color_red(self.caster.name)),
utils.color_yellow(str(self))))
class BoastfulNoMore(Ability):
name = "Boastful No More"
def predicate(self):
return any(40 < h.mp_ratio < 70 for h in self.world.yourteam)
def targets(self, combo):
_t = list(reversed(sorted(
(h for h in self.world.yourteam if h.mp_ratio != 100),
key=lambda x: x.mp_ratio)))
if _t:
return [_t[0]]
        else:
            # effect() iterates over the returned targets, so wrap the single
            # random pick in a list.
            return [random.choice(
                [h for h in combo if h in self.world.yourteam and h.hp])]
def single_effect(self, target):
target.mp = 0
return ('', '', '')
def log_skill(self, amount, target, old_value, new_value):
utils.slow_type(
"%s: [%s] absorbed mp of %s\n" % (
utils.bold(utils.color_red(self.caster.name)),
utils.color_yellow(str(self)),
utils.bold(target.name)))
``` |
{
"source": "joehakimrahme/thawra",
"score": 2
} |
#### File: thawra/tests/test_actions.py
```python
import unittest
from thawra import hero
class ActionTest(unittest.TestCase):
def setUp(self):
self.h1 = hero.Hero("H1", "", (8, 8, 3), "fire", None)
self.h2 = hero.Hero("H2", "", (7, 2, 10), "fire", None)
self.h3 = hero.Hero("H3", "", (4, 5, 10), "fire", None)
def test_attack(self):
self.h1.actions['ATK']([self.h2])("")
self.assertNotEqual(self.h2.hp, self.h2.maxHP)
self.assertEqual(self.h1.mp, self.h1.maxMP)
def test_magic(self):
self.h1.actions['MAG']([self.h2])("")
self.assertNotEqual(self.h2.hp, self.h2.maxHP)
self.assertNotEqual(self.h1.mp, self.h1.maxMP)
def test_attack_twice(self):
self.h1.actions['ATK']([self.h2, self.h2])("")
self.assertNotEqual(self.h2.hp, self.h2.maxHP)
self.assertEqual(self.h1.mp, self.h1.maxMP)
def test_attack_multiple(self):
self.h1.actions['ATK']([self.h2, self.h3])("")
self.assertNotEqual(self.h2.hp, self.h2.maxHP)
self.assertNotEqual(self.h3.hp, self.h3.maxHP)
self.assertEqual(self.h1.mp, self.h1.maxMP)
def test_magic_twice(self):
self.h1.actions['MAG']([self.h2, self.h2])("")
self.assertNotEqual(self.h2.hp, self.h2.maxHP)
self.assertNotEqual(self.h1.mp, self.h1.maxMP)
def test_magic_multiple(self):
self.h1.actions['MAG']([self.h2, self.h3])("")
self.assertNotEqual(self.h2.hp, self.h2.maxHP)
self.assertNotEqual(self.h3.hp, self.h3.maxHP)
self.assertNotEqual(self.h1.mp, self.h1.maxMP)
if __name__ == "__main__":
unittest.main()
```
#### File: thawra/tests/test_hero.py
```python
import unittest
from thawra import hero
class HeroTest(unittest.TestCase):
def setUp(self):
self.hero = hero.Hero(name="",
skillmap="",
attributes=[8, 8, 3],
element="fire",
macros=hero.randattack)
def test_attributes(self):
self.assertEqual(self.hero.strength, 8)
self.assertEqual(self.hero.intelligence, 8)
self.assertEqual(self.hero.agility, 3)
def test_level(self):
self.assertEqual(self.hero.level, 1)
def test_hero_maxHP(self):
return self.assertEqual(self.hero.hp, self.hero.intelligence * 100)
def test_hero_maxMP(self):
return self.assertEqual(self.hero.mp, self.hero.intelligence * 100)
def test_hero_stats(self):
return self.assertEqual(self.hero.stats, {
'ATK': self.hero.strength * 10,
'DEF': self.hero.strength * 2,
'MAG': self.hero.intelligence * 7,
'MDE': self.hero.intelligence * 2,
'SPD': self.hero.agility * 30})
def test_hero_hp(self):
self.assertEqual(self.hero.hp, self.hero.maxHP)
self.hero.hp -= self.hero.maxHP + 1
self.assertEqual(self.hero.hp, 0)
self.hero.hp += self.hero.maxHP * 2
self.assertEqual(self.hero.hp, self.hero.maxHP)
def test_invalid_attributes(self):
self.assertRaises(hero.InvalidHero, hero.Hero,
"", "", [10], "", None)
def test_choice(self):
"""This test should be renamed test_randattack gambit.
Or something.
"""
choice, target = self.hero.choice([self.hero], [self.hero])
self.assertEqual(choice, "ATK")
self.assertEqual(target, [self.hero])
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "joehalloran/python-quiz",
"score": 3
} |
#### File: joehalloran/python-quiz/gamedata.py
```python
import filehandler
def saveScore(user, score):
filehandler.saveScore(user, score)
def reportScore(user, score):
print()
print("Well done " + user + " your final score was...")
print(score)
showFinalMenu(user)
def showFinalMenu(user):
print()
print("Press 1 to show your high scores")
print("Press 2 to quit")
userSelection = input("> ")
if userSelection == '1':
highScores = getHighScores(user)
print()
print("Your highscores:")
for score in highScores:
print(score)
showFinalMenu(user)
else:
print("Thanks for playing. Goodbye")
exit()
def getHighScores(user):
return filehandler.loadHighScores(user)
``` |
{
"source": "joehalloran/shoppinglist_project",
"score": 2
} |
#### File: shoppinglist/lists/views.py
```python
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, DeleteView
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib import messages
from django.shortcuts import render
from django.forms import inlineformset_factory, modelformset_factory, TextInput, HiddenInput
from django.http import HttpResponseRedirect
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.core.exceptions import PermissionDenied
from django.db import transaction
from oauth2client.contrib.django_util import decorators
from oauth2client import client
from .models import List, Item
from .forms import ListCreateForm
class MyListView(ListView):
"""
The /mylists/ root. Lists lists I own.
"""
model = List
template_name = 'lists/mylists.html'
context_object_name = 'mylists'
@method_decorator(decorators.oauth_required())
    def dispatch(self, request, *args, **kwargs):
"""
Add oauth decorator to bind google user data to request object
"""
return super(MyListView,self).dispatch(request,*args,**kwargs)
def get_queryset(self):
"""
Only list Lists I own.
"""
return List.objects.filter(owner = self.request.oauth.credentials.id_token['email'])
class ListCreate(SuccessMessageMixin, CreateView):
"""
Form to create a new list
"""
template_name = 'lists/create_form.html'
form_class = ListCreateForm
success_message = "List Created. Now you can add some items."
@method_decorator(decorators.oauth_required())
    def dispatch(self, request, *args, **kwargs):
"""
Add oauth decorator to bind google user data to request object
"""
return super(ListCreate,self).dispatch(request,*args,**kwargs)
def form_valid(self, form):
"""
Manually add google user as owner
"""
form.instance.owner = self.request.oauth.credentials.id_token['email']
return super(ListCreate, self).form_valid(form)
class ListDelete(DeleteView):
"""
Form to delete a list.
"""
model = List
success_url = reverse_lazy('lists:mylists')
success_message = "Your list deleted successfully."
@method_decorator(decorators.oauth_required())
    def dispatch(self, request, *args, **kwargs):
"""
Add oauth decorator to bind google user data to request object and check if user owns object before delete.
"""
current_user = request.oauth.credentials.id_token['email']
parentList = self.get_object()
if current_user != parentList.owner and not request.user.is_superuser:
raise PermissionDenied
return super(ListDelete,self).dispatch(request,*args,**kwargs)
def get_context_data(self, **kwargs):
"""
Add in a QuerySet of of the items belonging to the list
"""
context = super(ListDelete, self).get_context_data(**kwargs)
        context['items'] = Item.objects.filter(parentList=self.get_object())
return context
def delete(self, request, *args, **kwargs):
"""
Work around as SuccessMessageMixin does not work with DeleteView
"""
messages.success(self.request, self.success_message)
return super(ListDelete, self).delete(request, *args, **kwargs)
@decorators.oauth_required
def editListView(request, pk):
"""
Create / edit form to edit all items in list and list title.
"""
# Get current user and list object to check ownership
current_user = request.oauth.credentials.id_token['email']
parentList = List.objects.get(pk=pk)
if current_user != parentList.owner and not request.user.is_superuser:
raise PermissionDenied
# Generate formset for the parent list, so we can edit the list name
ListFormSet = modelformset_factory(
List,
fields=("name",),
extra=0,
widgets={
'name': TextInput(attrs={'class': 'form-control form-title form-inactive', 'required': True}),
}
)
# If no items exist create an extra field to add new list item.
itemCount = Item.objects.filter(parentList = parentList).count()
if itemCount > 0:
extraItemField = 0
else:
extraItemField = 1
# Generate the formset of list member items, so we can edit the shopping list
ItemInlineFormSet = inlineformset_factory(
List,
Item,
fields=("name", "parentList"),
extra=extraItemField,
widgets={
'name': TextInput(attrs={'class': 'form-control form-inactive', 'required': True}),
}
)
# if POST generate form data and save if valid
if request.method == 'POST':
        item_formset = ItemInlineFormSet(request.POST, instance=parentList)
        list_formset = ListFormSet(request.POST, queryset=List.objects.filter(pk=pk))
if item_formset.is_valid() and list_formset.is_valid():
# Transactions - only save list and items together
with transaction.atomic():
item_formset.save()
list_formset.save()
# Redirect to list page.
messages.add_message(request, messages.SUCCESS, 'List changes saved.')
return HttpResponseRedirect(reverse('lists:edit', kwargs={'pk':pk}))
else:
# Add errors and redirect to form.
for errors in list_formset.errors:
messages.add_message(request, messages.INFO, errors, extra_tags='danger')
for errors in item_formset.errors:
messages.add_message(request, messages.INFO, errors, extra_tags='danger')
return HttpResponseRedirect(reverse('lists:edit', kwargs={'pk':pk}))
# if a GET (or any other method) create a blank form
else:
# Limit list items to only include parent list members (not from any old list)
        item_formset = ItemInlineFormSet(instance=parentList)
# Limit list item to one. We only want to edit the title of the current list
        list_formset = ListFormSet(queryset=List.objects.filter(pk=pk))
return render(request, 'lists/item_form.html', {
'item_formset': item_formset,
'list_formset': list_formset,
})
``` |
{
"source": "joehand/joeahand",
"score": 2
} |
#### File: joehand/joeahand/pelicanconf.py
```python
import time
from urllib.parse import urlparse
VERSION = 0.2
############################
##### Pelican Settings #####
############################
# General Pelican settings. You can problably change these without breaking theme.
# Will be set in publish config.
SITEURL = ''
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
PATH = 'content'
TIMEZONE = 'US/Mountain'
DEFAULT_LANG = u'en'
DEFAULT_DATE_FORMAT = '%Y-%B-%d'
DIRECT_TEMPLATES = ('index', 'archives', 'sitemap')
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
ARTICLE_URL = 'archive/{slug}/'
ARTICLE_SAVE_AS = 'archive/{slug}/index.html'
PAGE_URL = '{slug}/'
PAGE_SAVE_AS = '{slug}/index.html'
ARCHIVES_URL = 'archive/'
ARCHIVES_SAVE_AS = 'archive/index.html'
SITEMAP_SAVE_AS = 'sitemap.xml'
TAG_URL = ''
TAG_SAVE_AS = ''
CATEGORY_URL = ''
CATEGORY_SAVE_AS = ''
# Nicer pagination
# (e.g. site.com/blog/1/ instead of site.com/blog/1.html)
DEFAULT_PAGINATION = 5
PAGINATION_PATTERNS = (
(1, '{base_name}/', '{base_name}/index.html'),
(2, '{base_name}/{number}/', '{base_name}/{number}/index.html'),
)
#################################
##### Theme/Custom Settings #####
#################################
# Theme specific settings. These are important and will break theme if missing
THEME = 'themes/joe/'
# These pages will show in nav pages.
# Need to put files in content/pages/
NAV_PAGES = ['about', 'cv']
# We have a special homepage. Blog will go to site.com/blog
INDEX_SAVE_AS = 'blog/index.html'
# These things are used in various places.
AUTHOR = u'<NAME>'
AUTHOR_LINKS = {
'INSTAGRAM': 'http://instagram.com/joeahand',
'GITHUB': 'https://github.com/joehand',
'TWITTER': 'http://twitter.com/joeahand/',
# use html entities to obfuscate for spammers
# (http://stackoverflow.com/questions/748780/best-way-to-obfuscate-an-e-mail-address-on-a-website)
'EMAIL': 'joe@joeahand.com'
}
SITENAME = u'<NAME>'
SITESUBTITLE = u'Better communities with open data'
SITEDESCRIPTION = u'<NAME> is a product lead, developer, researcher, and open data enthusiast. Joe works with international communities, open source developers, scientists, and data visualization & mapping experts to enable communities to solve problems.'
PLUGIN_PATHS = ["plugins", 'plugins/pelican-plugins']
PLUGINS = [
'assets',
'pelican_gdocs'
]
# GDOCS PLUGIN Settings
GDOCS = [
{
'name': 'instagram',
'url': 'http://docs.google.com/spreadsheets/d/16KHyJyTGvOIFKTR5uUHrXKWH3kf-UiucCwXfceFet0k/pub?gid=0&single=true&output=csv'
},
{
'name': 'articles',
'url': 'http://docs.google.com/spreadsheets/d/1Wav1nDxtOTRm3WMLL3RI0oqApxLjBxzTcPftWsCn6x4/pub?gid=0&single=true&output=csv'
},
{
'name': 'fitbit_activity',
'url': 'http://docs.google.com/spreadsheets/d/1AZRyvrcm-Stk0VlWoPEHD4sxe1PTOdEpU2MejRzHB7s/pub?gid=0&single=true&output=csv'
},
{
'name': 'tweets',
'url': 'http://docs.google.com/spreadsheets/d/1qRuICBJWHQQ34ujTXkY8jh7obJuVJ_quLbwMrBiQFyg/pub?gid=0&single=true&output=csv'
},
{
'name': 'steps',
'url': 'http://docs.google.com/spreadsheets/d/1AZRyvrcm-Stk0VlWoPEHD4sxe1PTOdEpU2MejRzHB7s/pub?gid=0&single=true&output=csv'
},
{
'name': 'coffee',
'url': 'http://docs.google.com/spreadsheets/d/1fsaSy8HJdoTr5iUX7p-iCxUwC-TFzZxnqNzt6mMP26s/pub?gid=0&single=true&output=csv'
},
]
COPYRIGHT_LINK = 'http://creativecommons.org/licenses/by-nc-nd/4.0/'
home_bundle = [
"external/pure/base.css",
"external/pure/menus-core.css",
"external/pure/menus-horizontal.css",
"external/pure/menus-skin.css",
"external/pure/tables.css",
"external/pure/grids.css",
"external/pure/grids-responsive.css",
'critical_home.scss'
]
blog_bundle = [
"external/pure/base.css",
"external/pure/menus-core.css",
"external/pure/menus-horizontal.css",
"external/pure/menus-skin.css",
"external/pure/tables.css",
"external/pure/grids.css",
"external/pure/grids-responsive.css",
'critical_blog.scss'
]
page_bundle = [
"external/pure/base.css",
"external/pure/menus-core.css",
"external/pure/menus-horizontal.css",
"external/pure/menus-skin.css",
"external/pure/tables.css",
"external/pure/grids.css",
"external/pure/grids-responsive.css",
'critical_page.scss'
]
ASSET_BUNDLES = (
('home', home_bundle,
{'filters':"pyscss,cssmin",
'output':"../../themes/joe/templates/home.min.css"}),
('blog',blog_bundle ,
{'filters':"pyscss,cssmin",
'output':"../../themes/joe/templates/blog.min.css"}),
('page',page_bundle,
{'filters':"pyscss,cssmin",
'output':"../../themes/joe/templates/page.min.css"}),
)
###############################
##### Nice Things to Have #####
###############################
# These can be used anywhere in templates
LAST_UPDATE = str(time.strftime('%m %Y'))
YEAR = str(time.strftime('%Y'))
def get_domain(url):
''' Return the domain (and subdomain!) for a url
'''
parsed_uri = urlparse(url)
return '{uri.netloc}'.format(uri=parsed_uri).replace('www.', '')
JINJA_FILTERS = {
'get_domain': get_domain,
}
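# Illustrative behavior of the filter defined above (values are examples only):
#   get_domain('http://instagram.com/joeahand')  -> 'instagram.com'
#   get_domain('https://www.example.com/a/post') -> 'example.com'
# In a Jinja template it would be used as {{ article.url | get_domain }}.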
``` |
{
"source": "joehand/statistics-neighborhoods",
"score": 3
} |
#### File: statistics-neighborhoods/statsneighborhoods/price.py
```python
import numpy as np
from pandas import concat, DataFrame, Series
from scipy import stats
LOW_BINS = [0, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000,
50000, 60000, 75000, 100000, 125000, 150000, 200000]
MID_BINS = [5000, 12500, 17500, 22500, 27500, 32500, 37500, 42500, 47500,
55000, 67500, 87500, 112500, 137500, 175000, 225000]
def adjust_rich_bin(df, col_suffix, inc_bin=200000):
col = 'ACSHINC200'
df['new_mid_pt' + col_suffix] = (df['ACSAVGHINC'] * df['Total_Households'] -
df['mean_inc' + col_suffix] * df['Total_Households'] + df[col] * inc_bin)/df[col]
df['new_mid_pt' + col_suffix] = df['new_mid_pt' +
col_suffix].replace([np.inf, -np.inf], inc_bin)
df[col + col_suffix] = df[col] * df['new_mid_pt' + col_suffix]
df['adjusted' + col_suffix] = df.filter(
regex='^ACSHINC([0-9])+' + col_suffix).sum(axis=1)//df['Total_Households']
df['adjusted' + col_suffix] = df['adjusted' + col_suffix].astype(int)
return df
def calculate_mean_income(df, inc_bins, col_suffix):
df = df.copy()
cols = df.filter(regex='^ACSHINC([0-9])+$').columns
for i, col in enumerate(cols):
df[col + col_suffix] = df[col] * inc_bins[i]
df['total_inc' +
col_suffix] = df.filter(regex='^ACSHINC([0-9])+' + col_suffix).sum(axis=1)
df['mean_inc' + col_suffix] = df['total_inc' +
col_suffix]//df['Total_Households']
df = adjust_rich_bin(df, col_suffix, inc_bin=inc_bins[i])
return df
def calculate_a(df, reported_mean='ACSAVGHINC', calc_mean_low='mean_inc_low',
calc_mean_mid='mean_inc_mid'):
df = df.copy()
df['a'] = (df[reported_mean] - df[calc_mean_low]) / \
(df[calc_mean_mid] - df[calc_mean_low])
return df
def calculate_price(df):
df = calculate_mean_income(df, LOW_BINS, '_low')
df = calculate_mean_income(df, MID_BINS, '_mid')
df = calculate_a(df, calc_mean_low='adjusted_low')
return df
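# Rough usage sketch. The expected columns are inferred from the code above, not
# from this repository's data files: one household-count column per ACS income bin
# (names matching ^ACSHINC[0-9]+$, one per entry of LOW_BINS/MID_BINS), plus
# 'ACSHINC200', the reported mean 'ACSAVGHINC', and 'Total_Households'.
#
#   scored = calculate_price(acs_df)
#   scored[['mean_inc_low', 'mean_inc_mid', 'adjusted_low', 'a']].head()
#
# 'a' expresses where the reported mean falls between the low-bound and mid-point
# estimates of mean income (see calculate_a above).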
``` |
{
"source": "joehandzik/dlcookbook-dlbs",
"score": 2
} |
#### File: caffe2_benchmarks/models/googlenet.py
```python
from __future__ import absolute_import
from caffe2.python import brew
from caffe2_benchmarks.models.model import Model
# https://github.com/dmlc/mxnet/blob/master/example/image-classification/symbols/googlenet.py
# http://ethereon.github.io/netscope/#/gist/4325909f3683e51eaf93fdaeed6b2a9b
def conv_factory(model, v, num_in_channels, num_filter, kernel, stride=1, pad=0, name=None, suffix=''):
v = brew.conv(model, v, 'conv_%s%s' %(name, suffix), num_in_channels, num_filter, kernel=kernel, pad=pad, stride=stride)
v = brew.relu(model, v, 'relu_%s%s' %(name, suffix))
return v
def inception_factory(model, v, num_in_channels, num_1x1, num_3x3red, num_3x3, num_d5x5red, num_d5x5, proj, name):
# 1x1
c1x1 = conv_factory(model, v, num_in_channels, num_filter=num_1x1, kernel=1, name=('%s_1x1' % name))
# 3x3 reduce + 3x3
c3x3r = conv_factory(model, v, num_in_channels, num_filter=num_3x3red, kernel=1, name=('%s_3x3' % name), suffix='_reduce')
c3x3 = conv_factory(model, c3x3r, num_3x3red, num_filter=num_3x3, kernel=3, pad=1, name=('%s_3x3' % name))
    # 5x5 reduce + 5x5
cd5x5r = conv_factory(model, v, num_in_channels, num_filter=num_d5x5red, kernel=1, name=('%s_5x5' % name), suffix='_reduce')
cd5x5 = conv_factory(model, cd5x5r, num_d5x5red, num_filter=num_d5x5, kernel=5, pad=2, name=('%s_5x5' % name))
# pool + proj
pooling = brew.max_pool(model, v, 'max_pool_%s_pool' % name, kernel=3, stride=1, pad=1)
cproj = conv_factory(model, pooling, num_in_channels, num_filter=proj, kernel=1, name=('%s_proj' % name))
# concat and return
return brew.concat(model, [c1x1, c3x3, cd5x5, cproj], 'ch_concat_%s_chconcat' % name)
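# Channel bookkeeping note: the concatenated output carries num_1x1 + num_3x3 +
# num_d5x5 + proj channels. For example, inception_3a below is built with
# (64, 96, 128, 16, 32, 32), so it outputs 64 + 128 + 32 + 32 = 256 channels,
# which is why the following block is constructed with num_in_channels=256.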
class GoogleNet(Model):
"""A GoogleNet implementation"""
implements = 'googlenet'
def __init__(self, params):
""" Naming and topology according to: http://ethereon.github.io/netscope/#/gist/f2e4825a8d4f8a3609cefd7ffadc910a
Based on: https://github.com/dmlc/mxnet/blob/master/example/image-classification/symbols/alexnet.py
"""
Model.check_parameters(
params,
{'name': 'GoogleNet', 'input_shape':(3, 224, 224),
'num_classes': 1000, 'arg_scope': {'order': 'NCHW'}})
Model.__init__(self, params)
def forward_pass_builder(self, model, loss_scale=1.0):
"""
This function adds the operators, layers to the network. It should return
a list of loss-blobs that are used for computing the loss gradient. This
function is also passed an internally calculated loss_scale parameter that
is used to scale your loss to normalize for the number of GPUs.
Signature: function(model, loss_scale)
"""
v = 'data'
v = conv_factory(model, v, self.input_shape[0], 64, kernel=7, stride=2, pad=3, name="conv1/7x7_s2")
v = brew.max_pool(model, v, 'pool1/3x3_s2', kernel=3, stride=2)
v = brew.lrn(model, v, 'pool1/norm1', size=5, alpha=0.0001, beta=0.75)
v = conv_factory(model, v, 64, 64, kernel=1, stride=1, name="conv2/3x3_reduce")
v = conv_factory(model, v, 64, 192, kernel=3, stride=1, pad=1, name="conv2/3x3")
v = brew.lrn(model, v, 'conv2/norm2', size=5, alpha=0.0001, beta=0.75)
v = brew.max_pool(model, v, 'pool2/3x3_s2', kernel=3, stride=2)
v = inception_factory(model, v, 192, 64, 96, 128, 16, 32, 32, name="inception_3a")
v = inception_factory(model, v, 256, 128, 128, 192, 32, 96, 64, name="inception_3b")
v = brew.max_pool(model, v, 'pool3/3x3_s2', kernel=3, stride=2)
v = inception_factory(model, v, 480, 192, 96, 208, 16, 48, 64, name="inception_4a")
v = inception_factory(model, v, 512, 160, 112, 224, 24, 64, 64, name="inception_4b")
v = inception_factory(model, v, 512, 128, 128, 256, 24, 64, 64, name="inception_4c")
v = inception_factory(model, v, 512, 112, 144, 288, 32, 64, 64, name="inception_4d")
v = inception_factory(model, v, 528, 256, 160, 320, 32, 128, 128, name="inception_4e")
v = brew.max_pool(model, v, 'pool4/3x3_s2', kernel=3, stride=2, pad=1)
v = inception_factory(model, v, 832, 256, 160, 320, 32, 128, 128, name="inception_5a")
v = inception_factory(model, v, 832, 384, 192, 384, 48, 128, 128, name="inception_5b")
v = brew.average_pool(model, v, 'pool5/7x7_s1', kernel=7, stride=1)
v = brew.dropout(model, v, 'pool5/drop_7x7_s1', ratio=0.5, is_test=(self.phase == 'inference'))
return self.add_head_nodes(model, v, 1024, 'classifier', loss_scale=loss_scale)
```
#### File: data/imagenet/imagenet_tools.py
```python
import os
import sys
import gzip
import json
import random
from dlbs.utils import IOUtils
class ImageNetTools(object):
"""Various framework-independent tools to process ImageNet and prepare files
    required to generate benchmark datasets.
"""
@staticmethod
def get_labels():
"""Load labels from 'imagenet_labels.json.gz' that's located in the same directory
as this file.
:rtype: dict
:return: Dictionary that maps ImageNet class folder ID to an object that
contains two fields - 'label' and 'human_labels'. The 'label' is an
integer index of a label from [0,1000) according to this list:
http://data.csail.mit.edu/soundnet/categories/categories_imagenet.txt
"""
labels_file = os.path.join(os.path.dirname(__file__), 'imagenet_labels.json.gz')
with gzip.open(labels_file, 'rb') as file_obj:
labels = json.load(file_obj)
return labels
@staticmethod
def get_image_files(folder, shuffle=True, num_files=-1):
""" Get *.JPEG files in folder. Shuffle files and return at most num_files
files.
"""
# Scan the folder recursively and find files.
files = IOUtils.find_files(folder, '*.JPEG', recursively=True)
# Shuffle files and return first 'num_files' files.
if shuffle:
random.shuffle(files)
if num_files > 0 and num_files < len(files):
files = files[0:num_files]
return files
@staticmethod
def get_file_info(img_file, labels):
"""Return meta information about image in 'img_file' file. """
fdir, fname = os.path.split(img_file)
synset = os.path.basename(fdir)
if synset not in labels:
raise ValueError("Invalid synset '%s: not found in labels dict." % synset)
return synset, fname, labels[synset]
@staticmethod
def build_caffe_labels(imagenet_dir, labels_file):
"""Generates a textual file with the following content:
img_0000.jpeg 1
img_0001.jpeg 0
...
mapping image file name to its class label
"""
IOUtils.mkdirf(labels_file)
img_files = ImageNetTools.get_image_files(imagenet_dir)
labels = ImageNetTools.get_labels()
with open(labels_file, 'w') as fobj:
for img_file in img_files:
synset, fname, finfo = ImageNetTools.get_file_info(img_file, labels)
fobj.write("%s/%s %d\n" % (synset, fname, finfo['label']))
@staticmethod
def build_mxnet_labels(imagenet_dir, labels_file):
"""Generates a textual file with the following content:
0 45 n02093256/n02093256_3032.JPEG
1 45 n02093256/n02093256_3353.JPEG
...
image_index image_class_label image_path
"""
IOUtils.mkdirf(labels_file)
img_files = ImageNetTools.get_image_files(imagenet_dir)
labels = ImageNetTools.get_labels()
with open(labels_file, 'w') as fobj:
for img_index, img_file in enumerate(img_files):
synset, fname, finfo = ImageNetTools.get_file_info(img_file, labels)
fobj.write("%d\t%d\t%s/%s\n" % (img_index, finfo['label'], synset, fname))
@staticmethod
def build_tensorflow_synsets(imagenet_dir, synset_file):
"""Builds a textual file with one synset on a line"""
IOUtils.mkdirf(synset_file)
labels = ImageNetTools.get_labels()
with open(synset_file, 'w') as fobj:
for label in labels:
fobj.write("%s\n" % label)
@staticmethod
def build_tensorflow_human_labels(imagenet_dir, human_labels_file):
"""Builds a textual file with one synset on a line"""
IOUtils.mkdirf(human_labels_file)
labels = ImageNetTools.get_labels()
with open(human_labels_file, 'w') as fobj:
for label in labels:
fobj.write("%s\t%s\n" % (label, labels[label]['human_labels']))
if __name__ == '__main__':
num_args = len(sys.argv)
if num_args > 1:
if sys.argv[1] == 'build_caffe_labels' and num_args == 4:
ImageNetTools.build_caffe_labels(sys.argv[2], sys.argv[3])
if sys.argv[1] == 'build_mxnet_labels' and num_args == 4:
ImageNetTools.build_mxnet_labels(sys.argv[2], sys.argv[3])
if sys.argv[1] == 'build_tensorflow_synsets' and num_args == 4:
ImageNetTools.build_tensorflow_synsets(sys.argv[2], sys.argv[3])
if sys.argv[1] == 'build_tensorflow_human_labels' and num_args == 4:
ImageNetTools.build_tensorflow_human_labels(sys.argv[2], sys.argv[3])
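# Example invocations (paths are hypothetical):
#   python imagenet_tools.py build_caffe_labels /data/imagenet/train /tmp/caffe_labels.txt
#   python imagenet_tools.py build_mxnet_labels /data/imagenet/train /tmp/train.lst
# Each command expects exactly two positional arguments after the command name
# (num_args == 4); otherwise the script silently does nothing.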
```
#### File: dlbs/reports/bench_stats.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import json
from dlbs.utils import IOUtils
from dlbs.logparser import LogParser
"""
print("[WARNING] This module is deprecated and will be removed in future releases. "
"Please, use bench_data.py instead.")
"""
class BenchStats(object):
"""Class that finds log files and computes simple statistics on experiments."""
@staticmethod
def load_data(**kwargs):
is_dir = os.path.isdir(kwargs['input'])
is_file = os.path.isfile(kwargs['input'])
is_log_file = is_file and kwargs['input'].endswith('.log')
is_json_file = is_file and (kwargs['input'].endswith('.json') or kwargs['input'].endswith('.json.gz'))
if is_dir or is_log_file:
            files = IOUtils.find_files(kwargs['input'], "*.log", kwargs['recursive'])
benchmarks, failed_benchmarks = LogParser.parse_log_files(files)
benchmarks.extend(failed_benchmarks)
elif is_json_file:
benchmarks = IOUtils.read_json(kwargs['input'])
benchmarks = benchmarks['data']
else:
raise ValueError("Invalid input descriptor: {}".format(kwargs['input']))
return benchmarks
@staticmethod
def compute(**kwargs):
""" Finds files and compute experiments' statistics.
        :param str input: Directory with log files, a single log file, or a JSON file with benchmark data.
:param bool recursive: If True, directory will be searched recursively.
:return: Dictionary with experiment statistics.
"""
benchmarks = BenchStats.load_data(**kwargs)
def _get(d, key, val=''):
return d[key] if key in d else val
stats = {
'num_benchmarks': len(benchmarks),
'num_failed_exps': 0,
'num_successful_exps': 0,
'failed_exps': {},
'node_ids': set(),
'node_titles': set(),
'gpu_titles': set(),
'framework_titles': set()
}
for bench in benchmarks:
time_val = str(bench['results.time']).strip() if 'results.time' in bench else ''
if not time_val:
stats['num_failed_exps'] += 1
if 'exp.id' not in bench:
print("[ERROR] No exp.id found in benchmark (%s)" % str(bench))
continue
stats['failed_exps'][bench['exp.id']] = {
'msg': 'No %s time found in log file.' % _get(bench, 'exp.phase', 'PHASE_UNKNOWN'),
'log_file': _get(bench, 'exp.log_file', 'LOG_FILE_UNKNOWN'),
'phase': _get(bench, 'exp.phase', 'PHASE_UNKNOWN'),
'framework_title': _get(bench, 'exp.framework_title', 'FRAMEWORK_TITLE_UNKNOWN')
}
else:
stats['num_successful_exps'] += 1
#
for key in [('exp.node_id', 'node_ids'), ('exp.node_title', 'node_titles'), ('exp.gpu_title', 'gpu_titles'), ('exp.framework_title', 'framework_titles')]:
if key[0] in bench:
stats[key[1]].add(bench[key[0]])
for key in ['node_ids', 'node_titles', 'gpu_titles', 'framework_titles']:
stats[key] = list(stats[key])
return stats
if __name__ == "__main__":
print("[WARNING] This module is deprecated and will be removed in future releases. "
"Please, use bench_data.py instead.")
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, required=True, default=None,
help="Either a folder containing Scan this folder for *.log files. "
"Scan recursively if --recursive is set.")
parser.add_argument('--recursive', required=False, default=False, action='store_true',
help='Scan --log_dir folder recursively for log files.')
config = vars(parser.parse_args())
stats = BenchStats.compute(**config)
print(json.dumps(stats, sort_keys=False, indent=2))
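# Example invocation (paths are hypothetical):
#   python bench_stats.py --input ./benchmark_logs --recursive
# This prints a JSON summary with the number of successful and failed experiments,
# details of the failures, and the sets of node, GPU and framework titles seen.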
```
#### File: python/dlbs/worker.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import json
import threading
import logging
import subprocess
import traceback
from dlbs.utils import IOUtils, DictUtils, ParamUtils
from dlbs.sysinfo.systemconfig import SysInfo
class Worker(threading.Thread):
"""This class runs one benchmarking experiment.
It runs it in a separate thread. The typical usage example:
.. code-block:: python
worker = Worker(
["echo", "'Hello World'"], # Command to execute (with ``Popen``)
{}, # Environmental variables to set
{}, # Experiment variables (dictionary)
)
worker.work() # This is a blocking call.
"""
def __init__(self, command, environ, params):
""" Initializes this worker with the specific parameters.
Args:
command (list): List containing command to execute and its command line arguments (with Popen).
environ (dict): Environment variables to set with Popen.
params (dict): Parameters of this benchmark (dictionary).
"""
threading.Thread.__init__(self)
self.command = command # Command + command line arguments
self.environ = environ # Environmental variables to append
self.params = params # All experiment variables
self.process = None # Background process object
self.ret_code = 0 # Return code of the process
def __dump_parameters(self, file_object):
"""Dumps all experiment parameters to a file (or /dev/stdout).
Args:
            file_object: A file object. This is a log file for an individual benchmark. All parameters are dumped, which is
not great.
"""
file_object.write("Running subprocess (%s) with log file '%s'\n" % (self.command, self.params['exp.log_file']))
file_object.write("\n"
"--------------------------\n"
"Variables from python script.\n"
"--------------------------\n")
ParamUtils.log_parameters(self.params, file_object)
file_object.write("\n"
"----------------------------\n"
"Starting framework launcher.\n"
"----------------------------\n")
def run(self):
"""Runs subprocess with Popen.
This method must not be called directly. Use blocking :py:meth:`~dlbs.worker.Worker.work`
method instead.
"""
try:
# Dump parameters to a log file or to standard output
DictUtils.ensure_exists(self.params, 'exp.log_file', default_value='')
if self.params['exp.log_file'].strip() == '':
self.params['exp.log_file'] = '/dev/stdout'
IOUtils.mkdirf(self.params['exp.log_file'])
with open(self.params['exp.log_file'], 'a+') as log_file:
self.__dump_parameters(log_file)
# This is where we launch process. Keep in mind, that the log file that's
# supposed to be created is exp.log_file or exp_log_file in the script.
# Other output of the launching script will be printed by this python code
# to a standard output.
try:
self.process = subprocess.Popen(self.command, universal_newlines=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=self.environ)
except OSError:
print("[ERROR] Failed to run command '%s' (file must exist and be executable)" % str(self.command))
raise
while True:
output = self.process.stdout.readline()
if output == '' and self.process.poll() is not None:
break
if output:
sys.stdout.write(output)
sys.stdout.flush()
self.ret_code = self.process.poll()
except Exception as err:
logging.warn('Exception has been caught for experiment %s: %s', self.params.get('exp.id'), str(err))
logging.warn(traceback.format_exc())
self.ret_code = -1
def work(self, resource_monitor=None):
"""Runs a benchmark as subprocess and waits for it.
Args:
resource_monitor (dlbs.utils.ResourceMonitor): Optional resource monitor object.
Returns:
Status code for a subprocess call invocation.
"""
self.start()
self.join()
if resource_monitor is not None:
resource_monitor.empty_pid_file()
metrics = resource_monitor.get_measurements()
with open(self.params['exp.log_file'], 'a+') as log_file:
for key in metrics:
log_file.write('__results.use.%s__=%s\n' % (key, json.dumps(metrics[key])))
if self.is_alive():
self.process.terminate()
self.join()
time.sleep(1)
if 'exp.sys_info' in self.params and self.params['exp.sys_info']:
info = SysInfo(self.params['exp.sys_info']).collect()
with open(self.params['exp.log_file'], 'a+') as log_file:
for key in info:
log_file.write('__%s__=%s\n' % (key, json.dumps(info[key])))
return self.ret_code
```
#### File: mxnet_benchmarks/models/alexnet.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
from mxnet_benchmarks.models.model import Model, Layers
class AlexNet(Model):
implements = 'alexnet'
@property
def output(self):
return self.__output
def __init__(self, params):
Model.check_parameters(
params,
{'name': 'AlexNet', 'num_classes': 1000, 'phase': 'training', 'dtype': 'float32',
'input_layout': 'NCHW', 'model_layout': 'NCHW', 'nvidia_layers': False}
)
params['input_shape'] = Model.conv_shape(3, (227, 227), params['input_layout'])
Model.__init__(self, params)
layers = Layers(params)
data = self.add_data_node()
data = Layers.conv_transform_layout(data, params['input_layout'], params['model_layout'])
conv1 = layers.Convolution(name='conv1', data=data, kernel=(11, 11), stride=(4, 4), num_filter=96)
relu1 = layers.Activation(name='relu1', data=conv1, act_type='relu')
norm1 = self.maybe_lrn(relu1, 'norm1')
pool1 = layers.Pooling(name='pool1', data=norm1, pool_type="max", kernel=(3, 3), stride=(2, 2))
conv2 = layers.Convolution(name='conv2', data=pool1, kernel=(5, 5), pad=(2, 2), num_filter=256, num_group=1)
relu2 = layers.Activation(name='relu2', data=conv2, act_type="relu")
norm2 = self.maybe_lrn(relu2, 'norm2')
pool2 = layers.Pooling(name='pool2', data=norm2, kernel=(3, 3), stride=(2, 2), pool_type="max")
conv3 = layers.Convolution(name='conv3', data=pool2, kernel=(3, 3), pad=(1, 1), num_filter=384)
relu3 = layers.Activation(name='relu3', data=conv3, act_type="relu")
conv4 = layers.Convolution(name='conv4', data=relu3, kernel=(3, 3), pad=(1, 1), num_filter=384, num_group=1)
relu4 = layers.Activation(name='relu4', data=conv4, act_type="relu")
conv5 = layers.Convolution(name='conv5', data=relu4, kernel=(3, 3), pad=(1, 1), num_filter=256, num_group=1)
relu5 = layers.Activation(name='relu5', data=conv5, act_type="relu")
pool5 = layers.Pooling(name='pool5', data=relu5, kernel=(3, 3), stride=(2, 2), pool_type="max")
flatten = mx.symbol.Flatten(data=pool5)
fc6 = mx.symbol.FullyConnected(name='fc6', data=flatten, num_hidden=4096)
relu6 = layers.Activation(name='relu6', data=fc6, act_type="relu")
drop6 = layers.Dropout(name='drop6', data=relu6, p=0.5)
fc7 = mx.symbol.FullyConnected(name='fc7', data=drop6, num_hidden=4096)
relu7 = layers.Activation(name='relu7', data=fc7, act_type="relu")
drop7 = layers.Dropout(name='drop7', data=relu7, p=0.5)
self.__output = self.add_head_nodes(drop7)
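# Construction sketch (values beyond the documented defaults are assumptions).
# Model.check_parameters above fills in the usual keys, but the base Model class
# also requires a 'model_opts' entry, so a minimal instantiation could be:
#   model = AlexNet({'model_opts': {}})
#   sym = model.output   # MXNet symbol ending in the softmax head nodes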
```
#### File: mxnet_benchmarks/models/model.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import copy
import mxnet as mx
import numpy as np
class Layers(object):
"""
This class was introduced to work with some of the new NVIDIA performance improvements to MXNET available
in NGC containers. Once these changes are merged into master branch, this class will probably be removed.
"""
def __init__(self, params):
"""
Args:
params (dict): Dictionary of parameters:
--model_layout (str) Tensor layout for CNN models - NCHW or NHWC.
--nvidia_layers (bool) Enables/disables performance improvements.
--dtype (str) Data type of computations - float32 or float16.
"""
self.__model_layout = params['model_layout']
self.__nvidia_layers = params['nvidia_layers']
self.__training = params['phase'] == 'training'
self.__conv_args = {'layout': self.__model_layout}
self.__pool_args = {}
dtype = params['dtype']
if self.__nvidia_layers:
if dtype == 'float16':
self.__conv_args.update({'cudnn_algo_fwd': 1, 'cudnn_algo_bwd_data': 1, 'cudnn_algo_bwd_filter': 1,
'cudnn_tensor_core_only': True})
elif dtype == 'float32':
self.__conv_args.update({'cudnn_algo_fwd': -1, 'cudnn_algo_bwd_data': -1, 'cudnn_algo_bwd_filter': -1,
'cudnn_tensor_core_only': False})
self.__conv_args.update({'layout': self.__model_layout})
self.__pool_args.update({'layout': self.__model_layout})
logging.info("Layers: model_layout = %s.", self.__model_layout)
logging.info("Layers: conv_args = %s.", self.__conv_args)
logging.info("Layers: pool_args = %s.", self.__pool_args)
@staticmethod
def merge_args(user_args, additional_args):
merged_args = user_args.copy()
merged_args.update(additional_args)
return merged_args
@staticmethod
def conv_transform_layout(data, from_layout, to_layout):
""" Transform a symbol from one layout to another, or do nothing if they have the same layout.
Args:
data (obj): Input tensor of rank 4.
from_layout (str): Input layout
to_layout (str): Output layout
Returns:
Tensor `data` with `to_layout`.
"""
supported_layouts = ['NCHW', 'NHWC']
if from_layout not in supported_layouts:
raise ValueError('Not prepared to handle layout: {}'.format(from_layout))
if to_layout not in supported_layouts:
raise ValueError('Not prepared to handle layout: {}'.format(to_layout))
# Insert transpose if from_layout and to_layout don't match
if from_layout == 'NCHW' and to_layout == 'NHWC':
return mx.sym.transpose(data, axes=(0, 2, 3, 1))
elif from_layout == 'NHWC' and to_layout == 'NCHW':
return mx.sym.transpose(data, axes=(0, 3, 1, 2))
else:
return data
# noinspection PyPep8Naming
def Convolution(self, **kwargs):
return mx.symbol.Convolution(**Layers.merge_args(kwargs, self.__conv_args))
# noinspection PyPep8Naming
def Activation(self, **kwargs):
return mx.symbol.Activation(**kwargs)
# noinspection PyPep8Naming
def Pooling(self, **kwargs):
data = kwargs.pop('data')
if not self.__nvidia_layers:
data = Layers.conv_transform_layout(data, self.__model_layout, 'NCHW')
data = mx.symbol.Pooling(data=data, **Layers.merge_args(kwargs, self.__pool_args))
if not self.__nvidia_layers:
data = Layers.conv_transform_layout(data, 'NCHW', self.__model_layout)
return data
# noinspection PyPep8Naming
def Dropout(self, **kwargs):
return mx.symbol.Dropout(**kwargs) if self.__training else kwargs['data']
# noinspection PyPep8Naming
def BatchNorm(self, **kwargs):
bn_axis = 3 if self.__model_layout == 'NHWC' else 1
if 'act_type' in kwargs:
if kwargs['act_type'] is not None and not self.__nvidia_layers:
raise ValueError("Model construction logic violation. Batch norm support activation only with "
"enabled NVIDIA layers (--nvidia_layers=true), this functionality is only available "
"in NGC containers (it's not yet in MXNET repository).")
del kwargs['act_type']
return mx.sym.BatchNorm(**Layers.merge_args(kwargs, {'axis': bn_axis}))
# noinspection PyPep8Naming
def BatchNormAddRelu(self, **kwargs):
bn_axis = 3 if self.__model_layout == 'NHWC' else 1
return mx.sym.BatchNormAddRelu(**Layers.merge_args(kwargs, {'axis': bn_axis}))
class Model(object):
"""Base class for all models"""
def __init__(self, kwargs):
""" name: printable name like AlexNet, ResNet152 etc
input_shape: tuple of dimensions of input data excluding batch, for
instance, (3,224,224) - 3 channels with 224 spatial dimensions
num_classes: size of output softmax (affine) operator
phase: 'inference' or 'training'
"""
for arg in ['name', 'input_shape', 'num_classes', 'phase', 'dtype', 'model_opts']:
if arg not in kwargs:
raise ValueError("Missing mandatory neural net parameter '%s'" % arg)
self.__name = kwargs['name']
self.__input_shape = kwargs['input_shape']
self.__num_classes = kwargs['num_classes']
self.__phase = kwargs['phase']
self.__dtype = kwargs['dtype']
self.__model_opts = copy.deepcopy(kwargs['model_opts'])
self.__have_float16_lrn = 'DLBS_MXNET_NO_FLOAT16_LRN' not in os.environ
self._eval_metric = 'acc'
# The following two parameters are used by data providers.
self._labels_shape = (1,) # Shape of labels tensor excluding leading batch dimension
self._labels_range = (0, self.num_classes-1) # Possible labels' values inclusive
if self.__dtype == 'float16' and self.__have_float16_lrn:
logging.warning(
"The data type is 'float16' and I assume MXNET provides a float16 kernel for LRN layer. If this model "
"uses LRN and your MXNET version is outdated, you will get error. In this case, to disable LRN layers "
"in float16 regime, define the following variable 'DLBS_MXNET_NO_FLOAT16_LRN' (the value of this "
"variable does not matter) i.e.: -Pruntime.launcher='\"DLBS_MXNET_NO_FLOAT16_LRN=1 \"'")
if self.__dtype == 'float16' and not self.__have_float16_lrn:
logging.warning(
"The data type is 'float16' and you disable LRN layers. All calls to Model.maybe_lrn will do nothing. "
"If your MXNET version is up to date and provides LRN float16 kernel make sure "
"DLBS_MXNET_NO_FLOAT16_LRN environment variable is not defined. All this is relevant only if this "
"model uses LRN operators.")
@staticmethod
def conv_shape(num_channels, spatial_dims, layout='NCHW'):
""" Return shape of a feature map tensor for convolutional models.
Args:
num_channels (int): Number of channels.
spatial_dims (tuple or list): Spatial dimensions (H, W) for a feature map.
layout (str): Required layout, one of NCHW (channel first) or NHWC (channel last).
Returns:
Tuple with shape, either (C, H, W) or (H, W, C) depending on `layout`.
"""
if layout not in ('NCHW', 'NHWC'):
raise ValueError("Invalid conv layout '{}'. Must be one of ['NCHW', 'NHWC']".format(layout))
if not isinstance(spatial_dims, (list, tuple)):
raise ValueError("Invalid type of spatial_dims argument '{}'. "
"Must be tuple or list.".format(type(spatial_dims)))
return (num_channels, ) + tuple(spatial_dims) if layout == 'NCHW' else tuple(spatial_dims) + (num_channels, )
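    # For example: Model.conv_shape(3, (224, 224))         -> (3, 224, 224)
    #              Model.conv_shape(3, (224, 224), 'NHWC') -> (224, 224, 3)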
@staticmethod
def check_parameters(params, default_params):
"""Ensures `params` dictionary contains all keys in `default_params`
Args:
params (dict): Dictionary to check.
            default_params (dict): Values with these keys must be present in `params`.
"""
for param, value in default_params.items():
if params.get(param, None) is None:
params[param] = value
def add_data_node(self, name='data'):
"""Add data node casting it to float16 is required. Also implements double-buffering.
https://github.com/NVIDIA/DeepLearningExamples/blob/40e074257fb8670b0284a37c92b9372bb1587354/MxNet/Classification/RN50v1.5/resnet.py#L241
"""
data = mx.sym.Variable(name=name)
if self.dtype == 'float32':
data = mx.sym.identity(data=data)
elif self.dtype == 'float16':
data = mx.sym.cast(data=data, dtype=np.float16)
return data
def add_head_nodes(self, v):
"""Adds dense and softmax head nodes.
Args:
v (obj): input tensor.
Returns:
Output tensor
"""
v = mx.sym.FullyConnected(data=v, num_hidden=self.num_classes)
if self.dtype == 'float16':
logging.info("Casting logits to np.float32")
v = mx.sym.cast(data=v, dtype=np.float32)
if self.phase == 'training':
labels = mx.sym.Variable(name="softmax_label")
# Just in case labels are of shape (batch_size, 1) we need to
# reshape them to (batch_size,).
labels = mx.sym.Reshape(labels, shape=(-1,))
v = mx.symbol.SoftmaxOutput(data=v, label=labels, name='softmax')
else:
v = mx.symbol.softmax(data=v, name='softmax')
return v
def maybe_lrn(self, v, name):
""" MxNet does not have float16 kernel for LRN operator. So, we use it only
for float32 data type. That makes comparison not fair. Need to do something
about it like dropping completely these operators.
They are used by AlexNet and GoogleNet.
UPDATE: Seems like mxnet now provides this kernel.
:param obj v: Input tensor.
:param str name: Name of the LRN operator.
:return: The input tensor 'v' if data type is float16 else result of LRN
operator
"""
if self.dtype == 'float32' or self.__have_float16_lrn:
return mx.symbol.LRN(data=v, alpha=0.0001, beta=0.75, knorm=2, nsize=5, name=name)
else:
return v
def render_to_file(self, node, bsize, fname):
"""Render the neural network to JPG file.
:param sym node: Head node.
:param int bsize: Batch size.
:param str fname: File name without extension.
"""
g = mx.viz.plot_network(
node,
shape={'data': (bsize,) + self.input_shape},
node_attrs={"shape": 'rect', "fixedsize": 'false'},
save_format='jpg'
)
g.render(fname)
@staticmethod
def num_parameters(module, count_aux_params=True):
"""Return number of parameters in a module.
"""
arg_params, aux_params = module.get_params()
num_params = 0
for p in arg_params:
num_params += np.prod(arg_params[p].shape)
if count_aux_params:
for p in aux_params:
num_params += np.prod(aux_params[p].shape)
return num_params
@staticmethod
def print_parameters(module):
def __print(params):
total_params = 0
            pnames = sorted(params.keys())  # dict views have no .sort() in Python 3
for p in pnames:
nparams = np.prod(params[p].shape)
total_params += nparams
print("%-30s %-30s %d" % (p, str(params[p].shape), int(nparams)))
return total_params
arg_params, aux_params = module.get_params()
print("Arg parameters")
net_params = __print(arg_params)
print("Aux parameters")
net_params += __print(aux_params)
print("Total number of parameters %d" % net_params)
@property
def name(self):
"""Get model name"""
return self.__name
@property
def input_shape(self):
"""Get input shape excluding batch size dimension"""
return self.__input_shape if isinstance(self.__input_shape, tuple) else (self.__input_shape, )
@property
def num_classes(self):
"""Get number of classes"""
return self.__num_classes
@property
def phase(self):
"""Get current phase ('training' or 'inference')"""
return self.__phase
@property
def dtype(self):
"""Get type of data ('float32' or 'float16' or 'int8')"""
return self.__dtype
@property
def model_opts(self):
"""Get additional model options (json dictionary)"""
return self.__model_opts
@property
def eval_metric(self):
"""Return evaluation metric"""
return self._eval_metric
@property
def labels_shape(self):
"""Shape of labels tensor excluding leading batch dimension"""
return self._labels_shape
@property
def labels_range(self):
"""Get range for possible label values. Range is inclusive."""
return self._labels_range
```
#### File: pytorch_benchmarks/models/googlenet.py
```python
from __future__ import absolute_import
import torch
import torch.nn as nn
from pytorch_benchmarks.models.model import Model
class ConvModule(nn.Module):
"""
[input] -> Conv2D -> ReLU -> [output]
"""
def __init__(self, num_input_channels, num_filters, kernel_size,
stride=1, padding=0):
super(ConvModule, self).__init__()
self.conv_module = nn.Sequential(
nn.Conv2d(num_input_channels, num_filters, kernel_size=kernel_size,
stride=stride, padding=padding),
nn.ReLU(inplace=True),
)
def forward(self, x):
return self.conv_module(x)
class InceptionModule(nn.Module):
"""
| -> c1x1 | branch: conv1
| -> c3x3r -> c3x3 | branch: conv3
[input] -| |-> concat -> [output]
| -> cd5x5r -> cd5x5 | branch: conv5
| -> pooling -> cproj | branch: pooling
"""
def __init__(self, num_input_channels, num_1x1, num_3x3red, num_3x3, num_d5x5red,
num_d5x5, proj):
super(InceptionModule, self).__init__()
self.conv1 = ConvModule(num_input_channels, num_filters=num_1x1, kernel_size=1)
self.conv3 = nn.Sequential(
ConvModule(num_input_channels, num_filters=num_3x3red, kernel_size=1),
ConvModule(num_3x3red, num_filters=num_3x3, kernel_size=3, padding=1)
)
self.conv5 = nn.Sequential(
ConvModule(num_input_channels, num_filters=num_d5x5red, kernel_size=1),
ConvModule(num_d5x5red, num_filters=num_d5x5, kernel_size=5, padding=2)
)
self.pooling = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
ConvModule(num_input_channels, num_filters=proj, kernel_size=1)
)
def forward(self, x):
return torch.cat(
[self.conv1(x), self.conv3(x), self.conv5(x), self.pooling(x)],
dim=1
)
class GoogleNet(Model):
implements = 'googlenet'
def __init__(self, params):
""""""
Model.check_parameters(
params,
{'name': 'GoogleNet', 'input_shape':(3, 224, 224),
'num_classes': 1000, 'phase': 'training',
'dtype': 'float32'}
)
Model.__init__(self, params)
self.features = nn.Sequential(
ConvModule(self.input_shape[0], 64, kernel_size=7, stride=2, padding=3),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
ConvModule(64, 64, kernel_size=1, stride=1),
ConvModule(64, 192, kernel_size=3, stride=1, padding=1),
nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
nn.MaxPool2d(kernel_size=3, stride=2),
InceptionModule(192, num_1x1=64, num_3x3red=96, num_3x3=128, num_d5x5red=16, num_d5x5=32, proj=32), # out channels = 256
InceptionModule(256, num_1x1=128, num_3x3red=128, num_3x3=192, num_d5x5red=32, num_d5x5=96, proj=64), # out channels = 480
nn.MaxPool2d(kernel_size=3, stride=2),
InceptionModule(480, num_1x1=192, num_3x3red=96, num_3x3=208, num_d5x5red=16, num_d5x5=48, proj=64), # out channels = 512
InceptionModule(512, num_1x1=160, num_3x3red=112, num_3x3=224, num_d5x5red=24, num_d5x5=64, proj=64), # out channels = 512
InceptionModule(512, num_1x1=128, num_3x3red=128, num_3x3=256, num_d5x5red=24, num_d5x5=64, proj=64), # out channels = 512
InceptionModule(512, num_1x1=112, num_3x3red=144, num_3x3=288, num_d5x5red=32, num_d5x5=64, proj=64), # out channels = 528
InceptionModule(528, num_1x1=256, num_3x3red=160, num_3x3=320, num_d5x5red=32, num_d5x5=128, proj=128), # out channels = 832
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
InceptionModule(832, num_1x1=256, num_3x3red=160, num_3x3=320, num_d5x5red=32, num_d5x5=128, proj=128), # out channels = 832
InceptionModule(832, num_1x1=384, num_3x3red=192, num_3x3=384, num_d5x5red=48, num_d5x5=128, proj=128), # out channels = 1024
nn.AvgPool2d(kernel_size=7, stride=1)
)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(1024, self.num_classes)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 1024 * 1 * 1)
return self.classifier(x)
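# Quick shape check (a sketch, not part of the benchmark harness): with the default
# 3x224x224 input the feature stack ends in a 1024x1x1 map, so the view() above
# matches the Linear(1024, num_classes) classifier.
#   net = GoogleNet({})                      # every parameter falls back to a default
#   out = net(torch.randn(2, 3, 224, 224))   # -> torch.Size([2, 1000])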
```
#### File: pytorch_benchmarks/models/model.py
```python
import torch.nn as nn
class Model(nn.Module):
"""Base class for all models"""
def __init__(self, params):
super(Model, self).__init__()
for param in ['name', 'input_shape', 'num_classes', 'phase', 'dtype']:
assert param in params, "Missing mandatory neural net parameter '%s'" % param
assert params['phase'] in ['inference', 'training'],\
"Invalid phase: '%s'. Expecting 'inference' or 'training'" % (params['phase'])
self.__name = params['name']
self.__input_shape = params['input_shape']
self.__num_classes = params['num_classes']
self.__phase = params['phase']
self.__dtype = params['dtype']
@staticmethod
def check_parameters(params, default_params):
"""Ensures `params` dictionary contains all keys in `default_params`
Args:
params (dict): Dictionary to check.
            default_params (dict): Values with these keys must be present in `params`.
"""
for param, value in default_params.items():
if param not in params:
params[param] = value
@property
def name(self):
"""Get model name"""
return self.__name
@property
def input_shape(self):
"""Get input shape excluding batch size dimension"""
return self.__input_shape if isinstance(self.__input_shape, tuple)\
else (self.__input_shape,)
@property
def num_classes(self):
"""Get number of classes"""
return self.__num_classes
@property
def phase(self):
"""Get current phase ('training' or 'inference')"""
return self.__phase
@property
def dtype(self):
"""Get type of data ('float32' or 'float16' or 'int8')"""
return self.__dtype
```
#### File: pytorch_benchmarks/models/overfeat.py
```python
from __future__ import absolute_import
import torch.nn as nn
from pytorch_benchmarks.models.model import Model
class Overfeat(Model):
implements = 'overfeat'
def __init__(self, params):
Model.check_parameters(
params,
{'name': 'Overfeat', 'input_shape': (3, 231, 231), 'num_classes': 1000,
'phase': 'training',
'dtype': 'float32'}
)
Model.__init__(self, params)
self.features = nn.Sequential(
# Layer1
nn.Conv2d(self.input_shape[0], 96, kernel_size=11, stride=4),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
# Layer2
nn.Conv2d(96, 256, kernel_size=5),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
# Layer3
nn.Conv2d(256, 512, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
# Layer4
nn.Conv2d(512, 1024, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
# Layer5
nn.Conv2d(1024, 1024, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2)
)
self.classifier = nn.Sequential(
# Layer6
nn.Linear(1024 * 6 * 6, 3072),
nn.ReLU(inplace=True),
nn.Dropout(),
# Layer7
nn.Linear(3072, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, self.num_classes)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), 1024 * 6 * 6)
return self.classifier(x)
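# Shape sanity check (illustrative only, assuming `import torch`): a 3x231x231 input
# is reduced to a 1024x6x6 map (231 -> 56 -> 28 -> 24 -> 12 -> 12 -> 12 -> 12 -> 6),
# which is why the first Linear layer expects 1024 * 6 * 6 inputs.
#   net = Overfeat({})
#   out = net(torch.randn(2, 3, 231, 231))   # -> torch.Size([2, 1000])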
```
#### File: pytorch_benchmarks/models/resnet.py
```python
from __future__ import absolute_import
import torch.nn as nn
from pytorch_benchmarks.models.model import Model
class ResnetModule(nn.Module):
"""Number of outut channels is always 'num_filters'."""
def __init__(self, num_input_channels, num_filters, stride, dim_match, bottle_neck):
"""Number of outut channels is always 'num_filters'."""
super(ResnetModule, self).__init__()
# Branch 1
if dim_match:
self.shortcut = None
else:
self.shortcut = nn.Sequential(
nn.Conv2d(num_input_channels, num_filters, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(num_filters, eps=2e-5, momentum=0.9, affine=True)
)
# Branch 2
if bottle_neck:
bottleneck_channels = num_filters // 4
self.main = nn.Sequential(
# Block 2A
nn.Conv2d(num_input_channels, bottleneck_channels, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(bottleneck_channels, eps=2e-5, momentum=0.9, affine=True),
nn.ReLU(inplace=True),
# Block 2B
nn.Conv2d(bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(bottleneck_channels, eps=2e-5, momentum=0.9, affine=True),
nn.ReLU(inplace=True),
# Block 3B
nn.Conv2d(bottleneck_channels, num_filters, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_filters, eps=2e-5, momentum=0.9, affine=True),
)
else:
self.main = nn.Sequential(
# Block 2A
nn.Conv2d(num_input_channels, num_filters, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(num_filters, eps=2e-5, momentum=0.9, affine=True),
nn.ReLU(inplace=True),
# Block 2B
nn.Conv2d(num_filters, num_filters, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(num_filters, eps=2e-5, momentum=0.9, affine=True),
)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
shortcut = x if self.shortcut is None else self.shortcut(x)
return self.relu(shortcut + self.main(x))
class ResNet(Model):
implements = [
'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnet200', 'resnet269'
]
specs = {
'resnet18': { 'name': 'ResNet18', 'units': [2, 2, 2, 2], 'num_layers': 18 }, # pylint: disable=C0326
'resnet34': { 'name': 'ResNet34', 'units': [3, 4, 6, 3], 'num_layers': 34 }, # pylint: disable=C0326
'resnet50': { 'name': 'ResNet50', 'units': [3, 4, 6, 3], 'num_layers': 50 }, # pylint: disable=C0326
'resnet101': { 'name': 'ResNet101', 'units': [3, 4, 23, 3], 'num_layers': 101 }, # pylint: disable=C0326
'resnet152': { 'name': 'ResNet152', 'units': [3, 8, 36, 3], 'num_layers': 152 }, # pylint: disable=C0326
'resnet200': { 'name': 'ResNet200', 'units': [3, 24, 36, 3], 'num_layers': 200 }, # pylint: disable=C0326
'resnet269': { 'name': 'ResNet269', 'units': [3, 30, 48, 8], 'num_layers': 269 } # pylint: disable=C0326
}
def __init__(self, params):
specs = ResNet.specs[params['model']]
Model.check_parameters(
params,
{'name': specs['name'], 'input_shape':(3, 224, 224),
'num_classes': 1000, 'phase': 'training',
'dtype': 'float32'}
)
Model.__init__(self, params)
# Adapted from https://github.com/tornadomeet/ResNet/blob/master/train_resnet.py
# Original author <NAME>
if specs['num_layers'] >= 50:
filter_list = [64, 256, 512, 1024, 2048]
bottle_neck = True
else:
filter_list = [64, 64, 128, 256, 512]
bottle_neck = False
self.features = nn.Sequential(
nn.Conv2d(3, filter_list[0], kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(filter_list[0], eps=2e-5, momentum=0.9, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
# Number of stages is always 4
num_prev_channels = filter_list[0]
for i in range(len(specs['units'])):
# num_input_channels, num_filters, stride, dim_match, bottle_neck
self.features.add_module(
'stage%d_unit%d' % (i + 1, 1),
ResnetModule(num_prev_channels, filter_list[i+1], (1 if i == 0 else 2), False, bottle_neck)
)
num_prev_channels = filter_list[i+1]
for j in range(specs['units'][i]-1):
self.features.add_module(
'stage%d_unit%d' % (i + 1, j + 2),
ResnetModule(num_prev_channels, filter_list[i+1], 1, True, bottle_neck)
)
self.features.add_module('pool1', nn.AvgPool2d(kernel_size=7, padding=0))
self.num_output_channels = filter_list[-1]
self.classifier = nn.Sequential(
nn.Linear(self.num_output_channels, self.num_classes)
)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), self.num_output_channels)
x = self.classifier(x)
return x
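# Construction sketch (illustrative only, assuming `import torch`): the 'model' key
# selects an entry from ResNet.specs; everything else falls back to the defaults above.
#   net = ResNet({'model': 'resnet50'})
#   out = net(torch.randn(2, 3, 224, 224))   # -> torch.Size([2, 1000])
# resnet18/34 use the plain two-conv block with [64, 64, 128, 256, 512] filters, while
# resnet50 and deeper use the bottleneck block with [64, 256, 512, 1024, 2048].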
```
#### File: python/tf_cnn_benchmarks/benchmark_cnn_distributed_test.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import os
import subprocess
import time
from absl import flags as absl_flags
import portpicker
import six
import tensorflow as tf
import flags
import test_util
from platforms import util as platforms_util
FLAGS = absl_flags.FLAGS
def _convert_params_to_flags_list(params):
"""Converts Params to a list of flags. Skips default-valued parameters.
E.g., converts
benchmark_cnn.make_params(batch_size=32, model='resnet50')
to
['--batch_size=32', '--model=resnet50']
Args:
params: Params for BenchmarkCNN.
Returns:
A list of flags.
"""
return [
'--%s=%s' % (k, str(v)) for k, v in six.iteritems(params._asdict())
if v != flags.param_specs[k].default_value
]
# When outputting a process's output in the log, maximum number of characters
# to output. The log system does not allow us to output more than this in a
# single log message, but this limit is also useful to avoid the logs from
# becoming too large (the full process output is written to disk).
MAX_OUTPUT_CHARS = 15000
# A process. name is a string identifying the process in logs. stdout and
# stderr are file objects of the process's stdout and stderr, respectively.
_ProcessInfo = namedtuple('_ProcessInfo', ['name', 'popen', 'stdout', 'stderr'])
def _create_task_process(job_name, task_index, args, env, output_dir):
"""Creates a process for a single task for benchmark_cnn.
Args:
job_name: 'worker' or 'ps' or ''. Empty string used for non-distributed
mode.
task_index: The index of the task within the cluster.
args: A list of arguments to pass to the task. This function additionally
sets --task_index and --job_name
env: The environment to use for the task.
output_dir: Where to place the output files, storing the task's stdout and
stderr.
Returns:
A _ProcessInfo namedtuple of the running process. The stdout and stderr
fields of this tuple must be closed by the caller once the process ends.
"""
args = args[:]
args += ['--task_index=%s' % task_index, '--job_name=%s' % job_name]
name_prefix = job_name or 'local'
process_name = '%s_%s' % (name_prefix, task_index)
tf.logging.info('Spawning %s process: %s' % (process_name, ' '.join(args)))
stdout_filename = os.path.join(output_dir, '%s_stdout.txt' % process_name)
stderr_filename = os.path.join(output_dir, '%s_stderr.txt' % process_name)
stdout_file = open(stdout_filename, 'w+')
stderr_file = open(stderr_filename, 'w+')
popen = subprocess.Popen(
args, stdout=stdout_file, stderr=stderr_file, env=env)
return _ProcessInfo(process_name, popen, stdout_file, stderr_file)
def _wait_for_processes(wait_processes, kill_processes):
"""Waits until all `wait_processes` finish, then kills `kill_processes`.
Fails an assert if a process in `wait_processes` finishes unsuccessfully.
The processes in `kill_processes` are assumed to never finish so they are
killed.
Args:
wait_processes: A list of _ProcessInfo tuples. This function will wait
for each to finish.
kill_processes: A list of _ProcessInfo tuples. Each will be killed once
every process in `wait_processes` is finished.
Returns:
A list of strings, each which is a string of the stdout of a wait process.
"""
wait_process_stdouts = [None] * len(wait_processes)
finished_wait_processes = set()
while len(finished_wait_processes) < len(wait_processes):
for i, wait_process in enumerate(wait_processes):
if i in finished_wait_processes:
continue
ret_code = wait_process.popen.poll()
if ret_code is None:
continue
tf.logging.info('{} finished'.format(wait_process.name))
wait_process.stdout.seek(0)
wait_process_stdouts[i] = wait_process.stdout.read()
tf.logging.info('stdout for {} (last {} chars): {}\n'.format(
wait_process.name, MAX_OUTPUT_CHARS,
wait_process_stdouts[i][-MAX_OUTPUT_CHARS:]))
wait_process.stderr.seek(0)
tf.logging.info('stderr for {} (last {} chars): {}\n'.format(
wait_process.name, MAX_OUTPUT_CHARS,
wait_process.stderr.read()[-MAX_OUTPUT_CHARS:]))
assert ret_code == 0, 'Process failed with return code %d' % ret_code
finished_wait_processes.add(i)
for kill_process in kill_processes:
ret_code = kill_process.popen.poll()
# kill processes should not end until we kill them.
assert ret_code is None, 'Process returned early with code %d' % ret_code
time.sleep(0.25)
tf.logging.info('All wait processes finished')
for i, kill_process in enumerate(kill_processes):
# Kill each kill process.
kill_process.popen.kill()
kill_process.popen.wait()
kill_process.stdout.seek(0)
tf.logging.info('stdout for {} (last {} chars): {}\n'.format(
kill_process.name, MAX_OUTPUT_CHARS,
kill_process.stdout.read()[-MAX_OUTPUT_CHARS:]))
kill_process.stderr.seek(0)
tf.logging.info('stderr for {} (last {} chars): {}\n'.format(
kill_process.name, MAX_OUTPUT_CHARS,
kill_process.stderr.read()[-MAX_OUTPUT_CHARS:]))
return wait_process_stdouts
def _spawn_benchmark_processes(output_dir_path, num_workers, num_ps,
num_controllers, params):
"""Run training or evaluation in spawned processes.
Runs locally if num_workers == 1, num_ps == 0, and num_controllers == 0,
otherwise runs in distributed mode. In either case, one process is spawned
per worker and ps. Waits for training/evaluation to finish before returning.
Args:
output_dir_path: Relative path where stdout and stderr files will be
placed.
num_workers: Number of workers to spawn.
num_ps: Number of ps processes to spawn.
num_controllers: Number of controller processes to spawn (must be 0 or 1).
params: Params for BenchmarkCNN in each subprocess.
Returns:
A list output_list of outputs from all processes that output the
images/sec and accuracy. This process is the controller host in
distributed_all_reduce, and the workers otherwise. output_list[i] is a
list of lines from the ith worker's stdout.
"""
run_distributed = num_workers != 1 or num_ps != 0 or num_controllers != 0
if params.variable_update == 'distributed_all_reduce':
assert num_controllers == 1 or not run_distributed
assert num_ps == 0
else:
assert num_controllers == 0
output_base_dir = platforms_util.get_test_output_dir()
output_dir = os.path.join(output_base_dir, output_dir_path)
os.makedirs(output_dir)
tf.logging.info('Outputs of processes will be outputted to: %s' % output_dir)
args = platforms_util.get_command_to_run_python_module(
'benchmark_cnn_distributed_test_runner')
args += _convert_params_to_flags_list(params)
if run_distributed:
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
controller_ports = [portpicker.pick_unused_port()
for _ in range(num_controllers)]
# The numerator is 0.7 instead of 1 to leave some memory for the Cuda
# runtime, etc.
gpu_memory_frac = 0.7 / num_workers
args += [
'--gpu_memory_frac_for_testing=%f' % gpu_memory_frac,
'--worker_hosts=' + ','.join('localhost:%d' % p for p in worker_ports)
]
if num_ps > 0:
ps_hosts_str = ','.join('localhost:%d' % p for p in ps_ports)
args.append('--ps_hosts=' + ps_hosts_str)
else:
controller_host_str = ','.join('localhost:%d' % p
for p in controller_ports)
args.append('--controller_host=' + controller_host_str)
env = os.environ.copy()
# Allow stdout to be viewed before the process ends.
env['PYTHONUNBUFFERED'] = '1'
worker_processes = []
ps_processes = []
controller_processes = []
try:
for i in range(num_workers):
job_name = 'worker' if run_distributed else ''
process = _create_task_process(job_name, i, args, env, output_dir)
worker_processes.append(process)
# Don't let ps or controller processes use the gpu.
env['CUDA_VISIBLE_DEVICES'] = ''
for i in range(num_ps):
process = _create_task_process('ps', i, args, env, output_dir)
ps_processes.append(process)
for i in range(num_controllers):
process = _create_task_process('controller', i, args, env, output_dir)
controller_processes.append(process)
# If all distributed all reduce mode is being used, the controller process
# finishes and the worker processes block forever. Otherwise, the worker
# processes finish and the ps processes block forever. We set
# wait_processes and kill_processes accordingly.
if controller_processes:
wait_processes = controller_processes
kill_processes = worker_processes
else:
wait_processes = worker_processes
kill_processes = ps_processes
outputs = _wait_for_processes(wait_processes, kill_processes)
finally:
for process in worker_processes + ps_processes + controller_processes:
try:
process.popen.kill()
except OSError:
pass # It's OK (and expected) if the process already exited.
process.stdout.close()
process.stderr.close()
return [output.splitlines() for output in outputs]
# When this test class is run, a method will fail about 0.3% of the time with a
# gRPC error. It is not clear why this occurs.
# TODO(reedwm): Fix this test class.
class TfCnnBenchmarksDistributedTest(tf.test.TestCase):
"""Tests running benchmark_cnn in distributed mode."""
# We cannot check for a GPU via tf.test.is_gpu_available() before the tests in
# this class because it allocates all the GPU memory which would cause the
# spawned processes to run out of GPU memory.
def _test_distributed(self,
test_name,
num_workers,
num_ps,
params,
num_controllers=0,
check_output_values=False,
skip_eval=False):
# TODO(reedwm): check_output_values should default to True and be enabled
# on every test. See the TODO in benchmark_cnn_test.py.
def run_fn(run_type, inner_params):
output_dir_path = os.path.join(test_name, run_type)
if run_type == 'Evaluation':
# Distributed evaluation is not supported, so we use a single process.
# We still must spawn another process, because if we evaluate in the
# current process, it would allocate the GPU memory causing future test
# methods to fail.
if inner_params.variable_update == 'distributed_replicated':
inner_params = inner_params._replace(variable_update='replicated')
return _spawn_benchmark_processes(
output_dir_path, num_workers=1, num_ps=0, num_controllers=0,
params=inner_params)
else:
return _spawn_benchmark_processes(output_dir_path, num_workers, num_ps,
num_controllers, inner_params)
return test_util.train_and_eval(self, run_fn, params,
check_output_values=check_output_values,
skip_eval=skip_eval)
def testParameterServer(self):
test_name = 'testParameterServer'
params = test_util.get_params(test_name)
self._test_distributed(test_name, 2, 2, params)
def testParameterServerStaged(self):
test_name = 'testParameterServerStaged'
params = test_util.get_params(test_name)._replace(staged_vars=True)
self._test_distributed(test_name, 2, 2, params)
def testReplicated(self):
test_name = 'testReplicated'
params = test_util.get_params(test_name)._replace(
variable_update='distributed_replicated')
self._test_distributed(test_name, 2, 2, params)
def testAllReducePsgpu(self):
test_name = 'testAllReducePsgpu'
flags_dict = test_util.get_params(test_name)._replace(
variable_update='distributed_all_reduce',
all_reduce_spec='psgpu#4')
self._test_distributed(test_name, 2, 0, flags_dict, num_controllers=1)
def testAllReducePscpuXring(self):
test_name = 'testAllReducePscpuXring'
flags_dict = test_util.get_params(test_name)._replace(
variable_update='distributed_all_reduce',
all_reduce_spec='pscpu:2k:xring')
self._test_distributed(test_name, 2, 0, flags_dict, num_controllers=1)
def testForwardOnly(self):
test_name = 'testForwardOnly'
params = test_util.get_params(test_name)._replace(forward_only=True)
# Evaluation is not supported with --forward_only, so we set skip_eval=True.
self._test_distributed(test_name, 2, 2, params, skip_eval=True)
def testSingleWorkerAndPs(self):
test_name = 'testSingleWorkerAndPs'
params = test_util.get_params(test_name)
self._test_distributed(test_name, 1, 1, params)
def testThreeWorkersAndPses(self):
test_name = 'testThreeWorkersAndPses'
params = test_util.get_params(test_name)
self._test_distributed(test_name, 3, 3, params)
def testOneWorkerThreePses(self):
test_name = 'testOneWorkerThreePses'
params = test_util.get_params(test_name)
self._test_distributed(test_name, 1, 3, params)
def testThreeWorkersOnePs(self):
test_name = 'testThreeWorkersOnePs'
params = test_util.get_params(test_name)
self._test_distributed(test_name, 3, 1, params)
def testNoPrintTrainingAccuracy(self):
test_name = 'testNoPrintTrainingAccuracy'
params = test_util.get_params(test_name)._replace(
print_training_accuracy=False)
self._test_distributed(test_name, 2, 2, params)
def testRmspropParameterServer(self):
test_name = 'testRmspropParameterServer'
params = test_util.get_params(test_name)._replace(optimizer='rmsprop')
self._test_distributed(test_name, 2, 2, params)
def testMomentumReplicated(self):
test_name = 'testMomentumReplicated'
params = test_util.get_params(test_name)._replace(
optimizer='momentum', variable_update='distributed_replicated')
self._test_distributed(test_name, 2, 2, params)
def testNoCrossReplicaSyncParameterServerStaged(self):
test_name = 'testNoCrossReplicaSyncParameterServerStaged'
params = test_util.get_params(test_name)._replace(
staged_vars=True, cross_replica_sync=False)
self._test_distributed(test_name, 2, 2, params)
def testSingleGpu(self):
test_name = 'testSingleGpu'
params = test_util.get_params(test_name)._replace(num_gpus=1)
self._test_distributed(test_name, 2, 2, params)
def testBatchGroupSize(self):
test_name = 'testBatchGroupSize'
params = test_util.get_params(test_name)._replace(
batch_group_size=4, num_batches=100, num_warmup_batches=5)
self._test_distributed(test_name, 2, 2, params)
def testFp16WithFp32Vars(self):
test_name = 'testFp16WithFp32Vars'
params = test_util.get_params(test_name)._replace(
use_fp16=True, fp16_vars=False)
self._test_distributed(test_name, 2, 2, params)
def testFp16WithFp16Vars(self):
test_name = 'testFp16WithFp16Vars'
params = test_util.get_params(test_name)._replace(
use_fp16=True, fp16_vars=True, fp16_loss_scale=1.)
self._test_distributed(test_name, 2, 2, params)
def testFp16Replicated(self):
test_name = 'testFp16Replicated'
params = test_util.get_params(test_name)._replace(
use_fp16=True, variable_update='distributed_replicated')
self._test_distributed(test_name, 2, 2, params)
class DistributedVariableUpdateTest(tf.test.TestCase):
"""Tests that variables are updated correctly in distributed mode."""
def _test_variable_update(self,
test_name,
num_workers,
num_ps,
params,
num_controllers=0):
"""Tests variables are updated correctly when the given params are used."""
output_dir_path = os.path.join(test_name, 'variable_update')
logs = _spawn_benchmark_processes(output_dir_path, num_workers, num_ps,
num_controllers, params)
actual_losses = []
for worker_logs in logs:
outputs = test_util.get_training_outputs_from_logs(
worker_logs, params.print_training_accuracy)
actual_losses.append([x.loss for x in outputs])
inputs = test_util.get_fake_var_update_inputs()
expected_losses = test_util.TestModel().manually_compute_losses(
inputs, num_workers, params)
if params.variable_update == 'distributed_all_reduce':
      # In distributed all-reduce, at each step the controller outputs the
      # average of the losses from the workers, so we modify the expected
      # losses accordingly. E.g., we change [[1, 2], [4, 5]] to [[2.5, 3.5]]
expected_losses = [[sum(losses) / num_workers
for losses in zip(*expected_losses)]]
rtol = 3e-2 if params.use_fp16 else 1e-5
for worker_actual_losses, worker_expected_losses in zip(actual_losses,
expected_losses):
self.assertAllClose(worker_actual_losses[:len(worker_expected_losses)],
worker_expected_losses, rtol=rtol, atol=0.)
def _test_variable_updates(self, test_name, params):
"""Tests variables are updated correctly with various variable updates."""
# Unfortunately, distributed parameter server is non-deterministic with
# multiple workers, because one worker may write to a variable before
# another worker reads it. This probably does not harm training, but it
# does mean we cannot easily test that case. So, we use one worker.
self._test_variable_update(
test_name + '_ps', num_workers=1, num_ps=2, num_controllers=0,
params=params._replace(variable_update='parameter_server'))
self._test_variable_update(
test_name + '_rep', num_workers=2, num_ps=1, num_controllers=0,
params=params._replace(variable_update='distributed_replicated'))
self._test_variable_update(
test_name + '_allreduce', num_workers=2, num_ps=0, num_controllers=1,
params=params._replace(variable_update='distributed_all_reduce',
all_reduce_spec='psgpu#%d' % params.num_gpus))
def testVarUpdateDefault(self):
params = test_util.get_var_update_params()
self._test_variable_updates('testVarUpdateDefault', params)
def testVarUpdateCpuAsLocalParamDevice(self):
params = test_util.get_var_update_params()._replace(
local_parameter_device='cpu')
self._test_variable_updates('testVarUpdateCpuAsLocalParamDevice', params)
def testVarUpdateFp16(self):
params = test_util.get_var_update_params()._replace(use_fp16=True)
self._test_variable_updates('testVarUpdateFp16', params)
if __name__ == '__main__':
tf.test.main()
```
#### File: python/tf_cnn_benchmarks/cbuild_benchmark_storage.py
```python
from datetime import datetime
import json
import os
import sys
import six
from google.cloud import datastore
_TEST_NAME_ENV_VAR = 'TF_DIST_BENCHMARK_NAME'
def upload_to_benchmark_datastore(data, test_name=None, start_time=None):
"""Use a new datastore.Client to upload data to datastore.
Create the datastore Entities from that data and upload them to the
datastore in a batch using the client connection.
Args:
data: Map from benchmark names to values.
test_name: Name of this test. If not specified, name will be set either
from TF_DIST_BENCHMARK_NAME environment variable or to default name
'TestBenchmark'.
start_time: (datetime) Time to record for this test.
Raises:
ValueError: if test_name is not passed in and TF_DIST_BENCHMARK_NAME
is not set.
"""
client = datastore.Client()
if not test_name:
if _TEST_NAME_ENV_VAR in os.environ:
test_name = os.environ[_TEST_NAME_ENV_VAR]
else:
raise ValueError(
'No test name passed in for benchmarks. '
'Either pass a test_name to upload_to_benchmark_datastore or '
'set %s environment variable.' % _TEST_NAME_ENV_VAR)
test_name = six.text_type(test_name)
if not start_time:
start_time = datetime.now()
# Create one Entry Entity for each benchmark entry. The wall-clock timing is
# the attribute to be fetched and displayed. The full entry information is
# also stored as a non-indexed JSON blob.
entries = []
batch = []
for name, value in data.items():
e_key = client.key('Entry')
e_val = datastore.Entity(e_key, exclude_from_indexes=['info'])
entry_map = {'name': name, 'wallTime': value, 'iters': '1'}
entries.append(entry_map)
e_val.update({
'test': test_name,
'start': start_time,
'entry': six.text_type(name),
'timing': value,
'info': six.text_type(json.dumps(entry_map))
})
batch.append(e_val)
# Create the Test Entity containing all the test information as a
# non-indexed JSON blob.
test_result = json.dumps(
{'name': test_name,
'startTime': (start_time - datetime(1970, 1, 1)).total_seconds(),
'entries': {'entry': entries},
'runConfiguration': {'argument': sys.argv[1:]}})
t_key = client.key('Test')
t_val = datastore.Entity(t_key, exclude_from_indexes=['info'])
t_val.update({
'test': test_name,
'start': start_time,
'info': six.text_type(test_result)
})
batch.append(t_val)
# Put the whole batch of Entities in the datastore.
client.put_multi(batch)
```
#### File: tf_cnn_benchmarks/models/sensornet_model.py
```python
from models import model
class SensorNetModel(model.Model):
"""SensorNet fully connected model."""
def __init__(self):
super(SensorNetModel, self).__init__('sensor_net', 28, 512, 0.005)
def add_inference(self, cnn):
    # The input is a one-channel 28x28 image, flattened to 28*28 = 784 values
cnn.reshape([-1, 28*28])
cnn.affine(1024)
cnn.affine(1024)
cnn.affine(1024)
``` |
{
"source": "JoeHarman/ngs_pipeline",
"score": 2
} |
#### File: ngs_pipeline/ngs_pipeline/pipeline_atac_chipseq.py
```python
import sys
import os
import seaborn as sns
import glob
from cgatcore import pipeline as P
from ruffus import (
mkdir,
follows,
transform,
merge,
originate,
collate,
regex,
add_inputs,
active_if,
)
from cgatcore.iotools import zap_file, touch_file
from utils import is_none, is_on
import re
import glob
##################
# Pipeline setup #
##################
# Read in parameter file
P.get_parameters(glob.glob("config_*.yml")[0])
# Small edits to config to enable cluster usage
P.PARAMS["cluster_queue_manager"] = P.PARAMS.get(
"pipeline_cluster_queue_manager")
P.PARAMS["conda_env"] = os.path.basename(os.environ["CONDA_PREFIX"])
# Make sure that params dict is typed correctly
for key in P.PARAMS:
if is_none(P.PARAMS[key]):
P.PARAMS[key] = None
    elif is_on(P.PARAMS[key]):
P.PARAMS[key] = True
# Global variables
CREATE_BIGWIGS = P.PARAMS.get("run_options_bigwigs")
CALL_PEAKS = P.PARAMS.get("run_options_peaks")
CREATE_HUB = P.PARAMS.get("run_options_hub")
USE_HOMER = P.PARAMS.get("homer_use")
USE_DEEPTOOLS = P.PARAMS.get("deeptools_use")
USE_MACS = P.PARAMS.get("macs_use")
# Ensure that all fastq files are named consistently
if not os.path.exists("fastq"):
os.mkdir("fastq")
fastqs = dict()
for fq in glob.glob("*.fastq*"):
fq_renamed = (
fq.replace("Input", "input")
.replace("INPUT", "input")
.replace("R1.fastq", "1.fastq")
.replace("R2.fastq", "2.fastq")
)
fastqs[os.path.abspath(fq)] = os.path.join("fastq", fq_renamed)
for src, dest in fastqs.items():
if not os.path.exists(dest):
os.symlink(src, dest)
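# For example (hypothetical filename), "sampleA_Input_R1.fastq.gz" would be
# symlinked to "fastq/sampleA_input_1.fastq.gz", giving the downstream regexes a
# consistent naming scheme.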
###################
# Setup functions #
###################
def set_up_chromsizes():
"""
Ensures that genome chromsizes are present.
If chromsizes are not provided this function attempts to download them from UCSC.
The P.PARAMS dictionary is updated with the location of the chromsizes.
"""
assert P.PARAMS.get("genome_name"), "Genome name has not been provided."
if P.PARAMS.get("genome_chrom_sizes"):
pass
elif os.path.exists("chrom_sizes.txt.tmp"):
P.PARAMS["genome_chrom_sizes"] = "chrom_sizes.txt.tmp"
else:
from pybedtools.helpers import get_chromsizes_from_ucsc
get_chromsizes_from_ucsc(
P.PARAMS["genome_name"], "chrom_sizes.txt.tmp")
P.PARAMS["genome_chrom_sizes"] = "chrom_sizes.txt.tmp"
#############
# Read QC #
#############
@follows(mkdir("statistics"), mkdir("statistics/fastqc"))
@transform("*.fastq.gz", regex(r"(.*).fastq.gz"), r"statistics/fastqc/\1_fastqc.zip")
def qc_reads(infile, outfile):
"""Quality control of raw sequencing reads"""
statement = "fastqc -q -t %(pipeline_n_cores)s --nogroup %(infile)s --outdir statistics/fastqc"
P.run(
statement,
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_pipeline_n_cores=P.PARAMS["pipeline_n_cores"],
job_condaenv=P.PARAMS["conda_env"],
)
@merge(qc_reads, "statistics/readqc_report.html")
def multiqc_reads(infile, outfile):
"""Collate fastqc reports into single report using multiqc"""
statement = """export LC_ALL=en_US.UTF-8 &&
export LANG=en_US.UTF-8 &&
multiqc statistics/fastqc/ -o statistics -n readqc_report.html"""
P.run(
statement,
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_memory="2G",
job_condaenv=P.PARAMS["conda_env"],
)
######################
# Fastq processing #
######################
@follows(mkdir('trimmed'))
@transform('fastq/*.fastq*',
           # The regex excludes any filenames that match the paired-end pattern
regex(r'(?!.*_[12])^fastq/(.*).fastq.gz'),
r'trimmed/\1_trimmed.fq')
def fastq_trim_single(infile, outfile):
statement = '''trim_galore
--cores
%(pipeline_n_cores)s
--dont_gzip
%(trim_options)s
-o trimmed
%(infile)s'''
P.run(
statement,
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_pipeline_n_cores=P.PARAMS["pipeline_n_cores"],
job_condaenv=P.PARAMS["conda_env"],
)
@follows(mkdir("trimmed"), mkdir("statistics/trimming/data"))
@collate(
"fastq/*.fastq.gz",
regex(r"fastq/(.*)_R?[12].fastq(?:.gz)?"),
r"trimmed/\1_1_val_1.fq",
)
def fastq_trim_paired(infiles, outfile):
"""Trim adaptor sequences from fastq files using trim_galore"""
fq1, fq2 = infiles
fq1_basename, fq2_basename = os.path.basename(fq1), os.path.basename(fq2)
outdir = "trimmed"
trim_options = P.PARAMS.get("trim_options", "")
cores = (
P.PARAMS["pipeline_n_cores"] if int(
P.PARAMS["pipeline_n_cores"]) <= 8 else "8"
)
statement = """trim_galore
--cores %(cores)s
--paired
%(trim_options)s
--dont_gzip
-o %(outdir)s
%(fq1)s
%(fq2)s
"""
P.run(
statement,
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_pipeline_n_cores=P.PARAMS["pipeline_n_cores"],
job_condaenv=P.PARAMS["conda_env"],
)
###############
# Alignment #
###############
@follows(mkdir("bam"), mkdir("statistics/alignment"), fastq_trim_single, fastq_trim_paired)
@transform(fastq_trim_single, regex(r"trimmed/(.*)_trimmed.fq"), r"bam/\1.bam")
def fastq_align_single(infile, outfile):
"""
Aligns fq files.
Uses bowtie2 before conversion to bam file using Samtools view.
Bam file is then sorted and the unsorted bam file is replaced.
"""
basename = os.path.basename(outfile).replace(".bam", "")
sorted_bam = outfile.replace(".bam", "_sorted.bam")
aligner = P.PARAMS.get("aligner_aligner", "bowtie2")
aligner_options = P.PARAMS.get("aligner_options", "")
blacklist = P.PARAMS.get("genome_blacklist", "")
statement = [
"%(aligner_aligner)s -x %(aligner_index)s -U %(infile)s %(aligner_options)s |",
"samtools view - -b > %(outfile)s &&",
"samtools sort -@ %(pipeline_n_cores)s -o %(sorted_bam)s %(outfile)s",
]
if blacklist:
# Uses bedtools intersect to remove blacklisted regions
statement.append(
"&& bedtools intersect -v -b %(blacklist)s -a %(sorted_bam)s > %(outfile)s"
)
statement.append("&& rm -f %(sorted_bam)s")
else:
statement.append("&& mv %(sorted_bam)s %(outfile)s")
P.run(
" ".join(statement),
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_pipeline_n_cores=P.PARAMS["pipeline_n_cores"],
job_condaenv=P.PARAMS["conda_env"],
)
# Zeros the trimmed fastq files
zap_file(infile)
@follows(mkdir("bam"), mkdir("statistics/alignment"), fastq_trim_paired, fastq_trim_single)
@collate("trimmed/*.fq", regex(r"trimmed/(.*)_[12]_val_[12].fq"), r"bam/\1.bam")
def fastq_align_paired(infiles, outfile):
"""
Aligns fq files.
Uses bowtie2 before conversion to bam file using Samtools view.
Bam file is then sorted and the unsorted bam file is replaced.
"""
fq1, fq2 = infiles
basename = os.path.basename(outfile).replace(".bam", "")
sorted_bam = outfile.replace(".bam", "_sorted.bam")
aligner = P.PARAMS.get("aligner_aligner", "bowtie2")
aligner_options = P.PARAMS.get("aligner_options", "")
blacklist = P.PARAMS.get("genome_blacklist", "")
statement = [
"%(aligner_aligner)s -x %(aligner_index)s -1 %(fq1)s -2 %(fq2)s %(aligner_options)s |",
"samtools view - -b > %(outfile)s &&",
"samtools sort -@ %(pipeline_n_cores)s -o %(sorted_bam)s %(outfile)s",
]
if blacklist:
# Uses bedtools intersect to remove blacklisted regions
statement.append(
"&& bedtools intersect -v -b %(blacklist)s -a %(sorted_bam)s > %(outfile)s"
)
statement.append("&& rm -f %(sorted_bam)s")
else:
statement.append("&& mv %(sorted_bam)s %(outfile)s")
P.run(
" ".join(statement),
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_pipeline_n_cores=P.PARAMS["pipeline_n_cores"],
job_condaenv=P.PARAMS["conda_env"],
)
# Zeros the trimmed fastq files
for fn in infiles:
zap_file(fn)
@transform([fastq_align_single, fastq_align_paired], regex(r"bam/(.*)"), r"bam/\1.bai")
def create_bam_index(infile, outfile):
"""Creates an index for the bam file"""
statement = "samtools index %(infile)s"
P.run(
statement,
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_memory=P.PARAMS["pipeline_memory"],
job_condaenv=P.PARAMS["conda_env"],
)
##############
# Mapping QC #
##############
@transform([fastq_align_single, fastq_align_paired], regex(r".*/(.*).bam"), r"statistics/alignment/\1.txt")
def alignment_statistics(infile, outfile):
statement = """samtools stats %(infile)s > %(outfile)s"""
P.run(
statement,
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_memory="2G",
job_condaenv=P.PARAMS["conda_env"],
)
@follows(multiqc_reads, alignment_statistics)
@originate("statistics/mapping_report.html")
def alignments_multiqc(outfile):
"""Combines mapping metrics using multiqc"""
statement = """export LC_ALL=en_US.UTF-8 &&
export LANG=en_US.UTF-8 &&
multiqc statistics/alignment/ -o statistics -n alignmentqc_report.html"""
P.run(
statement,
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_memory="2G",
job_condaenv=P.PARAMS["conda_env"],
)
#####################
# Remove duplicates #
#####################
@follows(create_bam_index, mkdir("bam_processed"))
@transform([fastq_align_single, fastq_align_paired], regex(r"bam/(.*.bam)"), r"bam_processed/\1")
def alignments_filter(infile, outfile):
"""Remove duplicate fragments from bam file."""
alignments_deduplicate = P.PARAMS.get("alignments_deduplicate")
alignments_filter_options = P.PARAMS.get("alignments_filter_options")
if alignments_deduplicate or alignments_filter_options:
statement = [
"alignmentSieve",
"-b",
infile,
"-o",
outfile,
"-p",
"%(pipeline_n_cores)s",
"--ignoreDuplicates" if alignments_deduplicate else "",
alignments_filter_options if alignments_filter_options else " ",
"&& samtools sort -o %(outfile)s.tmp %(outfile)s -@ %(pipeline_n_cores)s",
"&& mv %(outfile)s.tmp %(outfile)s",
"&& samtools index %(outfile)s",
"&& rm -f %(outfile)s.tmp",
]
else:
infile_abspath = os.path.abspath(infile)
statement = [
"ln -s %(infile_abspath)s %(outfile)s",
"&& ln -s %(infile_abspath)s.bai %(outfile)s.bai",
]
P.run(
" ".join(statement),
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_memory=P.PARAMS["pipeline_memory"],
job_condaenv=P.PARAMS["conda_env"],
)
@active_if(USE_HOMER)
@follows(mkdir("tag/"))
@transform(alignments_filter, regex(r".*/(.*).bam"), r"tag/\1")
def create_tag_directory(infile, outfile):
statement = [
"makeTagDirectory",
outfile,
P.PARAMS["homer_tagdir_options"] or " ",
infile,
]
P.run(
" ".join(statement),
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_memory=P.PARAMS["pipeline_memory"],
job_condaenv=P.PARAMS["conda_env"],
)
###########
# BigWigs #
###########
@active_if(CREATE_BIGWIGS and USE_DEEPTOOLS)
@follows(mkdir("bigwigs/deeptools/"))
@transform(
alignments_filter, regex(
r"bam_processed/(.*).bam"), r"bigwigs/deeptools/\1_deeptools.bigWig")
def alignments_pileup_deeptools(infile, outfile):
statement = [
"bamCoverage",
"-b",
infile,
"-o",
outfile,
"-p",
"%(pipeline_n_cores)s",
P.PARAMS.get("deeptools_bamcoverage_options") or " ",
]
P.run(
" ".join(statement),
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_memory=P.PARAMS["pipeline_memory"],
job_pipeline_n_cores=P.PARAMS["pipeline_n_cores"],
job_condaenv=P.PARAMS["conda_env"],
)
@follows(mkdir("bigwigs/homer/"))
@active_if(CREATE_BIGWIGS and USE_HOMER)
@transform(
create_tag_directory, regex(r".*/(.*)"), r"bigwigs/homer/\1_homer.bigWig", extras=[r"\1"]
)
def alignments_pileup_homer(infile, outfile, tagdir_name):
outdir = os.path.dirname(outfile)
statement_bw = [
"makeBigWig.pl",
infile,
P.PARAMS["genome_name"],
"-chromSizes",
P.PARAMS["genome_chrom_sizes"],
"-url",
P.PARAMS.get("homer_makebigwig_options") or "INSERT_URL_HERE",
"-webdir",
outdir,
]
statement_mv = ["&&", 'mv', outfile.replace('_homer.bigWig', '.ucsc.bigWig'), outfile]
P.run(
" ".join([*statement_bw, *statement_mv]),
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_pipeline_n_cores=1,
job_condaenv=P.PARAMS["conda_env"],
)
try:
# Rename bigwigs to remove ucsc
bigwig_src = os.path.join(outdir, f"{tagdir_name}.ucsc.bigWig")
bigwig_dest = os.path.join(outdir, f"{tagdir_name}.bigWig")
os.rename(bigwig_src, bigwig_dest)
except OSError:
pass
##############
# Call peaks #
##############
@active_if(CALL_PEAKS and USE_MACS)
@follows(mkdir("peaks/macs"))
@transform(
alignments_filter,
regex(r".*/(.*?)(?<!input).bam"),
r"peaks/macs/\1_peaks.narrowPeak",
)
def call_peaks_macs(infile, outfile):
output_prefix = outfile.replace("_peaks.narrowPeak", "")
statement = [
"%(macs_caller)s",
"callpeak",
"-t",
"%(infile)s",
"-n",
"%(output_prefix)s",
P.PARAMS.get("macs_callpeak_options") or " ",
]
chipseq_match = re.match(r".*/(.*)_(.*).bam", infile)
if chipseq_match:
samplename = chipseq_match.group(1)
antibody = chipseq_match.group(2)
control_file = f"bam_processed/{samplename}_input.bam"
if os.path.exists(control_file):
statement.append(f"-c {control_file}")
P.run(
" ".join(statement),
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_memory=P.PARAMS["pipeline_memory"],
job_condaenv=P.PARAMS["conda_env"],
)
@active_if(CALL_PEAKS and USE_HOMER)
@follows(mkdir("peaks/homer"))
@transform(
create_tag_directory,
regex(r".*/(?!.*_input)(.*)"),
r"peaks/homer/\1_homer_peaks.bed",
)
def call_peaks_homer(infile, outfile):
tmp = outfile.replace(".bed", ".txt")
statement = [
"findPeaks",
infile,
P.PARAMS["homer_findpeaks_options"] or " ",
"-o",
tmp,
]
    # Find the matching input file if one exists
chipseq_match = re.match(r".*/(.*)_(.*)", infile)
if chipseq_match:
samplename = chipseq_match.group(1)
antibody = chipseq_match.group(2)
control = f"tag/{samplename}_input"
if os.path.exists(control):
statement.append(f"-i {control}")
# Need to convert homer peak format to bed
statement.append(f"&& pos2bed.pl {tmp} -o {outfile}")
P.run(
" ".join(statement),
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_memory=P.PARAMS["pipeline_memory"],
job_condaenv=P.PARAMS["conda_env"],
)
#######################
# UCSC hub generation #
#######################
@transform(call_peaks_macs, regex(r"peaks/(.*).narrowPeak"), r"peaks/\1.bed")
def convert_narrowpeak_to_bed(infile, outfile):
statement = """awk '{OFS="\\t"; print $1,$2,$3,$4}' %(infile)s > %(outfile)s"""
P.run(
statement,
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_condaenv=P.PARAMS["conda_env"],
)
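# The awk step above keeps only the first four narrowPeak columns (chrom, start,
# end, name), producing a minimal BED4 file for the bigBed conversion below.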
@transform(
[convert_narrowpeak_to_bed, call_peaks_homer],
regex(r"(.*)/(.*).bed"),
r"\1/\2.bigBed",
)
def convert_bed_to_bigbed(infile, outfile):
statement = """cat %(infile)s
| sort -k1,1 -k2,2n > %(infile)s.tmp
&& bedToBigBed %(infile)s.tmp %(genome_chrom_sizes)s %(outfile)s"""
P.run(
statement,
job_queue=P.PARAMS["pipeline_cluster_queue"],
job_condaenv=P.PARAMS["conda_env"],
)
@active_if(CREATE_HUB)
@follows(
fastq_align_single,
fastq_align_paired,
alignments_pileup_deeptools,
alignments_pileup_homer,
alignments_multiqc,
)
@merge(
[alignments_pileup_deeptools, alignments_pileup_homer, convert_bed_to_bigbed],
regex(r".*"),
os.path.join(
P.PARAMS.get("hub_dir", ""), P.PARAMS.get("hub_name", "") + ".hub.txt"
),
)
def make_ucsc_hub(infile, outfile, *args):
import trackhub
import shutil
import seaborn as sns
hub, genomes_file, genome, trackdb = trackhub.default_hub(
hub_name=P.PARAMS["hub_name"],
short_label=P.PARAMS.get("hub_short"),
long_label=P.PARAMS.get("hub_long"),
email=P.PARAMS["hub_email"],
genome=P.PARAMS["genome_name"],
)
bigwigs = [fn for fn in infile if ".bigWig" in fn]
colours = dict(zip(bigwigs, sns.color_palette("hls", len(set(bigwigs)))))
bigbeds = [fn for fn in infile if ".bigBed" in fn]
for bw in bigwigs:
track = trackhub.Track(
name=os.path.basename(bw).replace(".bigWig", ""),
source=bw, # filename to build this track from
visibility="full", # shows the full signal
color=",".join(
[str(int(x * 255)) for x in colours[bw]]), # brick red
autoScale="on", # allow the track to autoscale
tracktype="bigWig", # required when making a track
)
trackdb.add_tracks(track)
for bb in bigbeds:
track = trackhub.Track(
name=os.path.basename(bb).replace(".bigBed", ""),
source=bb, # filename to build this track from
color="0,0,0", # brick red
tracktype="bigBed", # required when making a track
)
trackdb.add_tracks(track)
# Stage the hub
trackhub.upload.stage_hub(hub=hub, staging="hub_tmp_dir")
# Copy to the new location
shutil.copytree(
"hub_tmp_dir",
P.PARAMS["hub_dir"],
dirs_exist_ok=True,
symlinks=P.PARAMS.get("hub_symlink", False),
)
# Delete the staged hub
shutil.rmtree("hub_tmp_dir")
@follows(
    alignments_pileup_homer,
call_peaks_homer,
call_peaks_macs,
)
@originate("pipeline_complete.txt")
def full(outfile):
touch_file(outfile)
if __name__ == "__main__":
if (
"-h" in sys.argv or "--help" in sys.argv
): # If --help then just run the pipeline without setup
sys.exit(P.main(sys.argv))
elif not "make" in sys.argv:
sys.exit(P.main(sys.argv))
elif "make" in sys.argv:
set_up_chromsizes()
sys.exit(P.main(sys.argv))
``` |
{
"source": "joeharpur/compare_ocr",
"score": 3
} |
#### File: joeharpur/compare_ocr/helpers.py
```python
import pandas as pd
import itertools
from fuzzywuzzy import fuzz
import matplotlib.pyplot as plt
import matplotlib.patches as patches
def iou(box1, box2):
"""
Implement the intersection over union (IoU) between box1 and box2
Arguments:
box1 (List) -- first box with coordinates (box1_x1, box1_y1, box1_x2, box_1_y2)
box2 (List) -- second box with coordinates (box2_x1, box2_y1, box2_x2, box2_y2)
Returns:
iou (Float) -- intersection over union value for box1, box2
"""
# Assign variable names to coordinates for clarity
(box1_x1, box1_y1, box1_x2, box1_y2) = box1
(box2_x1, box2_y1, box2_x2, box2_y2) = box2
# Calculate the coordinates and area of the intersection of box1 and box2.
xi1 = max(box1_x1, box2_x1)
yi1 = max(box1_y1, box2_y1)
xi2 = min(box1_x2, box2_x2)
yi2 = min(box1_y2, box2_y2)
    inter_width = max(xi2 - xi1, 0)
    inter_height = max(yi2 - yi1, 0)
    inter_area = inter_width * inter_height
# Calculate the Union area by using Formula: Union(A,B)=A+B-Inter(A,B)
box1_area = (box1_y2 - box1_y1) * (box1_x2 - box1_x1)
box2_area = (box2_y2 - box2_y1) * (box2_x2 - box2_x1)
union_area = (box1_area + box2_area) - inter_area
# compute the IoU
iou = inter_area / union_area
return iou
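# Worked example (hypothetical boxes, for illustration only): two 10x10 boxes at
# (0, 0, 10, 10) and (5, 5, 15, 15) overlap in a 5x5 region, so
# iou(box1, box2) = 25 / (100 + 100 - 25) ≈ 0.143.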
def find_boundaries(table, word, fuzz_threshold):
"""
Find the boundary boxes for a specified word.
String match strictness can be tuned using fuzz_threshold.
Arguments:
table (pd.DataFrame) -- table containing text field and bounds field
word (Str) -- search word
fuzz_threshold (Int) -- accepted closeness between string values
Returns:
(List) -- list of matching boundaries
"""
    matches = lambda x: fuzz.ratio(x.lower(), word) >= fuzz_threshold
    boundaries = table[table['text'].apply(matches)]['bounds'].values
return list(boundaries)
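# Illustrative behaviour (hypothetical values): with word='invoice' and
# fuzz_threshold=80, an OCR token such as 'Invoice' (ratio 100 after lowering) is
# kept, while a dissimilar token like 'involve' scores below 80 and is dropped.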
def extract_page(table, page):
"""
Filter table by page number field.
Arguments:
table (pd.DataFrame) -- table containing "page" field
page (Int) -- page value
Returns:
extracted (pd.DataFrame) -- filtered table
"""
extracted = table[table['page'] == page]
return extracted
def plot_page(im_data, scale):
"""
Plot image data to Matplotlib axes.
Arguments:
im_data (np.array) -- array containing image data
scale (Int) -- scale to convert image dpi to inches
Returns:
ax (matplotlib.axes) -- axes with image data plotted
"""
    height, width = im_data.shape[:2]
    # Matplotlib figsize is (width, height) in inches
    figsize = width / scale, height / scale
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
ax.imshow(im_data, cmap='gray')
return ax
def plot_boundary_boxes(ax, red_boxes, green_boxes):
"""
Plot boundary boxes to Matplotlib axes.
Arguments:
ax (matplotlib.axes) -- pre generated axes
red_boxes (List) -- list of boundary coordinates
green_boxes (List) -- list of boundary coordinates
Returns:
ax (matplotlib.axes) -- axes with boundary boxes plotted
"""
for r, g in itertools.zip_longest(red_boxes, green_boxes):
if r:
r_x, r_y, r_w, r_h = r[0], r[1], r[2]-r[0], r[3]-r[1]
r_rect = patches.Rectangle((r_x, r_y), r_w, r_h,
linewidth=2,
edgecolor='r',
facecolor='none')
ax.add_patch(r_rect)
if g:
g_x, g_y, g_w, g_h = g[0], g[1], g[2]-g[0], g[3]-g[1]
g_rect = patches.Rectangle((g_x, g_y), g_w, g_h,
linewidth=2,
edgecolor='g',
facecolor='none')
ax.add_patch(g_rect)
return ax
def build_legend(ax, names, red_count, green_count):
"""
    Build a legend describing the boundary boxes on a Matplotlib axes.
Arguments:
ax (matplotlib.axes) -- pre generated axes
names (List) -- list of ocr engine names
red_count (Int) -- count of red boundary boxes
green_count (Int) -- count of green boundary boxes
Returns:
ax (matplotlib.axes) -- axes with legend plotted
"""
handles = []
red_patch = patches.Patch(linewidth=2,
edgecolor='r',
facecolor='none',
label=names[0].capitalize())
handles.append(red_patch)
info_1 = patches.Patch(edgecolor='none',
facecolor='none',
label=str(red_count) + ' matches')
handles.append(info_1)
if green_count > 0:
green_patch = patches.Patch(linewidth=2,
edgecolor='g',
facecolor='none',
label=names[1].capitalize())
handles.append(green_patch)
info_2 = patches.Patch(edgecolor='none',
facecolor='none',
label=str(green_count) + ' matches')
handles.append(info_2)
ax.legend(handles=handles,
loc='best',
framealpha=0.5)
return ax
if __name__ == '__main__':
pass
``` |
{
"source": "JoeHartley3/raspberry-pi-cdplayer",
"score": 3
} |
#### File: server/classes/MediaPlayer.py
```python
import queue
import subprocess
from time import sleep
from enum import Enum
from classes.MediaLibrary import MediaLibrary
from classes.MediaPlayerInfo import MediaPlayerInfo, CurrentTrackInfo, TrackInfo
import json
import musicbrainzngs as m
import libdiscid
class MediaPlayer:
"""
Contains logic for controlling mpv and getting information about CD.
"""
class DiskType(Enum):
AUDIO_CD = 'audio_cd'
MP3_CD = 'mp3_cd'
class BranchType(Enum):
FOLDERS = 'folders'
ARTISTS = 'artists'
ALBUMS = 'albums'
def __init__(self, config):
self._config = config
self.MPV_COMMAND = ["mpv", "--quiet", "--vo=null",
"--no-audio-display",
"--cache=1024", "--loop",
"--input-ipc-server=" + self._config['MPV_SOCKET_PATH']]
self._cd = CD()
self._mpv = None
self._current_disk_type = None
self._media_library = None
self._current_track_list = None
self._current_media_library_branch_type_index = None
self._info_events = None
self._current_track = 0
self._volume = 95
def get_current_info(self, status=True, cur_track_info=True, volume=True, track_list=False, library=False):
info = MediaPlayerInfo()
if self.is_running:
if status:
status_res = self._run_command('get_property', 'pause')
info.status = 'paused' if status_res else 'playing'
if cur_track_info:
info.cur_track_info = CurrentTrackInfo()
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
chapter_res = self._run_command('get_property', 'chapter')
self._current_track = chapter_res
info.cur_track_info.track_number = chapter_res
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
playlist_pos_res = self._run_command('get_property', 'playlist-pos')
self._current_track = playlist_pos_res
info.cur_track_info.track_number = playlist_pos_res
if self._current_track is not None:
time_res = self._run_command('get_property', 'time-pos')
if time_res is not None:
time_millis = time_res * 1000
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
for track in self._current_track_list[0:self._current_track]:
time_millis -= track.total_time
info.cur_track_info.cur_time = time_millis
if volume:
vol = self._run_command('get_property', 'volume')
if vol is not None:
self._volume = vol
info.volume = vol
if track_list and self._current_track_list is not None:
info.track_list = list(map(lambda x: x.as_dict(), self._current_track_list))
if library and self._media_library is not None:
info.library = self._media_library
else:
info.volume = self._volume
info.status = 'waitingForCD'
return info
def poll_info(self):
try:
info_event = self._info_events.get_nowait()
return info_event
except queue.Empty:
return None
def try_play_cd(self):
"""
Tries to play CD in CD drive, if there is any (or USB drive).
Sets the current media library branch type and index attribute and puts info into the info queue.
:return: None
"""
self._info_events = queue.Queue()
if not self.is_running:
cd_type = self._check_for_cd()
if cd_type is None:
return
if cd_type == MediaPlayer.DiskType.AUDIO_CD:
# check for audio CD
print('playing audio CD')
self._mpv = subprocess.Popen(self.MPV_COMMAND + [
'cdda://', '--volume=' + self._config['DEFAULT_VOLUME']
], bufsize=1)
elif cd_type == MediaPlayer.DiskType.MP3_CD:
# check for MP3 CD
print('playing MP3 CD')
self._mpv = subprocess.Popen(self.MPV_COMMAND + ['--volume=' + self._config['DEFAULT_VOLUME']] +
list(map(lambda file: file.full_path,
self._media_library.media_folders[0].media_files)),
bufsize=1)
self._current_media_library_branch_type_index = (MediaPlayer.BranchType.FOLDERS, 0)
info = self.get_current_info(True, True, True, True, True)
# info = self.get_current_info(True, False, True, True, True)
# fill cur_track_info with zeros, because it may not be initialized yet (mpv loading)
info.cur_track_info = CurrentTrackInfo()
info.cur_track_info.cur_time = 0
info.cur_track_info.track_number = 0
self._info_events.put(info)
def _check_for_cd(self):
self._current_disk_type = None
self._current_track_list = []
self._cd.load_cd_info()
df = []
if CD.is_cd_inserted():
if self._cd.numtracks > 1:
                # A non-audio (data) CD reports only a single track
self._current_disk_type = MediaPlayer.DiskType.AUDIO_CD
try:
artist = self._cd._cd_info['disc']['release-list'][0]['artist-credit-phrase']
album = self._cd._cd_info['disc']['release-list'][0]['title']
self._current_track_list = list(map(
lambda x, y: TrackInfo(y, artist, album, x['recording']['title']),
self._cd._cd_info['disc']['release-list'][0]['medium-list'][0]['track-list'],
self._cd.track_lengths))
except:
self._current_track_list = list(map(lambda x: TrackInfo(x), self._cd.track_lengths))
else:
df = subprocess.getoutput('df | grep ' + self._config['CD_DEVICE']).split()
else:
df = subprocess.getoutput('df | grep ' + self._config['USB_DEVICE']).split()
if len(df) > 0:
mount_point = ' '.join(df[5:])
self._media_library = MediaLibrary()
self._media_library.init(mount_point)
if self._media_library.media_file_count > 0:
self._current_disk_type = MediaPlayer.DiskType.MP3_CD
self._current_track_list = list(map(
lambda media_info: TrackInfo(media_info.total_time, media_info.artist, media_info.album,
media_info.title),
self._media_library.media_folders[0].media_files))
# print(self._media_library.as_dict())
return self._current_disk_type
def _run_command(self, *command):
command_dict = {
"command": command
}
command_json = json.dumps(command_dict) + '\n'
socat = subprocess.Popen(['socat', '-', self._config['MPV_SOCKET_PATH']], stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
socat_output = socat.communicate(command_json.encode('utf-8'))
if socat_output[0] is not None and \
len(socat_output[0]) != 0 and \
socat_output[1] is None:
try:
data = json.loads(socat_output[0].decode())
return data['data']
except:
return None
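    # For reference, a call such as self._run_command('get_property', 'volume') is
    # serialised to mpv's JSON IPC form {"command": ["get_property", "volume"]} and
    # piped to the socket via socat; the "data" field of mpv's reply is returned.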
def _put_info_with_delay(self, full=False):
if full:
sleep(0.2)
self._info_events.put(self.get_current_info(True, True, True, True, True))
sleep(1)
self._info_events.put(self.get_current_info(True, True, True, True, True))
else:
sleep(0.2)
self._info_events.put(self.get_current_info())
sleep(1)
self._info_events.put(self.get_current_info())
def next_track(self):
last_track = len(self._current_track_list) - 1
if self._current_track != last_track:
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self._run_command('add', 'chapter', '1')
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
self._run_command('add', 'playlist-pos', '1')
else:
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self._run_command('set', 'chapter', '0')
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
self._run_command('set', 'playlist-pos', '0')
self._put_info_with_delay()
def prev_track(self):
if self._current_track != 0:
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self._run_command('add', 'chapter', '-1')
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
self._run_command('add', 'playlist-pos', '-1')
else:
last_track = len(self._current_track_list) - 1
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self._run_command('set', 'chapter', str(last_track))
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
self._run_command('set', 'playlist-pos', str(last_track))
self._put_info_with_delay()
def next_branch(self):
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self.next_track()
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
type_index = self._current_media_library_branch_type_index
folder_index = None
artist_index = None
album_index = None
if type_index[0] == MediaPlayer.BranchType.FOLDERS:
folder_index = (type_index[1] + 1) % len(self._media_library.media_folders)
elif type_index[0] == MediaPlayer.BranchType.ALBUMS:
artist_index = (type_index[1] + 1) % len(self._media_library.artists)
album_index = type_index[2] + 1
if album_index >= len(self._media_library.artists[artist_index].albums):
album_index = 0
elif type_index[0] == MediaPlayer.BranchType.ARTISTS:
artist_index = (type_index[1] + 1) % len(self._media_library.artists)
self.play_file(type_index[0], (folder_index, artist_index, album_index, 0))
def prev_branch(self):
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self.prev_track()
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
type_index = self._current_media_library_branch_type_index
folder_index = None
artist_index = None
album_index = None
if type_index[0] == MediaPlayer.BranchType.FOLDERS:
folder_index = type_index[1] - 1
folder_index = folder_index if folder_index != -1 else len(self._media_library.media_folders) - 1
elif type_index[0] == MediaPlayer.BranchType.ALBUMS:
album_index = type_index[2] - 1
if album_index == -1:
artist_index = type_index[1] - 1
                artist_index = artist_index if artist_index != -1 else len(self._media_library.artists) - 1
album_index = len(self._media_library.artists[artist_index].albums) - 1
elif type_index[0] == MediaPlayer.BranchType.ARTISTS:
artist_index = type_index[1] - 1
artist_index = artist_index if artist_index != -1 else len(self._media_library.artists) - 1
self.play_file(type_index[0], (folder_index, artist_index, album_index, 0))
def play_track(self, track_number):
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
self._run_command('set', 'chapter', str(track_number))
elif self._current_disk_type == MediaPlayer.DiskType.MP3_CD:
self._run_command('set', 'playlist-pos', str(track_number))
self._put_info_with_delay()
def play_file(self, media_library_type, indexes):
# indexes = (folder_index, artist_index, album_index, file_index)
if self._current_disk_type == MediaPlayer.DiskType.MP3_CD and \
media_library_type is not None and \
indexes is not None:
files = None
if media_library_type == MediaPlayer.BranchType.FOLDERS:
self._current_media_library_branch_type_index = (MediaPlayer.BranchType.FOLDERS,
indexes[0])
files = self._media_library.media_folders[indexes[0]].media_files
elif media_library_type == MediaPlayer.BranchType.ALBUMS:
self._current_media_library_branch_type_index = (MediaPlayer.BranchType.ALBUMS,
indexes[1],
indexes[2])
files = self._media_library.artists[indexes[1]].albums[indexes[2]].songs
elif media_library_type == MediaPlayer.BranchType.ARTISTS:
self._current_media_library_branch_type_index = (MediaPlayer.BranchType.ARTISTS,
indexes[1])
files = self._media_library.artists[indexes[1]].songs
file_index = indexes[3]
if files is not None:
ordered_files = files[file_index:] + files[0:file_index]
self._current_track_list = list(map(
lambda media_info: TrackInfo(media_info.total_time, media_info.artist, media_info.album,
media_info.title),
ordered_files))
self._run_command('playlist-clear')
self._run_command('loadfile', files[file_index].full_path)
for file in ordered_files[1:]:
self._run_command('loadfile', file.full_path, 'append')
self._put_info_with_delay(True)
def volume_up(self):
self._volume = (self._volume + 5) % 101
self._run_command('set', 'volume', str(self._volume))
self._info_events.put(self.get_current_info(False, False, True, False, False))
def volume_down(self):
volume = self._volume - 5
volume = volume if volume >= 0 else 0
self._volume = volume
self._run_command('set', 'volume', str(self._volume))
self._info_events.put(self.get_current_info(False, False, True, False, False))
def play_pause(self):
pause = self._run_command('get_property', 'pause')
if pause:
self._run_command('set', 'pause', 'no')
else:
self._run_command('set', 'pause', 'yes')
self._info_events.put(self.get_current_info())
def stop(self):
try:
self._mpv.kill()
except:
print("Nothing is playing.")
subprocess.call(['umount', '/dev/' + self._config['USB_DEVICE']])
self._current_disk_type = None
self._current_track = 0
self._current_track_list = None
self._current_media_library_branch_type_index = None
self._media_library = None
self.eject()
def eject(self):
subprocess.Popen(['eject', self._config['CD_DEVICE']])
def seek(self, seek_percent):
time_millis = self._current_track_list[self._current_track].total_time * seek_percent / 100
if self._current_disk_type == MediaPlayer.DiskType.AUDIO_CD:
for track in self._current_track_list[:self._current_track]:
time_millis += track.total_time
self._run_command('set', 'time-pos', str(time_millis / 1000))
# (time_millis / 1000) * (seek_percent / 100)
self._put_info_with_delay()
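    # Worked example (hypothetical track list): seeking to 50% of a 200,000 ms
    # track that follows a 180,000 ms track on an audio CD sets time-pos to
    # (100,000 + 180,000) / 1000 = 280 seconds.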
@property
def is_running(self):
return self._mpv is not None and self._mpv.poll() is None
@property
def current_track_list(self):
return self._current_track_list
class CD:
"""
Represents CD drive and disc inside.
"""
def __init__(self):
self._numtracks = 0
self._track_lengths = []
self._cd_info = None
def load_cd_info(self):
# JH - added code to query musicbrainz for disk info, build track list and times from that info
# instead of the cd-discid output, if available.
track_offsets = []
m.set_useragent('raspberry-pi-cdplayer', '0.2', 'https://github.com/JoeHartley3/raspberry-pi-cdplayer')
try:
this_disc = libdiscid.read('/dev/cdrom')
except:
print('DiskID could not read /dev/cdrom')
self._numtracks = 0
self._track_lengths = []
self._cd_info = None
return
try:
# A CD stub is an anonymously submitted track list that contains a disc ID, barcode, comment field, and
# basic metadata like a release title and track names. ( https://wiki.musicbrainz.org/CD_Stub )
# By using cdstubs=False here, we force a ResponseError rather than try and parse the stub. Remove the
# argument to enable cdstubs.
self._cd_info = m.get_releases_by_discid(this_disc.id, includes=["recordings", "artists"], cdstubs=False)
except m.ResponseError:
print("Disk not found or database unavailable")
discid = subprocess.getstatusoutput('cd-discid --musicbrainz')
if discid[0] == 0:
output_split = discid[1].split()
self._numtracks = int(output_split[0])
track_offsets = list(map(lambda i: int(i), output_split[1:]))
if self._cd_info is not None:
if self._cd_info.get("disc"):
self._numtracks = self._cd_info['disc']['offset-count']
track_offsets = self._cd_info['disc']['offset-list']
# Append the total time to the track_offsets
track_offsets.append(int(self._cd_info['disc']['sectors']))
elif self._cd_info.get("cdstub"):
pass
else:
# We should never actually get to this point with or without cdstubs, but let's make sure.
# This is the same code as for a ResponseError above.
print("Unknown disk type from MB - use track numbers")
discid = subprocess.getstatusoutput('cd-discid --musicbrainz')
if discid[0] == 0:
output_split = discid[1].split()
self._numtracks = int(output_split[0])
track_offsets = list(map(lambda i: int(i), output_split[1:]))
try:
self._track_lengths = list(
map(lambda i, offsets=track_offsets: int((offsets[i + 1] - offsets[i]) * 1000 / 75),
range(0, self._numtracks)))
except:
self._numtracks = 0
self._track_lengths = []
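    # Note: CD track offsets are measured in frames (75 frames per second), so the
    # expression above converts each offset difference to milliseconds; e.g. a
    # 13,500-frame track corresponds to 13,500 * 1000 / 75 = 180,000 ms (3 minutes).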
@staticmethod
def is_cd_inserted():
try:
subprocess.check_output(['cd-discid', '--musicbrainz'])
except subprocess.CalledProcessError:
# return value is not 0
return False
return True
@property
def numtracks(self):
return self._numtracks
@property
def track_lengths(self):
return self._track_lengths
``` |
{
"source": "Joe-Heffer-Shef/owast",
"score": 3
} |
#### File: blueprints/blob/views.py
```python
import io
import flask
import owast.blob
app = flask.current_app
blueprint = flask.Blueprint('blob', __name__, url_prefix='/blob',
template_folder='templates')
service_client = owast.blob.get_service_client()
def iter_blobs():
for container in service_client.list_containers():
container_client = service_client.get_container_client(container)
yield from container_client.list_blobs()
@blueprint.route('/')
def list_():
"""
Show all blobs
"""
return flask.render_template('blob/list.html', blobs=iter_blobs())
@blueprint.route('/<string:container>/<string:blob>')
def detail(container: str, blob: str):
"""
Inspect a single blob
"""
blob_client = service_client.get_blob_client(
container=container, blob=blob)
blob = blob_client.get_blob_properties()
return flask.render_template('blob/detail.html', blob=blob)
@blueprint.route('/<string:container>/<string:blob>/download')
def download(container: str, blob: str):
"""
Retrieve data for this file
"""
# Download blob
blob_client = service_client.get_blob_client(
container=container, blob=blob)
blob = blob_client.get_blob_properties()
downloader = blob_client.download_blob() # type: azure.storage.blob.StorageStreamDownloader
# Send download file
data = io.BytesIO(downloader.readall())
data.seek(0)
return flask.send_file(data, download_name=blob['name'],
mimetype=blob['content_settings']['content_type'])
@blueprint.route('/<string:container>/<string:blob>/delete')
def delete(container: str, blob: str):
blob_client = service_client.get_blob_client(
container=container, blob=blob)
blob_client.delete_blob()
flask.flash(f'Deleted blob "{container}/{blob}"')
return flask.redirect(
flask.url_for('container.detail', container=container))
```
#### File: blueprints/container/views.py
```python
import flask
import owast.blob
app = flask.current_app
blueprint = flask.Blueprint('container', __name__, url_prefix='/container',
template_folder='templates')
service_client = owast.blob.get_service_client()
@blueprint.route('/')
def list_():
"""
Show all the containers
"""
containers = service_client.list_containers()
return flask.render_template('container/list.html', containers=containers)
@blueprint.route('/<string:container>')
def detail(container: str):
"""
Inspect a container on Azure Blob Storage
"""
# Get container info
container_client = service_client.get_container_client(
container=container)
container = container_client.get_container_properties()
# Get the blobs inside this container
blobs = container_client.list_blobs()
return flask.render_template('container/detail.html',
container=container, blobs=blobs)
```
#### File: blueprints/schema/views.py
```python
import json
import os
import bson.json_util
import flask
from bson.objectid import ObjectId
from flask_pymongo.wrappers import Collection
from pymongo.results import UpdateResult, InsertOneResult, DeleteResult
from .forms import SchemaForm
app = flask.current_app
blueprint = flask.Blueprint('schema', __name__, url_prefix='/schema',
template_folder='templates')
# These are properties that contain JSON objects or arrays
JSON_FIELDS = {
'properties',
'required',
}
# Don't allow users to overwrite these collections
SYSTEM_COLLECTIONS = {
'schemas',
'relations',
}
@blueprint.route('/')
def list_():
schemas = app.mongo.db.schemas.find()
return flask.render_template('schema/list.html', schemas=schemas)
@blueprint.route('/create', methods={'GET', 'POST'})
def create():
"""
Add a new research object
"""
form = SchemaForm()
# Process form submission
if form.validate_on_submit():
# Prevent collection name conflict
if flask.request.form['collection'] in SYSTEM_COLLECTIONS:
raise ValueError('Forbidden collection name')
# Build research object from user input
schema = dict(
title=flask.request.form['title'],
description=flask.request.form['description'],
icon=flask.request.form['icon'],
collection=flask.request.form['collection'].casefold(),
type='object',
properties=json.loads(flask.request.form['properties']),
required=json.loads(flask.request.form['required']),
)
# Create document
result = app.mongo.db.schemas.insert_one(
schema) # type: InsertOneResult
app.logger.info(result.acknowledged)
flask.flash(f'Created "{result.inserted_id}"')
# Redirect to the new object
return flask.redirect(flask.url_for('schema.detail',
schema_id=result.inserted_id))
# Show form
return flask.render_template('schema/create.html', form=form)
@blueprint.route('/<ObjectId:schema_id>')
def detail(schema_id: ObjectId):
schema = app.mongo.db.schemas.find_one_or_404(schema_id)
return flask.render_template('schema/detail.html', schema=schema)
def build_json_schema_document(schema: dict) -> dict:
uri = flask.url_for('schema.document', schema_id=str(schema['_id']),
_external=True)
return dict(
**{'$schema': os.environ['JSON_SCHEMA_SPEC'],
'$id': uri},
**{key: value for key, value in schema.items() if
not key.startswith('_')}
)
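# For illustration (field names and values are hypothetical), the returned mapping
# looks like: {"$schema": <JSON_SCHEMA_SPEC>, "$id": "https://.../schema/<id>/schema.json",
#              "title": ..., "description": ..., "type": "object",
#              "properties": {...}, "required": [...]}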
@blueprint.route('/<ObjectId:schema_id>/schema.json')
def document(schema_id: ObjectId):
"""
Show the JSON schema document
https://json-schema.org/draft/2020-12/json-schema-core.html
"""
schema = app.mongo.db.schemas.find_one_or_404(schema_id)
schema_document = build_json_schema_document(schema)
return app.response_class(
bson.json_util.dumps(schema_document, **flask.request.args),
mimetype='application/schema+json')
@blueprint.route('/<ObjectId:schema_id>/delete')
def delete(schema_id: ObjectId):
schemas = app.mongo.db.schemas # type: Collection
result = schemas.delete_one(
dict(_id=schema_id)) # type: DeleteResult
app.logger.info(result.raw_result)
flask.flash(f'Deleted {schema_id}')
return flask.redirect(flask.url_for('schema.list_'))
@blueprint.route('/<ObjectId:schema_id>/edit', methods={'GET', 'POST'})
def edit(schema_id: ObjectId):
"""
Modify a research object
"""
collection = app.mongo.db.schemas # type: Collection
schema = collection.find_one_or_404(schema_id)
form = SchemaForm()
# Process form submission
if form.validate_on_submit():
# Parse JSON fields
_schema = {key: json.loads(value) if key in JSON_FIELDS else value
for key, value in form.data.items()}
# Write changes to database
result = collection.update_one(
dict(_id=schema_id), {'$set': _schema}) # type: UpdateResult
app.logger.info(result.raw_result)
flask.flash(f"Saved changes to '{schema['title']}'")
return flask.redirect(flask.url_for('schema.detail',
schema_id=schema_id))
# Convert fields to JSON
form.process(
**{key: json.dumps(schema[key]) if key in JSON_FIELDS else value for
key, value in schema.items()})
return flask.render_template('schema/edit.html', schema=schema, form=form)
```
#### File: blueprints/tool/views.py
```python
import json
import flask
import pymongo.collection
import pymongo.results
from bson.objectid import ObjectId
app = flask.current_app
blueprint = flask.Blueprint('tool', __name__, url_prefix='/tool',
template_folder='templates')
@blueprint.route('/create', methods={'GET', 'POST'})
def create():
if flask.request.method == 'POST':
tool = {key: value for key, value in flask.request.form.items() if
not key.startswith('_')}
# Insert custom JSON fields
# TODO handle malformed JSON input
custom = json.loads(tool.pop('custom'))
tool.update(custom)
tools = app.mongo.db.tools # type: pymongo.collection.Collection
result = tools.insert_one(
tool) # type: pymongo.results.InsertOneResult
app.logger.info(result)
flask.flash(f"Created {result.inserted_id}")
option_names = {'Supplier'}
options = app.mongo.db.options.find(dict(name={'$in': list(option_names)}))
return flask.render_template('tool/create.html', options=options)
@blueprint.route('/<ObjectId:tool_id>')
def detail(tool_id: ObjectId):
tool = app.mongo.db.tools.find_one_or_404(tool_id)
return flask.render_template('tool/detail.html', tool=tool)
@blueprint.route('/')
def list_():
tools = app.mongo.db.tools.find()
return flask.render_template('tool/list.html', tools=tools)
@blueprint.route('/<string:tool_id>/delete/')
def delete(tool_id: str):
    # Delete the tool document, mirroring the delete views in the other blueprints
    app.mongo.db.tools.delete_one(dict(_id=ObjectId(tool_id)))
    flask.flash(f'Deleted tool {tool_id}')
    return flask.redirect(flask.url_for('tool.list_'))
```
#### File: owast/fields/datetime_local.py
```python
import datetime
def datetime_local_default(t: datetime.datetime = None) -> datetime.datetime:
"""
    Current UTC timestamp with microseconds dropped, suitable for an HTML5
    datetime-local input, e.g. "2018-06-14T00:00"
https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/datetime-local
"""
t = t or datetime.datetime.utcnow()
return t.replace(microsecond=0)
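# Example (illustrative): datetime(2018, 6, 14, 12, 30, 45, 123456) becomes
# datetime(2018, 6, 14, 12, 30, 45), which serialises as "2018-06-14T12:30:45"
# for a datetime-local input value.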
``` |
{
"source": "Joe-Heffer-Shef/pyspark-bessemer",
"score": 3
} |
#### File: pyspark-bessemer/pysparktest/__main__.py
```python
import logging
import os
import pyspark.sql
import pysparktest.test_data_frame
import pysparktest.test_basic_stats
import pysparktest.test_pandas
LOG_LEVEL = os.getenv('LOG_LEVEL', 'WARNING')
LOGGER = logging.getLogger(__name__)
def run_tests(session: pyspark.sql.SparkSession):
pysparktest.test_data_frame.test_data_frame(session)
pysparktest.test_basic_stats.test_correlation(session)
pysparktest.test_basic_stats.test_chi_square(session)
pysparktest.test_pandas.test_pandas(session)
def main():
# Create Spark session (only one can exist per process) as global variable
# to be shared between all unit tests
# https://stackoverflow.com/a/41513805
with pyspark.sql.SparkSession.builder.appName(
__name__).getOrCreate() as session:
session.sparkContext.setLogLevel(LOG_LEVEL)
# Log configuration
for key, value in session.sparkContext.getConf().getAll():
LOGGER.info("%s=%s", key, value)
run_tests(session)
if __name__ == '__main__':
main()
``` |
{
"source": "Joe-Heffer-Shef/reprohack_site",
"score": 2
} |
#### File: reprohack/tests/test_views.py
```python
import pytest
from django.forms.models import model_to_dict
from django.test import Client
from django.urls import reverse
from reprohack_hub.reprohack.models import Review
from reprohack_hub.users.models import User
pytestmark = pytest.mark.django_db
def test_markdown_page(client: Client) -> None:
"""Test markdown rendering."""
response = client.get(reverse("about_test"))
assert response.status_code == 200
assert "<h3>ReproHack History</h3>" in response.content.decode()
def test_create_review(client: Client, user: User, review: Review) -> None:
"""Test creating a review."""
    # Test that the reviewer hasn't been set for a generated test paper review
assert user not in review.reviewers.all()
# Create a new review from similar data to test setting author
review_dict = model_to_dict(review)
client.force_login(user)
response = client.post(reverse("review_new"), review_dict, follow=True)
assert response.status_code == 200
rendered_response = response.render()
assert review.paper.title in rendered_response.content.decode()
# Test reviewer is now set for newly created paper review
assert user in review.paper.review_set.last().reviewers.all()
    # Test that the reviewer still hasn't been set on the initial review
assert user not in review.reviewers.all()
```
#### File: reprohack_hub/users/forms.py
```python
from django.contrib.auth import forms, get_user_model
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
User = get_user_model()
class UserChangeForm(forms.UserChangeForm):
class Meta(forms.UserChangeForm.Meta):
model = User
class UserCreationForm(forms.UserCreationForm):
error_message = forms.UserCreationForm.error_messages.update(
{"duplicate_username": _("This username has already been taken.")}
)
class Meta(forms.UserCreationForm.Meta):
fields = ('username', 'email', '<PASSWORD>', '<PASSWORD>')
model = User
def clean_username(self):
username = self.cleaned_data["username"]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise ValidationError(self.error_messages["duplicate_username"])
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_method = 'post'
self.helper.add_input(Submit('submit', 'Sign up'))
``` |
{
"source": "joehellmers/Castro",
"score": 3
} |
#### File: Castro/Docs/add_doxy_headers.py
```python
import re
import sys
def make_class_header(class_name, description):
# remove // from description
description = re.sub(r"//", "", description).strip()
description = re.sub(r"\n[ ]*", "\n/// ", description)
class_name = re.sub(r"{", "", class_name).strip()
class_name = class_name.split(':')[0].strip()
boilerplate = f"""
///
/// @class {class_name}
///
/// @brief {description}
///"""
return boilerplate
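# Illustrative call (hypothetical input): make_class_header("Castro : public AmrLevel {",
# "// Main driver class") yields a comment block containing "/// @class Castro" and
# "/// @brief Main driver class".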
def make_method_header(description="", parameters=[]):
# remove // from description
description = re.sub(r"//", "", description).strip()
description = re.sub(r"\n[ ]*", "\n/// ", description)
boilerplate = ""
if description != "":
boilerplate += f"""
///
/// {description}
///
"""
elif parameters != []:
boilerplate += """
///
"""
if parameters != []:
for param in parameters:
boilerplate += f"""/// @param {(param.split('=')[0].strip()).split(' ')[-1]}
"""
boilerplate += r"""///
"""
return boilerplate
def make_method_doxycomment(description=""):
# remove // from description
description = re.sub(r"//", "", description).strip()
description = re.sub(r"\n[ ]*", "\n/// ", description)
if description == "":
return ""
else:
return f"""
///
/// @note
/// {description}
///
"""
def make_variable_docstring(description):
description = re.sub(r"//", "", description).strip()
description = re.sub(r"\n[ ]*", "\n/// ", description)
if description == "":
return ""
else:
return f"""
///
/// {description}
///
"""
def process_header_file(filename):
output_data = ""
# find comments in lines above
re_comments = re.compile(
r"[ \t]*\/\/\s*\n[ \t]*(\/\/[ \t]*[\S ^\n]*?)\n[ \t]*\/\/")
with open(filename) as input_file:
data = input_file.read()
# find classes
re_class_name = re.compile(r"\n[^\n\s]*class ([^\n]+)")
last_index = 0
for m in re.finditer(re_class_name, data):
comments = None
for comments in re.finditer(re_comments,
data[last_index:m.start()]):
pass
if comments and (m.start() - comments.end() - last_index) < 2:
output_data += data[last_index:last_index + comments.start()]
class_header = make_class_header(m.group(1), comments.group(1))
else:
output_data += data[last_index:m.start()]
class_header = make_class_header(m.group(1), "")
output_data += class_header
last_index = m.start()
output_data += data[last_index:]
data = output_data
output_data = ""
last_index = 0
re_prototype = re.compile(
r"(?:^[\w&:*\t ]+\n)*^[ \t]*[~\w:*& <>]+\(([*\w\: \,&\n\t_=\<>\-.]*)\)", flags=re.MULTILINE)
# markup methods
for m in re.finditer(re_prototype, data):
# print("match = ", m.group(1))
parameters = m.group(1).split(",")
parameters = [param.strip() for param in parameters]
parameters = [param for param in parameters if param != ""]
comments = None
for comments in re.finditer(re_comments,
data[last_index:m.start()]):
pass
if comments and (m.start() - comments.end() - last_index) < 2:
# print(comments.span())
output_data += data[last_index:last_index + comments.start()]
method_header = make_method_header(comments.group(1), parameters)
last_index = m.start()
else:
output_data += data[last_index:m.start()]
method_header = make_method_header("", parameters)
last_index = m.start()
output_data += method_header
output_data += data[last_index:]
data = output_data
output_data = ""
last_index = 0
re_comments = re.compile(
r"^[ \t]*(\/\/[ \t]*[\S \n]*?)\n^(?![ \t]*\/\/)", flags=re.MULTILINE)
re_variable = re.compile(
r"^[ \t]*[~\w:*& <>\[\]]+;", flags=re.MULTILINE)
# markup variables
for m in re.finditer(re_variable, data):
# print("match =", m.group(0))
if " return " in m.group(0):
continue
comments = None
for comments in re.finditer(re_comments,
data[last_index:m.start() + 1]):
pass
# print(data[last_index:m.start()-1])
# if comments:
# print(comments.group(0), m.start(), comments.end()+last_index)
if comments and (m.start() - comments.end() - last_index) < 1:
output_data += data[last_index:last_index + comments.start()]
variable_header = make_variable_docstring(comments.group(1))
output_data += variable_header
last_index = m.start()
else:
output_data += data[last_index:m.start()]
last_index = m.start()
output_data += data[last_index:]
output_filename = filename + ".doxygen"
# print(output_data)
with open(output_filename, 'w+') as output_file:
output_file.write(output_data)
def process_cpp_file(filename):
output_data = ""
# find comments in lines above
re_comments = re.compile(
r"[ \t]*(\/\/[ \t]*[\S ^\n]*?)\n^[ \t]*[^\/]", flags=re.MULTILINE)
re_prototype = re.compile(
r"^\w*\n^\w[~\w:*& ]+\([\w\: \,&\n\t_=<>.]*\)\n?[\s\S]*?\n?{", flags=re.MULTILINE)
with open(filename) as input_file:
data = input_file.read()
last_index = 0
for m in re.finditer(re_prototype, data):
comments = None
for comments in re.finditer(re_comments,
data[last_index:m.start() + 1]):
pass
if comments and (m.start() - comments.end() - last_index) < 3:
output_data += data[last_index:last_index + comments.start()]
method_header = make_method_doxycomment(comments.group(1))
last_index = m.start()
else:
output_data += data[last_index:m.start()]
method_header = make_method_doxycomment("")
last_index = m.start()
output_data += method_header
output_data += data[last_index:]
output_filename = filename + ".doxygen"
with open(output_filename, 'w+') as output_file:
output_file.write(output_data)
if __name__ == "__main__":
filename = sys.argv[1]
if filename[-2:] == ".H":
process_header_file(filename)
elif filename[-4:] == ".cpp":
process_cpp_file(filename)
```
#### File: Castro/Docs/preprocess_files.py
```python
import os
import re
# directory of the source files
rootdir = "../Source"
outdir = "source/preprocessed_files"
def strip_directives(filename, filepath, outpath):
"""
Read in file, remove all preprocessor directives and output.
This is also going to switch square brackets initializing arrays to
parentheses and remove the new-line characters in these so sphinx
fortran is happy.
"""
with open(os.path.join(filepath, filename)) as infile:
txt = infile.read()
outtxt = re.sub(r"(^#.*$\n)", '', txt, flags=re.M)
outtxt = re.sub(r"(&\n)\s*", '', outtxt)
outtxt = re.sub(r"\[", r"(\\", outtxt)
outtxt = re.sub(r"\]", r'\\)', outtxt)
with open(os.path.join(outpath, filename), 'w') as outfile:
outfile.write(outtxt)
if __name__ == "__main__":
# make the output directory if it does not exist
if not os.path.exists(outdir):
os.makedirs(outdir)
# loop over source dir
for subdir in sorted(os.listdir(rootdir)):
if not os.path.isdir(os.path.join(rootdir, subdir)):
continue
# loop over files in subdirectories and run strip_directives on all
# C++ header files
for f in sorted(os.listdir(os.path.join(rootdir, subdir))):
if (f[-2:] == ".H"):
strip_directives(f, os.path.join(rootdir, subdir), outdir)
```
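As a quick illustration of what `strip_directives` does, the self-contained sketch below applies the same substitutions to a made-up Fortran-style snippet (the sample text is illustrative, not taken from the Castro sources).
```python
import re

# Made-up Fortran-style snippet: a preprocessor guard, a '&' continuation
# line, and a square-bracket array constructor.
txt = """#ifdef AMREX_USE_CUDA
real(rt) :: q(&
     1:3)
q = [1.0_rt, 2.0_rt, 3.0_rt]
#endif
"""

out = re.sub(r"(^#.*$\n)", '', txt, flags=re.M)  # drop preprocessor lines
out = re.sub(r"(&\n)\s*", '', out)               # join '&' continuation lines
out = re.sub(r"\[", r"(\\", out)                 # '[' becomes '(\'
out = re.sub(r"\]", r'\\)', out)                 # ']' becomes '\)'
print(out)
# real(rt) :: q(1:3)
# q = (\1.0_rt, 2.0_rt, 3.0_rt\)
```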
#### File: massive_star/analysis/initial_model.py
```python
import numpy as np
import matplotlib.pyplot as plt
def find_r_for_rho(r, rho, rho_want):
idx = np.where(rho < rho_want)[0][0]
return r[idx]
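# Worked example (made-up numbers): with r = np.array([1.0, 2.0, 3.0, 4.0])
# and rho = np.array([10.0, 5.0, 2.0, 1.0]), find_r_for_rho(r, rho, 3.0)
# returns 3.0 -- the radius of the first zone whose density falls below 3.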
#file = "../15m_500_sec.aprox19.hse.6400"
file = "../15m_500_sec.aprox19.hse.20.0km"
Lx = 1.6384e10
data = np.loadtxt(file)
print(data.shape)
# now manually read to get the variable names
# the first column is position
names = ["r"]
with open(file) as f:
for n, line in enumerate(f):
if line.startswith("# num"):
continue
if line.startswith("# npts"):
continue
if not line.startswith("#"):
break
names.append(line.split()[-1].strip())
# now make plots
idens = names.index("density")
itemp = names.index("temperature")
iye = names.index("Ye")
fig = plt.figure()
ax = fig.add_subplot(211)
l1 = ax.plot(data[:,0], data[:,idens], label="density")
l2 = ax.plot(data[:,0], data[:,itemp], label="temperature")
# show where the refinement kicks in
rho_refine = 1.e4
r_refine = find_r_for_rho(data[:,0], data[:,idens], rho_refine)
print(r_refine)
ax.axvline(r_refine, color="0.25", ls=":")
ax.axvline(Lx, color="0.25", ls="-")
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel("r [cm]")
ax.set_ylabel(r"$\rho~[\rm{g/cm^3}]$, $T~[K]$")
ax2 = ax.twinx()
ax2.set_ylabel(r"$Y_e$")
l3 = ax2.plot(data[:,0], data[:,iye], color="C2", label="Ye")
lns = l1 + l2 + l3
labs = [l.get_label() for l in lns]
ax.legend(lns, labs, frameon=False)
ax = fig.add_subplot(212)
threshold = 0.1
for n, var in enumerate(names):
print(n, var)
if var in ["r", "density", "temperature", "pressure", "Ye"]:
continue
Xmax = data[:,n].max()
if Xmax > threshold:
if Xmax > 0.5:
lw = 2
else:
lw = 1
ax.plot(data[:,0], data[:,n], label=var, lw=lw)
ax.axvline(r_refine, color="0.25", ls=":")
ax.axvline(Lx, color="0.25", ls="-")
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel("r [cm]")
ax.set_ylabel("mass fraction")
ax.legend(frameon=False, fontsize="small", ncol=2)
fig.set_size_inches((8, 12))
fig.tight_layout()
fig.savefig("initial_model.png")
``` |
{
"source": "joehendrix/pyre-check",
"score": 2
} |
#### File: client/tests/pyre_test.py
```python
import tempfile
from pathlib import Path
import testslide
from .. import (
command_arguments,
configuration,
pyre,
recently_used_configurations,
)
from .setup import (
ensure_directories_exists,
switch_working_directory,
write_configuration_file,
)
class CreateConfigurationWithRetryTest(testslide.TestCase):
def test_create_configuration_with_retry_no_retry(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
ensure_directories_exists(root_path, [".pyre", "local"])
write_configuration_file(root_path, {})
with switch_working_directory(root_path):
try:
pyre._create_configuration_with_retry(
command_arguments.CommandArguments(
local_configuration=None,
source_directories=["."],
dot_pyre_directory=Path(".pyre"),
),
base_directory=Path(root),
)
except configuration.InvalidConfiguration:
self.fail("Unexpected InvalidConfiguration failure!")
def test_create_configuration_with_retry__no_recent_configuration(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
ensure_directories_exists(root_path, [".pyre", "local"])
write_configuration_file(root_path, {})
with switch_working_directory(root_path):
with self.assertRaises(configuration.InvalidConfiguration):
pyre._create_configuration_with_retry(
command_arguments.CommandArguments(
local_configuration=None,
source_directories=[],
dot_pyre_directory=Path(".pyre"),
),
base_directory=Path(root),
)
def test_create_configuration_with_retry__success(self) -> None:
self.mock_callable(
recently_used_configurations, "prompt_user_for_local_root"
).to_return_value("local").and_assert_called_once()
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
ensure_directories_exists(root_path, [".pyre", "local"])
write_configuration_file(root_path, {})
write_configuration_file(
root_path, {"source_directories": ["."]}, relative="local"
)
recently_used_configurations.Cache(root_path / ".pyre").put("local")
with switch_working_directory(root_path):
test_configuration = pyre._create_configuration_with_retry(
command_arguments.CommandArguments(
local_configuration=None,
source_directories=[],
dot_pyre_directory=Path(".pyre"),
),
base_directory=Path(root),
)
self.assertEqual(
test_configuration.local_root, str(root_path / "local")
)
def test_create_configuration_with_retry__fail(self) -> None:
self.mock_callable(
recently_used_configurations, "prompt_user_for_local_root"
).to_return_value("local2").and_assert_called_once()
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
ensure_directories_exists(root_path, [".pyre", "local", "local2"])
write_configuration_file(root_path, {})
write_configuration_file(
root_path, {"source_directories": ["."]}, relative="local"
)
recently_used_configurations.Cache(root_path / ".pyre").put("local2")
with switch_working_directory(root_path):
with self.assertRaises(configuration.InvalidConfiguration):
pyre._create_configuration_with_retry(
command_arguments.CommandArguments(
local_configuration=None,
source_directories=[],
dot_pyre_directory=Path(".pyre"),
),
base_directory=Path(root),
)
def test_create_configuration_with_retry__invalid_user_input(self) -> None:
self.mock_callable(
recently_used_configurations, "prompt_user_for_local_root"
).to_return_value(None).and_assert_called_once()
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
ensure_directories_exists(root_path, [".pyre", "local"])
write_configuration_file(root_path, {})
write_configuration_file(
root_path, {"source_directories": ["."]}, relative="local"
)
recently_used_configurations.Cache(root_path / ".pyre").put("local")
with switch_working_directory(root_path):
with self.assertRaises(configuration.InvalidConfiguration):
pyre._create_configuration_with_retry(
command_arguments.CommandArguments(
local_configuration=None,
source_directories=[],
dot_pyre_directory=Path(".pyre"),
),
base_directory=Path(root),
)
```
#### File: documentation/deliberately_vulnerable_flask_app/app.py
```python
import sqlite3
import subprocess
import flask
import requests
from flask import Flask, render_template
from lxml import etree
app = Flask(__name__)
@app.route("/rce/<string:payload>")
def definite_rce(payload: str) -> None:
subprocess.run(payload, shell=True)
@app.route("/rce/<string:payload>")
def potential_rce_1(payload: str) -> None:
subprocess.run(["echo", payload])
@app.route("/rce/<int:payload>")
def potential_rce_2(payload: int) -> None:
subprocess.run(f"echo {payload}", shell=True)
@app.route("/pt/<string:payload>")
def definite_pt(payload: str) -> str:
f = open(payload, "r")
text = f.read()
return text
@app.route("/xss/<string:payload>")
def definite_xss(payload: str) -> None:
content = flask.Markup(payload)
return render_template(content)
@app.route("/sql/<string:payload>")
def definite_sql(payload: str) -> None:
con = sqlite3.connect()
cur = con.cursor()
cur.execute(f"SELECT info FROM users WHERE name={payload}")
@app.route("/ssrf/<string:payload>")
def definite_ssrf(payload: str) -> None:
requests.get(payload)
@app.route("/xxe/<string:payload>")
def definite_xxe(payload: str) -> None:
etree.fromstring(payload)
```
#### File: pysa_tutorial/exercise2/views.py
```python
import subprocess
from django.http import HttpRequest, HttpResponse
def operate_on_twos(request: HttpRequest) -> HttpResponse:
operator = request.POST["operator"]
result = eval(f"2 {operator} 2") # noqa: P204
return result
def operate_on_threes(request: HttpRequest) -> HttpResponse:
operator = request.GET["operator"]
exec(f"result = 3 {operator} 3")
return result # noqa: F821
def operate_on_fours(request: HttpRequest) -> HttpResponse:
operator = request.GET["operator"]
result = subprocess.getoutput(f"expr 4 {operator} 4")
return result
```
#### File: test/integration/multi_sink_ports.py
```python
def source():
pass
def sinkA(x):
pass
def sinkB(x):
pass
def sinkC(x):
pass
def sinkD(x):
pass
def split(x):
y = x._params
sinkB(y)
sinkC(y)
sinkD(y)
return x
def wrapper(x):
y = split(x)
sinkA(y)
def issue():
x = source()
wrapper(x)
def splitwrapper(x):
return split(x)
class QueryBase:
def send(self):
pass
class Query(QueryBase):
_params = None
def send(self):
return splitwrapper(self)
def params(self, data):
self._params = data
return self
def log_call(params, response):
sinkA(params)
sinkA(response)
def wrapper2(x: Query):
params = x._params
response = None
try:
response = x.send()
except Exception as ex:
raise ex
log_call(params, response)
def issue2():
taint = source()
query = Query().params(taint)
wrapper2(query)
```
#### File: test/integration/taint_in_taint_out.py
```python
from builtins import _test_sink, _test_source
from typing import Dict, List, Tuple
def some_service(id):
...
def _unpack(tuple):
...
class DataRecord:
...
class Data:
def __init__(self, a, b):
self.a = a
self.b = b
def get_data(x):
return {"name": x, "id": x}
def product_data(x):
data = get_data(x)
if x:
parent = product_data(x.parent)
else:
parent = None
is_blocked = some_service(data.id)
report_tuple = DataRecord(id=data.id, username=data.name, isBlocked=is_blocked)
return {
"report": _unpack(report_tuple),
"id": data.id,
"parent_data": parent,
"name": data.name,
}
def product_data_wrapper(x):
return product_data(x)
def tito():
return product_data_wrapper(_test_source())
def via_getattr(x, y):
return getattr(x, "foo", y)
class Recursive:
def __init__(self, select):
self.init_list = [
f[0].target.attname for f in select[self.cols_start : self.cols_end]
]
self.related = get_related(select)
def get_related(select):
return Recursive(select)
class FieldIsTITO:
add_tito: int = 1
def adds_tito(x: FieldIsTITO) -> int:
return x.add_tito
class InheritsFromTITO(FieldIsTITO):
pass
def adds_tito_inherited(x: InheritsFromTITO) -> int:
return x.add_tito
def adds_tito_with_indirect_sink(src: FieldIsTITO) -> None:
indirect_sink(src)
def indirect_sink(x: FieldIsTITO) -> None:
_test_sink(x.add_tito)
def issue_with_indirect_sink_tito():
x = _test_source()
adds_tito_with_indirect_sink(x)
def approximate_return_access_paths(x):
return {
"a": x.a,
"b": x.b,
"c": x.c,
"d": x.d,
"e": x.e,
"f": x.f,
"g": x.g,
"h": x.h,
"j": x.j,
"k": x.k,
"l": x.l,
}
def approximate_return_access_paths_common_prefix_input(x):
return {
"a": x.y.a,
"b": x.y.b,
"c": x.y.c,
"d": x.y.d,
"e": x.y.e,
"f": x.y.f,
"g": x.y.g,
"h": x.y.h,
"j": x.y.j,
"k": x.y.k,
"l": x.y.l,
}
def approximate_return_access_paths_common_prefix_output(x):
return {
"a": {
"a": x.a,
"b": x.b,
"c": x.c,
"d": x.d,
"e": x.e,
"f": x.f,
"g": x.g,
"h": x.h,
"j": x.j,
"k": x.k,
"l": x.l,
}
}
async def return_taint(tainted: str, b1: str, b2: str) -> Tuple[str, str, str]:
return tainted, b1, b2
async def test_tuple_tito_indices():
tainted, b1, b2 = await return_taint(_test_source(), "", "")
_test_sink(b2)
def return_taint_in_list(tainted: str, a: str, b: str) -> List[str]:
return [tainted, a, b]
def add_feature(arg):
return arg
def tito_with_feature(arg):
if arg:
return arg
else:
return add_feature(arg)
def test_always_via_feature():
_test_sink(tito_with_feature(_test_source()))
# Test TITO through explicit super.
class GetQuery:
def __init__(self, arg):
self.arg = arg
class GetUser(GetQuery):
def __init__(self, arg):
GetQuery.__init__(self, arg)
def test_explicit_call_to_superclass():
user = GetUser(_test_source())
_test_sink(user.arg)
def evaluate_lazy(payload: Dict[str, str]):
return {key: value for key, value in payload.items()}
def test_simplified_evaluator():
_test_sink(evaluate_lazy(_test_source()))
class ComplexEvaluator:
def evaluate_lazy_field(self, field):
if callable(field):
return field()
else:
return field
def evaluate_lazy_payload(self, payload):
def _evaluate(field):
if isinstance(field, dict):
return self.evaluate_lazy_payload(field)
return self.evaluate_lazy_field(field)
return {key: _evaluate(value) for key, value in payload.items()}
def test_complex_evaluator(evaluator: ComplexEvaluator):
_test_sink(evaluator.evaluate_lazy_payload(_test_source()))
```
#### File: test/integration/typed_dict.py
```python
from builtins import _test_sink, _test_source
from typing import TypedDict
class Foo(TypedDict):
a: int
b: int
class Bar(TypedDict):
other: int
foo: Foo
def test1():
bar: Bar = _test_source()
_test_sink(bar["other"])
def test2():
bar: Bar = _test_source()
# TODO(T81192268): this should not trigger an issue.
_test_sink(bar["foo"]["a"])
def test3():
bar: Bar = _test_source()
_test_sink(bar["foo"]["b"])
```
#### File: stubs/integration_test/decorator.py
```python
from typing import Callable
from django.http import HttpRequest
from .logging_decorator import with_logging_with_helper, with_logging_without_helper
def with_logging(f: Callable[[str], None]) -> Callable[[str], None]:
def inner(x: str) -> None:
eval(x)
f(x)
return inner
@with_logging
def foo(x: str) -> None:
print(x)
@with_logging_with_helper
@with_logging_without_helper
def foo2(x: int) -> None:
eval(x)
def bar(request: HttpRequest) -> None:
foo(request.GET["bad"])
foo2(request.GET["bad"])
```
#### File: stubs/integration_test/lru_cache_test.py
```python
from functools import lru_cache
from django.http import HttpRequest
@lru_cache
def cached_sanitizer(x):
return x
def test_cached_sanitizer(request: HttpRequest) -> None:
sanitized = cached_sanitizer(request.GET["bad"])
eval(sanitized)
```
#### File: tools/generate_taint_models/decorator_parser.py
```python
import ast
import logging
from dataclasses import dataclass
from typing import List, Optional, Set, Tuple, Union, cast
from typing_extensions import Final
FunctionDefinition = Union[ast.FunctionDef, ast.AsyncFunctionDef]
LOG: logging.Logger = logging.getLogger(__name__)
@dataclass
class Decorator:
name: str
arguments: Final[Optional[Set[str]]] = None
keywords: Final[Optional[Set[Tuple[Optional[str], str]]]] = None
class DecoratorParser:
def __init__(self, unparsed_target_decorators: str) -> None:
self._unparsed_target_decorators: str = unparsed_target_decorators
self._target_decorators: Optional[List[Decorator]] = None
@property
def target_decorators(self) -> List[Decorator]:
target_decorators = self._target_decorators
if target_decorators is None:
target_decorators = self._parse_target_decorators(
self._unparsed_target_decorators
)
self._target_decorators = target_decorators
return target_decorators
def function_matches_target_decorators(self, node: FunctionDefinition) -> bool:
## TODO T58744796: In the future, change this to support
## filtering on multiple decorators.
target_decorator: Decorator = self.target_decorators[0]
for decorator in node.decorator_list:
node_decorator = self._parse_decorator(
cast(Union[ast.Name, ast.Call, ast.Attribute], decorator)
)
# if the target decorator has args / kwargs, the node decorator
# must also have them
if (
target_decorator.name == node_decorator.name
and (
not target_decorator.arguments
or (
node_decorator.arguments
and target_decorator.arguments.issubset(
node_decorator.arguments
)
)
)
and (
not target_decorator.keywords
or (
node_decorator.keywords
and target_decorator.keywords.issubset(node_decorator.keywords)
)
)
):
return True
return False
def _resolve_decorator_func_name(self, func: ast.expr) -> str:
if isinstance(func, ast.Name):
return func.id
func = cast(ast.Attribute, func)
return self._resolve_decorator_func_name(func.value) + "." + func.attr
def _parse_decorator(
self, decorator: Union[ast.Name, ast.Call, ast.Attribute]
) -> Decorator:
# decorator does not have args or kwargs
if isinstance(decorator, ast.Name) or isinstance(decorator, ast.Attribute):
return Decorator(self._resolve_decorator_func_name(decorator), set(), set())
# decorator does have args and / or kwargs
decorator_name = self._resolve_decorator_func_name(decorator.func)
decorator_arguments = {
argument.s for argument in decorator.args if isinstance(argument, ast.Str)
}
decorator_keywords = {
# pyre-fixme[22]: The cast is redundant.
(keyword.arg, cast(ast.Str, keyword.value).s)
for keyword in decorator.keywords
if isinstance(keyword.value, ast.Str)
}
return Decorator(
decorator_name,
decorator_arguments,
decorator_keywords,
)
def _parse_target_decorators(self, target_decorator: str) -> List[Decorator]:
"""
Responsible for parsing the target decorator to extract the
decorator name, named and unnamed attributes.
"""
# We need to create a well formed decorator so we attach a bogus
# function to the decorators.
well_formed_decorator = target_decorator + """\ndef foo(): ..."""
try:
parsed_ast = ast.parse(well_formed_decorator)
except SyntaxError as error:
LOG.error(f"Can't parse `{well_formed_decorator}`.")
raise error
function_definition = parsed_ast.body[0]
if not isinstance(function_definition, ast.FunctionDef):
return []
decorator_list = function_definition.decorator_list
if len(decorator_list) < 1:
LOG.error("No target decorators were specified.")
raise Exception("No target decorators were specified.")
return [
self._parse_decorator(
cast(Union[ast.Name, ast.Call, ast.Attribute], decorator)
)
for decorator in decorator_list
]
```
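For context, here is a minimal usage sketch of the `target_decorators` property; the import path and the decorator string are illustrative, not taken from the Pyre tooling configuration (in the real tree the module lives under tools/generate_taint_models/).
```python
# Assumes the module is importable under this name for the sake of the sketch.
from decorator_parser import DecoratorParser

parser = DecoratorParser('@app.route("/users", methods=["GET"])')
print(parser.target_decorators)
# Roughly: [Decorator(name='app.route', arguments={'/users'}, keywords=set())]
# The methods=["GET"] keyword is dropped because its value is not a string literal.
```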
#### File: tools/generate_taint_models/get_constructor_initialized_attribute_sources.py
```python
import logging
from typing import Callable, Iterable, List, Optional, Type, TypeVar
from ...api import query
from ...api.connection import PyreConnection
from .constructor_generator import gather_all_constructors_in_hierarchy
from .inspect_parser import extract_parameters, extract_qualified_name
from .model import AssignmentModel
from .model_generator import ModelGenerator
LOG: logging.Logger = logging.getLogger(__name__)
T = TypeVar("T")
BATCH_SIZE = 200
class ConstructorInitializedAttributeSourceGenerator(ModelGenerator[AssignmentModel]):
"""
This Generator will taint the attributes initialized by the constructors of
'classes_to_taint' and their descendants. Only descendants that have had
their modules loaded at preprocessing time will be tainted. Models are
generated on a best effort basis by assuming the name of the parameter will
match the name of the attribute it is assigned to. This naive approach means
this model generator will likely generate some invalid models.
"""
def __init__(
self,
classes_to_taint: List[str],
pyre_connection: PyreConnection,
filter_classes_by: Optional[Callable[[Type[T]], bool]] = None,
taint_annotation: str = "TaintSource[UserControlled]",
) -> None:
self.classes_to_taint: List[str] = classes_to_taint
self.pyre_connection = pyre_connection
self.filter_classes_by = filter_classes_by
self.taint_annotation: str = taint_annotation
def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
return gather_all_constructors_in_hierarchy(
self.classes_to_taint, self.filter_classes_by
)
def compute_models(
self, functions_to_model: Iterable[Callable[..., object]]
) -> Iterable[AssignmentModel]:
constructors = {}
for constructor in functions_to_model:
qualified_name = extract_qualified_name(constructor)
if not qualified_name:
continue
# Strip off __init__ and append the parameter name as an attribute
# name.
class_name = ".".join(qualified_name.split(".")[:-1])
constructors[class_name] = constructor
attributes_map = query.get_attributes(
self.pyre_connection, constructors.keys(), BATCH_SIZE
)
for class_name, constructor in constructors.items():
attributes = {attribute.name for attribute in attributes_map[class_name]}
parameters = extract_parameters(constructor)
for parameter in parameters:
# Skip 'self', and attributes that are callables
if parameter.name == "self" or (
"Callable[" in (parameter.annotation or "")
or "Coroutine[" in (parameter.annotation or "")
):
continue
if parameter.name in attributes:
# If a parameter is a valid attribute, add a taint model.
target = f"{class_name}.{parameter.name}"
yield AssignmentModel(
target=target, annotation=self.taint_annotation
)
if "_" + parameter.name in attributes:
# Same as above, but parameters might be prefixed with an
# underscore to indicate a private attribute.
target = f"{class_name}._{parameter.name}"
yield AssignmentModel(
target=target, annotation=self.taint_annotation
)
```
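To make the best-effort name matching concrete, the sketch below shows a hypothetical class and the assignment models the generator would emit for it; the class and module names are invented for illustration.
```python
# Hypothetical input class living in a module named my_app:
class Request:
    def __init__(self, user_id: int, token: str) -> None:
        self.user_id = user_id
        self._token = token

# Because each constructor parameter matches an attribute name (directly or
# with a leading underscore), the generator would yield models equivalent to:
#   AssignmentModel(target="my_app.Request.user_id",
#                   annotation="TaintSource[UserControlled]")
#   AssignmentModel(target="my_app.Request._token",
#                   annotation="TaintSource[UserControlled]")
```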
#### File: generate_taint_models/tests/get_exit_nodes_test.py
```python
import unittest
from unittest.mock import MagicMock
from ..get_exit_nodes import ExitNodeGenerator
from .test_functions import __name__ as qualifier, all_functions
class GetExitNodesTest(unittest.TestCase):
def test_compute_models(self) -> None:
self.maxDiff = None
sink = "TaintSink[ReturnedToUser]"
self.assertEqual(
[
*map(
str,
ExitNodeGenerator(django_urls=MagicMock()).compute_models(
all_functions
),
)
],
[
f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...",
f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...",
f"def {qualifier}.testA() -> {sink}: ...",
f"def {qualifier}.testB(x) -> {sink}: ...",
f"def {qualifier}.testC(x) -> {sink}: ...",
f"def {qualifier}.testD(x, *args) -> {sink}: ...",
f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...",
],
)
self.assertEqual(
[
*map(
str,
ExitNodeGenerator(
django_urls=MagicMock(),
whitelisted_views=[f"{qualifier}.TestClass.methodA"],
).compute_models(all_functions),
)
],
[
f"def {qualifier}.TestClass.methodB(self, *args) -> {sink}: ...",
f"def {qualifier}.testA() -> {sink}: ...",
f"def {qualifier}.testB(x) -> {sink}: ...",
f"def {qualifier}.testC(x) -> {sink}: ...",
f"def {qualifier}.testD(x, *args) -> {sink}: ...",
f"def {qualifier}.testE(x, **kwargs) -> {sink}: ...",
],
)
```
#### File: generate_taint_models/tests/get_REST_api_sources_test.py
```python
import unittest
from unittest.mock import MagicMock
from ..generator_specifications import (
AllParametersAnnotation,
AnnotationSpecification,
WhitelistSpecification,
default_entrypoint_taint,
)
from ..get_REST_api_sources import RESTApiSourceGenerator
from .test_functions import __name__ as qualifier, all_functions
class GetRESTApiSourcesTest(unittest.TestCase):
def test_compute_models(self) -> None:
# Test with default arguments
source = "TaintSource[UserControlled]"
sink = default_entrypoint_taint.returns
self.assertEqual(
[
*map(
str,
RESTApiSourceGenerator(django_urls=MagicMock()).compute_models(
all_functions
),
)
],
[
f"def {qualifier}.TestClass.methodA(self, x: {source})"
f" -> {sink}: ...",
f"def {qualifier}.TestClass.methodB(self, *args: {source})"
f" -> {sink}: ...",
f"def {qualifier}.testA() -> {sink}: ...",
f"def {qualifier}.testB(x: {source}) -> {sink}: ...",
f"def {qualifier}.testC(x: {source}) -> {sink}: ...",
f"def {qualifier}.testD(x: {source}, *args: {source})"
f" -> {sink}: ...",
f"def {qualifier}.testE(x: {source}, **kwargs: {source})"
f" -> {sink}: ...",
],
)
# Test with view whitelisting
self.assertEqual(
[
*map(
str,
RESTApiSourceGenerator(
django_urls=MagicMock(),
whitelisted_views=[f"{qualifier}.testA"],
).compute_models(all_functions),
)
],
[
f"def {qualifier}.TestClass.methodA(self, x: {source})"
f" -> {sink}: ...",
f"def {qualifier}.TestClass.methodB(self, *args: {source})"
f" -> {sink}: ...",
f"def {qualifier}.testB(x: {source}) -> {sink}: ...",
f"def {qualifier}.testC(x: {source}) -> {sink}: ...",
f"def {qualifier}.testD(x: {source}, *args: {source})"
f" -> {sink}: ...",
f"def {qualifier}.testE(x: {source}, **kwargs: {source})"
f" -> {sink}: ...",
],
)
# Test with AnnotationSpecification
self.assertEqual(
[
*map(
str,
RESTApiSourceGenerator(
django_urls=MagicMock(),
annotations=AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="VarArg", kwarg="KWArg"
),
returns="Returns",
),
).compute_models(all_functions),
)
],
[
f"def {qualifier}.TestClass.methodA(self, x: Arg) -> Returns: ...",
f"def {qualifier}.TestClass.methodB(self, *args: VarArg)"
" -> Returns: ...",
f"def {qualifier}.testA() -> Returns: ...",
f"def {qualifier}.testB(x: Arg) -> Returns: ...",
f"def {qualifier}.testC(x: Arg) -> Returns: ...",
f"def {qualifier}.testD(x: Arg, *args: VarArg) -> Returns: ...",
f"def {qualifier}.testE(x: Arg, **kwargs: KWArg) -> Returns: ...",
],
)
# Test with WhitelistSpecification
self.assertEqual(
[
*map(
str,
RESTApiSourceGenerator(
django_urls=MagicMock(),
whitelisted_parameters=WhitelistSpecification(
parameter_name={"self"}, parameter_type={"int"}
),
).compute_models(all_functions),
)
],
[
f"def {qualifier}.TestClass.methodA(self, x) -> {sink}: ...",
f"def {qualifier}.TestClass.methodB(self, *args: {source})"
f" -> {sink}: ...",
f"def {qualifier}.testA() -> {sink}: ...",
f"def {qualifier}.testB(x: {source}) -> {sink}: ...",
f"def {qualifier}.testC(x) -> {sink}: ...",
f"def {qualifier}.testD(x, *args) -> {sink}: ...",
f"def {qualifier}.testE(x, **kwargs: {source}) -> {sink}: ...",
],
)
``` |
{
"source": "joeherold/weekly_coding_challenge_fwkwkw_python",
"score": 3
} |
#### File: weekly_coding_challenge_fwkwkw_python/Challenge_3/challenge_3.py
```python
def removeTrailingNewLine(value):
return value.rstrip("\n")
def addTrailingNewLine(value):
return value + ("\n")
def readFileAndGetAsList(pathToFile):
f = open(pathToFile, "r")
if f.mode == 'r':
contents = f.readlines()
f.close()
return list(map(removeTrailingNewLine, contents))
else:
return []
def writeListToFile(pathToFile, dataAsList=[]):
f = open(pathToFile, "w+")
    listWithNewlines = list(map(addTrailingNewLine, dataAsList))
    f.writelines(listWithNewlines)
f.close()
pathToFile = "/Users/johannespichler/Development/FHWKW/WeeklyCodingChallenge/weekly_coding_challenge_fwkwkw_python/Challenge_3/tasks.txt"
lines = readFileAndGetAsList(pathToFile)
print(lines)
lines.append("holladrio")
print(lines)
writeListToFile(pathToFile, lines)
```
#### File: weekly_coding_challenge_fwkwkw_python/Challenge_4/challange_4.py
```python
def getAbstand(p1, p2):
    # Calculate the distance between two point tuples
    # first get delta x
x1 = p1[0]
x2 = p2[0]
deltaX = x2 - x1
# then get delta y
y1 = p1[1]
y2 = p2[1]
deltaY = y2 - y1
# then we take advantage of Pythagoras a^2 + b^2 = c^2
distance = ( deltaX**2 + deltaY**2 ) ** 0.5
return distance
def getUmfang(p1, p2, p3):
# here we sum up the distances between the three points
a = getAbstand(p1, p2)
b = getAbstand(p2, p3)
c = getAbstand(p3, p1)
return (a + b + c)
def getFlaeche(p1, p2, p3):
    # area via the semiperimeter and Heron's formula
    # s = (a + b + c)/2 = perimeter / 2
a = getAbstand(p1,p2)
b = getAbstand(p2,p3)
c = getAbstand(p3,p1)
    # semiperimeter
s = (a + b + c)/2
# <NAME>: area = ( s * (s - a) * (s - b) * (s - c) )^0.5
return (s * (s-a) * (s-b) * (s-c))**0.5
def getSchwerpunkt(p1, p2, p3):
# S = (A+B+C)/3
return (
(p1[0]+p2[0]+p3[0])/3 , # x = (x1 + x2 + x3) / 3
(p1[1]+p2[1]+p3[1])/3 # y = (y1 + y2 + y3) / 3
)
# This function is meant to generalize the point input and conversion
def enterPoint(nameOfPoint):
# we wrap the input and conversion in a try block
# to handle wrong inputs...
try:
print(f"Punkt {nameOfPoint}: x,y")
pointInput = input()
arrPoint = pointInput.split(",")
return (float(arrPoint[0]), float(arrPoint[1]))
except:
print(f"Deine Eingabe {pointInput} war ungüligt. Es wird stattdessen der Punkt 0,0 verwendet")
return (0,0)
def main():
print("### Magic Triangle ###")
print("Bitte gebe 3 Punkte an, aus denen ein Dreieck gebildet wird")
A = enterPoint("A")
B = enterPoint("B")
C = enterPoint("C")
umfang = getUmfang(A,B,C)
schwerpunkt = getSchwerpunkt(A,B,C)
flaeche = getFlaeche(A,B,C)
print(f"Umfang: {round(umfang,3)}cm")
print(f"Semiperimeter (Halbumfang): {round(umfang/2,3)}cm")
print(f"Schwerpunkt: x={round(schwerpunkt[0],3)}cm, y={round(schwerpunkt[1],3)}cm")
print(f"Fläche: {round(flaeche,3)} cm^2")
main()
``` |
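A quick self-contained check of the geometry above, using a 3-4-5 right triangle (points chosen for illustration): the perimeter is 12, Heron's formula gives an area of 6, and the centroid is (1, 4/3).
```python
A, B, C = (0.0, 0.0), (3.0, 0.0), (0.0, 4.0)

def dist(p, q):
    return ((q[0] - p[0]) ** 2 + (q[1] - p[1]) ** 2) ** 0.5

a, b, c = dist(A, B), dist(B, C), dist(C, A)   # 3.0, 5.0, 4.0
s = (a + b + c) / 2                            # semiperimeter = 6.0
area = (s * (s - a) * (s - b) * (s - c)) ** 0.5
centroid = ((A[0] + B[0] + C[0]) / 3, (A[1] + B[1] + C[1]) / 3)
print(a + b + c, area, centroid)               # 12.0 6.0 (1.0, 1.3333333333333333)
```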
{
"source": "joehewitt/devon",
"score": 2
} |
#### File: devon/devon/maker.py
```python
class Maker:
def __init__(self):
self.nextMaker = None
self.previousMaker = None
def __rshift__(self, other):
self.nextMaker = other
other.previousMaker = self
return other
def filterSources(self, project, sources):
return sources
def getDependencies(self, project):
return []
def install(self, project, out, sources, target):
return 0
def printResult(self, project, out, text):
pass
class MakerOneToOne(Maker):
def needsUpdate(self, project, source, target):
return False
def getSourceTarget(self, project, source):
return ""
def printAction(self, project, out, source, targets):
pass
class MakerOneToMany(Maker):
def needsUpdate(self, project, source, target):
return False
def getSourceTargets(self, project, source, target):
return []
def printAction(self, project, out, source, target):
pass
class MakerManyToOne(Maker):
def getTarget(self, project):
return ""
def needsUpdate(self, project, target):
return False
def printAction(self, project, out, target):
pass
class Preprocessor(Maker):
def needsUpdate(self, project, source, target):
return False
def getSourceTarget(self, project, source):
return ""
def printAction(self, project, out, source, target):
pass
```
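The `__rshift__` overload above is what lets build steps be chained into a pipeline. A minimal sketch, assuming `devon.maker` is importable and using hypothetical subclass names:
```python
import devon.maker

class Compile(devon.maker.MakerOneToOne):
    pass

class Link(devon.maker.MakerManyToOne):
    pass

# __rshift__ wires nextMaker/previousMaker and returns the right-hand maker,
# so the expression below evaluates to the Link instance at the end of the chain.
pipeline = Compile() >> Link()
assert pipeline.previousMaker.nextMaker is pipeline
```
This mirrors the `buildTests = devon.builtin.CompileTestRunner() >> devon.builtin.LinkTestRunner()` default that appears in projects.py further down.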
#### File: makers/unix/compile.py
```python
import devon.makers, devon.make, devon.parsers.gcc
from devon.tags import *
import os.path, re, time
reCFiles = re.compile(r"\.(cpp|c|cxx|m|mm)$")
# **************************************************************************************************
class Compile(devon.makers.Compile):
path = "gcc"
parser = devon.parsers.gcc.GCCParser()
def getSourceTarget(self, project, source):
if source == project.pch:
return "%s.gch" % source
elif reCFiles.search(source):
target = re.sub(r"\.(cpp|c|cxx|m|mm)$", r".o", source)
return target
def build(self, project, out, source, target):
args = self.getBaseArgs(project, source, target)
if project.pedantic:
args += " -pedantic"
if len(source) >= 2 and source[-2:] == ".h":
args += " -x c++-header"
compilerPath = project.compilerPath if project.compilerPath else self.path
line = "%s %s -c -o %s %s" % (compilerPath, args, target, source)
#c1 = time.time()
result = devon.make.executeCommand(project, self, line, out)
#c2 = time.time()
#print "built %s in %f" % (source, c2-c1)
return result
def printAction(self, project, out, source, target):
out << Block("progressBox progress-build") << "Compiling " \
<< FileLink(basePath=project.path, path=source) << source << Close \
<< "..." << Close << Flush
def printResult(self, project, out, text):
if project.formatOutput:
self.parser.parse(text, project, out)
else:
out << CodeBlock << text << Close
def getBaseArgs(self, project, source, target):
# The "no-long-double" option seems to be a mac-only thing
import sys
if sys.platform == "darwin":
# Causes error on Snow Leopard
#args = "-Wno-long-double "
args = ""
else:
args = ""
if project.compilerFlags:
args += project.compilerFlags + " "
args += self.getOptimizeFlag(project)
# XXXjoe Building on the fb sandboxes without this flag causes a link error on some libs
#args += " -fPIC"
for name in vars(project.defines):
value = getattr(project.defines, name)
if value:
args += ' -D%s=%s' % (name, value)
args += " -I%s" % project.getBuildPath()
for includePath in project.getIncludePaths(True, source):
args += " -I%s" % includePath
return args
def getOptimizeFlag(self, project):
session = devon.projects.getSession()
vals = {"size": "-Os", "speed": "-O3", "full": "-O3"}
if project.optimize in vals:
return vals[project.optimize]
else:
return "-O0 -gdwarf-2 -DDEBUG -D_DEBUG"
# **************************************************************************************************
class CompileTestRunner(Compile, devon.makers.CompileTestRunner):
def filterSources(self, project, sources):
return devon.makers.CompileTestRunner.filterSources(self, project, sources)
```
#### File: devon/devon/projects.py
```python
import devon.maker
import time, glob, os.path, re, sys, types, threading
# **************************************************************************************************
projectBuiltin = None
projectCache = {} # path -> project
exportMap = {} # export -> projects
exportReverseMap = {}
projectMap = {} # name -> project
kDefaultWikiPath = "docs"
kPrecompiledHeader = "pch"
projectFileName = "project.dev"
projectDepFileName = ".project.dep"
workspaceFileName = "workspace.dev"
userFileName = "user.dev"
configFileName = "config.dev"
pchH = kPrecompiledHeader + ".h"
if sys.platform == "win32":
defaultInterfaceFlags = "/no_robust"
defaultCompilerFlags = "/EHsc /Wp64 /nologo /GR"
defaultLinkerFlags = "/NOLOGO /MACHINE:X86"
else:
defaultInterfaceFlags = ""
defaultCompilerFlags = ""
defaultLinkerFlags = ""
rePrivate = re.compile("__.*?__")
# **************************************************************************************************
# Public API
def load(projectPath = None, recurse = True):
"""Loads the project that lives in a specified directory"""
if projectPath == None:
projectPath = os.getcwd()
localProjectPath = __findProjectPath(projectPath, recurse)
if localProjectPath == None:
# Either the project path is invalid, or it's an external project. If it's external, it
# should already be in our cache from when we read in the user file.
if projectPath in projectCache:
return projectCache[projectPath]
raise Exception("%s is not a valid project path " % projectPath)
project = __importProject(localProjectPath)
return project
def loadUserProject():
""" Loads a project that contains only the contents of user.dev.
This project will not be cached, so every call will reload it."""
userFilePath = os.path.join(os.path.expanduser(devon.userPath), userFileName)
project = DevonProject("", time.time())
__mergeProject(project, "", userFilePath)
return project
def getNearestProjectPath(sourcePath):
projectPath = __findProjectPath(sourcePath)
if projectPath:
return os.path.join(projectPath, projectFileName)
def getProjectByName(name):
if name in projectMap:
return projectMap[name]
return None
def shutdownProjects():
for project in projectCache.values():
if isinstance(project, DevonProject):
project.writeDependencyFile()
def loadExternalProjects():
""" Instantiates each of the external projects found in the user file. """
path = os.path.join(os.path.expanduser(devon.userPath), userFileName)
locals = __importProjectLocals(path)
for attr in locals:
obj = locals[attr]
if not isinstance(obj, types.ClassType) \
or not issubclass(obj, ExternalProject) \
or obj is ExternalProject:
continue
# Instantiate the external project. ExternalProject's constructor will
# do everything else.
obj = obj()
# **************************************************************************************************
class ProjectBranch:
def writeBranch(self, projectLocals):
for name in projectLocals:
localValue = projectLocals[name]
if hasattr(self, name):
selfValue = getattr(self, name)
if isinstance(selfValue, ProjectBranch):
# If the member is a branch object, the project variable is expected to be
# a class whose members we copy directly onto the branch
if localValue and isinstance(localValue, types.ClassType):
selfValue.writeBranch(vars(localValue))
continue
setattr(self, name, localValue)
# **************************************************************************************************
class Project:
name = ""
def __init__(self):
self.__includePaths = None
def __post_init__(self):
pass
def __repr__(self):
return "<project %s (%s)>" % (self.name, self.path)
def getDependencies(self, deep = False, source = None, projects = None, dups = None):
return []
def getIncludePaths(self):
if self.__includePaths is None:
self.__includePaths = self.getExportPaths()
if hasattr(self, 'dependencies'):
for projectName in self.dependencies:
if projectName in exportReverseMap:
self.__includePaths += exportReverseMap[projectName]
return self.__includePaths[:]
def getSourceDependencies(self, source, deep, checkedDeps):
return []
def getAbsolutePath(self, relPath):
""" Joins the project path with the relative path and returns the absolute path. Does not
check to see if the path exists. """
return os.path.join(self.path, relPath)
def getAbsolutePaths(self, relPaths):
""" Returns a list of absolute paths given a list of relative paths.
This is just a convenience function; see getAbsolutePath() above
for the dirty work. """
return [self.getAbsolutePath(relPath) for relPath in relPaths]
def getExportPaths(self, deep = False, absPaths = True):
""" Returns a list of the project's export paths. An export path contains
files that other projects may include.
There are two kinds of export paths: the real export path, which
reflects the actual directory structure, and the virtual export
path, which is the path that other projects use. For example, a
project may export the directory /foo/shared/thread/include,
containing foopy.h, as the virtual path suade/thread, such that
clients would include suade/thread/foopy.h. Behind the scenes, virtual
paths are symlinks in the build directory. This allows us to have nice
include namespaces that we can use to look up the exporting project,
without having to muck up the directory structure.
Because the actual location of the exported files is an unnecessary
detail, this function only supplies virtual paths.
A project is not required to use virtual paths, in which case we
export the parent of the real path. In other words, if a project
exports /foo/shared/thread/Thread containing bar.h, we export
/foo/shared/thread, and a client project would include
<Thread/bar.h>.
If absPaths is True, this function returns the absolute export paths
that can be handed to the compiler as include paths. If False, it
it returns the paths the client uses, such as "suade/thread".
If deep is True, the returned list includes exported subfolders as
well. For example, if a project exports /foo/shared/thread/Thread
containing subfolder Primitives and you request client paths, this
function returns ["Thread", "Thread/Primitives"]. """
paths = []
for relRealPath in self.exports:
# Ensure that the real export path actually exists
absRealPath = self.getAbsolutePath(relRealPath)
if not os.path.exists(absRealPath):
raise OSError, "Export path %s does not exist" % absRealPath
# absIncludePath below represents the directory containing the
# exports, since that's actually the path we export. In other words,
# if a client project is to include foopy/bar/baz.h as bar/baz.h,
# we need to export foopy.
relVirtualPath = self.exports[relRealPath]
if relVirtualPath:
absIncludePath = self.getBuildPath()
linkPath = os.path.join(absIncludePath, relVirtualPath)
# XXXblake Need to check symlink target against expected target
# and recreate if they don't match; the symlink could exist,
# but point to the wrong place
if not os.path.exists(linkPath):
# All directories up until the actual link must exist
dir = os.path.dirname(linkPath)
if dir and not os.path.exists(dir):
os.makedirs(dir)
# Finally, create the actual link target to absRealPath
try:
os.symlink(absRealPath, linkPath)
except Exception,exc:
print "ERROR: unable to link", absRealPath, "to", linkPath
relIncludePath = relVirtualPath
else:
# XXXblake This isn't right. Consider libxml2, which by default installs headers to
# /usr/include/libxml2/libxml/. If a user file specifies
# exports = { "include/libxml2" : None }
# we should be exporting everything in /usr/include/libxml2, and thus
# include <libxml/*.h should work...to make this work, we have to enumerate the dirs
# in the export path and add those to our list. The code below will add libxml2 to
# our export map and export include/
absIncludePath, relIncludePath = os.path.split(absRealPath)
if absPaths:
includePath = absIncludePath
else:
includePath = relIncludePath
if not includePath in paths:
paths.append(includePath)
# If it's an external project, we grudgingly add the non-namespaced export path as well,
# since the project's own files won't use the namespace. For example, if the exported
# path is include/libxml2, we would normally export include and force clients to include
# libxml2/*.h. But since libxml2's own headers won't do that, we'll also export
# include/libxml2.
if absPaths and isinstance(self, ExternalProject):
paths.append(absRealPath)
if deep:
arpLength = len(absRealPath)
for root, dirs, files in os.walk(absRealPath):
length = len(dirs)
for i, dir in enumerate(reversed(dirs)):
if not includeSource(root, dir, False):
del dirs[length-i-1]
continue
# Get the relative path (e.g. "win")
relPath = os.path.join(root, dir)[arpLength:]
if absPaths:
includePath = os.path.join(absRealPath, relVirtualPath, relPath)
else:
includePath = os.path.join(relVirtualPath, relPath)
if not includePath in paths:
paths.append(includePath)
return paths
def makeVirtual(self, source):
""" Given an absolute or relative concrete export path, returns the
same path with symlinks if possible. """
if os.path.isabs(source):
absPaths = True
relPath = self.getRelativePath(source)
else:
absPaths = False
relPath = source
# If not, enumerate our real paths until we find which one this is
for realPath in self.exports:
virtualPath = self.exports[realPath]
if not virtualPath:
continue
if os.path.dirname(relPath) == realPath:
path = os.path.join(virtualPath, os.path.basename(source))
if absPaths:
path = self.getAbsolutePath(os.path.join(realPath, kVirtualExportDir, path))
return path
# Can't be virtualized, so return the original source.
return source
def makeConcrete(self, source):
""" Given an absolute or relative export path, returns the same path
without symlinks. This is similar to os.path.realpath(), but it's
much faster because it only works for known export paths.
Examples:
INPUT OUTPUT
<Absolute Paths>
/foo/shared/base/Suade/base/foopy.h /foo/shared/base/include/foopy.h
<Relative Paths>
Suade/base/foopy.h include/foopy.h
"""
if os.path.isabs(source):
relSrcPath = self.getRelativePath(source)
absPaths = True
else:
relSrcPath = source
absPaths = False
# Enumerate our real path -> virtual path mapping until we find the corresponding virtual
# path, then splice in the real path in its place.
for relRealPath in self.exports:
relVirtualPath = self.exports[relRealPath]
if relVirtualPath:
rvpLen = len(relVirtualPath)
# See if the source path begins with the real path. We have to check for a trailing
# slash to ensure we're actually matching directories. If we just compared rvpLen
# letters, we'd consider e.g. "DevonLog/Foopy.h" to match an export path of "Devon"
if relSrcPath[:rvpLen+1] == (relVirtualPath + "/"):
source = os.path.join(relRealPath, relSrcPath[rvpLen+1:])
if absPaths:
source = self.getAbsolutePath(source)
break
else:
# No virtual path specified. That means we're exporting the
# directory itself, so check and see if the head of the real path
# + the head of the source path exists. In other words, say the
# project exported { "foopy/bar": None } given foopy/bar/baz/blah.h
# and the source path is <bar/baz/blah.h>.
# XXXblake Not really making concrete here...
relPath = os.path.join(os.path.dirname(relRealPath), relSrcPath)
absPath = self.getAbsolutePath(relPath)
if os.path.exists(absPath):
if absPaths:
source = absPath
else:
source = relPath
break
return source
def getRelativePath(self, absPath, throw=True):
""" Extracts the project path from an absolute path and returns the relative path."""
if not absPath.lower().find(self.path.lower()) == 0:
if throw:
raise ValueError, "Supplied source is not part of this project"
return None
length = len(self.path)
if not self.path[-1] == "/":
length += 1
return absPath[length:]
__includePaths = None
class ExternalProject(Project):
def __init__(self):
""" Adds the project's exports to the global export map, and the project
itself to our global project cache. ExternalProjects are only
instantiated by loadUserSettings() in web.py """
Project.__init__(self)
if not self.path:
if sys.platform == "win32":
raise Exception("No path specified for external project ", self.name)
self.path = "/usr"
# XXXblake Doesn't seem like the right place to do this
populateExportMap(self)
projectCache[self.path] = self
projectMap[self.name] = self
def getBuildTarget(self):
build = None
# `build` can be unspecified, True, False, None or a filename. If unspecified or True, we'll
# attempt to locate a file matching the project's name. If False or None, the project is not
# built. If a filename is specified, we'll just use that filename.
if not hasattr(self, "build") or self.build == True:
build = self.name
elif self.build:
build = self.build
else:
return
# XXXblake I think external projects actually need a getBuildTargetS(),
# and we should just try to link all of its libraries. You shouldn't have
# to specify manually.
# targets = []
# for source in getSourcesIn(self.getBuildPath(), absPaths=True):
# if source[-4:].lower() == ".lib":
# targets.append(source)
# return targets
if sys.platform == "win32":
if build.find(".") == -1:
build += ".lib"
path = self.getBuildPath(build)
else:
def getLibPath(name):
if sys.platform == "darwin":
fullName = "lib%s.dylib" % name
else:
fullName = "lib%s.so" % name # XXXblake Version numbers (e.g. .so.2)?
path = self.getBuildPath(fullName)
if not os.path.exists(path):
path = self.getBuildPath("lib%s.a" % name)
return path
if build.find(".") == -1:
path = getLibPath(build)
else:
path = self.getBuildPath("lib%s" % build)
# The dot may not necessarily indicate an extension (e.g. "libpython2.4")
if not os.path.exists(path):
path = getLibPath(build)
# XXXblake REQUIRE(os.path.exists...)
if not os.path.exists(path):
# Couldn't find the build target, but this is only an error if the developer indicated
# that he expects one--by specifying build = True, build = #, or a buildPath.
if (hasattr(self, "build") and self.build) or self.buildPath:
raise Exception("Invalid path (%s) for external project %s" % (path, self.path))
return
return path
def getName(self):
classname = str(self.__class__)
return classname[classname.rfind(".")+1:]
def getBuildPath(self, targetPath = None):
buildPath = self.getAbsolutePath(self.buildPath or "lib")
if targetPath:
return os.path.join(buildPath, targetPath)
return buildPath
name = property(getName)
path = None
buildPath = None
libs = []
def getFrameworks(self, deep=False):
return {}
class DevonProject(Project, ProjectBranch):
def __init__(self, path, updateTime):
Project.__init__(self)
# Use forward slashes since this gets printed to streams in many places
# and it's easier than trying to escape it everywhere
self.path = os.path.abspath(path).replace("\\", "/")
self.updateTime = updateTime
self.projectFilePaths = []
self.userName = ""
self.password = ""
self.name = None
self.version = None
self.description = None
self.url = None
self.author = None
self.authorEmail = None
self.glossary = {}
self.defaultProject = None
self.wikiPath = kDefaultWikiPath
self.buildPath = None
self.buildRootPath = None
self.buildProject = self
self.buildPre = []
self.build = None
self.buildPost = []
self.deps = {}
self.dist = None
self.distInstallPaths = None
self.exclude = []
self.debugSources = []
self.installPaths = {}
self.pythonModules = []
import devon.builtin
self.buildTests = devon.builtin.CompileTestRunner() >> devon.builtin.LinkTestRunner()
self.alwaysBuild = [] # Source files to rebuild every time
self.neverLink = [] # Target files to never link
# Libraries to link against. This should be used only for system libs
# in cases where we can't determine library from the include alone. For
# example, the include for many Windows libraries is the generic
# <windows.h>
# XXXblake I think in the future we should require a comment next to
# such includes that we'll parse, then remove this, e.g.:
# #include <windows.h> // gdiplus, timer
# The code should be self-documenting like that.
self.libs = []
self.resources = []
self.frameworks = []
self.ignoreFrameworks = []
self.frameworkPaths = []
self.exports = {}
self.includes = []
self.dependencies = []
self.optimize = "debug"
self.warningLevel = 4
self.pedantic = False
pch = self.getAbsolutePath(pchH)
if os.path.exists(pch):
self.pch = pchH
else:
self.pch = None
self.testExcludes = []
self.testRunner = None
self.pythonExes = {}
self.pythonPaths = []
self.showCommands = False
self.formatOutput = True
self.tracer = True
self.platform = None
self.device = None
self.sdk = None
self.arch = None
self.developerPath = None
self.compilerPath = None
self.linkerPath = None
self.ranlibPath = None
self.linkerFlags = defaultLinkerFlags
self.compilerFlags = defaultCompilerFlags
self.interfaceFlags = defaultInterfaceFlags
# ******************************************************************************************
self.defines = ProjectBranch()
self.config = ProjectBranch()
self.wiki = ProjectBranch()
self.buildArguments = ProjectBranch()
self.distArguments = ProjectBranch()
if not sys.platform == "darwin" and not sys.platform == "win32":
self.defines._REENTRANT = True
# ******************************************************************************************
# Win32 Options
if sys.platform == "win32":
# Compiler options
# Use the multithreaded CRT by default; msvc requires runtime libs to be the same type
self.runtimeThreaded = True
# Linker options
self.linkIncremental = True
self.definition = None # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vccore/html/_core_module.2d.definition_files.asp
self.subSystem = "CONSOLE"
self.defines.WIN32 = True
self.defines._WINDOWS = True
self.defines._WIN32_DCOM = True
self.defines._WIN32_WINNT = 0x0501
self.defines._MBCS = True
# ******************************************************************************************
# Mac OS X options
elif sys.platform == "darwin":
self.defines.DARWIN = True
# App Bundle options
self.executableFile = None
self.plistFile = None
self.iconFile = None
self.signature = None
self.osFiles = None
self.resourceFiles = None
# ******************************************************************************************
# Firefox Extension options
self.installScript = None
# ******************************************************************************************
# Mozilla Jar options
pass
@property
def debug(self):
return not self.optimize or self.optimize == 'debug'
@property
def buildDir(self):
if self.debug:
return 'debug'
else:
return 'release'
def __post_init__(self):
""" Called after the project file is reflected onto the Project instance. """
self.__dependencyFile = self.getBuildPath(projectDepFileName)
# If a name in the exclusion list has no extension and it's not a child
# project, we assume exclusion of both the source file and its associated
# header (if any).
for i, exclude in enumerate(self.exclude):
excludeRelPath = exclude
exclude = self.makeConcrete(self.getAbsolutePath(exclude))
# Skip over anything with an extension
if exclude.rfind(".") > -1:
self.exclude[i] = exclude
continue
# XXXblake Just use glob
# Check if it's a project
path = os.path.join(exclude, projectFileName)
if os.path.exists(path):
self.exclude[i] = exclude
continue
# Otherwise, if it's a source file, exclude associated include
cpp = "%s.cpp" % exclude
if os.path.exists(cpp):
self.exclude[i] = cpp
for export in self.exports:
header = os.path.join(self.path, export, "%s.h" % excludeRelPath)
if os.path.exists(header):
self.exclude.append(header)
break
def writeBranch(self, projectLocals):
# Copy all of the variables from the project file onto the project object
# if they are in the set of pre-defined project member variables
for name in vars(self):
if name in projectLocals:
selfValue = getattr(self, name)
localValue = projectLocals[name]
if isinstance(selfValue, ProjectBranch):
# If the member is a branch object, the project variable is expected to be
# a class whose members we copy directly onto the branch
if localValue and isinstance(localValue, types.ClassType):
selfValue.writeBranch(vars(localValue))
else:
setattr(self, name, localValue)
def getParentProject(self):
parentPath = os.path.dirname(self.path)
if not parentPath or not os.path.isdir(parentPath):
return None
try:
return load(parentPath)
except:
return None
def getChildProject(self, projectPath):
childPath = os.path.join(self.path, projectPath)
if not os.path.isdir(childPath):
return None
return load(childPath, False)
def getChildProjects(self, deep = False):
""" Compiles a list of the specified project's child projects """
allExclude = [self.buildRootPath]
def getProjectsUnderPath(path, deep, projectExclude = []):
""" Returns a list of the projects that live below the specified path. The
optional exclude list should contain absolute project paths. """
projects = []
for name in os.listdir(path):
if name[0] == "." or name == "docs":
continue
full = os.path.join(path, name)
if not os.path.isdir(full) or full in projectExclude or full in allExclude:
continue
# Respect platform forks
if name == "mac" and sys.platform != "darwin":
continue
if name == "win" and sys.platform != "win32":
continue
if name == "unix" and sys.platform == "win32":
continue
project = None
projectPath = os.path.join(full, projectFileName)
if os.path.exists(projectPath):
project = load(projectPath)
projects.append(project)
# Don't bother walking this project's build directory. We add the path to
# our "all exclude" list, since it's not necessarily a direct child of the
# project; it could be anywhere.
buildPath = project.buildRootPath
if not buildPath in allExclude:
allExclude.append(buildPath)
if deep:
if project:
exclude = project.exclude
else:
exclude = []
projects.extend(getProjectsUnderPath(full, True, exclude))
return projects
return getProjectsUnderPath(self.path, deep, self.exclude)
def getBuildTarget(self):
"""Gets the path of the build target for a ManyToOne project"""
if not self.build or isinstance(self.build, devon.maker.MakerOneToOne):
return None
targetPath = self.build.getTarget(self)
return self.getBuildPath(targetPath)
def getBuildPath(self, targetPath=None):
""" Gets the full path of a single target in the build directory of the build project.
All returned paths end with trailing slashes. """
if self.path.find(self.buildProject.path) == 0:
relativePath = self.path[len(self.buildProject.path)+1:] # +1 for the trailing slash
session = getSession()
basePath = os.path.join(self.buildRootPath, self.getPlatformAbbreviation())
basePath = os.path.join(basePath, self.buildDir, relativePath)
else:
raise Exception("Not currently supported/tested")
if targetPath:
return os.path.join(basePath, targetPath)
return basePath
def getPlatformAbbreviation(self):
names = []
if self.platform:
names.append(self.platform)
if self.sdk:
names.append(self.sdk)
if names:
return "_".join(names)
else:
return getSystemAbbreviation()
def getDocumentPath(self, docName):
docName = docName.replace("_", " ")
return os.path.join(self.path, self.wikiPath, "%s.txt" % docName)
def getSources(self, includeDirs = False, includeTests = False, absPaths = False, changedAfter = 0,
types = []):
""" Returns a project's sources, which basically consists of every file
within the project directory and each of its subdirectories until
another built project is reached. In other words, given
/foopy/
base/
project.dev
blah.cpp
blah.h
this function would return ["blah.cpp", "blah.h"] for the base
project and [] for the top-level foopy project. """
        excludes = self.exclude
        if not self.debug:
            # Build a new list so we don't permanently extend self.exclude
            excludes = excludes + self.getAbsolutePaths(self.debugSources)
return getSourcesIn(self.path, includeDirs, includeTests, absPaths, excludes, changedAfter, types)
# **************************************************************************************************
# Test
def getTestSources(self, includeDirs = False, absPaths = False):
        sources = getSourcesIn(self.getAbsolutePath("tests"), includeDirs, absPaths=absPaths)
# getSourcesIn() returns paths relative to the test dir. If the caller is
# expecting paths relative to the project path, fix up the paths here.
if not absPaths:
sources = [os.path.join("tests", source) for source in sources]
return sources
# **************************************************************************************************
# Dependencies
def getLibs(self, deep=False, absPaths=False):
""" Retrieves the project's dependent libraries. By default, a complete
list of library names (e.g. "foopy.lib") is returned, including
system libraries, such as gdi.lib. If absPaths is specified, we may
not be able to return the absolute paths of all explicit libs,
particularly system libraries.
"""
# XXXblake This is temporary pending ExternalProject.getBuildTargetS()
def getAbsPathsOfExplicitLibs(explicitLibs):
libs = []
for lib in explicitLibs:
# XXXblake Make getExportingProject() just take the export path,
# since we end up indexing slash again below
project = getExportingProject(lib)
if project: # XXXblake Not currently checking dups...is it faster to just pass dups to the linker?
libs.append(project.getBuildPath(lib[lib.rfind("/")+1:]))
else:
libs.append(lib)
return libs
if absPaths:
libs = getAbsPathsOfExplicitLibs(self.libs)
else:
libs = self.libs[:]
# XXXblake getDependencies() should probably also return any projects
# culled from our explicit libs list
for dep in self.getDependencies(deep):
depExplicitLibs = dep.libs
if absPaths:
depExplicitLibs = getAbsPathsOfExplicitLibs(depExplicitLibs)
libs.extend(depExplicitLibs)
target = dep.getBuildTarget()
if target: # If the project is built
if absPaths:
lib = target
else:
lib = os.path.basename(target)
# XXXblake Do we really need this check if getDependencies() already does dup-checking?
if not lib in libs:
libs.append(lib)
return libs
def getLibPaths(self, deep=False):
""" Retrieves the project's absolute library paths """
libPaths = []
libs = self.getLibs(deep, True)
for lib in libs:
libPaths.append(os.path.dirname(lib))
return libPaths
def getIncludePaths(self, deep = False, source = None):
""" Retrieves the project's absolute include paths. """
includes = Project.getIncludePaths(self)
includes.append(self.path)
if deep:
for dep in self.getDependencies(deep, source):
for include in dep.getIncludePaths():
if not include in includes:
includes.append(include)
return includes
# XXXblake Seems like a lot of the complexity would disappear if we stored concrete paths in the
# dependency lists in a tuple that also contains the project name (rather than storing virtual
# paths just so we can retrieve the exporting project)
# XXXblake This function is currently not used because the headersOnly method of retrieving
# dependencies is faster.
def getSourceIncludePaths(self, source, deep = False):
""" Retrieves the source's absolute include paths. The specified source should be relative
to the project. """
includes = []
projects = []
sources = []
def getIncludes(project, source, deep):
if source in sources:
return includes
sources.append(source)
project.__updateDependencies()
for dep in project.deps[source]:
depProject = getExportingProject(dep)
if not depProject:
if dep in project.deps:
# If there's no exporting project but the source is in our own list, it must
# be a local project include
depProject = project
else:
# Otherwise, could be a system include, e.g. <string>
continue
if not depProject in projects:
for include in depProject.getIncludePaths():
if not include in includes:
includes.append(include)
projects.append(depProject)
# Only retrieve includes for Devon projects for now, since we don't currently have
# a concept of dependencies tracking for external projects (XXXblake)
if deep and isinstance(depProject, DevonProject):
source = depProject.makeConcrete(dep)
getIncludes(depProject, source, deep)
return includes
return getIncludes(self, source, deep)
def readDependencyFile(self):
""" Load the dependency map from the project's dependency file """
path = self.__dependencyFile
if not os.path.exists(path):
self.__updateDependencies()
else:
f = {}
execfile(path, {}, f)
self.deps = f["deps"]
def writeDependencyFile(self):
""" Write the dependency map to the project's dependency file """
dir = os.path.dirname(self.__dependencyFile)
if not os.path.exists(self.__dependencyFile) and not os.path.exists(dir):
os.makedirs(dir)
        f = file(self.__dependencyFile, "w")
        f.write("deps = {")
        for source, deps in self.deps.iteritems():
            f.write("'%s': [" % source)
            for dep in deps:
                f.write("'%s'," % dep)
            f.write("],")
        f.write("}")
        f.close()
def __updateDependencies(self):
""" Ensures that our in-memory and written dependency map is completely
up to date. Note that we currently use the dependency file's time
stamp--rather than a per-source timestamp--as the sole indicator of
whether a source's dependency listing needs updating. This means that
whenever a source has changed and we update its dependencies, we
need to ensure that all other source dependency lists are up to date
as well. """
# Note: We don't strip tests for now, because a test source might create a
# dependency that none of the actual sources do, so we need to find those
# too. In the future, we may actually wish to offer a separate "test project"
# that offers getIncludePaths(), getSources(), etc.
# XXXblake Rather than doing this check, create the file and utime(0) earlier
# XXXblake Why use the dependency file time as the marker instead of an in-memory flag?
if os.path.exists(self.__dependencyFile):
depLastMod = os.path.getmtime(self.__dependencyFile)
else:
depLastMod = 0
# Clear the in-memory map so we don't write old cruft if you delete a dependency file
# while the server is running
self.deps.clear()
source = None
types = ["cpp", "h", "c", "hpp", "hxx", "mm", "lex", "y"]
for source in self.getSources(absPaths=True, changedAfter=depLastMod, includeTests=True, types=types):
self.deps[self.getRelativePath(source)] = getIncludes(source)
# If we updated the in-memory dependency map, just touch it and we'll write it on shutdown,
# unless we've never created the dependency file before, in which case we'll write it now
if source:
if not depLastMod:
self.writeDependencyFile()
os.utime(self.__dependencyFile, None)
def getSourceDependencies(self, source, deep = False, checkedDeps = None):
""" Yields the source's include dependencies. Takes either an absolute
or relative source path. Since a source's dependencies usually come
from a mix of projects, this function always returns absolute paths. """
if os.path.isabs(source):
# Ensure that the source is actually part of this project by trying
# to retrieve a relative path.
relSrcPath = self.getRelativePath(source, throw=False)
# If the source isn't in the project path, it might be a file in
# our build directory. Not quite sure how to handle this right now.
# It seems like we should be able to extract dependency information
# from generated files--consider e.g. a file that gets preprocessed
# into a source file. In fact, that's how flex/bison files work
# right now, but we get lucky there in that their original files
# have includes of the proper format. For now, we'll just check
# if this is a build file and return an empty list if so.
if not relSrcPath:
buildPath = self.getBuildPath()
if source[:len(buildPath)] == buildPath:
return
raise ValueError, "Supplied source is not part of project"
else:
relSrcPath = source
source = self.getAbsolutePath(source)
if checkedDeps is None:
checkedDeps = []
# XXXblake Check ctime (which is creation time on Windows) everywhere
if not os.path.exists(self.__dependencyFile) or \
os.path.getmtime(source) > os.path.getmtime(self.__dependencyFile):
self.__updateDependencies()
if not relSrcPath in self.deps:
self.deps[relSrcPath] = []
# raise Exception("Couldn't retrieve dependency information for ", relSrcPath)
for dep in self.deps[relSrcPath]:
if dep in checkedDeps:
continue
checkedDeps.append(dep)
project = getExportingProject(dep)
path = None
if not project:
# Note that it's entirely possible for a local file to match the name of an external
# file. Since we've lost the <> versus "" information in the include directive, we'll
# just forge ahead knowing we might be wrong. Usually the worst that happens is that
# we unnecessarily rebuild a source file, but since it also affects getDependencies(),
# it could cause incorrect lib/include paths, etc. If this becomes an issue, we could
# prevent this by prepending something (e.g. "./") in getIncludes when we parse a
# quoted include. In other words, given local subfolder "gecko" and external project
# "gecko", we'll mistake "gecko/foopy.h" as a gecko-project reference. Or given
# <system/file.h> and local system subfolder, we'll mistake it as a self-reference.
# The latter case is less concerning since it's just the self project again.
path = self.getAbsolutePath(dep)
if os.path.isfile(path):
project = self
else:
# OK, the header isn't at the base of the project. Before giving up, see if it's
# relative to the current file.
dirname = os.path.dirname(relSrcPath)
if dirname:
rel = os.path.join(dirname, dep)
path = self.getAbsolutePath(rel)
# We check that rel is in self.deps below to guard against a scenario such as
# that presented by shared/thread/include/Semaphore.h, which includes the
# posix <semaphore.h>, and which we would otherwise treat as the local header
# only to bail on the next call when we can't find dependency information for
# include/semaphore.h. The correct fix here is probably to retain the
# important metadata provided by the choice of "" versus <> for includes, as
# discussed above.
if os.path.isfile(path) and rel in self.deps:
project = self
dep = rel
if not project:
continue
# Now we need to turn the virtual path into a real path so we can
# get an absolute path, and so we can call ourselves recursively
# if deep was requested.
if not path:
dep = project.makeConcrete(dep)
path = project.getAbsolutePath(dep)
# It's possible that a given dependency no longer exists, e.g. if
# a header has been renamed but nobody updated the source file. Since
# we won't recompile the file (and thus induce the compiler to catch
# the error) unless the source file happened to change for other reasons,
# we'll throw and cause the buck to stop here.
if not os.path.exists(path):
raise ValueError, "%s does not exist; did you rename a header \
file and forget to update the source file?" % path
yield path
if deep:
for dep in project.getSourceDependencies(dep, deep, checkedDeps):
yield dep
def getDependencies(self, deep = False, source = None, projects = None):
""" Returns a list of the project's dependent projects. """
# XXXblake The "source" param is a big, big hack. Basically what we're doing is calculating
# one source's dependencies by retrieving all the *potential* dependencies, i.e. anything
# exported by a project that exports one of the source's dependencies, recursively.
if projects is None:
projects = []
# Ensure the dependency map is up-to-date
self.__updateDependencies()
# Now flatten the dependency map into a list, retrieve the project from
# the dependency, and, if requested, get each dependency's dependencies
for src, deps in self.deps.iteritems():
            if source and src[-2:] not in (".h", ".y") and not src[-4:] == ".lex" \
                    and not src == source:
continue
for dep in deps:
project = getExportingProject(dep)
# Don't include ourselves or duplicates in the dependency list
if project and project is not self and not project in projects:
yield project
projects.append(project)
# XXXblake Profile to see if breadth approach (myProjects list) is faster
if deep:
for proj in project.getDependencies(deep, bool(source), projects):
yield proj
# **************************************************************************************************
def getFrameworks(self, deep=False):
        names = self.frameworks[:]  # copy so deep lookups don't mutate self.frameworks
if deep:
for depProject in self.getDependencies(deep):
for libName in depProject.getFrameworks():
if libName not in names and libName not in self.ignoreFrameworks:
names.append(libName)
return names
def expandString(self, path):
return path % ProjectVariables(self)
def getInheritableProperty(self, name, default):
value = getattr(self, name)
if value:
return value, self
else:
parent = self.getParentProject()
if not parent:
return default, None
else:
return parent.getInheritableProperty(name, default)
def getPathList(self, sources):
fullSources = []
if sources:
cwd = os.getcwd()
os.chdir(self.path)
for source in sources:
source = self.expandString(source)
# On Windows, glob can return paths with backslashes. We change
# back to forward slashes to maintain the path integrity.
fullSources += [s.replace("\\", "/") for s in glob.glob(source)]
os.chdir(cwd)
return fullSources
# **************************************************************************************************
class ProjectVariables:
"""A wrapper that can used to embed project variables in formatted strings"""
def __init__(self, project):
self.project = project
def __getitem__(self, name):
if name == "buildTarget":
return self.project.getBuildTarget()
elif name == "buildPath":
return self.project.getBuildPath()
elif name.find("buildTarget_") == 0:
projectPath = name[12:]
childProject = self.project.getChildProject(projectPath)
if childProject:
return childProject.getBuildTarget()
else:
parentPath = os.path.basename(self.project.path)
projectPath = os.path.abspath(os.path.join("..", projectPath))
siblingProject = load(projectPath)
if siblingProject:
return siblingProject.getBuildTarget()
elif name.find("buildPath_") == 0:
projectPath = name[10:]
childProject = self.project.getChildProject(projectPath)
if childProject:
return childProject.getBuildPath()
else:
parentPath = os.path.basename(self.project.path)
projectPath = os.path.abspath(os.path.join("..", projectPath))
siblingProject = load(projectPath)
if siblingProject:
return siblingProject.getBuildPath()
else:
names = name.split(".")
obj = self.project
for name in names:
obj = getattr(obj, name)
return str(obj)
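# Illustrative use of ProjectVariables (hypothetical project layout): a project file can
# embed keys such as "%(buildTarget)s", "%(buildPath)s", "%(buildTarget_sub)s" or
# "%(name)s" in a string, and Project.expandString() substitutes them through the
# __getitem__ lookups above.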
# **************************************************************************************************
class Session:
pass
def getSession():
thread = threading.currentThread()
if not hasattr(thread, "session"):
# XXXjoe Sessions are dead, I think
thread.session = Session()
return thread.session
# **************************************************************************************************
def __findProjectPath(path, recurse = True):
"""Returns the path of the project that lives in |path|. If no project is
found and |recurse| is true, this searches the parents of a directory
until a project file is found"""
path = os.path.abspath(path)
if not os.path.isdir(path):
path = os.path.dirname(path)
while 1:
projectPath = os.path.join(path, projectFileName)
if os.path.exists(projectPath):
return os.path.dirname(projectPath)
elif recurse:
newPath = os.path.dirname(path)
if newPath == path: # We've hit the drive root
break
else:
path = newPath
else:
break
return None
def __importProject(projectPath):
projectFilePath = os.path.join(projectPath, projectFileName)
workspaceFilePath = os.path.join(projectPath, workspaceFileName)
userFilePath = os.path.join(os.path.expanduser(devon.userPath), userFileName)
configFilePath = os.path.join(os.path.expanduser(devon.userPath), configFileName)
# Look for the project in the cache
project = None
if projectPath in projectCache:
project = projectCache[projectPath]
# If any of the files have been changed, invalidate and reload the project
for path in (projectFilePath, workspaceFilePath, userFilePath, configFilePath):
if os.path.isfile(path):
fileTime = os.path.getmtime(path)
if fileTime > project.updateTime:
# Sanity check: if the file time is greater than time.time(), something's wacky.
# (I ran into this problem running a Unix VM, where the Unix "hardware clock"
# was off and thus invariably fileTime > updateTime and we recursed infinitely.)
if fileTime > time.time():
raise Exception("Error: System clock appears to be set inappropriately " \
"(while importing project %s)" % project.path)
project = None
break
if project:
return project
project = DevonProject(os.path.dirname(projectFilePath), time.time())
projectCache[projectPath] = project
# Merge the config file
__mergeProject(project, projectPath, configFilePath)
# Merge the project file
__mergeProject(project, projectPath, projectFilePath)
# Merge the local workspace file
__mergeProject(project, projectPath, workspaceFilePath)
# Merge the user file
__mergeProject(project, projectPath, userFilePath)
# Find the project that hosts the build directory and cache it on this project
if project.buildPath:
if os.path.isabs(project.buildPath):
project.buildRootPath = project.buildPath
else:
project.buildRootPath = project.getAbsolutePath(project.buildPath)
else:
buildPath, buildProject = project.getInheritableProperty("buildPath", ".")
if buildProject:
project.buildRootPath = os.path.join(buildProject.path, buildPath)
project.buildProject = buildProject
else:
project.buildRootPath = buildPath
project.buildProject = project
# Merge the workspace file for the whole project
if project.buildProject and not project.buildProject == project:
rootWorkspacePath = os.path.join(project.buildRootPath, '..', workspaceFileName)
__mergeProject(project, projectPath, rootWorkspacePath)
# "Post initialize" the project so it has a chance to do additional work
# after the project file has been read in
project.__post_init__()
# Add the project's exports to our global export map
populateExportMap(project)
# Add the project to our global name map
projectMap[project.name] = project
# Read in cached dependencies if the project is built or if it exports
if isinstance(project, DevonProject) and (project.getBuildTarget() or project.exports):
project.readDependencyFile()
return project
def __mergeProject(project, projectPath, projectFilePath):
projectLocals = __importProjectLocals(projectFilePath, project)
if projectLocals:
project.writeBranch(projectLocals)
def __importProjectLocals(projectFilePath, project=None):
if not os.path.isfile(projectFilePath):
return {}
cwd = os.getcwd()
os.chdir(os.path.dirname(projectFilePath))
projectLocals = {}
projectLocals.update(__getProjectBuiltin())
if project:
projectLocals.update(vars(project))
execfile(projectFilePath, {}, projectLocals)
os.chdir(cwd)
return projectLocals
def __getProjectBuiltin():
global projectBuiltin
if not projectBuiltin:
projectBuiltin = {}
import devon.builtin
for name in vars(devon.builtin):
if not rePrivate.match(name):
value = getattr(devon.builtin, name)
if not type(value) == types.ModuleType:
projectBuiltin[name] = value
return projectBuiltin
# **************************************************************************************************
# Dependency Helpers
# reInclude = re.compile(r"""#include (?:(?:"|<)((?:\w*/\w*)*\(?:.h|.hxx|.hpp))(?:"|>))|(?:(?:\")(\w*\(?:.h|.hxx|.hpp)(?:\"))""")
reInclude = re.compile(r"""#include (?:"|<)(.*?(?:\.h|\.hpp|))(?:"|>)""")
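# For example, reInclude matches lines such as:
#   #include "foopy/base.h"  -> group(1) == "foopy/base.h"
#   #include <vector>        -> group(1) == "vector"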
reNamespace = re.compile(r"namespace \w* {")
def populateExportMap(project):
""" Adds the project's export paths to our map. We use this map to lookup
a project based on its exports when we encounter an include in a source
file. Export paths are case-insensitive. """
# XXXblake On Unix, where external project paths tend to be "usr", this adds all sorts of junk
# to the map
if project.name:
exports = project.getExportPaths(deep=False, absPaths=True)
exportReverseMap[project.name] = exports
for export in project.getExportPaths(deep=True, absPaths=False):
if not export in exportMap:
exportMap[export] = []
# print "Adding ", export, " to project ", project.path XXXblake This reveals some bugs
exportMap[export].append(project)
def getIncludes(source):
""" Returns a list of relative, concrete include paths used by the specified (absolute) source. """
includes = []
# XXXblake We need to parse conditionals so we only process the right includes.
# XXXblake Doesn't work for includes after the namespace (e.g. at the bottom of the file)
# XXXblake Need to handle relative includes (e.g. #include "../foopy.h")
for line in file(source):
match = reInclude.match(line)
if match:
includes.append(match.group(1))
elif reNamespace.match(line):
break
return includes
def getExportingProject(export):
""" Returns the project that exports the supplied path. """
exportDir = os.path.dirname(export)
if not exportDir in exportMap:
return None
projects = exportMap[exportDir]
exportingProject = None
numProjects = len(projects)
if numProjects > 1:
# If we already determined the correct project for this export in the past, we cached it
# in our map, so check for that first.
if export in exportMap:
exportingProject = exportMap[export]
else:
# If more than one project exports the given path, we need to determine
# which one it is. We want to minimize cases like this (by using distinctive
# export paths) since it slows down building.
# XXXblake This assumes a header, but getLibs() also uses this function
# to look up namespaced libraries. We should check the extension here and
# look at export or lib paths accordingly. This will probably fail if, say,
# two projects export "gecko" currently.
for project in projects:
absRealPath = project.getAbsolutePath(project.makeConcrete(export))
if os.path.exists(absRealPath):
exportingProject = project
# Add to our cache so future lookups are faster
exportMap[export] = exportingProject
break
elif numProjects:
exportingProject = projects[0]
# Although we already have a project instance here, we call through load()
# to ensure that the project is up to date.
if exportingProject and isinstance(exportingProject, DevonProject):
exportingProject = load(exportingProject.path)
# Warn if a file includes a header that was marked for exclusion. This rare situation
# arises if you try to prevent a project from building by excluding all the files in it
# but fail to remove the associated includes. The linker usually complains anyways, but this
# warning helps identify the source of the problem. Note that the correct way to prevent
# a project from building is to exclude the project itself in its parent.
if isinstance(exportingProject, DevonProject):
path = exportingProject.getAbsolutePath(exportingProject.makeConcrete(export))
if path in exportingProject.exclude:
print "Warning: A header designated for exclusion was included (%s)" % path
return exportingProject
def includeSource(root, name, isFile, types = []):
""" Filters out project files, build and test directories, hidden directories,
generated files, other platforms' files, and (optionally) files that aren't of the specified
types. """
# Filter hidden files and directories. This excludes things like the svn
# working copy. # XXXblake Move under dir when dep files move.
if name[0] == ".":
return False
if isFile:
if types:
index = name.rfind(".")
if index == -1:
return False
ext = name[index+1:]
if not ext in types:
return False
else:
# Filter other platforms' sources
if sys.platform == "win32":
if name == "mac" or name == "unix":
return False
elif sys.platform == "darwin":
if name == "win":
return False
else:
if name == "mac" or name == "win":
return False
# If the subdirectory is a built project of its own, don't crawl into
# it since the files contained within are that project's sources, not
# our own.
if os.path.exists(os.path.join(root, name, projectFileName)):
return False
# Filter the build directory
if name == "build":
return False
return True
def relativePath(fullPath, basePath):
slash = basePath[-1] == "/" or basePath[-1] == "\\"
return fullPath[len(basePath) + (not slash):]
def getSourcesIn(path, includeDirs = False, includeTests = False, absPaths = False, excludes = [],
changedAfter = 0, types = []):
""" Returns the files in the specified path and its subfolders, excluding project files, build
and test directories, etc. (see includeSource()). By default, the function returns paths
relative to |path|; use absPaths to change that. You can also pass a list of absolute paths
to exclude. """
sources = []
def getSource(root, name, changedAfter=changedAfter):
fullPath = os.path.join(root, name)
source = None
if not fullPath in excludes:
if absPaths:
source = fullPath
else:
source = relativePath(fullPath, path)
if source:
# We also check ctime below, which ensures that we count moved/copied files as "changed"
# even though their modification times have not changed. (We can't *just* check ctime,
# because on Windows it's only the creation time, although on Unix it encompasses both
            # modification and moves.)
if not changedAfter or os.path.getmtime(fullPath) > changedAfter \
or os.path.getctime(fullPath) > changedAfter:
return source
# Walk the directory tree looking for sources
for root, dirs, files in os.walk(path):
# Directories
length = len(dirs)
for i, dir in enumerate(reversed(dirs)):
if not includeSource(root, dir, False) or (not includeTests and dir == "tests"):
del dirs[length-i-1]
elif includeDirs:
# We don't exclude directories based on the mod date because their subdirectories
# may have changed, so pass 0 here.
source = getSource(root, dir, 0)
if source:
sources.append(source)
# Optimization: only bother checking files if the current directory mod time is newer than
# our requested mod time
if changedAfter and os.path.getmtime(root) < changedAfter:
continue
# Files
for file in files:
if not includeSource(root, file, True, types):
continue
source = getSource(root, file)
if source:
sources.append(source)
return sources
def getSystemAbbreviation():
import sys
if sys.platform == "win32":
return "win"
if sys.platform == "darwin":
return "mac"
return "unix"
```
#### File: devon/devon/tag.py
```python
class Node:
def __init__(self, attributes=None):
self.attributes = attributes
self.childNodes = None
def __getitem__(self, name):
return self.attributes[name]
def __setitem__(self, name, value):
self.attributes[name] = value
def __lshift__(self, value):
from devon.stream import OutStream
from devon.renderers.html import HTMLRenderer
from cStringIO import StringIO
stream = OutStream(StringIO(), HTMLRenderer())
stream.write(self)
stream.write(value)
return stream
def clone(self):
cloned = self.__class__(self.line)
cloned.mergeAttributes(self.attributes)
return cloned
def mergeAttributes(self, attributes):
if self.attributes:
self.attributes.update(attributes)
else:
self.attributes = dict(attributes)
def appendChild(self, node):
if not self.childNodes:
self.childNodes = []
self.childNodes.append(node)
def getInnerText(self):
text = ""
if self.childNodes:
for node in self.childNodes:
if isinstance(node, basestring):
text += node
return text
def iterateNodes(self, exclude=None):
if self.childNodes:
for node in self.childNodes:
if exclude and exclude(node):
continue
yield node
if isinstance(node, Node):
if node.childNodes:
for child in node.iterateNodes(exclude):
yield child
yield Close
def getNodesBy(self, criteria):
if self.childNodes:
for node in self.childNodes:
if isinstance(node, Node) and node.childNodes:
if criteria(node):
yield node
for child in node.getNodesBy(criteria):
yield child
def __str__(self):
xml = "<%s" % self.__class__.__name__
if self.attributes:
for name in self.attributes:
xml += ' %s="%s"' % (name, self.attributes[name])
xml += ">"
return xml
# **************************************************************************************************
class Document(Node):
def __init__(self, sourcePath=None):
self.sourcePath = sourcePath
Node.__init__(self)
# **************************************************************************************************
class Tag(Node):
def __init__(self, classes=None, line=-1, **attributes):
self.line = line
if classes:
attributes["class"] = classes
Node.__init__(self, attributes)
def clone(self):
cloned = self.__class__("", self.line)
cloned.mergeAttributes(self.attributes)
return cloned
# **************************************************************************************************
class Close:
"""Write this class to the stream to close the last open tag."""
def __init__(self, line=-1):
self.line = line
class Flush:
"""Write this class to the stream to flush all buffered text."""
class FileTarget:
def getFileTargetPath(self):
pass
# **************************************************************************************************
def dumpNodeTree(node, out=None, indent=""):
if not out:
import sys
out = sys.stdout
out.write(indent)
if isinstance(node, basestring):
out.write("'" + node + "'\n")
else:
out.write(str(node) + "\n")
if isinstance(node, Node) and node.childNodes:
childIndent = indent + " "
for child in node.childNodes:
dumpNodeTree(child, out, childIndent)
out.write(indent)
out.write("</%s>\n" % node.__class__.__name__)
```
#### File: devon/web/logCatalog.py
```python
import devon.projects.run
import os.path
# **************************************************************************************************
def main(request):
devon.projects.run.writeProjectLogCatalog(request.project, request.out)
``` |
{
"source": "joeHickson/flood_nowcasting",
"score": 2
} |
#### File: joeHickson/flood_nowcasting/lambda_function.py
```python
import json
import os
import sys
# bugger about with the path to include the package. not the right way really.
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/flood_nowcasting")
from flood_nowcasting.flood_nowcasting import FloodNowcasting
# import sys
#
# # bugger about with the path to include the package. not the right way really.
# sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/flood_nowcasting")
def lambda_handler(event, context):
try:
nowcast = FloodNowcasting(app_key=os.environ['APP_KEY'],
app_secret=os.environ['APP_SECRET'],
access_token=os.environ['ACCESS_TOKEN'],
access_token_secret=os.environ['ACCESS_TOKEN_SECRET'])
nowcast.main()
return {
'statusCode': 200,
'body': json.dumps('Run complete')
}
except Exception:
return {
'statusCode': 500,
'body': json.dumps('Server Error')
}
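# Illustrative local run (assumes the four credential environment variables are set;
# the event and context arguments are not used by this handler):
#   print(lambda_handler({}, None))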
```
#### File: flood_nowcasting/tests/test_load_ea_data.py
```python
import unittest
from entities import Location, FloodStates
from load_ea_data import get_data
class TestLoadEaData(unittest.TestCase):
def test_load(self):
location = Location(
name="test",
monitoring_station="45128",
wet=1,
warn=0.5,
messages={state: f"message {state.name}" for state in FloodStates}
)
x_data, y_data = get_data(location)
self.assertEqual(24, len(x_data))
self.assertEqual(24, len(y_data))
x_data, y_data = get_data(location, 5)
self.assertEqual(5, len(x_data))
self.assertEqual(5, len(y_data))
# check the last point in the sequence is within 5 minutes of where it should be
self.assertAlmostEqual(min(x_data) + 4 * 60 * 15, max(x_data), delta=300)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joehillen/jsonrpcake",
"score": 3
} |
#### File: jsonrpcake/jsonrpcake/core.py
```python
import sys
import errno
import json
import jsonrpc_ns
from .models import Environment
from .output import build_output_stream, write
from . import ExitStatus
def main(args=sys.argv[1:], env=Environment()):
"""Run the main program and write the output to ``env.stdout``.
Return exit status code.
"""
from .cli import parser
def error(msg, *args, **kwargs):
msg = msg % args
level = kwargs.get('level', 'error')
env.stderr.write('\njsonrpc: {level}: {msg}\n'
.format(level=level, msg=msg))
debug = '--debug' in args
traceback = debug or '--traceback' in args
exit_status = ExitStatus.OK
if debug:
if args == ['--debug']:
return exit_status
try:
args = parser.parse_args(args=args, env=env)
try:
response = jsonrpc_ns.request(
args.addr, args.method, args.data, timeout=args.timeout)
except jsonrpc_ns.JSONRPCResponseError as e:
response = e.value
code = e.value['code']
message = e.value['message']
if args.check_status:
exit_status = ExitStatus.ERROR
error('JSONRPC %s %s', code, message, level='warning')
response = json.dumps(response)
write_kwargs = {
'stream': build_output_stream(
args, env, None, response),
'outfile': env.stdout,
'flush': env.stdout_isatty or args.stream
}
try:
write(**write_kwargs)
except IOError as e:
if not traceback and e.errno == errno.EPIPE:
# Ignore broken pipes unless --traceback.
env.stderr.write('\n')
else:
raise
except (KeyboardInterrupt, SystemExit):
if traceback:
raise
env.stderr.write('\n')
exit_status = ExitStatus.ERROR
except Exception as e:
# TODO: Better distinction between expected and unexpected errors.
# Network errors vs. bugs, etc.
if traceback:
raise
error('%s: %s', type(e).__name__, str(e))
exit_status = ExitStatus.ERROR
return exit_status
``` |
{
"source": "joehmchiu/tsoncli",
"score": 2
} |
#### File: tsoncli/tsoncli/msg.py
```python
import sys, os, time
from markdown import markdown
def info():
    return markdown(
        "# tson-cli\n"
        "## Task Service over Networks CLI"
    )
``` |
{
"source": "joehoeller/Algorithmic-Data-Cleaning-with-Pandas",
"score": 3
} |
#### File: joehoeller/Algorithmic-Data-Cleaning-with-Pandas/pipeline.py
```python
import pandas as pd
from datetime import datetime
import numpy as np
import pydash as _
### Read in CSV files
df1 = pd.read_csv('./input/Task_Data_1.csv')
df2 = pd.read_csv('./input/Task_Data_2.csv')
### Use Pandas DF to merge by common key, "item_id" to ensure
### that rows from both CSVs are matched
merged_by_id = df1.merge(df2, how="left", on="item_id")
### Format values for result Data1.mp1 * data2.mp2
merged_by_id['result'] = merged_by_id.mp1 * merged_by_id.mp2
### Format values for date as Data2.date in YYYY-MM-DD
merged_by_id['date_new'] = pd.to_datetime(merged_by_id['date']).dt.strftime('%Y-%m-%d')
### Drop missing values: Drop all rows from col 'value'
### inside Pands DF that are NaN or Null
merged_by_id = merged_by_id[pd.notnull(merged_by_id['value'])]
### Space in mem to hold data
data = []
### Python struct
init = {
'prev': None,
'curr': None,
'result': []
}
### Get data in array so pseudo rolling windows
### with conditions can operate on data
for x in merged_by_id.loc[:, 'value']:
data.append(x)
### Algorithmic accumulator that walks arrays right (reduceRight)
### and handles the conditions of Data1.value or
### (data1[row-1].value + data1[row+1].value) / 2
### or 1 if there are no [row-1] or [row+1] values,
### without mutations to variables, no loops, and zero non-deterministic
### code design patterns
def fn(acc, curr):
if acc['prev'] == None and acc['curr'] == None:
return {
'prev': None,
'curr': curr,
'result': [1]
}
elif acc['curr'] != None and acc['prev'] == None:
return {
'prev': acc['curr'],
'curr': curr,
'result': acc['result']
}
elif acc['curr'] != None and acc['prev'] != None:
return {
'prev': acc['curr'],
'curr': curr,
'result': [ (acc['prev']+curr) / 2 ] + acc['result']
}
def det():
if len(data) == 1:
return data
else:
return [1] + _.reduce_right(data, fn, init)['result']
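### Worked example (illustrative): for data = [10, 20, 30], reduce_right first sees
### 30 (endpoint -> result [1]), then 20, then 10 with prev = 30, giving
### [(30 + 10) / 2] + [1] = [20.0, 1]; det() then prepends 1, returning [1, 20.0, 1].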
### print to verify Pandas DF, output desired CSV file in /output folder
result = det()
df = pd.DataFrame({"item_identifier":merged_by_id['item_id'],"result":merged_by_id['result'],"date_new":merged_by_id['date_new'],"new_value":result})
print(df)
df.to_csv(r'./output/clean_data.csv')
``` |
{
"source": "joehood/SubCircuit",
"score": 3
} |
#### File: subcircuit/devices/c.py
```python
import subcircuit.interfaces as inter
import subcircuit.sandbox as sb
class C(inter.MNADevice):
"""SPICE capacitor device"""
def __init__(self, nodes, value, mname=None, l=None, w=None, ic=None,
**parameters):
"""General form:
CXXXXXXX N+ N- VALUE <IC=INCOND>
Examples:
CBYP 13 0 1UF
COSC 17 23 10U IC=3V
N+ and N- are the positive and negative element nodes, respectively. VALUE is
the capacitance in Farads.
The (optional) initial condition is the initial (time-zero) value of capacitor
voltage (in Volts). Note that the initial conditions (if any) apply 'only' if
the UIC option is specified on the .TRAN control line.
Semiconductor Capacitors:
General form:
CXXXXXXX N1 N2 <VALUE> <MNAME> <L=LENGTH> <W=WIDTH> <IC=VAL>
Examples:
CLOAD 2 10 10P
CMOD 3 7 CMODEL L=10u W=1u
This is the more general form of the Capacitor presented in section 6.2, and
allows for the calculation of the actual capacitance value from strictly
geometric information and the specifications of the process. If VALUE is
specified, it defines the capacitance. If MNAME is specified, then the
capacitance is calculated from the process information in the model MNAME and
the given LENGTH and WIDTH. If VALUE is not specified, then MNAME and LENGTH
must be specified. If WIDTH is not specified, then it is taken from the
default width given in the model. Either VALUE or MNAME, LENGTH, and WIDTH
may be specified, but not both sets.
The capacitor model contains process information that may be used to compute
the capacitance from strictly geometric information.
name parameter units default example
-----------------------------------------------------------------
CJ junction bottom capacitance F m-2 - 5e-5
CJSW junction sidewall capacitance F m-1 - 2e-11
DEFW default device width m 1e-6 2e-6
NARROW narrowing due to side etching m 0.0 1e-7
The capacitor has a capacitance computed as
CAP = CJ(LENGTH - NARROW)(WIDTH - NARROW) + 2.CJSW(LENGTH + WIDTH - 2.NARROW)
"""
inter.MNADevice.__init__(self, nodes, 0, **parameters)
self.value = value
self.ic = ic
def connect(self):
npos, nneg = self.nodes
self.port2node = {0: self.get_node_index(npos),
1: self.get_node_index(nneg)}
def start(self, dt):
self.jac[0, 0] = self.value / dt
self.jac[0, 1] = -self.value / dt
self.jac[1, 0] = -self.value / dt
self.jac[1, 1] = self.value / dt
def step(self, dt, t):
vc = self.get_across_history(0, 1)
self.bequiv[0] = self.value / dt * vc
self.bequiv[1] = -self.value / dt * vc
class CBlock(sb.Block):
"""Schematic graphical inteface for L device."""
friendly_name = "Capacitor"
family = "Elementary"
label = "C"
engine = C
symbol = sb.Symbol()
symbol.lines.append(((60, 0), (60, 40)))
symbol.lines.append(((60, 60), (60, 100)))
symbol.lines.append(((40, 40), (80, 40)))
symbol.lines.append(((40, 60), (80, 60)))
def __init__(self, name):
# init super:
sb.Block.__init__(self, name, C)
# ports:
self.ports['positive'] = sb.Port(self, 0, (60, 0))
self.ports['negative'] = sb.Port(self, 1, (60, 100))
# properties:
self.properties['Capacitance (F)'] = 0.1
def get_engine(self, nodes):
return C(nodes, self.properties['Capacitance (F)'])
```
#### File: subcircuit/devices/e.py
```python
import subcircuit.sandbox as sb
import subcircuit.interfaces as inter
class E(inter.MNADevice, inter.CurrentSensor):
"""A SPICE E (VCVS) device."""
def __init__(self, nodes, value=1.0, limit=None, **parameters):
"""Create a new SPICE E (VCVS) device.
:param name: Name of device. Must be unique within the subcircuit
:param nodes: node connections sequence
:param value: Either fixed value or transfer function table
Examples:
value=10.0 # fixed value
value=Table((-1, 0), (0, 0), (0.0001, 1)) # lookup table
TODO: add stimulus option.
:param kwargs: Additional keyword arguments
:return: New E instance
General form:
EXXXXXXX N+ N- NC+ NC- VALUE
Examples:
E1 2 3 14 1 2.0
N+ is the positive node, and N- is the negative node. NC+ and NC- are
the positive and negative controlling nodes, respectively. VALUE is the
voltage gain.
"""
inter.MNADevice.__init__(self, nodes, 1, **parameters)
self.gain = None
self.table = None
if isinstance(value, float) or isinstance(value, int):
self.gain = float(value)
elif isinstance(value, inter.Table):
self.table = value
self.subckt = None
self.limit = limit
def connect(self):
ncp, ncm, np, nm = self.nodes
self.port2node = {0: self.get_node_index(ncp),
1: self.get_node_index(ncm),
2: self.get_node_index(np),
3: self.get_node_index(nm),
4: self.create_internal("{0}_int".format(self.name))}
def start(self, dt):
"""Define the initial VCVS jacobian stamp."""
if self.gain:
k = self.gain
else:
k = 0.0
self.jac[2, 4] = -1.0
self.jac[3, 4] = 1.0
self.jac[4, 0] = k
self.jac[4, 2] = -1.0
self.jac[4, 3] = 1.0
self.jac[4, 1] = -k
def step(self, dt, t):
"""TODO Doc"""
if self.table:
vc = self.get_across(2, 3)
k = self.table.output(vc) # get gain for this control voltage
if self.limit and not vc == 0.0:
if vc * k > self.limit:
k = self.limit / vc
if vc * k < -self.limit:
k = -self.limit / vc
self.jac[4, 0] = k
self.jac[4, 1] = -k
def get_current_node(self):
"""Return the current node."""
        return self.port2node[4], 1.0
class EBlock(sb.Block):
"""Schematic graphical inteface for E device."""
friendly_name = "Voltage Controlled Voltage Source"
family = "Dependant Sources"
label = "E"
engine = E
symbol = sb.Symbol()
# main leads:
symbol.lines.append(((60, 0), (60, 20)))
symbol.lines.append(((60, 80), (60, 100)))
# control leads:
symbol.lines.append(((20, 40), (40, 40)))
symbol.lines.append(((20, 60), (40, 60)))
# plus:
symbol.lines.append(((60, 33), (60, 43)))
symbol.lines.append(((55, 38), (65, 38)))
# diamond:
symbol.lines.append(((60, 20), (90, 50), (60, 80), (30, 50),
(60, 20)))
def __init__(self, name):
# init super:
sb.Block.__init__(self, name)
# ports:
self.ports['control positive'] = sb.Port(self, 0, (20, 40))
self.ports['control negative'] = sb.Port(self, 1, (20, 60))
self.ports['positive'] = sb.Port(self, 2, (60, 0))
self.ports['negative'] = sb.Port(self, 3, (60, 100))
# properties:
self.properties['Gain value or (time, value) pairs'] = 1.0
def get_engine(self, nodes):
return E(nodes, self.properties['Gain value or (time, value) pairs'])
```
#### File: subcircuit/devices/isense.py
```python
import subcircuit.interfaces as inter
import subcircuit.sandbox as sb
class CurrentSensor(inter.MNADevice, inter.CurrentSensor):
def __init__(self, nodes, **parameters):
inter.MNADevice.__init__(self, nodes, 0, **parameters)
def connect(self):
nplus, nminus, current_node = self.nodes
self.port2node = {0: self.get_node_index(nplus),
1: self.get_node_index(nminus),
2: self.get_node_index(current_node)}
def start(self, dt):
self.jac[0, 2] = 1.0
self.jac[1, 2] = -1.0
self.jac[2, 0] = 1.0
self.jac[2, 1] = -1.0
def get_current_node(self):
return self.port2node[2], -1.0
class CurrentSensorBlock(sb.Block):
"""Schematic graphical inteface for IScope device."""
friendly_name = "Current Sensor"
family = "Meters"
label = "I"
engine = CurrentSensor
symbol = sb.Symbol()
# lines:
symbol.lines.append(((80, 40), (80, 70)))
symbol.lines.append(((60, 80), (100, 80)))
# plus:
symbol.lines.append(((60, 53), (60, 63)))
symbol.lines.append(((55, 58), (65, 58)))
# circle
symbol.circles.append((75, 70, 10, 20))
def __init__(self, name):
# init super:
sb.Block.__init__(self, name, None)
# port:
self.ports['positive'] = sb.Port(self, 0, (60, 80))
self.ports['negative'] = sb.Port(self, 1, (100, 80))
self.ports['current node'] = sb.Port(self, 2, (80, 40))
def get_engine(self, nodes):
self.engine = CurrentSensor(nodes)
return self.engine
```
#### File: subcircuit/devices/l.py
```python
import math
import subcircuit.interfaces as inter
import subcircuit.sandbox as sb
class L(inter.MNADevice, inter.CurrentSensor):
"""SPICE Inductor Device"""
def __init__(self, nodes, value, ic=None, res=0.0, **parameters):
"""
General form:
LYYYYYYY N+ N- VALUE <IC=INCOND>
Examples:
LLINK 42 69 1UH
LSHUNT 23 51 10U IC=15.7MA
N+ and N- are the positive and negative element nodes, respectively. VALUE is the
inductance in Henries.
The (optional) initial condition is the initial (time-zero) value of inductor
current (in Amps) that flows from N+, through the inductor, to N-. Note that the
initial conditions (if any) apply only if the UIC option is specified on the
.TRAN analysis line.
"""
self.linkable = False
if len(nodes) == 2:
inter.MNADevice.__init__(self, nodes, 1, **parameters)
elif len(nodes) == 3:
inter.MNADevice.__init__(self, nodes, 0, **parameters)
self.linkable = True
self.value = value
self.ic = ic
self.res = res
def connect(self):
if self.linkable:
nplus, nminus, link = self.nodes
self.port2node = {0: self.get_node_index(nplus),
1: self.get_node_index(nminus),
2: self.get_node_index(link)}
else:
nplus, nminus = self.nodes
internal = "{0}_int".format(self.name)
self.port2node = {0: self.get_node_index(nplus),
1: self.get_node_index(nminus),
2: self.create_internal(internal)}
def start(self, dt):
self.jac[0, 2] = 1.0
self.jac[1, 2] = -1.0
self.jac[2, 0] = 1.0
self.jac[2, 1] = -1.0
self.jac[2, 2] = -(self.res + self.value / dt)
def step(self, dt, t):
inductor_current = self.get_across_history(2)
self.bequiv[2] = -self.value / dt * inductor_current
def get_current_node(self):
return self.port2node[2], 1.0
class LBlock(sb.Block):
"""Schematic graphical inteface for L device."""
friendly_name = "Inductor"
family = "Elementary"
label = "L"
engine = L
symbol = sb.Symbol()
# leads:
symbol.lines.append(((60, 0), (60, 20)))
symbol.lines.append(((60, 80), (60, 100)))
# coils (x, y, r, ang0, ang1, clockwise)
ang1 = math.pi * 0.5
ang2 = 3.0 * math.pi * 0.5
symbol.arcs.append((60, 30, 10, ang1, ang2, True))
symbol.arcs.append((60, 50, 10, ang1, ang2, True))
symbol.arcs.append((60, 70, 10, ang1, ang2, True))
def __init__(self, name):
# init super:
sb.Block.__init__(self, name, L)
# ports:
self.ports['positive'] = sb.Port(self, 0, (60, 0))
self.ports['negative'] = sb.Port(self, 1, (60, 100))
# properties:
self.properties['Inductance (H)'] = 0.1
def get_engine(self, nodes):
return L(nodes, self.properties['Inductance (H)'])
class LLinkBlock(sb.Block):
"""Schematic graphical inteface for L device."""
friendly_name = "Inductor (Linkable)"
family = "Elementary"
label = "L"
engine = L
symbol = sb.Symbol()
# leads:
symbol.lines.append(((60, 0), (60, 20)))
symbol.lines.append(((60, 80), (60, 100)))
# coils (x, y, r, ang0, ang1, clockwise)
ang1 = math.pi * 0.5
ang2 = 3.0 * math.pi * 0.5
symbol.arcs.append((60, 30, 10, ang1, ang2, False))
symbol.arcs.append((60, 50, 10, ang1, ang2, False))
symbol.arcs.append((60, 70, 10, ang1, ang2, False))
# mag link bar:
symbol.lines.append(((80, 20), (80, 80)))
def __init__(self, name):
# init super:
sb.Block.__init__(self, name, L)
# ports:
self.ports['positive'] = sb.Port(self, 0, (60, 0))
self.ports['negative'] = sb.Port(self, 1, (60, 100))
        self.ports['link'] = sb.Port(self, 2, (80, 20))
# properties:
self.properties['Inductance (H)'] = 0.1
def get_engine(self, nodes):
return L(nodes, self.properties['Inductance (H)'])
```
#### File: subcircuit/devices/r.py
```python
import subcircuit.interfaces as inter
import subcircuit.sandbox as sb
class R(inter.MNADevice):
"""Engine for SPICE R (resistor or semiconductor resistor) device."""
def __init__(self, nodes, value,
rmodel=None, l=None, w=None, temp=None, **parameters):
"""General form:
RXXXXXXX N1 N2 VALUE
Examples:
R1 1 2 100
RC1 12 17 1K
N1 and N2 are the two element nodes. VALUE is the resistance (in ohms)
and may be positive or negative but not zero.
Semiconductor Resistors:
General form:
RXXXXXXX N1 N2 <VALUE> <MNAME> <L=LENGTH> <W=WIDTH> <TEMP=T>
Examples:
RLOAD 2 10 10K
RMOD 3 7 RMODEL L=10u W=1u
"""
inter.MNADevice.__init__(self, nodes, 0, **parameters)
self.value = value
self.rmodel = rmodel
self.l = l
self.w = w
self.temp = temp
def connect(self):
nplus, nminus = self.nodes
self.port2node = {0: self.get_node_index(nplus),
1: self.get_node_index(nminus)}
def update(self):
if self.value: # if non-zero:
g = 1.0 / self.value
else:
g = 1.0E12 # approximate short circuit for 0-resistance
self.jac[0, 0] = g
self.jac[0, 1] = -g
self.jac[1, 0] = -g
self.jac[1, 1] = g
def start(self, dt):
self.update()
def step(self, dt, t):
"""Do nothing here. Linear and time-invariant device."""
pass
class RBlock(sb.Block):
"""Schematic graphical inteface for R device."""
friendly_name = "Resistor"
family = "Elementary"
label = "R"
engine = R
symbol = sb.Symbol()
# resistor shape:
symbol.lines.append(((60, 0), (60, 20), (45, 25), (75, 35), (45, 45),
(75, 55), (45, 65), (75, 75), (60, 80), (60, 100)))
def __init__(self, name):
# init super:
sb.Block.__init__(self, name)
# ports:
self.ports['positive'] = sb.Port(self, 0, (60, 0))
self.ports['negative'] = sb.Port(self, 1, (60, 100))
# properties:
self.properties['Resistance (R)'] = 1.0
def get_engine(self, nodes):
return R(nodes, self.properties['Resistance (R)'])
```
#### File: subcircuit/devices/x.py
```python
import subcircuit.interfaces as inter
import subcircuit.sandbox as sb
from subcircuit.loader import load_engines_to_module
import sys
# Subcircuit instance device needs access to all other devices:
load_engines_to_module(sys.modules[__name__], "devices")
class X(inter.MNADevice):
"""Subckt instance device (SPICE X)"""
def __init__(self, nodes, subckt, **parameters):
"""Creates a new subckt instance device
        :param nodes: Device external node names
:param subckt: Subckt name
:param parameters: Dictionary of parameters for the subcircuit instance
:return: New subckt instance device
"""
inter.MNADevice.__init__(self, nodes, 0, **parameters)
self.subckt = subckt
self.parameters = parameters
def connect(self):
"""Maps the ports to the system node indexes.
:return: None
"""
self.port2node = {}
for p, n in zip(self.netlist.subckts[self.subckt].ports, self.nodes):
self.port2node[p] = n
class XBlock2Port(sb.Block):
"""Schematic graphical inteface for R device."""
friendly_name = "Subcircuit 2-Port"
family = "Subcircuit"
label = "X"
engine = X
def __init__(self, name):
# init super:
sb.Block.__init__(self, name)
# ports:
self.ports['port1'] = sb.Port(self, 0, (60, 0))
self.ports['port2'] = sb.Port(self, 1, (60, 100))
# properties:
self.properties['Subckt Name'] = "RLCSeries"
self.properties['Port 1 Name'] = "1"
self.properties['Port 2 Name'] = "2"
self.properties['Device 1'] = "R((1, 3), 1.0)"
self.properties['Device 2'] = "L((3, 4), 0.001)"
self.properties['Device 3'] = "C((4, 2), 0.001)"
self.properties['Device 4'] = ""
self.properties['Device 5'] = ""
self.properties['Device 6'] = ""
self.properties['Device 7'] = ""
self.properties['Device 8'] = ""
self.properties['Device 9'] = ""
# leads:
self.lines.append(((60, 0), (60, 20), (45, 25), (75, 35), (45, 45),
(75, 55), (45, 65), (75, 75), (60, 80), (60, 100)))
def get_engine(self, nodes, netlist=None):
ports = self.properties['Port 1 Name'], self.properties['Port 2 Name']
subckt = inter.Subckt(ports)
if netlist:
netlist.subckt(self.properties['Subckt Name'], subckt)
for i in range(1, 10):
devicedef = self.properties["Device {0}".format(i)]
if devicedef.strip():
try:
# this is the fun part where we try to evaluate the
# strings and turn it into a device record...
device = eval(devicedef, globals(), locals())
subckt.device(devicedef, device)
except Exception as e:
print(str(e))
return X(nodes, self.properties['Subckt Name'])
```
#### File: subcircuit/mathutils/lti.py
```python
import math
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import sympy
from sympy.parsing.sympy_parser import parse_expr
class LTISystem(object):
def __init__(self):
pass
def reset(self, dt):
raise NotImplementedError()
def step(self, u, dt, t):
raise NotImplementedError()
def rollback(self, dt, t):
raise NotImplementedError()
class StateSpace(LTISystem):
def __init__(self, a, b, c, d=None, xo=None):
"""TODO
:param a:
:param b:
:param c:
:param d:
:param xo:
:return:
"""
LTISystem.__init__(self)
# grab arguments:
self.a = a
self.b = b
self.c = c
self.d = d
self.xo = xo
# get system size:
self.n, x = a.shape
x, self.m = b.shape
self.l, x = c.shape
        if self.d is None:
            self.d = np.zeros((self.l, self.m))
        if self.xo is None:
            self.xo = np.zeros((self.n, 1))
self.x = self.xo[:, :]
self.xh = self.xo[:, :]
self.u = np.zeros((self.n, 1))
        self.y = None
def reset(self, dt):
self.x = self.xo[:, :]
self.xh = self.xo[:, :]
self.u = np.zeros((self.n, 1))
def step(self, u, dt, t):
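        # Explicit (forward) Euler integration of x' = A x + B u over one step dt.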
self.u = u
dx = self.a.dot(self.x) + self.b.dot(u)
self.x = self.x + dx.dot(dt)
self.y = self.c.dot(self.x) + self.d.dot(u)
self.xh = self.x[:, :]
return self.y
def rollback(self, dt, t):
pass
class TransferFunction(LTISystem):
def __init__(self, equation):
LTISystem.__init__(self)
self.is_gain = False
self.equation = equation
self.ss = None
self.reset(0.0)
def reset(self, dt):
equ = parse_expr(self.equation)
num, den = equ.as_numer_denom()
        self.is_gain = True  # assume a pure gain until an "s" term is found
if str(den).find("s") >= 0: # if s domain expr:
a = self.expr2poly(den)
self.is_gain = False
else:
a = [eval(str(den))]
if str(num).find("s") >= 0: # if s domain expr:
b = self.expr2poly(num)
self.is_gain = False
else:
b = [eval(str(num))]
        if self.is_gain:
            # Pure gain (no "s" terms): represent it as a static system with the
            # gain carried in the D matrix.
            A = np.zeros((1, 1))
            B = np.zeros((1, 1))
            C = np.zeros((1, 1))
            D = np.zeros((1, 1))
            D[0, 0] = float(b[0]) / float(a[0])
else:
            # now convert tf to ss (CCF):
            n = len(a)
ao = a[0]
b2 = np.zeros(n)
b2[(n - len(b)):] = b
bo = b2[0]
b = b2[:]
# truncate:
a = a[1:]
b = b[1:]
n -= 1
# normalize:
for i in range(n):
a[i] /= ao
b[i] /= ao
A = np.zeros((n, n))
B = np.zeros((n, 1))
C = np.zeros((1, n))
D = np.zeros((1, 1))
# a matrix and c vector:
for i in range(n):
for j in range(n):
if j == i+1:
A[i, j] = 1.0
elif i == n-1:
A[i, j] = -a[j]
C[0, i] = b[-(i+1)] - a[-(i+1)] * bo
# b vector:
B[-1, 0] = 1.0
# d matrix:
D[0, 0] = bo
self.ss = StateSpace(A, B, C, D)
def step(self, u, dt, t):
return self.ss.step(u, dt, t)
def rollback(self, dt, t):
pass
    @staticmethod
    def expr2poly(expr, n=None):
        poly = sympy.Poly(expr)
        # Convert the sympy coefficients to plain floats for the numpy math above.
        coeffs = [float(c) for c in poly.all_coeffs()]
        return coeffs
if __name__ == "__main__":
A = np.mat([[0, 1], [-1, -1]])
B = np.mat([[0], [1]])
C = np.mat([[1, 0]])
ss = StateSpace(A, B, C)
u = 10.0
tp = np.linspace(0.0, 1.0, 50)
to = 0.0
xp = np.zeros((len(tp)))
for i, t in enumerate(tp):
dt = t - to
y = ss.step(u, dt, t)
        xp[i] = y[0, 0]
print(y)
plt.plot(tp, xp)
plt.show()
```
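`TransferFunction.reset` parses the expression with sympy, normalises the polynomial coefficients, and builds a controllable-canonical-form realisation. A small sketch of the expected result, assuming it is run alongside the `lti.py` module above and that the expression is written in terms of `s`; the matrices should match the hand-built system in the `__main__` block:

```python
# Import path taken from the file header above; adjust if the package layout differs.
from subcircuit.mathutils.lti import TransferFunction

xfer = TransferFunction("1 / (s**2 + s + 1)")
print(xfer.ss.a)  # expected: [[ 0.  1.]
                  #            [-1. -1.]]
print(xfer.ss.b)  # expected: [[0.] [1.]]  (zeros with B[-1, 0] = 1)
print(xfer.ss.c)  # expected: [[1. 0.]]
print(xfer.ss.d)  # expected: [[0.]]
```

For a denominator s^2 + s + 1 the truncated, normalised coefficients are a = [1, 1], so the last row of A becomes [-1, -1], which is exactly the companion matrix used in the module's own test.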
#### File: SubCircuit/subcircuit/stimuli.py
```python
import math
import subcircuit.interfaces as inter
class Pulse(inter.Stimulus):
def __init__(self, v1, v2, td=0.0, tr=None, tf=None, pw=None, per=None):
"""
General form:
PULSE(V1 V2 TD TR TF PW PER)
Examples:
VIN 3 0 PULSE(-1 1 2NS 2NS 2NS 50NS 100NS)
parameter default value units
V1 (initial value) V or A
V2 (pulsed value) V or A
TD (delay time) 0.0 s
TR (rise time) TSTEP s
TF (fall time) TSTEP s
PW (pulse width) TSTOP s
PER (period) TSTOP seconds
A single pulse so specified is described by the following table:
time value
-----------------
0 V1
TD V1
TD+TR V2
TD+TR+PW V2
TD+TR+PW+TF V1
TSTOP V1
Intermediate points are determined by linear interpolation.
"""
self.v1 = v1
self.v2 = v2
self.td = td
self.tr = tr
self.tf = tf
self.pw = pw
self.per = per
self.device = None
def start(self, dt):
if self.tr is None:
self.tr = dt
if self.tf is None:
self.tf = dt
if self.pw is None:
self.pw = dt
if self.per is None:
self.per = float('inf')
return self.v1
def step(self, dt, t):
t %= self.per
if (self.td + self.tr) <= t < (self.td + self.tr + self.pw):
return self.v2
elif self.td <= t < (self.td + self.tr):
return self.v1 + (self.v2 - self.v1) * (t - self.td) / self.tr
elif ((self.td + self.tr + self.pw) <= t <
(self.td + self.tr + self.pw + self.tf)):
return (self.v2 + (self.v1 - self.v2) *
(t - (self.td + self.tr + self.pw)) / self.tf)
else:
return self.v1
def __str__(self):
s = "Pulse({0}, {1}, {2}, {3}, {4}, {5}, {6})".format(self.v1, self.v2,
self.td, self.tr,
self.tf, self.pw,
self.per)
return s
def __repr__(self):
return str(self)
class Sin(inter.Stimulus):
"""Models a sin wave stimulus for independent sources."""
def __init__(self, vo=0.0, va=1.0, freq=1.0, td=0.0, theta=0.0, phi=0.0):
"""
General form:
SIN(VO VA FREQ TD THETA)
Examples:
VIN 3 0 SIN(0 1 100MEG 1NS 1E10)
parameters default value units
VO (offset) V or A
VA (amplitude) V or A
FREQ (frequency) 1/TSTOP Hz
TD (delay) 0.0 s
THETA (damping factor) 0.0 s-1
The shape of the waveform is described by the following table:
time, t value
0 to TD VO
TD to TSTOP VO+VA.exp[-(t-TD)/THETA].sin[2pi.FREQ.(t+TD)]
"""
self.vo = vo
self.va = va
self.freq = freq
self.td = td
self.theta = theta
self.phi = phi
self.device = None
def start(self, dt):
"""Sets up the pulse stimulus and returns the initial output."""
return self.step(dt, 0.0)
def step(self, dt, t):
"""Update and return the stimulus value at the current time."""
        if t < self.td:
            return self.vo
        elif self.theta:
            return (self.vo + self.va * math.exp(-(t - self.td) / self.theta) *
                    math.sin(2.0 * math.pi * self.freq
                             * (t - self.td) + self.phi))
        else:
            return self.vo + self.va * math.sin(
                2.0 * math.pi * self.freq * (t - self.td) + self.phi)
def __str__(self):
s = "Sin({0}, {1}, {2}, {3}, {4}, {5})".format(self.vo, self.va,
self.freq, self.td,
self.theta, self.phi)
return s
def __repr__(self):
return str(self)
class Exp(inter.Stimulus):
"""Generates a SPICE EXP stimulus for independant sources."""
def __init__(self, v1, v2, td1=0.0, tau1=None, td2=None, tau2=None):
"""
        Define an exponential stimulus.
:param v1: initial value (V or A)
:param v2: pulsed value (V or A)
:param td1: rise time delay, default=0.0 (s)
:param tau1: rise time constant default=None (will be set to timestep) (s)
:param td2: fall delay time, default=None (will be set to td1 + timestep) (s)
:param tau2: fall time constant, default=None (will be set to timestep) (s)
:return: None
"""
self.v1 = v1
self.v2 = v2
self.td1 = td1
self.tau1 = tau1
self.td2 = td2
self.tau2 = tau2
def start(self, dt):
"""Initialize the Exp output at time 0s."""
if not self.tau1:
self.tau1 = dt
if not self.td2:
self.td2 = self.td1 + dt
if not self.tau2:
self.tau2 = dt
return self.step(dt, 0.0)
def step(self, dt, t):
"""Update and return the current value of the Exp stimulus"""
        if 0.0 <= t < self.td1:
            return self.v1
        elif self.td1 <= t < self.td2:
            return self.v1 + (self.v2 - self.v1) * (
                1.0 - math.exp(-(t - self.td1) / self.tau1))
        else:
            return (self.v1 + (self.v2 - self.v1)
                    * (1.0 - math.exp(-(t - self.td1) / self.tau1))
                    + (self.v1 - self.v2) * (
                        1.0 - math.exp(-(t - self.td2) / self.tau2)))
def __str__(self):
s = "Exp({0}, {1}, {2}, {3}, {4}, {5})".format(self.v1, self.v2,
self.td1, self.tau1,
self.td2, self.tau2)
return s
def __repr__(self):
return str(self)
class Pwl(inter.Stimulus):
"""TODO Doc"""
def __init__(self, *time_voltage_pairs):
self.xp = []
self.yp = []
for time, value in time_voltage_pairs:
try:
self.xp.append(float(time))
self.yp.append(float(value))
            except ValueError:
                pass
        self.device = None
def start(self, dt):
pass
def step(self, dt, t):
x = self.device.get_time()
return self._interp_(x)
def _interp_(self, x):
if x <= self.xp[0]:
return self.yp[0]
elif x >= self.xp[-1]:
return self.yp[-1]
else:
itr = 1
while x > self.xp[itr] and itr < (len(self.xp) - 1):
itr += 1
x0, y0 = self.xp[itr - 1], self.yp[itr - 1]
x1, y1 = self.xp[itr], self.yp[itr]
return y0 + (y1 - y0) * (x - x0) / (x1 - x0)
def __str__(self):
p = ""
for x, y in zip(self.xp, self.yp):
p += "({0}, {1}),".format(x, y)
s = "Pwl({0})".format(p.strip(","))
return s
def __repr__(self):
return str(self)
class Sffm(inter.Stimulus):
    """Single-frequency FM stimulus (standard SPICE SFFM waveform)."""
    def __init__(self, vo, va, fc, md1, fs):
        self.vo = vo
        self.va = va
        self.fc = fc
        self.md1 = md1
        self.fs = fs
    def start(self, dt):
        return self.step(dt, 0.0)
    def step(self, dt, t):
        # v(t) = vo + va*sin(2*pi*fc*t + md1*sin(2*pi*fs*t))
        return self.vo + self.va * math.sin(2.0 * math.pi * self.fc * t
                                            + self.md1 * math.sin(2.0 * math.pi * self.fs * t))
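# --- Usage sketch (illustrative, not part of the original module) ---
# With a fixed simulator timestep dt, the stimuli above are evaluated per step:
#
#     pulse = Pulse(v1=0.0, v2=5.0, td=1e-3, tr=1e-4, tf=1e-4, pw=2e-3, per=5e-3)
#     pulse.start(dt=1e-5)
#     [pulse.step(1e-5, t) for t in (0.0, 1.05e-3, 2e-3, 3.15e-3, 4e-3)]
#
# walks through the V1 -> rise -> V2 -> fall -> V1 table in the Pulse docstring,
# and Sin(vo=0.0, va=1.0, freq=50.0).step(1e-5, t) gives a plain 50 Hz sine once
# t exceeds td.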
``` |
{
"source": "Joe-houghton/CommandToDomoticz",
"score": 3
} |
#### File: Joe-houghton/CommandToDomoticz/CommandToDomoticz.py
```python
__author__ = '<NAME>'
import logging
import json
import requests
from collections import namedtuple
# CONFIG START
DOMOTICZ_USER = ''
DOMOTICZ_PASS = ''
DOMOTICZ_ADDRESS = 'http://1172.16.17.32:8080'
#CONFIG END
DomoticzDevice = namedtuple("DomoticzDevice", "name idx type")
class Domoticz:
def __init__(self, address, user, password):
self.address = address
self.user = user
self.password = password
self.devicesAndScenes = []
def __populateUsingURL(self, url, deviceType):
requestUrl = self.address + url
print("RequestUrl: " + requestUrl)
        response = requests.get(requestUrl, auth=(self.user, self.password))
if response.status_code != 200:
print("Bad Request")
return None
jsonDevices = response.json()
if "result" in jsonDevices:
devices = jsonDevices["result"]
for device in devices:
tempDevice = DomoticzDevice(device["Name"], device["idx"], deviceType)
self.devicesAndScenes.append(tempDevice)
def __populateDevicesAndScenes(self):
self.devicesAndScenes = []
getAllSwitches = "/json.htm?type=command¶m=getlightswitches"
self.__populateUsingURL(getAllSwitches, 0)
getAllScenes = "/json.htm?type=scenes"
self.__populateUsingURL(getAllScenes, 1)
def __doesDeviceExist(self, deviceName):
deviceNameToTest = deviceName.lower()
for device in self.devicesAndScenes:
            if device[0].lower() == deviceNameToTest:
return device
return None
def __getTargetDevice(self, words):
print("finding device...")
wordsLength = len(words)
targetDevice = ""
for i in range(wordsLength - 2):
if i > 0 :
targetDevice += " "
targetDevice += words[i + 2]
returnDevice = self.__doesDeviceExist(targetDevice)
            if returnDevice is not None:
return returnDevice
print("No matches for " + targetDevice)
return None # No matches
def __sendCommand(self, command, deviceId, deviceType):
        # e.g. '/json.htm?type=command&param=switchscene&idx=1'
url = ""
param = ""
if deviceType == 0:
param = "switchlight"
elif deviceType == 1:
param = "switchscene"
jsonString = '/json.htm?type=command&'
switchCommand = '&switchcmd='
seq = (self.address, jsonString, "param=", param, "&idx=", deviceId, switchCommand, command)
blankString = ''
url = blankString.join(seq)
print(url, '\n')
response = requests.get(url, auth=(self.user, self.password))
if response.status_code != 200:
print("Bad Send Request")
return None
def ProcessCommand(self, message):
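        """Parse a message and dispatch any recognised command lines.
        Supported forms (one per line):
            #command <switchcmd> <idx>            e.g. "#command On 5"
            #commandToScene <switchcmd> <idx>
            #commandByName <switchcmd> <device name...>
        Returns True if at least one command line was understood.
        """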
print("Processing Domoticz Command")
self.__populateDevicesAndScenes()
lines = message.split('\n')
commandUnderstood = False
for line in lines:
words = line.split(' ')
if len(words) > 2:
if words[0] == '#command':
commandUnderstood = True
self.__sendCommand(words[1], words[2], 0)
elif words[0] == '#commandToScene':
commandUnderstood = True
self.__sendCommand(words[1], words[2], 1)
elif words[0] == '#commandByName':
print("Processing Command by Name")
commandUnderstood = True
targetDevice = self.__getTargetDevice(words)
if targetDevice != None:
print("Target Device is " + targetDevice[0])
self.__sendCommand(words[1], targetDevice[1], targetDevice[2])
else:
print("Cannot find device: ")
return commandUnderstood
def PrintObject(self):
print("\nAddress: " + self.address)
print("\nUsername: " + self.user)
print("\nPassword: " + self.password)
def main():
domoticz = Domoticz(DOMOTICZ_ADDRESS, DOMOTICZ_USER, DOMOTICZ_PASS)
domoticz.PrintObject()
while True:
userInput = input("Please enter a Command: \n")
domoticz.ProcessCommand(userInput)
#domoticz.ProcessCommand("#commandByName On lamp")
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
``` |
{
"source": "JoeHowarth/meta_turing",
"score": 3
} |
#### File: meta_turing/PyTorch-GatedCNN-Lang-Model/model.py
```python
import numpy as np
import tensorflow as tf
class GatedCNN(object):
def __init__(self, conf):
tf.reset_default_graph()
self.X = tf.placeholder(shape=[conf.batch_size, conf.context_size-1], dtype=tf.int32, name="X")
self.y = tf.placeholder(shape=[conf.batch_size, conf.context_size-1], dtype=tf.int32, name="y")
embed = self.create_embeddings(self.X, conf)
h, res_input = embed, embed
for i in range(conf.num_layers):
fanin_depth = h.get_shape()[-1]
#last layer should have filter size of 1
filter_size = conf.filter_size if i < conf.num_layers-1 else 1
shape = (conf.filter_h, conf.filter_w, fanin_depth, filter_size)
with tf.variable_scope("layer_%d"%i):
conv_w = self.conv_op(h, shape, "linear")
conv_v = self.conv_op(h, shape, "gated")
h = conv_w * tf.sigmoid(conv_v)
if i % conf.block_size == 0:
h += res_input
res_input = h
h = tf.reshape(h, (-1, conf.embedding_size))
y_shape = self.y.get_shape().as_list()
self.y = tf.reshape(self.y, (y_shape[0] * y_shape[1], 1))
softmax_w = tf.get_variable("softmax_w", [conf.vocab_size, conf.embedding_size], tf.float32,
tf.random_normal_initializer(0.0, 0.1))
softmax_b = tf.get_variable("softmax_b", [conf.vocab_size], tf.float32, tf.constant_initializer(1.0))
#PROBLEM
        # Preference: NCE loss, hierarchical softmax, adaptive softmax
self.loss = tf.reduce_mean(tf.nn.nce_loss(softmax_w, softmax_b, h, self.y, conf.num_sampled, conf.vocab_size))
trainer = tf.train.MomentumOptimizer(conf.learning_rate, conf.momentum)
gradients = trainer.compute_gradients(self.loss)
clipped_gradients = [(tf.clip_by_value(_[0], -conf.grad_clip, conf.grad_clip), _[1]) for _ in gradients]
self.optimizer = trainer.apply_gradients(clipped_gradients)
self.perplexity = tf.exp(self.loss)
self.create_summaries()
def create_embeddings(self, X, conf):
#No getters in pytorch
embeddings = tf.get_variable("embeds",(conf.vocab_size, conf.embedding_size), tf.float32, tf.random_uniform_initializer(-1.0,1.0))
        # embeddings for the word sequences in this particular batch
        embed = tf.nn.embedding_lookup(embeddings, X)
        mask_layer = np.ones((conf.batch_size, conf.context_size-1, conf.embedding_size))
        mask_layer[:, 0:conf.filter_h // 2, :] = 0
embed *= mask_layer
embed_shape = embed.get_shape().as_list()
embed = tf.reshape(embed, (embed_shape[0], embed_shape[1], embed_shape[2], 1))
return embed
def conv_op(self, fan_in, shape, name):
W = tf.get_variable("%s_W"%name, shape, tf.float32, tf.random_normal_initializer(0.0, 0.1))
b = tf.get_variable("%s_b"%name, shape[-1], tf.float32, tf.constant_initializer(1.0))
return tf.add(tf.nn.conv2d(fan_in, W, strides=[1,1,1,1], padding='SAME'), b)
def create_summaries(self):
tf.summary.scalar("loss", self.loss)
tf.summary.scalar("perplexity", self.perplexity)
self.merged_summary_op = tf.summary.merge_all()
'''
variables_dict = {
"conv1_weights": tf.Variable(tf.random_normal([5, 5, 32, 32]),
name="conv1_weights")
"conv1_biases": tf.Variable(tf.zeros([32]), name="conv1_biases")
... etc. ...
}
def my_image_filter(input_images, variables_dict):
conv1 = tf.nn.conv2d(input_images, variables_dict["conv1_weights"],
strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(conv1 + variables_dict["conv1_biases"])
conv2 = tf.nn.conv2d(relu1, variables_dict["conv2_weights"],
strides=[1, 1, 1, 1], padding='SAME')
return tf.nn.relu(conv2 + variables_dict["conv2_biases"])
# The 2 calls to my_image_filter() now use the same variables
result1 = my_image_filter(image1, variables_dict)
result2 = my_image_filter(image2, variables_dict)
def conv_relu(input, kernel_shape, bias_shape):
# Create variable named "weights".
weights = tf.get_variable("weights", kernel_shape,
initializer=tf.random_normal_initializer())
# Create variable named "biases".
biases = tf.get_variable("biases", bias_shape,
initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(input, weights,
strides=[1, 1, 1, 1], padding='SAME')
return tf.nn.relu(conv + biases)
'''
``` |
{
"source": "joehowells/7drl2020",
"score": 2
} |
#### File: ecs/processors/defendaiprocessor.py
```python
from esper import Processor, World
from action import Action, ActionType
from constants import DijkstraMap
from ecs.components.boss import Boss
from ecs.components.defendtarget import DefendTarget
from ecs.components.firescroll import FireScroll
from ecs.components.healingpotion import HealingPotion
from ecs.components.inventory import Inventory
from ecs.components.item import Item
from ecs.components.map import Map
from ecs.components.monster import Monster
from ecs.components.player import Player
from ecs.components.position import Position
from ecs.components.smokebomb import SmokeBomb
from ecs.components.taunted import Taunted
from ecs.components.teleportscroll import TeleportScroll
from ecs.components.visible import Visible
from ecs.processors.spatialprocessor import Coincident
from functions import move_dijkstra, color_item_name
class DefendAIProcessor(Processor):
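    """Pick the player's defensive action for the turn.
    The checks below form a priority ladder: break a taunt by attacking, drink
    a healing potion when badly hurt, retreat from visible monsters, burn
    threatening groups with a fire scroll, fall back on smoke bombs or
    teleport scrolls while still threatened, and otherwise pick up items,
    explore, or wait.
    """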
def process(self):
self.world: World
for _, game_map in self.world.get_component(Map):
for player_entity, (player, player_position) in self.world.get_components(Player, Position):
if self.world.has_component(player_entity, Taunted):
player.defend_action = player.attack_action
return
if player.health < 9:
for entity, (item, _, _) in self.world.get_components(Item, Inventory, HealingPotion):
self.world.add_component(entity, DefendTarget())
player.defend_action = Action(
action_type=ActionType.USE_ITEM,
rage=-20,
nice_name=f"Use {color_item_name(self.world, entity)}",
)
return
for _ in self.world.get_components(Monster, Visible):
target = move_dijkstra(self.world, game_map, player_position, DijkstraMap.MONSTER, reverse=True)
if target:
player.defend_action = Action(
action_type=ActionType.MOVE,
rage=-1,
target=target,
nice_name="Retreat",
)
return
for entity, (item, _, _) in self.world.get_components(Item, Inventory, FireScroll):
candidates = []
for monster_entity, (monster, _) in self.world.get_components(Monster, Visible):
if self.world.has_component(monster_entity, Boss):
continue
if monster.visible_threat <= 0:
continue
candidate = (monster.actual_threat, monster.visible_threat, monster_entity, monster)
candidates.append(candidate)
if candidates:
candidates.sort(reverse=True)
_, _, monster_entity, monster = candidates[0]
self.world.add_component(monster_entity, DefendTarget())
self.world.add_component(entity, DefendTarget())
player.defend_action = Action(
action_type=ActionType.USE_ITEM,
rage=-20,
nice_name=f"Use {color_item_name(self.world, entity)}",
)
return
if player.actual_threat > 0:
for entity, (item, _, _) in self.world.get_components(Item, Inventory, SmokeBomb):
self.world.add_component(entity, DefendTarget())
player.defend_action = Action(
action_type=ActionType.USE_ITEM,
rage=-20,
nice_name=f"Use {color_item_name(self.world, entity)}",
)
return
for entity, (item, _, _) in self.world.get_components(Item, Inventory, TeleportScroll):
self.world.add_component(entity, DefendTarget())
player.defend_action = Action(
action_type=ActionType.USE_ITEM,
rage=-20,
nice_name=f"Use {color_item_name(self.world, entity)}",
)
return
player.defend_action = Action(
action_type=ActionType.WAIT,
rage=-1,
nice_name="Wait",
)
return
for entity, (item, _) in self.world.get_components(Item, Coincident):
player.defend_action = Action(
action_type=ActionType.GET_ITEM,
rage=-1,
nice_name=f"Get {color_item_name(self.world, entity)}",
)
return
target = move_dijkstra(self.world, game_map, player_position, DijkstraMap.ITEM)
if target:
player.defend_action = Action(
action_type=ActionType.MOVE,
rage=-1,
target=target,
nice_name="Gather items",
)
return
target = move_dijkstra(self.world, game_map, player_position, DijkstraMap.EXPLORE)
if target:
player.defend_action = Action(
action_type=ActionType.MOVE,
rage=-1,
target=target,
nice_name="Explore",
)
return
player.defend_action = Action(
action_type=ActionType.WAIT,
rage=-1,
nice_name="Wait",
)
```
#### File: ecs/processors/exploremapprocessor.py
```python
import itertools
from dataclasses import dataclass
from typing import Optional, Set, Tuple
from esper import Processor, World
from constants import DijkstraMap
from ecs.components.map import Map
from functions import dijkstra_map
@dataclass
class ExploreMapProcessor(Processor):
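    """Keep the EXPLORE Dijkstra map in sync with the set of unexplored tiles,
    recomputing it only when that set changes."""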
sources: Optional[Set[Tuple[int, int]]] = None
def process(self):
self.world: World
for _, game_map in self.world.get_component(Map):
sources: Set[Tuple[int, int]] = set()
for x, y in itertools.product(range(game_map.w), range(game_map.h)):
if not game_map.explored[y][x]:
sources.add((x, y))
if self.sources != sources:
game_map.dijkstra[DijkstraMap.EXPLORE] = dijkstra_map(game_map, sources)
self.sources = sources
```
#### File: ecs/processors/getitemprocessor.py
```python
from esper import Processor, World
import script
from action import ActionType
from constants import MAX_WEAPON, MAX_ARMOUR
from ecs.components.equipment import Equipment
from ecs.components.inventory import Inventory
from ecs.components.item import Item
from ecs.components.lastknownposition import LastKnownPosition
from ecs.components.message import Message
from ecs.components.player import Player
from ecs.components.position import Position
from ecs.processors.spatialprocessor import Coincident
from functions import color_item_name
class GetItemProcessor(Processor):
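    """Resolve GET_ITEM actions: weapon and armour upgrades are applied on
    pickup (capped at MAX_WEAPON/MAX_ARMOUR), anything else moves into the
    player's inventory."""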
def process(self):
self.world: World
for _, player in self.world.get_component(Player):
if player.action.action_type is ActionType.GET_ITEM:
for entity, (item, _) in self.world.get_components(Item, Coincident):
for equipment in self.world.try_component(entity, Equipment):
self.world.delete_entity(entity, immediate=True)
if equipment is Equipment.WEAPON:
if player.attack_equip >= MAX_WEAPON:
self.world.create_entity(Message(
text=script.WEAPON_UPGRADE_FAIL,
priority=45,
))
else:
player.attack_equip += 1
self.world.create_entity(Message(
text=script.WEAPON_UPGRADE,
priority=45,
))
if equipment is Equipment.ARMOUR:
if player.defend_equip >= MAX_ARMOUR:
self.world.create_entity(Message(
text=script.ARMOUR_UPGRADE_FAIL,
priority=45,
))
else:
player.defend_equip += 1
self.world.create_entity(Message(
text=script.ARMOUR_UPGRADE,
priority=45,
))
break
else:
self.world.remove_component(entity, Coincident)
self.world.remove_component(entity, Position)
self.world.remove_component(entity, LastKnownPosition)
self.world.add_component(entity, Inventory())
self.world.create_entity(Message(
text=script.PICKUP_ITEM.format(name=color_item_name(self.world, entity)),
priority=50,
))
```
#### File: ecs/processors/playerattackprocessor.py
```python
from random import randint
from esper import Processor, World
import script
from action import ActionType
from ecs.components.attacktarget import AttackTarget
from ecs.components.map import Map
from ecs.components.message import Message
from ecs.components.monster import Monster
from ecs.components.player import Player
from ecs.components.position import Position
class PlayerAttackProcessor(Processor):
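    """Resolve the player's ATTACK action against the marked AttackTarget,
    rolling randint(0, defend) against the player's attack stat."""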
def process(self):
self.world: World
for _, game_map in self.world.get_component(Map):
for _, (position, player) in self.world.get_components(Position, Player):
if player.action.action_type is ActionType.ATTACK:
for entity, (monster, _) in self.world.get_components(Monster, AttackTarget):
self.world.remove_component(entity, AttackTarget)
if randint(0, monster.defend) < player.attack:
self.world.create_entity(Message(
text=script.PLAYER_HIT.format(name=monster.name),
priority=50,
))
monster.health -= 1
if monster.health <= 0:
player.kills[monster.name] += 1
self.world.delete_entity(entity, immediate=True)
self.world.create_entity(Message(
text=script.PLAYER_KILL.format(name=monster.name),
priority=50,
))
else:
self.world.create_entity(Message(
text=script.PLAYER_MISS.format(name=monster.name),
priority=50,
))
```
#### File: ecs/processors/playerthreatprocessor.py
```python
from esper import Processor, World
from constants import MAX_THREAT
from ecs.components.map import Map
from ecs.components.monster import Monster
from ecs.components.player import Player
class PlayerThreatProcessor(Processor):
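    """Total the monsters' visible and actual threat onto the player,
    clamping both values to the range [0, MAX_THREAT]."""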
def process(self):
self.world: World
for _, game_map in self.world.get_component(Map):
for player_entity, player in self.world.get_component(Player):
player.visible_threat = 0
player.actual_threat = 0
for entity, monster in self.world.get_component(Monster):
player.visible_threat += monster.visible_threat
player.actual_threat += monster.actual_threat
player.visible_threat = min(max(player.visible_threat, 0), MAX_THREAT)
player.actual_threat = min(max(player.actual_threat, 0), MAX_THREAT)
```
#### File: ecs/processors/usestairsprocessor.py
```python
from typing import List, Collection, Any
from esper import Processor, World
import script
from action import ActionType
from constants import MAX_LEVEL
from ecs.components.dead import Dead
from ecs.components.gamestate import GameState
from ecs.components.inventory import Inventory
from ecs.components.item import Item
from ecs.components.message import Message
from ecs.components.player import Player
from factories.world import make_world
class UseStairsProcessor(Processor):
def process(self):
self.world: World
for player_entity, player in self.world.get_component(Player):
if player.action.action_type is ActionType.USE_STAIRS:
if player.level >= MAX_LEVEL:
self.world.add_component(player_entity, Dead())
self.world.create_entity(Message(text=script.GAME_COMPLETE, priority=-100))
self.world.create_entity(Message(text=script.GAME_OVER, priority=-200))
else:
player.level += 1
entities: List[Collection[Any]] = make_world(player=player, level=player.level)
for entity, _ in self.world.get_component(GameState):
entities.append(self.world.components_for_entity(entity))
for entity, _ in self.world.get_components(Item, Inventory):
entities.append(self.world.components_for_entity(entity))
self.world.clear_database()
for entity in entities:
self.world.create_entity(*entity)
self.world.create_entity(Message(text=script.USE_STAIRS, priority=100))
```
#### File: ecs/processors/visibilityprocessor.py
```python
from esper import Processor
from ecs.components.lastknownposition import LastKnownPosition
from ecs.components.map import Map
from ecs.components.position import Position
from ecs.components.visible import Visible
class VisibilityProcessor(Processor):
"""Determines whether entities are visible."""
def process(self):
for _, game_map in self.world.get_component(Map):
for entity, position in self.world.get_component(Position):
if game_map.visible[position.y][position.x]:
self.world.add_component(entity, Visible())
self.world.add_component(entity, LastKnownPosition(position.x, position.y))
else:
if self.world.has_component(entity, Visible):
self.world.remove_component(entity, Visible)
```
#### File: ecs/processors/visiblethreatprocessor.py
```python
from esper import Processor, World
from constants import DijkstraMap
from ecs.components.assassin import Assassin
from ecs.components.map import Map
from ecs.components.monster import Monster
from ecs.components.player import Player
from ecs.components.position import Position
from ecs.components.visible import Visible
class VisibleThreatProcessor(Processor):
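    """Compute each visible monster's threat as seen by the player: assassins
    only show threat when within range, and the player's defend stat is
    subtracted from the displayed value."""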
def process(self):
self.world: World
for _, game_map in self.world.get_component(Map):
for _, player in self.world.get_component(Player):
for entity, (monster, _, position) in self.world.get_components(Monster, Visible, Position):
distance = game_map.dijkstra[DijkstraMap.PLAYER][position.y][position.x]
in_range = 1 <= distance <= len(monster.threat)
if in_range or not self.world.has_component(entity, Assassin):
visible_threat = max(monster.threat)
visible_threat = max(0, visible_threat - player.defend)
else:
visible_threat = 0
monster.visible_threat = visible_threat
```
#### File: 7drl2020/factories/entities.py
```python
from typing import List, Any, Optional, Callable
from ecs.components.display import Display
from ecs.components.player import Player
from ecs.components.position import Position
from ecs.components.stairs import Stairs
from ecs.components.trap import Trap
def make_player(x: int, y: int, player: Optional[Player] = None) -> List[Any]:
if player is None:
player = Player()
return [
Display(0x0040),
player,
Position(x, y),
]
def make_stairs(x: int, y: int) -> List[Any]:
return [
Display(0x003E, draw_order=-2),
Stairs(),
Position(x, y),
]
def make_trap(x: int, y: int, factory: Callable[[int, int], List[Any]]) -> List[Any]:
return [
Display(
code=0x005E,
color=0xFF999999,
draw_order=-2,
),
Trap(factory),
Position(x, y),
]
```
#### File: 7drl2020/factories/items.py
```python
from random import choice
from typing import List, Any, Callable
from ecs.components.display import Display
from ecs.components.equipment import Equipment
from ecs.components.firescroll import FireScroll
from ecs.components.healingpotion import HealingPotion
from ecs.components.item import Item
from ecs.components.position import Position
from ecs.components.smokebomb import SmokeBomb
from ecs.components.teleportscroll import TeleportScroll
def make_healing_potion(x: int, y: int) -> List[Any]:
return [
Display(
code=0x0021,
color=0xFFFF0066,
draw_order=-1,
),
Item(
name="healing potion",
),
HealingPotion(),
Position(x, y),
]
def make_smoke_bomb(x: int, y: int) -> List[Any]:
return [
Display(
code=0x0021,
color=0xFF00FF66,
draw_order=-1,
),
Item(
name="smoke bomb",
),
SmokeBomb(),
Position(x, y),
]
def make_fire_scroll(x: int, y: int) -> List[Any]:
return [
Display(
code=0x003F,
color=0xFFFF6600,
draw_order=-1,
),
Item(
name="fire scroll",
),
FireScroll(),
Position(x, y),
]
def make_teleport_scroll(x: int, y: int) -> List[Any]:
return [
Display(
code=0x003F,
color=0xFF6600FF,
draw_order=-1,
),
Item(
name="teleport scroll",
),
TeleportScroll(),
Position(x, y),
]
def make_weapon(x: int, y: int) -> List[Any]:
return [
Display(
code=0x0029,
color=0xFF999999,
draw_order=-1,
),
Item(
name="weapon upgrade",
),
Equipment.WEAPON,
Position(x, y),
]
def make_armour(x: int, y: int) -> List[Any]:
return [
Display(
code=0x005B,
color=0xFF999999,
draw_order=-1,
),
Item(
name="armour upgrade",
),
Equipment.ARMOUR,
Position(x, y),
]
def get_item_factory() -> Callable[[int, int], List[Any]]:
return choice([
make_healing_potion,
make_healing_potion,
make_healing_potion,
make_healing_potion,
make_fire_scroll,
make_fire_scroll,
make_teleport_scroll,
make_teleport_scroll,
make_smoke_bomb,
make_smoke_bomb,
])
```
#### File: 7drl2020/factories/world.py
```python
from random import choice, shuffle
from typing import List, Any, Optional
from ecs.components.map import Map
from ecs.components.message import Message
from ecs.components.player import Player
from factories.rooms import make_enemy_room, make_trap_room, make_item_room, make_player_room, make_stairs_room, \
make_mid_boss_room, make_end_boss_room, make_weapon_room, make_armour_room
ROOM_FACTORIES = [
make_enemy_room,
make_enemy_room,
make_item_room,
make_item_room,
make_trap_room,
]
def make_world(player: Optional[Player] = None, level: int = 0) -> List[List[Any]]:
game_map = Map()
entities = [[game_map]]
big_rooms = [room for room in game_map.rooms if room.w >= 4 and room.h >= 4]
shuffle(big_rooms)
make_player_room(game_map, entities, big_rooms.pop(), player)
make_stairs_room(game_map, entities, big_rooms.pop())
make_weapon_room(game_map, entities, big_rooms.pop(), level)
make_armour_room(game_map, entities, big_rooms.pop(), level)
if level == 0:
entities.append([Message(text="You enter the dungeon.", priority=90)])
if level == 2:
entities.append([Message(text="[color=#FFFFFF00]The militia commander is on this level.[/color]", priority=90)])
make_mid_boss_room(game_map, entities, big_rooms.pop())
if level == 5:
entities.append([Message(text="[color=#FFFFFF00]The militia captain is on this level.[/color]", priority=90)])
make_end_boss_room(game_map, entities, big_rooms.pop())
while big_rooms:
factory = choice(ROOM_FACTORIES)
factory(game_map, entities, big_rooms.pop(), level)
return entities
``` |
{
"source": "joehowells/critical-keep",
"score": 2
} |
#### File: ecs/components/aicomponent.py
```python
class AIComponent:
def __init__(self):
self.awake: bool = False
```
#### File: ecs/components/combatcomponent.py
```python
class CombatComponent:
def __init__(self, max_hp, attack_stat, defend_stat, hit_stat, critical_stat):
self.max_hp = max_hp
self.cur_hp = max_hp
self.attack_stat = attack_stat
self.defend_stat = defend_stat
self.hit_stat = hit_stat
self.critical_stat = critical_stat
self.base_attack_stat = attack_stat
self.base_defend_stat = defend_stat
self.base_hit_stat = hit_stat
self.base_critical_stat = critical_stat
```
#### File: components/criticals/execute.py
```python
from typing import Any, Dict, List, Tuple, TYPE_CHECKING
from ecs.components.combatcomponent import CombatComponent
from ecs.components.criticals.abc import CriticalComponent
if TYPE_CHECKING:
from ecs.entity import Entity
class ExecuteComponent(CriticalComponent):
def damage(self, _: 'Entity', defender: 'Entity') -> int:
defender_combat = defender[CombatComponent]
return max(1, defender_combat.cur_hp - 1)
def critical(self, attacker: 'Entity', defender: 'Entity') -> List[Tuple[str, Dict[str, Any]]]:
damage = self.damage(attacker, defender)
defender_combat = defender[CombatComponent]
defender_combat.cur_hp = max(0, defender_combat.cur_hp - damage)
events = [('critical_execute', {'attacker': attacker, 'defender': defender, 'damage': damage})]
if defender_combat.cur_hp == 0:
events.append(('dead', {'defender': defender}))
return events
```
#### File: components/criticals/poison.py
```python
from typing import Any, Dict, List, Tuple, TYPE_CHECKING
from ecs.components.combatcomponent import CombatComponent
from ecs.components.criticals.abc import CriticalComponent
from ecs.components.poisoncomponent import PoisonComponent
from helper_functions import get_combat_base_damage
if TYPE_CHECKING:
from ecs.entity import Entity
class PoisonCriticalComponent(CriticalComponent):
def __init__(self, tier: int = 0) -> None:
self.tier = tier
self.name = f'poison ({self.tier+1})'
ticks = [
2,
4,
6,
]
self.tick = ticks[tier]
def damage(self, attacker: 'Entity', defender: 'Entity') -> int:
return int(1.5*get_combat_base_damage(attacker, defender))
def critical(self, attacker: 'Entity', defender: 'Entity') -> List[Tuple[str, Dict[str, Any]]]:
damage = self.damage(attacker, defender)
defender_combat = defender[CombatComponent]
defender_combat.cur_hp = max(0, defender_combat.cur_hp-damage)
if PoisonComponent in defender:
poison = defender[PoisonComponent]
poison.duration = 5
if poison.tick < self.tick:
poison.tick = self.tick
else:
defender.attach(PoisonComponent(tick=self.tick, duration=5))
events = [
('critical_poison', {'attacker': attacker, 'defender': defender, 'damage': damage}),
]
if defender_combat.cur_hp == 0:
events.append(('dead', {'defender': defender}))
return events
```
#### File: ecs/components/durabilitycomponent.py
```python
class DurabilityComponent:
def __init__(self, durability: int) -> None:
self.value = durability
```
#### File: ecs/components/smokecomponent.py
```python
class SmokeComponent:
def __init__(self, duration: int) -> None:
self.duration = duration
```
#### File: ecs/systems/combatsystem.py
```python
import random
from ecs.components.combatcomponent import CombatComponent
from ecs.components.criticals.abc import CriticalComponent
from ecs.components.inventorycomponent import InventoryComponent
from ecs.components.poisoncomponent import PoisonComponent
from ecs.components.randomnumbercomponent import RandomNumberComponent
from ecs.components.smokecomponent import SmokeComponent
from ecs.components.visiblecomponent import VisibleComponent
from helper_functions import get_combat_result, get_combat_base_damage, get_weapon
from project_types import CombatResult
class CombatSystem:
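    """Event-driven combat rules: per-turn poison and smoke ticking, attack
    resolution into miss/hit/critical events, damage application, and the
    smoke-bomb area effect."""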
def __init__(self, container):
self.container = container
def event_take_turn(self, entity):
if CombatComponent in entity:
combat: CombatComponent = entity[CombatComponent]
if PoisonComponent in entity:
poison: PoisonComponent = entity[PoisonComponent]
combat.cur_hp = max(0, combat.cur_hp-poison.tick)
self.container.event('poison_damage', entity=entity, damage=poison.tick)
if combat.cur_hp == 0:
self.container.event('dead', defender=entity)
poison.duration -= 1
if poison.duration <= 0:
entity.remove(PoisonComponent)
self.container.event('poison_expire', entity=entity)
if SmokeComponent in entity:
smoke: SmokeComponent = entity[SmokeComponent]
smoke.duration -= 1
if smoke.duration <= 0:
entity.remove(SmokeComponent)
self.container.event('smoke_expire', entity=entity)
def event_attack(self, attacker, defender):
# Get the attacker RN
result = get_combat_result(attacker)
if result is CombatResult.MISS:
self.container.event('miss', attacker=attacker, defender=defender)
if result is CombatResult.HIT:
self.container.event('hit', attacker=attacker, defender=defender)
if result is CombatResult.CRITICAL:
self.container.event('critical', attacker=attacker, defender=defender)
attacker[RandomNumberComponent].number = random.randint(0, 99)
weapon = get_weapon(attacker)
if weapon is not None:
self.container.event('degrade_item', item=weapon, amount=1)
if attacker is self.container.player:
self.container.target = defender
def event_hit(self, attacker, defender):
damage = get_combat_base_damage(attacker, defender)
defender_combat = defender[CombatComponent]
defender_combat.cur_hp = max(0, defender_combat.cur_hp-damage)
if defender_combat.cur_hp == 0:
self.container.event('dead', defender=defender)
def event_critical(self, attacker, defender):
critical: CriticalComponent = attacker[InventoryComponent].items[0][CriticalComponent]
events = critical.critical(attacker=attacker, defender=defender)
for event_type, kwargs in events:
self.container.event(event_type, **kwargs)
def event_use_smoke_bomb(self):
for entity in self.container.entities:
if CombatComponent in entity and VisibleComponent in entity and entity is not self.container.player:
entity.attach(SmokeComponent(duration=3))
self.container.event('smoke_attach', entity=entity)
```
#### File: ecs/systems/itempickupsystem.py
```python
from ecs.components.inventorycomponent import InventoryComponent
from ecs.components.itemcomponent import ItemComponent
from ecs.components.positioncomponent import PositionComponent
class ItemPickupSystem:
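    """When the player moves onto an item, place it in the first free
    inventory slot or report that the inventory is full."""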
def __init__(self, container):
self.container = container
def event_entity_moved(self, entity):
player = self.container.player
if entity is not player:
return
assert PositionComponent in player and InventoryComponent in player
for entity in self.container.entities:
coincident_item = (
ItemComponent in entity
and PositionComponent in entity
and entity[PositionComponent] == player[PositionComponent]
)
if coincident_item:
slot = next(
(
slot for slot in range(player[InventoryComponent].capacity)
if slot not in player[InventoryComponent].items
),
None,
)
if slot is None:
self.container.event('pickup_full', item=entity)
else:
player[InventoryComponent].items[slot] = entity
entity.remove(PositionComponent)
self.container.event('pickup_success', item=entity)
```
#### File: systems/panels/inventorypanel.py
```python
import string
import constants
from ecs.components.combatcomponent import CombatComponent
from ecs.components.displaycomponent import DisplayComponent
from ecs.components.durabilitycomponent import DurabilityComponent
from ecs.components.inventorycomponent import InventoryComponent
from ecs.components.namecomponent import NameComponent
from ecs.systems.panels.panel import Panel
from project_types import GameState
class InventoryPanel(Panel):
def update_contents(self):
target = self.container.target
if self.container.input_mode is GameState.STATUS_DROP:
title = 'Discard which item?'
elif self.container.input_mode is GameState.STATUS_USE:
title = 'Use which item?'
elif self.container.input_mode is GameState.STATUS_VIEW:
if target is self.container.player:
title = 'Your inventory'
else:
title = f'The {target[NameComponent].name}\'s inventory'
else:
assert False # Unreachable branch
self.console.draw_frame(x=0, y=0, width=self.w, height=self.h, title=title)
if target is not None:
cc: CombatComponent = target[CombatComponent]
self.console.print(x=2, y=2, string=f'Health: {cc.cur_hp:>2d}/{cc.max_hp:>2d}')
self.console.print(x=2, y=4, string=f'Attack: {cc.attack_stat:>2d}')
self.console.print(x=2, y=5, string=f'Defend: {cc.defend_stat:>2d}')
self.console.print(x=2, y=6, string=f'Hit: {cc.hit_stat:>3d}%')
self.console.print(x=2, y=7, string=f'Critical: {cc.critical_stat:>3d}%')
ic: InventoryComponent = target[InventoryComponent]
item_entity = ic.items.get(0)
if item_entity is None:
self.console.print(x=20, y=2, string=f'a) None', fg=constants.COLOR_GRAY1)
else:
name = item_entity[NameComponent].name
fg = item_entity[DisplayComponent].fg
self.console.print(x=20, y=2, string=f'a) {name}', fg=fg)
if DurabilityComponent in item_entity:
durability = item_entity[DurabilityComponent].value
self.console.print(x=48, y=2, string=f'{durability:>2d}', fg=fg)
for i, letter in zip(range(1, ic.capacity), string.ascii_lowercase[1:]):
item_entity = ic.items.get(i)
if item_entity is None:
self.console.print(x=20, y=3+i, string=f'{letter}) None', fg=constants.COLOR_GRAY1)
else:
name = item_entity[NameComponent].name
fg = item_entity[DisplayComponent].fg
self.console.print(x=20, y=3+i, string=f'{letter}) {name}', fg=fg)
if DurabilityComponent in item_entity:
durability = item_entity[DurabilityComponent].value
self.console.print(x=48, y=3+i, string=f'{durability:>2d}', fg=fg)
```
#### File: systems/panels/messagepanel.py
```python
import textwrap
from ecs.systems.panels.panel import Panel
class MessagePanel(Panel):
def update_contents(self):
w = self.w - 4
h = self.h - 4
old_messages = self.container.buffer[-h:]
new_messages = []
for message, color in old_messages:
for line in textwrap.wrap(message, w):
new_messages.append((line, color))
new_messages = new_messages[-h:]
self.console.draw_frame(0, 0, self.w, self.h, title='Messages')
for i, (message, color) in enumerate(new_messages):
self.console.print(x=2, y=i+2, string=message, fg=color)
```
#### File: systems/panels/titlepanel.py
```python
import textwrap
import constants
from ecs.systems.panels.panel import Panel
class TitlePanel(Panel):
def update_contents(self):
wrap_lines = []
with open('data/title.txt') as file:
for line in file:
file_line = line.rstrip()
if not file_line:
wrap_lines.append('')
continue
wrap_lines.extend(textwrap.wrap(file_line, self.w))
for i, line in enumerate(wrap_lines):
y = i
if i == 0:
self.console.print(x=0, y=y, string='Critical', fg=constants.COLOR_CRITICAL)
self.console.print(x=9, y=y, string='Keep', fg=constants.COLOR_MISS)
else:
if i in (2, 13, 34, 41):
fg = constants.COLOR_YELLOW
else:
fg = constants.COLOR_WHITE
self.console.print(x=0, y=y, string=line, fg=fg)
```
#### File: ecs/systems/test_cursorsystem.py
```python
from unittest import TestCase
from ecs.components.combatcomponent import CombatComponent
from ecs.components.cursorcomponent import CursorComponent
from ecs.components.inventorycomponent import InventoryComponent
from ecs.components.positioncomponent import PositionComponent
from ecs.components.randomnumbercomponent import RandomNumberComponent
from ecs.container import Container
from ecs.entity import Entity
from ecs.systems.cursorsystem import CursorSystem
class TestCursorSystem(TestCase):
def test_first(self):
"""Target the first coincident entity."""
container = Container()
system = CursorSystem(container)
cursor = Entity(PositionComponent(1, 1), CursorComponent())
first = Entity(PositionComponent(1, 1))
second = Entity(PositionComponent(1, 1))
container.entities = [
cursor,
first,
second,
]
system.check_target()
self.assertIs(container.target, first)
def test_no_coincident(self):
"""Target nothing if there are no coincident entities."""
container = Container()
system = CursorSystem(container)
cursor = Entity(PositionComponent(1, 1), CursorComponent())
first = Entity(PositionComponent(2, 2))
container.entities = [
cursor,
first,
]
system.check_target()
self.assertIsNone(container.target)
def test_stat_second(self):
"""Target the first coincident entity with a CombatComponent and InventoryComponent."""
container = Container()
system = CursorSystem(container)
cursor = Entity(PositionComponent(1, 1), CursorComponent())
first = Entity(PositionComponent(1, 1))
second = Entity(PositionComponent(1, 1), CombatComponent(0, 0, 0, 0, 0), InventoryComponent(0))
container.entities = [
cursor,
first,
second,
]
system.check_target()
self.assertIs(container.target, second)
def test_swap_second(self):
"""Target the first coincident entity with a RandomNumberComponent."""
container = Container()
system = CursorSystem(container)
cursor = Entity(PositionComponent(1, 1), CursorComponent())
first = Entity(PositionComponent(1, 1))
second = Entity(PositionComponent(1, 1), RandomNumberComponent())
container.entities = [
cursor,
first,
second,
]
system.check_target()
self.assertIs(container.target, second)
```
#### File: src/factories/consumable.py
```python
import random
import constants
from ecs.components import consumables
from ecs.components.displaycomponent import DisplayComponent
from ecs.components.durabilitycomponent import DurabilityComponent
from ecs.components.itemcomponent import ItemComponent
from ecs.components.namecomponent import NameComponent
from ecs.entity import Entity
from project_types import DrawLayer
def random_consumable():
choices = [
elixir,
elixir,
smoke_bomb,
tonic,
antidote,
]
choice = random.choice(choices)
return choice()
def elixir():
return Entity(
NameComponent('elixir'),
DisplayComponent(char=chr(173), fg=constants.COLOR_WHITE, layer=DrawLayer.ITEM),
ItemComponent(),
consumables.Elixir(),
DurabilityComponent(durability=5),
)
def smoke_bomb():
return Entity(
NameComponent('smoke bomb'),
DisplayComponent(char=chr(173), fg=constants.COLOR_WHITE, layer=DrawLayer.ITEM),
ItemComponent(),
consumables.SmokeBomb(),
DurabilityComponent(durability=3),
)
def tonic():
pool = [
('health tonic', consumables.HealthTonic, 1),
('attack tonic', consumables.AttackTonic, 1),
('defend tonic', consumables.DefendTonic, 1),
('hit tonic', consumables.HitTonic, 1),
('critical tonic', consumables.CriticalTonic, 1),
]
name, constructor, durability = random.choice(pool)
return Entity(
NameComponent(name),
DisplayComponent(char=chr(173), fg=constants.COLOR_YELLOW, layer=DrawLayer.ITEM),
ItemComponent(),
constructor(),
DurabilityComponent(durability=durability),
)
def antidote():
return Entity(
NameComponent('antidote'),
DisplayComponent(char=chr(173), fg=constants.COLOR_WHITE, layer=DrawLayer.ITEM),
ItemComponent(),
consumables.Antidote(),
DurabilityComponent(durability=3),
)
```
#### File: src/factories/cursor.py
```python
import constants
from ecs.components.cursorcomponent import CursorComponent
from ecs.components.displaycomponent import DisplayComponent
from ecs.components.positioncomponent import PositionComponent
from ecs.entity import Entity
from project_types import DrawLayer
def make_cursor(x, y):
return Entity(
CursorComponent(),
DisplayComponent(char='X', fg=constants.COLOR_YELLOW, layer=DrawLayer.CURSOR),
PositionComponent(x=x, y=y),
)
```
#### File: src/factories/throne.py
```python
import constants
from ecs.components.displaycomponent import DisplayComponent
from ecs.components.namecomponent import NameComponent
from ecs.components.positioncomponent import PositionComponent
from ecs.components.thronecomponent import ThroneComponent
from ecs.entity import Entity
from project_types import DrawLayer
def make_throne(x, y):
return Entity(
DisplayComponent(char=chr(210), fg=constants.COLOR_YELLOW, layer=DrawLayer.THRONE),
NameComponent(name='throne'),
PositionComponent(x, y),
ThroneComponent(),
)
```
#### File: src/factories/weapon.py
```python
import random
import constants
from constants import WEAPON_TIER_SELECTION
from ecs.components.criticals.abc import CriticalComponent
from ecs.components.criticals.cleave import CleaveComponent
from ecs.components.criticals.execute import ExecuteComponent
from ecs.components.criticals.extradamage import ExtraDamageComponent
from ecs.components.criticals.knockback import KnockbackComponent
from ecs.components.criticals.poison import PoisonCriticalComponent
from ecs.components.criticals.shatter import ShatterComponent
from ecs.components.criticals.thunder import ThunderComponent
from ecs.components.displaycomponent import DisplayComponent
from ecs.components.durabilitycomponent import DurabilityComponent
from ecs.components.itemcomponent import ItemComponent
from ecs.components.namecomponent import NameComponent
from ecs.components.weaponcomponent import WeaponComponent
from ecs.entity import Entity
from project_types import DrawLayer
def make_sword(level: int) -> Entity:
if random.randint(0, constants.DUNGEON_DEPTH) < level:
return make_exotic_sword(level)
else:
return make_normal_sword(level)
def make_axe(level: int) -> Entity:
if random.randint(0, constants.DUNGEON_DEPTH) < level:
return make_exotic_axe(level)
else:
return make_normal_axe(level)
def make_spear(level: int) -> Entity:
if random.randint(0, constants.DUNGEON_DEPTH) < level:
return make_exotic_spear(level)
else:
return make_normal_spear(level)
def make_bow(level: int) -> Entity:
if random.randint(0, constants.DUNGEON_DEPTH) < level:
return make_exotic_bow(level)
else:
return make_normal_bow(level)
def make_normal_sword(level: int) -> Entity:
choices = WEAPON_TIER_SELECTION[min(len(WEAPON_TIER_SELECTION) - 1, level)]
choice = random.choice(choices)
factories = [
make_sword1,
make_sword2,
make_sword3,
]
factory = factories[choice]
return factory()
def make_normal_axe(level: int) -> Entity:
choices = WEAPON_TIER_SELECTION[min(len(WEAPON_TIER_SELECTION) - 1, level)]
choice = random.choice(choices)
factories = [
make_axe1,
make_axe2,
make_axe3,
]
factory = factories[choice]
return factory()
def make_normal_spear(level: int) -> Entity:
choices = WEAPON_TIER_SELECTION[min(len(WEAPON_TIER_SELECTION) - 1, level)]
choice = random.choice(choices)
factories = [
make_spear1,
make_spear2,
make_spear3,
]
factory = factories[choice]
return factory()
def make_normal_bow(level: int) -> Entity:
choices = WEAPON_TIER_SELECTION[min(len(WEAPON_TIER_SELECTION) - 1, level)]
choice = random.choice(choices)
factories = [
make_crossbow1,
make_crossbow2,
make_crossbow3,
]
factory = factories[choice]
return factory()
def make_exotic_sword(level: int) -> Entity:
basic = make_normal_sword(level)
wc: WeaponComponent = basic[WeaponComponent]
wc.critical_bonus += 10
dc: DurabilityComponent = basic[DurabilityComponent]
dc.value += 10
    constructors = [
CleaveComponent,
KnockbackComponent,
ShatterComponent,
PoisonCriticalComponent,
]
constructor = random.choice(constructors)
tiers = WEAPON_TIER_SELECTION[min(len(WEAPON_TIER_SELECTION) - 1, level)]
tier = random.choice(tiers)
critical = constructor(tier=tier)
basic.remove(CriticalComponent)
basic.attach(critical)
basic[NameComponent].name = f'{critical.name} sword'
basic[DisplayComponent].fg = constants.COLOR_YELLOW
return basic
def make_exotic_axe(level: int) -> Entity:
basic = make_normal_axe(level)
wc: WeaponComponent = basic[WeaponComponent]
wc.critical_bonus += 10
dc: DurabilityComponent = basic[DurabilityComponent]
dc.value += 10
    constructors = [
ExtraDamageComponent,
KnockbackComponent,
ShatterComponent,
PoisonCriticalComponent,
]
constructor = random.choice(constructors)
tiers = WEAPON_TIER_SELECTION[min(len(WEAPON_TIER_SELECTION) - 1, level)]
tier = random.choice(tiers)
critical = constructor(tier=tier)
basic.remove(CriticalComponent)
basic.attach(critical)
basic[NameComponent].name = f'{critical.name} axe'
basic[DisplayComponent].fg = constants.COLOR_YELLOW
return basic
def make_exotic_spear(level: int) -> Entity:
basic = make_normal_spear(level)
wc: WeaponComponent = basic[WeaponComponent]
wc.critical_bonus += 10
dc: DurabilityComponent = basic[DurabilityComponent]
dc.value += 10
    constructors = [
CleaveComponent,
ExtraDamageComponent,
ShatterComponent,
PoisonCriticalComponent,
]
constructor = random.choice(constructors)
tiers = WEAPON_TIER_SELECTION[min(len(WEAPON_TIER_SELECTION) - 1, level)]
tier = random.choice(tiers)
critical = constructor(tier=tier)
basic.remove(CriticalComponent)
basic.attach(critical)
basic[NameComponent].name = f'{critical.name} spear'
basic[DisplayComponent].fg = constants.COLOR_YELLOW
return basic
def make_exotic_bow(level: int) -> Entity:
basic = make_normal_bow(level)
wc: WeaponComponent = basic[WeaponComponent]
wc.critical_bonus += 10
dc: DurabilityComponent = basic[DurabilityComponent]
dc.value += 10
    constructors = [
CleaveComponent,
KnockbackComponent,
ShatterComponent,
PoisonCriticalComponent,
]
constructor = random.choice(constructors)
tiers = WEAPON_TIER_SELECTION[min(len(WEAPON_TIER_SELECTION) - 1, level)]
tier = random.choice(tiers)
critical = constructor(tier=tier)
basic.remove(CriticalComponent)
basic.attach(critical)
basic[NameComponent].name = f'{critical.name} crossbow'
basic[DisplayComponent].fg = constants.COLOR_YELLOW
return basic
def make_sword_player():
return Entity(
NameComponent('ancestral sword'),
DisplayComponent(char=')', fg=constants.COLOR_YELLOW, layer=DrawLayer.ITEM),
ItemComponent(droppable=False),
WeaponComponent(attack_bonus=3, hit_bonus=80, critical_bonus=5),
ExtraDamageComponent(tier=0),
)
def make_sword1():
return Entity(
NameComponent('bronze sword'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(attack_bonus=5, hit_bonus=70),
DurabilityComponent(durability=30),
ExtraDamageComponent(tier=0),
)
def make_sword2():
return Entity(
NameComponent('iron sword'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(attack_bonus=8, hit_bonus=65),
DurabilityComponent(durability=25),
ExtraDamageComponent(tier=1),
)
def make_sword3():
return Entity(
NameComponent('steel sword'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(attack_bonus=13, hit_bonus=60),
DurabilityComponent(durability=20),
ExtraDamageComponent(tier=2),
)
def make_axe1():
return Entity(
NameComponent('bronze axe'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(attack_bonus=9, hit_bonus=50),
DurabilityComponent(durability=30),
CleaveComponent(tier=0),
)
def make_axe2():
return Entity(
NameComponent('iron axe'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(attack_bonus=12, hit_bonus=45),
DurabilityComponent(durability=25),
CleaveComponent(tier=1),
)
def make_axe3():
return Entity(
NameComponent('steel axe'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(attack_bonus=17, hit_bonus=40),
DurabilityComponent(durability=20),
CleaveComponent(tier=2),
)
def make_spear1():
return Entity(
NameComponent('bronze spear'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(attack_bonus=7, hit_bonus=60),
DurabilityComponent(durability=30),
KnockbackComponent(tier=0),
)
def make_spear2():
return Entity(
NameComponent('iron spear'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(attack_bonus=10, hit_bonus=55),
DurabilityComponent(durability=25),
KnockbackComponent(tier=1),
)
def make_spear3():
return Entity(
NameComponent('steel spear'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(attack_bonus=15, hit_bonus=60),
DurabilityComponent(durability=20),
KnockbackComponent(tier=2),
)
def make_crossbow1():
return Entity(
NameComponent('light crossbow'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(max_range=constants.FOV_RADIUS, attack_bonus=5, hit_bonus=50),
DurabilityComponent(durability=30),
ExtraDamageComponent(tier=0),
)
def make_crossbow2():
return Entity(
NameComponent('heavy crossbow'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(max_range=constants.FOV_RADIUS, attack_bonus=8, hit_bonus=45),
DurabilityComponent(durability=25),
ExtraDamageComponent(tier=1),
)
def make_crossbow3():
return Entity(
NameComponent('arbalest'),
DisplayComponent(char=')', fg=constants.COLOR_GRAY3, layer=DrawLayer.ITEM),
ItemComponent(),
WeaponComponent(max_range=constants.FOV_RADIUS, attack_bonus=13, hit_bonus=40),
DurabilityComponent(durability=20),
ExtraDamageComponent(tier=2),
)
def make_midboss_weapon():
return Entity(
NameComponent('book of lightning'),
DisplayComponent(char=')', fg=constants.COLOR_YELLOW, layer=DrawLayer.ITEM),
ItemComponent(droppable=False),
WeaponComponent(max_range=constants.FOV_RADIUS, attack_bonus=15, hit_bonus=0, smite=True),
ThunderComponent(),
)
def make_endboss_weapon():
return Entity(
NameComponent('executioner\'s sword'),
DisplayComponent(char=')', fg=constants.COLOR_YELLOW, layer=DrawLayer.ITEM),
ItemComponent(droppable=False),
WeaponComponent(attack_bonus=15, hit_bonus=30),
ExecuteComponent(),
)
```
#### File: src/factories/world.py
```python
import random
from typing import List
import constants
from ecs.components.positioncomponent import PositionComponent
from ecs.entity import Entity
from factories.chest import make_chest
from factories.enemy import make_boss, make_enemy, make_endboss, make_midboss
from factories.map import Room, make_map
from factories.player import make_player
from factories.throne import make_throne
from map import Map
def make_world(level, player=None):
entities = []
game_map: Map = make_map()
if player is None:
player = make_player(0, 0)
rooms = game_map.rooms
endpoints = game_map.endpoints
start_room = endpoints[0]
entities.extend(populate_start_room(start_room, player=player))
final_room = endpoints[-1]
entities.extend(populate_final_room(final_room, level))
for room in endpoints[1:-1]:
entities.extend(populate_treasure_room(room, level))
for room in rooms:
if room not in endpoints and room.w > 2 and room.h > 2:
entities.extend(populate_room(room, level))
return entities, game_map, player
def populate_start_room(room: Room, player: Entity) -> List[Entity]:
entities = []
cells = room.cells
random.shuffle(cells)
x, y = cells.pop()
player[PositionComponent].x = x
player[PositionComponent].y = y
entities.append(player)
for _ in range(random.randint(0, 1)):
x, y = cells.pop()
entities.append(make_chest(x=x, y=y))
return entities
def populate_room(room: Room, level: int) -> List[Entity]:
entities = []
case = random.randint(1, 4)
cells = room.cells
random.shuffle(cells)
for _ in range(random.randint(1, max(1, room.w*room.h // 16))):
x, y = cells.pop()
entities.append(make_enemy(x=x, y=y, level=level, case=case))
for _ in range(random.randint(0, max(1, room.w*room.h // 32))):
x, y = cells.pop()
entities.append(make_chest(x=x, y=y))
return entities
def populate_treasure_room(room: Room, level: int) -> List[Entity]:
entities = []
case = random.randint(1, 4)
cells = room.cells
random.shuffle(cells)
for _ in range(random.randint(0, max(2, room.w*room.h // 16))):
x, y = cells.pop()
entities.append(make_enemy(x=x, y=y, level=level, case=case))
for _ in range(random.randint(2, max(2, room.w*room.h // 16))):
x, y = cells.pop()
entities.append(make_chest(x=x, y=y))
return entities
def populate_final_room(room: Room, level: int) -> List[Entity]:
entities = []
cells = room.cells
random.shuffle(cells)
x, y = cells.pop()
if level == constants.DUNGEON_MIDBOSS:
entities.append(make_midboss(x, y))
elif level == constants.DUNGEON_ENDBOSS:
entities.append(make_endboss(x, y))
else:
case = random.randint(1, 4)
entities.append(make_boss(x, y, level, case))
x, y = cells.pop()
entities.append(make_throne(x, y))
for _ in range(random.randint(2, max(2, room.w*room.h // 16))):
x, y = cells.pop()
entities.append(make_chest(x=x, y=y))
return entities
``` |
{
"source": "JoeHowse/VisualizingTheInvisible",
"score": 3
} |
#### File: JoeHowse/VisualizingTheInvisible/LiveVideoAsusXtion.py
```python
import cv2
import numpy
__author__ = '<NAME>'
__copyright__ = 'Copyright (c) 2019, Nummist Media Corporation Limited'
__credits__ = ['<NAME>']
__license__ = 'BSD 3-Clause'
__version__ = '0.0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Prototype'
def main():
capture = cv2.VideoCapture(cv2.CAP_OPENNI2_ASUS)
channel = cv2.CAP_OPENNI_IR_IMAGE
success = capture.grab()
if success:
success, image = capture.retrieve(flag=channel)
while success:
if image is not None:
if channel == cv2.CAP_OPENNI_IR_IMAGE:
# Assume the image is 10-bit.
# Convert it to 8-bit.
image = (image >> 2).astype(numpy.uint8)
elif channel == cv2.CAP_OPENNI_DEPTH_MAP:
# Assume the image is 12-bit (max depth 4.096m).
# Convert it to 8-bit.
image = (image >> 4).astype(numpy.uint8)
cv2.imshow('Live Video', image)
keycode = cv2.waitKey(1)
if keycode == ord('1'):
channel = cv2.CAP_OPENNI_IR_IMAGE
elif keycode == ord('2'):
channel = cv2.CAP_OPENNI_DEPTH_MAP
elif keycode == ord('3'):
channel = cv2.CAP_OPENNI_VALID_DEPTH_MASK
elif keycode == ord('4'):
channel = cv2.CAP_OPENNI_DISPARITY_MAP
elif keycode == ord('5'):
channel = cv2.CAP_OPENNI_POINT_CLOUD_MAP
elif keycode == 27:
# The user pressed the escape key.
# Quit.
break
success = capture.grab()
if success:
success, image = capture.retrieve(flag=channel)
if __name__ == '__main__':
main()
```
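The bit-shift conversions in the loop above (10-bit IR narrowed with `>> 2`, 12-bit depth with `>> 4`) can be sanity-checked on synthetic arrays. A minimal sketch with made-up sample values is shown below.
```python
# Hedged sketch: checking the shift-based 8-bit conversions used above.
# The input values are arbitrary; only the bit arithmetic matters.
import numpy

ir_10bit = numpy.array([0, 512, 1023], dtype=numpy.uint16)
depth_12bit = numpy.array([0, 2048, 4095], dtype=numpy.uint16)

print((ir_10bit >> 2).astype(numpy.uint8))     # [  0 128 255]
print((depth_12bit >> 4).astype(numpy.uint8))  # [  0 128 255]
```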
#### File: JoeHowse/VisualizingTheInvisible/PySpinCapture.py
```python
import PySpin
import cv2
__author__ = '<NAME>'
__copyright__ = 'Copyright (c) 2018, Nummist Media Corporation Limited'
__credits__ = ['<NAME>']
__license__ = 'BSD 3-Clause'
__version__ = '0.0.1'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Prototype'
class PySpinCapture:
def __init__(self, index, roi, binning_radius=1, is_monochrome=False):
self._system = system = PySpin.System.GetInstance()
self._camera_list = system.GetCameras()
self._camera = self._camera_list.GetByIndex(index)
self._camera.Init()
self._nodemap = self._camera.GetNodeMap()
# Enable continuous acquisition mode.
node_acquisition_mode = PySpin.CEnumerationPtr(self._nodemap.GetNode(
'AcquisitionMode'))
node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName(
'Continuous')
acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()
node_acquisition_mode.SetIntValue(acquisition_mode_continuous)
# Set the pixel format.
node_pixel_format = PySpin.CEnumerationPtr(self._nodemap.GetNode('PixelFormat'))
if is_monochrome:
# Enable Mono8 mode.
node_pixel_format_mono8 = PySpin.CEnumEntryPtr(
node_pixel_format.GetEntryByName('Mono8'))
pixel_format_mono8 = node_pixel_format_mono8.GetValue()
node_pixel_format.SetIntValue(pixel_format_mono8)
else:
# Enable BGR8 mode.
node_pixel_format_bgr8 = PySpin.CEnumEntryPtr(
node_pixel_format.GetEntryByName('BGR8'))
pixel_format_bgr8 = node_pixel_format_bgr8.GetValue()
node_pixel_format.SetIntValue(pixel_format_bgr8)
# Set the vertical binning radius.
# The horizontal binning radius is automatically set to the same value.
node_binning_vertical = PySpin.CIntegerPtr(self._nodemap.GetNode(
'BinningVertical'))
node_binning_vertical.SetValue(binning_radius)
# Set the ROI.
x, y, w, h = roi
node_offset_x = PySpin.CIntegerPtr(self._nodemap.GetNode('OffsetX'))
node_offset_x.SetValue(int(x))
node_offset_y = PySpin.CIntegerPtr(self._nodemap.GetNode('OffsetY'))
node_offset_y.SetValue(int(y))
node_width = PySpin.CIntegerPtr(self._nodemap.GetNode('Width'))
node_width.SetValue(int(w))
node_height = PySpin.CIntegerPtr(self._nodemap.GetNode('Height'))
node_height.SetValue(int(h))
self._camera.BeginAcquisition()
def get(self, propId):
if propId == cv2.CAP_PROP_FRAME_WIDTH:
node_width = PySpin.CIntegerPtr(self._nodemap.GetNode('Width'))
return float(node_width.GetValue())
if propId == cv2.CAP_PROP_FRAME_HEIGHT:
node_height = PySpin.CIntegerPtr(self._nodemap.GetNode('Height'))
return float(node_height.GetValue())
if propId == cv2.CAP_PROP_GAIN:
node_gain = PySpin.CFloatPtr(self._nodemap.GetNode('Gain'))
return node_gain.GetValue()
return 0.0
def set(self, propId, value):
if propId == cv2.CAP_PROP_FRAME_WIDTH:
node_width = PySpin.CIntegerPtr(self._nodemap.GetNode('Width'))
node_width.SetValue(int(value))
return True
if propId == cv2.CAP_PROP_FRAME_HEIGHT:
node_height = PySpin.CIntegerPtr(self._nodemap.GetNode('Height'))
node_height.SetValue(int(value))
return True
if propId == cv2.CAP_PROP_GAIN:
node_gain = PySpin.CFloatPtr(self._nodemap.GetNode('Gain'))
node_gain.SetValue(value)
return True
return False
def __del__(self):
self.release()
def read(self, image=None):
camera_image = self._camera.GetNextImage()
if camera_image.IsIncomplete():
return False, None
h = camera_image.GetHeight()
w = camera_image.GetWidth()
num_channels = camera_image.GetNumChannels()
if num_channels > 1:
camera_image_data = camera_image.GetData().reshape(h, w, num_channels)
else:
camera_image_data = camera_image.GetData().reshape(h, w)
if image is None:
image = camera_image_data.copy()
else:
image[:] = camera_image_data
camera_image.Release()
return True, image
def release(self):
self._camera.EndAcquisition()
self._camera.DeInit()
del self._camera
self._camera_list.Clear()
self._system.ReleaseInstance()
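# --- Hedged usage sketch (not part of the original file) ---
# PySpinCapture mirrors part of the cv2.VideoCapture interface (get/set/
# read/release), so it can plausibly stand in where a VideoCapture object
# is expected. The camera index, ROI and binning values below are made-up
# illustrations, not values taken from the original repository.
if __name__ == '__main__':
    capture = PySpinCapture(index=0, roi=(0, 0, 1280, 960),
                            binning_radius=1, is_monochrome=True)
    success, frame = capture.read()
    if success:
        cv2.imshow('PySpin frame', frame)
        cv2.waitKey(0)
    capture.release()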
``` |
{
"source": "joehwang/auto-door",
"score": 3
} |
#### File: cvs_scrapy/spiders/hilife.py
```python
import scrapy
import time
from scrapy.http import FormRequest
from cvs_scrapy.items import CvsScrapyItem
class Hilife(scrapy.Spider):
name = "hilife"
DEBUG=0
_citys=[]
def start_requests(self):
urls = [
'https://www.hilife.com.tw/storeInquiry_street.aspx'
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
self._citys=self.get_citys(response)
self.log("--------------")
self.log("列出所有縣市:{}".format(self._citys))
for city in self._citys:
yield scrapy.FormRequest('https://www.hilife.com.tw/storeInquiry_street.aspx',
formdata={'CITY': city,
'__VIEWSTATE': response.css('input#__VIEWSTATE::attr(value)').extract_first(),
'__EVENTVALIDATION': response.css('input#__EVENTVALIDATION::attr(value)').extract_first()
},dont_filter=True,callback=self.get_areas_into_city)
if self.DEBUG==1:
return None
self.log("***********")
def get_citys(self,response):
return response.xpath('//*[@id="CITY"]/option/text()').extract()
def get_areas_into_city(self,response):
self.log("************")
areas=response.xpath('//*[@id="AREA"]/option/text()').extract()
city=response.xpath('//*[@id="lblCity"]/text()').extract()[0]
self.log("{}的所有行政區{}".format(city,areas))
for area in areas:
self.log("查詢:{} {}".format(city,area))
yield scrapy.FormRequest('https://www.hilife.com.tw/storeInquiry_street.aspx',
formdata={'CITY': city,
'AREA': area,
'__VIEWSTATE': response.css('input#__VIEWSTATE::attr(value)').extract_first(),
'__EVENTVALIDATION': response.css('input#__EVENTVALIDATION::attr(value)').extract_first()},
dont_filter=True,callback=self.get_area_info)
if self.DEBUG==1:
return
def get_area_info(self,response):
self.log("店鋪列表")
self.log(response.xpath('//*[@id="wrapper"]/div[2]/div/div/table/tr/th/text()').extract())
item = CvsScrapyItem()
for tr in response.xpath('//*[@id="wrapper"]/div[2]/div/div/table/tr'):
#self.log(tr.xpath('td[1]/img[@title]/@title').extract())
item["serial"]=tr.xpath('th[1]/text()').get()
item["name"]=tr.xpath('th[2]/text()').get()
item["phone"]=tr.xpath('td[2]/text()').get()
item["addr"]=tr.xpath('td[1]/a/text()').get()
item["ship_status"]=""
item["note"]=tr.xpath('td[1]/img[@title]/@title').extract()
item["kind"]="hilife"
yield item
```
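The spider above chains ASP.NET form posts, re-submitting `__VIEWSTATE` and `__EVENTVALIDATION`, to drill down from city to district to the store table. A minimal sketch for driving it programmatically is shown below; it assumes the standard `cvs_scrapy` Scrapy project layout implied by the file paths and is illustrative only.
```python
# Hedged sketch: running the Hilife spider outside the ``scrapy crawl`` CLI.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from cvs_scrapy.spiders.hilife import Hilife

process = CrawlerProcess(get_project_settings())
process.crawl(Hilife)
process.start()  # blocks until the crawl finishes; items go to the configured pipelines/feeds
```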
#### File: cvs_scrapy/spiders/seveneleven.py
```python
import scrapy
import time
from cvs_scrapy.items import CvsScrapyItem
import json
class Seveneleven(scrapy.Spider):
name = "seveneleven"
DEBUG=0
_citys=[]
#https://api.map.com.tw/net/familyShop.aspx?searchType=ShopList&type=&city=%E6%BE%8E%E6%B9%96%E7%B8%A3&area=%E9%A6%AC%E5%85%AC%E5%B8%82&road=&fun=showStoreList&key=<KEY>
def start_requests(self):
urls = [
#'https://emap.pcsc.com.tw/emap.aspx#',
'https://emap.pcsc.com.tw/lib/areacode.js'
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
self._citys=self.get_citys(response)
self.log("--------------")
self.log("列出含有7-11所有縣市:{}".format(self._citys))
headers={'Referer':'https://emap.pcsc.com.tw/emap.aspx','Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}
for city in self._citys:
city_code=list(city.values())[0]
city_name=list(city.keys())[0]
#self.log(city_code) #unpack dict values
url="https://emap.pcsc.com.tw/EMapSDK.aspx"
yield scrapy.Request(url=url, method='POST',
body="commandid=GetTown&cityid={}&leftMenuChecked=".format(city_code),meta={'city_name':city_name},headers=headers,callback=self.get_dists_of_city)
if self.DEBUG==1:
return None
def get_dists_of_city(self,response):
self.log("***********")
        towns=response.xpath('//*/GeoPosition/TownName/text()').extract()
        self.log(towns)
##get the shops
headers={'Referer':'https://emap.pcsc.com.tw/emap.aspx','Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}
url="https://emap.pcsc.com.tw/EMapSDK.aspx"
        for town in towns:
yield scrapy.Request(url=url, method='POST',
body="commandid=SearchStore&city={}&town={}".format(response.meta["city_name"],dict),headers=headers,callback=self.get_shops)
if self.DEBUG==1:
return None
def get_shops(self,response):
shops=response.xpath('//*/GeoPosition')
item = CvsScrapyItem()
for shop in shops:
item["serial"]=shop.xpath('POIID/text()').get().strip()
item["name"]=shop.xpath('POIName/text()').get().strip()
item["phone"]=shop.xpath('Telno/text()').get().strip()
item["addr"]=shop.xpath('Address/text()').get().strip()
item["ship_status"]=""
item["note"]=shop.xpath('StoreImageTitle/text()').get().strip().replace(","," ")
item["kind"]="seven"
yield item
def get_citys(self,response):
#return response.xpath('//*[@id="tw"]/div/a/text()').extract()
groups=response.xpath('//*').re(r'new AreaNode\(\'(.*)\', new bu\(.*,.*\), \'(\d\d)\'\)')
#self.log(groups)
a=[]
for i in range(0,len(groups),2):
a.append({groups[i]:groups[i+1]})
return a
``` |
{
"source": "joehybird/django-extended-choices",
"score": 3
} |
#### File: django-extended-choices/extended_choices/choices.py
```python
from __future__ import unicode_literals
from past.builtins import basestring
from collections import OrderedDict
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
from .helpers import ChoiceEntry
__all__ = [
'Choices',
'OrderedChoices',
'AutoDisplayChoices',
'AutoChoices',
]
_NO_SUBSET_NAME_ = '__NO_SUBSET_NAME__'
class Choices(list):
"""Helper class for choices fields in Django
    A choice entry has three representations: constant name, value and
    display name. So ``Choices`` takes a list of such tuples.
    It's easy to get the constant, value or display name given one of these
    values. See the example below.
Parameters
----------
*choices : list of tuples
It's the list of tuples to add to the ``Choices`` instance, each tuple having three
entries: the constant name, the value, the display name.
A dict could be added as a 4th entry in the tuple to allow setting arbitrary
arguments to the final ``ChoiceEntry`` created for this choice tuple.
name : string, optional
If set, a subset will be created containing all the constants. It could be used if you
construct your ``Choices`` instance with many calls to ``add_choices``.
dict_class : type, optional
        ``dict`` by default, it's the dict class used to create the dictionaries (``constants``,
        ``values`` and ``displays``). Could be set for example to ``OrderedDict`` (you can use
        ``OrderedChoices``, which is a simple subclass using ``OrderedDict``).
Example
-------
Start by declaring your ``Choices``:
>>> ALIGNMENTS = Choices(
... ('BAD', 10, 'bad'),
... ('NEUTRAL', 20, 'neutral'),
... ('CHAOTIC_GOOD', 30, 'chaotic good'),
... ('GOOD', 40, 'good'),
... dict_class=OrderedDict
... )
    Then you can use it in a django field. Notice its usage in ``choices`` and ``default``:
>>> from django.conf import settings
>>> try:
... settings.configure(DATABASE_ENGINE='sqlite3')
... except: pass
>>> from django.db.models import IntegerField
>>> field = IntegerField(choices=ALIGNMENTS, # use ``ALIGNMENTS`` or ``ALIGNMENTS.choices``.
... default=ALIGNMENTS.NEUTRAL)
The ``Choices`` returns a list as expected by django:
>>> ALIGNMENTS == ((10, 'bad'), (20, 'neutral'), (30, 'chaotic good'), (40, 'good'))
True
But represents it with the constants:
>>> repr(ALIGNMENTS)
"[('BAD', 10, 'bad'), ('NEUTRAL', 20, 'neutral'), ('CHAOTIC_GOOD', 30, 'chaotic good'), ('GOOD', 40, 'good')]"
Use ``choices`` which is a simple list to represent it as such:
>>> ALIGNMENTS.choices
((10, 'bad'), (20, 'neutral'), (30, 'chaotic good'), (40, 'good'))
And you can access value by their constant, or as you want:
>>> ALIGNMENTS.BAD
10
>>> ALIGNMENTS.BAD.display
'bad'
>>> 40 in ALIGNMENTS
True
>>> ALIGNMENTS.has_constant('BAD')
True
>>> ALIGNMENTS.has_value(20)
True
>>> ALIGNMENTS.has_display('good')
True
>>> ALIGNMENTS.for_value(10)
('BAD', 10, 'bad')
>>> ALIGNMENTS.for_value(10).constant
'BAD'
>>> ALIGNMENTS.for_display('good').value
40
>>> ALIGNMENTS.for_constant('NEUTRAL').display
'neutral'
>>> ALIGNMENTS.constants
OrderedDict([('BAD', ('BAD', 10, 'bad')), ('NEUTRAL', ('NEUTRAL', 20, 'neutral')), ('CHAOTIC_GOOD', ('CHAOTIC_GOOD', 30, 'chaotic good')), ('GOOD', ('GOOD', 40, 'good'))])
>>> ALIGNMENTS.values
OrderedDict([(10, ('BAD', 10, 'bad')), (20, ('NEUTRAL', 20, 'neutral')), (30, ('CHAOTIC_GOOD', 30, 'chaotic good')), (40, ('GOOD', 40, 'good'))])
>>> ALIGNMENTS.displays
OrderedDict([('bad', ('BAD', 10, 'bad')), ('neutral', ('NEUTRAL', 20, 'neutral')), ('chaotic good', ('CHAOTIC_GOOD', 30, 'chaotic good')), ('good', ('GOOD', 40, 'good'))])
You can create subsets of choices:
>>> ALIGNMENTS.add_subset('WESTERN',('BAD', 'GOOD'))
>>> ALIGNMENTS.WESTERN.choices
((10, 'bad'), (40, 'good'))
>>> ALIGNMENTS.BAD in ALIGNMENTS.WESTERN
True
>>> ALIGNMENTS.NEUTRAL in ALIGNMENTS.WESTERN
False
To use it in another field (only the values in the subset will be available), or for checks:
>>> def is_western(value):
... return value in ALIGNMENTS.WESTERN
>>> is_western(40)
True
"""
# Allow to easily change the ``ChoiceEntry`` class to use in subclasses.
ChoiceEntryClass = ChoiceEntry
def __init__(self, *choices, **kwargs):
# Init the list as empty. Entries will be formatted for django and added in
# ``add_choices``.
super(Choices, self).__init__()
# Class to use for dicts.
self.dict_class = kwargs.get('dict_class', dict)
# List of ``ChoiceEntry``, one for each choice in this instance.
self.entries = []
# List of the created subsets
self.subsets = []
# Dicts to access ``ChoiceEntry`` instances by constant, value or display value.
self.constants = self.dict_class()
self.values = self.dict_class()
self.displays = self.dict_class()
# For now this instance is mutable: we need to add the given choices.
self._mutable = True
self.add_choices(*choices, name=kwargs.get('name', None))
# Now we can set ``_mutable`` to its correct value.
self._mutable = kwargs.get('mutable', True)
@property
def choices(self):
"""Property that returns a tuple formatted as expected by Django.
Example
-------
>>> MY_CHOICES = Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
>>> MY_CHOICES.choices
((1, 'foo'), (2, 'bar'))
"""
return tuple(self)
def _convert_choices(self, choices):
"""Validate each choices
Parameters
----------
choices : list of tuples
The list of choices to be added
Returns
-------
list
The list of the added constants
"""
# Check that each new constant is unique.
constants = [c[0] for c in choices]
constants_doubles = [c for c in constants if constants.count(c) > 1]
if constants_doubles:
raise ValueError("You cannot declare two constants with the same constant name. "
"Problematic constants: %s " % list(set(constants_doubles)))
# Check that none of the new constants already exists.
bad_constants = set(constants).intersection(self.constants)
if bad_constants:
raise ValueError("You cannot add existing constants. "
"Existing constants: %s." % list(bad_constants))
# Check that none of the constant is an existing attributes
bad_constants = [c for c in constants if hasattr(self, c)]
if bad_constants:
raise ValueError("You cannot add constants that already exists as attributes. "
"Existing attributes: %s." % list(bad_constants))
# Check that each new value is unique.
values = [c[1] for c in choices]
values_doubles = [c for c in values if values.count(c) > 1]
if values_doubles:
raise ValueError("You cannot declare two choices with the same name."
"Problematic values: %s " % list(set(values_doubles)))
# Check that none of the new values already exists.
try:
bad_values = set(values).intersection(self.values)
except TypeError:
raise ValueError("One value cannot be used in: %s" % list(values))
else:
if bad_values:
raise ValueError("You cannot add existing values. "
"Existing values: %s." % list(bad_values))
# We can now add each choice.
for choice_tuple in choices:
# Convert the choice tuple in a ``ChoiceEntry`` instance if it's not already done.
# It allows to share choice entries between a ``Choices`` instance and its subsets.
choice_entry = choice_tuple
if not isinstance(choice_entry, self.ChoiceEntryClass):
choice_entry = self.ChoiceEntryClass(choice_entry)
# Append to the main list the choice as expected by django: (value, display name).
self.append(choice_entry.choice)
# And the ``ChoiceEntry`` instance to our own internal list.
self.entries.append(choice_entry)
# Make the value accessible via an attribute (the constant being its name).
setattr(self, choice_entry.constant, choice_entry.value)
# Fill dicts to access the ``ChoiceEntry`` instance by its constant, value or display..
self.constants[choice_entry.constant] = choice_entry
self.values[choice_entry.value] = choice_entry
self.displays[choice_entry.display] = choice_entry
return constants
def add_choices(self, *choices, **kwargs):
"""Add some choices to the current ``Choices`` instance.
The given choices will be added to the existing choices.
If a ``name`` attribute is passed, a new subset will be created with all the given
choices.
Note that it's not possible to add new choices to a subset.
Parameters
----------
*choices : list of tuples
It's the list of tuples to add to the ``Choices`` instance, each tuple having three
entries: the constant name, the value, the display name.
A dict could be added as a 4th entry in the tuple to allow setting arbitrary
arguments to the final ``ChoiceEntry`` created for this choice tuple.
If the first entry of ``*choices`` is a string, then it will be used as a name for a
new subset that will contain all the given choices.
**kwargs : dict
name : string
Instead of using the first entry of the ``*choices`` to pass a name of a subset to
create, you can pass it via the ``name`` named argument.
Example
-------
>>> MY_CHOICES = Choices()
>>> MY_CHOICES.add_choices(('ZERO', 0, 'zero'))
>>> MY_CHOICES
[('ZERO', 0, 'zero')]
>>> MY_CHOICES.add_choices('SMALL', ('ONE', 1, 'one'), ('TWO', 2, 'two'))
>>> MY_CHOICES
[('ZERO', 0, 'zero'), ('ONE', 1, 'one'), ('TWO', 2, 'two')]
>>> MY_CHOICES.SMALL
[('ONE', 1, 'one'), ('TWO', 2, 'two')]
>>> MY_CHOICES.add_choices(('THREE', 3, 'three'), ('FOUR', 4, 'four'), name='BIG')
>>> MY_CHOICES
[('ZERO', 0, 'zero'), ('ONE', 1, 'one'), ('TWO', 2, 'two'), ('THREE', 3, 'three'), ('FOUR', 4, 'four')]
>>> MY_CHOICES.BIG
[('THREE', 3, 'three'), ('FOUR', 4, 'four')]
Raises
------
RuntimeError
When the ``Choices`` instance is marked as not mutable, which is the case for subsets.
ValueError
* if the subset name is defined as first argument and as named argument.
* if some constants have the same name or the same value.
* if at least one constant or value already exists in the instance.
"""
        # If the ``_mutable`` flag is falsy, which is the case for subsets, we refuse to add
# new choices.
if not self._mutable:
raise RuntimeError("This ``Choices`` instance cannot be updated.")
# Check for an optional subset name as the first argument (so the first entry of *choices).
subset_name = None
if choices and isinstance(choices[0], basestring) and choices[0] != _NO_SUBSET_NAME_:
subset_name = choices[0]
choices = choices[1:]
# Check for an optional subset name in the named arguments.
if kwargs.get('name', None):
if subset_name:
raise ValueError("The name of the subset cannot be defined as the first "
"argument and also as a named argument")
subset_name = kwargs['name']
constants = self._convert_choices(choices)
# If we have a subset name, create a new subset with all the given constants.
if subset_name:
self.add_subset(subset_name, constants)
def extract_subset(self, *constants):
"""Create a subset of entries
This subset is a new ``Choices`` instance, with only the wanted constants from the
main ``Choices`` (each "choice entry" in the subset is shared from the main ``Choices``)
Parameters
----------
*constants: list
The constants names of this ``Choices`` object to make available in the subset.
Returns
-------
Choices
The newly created subset, which is a ``Choices`` object
Example
-------
>>> STATES = Choices(
... ('ONLINE', 1, 'Online'),
... ('DRAFT', 2, 'Draft'),
... ('OFFLINE', 3, 'Offline'),
... )
>>> STATES
[('ONLINE', 1, 'Online'), ('DRAFT', 2, 'Draft'), ('OFFLINE', 3, 'Offline')]
>>> subset = STATES.extract_subset('DRAFT', 'OFFLINE')
>>> subset
[('DRAFT', 2, 'Draft'), ('OFFLINE', 3, 'Offline')]
>>> subset.DRAFT
2
>>> subset.for_constant('DRAFT') is STATES.for_constant('DRAFT')
True
>>> subset.ONLINE
Traceback (most recent call last):
...
AttributeError: 'Choices' object has no attribute 'ONLINE'
Raises
------
ValueError
If a constant is not defined as a constant in the ``Choices`` instance.
"""
# Ensure that all passed constants exists as such in the list of available constants.
bad_constants = set(constants).difference(self.constants)
if bad_constants:
raise ValueError("All constants in subsets should be in parent choice. "
"Missing constants: %s." % list(bad_constants))
# Keep only entries we asked for.
choice_entries = [self.constants[c] for c in constants]
# Create a new ``Choices`` instance with the limited set of entries, and pass the other
# configuration attributes to share the same behavior as the current ``Choices``.
# Also we set ``mutable`` to False to disable the possibility to add new choices to the
# subset.
subset = self.__class__(
*choice_entries,
**{
'dict_class': self.dict_class,
'mutable': False,
}
)
return subset
def add_subset(self, name, constants):
"""Add a subset of entries under a defined name.
        This allows defining a "sub choice" if a django field does not need the whole
        choice available.
        The sub-choice is a new ``Choices`` instance, with only the wanted constants from the
        main ``Choices`` (each "choice entry" in the subset is shared from the main ``Choices``)
The sub-choice is accessible from the main ``Choices`` by an attribute having the given
name.
Parameters
----------
name : string
            Name of the attribute that will hold the new ``Choices`` instance.
constants: list or tuple
List of the constants name of this ``Choices`` object to make available in the subset.
Returns
-------
Choices
The newly created subset, which is a ``Choices`` object
Example
-------
>>> STATES = Choices(
... ('ONLINE', 1, 'Online'),
... ('DRAFT', 2, 'Draft'),
... ('OFFLINE', 3, 'Offline'),
... )
>>> STATES
[('ONLINE', 1, 'Online'), ('DRAFT', 2, 'Draft'), ('OFFLINE', 3, 'Offline')]
>>> STATES.add_subset('NOT_ONLINE', ('DRAFT', 'OFFLINE',))
>>> STATES.NOT_ONLINE
[('DRAFT', 2, 'Draft'), ('OFFLINE', 3, 'Offline')]
>>> STATES.NOT_ONLINE.DRAFT
2
>>> STATES.NOT_ONLINE.for_constant('DRAFT') is STATES.for_constant('DRAFT')
True
>>> STATES.NOT_ONLINE.ONLINE
Traceback (most recent call last):
...
AttributeError: 'Choices' object has no attribute 'ONLINE'
Raises
------
ValueError
* If ``name`` is already an attribute of the ``Choices`` instance.
* If a constant is not defined as a constant in the ``Choices`` instance.
"""
# Ensure that the name is not already used as an attribute.
if hasattr(self, name):
raise ValueError("Cannot use '%s' as a subset name. "
"It's already an attribute." % name)
subset = self.extract_subset(*constants)
# Make the subset accessible via an attribute.
setattr(self, name, subset)
self.subsets.append(name)
def for_constant(self, constant):
"""Returns the ``ChoiceEntry`` for the given constant.
Parameters
----------
constant: string
Name of the constant for which we want the choice entry.
Returns
-------
ChoiceEntry
The instance of ``ChoiceEntry`` for the given constant.
Raises
------
KeyError
If the constant is not an existing one.
Example
-------
>>> MY_CHOICES = Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
>>> MY_CHOICES.for_constant('FOO')
('FOO', 1, 'foo')
>>> MY_CHOICES.for_constant('FOO').value
1
>>> MY_CHOICES.for_constant('QUX')
Traceback (most recent call last):
...
KeyError: 'QUX'
"""
return self.constants[constant]
def for_value(self, value):
"""Returns the ``ChoiceEntry`` for the given value.
Parameters
----------
value: ?
Value for which we want the choice entry.
Returns
-------
ChoiceEntry
The instance of ``ChoiceEntry`` for the given value.
Raises
------
KeyError
If the value is not an existing one.
Example
-------
>>> MY_CHOICES = Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
>>> MY_CHOICES.for_value(1)
('FOO', 1, 'foo')
>>> MY_CHOICES.for_value(1).display
'foo'
>>> MY_CHOICES.for_value(3)
Traceback (most recent call last):
...
KeyError: 3
"""
return self.values[value]
def for_display(self, display):
"""Returns the ``ChoiceEntry`` for the given display name.
Parameters
----------
display: string
Display name for which we want the choice entry.
Returns
-------
ChoiceEntry
The instance of ``ChoiceEntry`` for the given display name.
Raises
------
KeyError
If the display name is not an existing one.
Example
-------
>>> MY_CHOICES = Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
>>> MY_CHOICES.for_display('foo')
('FOO', 1, 'foo')
>>> MY_CHOICES.for_display('foo').constant
'FOO'
>>> MY_CHOICES.for_display('qux')
Traceback (most recent call last):
...
KeyError: 'qux'
"""
return self.displays[display]
def has_constant(self, constant):
"""Check if the current ``Choices`` object has the given constant.
Parameters
----------
constant: string
Name of the constant we want to check..
Returns
-------
boolean
``True`` if the constant is present, ``False`` otherwise.
Example
-------
>>> MY_CHOICES = Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
>>> MY_CHOICES.has_constant('FOO')
True
>>> MY_CHOICES.has_constant('QUX')
False
"""
return constant in self.constants
def has_value(self, value):
"""Check if the current ``Choices`` object has the given value.
Parameters
----------
value: ?
Value we want to check.
Returns
-------
boolean
``True`` if the value is present, ``False`` otherwise.
Example
-------
>>> MY_CHOICES = Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
>>> MY_CHOICES.has_value(1)
True
>>> MY_CHOICES.has_value(3)
False
"""
return value in self.values
def has_display(self, display):
"""Check if the current ``Choices`` object has the given display name.
Parameters
----------
display: string
Display name we want to check..
Returns
-------
boolean
``True`` if the display name is present, ``False`` otherwise.
Example
-------
>>> MY_CHOICES = Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
>>> MY_CHOICES.has_display('foo')
True
>>> MY_CHOICES.has_display('qux')
False
"""
return display in self.displays
def __contains__(self, item):
"""Check if the current ``Choices`` object has the given value.
Example
-------
>>> MY_CHOICES = Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
>>> 1 in MY_CHOICES
True
>>> 3 in MY_CHOICES
False
"""
return self.has_value(item)
def __getitem__(self, key):
"""Return the attribute having the given name for the current instance
        It allows, for example, to retrieve constants by key instead of by attribute (as constants
        are set as attributes to easily get the matching value).
Example
-------
>>> MY_CHOICES = Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
>>> MY_CHOICES['FOO']
1
>>> MY_CHOICES['constants'] is MY_CHOICES.constants
True
"""
# If the key is an int, call ``super`` to access the list[key] item
if isinstance(key, int):
return super(Choices, self).__getitem__(key)
if not hasattr(self, key):
raise KeyError("Attribute '%s' not found." % key)
return getattr(self, key)
def __repr__(self):
"""String representation of this ``Choices`` instance.
Notes
-----
It will represent the data passed and store in ``self.entries``, not the data really
stored in the base list object, which is in the format expected by django, ie a list of
tuples with only value and display name.
Here, we display everything.
Example
-------
>>> Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
[('FOO', 1, 'foo'), ('BAR', 2, 'bar')]
"""
return '%s' % self.entries
def __eq__(self, other):
"""Override to allow comparison with a tuple of choices, not only a list.
        It also allows comparison with default django choices, ie (value, display name), or
with the format of ``Choices``, ie (constant name, value, display_name).
Example
-------
>>> MY_CHOICES = Choices(('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
>>> MY_CHOICES == [('FOO', 1, 'foo'), ('BAR', 2, 'bar')]
True
>>> MY_CHOICES == (('FOO', 1, 'foo'), ('BAR', 2, 'bar'))
True
>>> MY_CHOICES == [(1, 'foo'), (2, 'bar')]
True
>>> MY_CHOICES == ((1, 'foo'), (2, 'bar'))
True
"""
# Convert to list if it's a tuple.
if isinstance(other, tuple):
other = list(other)
# Compare to the list of entries if the first element seems to have a constant
# name as first entry.
if other and len(other[0]) == 3:
return self.entries == other
return super(Choices, self).__eq__(other)
# TODO: implement __iadd__ and __add__
def __reduce__(self):
"""Reducer to make the auto-created classes picklable.
Returns
-------
tuple
A tuple as expected by pickle, to recreate the object when calling ``pickle.loads``:
1. a callable to recreate the object
            2. a tuple with all positional arguments expected by this callable
"""
return (
# Function to create a ``Choices`` instance
create_choice,
(
# The ``Choices`` class, or a subclass, used to create the current instance
self.__class__,
# The list of choices
[
(
entry.constant.original_value,
entry.value.original_value,
entry.display.original_value,
entry.attributes,
)
for entry in self.entries
],
# The list of subsets
[
(
# The name
subset_name,
# The list of constants to use in this subset
[
c.original_value
for c in getattr(self, subset_name).constants.keys()
]
)
for subset_name in self.subsets
],
                # Extra kwargs to pass to ``__init__``
{
'dict_class': self.dict_class,
'mutable': self._mutable,
}
)
)
class OrderedChoices(Choices):
"""Simple subclass of ``Choices`` using ``OrderedDict`` as ``dict_class``
Example
-------
Start by declaring your ``Choices``:
>>> ALIGNMENTS = OrderedChoices(
... ('BAD', 10, 'bad'),
... ('NEUTRAL', 20, 'neutral'),
... ('CHAOTIC_GOOD', 30, 'chaotic good'),
... ('GOOD', 40, 'good'),
... )
>>> ALIGNMENTS.dict_class
<class 'collections.OrderedDict'>
>>> ALIGNMENTS.constants
OrderedDict([('BAD', ('BAD', 10, 'bad')), ('NEUTRAL', ('NEUTRAL', 20, 'neutral')), ('CHAOTIC_GOOD', ('CHAOTIC_GOOD', 30, 'chaotic good')), ('GOOD', ('GOOD', 40, 'good'))])
>>> ALIGNMENTS.values
OrderedDict([(10, ('BAD', 10, 'bad')), (20, ('NEUTRAL', 20, 'neutral')), (30, ('CHAOTIC_GOOD', 30, 'chaotic good')), (40, ('GOOD', 40, 'good'))])
>>> ALIGNMENTS.displays
OrderedDict([('bad', ('BAD', 10, 'bad')), ('neutral', ('NEUTRAL', 20, 'neutral')), ('chaotic good', ('CHAOTIC_GOOD', 30, 'chaotic good')), ('good', ('GOOD', 40, 'good'))])
"""
def __init__(self, *choices, **kwargs):
# Class to use for dicts
if 'dict_class' not in kwargs:
kwargs['dict_class'] = OrderedDict
super(OrderedChoices, self).__init__(*choices, **kwargs)
class AutoDisplayChoices(OrderedChoices):
"""Subclass of ``OrderedChoices`` that will compose the display value based on the constant.
To compose the display value, it will call a ``display_transform`` function, that is defined
as a class attribute but can be overridden by passing it to the constructor.
Example
-------
>>> ALIGNMENTS = AutoDisplayChoices(
... ('BAD', 10),
... ('NEUTRAL', 20),
... ('CHAOTIC_GOOD', 30, 'THE CHAOS'),
... ('GOOD', 40, {'additional': 'attributes'}),
... )
>>> ALIGNMENTS.BAD.display
'Bad'
>>> ALIGNMENTS.NEUTRAL.choice_entry
('NEUTRAL', 20, 'Neutral')
>>> ALIGNMENTS.CHAOTIC_GOOD.display
'THE CHAOS'
>>> ALIGNMENTS.GOOD.choice_entry.additional
'attributes'
"""
display_transform = staticmethod(lambda const: const.lower().replace('_', ' ').capitalize())
def __init__(self, *choices, **kwargs):
self.display_transform = kwargs.pop('display_transform', None) or self.display_transform
super(AutoDisplayChoices, self).__init__(*choices, **kwargs)
def _convert_choices(self, choices):
"""Auto create display values then call super method"""
final_choices = []
for choice in choices:
if isinstance(choice, ChoiceEntry):
final_choices.append(choice)
continue
original_choice = choice
choice = list(choice)
length = len(choice)
assert 2 <= length <= 4, 'Invalid number of entries in %s' % (original_choice,)
final_choice = []
# do we have attributes?
if length > 2 and isinstance(choice[-1], Mapping):
final_choice.append(choice.pop())
elif length == 4:
attributes = choice.pop()
assert attributes is None or isinstance(attributes, Mapping), 'Last argument must be a dict-like object in %s' % (original_choice,)
if attributes:
final_choice.append(attributes)
# the constant
final_choice.insert(0, choice.pop(0))
# the db value
final_choice.insert(1, choice.pop(0))
if len(choice):
# we were given a display value
final_choice.insert(2, choice.pop(0))
else:
# no display value, we compute it from the constant
final_choice.insert(2, self.display_transform(final_choice[0]))
final_choices.append(final_choice)
return super(AutoDisplayChoices, self)._convert_choices(final_choices)
class AutoChoices(AutoDisplayChoices):
"""Subclass of ``AutoDisplayChoices`` that will also compose the value to be saved based on the constant.
    To compose the value to be saved, it will call a ``value_transform`` function, which is defined
    as a class attribute but can be overridden by passing it to the constructor.
In this class, the ``*choices`` argument can simply be strings, or tuples with one element (or two
to add additional attributes)
Example
-------
>>> ALIGNMENTS = AutoChoices(
... 'BAD',
... ('NEUTRAL', ),
... ('CHAOTIC_GOOD', 'chaos', 'THE CHAOS'),
... ('GOOD', None, 'Yeah', {'additional': 'attributes'}),
... )
>>> ALIGNMENTS.BAD.value
'bad'
>>> ALIGNMENTS.BAD.display
'Bad'
>>> ALIGNMENTS.NEUTRAL.choice_entry
('NEUTRAL', 'neutral', 'Neutral')
>>> ALIGNMENTS.CHAOTIC_GOOD.value
'chaos'
>>> ALIGNMENTS.CHAOTIC_GOOD.display
'THE CHAOS'
>>> ALIGNMENTS.GOOD.value
'good'
>>> ALIGNMENTS.GOOD.display
'Yeah'
>>> ALIGNMENTS.GOOD.choice_entry.additional
'attributes'
"""
value_transform = staticmethod(lambda const: const.lower())
def __init__(self, *choices, **kwargs):
self.value_transform = kwargs.pop('value_transform', None) or self.value_transform
super(AutoChoices, self).__init__(*choices, **kwargs)
def add_choices(self, *choices, **kwargs):
"""Disallow super method to thing the first argument is a subset name"""
return super(AutoChoices, self).add_choices(_NO_SUBSET_NAME_, *choices, **kwargs)
def _convert_choices(self, choices):
"""Auto create db values then call super method"""
final_choices = []
for choice in choices:
if isinstance(choice, ChoiceEntry):
final_choices.append(choice)
continue
original_choice = choice
if isinstance(choice, basestring):
if choice == _NO_SUBSET_NAME_:
continue
choice = [choice, ]
else:
choice = list(choice)
length = len(choice)
assert 1 <= length <= 4, 'Invalid number of entries in %s' % (original_choice,)
final_choice = []
# do we have attributes?
if length > 1 and isinstance(choice[-1], Mapping):
final_choice.append(choice.pop())
elif length == 4:
attributes = choice.pop()
assert attributes is None or isinstance(attributes, Mapping), 'Last argument must be a dict-like object in %s' % (original_choice,)
if attributes:
final_choice.append(attributes)
# the constant
final_choice.insert(0, choice.pop(0))
if len(choice):
# we were given a db value
final_choice.insert(1, choice.pop(0))
if len(choice):
# we were given a display value
final_choice.insert(2, choice.pop(0))
else:
# set None to compute it later
final_choice.insert(1, None)
if final_choice[1] is None:
# no db value, we compute it from the constant
final_choice[1] = self.value_transform(final_choice[0])
final_choices.append(final_choice)
return super(AutoChoices, self)._convert_choices(final_choices)
def create_choice(klass, choices, subsets, kwargs):
"""Create an instance of a ``Choices`` object.
Parameters
----------
klass : type
The class to use to recreate the object.
choices : list(tuple)
A list of choices as expected by the ``__init__`` method of ``klass``.
subsets : list(tuple)
A tuple with an entry for each subset to create. Each entry is a list with two entries:
- the name of the subsets
- a list of the constants to use for this subset
kwargs : dict
Extra parameters expected on the ``__init__`` method of ``klass``.
Returns
-------
Choices
A new instance of ``Choices`` (or other class defined in ``klass``).
"""
obj = klass(*choices, **kwargs)
for subset in subsets:
obj.add_subset(*subset)
return obj
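# --- Hedged sketch (not part of the original file) ---
# ``__reduce__`` plus ``create_choice`` above is what makes instances
# picklable, subsets included. Illustrative session (choices are made up):
#
# >>> import pickle
# >>> STATES = Choices(('ONLINE', 1, 'Online'), ('DRAFT', 2, 'Draft'))
# >>> STATES.add_subset('NOT_ONLINE', ('DRAFT',))
# >>> restored = pickle.loads(pickle.dumps(STATES))
# >>> restored == STATES
# True
# >>> restored.NOT_ONLINE.DRAFT
# 2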
``` |
{
"source": "joehybird/django-north",
"score": 2
} |
#### File: management/commands/runserver.py
```python
from django.contrib.staticfiles.management.commands.runserver import \
Command as RunserverCommand
from django.db import connection
from django_north.management import migrations
class Command(RunserverCommand):
help = ("Starts a lightweight Web server for development and also "
"serves static files.")
def check_migrations(self):
try:
migration_plan = migrations.build_migration_plan(connection)
except migrations.DBException as e:
self.stdout.write(self.style.NOTICE("\n{}\n".format(e)))
return
if migration_plan is None:
self.stdout.write(self.style.NOTICE("\nSchema not inited.\n"))
return
has_migrations = any(
[
any([not applied
for mig, applied, path, is_manual
in plan['plan']])
for plan in migration_plan['plans']
]
)
if has_migrations:
self.stdout.write(self.style.NOTICE(
"\nYou have unapplied migrations; your app may not work "
"properly until they are applied."
))
self.stdout.write(self.style.NOTICE(
"Run 'python manage.py migrate' to apply them.\n"
))
```
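The nested `any()` in `check_migrations` walks a plan structure whose shape is implied by how it is indexed here and in `showmigrations.py` below. A hedged sketch of that shape, with invented field values, is shown for orientation.
```python
# Hedged sketch: the shape of the migration plan consumed above (values invented).
migration_plan = {
    'current_version': '1.2',
    'init_version': '1.0',
    'plans': [
        {'version': '1.3',
         'plan': [('1.3-0-ddl.sql', False, '/sql/1.3-0-ddl.sql', False)]},
    ],
}
has_migrations = any(
    any(not applied for mig, applied, path, is_manual in plan['plan'])
    for plan in migration_plan['plans']
)
print(has_migrations)  # True: one migration has not been applied yet
```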
#### File: management/commands/showmigrations.py
```python
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connections
from django.db import DEFAULT_DB_ALIAS
from django_north.management import migrations
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Shows all available migrations for the current project"
def add_arguments(self, parser):
parser.add_argument(
'--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. '
'Defaults to the "default" database.',
)
def handle(self, *args, **options):
if getattr(settings, 'NORTH_MANAGE_DB', False) is not True:
logger.info('showmigrations command disabled')
return
self.connection = connections[options['database']]
return self.show_list()
def show_list(self):
"""
Shows a list of all migrations on the system,
from the version used to init the DB, to the current target version.
"""
migration_plan = migrations.build_migration_plan(self.connection)
if migration_plan is None:
self.stdout.write(self.style.ERROR("Schema not inited"))
return
self.stdout.write(
self.style.MIGRATE_HEADING("Current version of the DB:"))
self.stdout.write(
self.style.MIGRATE_LABEL(
" {}".format(migration_plan['current_version'])))
self.stdout.write(
self.style.MIGRATE_HEADING("Schema used to init the DB:"))
self.stdout.write(
self.style.MIGRATE_LABEL(
" {}".format(migration_plan['init_version'])))
# display migration status for each version to apply
for plan in migration_plan['plans']:
self.stdout.write(self.style.MIGRATE_HEADING("Version:"))
self.stdout.write(
self.style.MIGRATE_LABEL(" {}".format(plan['version'])))
# print plan
for mig, applied, path, is_manual in plan['plan']:
title = mig
if is_manual:
title += ' (manual)'
if applied:
self.stdout.write(" [X] %s" % title)
else:
self.stdout.write(" [ ] %s" % title)
``` |
{
"source": "joehybird/django-perf-rec",
"score": 2
} |
#### File: django-perf-rec/django_perf_rec/sql.py
```python
from django.utils.lru_cache import lru_cache
from sqlparse import parse, tokens
from sqlparse.sql import IdentifierList, Token
@lru_cache(maxsize=500)
def sql_fingerprint(query, hide_columns=True):
"""
Simplify a query, taking away exact values and fields selected.
Imperfect but better than super explicit, value-dependent queries.
"""
parsed_query = parse(query)[0]
sql_recursively_simplify(parsed_query, hide_columns=hide_columns)
return str(parsed_query)
sql_deleteable_tokens = (
tokens.Number,
tokens.Number.Float,
tokens.Number.Integer,
tokens.Number.Hexadecimal,
tokens.String,
tokens.String.Single,
)
def sql_recursively_simplify(node, hide_columns=True):
# Erase which fields are being updated in an UPDATE
if node.tokens[0].value == 'UPDATE':
i_set = [i for (i, t) in enumerate(node.tokens) if t.value == 'SET'][0]
i_where = [i for (i, t) in enumerate(node.tokens)
if _is_group(t) and t.tokens[0].value == 'WHERE'][0]
middle = [Token(tokens.Punctuation, ' ... ')]
node.tokens = node.tokens[:i_set + 1] + middle + node.tokens[i_where:]
    # Erase the names of savepoints since they are non-deterministic
if hasattr(node, 'tokens'):
# SAVEPOINT x
if str(node.tokens[0]) == 'SAVEPOINT':
node.tokens[2].tokens[0].value = '`#`'
return
# RELEASE SAVEPOINT x
elif len(node.tokens) >= 3 and node.tokens[2].value == 'SAVEPOINT':
node.tokens[4].tokens[0].value = "`#`"
return
# ROLLBACK TO SAVEPOINT X
token_values = [getattr(t, 'value', '') for t in node.tokens]
if len(node.tokens) == 7 and token_values[:6] == ['ROLLBACK', ' ', 'TO', ' ', 'SAVEPOINT', ' ']:
node.tokens[6].tokens[0].value = '`#`'
return
# Erase volatile part of PG cursor name
if node.tokens[0].value.startswith('"_django_curs_'):
node.tokens[0].value = '"_django_curs_#"'
one_before = None
for token in node.tokens:
ttype = getattr(token, 'ttype', None)
# Detect IdentifierList tokens within an ORDER BY, GROUP BY or HAVING
# clauses
inside_order_group_having = match_keyword(one_before, ['ORDER BY', 'GROUP BY', 'HAVING'])
replace_columns = not inside_order_group_having and hide_columns
if isinstance(token, IdentifierList) and replace_columns:
token.tokens = [Token(tokens.Punctuation, '...')]
elif hasattr(token, 'tokens'):
sql_recursively_simplify(token, hide_columns=hide_columns)
elif ttype in sql_deleteable_tokens:
token.value = '#'
elif ttype == tokens.Whitespace.Newline:
token.value = '' # Erase newlines
elif ttype == tokens.Whitespace:
token.value = ' '
elif getattr(token, 'value', None) == 'NULL':
token.value = '#'
if not token.is_whitespace:
one_before = token
def match_keyword(token, keywords):
"""
Checks if the given token represents one of the given keywords
"""
if not token:
return False
if not token.is_keyword:
return False
return token.value.upper() in keywords
def _is_group(token):
"""
sqlparse 0.2.2 changed it from a callable to a bool property
"""
is_group = token.is_group
if isinstance(is_group, bool):
return is_group
else:
return is_group()
```
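To see what the fingerprinting does, `sql_fingerprint` can be called directly on a hand-written query. The sketch below only shows the call pattern; the exact simplified string (literals replaced with `#`, selected columns collapsed to `...` when `hide_columns` is true) depends on sqlparse's tokenization, so it is not asserted here.
```python
# Hedged sketch: exercising sql_fingerprint on a made-up query.
from django_perf_rec.sql import sql_fingerprint

query = "SELECT name, email FROM auth_user WHERE id = 12"
print(sql_fingerprint(query))                      # columns hidden, literal replaced
print(sql_fingerprint(query, hide_columns=False))  # columns kept, literal replaced
```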
#### File: django-perf-rec/django_perf_rec/utils.py
```python
import difflib
import inspect
from collections import namedtuple
try:
from _pytest.fixtures import FixtureRequest
except ImportError:
FixtureRequest = None
TestDetails = namedtuple('TestDetails', ['file_path', 'class_name', 'test_name'])
def current_test():
"""
Use a little harmless stack inspection to determine the test that is currently running.
"""
frame = inspect.currentframe()
try:
while True:
details = (
_get_details_from_test_function(frame) or
_get_details_from_pytest_request(frame)
)
if details:
return details
# Next frame
frame = frame.f_back
if frame is None:
break
raise RuntimeError("Could not automatically determine the test name.")
finally:
# Always delete frame references to help garbage collector
del frame
def _get_details_from_test_function(frame):
if not frame.f_code.co_name.startswith('test_'):
return
file_path = frame.f_globals['__file__']
# May be a pytest function test so we can't assume 'self' exists
its_self = frame.f_locals.get('self', None)
if its_self is None:
class_name = None
else:
class_name = its_self.__class__.__name__
test_name = frame.f_code.co_name
return TestDetails(
file_path=file_path,
class_name=class_name,
test_name=test_name,
)
def _get_details_from_pytest_request(frame):
if FixtureRequest is None:
return
request = frame.f_locals.get('request', None)
if request is None:
return
if request.cls is not None:
class_name = request.cls.__name__
else:
class_name = None
return TestDetails(
file_path=request.fspath.strpath,
class_name=class_name,
test_name=request.function.__name__,
)
def sorted_names(names):
"""
Sort a list of names but keep the word 'default' first if it's there.
"""
names = list(names)
have_default = False
if 'default' in names:
names.remove('default')
have_default = True
sorted_names = sorted(names)
if have_default:
sorted_names = ['default'] + sorted_names
return sorted_names
def record_diff(old, new):
"""
Generate a human-readable diff of two performance records.
"""
return '\n'.join(difflib.ndiff(
['%s: %s' % (k, v) for op in old for k, v in op.items()],
['%s: %s' % (k, v) for op in new for k, v in op.items()],
))
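# --- Hedged sketch (not part of the original file) ---
# Quick illustration of the two helpers above; the inputs are made up.
if __name__ == '__main__':
    print(sorted_names(['replica', 'default', 'analytics']))
    # -> ['default', 'analytics', 'replica']
    old = [{'db': 'SELECT ... FROM auth_user WHERE id = #'}]
    new = old + [{'cache|get': 'user.#'}]
    print(record_diff(old, new))  # ndiff marks the added cache op with a leading '+'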
``` |
{
"source": "joehybird/factory_boy",
"score": 2
} |
#### File: django_demo/generic_foreignkey/tests.py
```python
from django.contrib.auth.models import User, Group
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from .factories import UserFactory, GroupFactory, TaggedUserFactory, TaggedGroupFactory
class GenericFactoryTest(TestCase):
def test_user_factory(self):
user = UserFactory()
self.assertEqual(user.first_name, 'Adam')
def test_group_factory(self):
group = GroupFactory()
self.assertEqual(group.name, 'group')
def test_generic_user(self):
model = TaggedUserFactory(tag='user')
self.assertEqual(model.tag, 'user')
self.assertTrue(isinstance(model.content_object, User))
self.assertEqual(model.content_type, ContentType.objects.get_for_model(model.content_object))
def test_generic_group(self):
model = TaggedGroupFactory(tag='group')
self.assertEqual(model.tag, 'group')
self.assertTrue(isinstance(model.content_object, Group))
self.assertEqual(model.content_type, ContentType.objects.get_for_model(model.content_object))
```
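The tests above exercise factories that are not included in this excerpt. For orientation, one common way to write such factories with factory_boy is sketched below; it is an assumption about what the project's `factories.py` could look like, not the repository's actual code, and the model path in `Meta` is invented.
```python
# Hedged sketch of what the factories module could contain (assumed, not the
# repository's actual code); model paths and field values are illustrative.
import factory
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType


class UserFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = User

    username = factory.Sequence(lambda n: 'user%d' % n)
    first_name = 'Adam'


class TaggedUserFactory(factory.django.DjangoModelFactory):
    class Meta:
        model = 'generic_foreignkey.TaggedItem'  # assumed app_label.ModelName

    tag = 'user'
    content_object = factory.SubFactory(UserFactory)
    object_id = factory.SelfAttribute('content_object.id')
    content_type = factory.LazyAttribute(
        lambda o: ContentType.objects.get_for_model(o.content_object))
```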
#### File: factory_boy/tests/test_alchemy.py
```python
import factory
from .compat import unittest
from .compat import mock
import warnings
from factory.alchemy import SQLAlchemyModelFactory
from .alchemyapp import models
class StandardFactory(SQLAlchemyModelFactory):
class Meta:
model = models.StandardModel
sqlalchemy_session = models.session
id = factory.Sequence(lambda n: n)
foo = factory.Sequence(lambda n: 'foo%d' % n)
class NonIntegerPkFactory(SQLAlchemyModelFactory):
class Meta:
model = models.NonIntegerPk
sqlalchemy_session = models.session
id = factory.Sequence(lambda n: 'foo%d' % n)
class NoSessionFactory(SQLAlchemyModelFactory):
class Meta:
model = models.StandardModel
sqlalchemy_session = None
id = factory.Sequence(lambda n: n)
class SQLAlchemyPkSequenceTestCase(unittest.TestCase):
def setUp(self):
super(SQLAlchemyPkSequenceTestCase, self).setUp()
StandardFactory.reset_sequence(1)
NonIntegerPkFactory._meta.sqlalchemy_session.rollback()
def test_pk_first(self):
std = StandardFactory.build()
self.assertEqual('foo1', std.foo)
def test_pk_many(self):
std1 = StandardFactory.build()
std2 = StandardFactory.build()
self.assertEqual('foo1', std1.foo)
self.assertEqual('foo2', std2.foo)
def test_pk_creation(self):
std1 = StandardFactory.create()
self.assertEqual('foo1', std1.foo)
self.assertEqual(1, std1.id)
StandardFactory.reset_sequence()
std2 = StandardFactory.create()
self.assertEqual('foo0', std2.foo)
self.assertEqual(0, std2.id)
def test_pk_force_value(self):
std1 = StandardFactory.create(id=10)
self.assertEqual('foo1', std1.foo) # sequence and pk are unrelated
self.assertEqual(10, std1.id)
StandardFactory.reset_sequence()
std2 = StandardFactory.create()
self.assertEqual('foo0', std2.foo) # Sequence doesn't care about pk
self.assertEqual(0, std2.id)
class SQLAlchemySessionPersistenceTestCase(unittest.TestCase):
def setUp(self):
super(SQLAlchemySessionPersistenceTestCase, self).setUp()
self.mock_session = mock.NonCallableMagicMock(spec=models.session)
def test_flushing(self):
class FlushingPersistenceFactory(StandardFactory):
class Meta:
sqlalchemy_session = self.mock_session
sqlalchemy_session_persistence = 'flush'
self.mock_session.commit.assert_not_called()
self.mock_session.flush.assert_not_called()
FlushingPersistenceFactory.create()
self.mock_session.commit.assert_not_called()
self.mock_session.flush.assert_called_once_with()
def test_committing(self):
class CommittingPersistenceFactory(StandardFactory):
class Meta:
sqlalchemy_session = self.mock_session
sqlalchemy_session_persistence = 'commit'
self.mock_session.commit.assert_not_called()
self.mock_session.flush.assert_not_called()
CommittingPersistenceFactory.create()
self.mock_session.commit.assert_called_once_with()
self.mock_session.flush.assert_not_called()
def test_noflush_nocommit(self):
class InactivePersistenceFactory(StandardFactory):
class Meta:
sqlalchemy_session = self.mock_session
sqlalchemy_session_persistence = None
self.mock_session.commit.assert_not_called()
self.mock_session.flush.assert_not_called()
InactivePersistenceFactory.create()
self.mock_session.commit.assert_not_called()
self.mock_session.flush.assert_not_called()
def test_type_error(self):
with self.assertRaises(TypeError):
class BadPersistenceFactory(StandardFactory):
class Meta:
sqlalchemy_session_persistence = 'invalid_persistence_option'
model = models.StandardModel
def test_force_flush_deprecation(self):
with warnings.catch_warnings(record=True) as warning_list:
class OutdatedPersistenceFactory(StandardFactory):
class Meta:
force_flush = True
sqlalchemy_session = self.mock_session
# There should be *1* DeprecationWarning
self.assertEqual(len(warning_list), 1)
warning = warning_list[0]
self.assertTrue(issubclass(warning.category, DeprecationWarning))
# The warning text should point to the class declaration.
text = warnings.formatwarning(warning.message, warning.category, warning.filename, warning.lineno)
self.assertIn('test_alchemy.py', text)
self.assertIn('class OutdatedPersistenceFactory', text)
# However, we shall keep the old-style behavior.
self.mock_session.commit.assert_not_called()
self.mock_session.flush.assert_not_called()
OutdatedPersistenceFactory.create()
self.mock_session.commit.assert_not_called()
self.mock_session.flush.assert_called_once_with()
class SQLAlchemyNonIntegerPkTestCase(unittest.TestCase):
def setUp(self):
super(SQLAlchemyNonIntegerPkTestCase, self).setUp()
NonIntegerPkFactory.reset_sequence()
NonIntegerPkFactory._meta.sqlalchemy_session.rollback()
def test_first(self):
nonint = NonIntegerPkFactory.build()
self.assertEqual('foo0', nonint.id)
def test_many(self):
nonint1 = NonIntegerPkFactory.build()
nonint2 = NonIntegerPkFactory.build()
self.assertEqual('foo0', nonint1.id)
self.assertEqual('foo1', nonint2.id)
def test_creation(self):
nonint1 = NonIntegerPkFactory.create()
self.assertEqual('foo0', nonint1.id)
NonIntegerPkFactory.reset_sequence()
nonint2 = NonIntegerPkFactory.build()
self.assertEqual('foo0', nonint2.id)
def test_force_pk(self):
nonint1 = NonIntegerPkFactory.create(id='foo10')
self.assertEqual('foo10', nonint1.id)
NonIntegerPkFactory.reset_sequence()
nonint2 = NonIntegerPkFactory.create()
self.assertEqual('foo0', nonint2.id)
class SQLAlchemyNoSessionTestCase(unittest.TestCase):
def test_create_raises_exception_when_no_session_was_set(self):
with self.assertRaises(RuntimeError):
NoSessionFactory.create()
def test_build_does_not_raises_exception_when_no_session_was_set(self):
inst0 = NoSessionFactory.build()
inst1 = NoSessionFactory.build()
self.assertEqual(inst0.id, 0)
self.assertEqual(inst1.id, 1)
``` |
{
"source": "joehybird/mock-services",
"score": 3
} |
#### File: mock-services/mock_services/service.py
```python
import json
import logging
import re
try:
from urllib import parse as urlparse
except ImportError:
# Python 2
import urlparse
import attr
from . import storage
from .decorators import to_json
from .decorators import trap_errors
from .exceptions import Http400
from .exceptions import Http404
logger = logging.getLogger(__name__)
@attr.s
class ResourceContext(object):
hostname = attr.ib()
resource = attr.ib()
action = attr.ib(default='default')
id = attr.ib(default=None)
@property
def key(self):
return '{hostname}/{resource}/{action}'.format(**attr.asdict(self))
def parse_url(request, url_pattern, id=None, require_id=False):
logger.debug('url_pattern: %s', url_pattern)
logger.debug('url: %s', request.url)
url_kw = re.compile(url_pattern).search(request.url).groupdict()
logger.debug('url_kw: %s', url_kw)
if 'resource' not in url_kw:
raise Http404
if require_id and 'id' not in url_kw:
raise Http404
hostname = urlparse.urlparse(request.url).hostname
logger.debug('hostname: %s', hostname)
action = url_kw.pop('action', 'default')
logger.debug('action: %s', action)
resource_context = ResourceContext(
hostname=hostname,
resource=url_kw.pop('resource'),
action=action,
id=url_kw.pop('id', id),
)
logger.debug('resource_context: %s', attr.asdict(resource_context))
return resource_context
def validate_data(request, attrs=None, validators=None):
logger.debug('attrs: %s', attrs)
logger.debug('body: %s', request.body)
data = json.loads(request.body)
data_to_validate = {k: v
for k, v in data.items()
if k in (attrs or {}).keys()}
logger.debug('data_to_validate: %s', data_to_validate)
# missing field
if attrs and not data_to_validate:
raise Http400
# invalid field
if data_to_validate:
try:
attr.make_class("C", attrs)(**data_to_validate)
except (TypeError, ValueError):
raise Http400
# custom validation
for validate_func in (validators or []):
validate_func(request)
return data
@to_json
@trap_errors
def list_cb(request, context, url=None, **kwargs):
resource_context = parse_url(request, url)
context.status_code = 200
return storage.to_list(resource_context)
@to_json
@trap_errors
def get_cb(request, context, url=None, **kwargs):
resource_context = parse_url(request, url, require_id=True)
context.status_code = 200
return storage.get(resource_context)
@trap_errors
def head_cb(request, context, url=None, id_name='id', **kwargs):
resource_context = parse_url(request, url, require_id=True)
context.headers = dict(context.headers or {},
**{id_name: resource_context.id})
context.status_code = 200
return ''
@to_json
@trap_errors
def post_cb(request, context, url=None, id_name='id', id_factory=int,
attrs=None, validators=None, **kwargs):
data = validate_data(request, attrs=attrs, validators=validators)
id = storage.next_id(id_factory)
logger.debug('id: %s', id)
data.update({
id_name: id
})
logger.debug('data: %s', data)
resource_context = parse_url(request, url, id=id)
context.status_code = 201
return storage.add(resource_context, data)
@to_json
@trap_errors
def patch_cb(request, context, url=None, attrs=None, validators=None,
**kwargs):
data = validate_data(request, attrs=attrs, validators=validators)
logger.debug('data: %s', data)
resource_context = parse_url(request, url, require_id=True)
context.status_code = 200
return storage.update(resource_context, data)
put_cb = patch_cb
@trap_errors
def delete_cb(request, context, url=None, **kwargs):
resource_context = parse_url(request, url, require_id=True)
context.status_code = 204
return storage.remove(resource_context) or ''
```
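All of the callbacks above funnel through `parse_url`, which requires the rule's URL regex to expose named groups: `resource` is mandatory, while `id` and `action` are optional. A self-contained sketch of that contract is shown below; the hostname and endpoint are invented for illustration.
```python
# Hedged sketch: the named-group contract that parse_url() relies on.
import re

url_pattern = r'^https://api\.example\.com/(?P<resource>\w+)(/(?P<id>\d+))?$'
match = re.compile(url_pattern).search('https://api.example.com/users/42')
print(match.groupdict())  # {'resource': 'users', 'id': '42'}
```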
#### File: mock-services/tests/test_http_mock.py
```python
import logging
import unittest
import requests
from requests.exceptions import ConnectionError
from mock_services import http_mock
from mock_services import is_http_mock_started
from mock_services import no_http_mock
from mock_services import reset_rules
from mock_services import start_http_mock
from mock_services import stop_http_mock
from mock_services import update_http_rules
from mock_services import with_http_mock
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(name)s %(message)s'
)
def fake_duckduckgo_cb(request, context):
return 'Coincoin!'
rules = [
{
'text': fake_duckduckgo_cb,
'headers': {'Content-Type': 'text/html'},
'method': 'GET',
'url': r'^https://duckduckgo.com/\?q='
},
]
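# Note on the rule schema above (inferred from the assertions in the tests below,
# not from library docs): 'method' and 'url' (a regex) select which requests to
# intercept, while the remaining keys ('text', 'status_code', 'headers', ...)
# describe the mocked response; 'text' may be a plain string or a callback such
# as fake_duckduckgo_cb.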
class HttpTestCase(unittest.TestCase):
def setUp(self):
stop_http_mock()
reset_rules()
http_mock.set_allow_external(False)
tearDown = setUp
def test_reset_rules(self):
self.assertFalse(http_mock.get_rules())
update_http_rules(rules)
self.assertEqual(len(http_mock.get_rules()), 1)
# reset
reset_rules()
self.assertFalse(http_mock.get_rules())
def test_update_rules(self):
self.assertFalse(http_mock.get_rules())
# add first rule
update_http_rules(rules)
self.assertEqual(len(http_mock.get_rules()), 1)
matcher = http_mock.get_rules()[0]
self.assertEqual(matcher._method, 'GET')
self.assertTrue(hasattr(matcher._url, 'match'))
self.assertTrue(matcher._url.match('https://duckduckgo.com/?q=mock-services')) # noqa
response = matcher._responses[0]
self.assertTrue(hasattr(response._params['text'], '__call__'))
self.assertEqual(response._params['headers']['Content-Type'], 'text/html') # noqa
# add second rule
update_http_rules([
{
'method': 'POST',
'status_code': 201,
'text': '{"coin": 1}',
'url': r'http://dummy/',
},
])
self.assertEqual(len(http_mock.get_rules()), 2)
matcher = http_mock.get_rules()[1]
self.assertTrue(hasattr(matcher._url, 'match'))
self.assertTrue(matcher._url.match('http://dummy/'))
self.assertEqual(matcher._method, 'POST')
response = matcher._responses[0]
self.assertEqual(response._params['status_code'], 201)
self.assertEqual(response._params['text'], '{"coin": 1}')
self.assertEqual(response._params['headers']['Content-Type'], 'text/plain') # noqa
def test_start_http_mock(self):
update_http_rules(rules)
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content[:15], b'<!DOCTYPE html>')
self.assertTrue(start_http_mock())
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Coincoin!')
def test_stop_http_mock(self):
update_http_rules(rules)
self.assertTrue(start_http_mock())
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Coincoin!')
self.assertTrue(stop_http_mock())
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content[:15], b'<!DOCTYPE html>')
def test_restart_http_mock(self):
update_http_rules(rules)
start_http_mock()
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Coincoin!')
self.assertTrue(stop_http_mock())
# already stopped
self.assertFalse(stop_http_mock())
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content[:15], b'<!DOCTYPE html>')
self.assertTrue(start_http_mock())
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Coincoin!')
# already started
self.assertFalse(start_http_mock())
def test_is_http_mock_started(self):
update_http_rules(rules)
self.assertFalse(is_http_mock_started())
self.assertTrue(start_http_mock())
self.assertTrue(is_http_mock_started())
def test_no_http_mock(self):
update_http_rules(rules)
self.assertTrue(start_http_mock())
@no_http_mock
def please_do_not_mock_me():
self.assertFalse(is_http_mock_started())
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content[:15], b'<!DOCTYPE html>')
please_do_not_mock_me()
self.assertTrue(is_http_mock_started())
def test_with_http_mock(self):
update_http_rules(rules)
self.assertFalse(is_http_mock_started())
@with_http_mock
def please_do_not_mock_me():
self.assertTrue(is_http_mock_started())
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Coincoin!')
please_do_not_mock_me()
self.assertFalse(is_http_mock_started())
def test_real_http_0(self):
update_http_rules(rules)
self.assertTrue(start_http_mock())
# mocked
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Coincoin!')
# not mocked but fail
self.assertRaises(ConnectionError, requests.get,
'https://www.google.com/#q=mock-services')
# test we keep the request
try:
url = 'https://www.google.com/#q=mock-services'
requests.get(url)
except ConnectionError as e:
self.assertEqual(e.request.url, url)
def test_real_http_1(self):
update_http_rules(rules)
self.assertTrue(start_http_mock())
# allow external call
http_mock.set_allow_external(True)
# mocked
response = requests.get('https://duckduckgo.com/?q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'Coincoin!')
# not mocked but do an external call
response = requests.get('https://www.google.com/#q=mock-services')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content[:15], b'<!doctype html>')
```
{
"source": "JoeinChina/DeepWeather",
"score": 3
}
#### File: src/runtime/capture.py
```python
from model_config import base_config
import imghdr
import os
from datetime import datetime, timedelta
from PIL import Image
import sys
sys.path.append(base_config['append_path'])
def check_last(path0, time2):
filename = time2.strftime("%Y%m%d%H%M") + '.PNG'
if os.path.exists(os.path.join(path0, filename)) and imghdr.what(os.path.join(path0, filename)) != None:
return filename
else:
for i in range(1, 100):
filename1 = (time2 - timedelta(minutes=(6 * i))
).strftime("%Y%m%d%H%M") + '.PNG'
if os.path.exists(os.path.join(path0, filename1)) and imghdr.what(os.path.join(path0, filename1)) != None:
break
return filename1
def check_next(path0, time2):
filename = time2.strftime("%Y%m%d%H%M") + '.PNG'
if os.path.exists(os.path.join(path0, filename)) and imghdr.what(os.path.join(path0, filename)) != None:
return filename
else:
for i in range(1, 100):
filename1 = (time2 + timedelta(minutes=(6 * i))
).strftime("%Y%m%d%H%M") + '.PNG'
if os.path.exists(os.path.join(path0, filename1)) and imghdr.what(os.path.join(path0, filename1)) != None:
break
return filename1
def extract_last(path0, time1, path1):
with Image.open(os.path.join(path0, check_last(path0, time1)), 'r') as f:
f.save(os.path.join(path1, time1.strftime("%Y%m%d%H%M") + '.png'))
def extract_next(path0, time1, path1):
with Image.open(os.path.join(path0, check_next(path0, time1))) as f:
f.save(os.path.join(path1, time1.strftime("%Y%m%d%H%M") + '.png'))
def check_dir(path1):
if not os.path.exists(path1):
os.makedirs(path1)
assert os.path.exists(path1)
def main():
predict_moment = datetime.strptime(sys.argv[1], "%Y%m%d%H%M")
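# NOTE: raw_image_path and to_path below are not defined in this module; they
# are assumed to come from the project's runtime settings (an assumption based
# on the `from settings import *` pattern used elsewhere in this repo).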
raw_img_path = raw_image_path
to_folder = to_path
check_dir(to_folder)
extract_to = os.path.join(to_folder, predict_moment.strftime("%Y%m%d%H%M"))
check_dir(extract_to)
check_dir(os.path.join(extract_to, 'input10'))
check_dir(os.path.join(extract_to, 'true20'))
for i in range(10):
extract_last(path0=os.path.join(raw_img_path, predict_moment.strftime('%Y%m')),
time1=predict_moment - timedelta(minutes=(6 * i)),
path1=os.path.join(extract_to, 'input10'))
for j in range(1, 21):
extract_next(path0=os.path.join(raw_img_path, predict_moment.strftime("%Y%m")),
time1=predict_moment + timedelta(minutes=(6 * j)),
path1=os.path.join(extract_to, 'true20'))
if __name__ == '__main__':
main()
```
#### File: src/runtime/deduce_emsemble.py
```python
from settings import *
import sys
sys.path.append(append_path)
import os
import os.path
import random
import theano
import json
from utils import *
from reader import *
from factorWeather import FactorWeatherModel
import sparnn
from sparnn.optimizers import RMSProp
import argparse
import numpy as np
import datetime
import time
import logging
import ConfigParser
from netCDF4 import Dataset
from crypt import crypt
from mylog import mylog as lg
# sparnn.utils.quick_logging_config('deep-weather.log')
def float_formatter(x): return "%.2f" % x
np.set_printoptions(formatter={'float_kind': float_formatter})
# Enable or disable parallel computation on the CPU with OpenMP
theano.config.openmp = True
theano.config.optimizer = "fast_compile"
# The number of user stack level to keep for variables.
theano.config.traceback.limit = 100
my_log = lg.init_logger(deduce_log_path)
# dictMerged2=dict(dict1, **dict2)
radar_config = {
'name': 'radar',
'max': 14.0,
'offset': 0.0,
'cmap': 'radar',
'cost_func': 'Fade',
'level': 0,
'data_dir': radar_data_dir,
'save_dir': radar_save_dir
}
wind_config = {
'name': 'wind',
'level': 10,
'max': 50.0,
'offset': 25.0,
'cmap': 'wind',
'data_dir': wind_data_dir,
'save_dir': wind_save_dir,
'cost_func': 'BinaryCrossEntropy'
}
pgm_config = {
'name': 'pgm',
'level': 10,
'max': 255.0,
'offset': 0,
'cmap': 'pgm',
'data_dir': pgm_data_dir,
'save_dir': pgm_save_dir,
'cost_func': 'Fade'
}
model_config = {
'interval': 6,
'input_seq_length': 10,
'output_seq_length': 20,
'minibatch_size': 8,
'learning_rate': 0.003, # ori 0.002
'patch_size': 2,
'max_epoch': 20,
'layer_num': 6,
'layer_name': 'lstm',
'kernel_size': 3,
'kernel_num': (64, 64, 64),
'size': (200, 200),
'compress': 2,
'model_path': model_path,
'model_name': None,
#'vmax': 100,#instead of 'model'
'use_input_mask': False,
'input_data_type': 'float32',
'is_output_sequence': True,
#'name': 'DeepWeather',
'cost_func': 'Fade',
#'cost_func': 'BinaryCrossEntropy' #
}
def check_crypt():
with open('crypt') as f:
line = f.readline()
if line == crypt():
return True
return False
def get_config(mode='train', src='radar'):
if src == 'radar':
config = dict(model_config, **radar_config)
elif src == 'wind':
config = dict(model_config, **wind_config)
elif src == 'pgm':
config = dict(model_config, **pgm_config)
if mode == "train":
config['start_date'] = start_date
config['end_date'] = end_date
elif mode == "valid":
config['start_date'] = valid_start_date
config['end_date'] = valid_end_date
elif mode == "test":
config['start_date'] = test_start_date
config['end_date'] = test_end_date
return config
def predict(begin_date, end_date, save_mode, src='radar'):
config = get_config('test', src=src)
config['savejson'] = save_mode
save_dir = config['save_dir']
if not os.path.exists(save_dir):
os.makedirs(save_dir)
model_path = config['model_path']
model_list = ['DeepWeatherModel_5690.pkl', 'DeepWeatherModel_800.pkl']
predict_res = [[] for i in range(len(model_list) + 1)]
for x, model_name in enumerate(model_list):
print('Load model:' + model_path + '/' + model_name)
config['model_name'] = model_name
model = FactorWeatherModel.load(os.path.join(model_path, model_name))
print("done")
model.set_mode("predict")
predict_func = theano.function(inputs=model.interface_layer.input_symbols(),
outputs=sparnn.utils.quick_reshape_patch_back(model.middle_layers[-1].output,
config['patch_size']),
on_unused_input='ignore')
it = begin_date
while(it <= end_date):
#it += datetime.timedelta(minutes=6)
start_date = it - \
datetime.timedelta(
minutes=(config['input_seq_length'] - 1) * config['interval'])
config['start_date'] = start_date
config['end_date'] = it
print('loading data', config['start_date'], config['end_date'])
try:
test_iterator = load_data(config, mode='predict')
test_iterator.begin(do_shuffle=False)
except Exception as e:
print(Exception, e)
# advance the time pointer before continuing, otherwise a persistent
# load failure would loop forever on the same timestamp
it += datetime.timedelta(minutes=6)
continue
result = predict_func(*(test_iterator.input_batch())) * \
config['max'] - config['offset']
result_dir = os.path.join(save_dir, it.strftime('%Y%m%d%H%M'))
if not os.path.exists(result_dir):
os.makedirs(result_dir)
input_image = np.reshape(test_iterator.input_batch()[
0][-1][0], (1, config['size'][0], config['size'][1]))[0] * config['max'] - config['offset']
#write_image_update(input_image, result_dir, it, config)
print('predict', it, result.shape, input_image.max(),
input_image.min(), result.max(), result.min())
for i, r in enumerate(result):
image = np.reshape(
r[0], (1, config['size'][0], config['size'][1]))[0]
image = resize(
image, (config['size'][0] * config['compress'], config['size'][1] * config['compress']))
predict_res[x].append(image)
write_image_update(image, result_dir, it, config, predict=i)
it += datetime.timedelta(minutes=6)
for i in range(config['output_seq_length']):
# file1 = begin_date.strftime(
# "%Y%m%d%H%M") + "-" + str(i) + "-" + model_list[0][:-4] + '.json'
# file2 = begin_date.strftime(
# "%Y%m%d%H%M") + "-" + str(i) + "-" + model_list[1][:-4] + '.json'
# a1 = np.array(json.load(open(os.path.join(
# save_dir + "/" + begin_date.strftime("%Y%m%d%H%M"), file1))))
# a2 = np.array(json.load(open(os.path.join(
# save_dir + "/" + begin_date.strftime("%Y%m%d%H%M"), file2))))
a1 = predict_res[0][i]
a2 = predict_res[1][i]
A = 0.7 * a1 + 0.5 * a2
config['savejson'] = 'both'
predict_res[-1].append(A)
# write_image(A, os.path.join(save_dir, begin_date.strftime("%Y%m%d%H%M")),
# begin_date, config, predict=i)
write_image(predict_res[-1][i], os.path.join(save_dir, begin_date.strftime("%Y%m%d%H%M")),
begin_date, config, predict=i)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Deduce future images.')
parser.add_argument('mode', metavar='base', type=str, default='run',
help='Mode: run, train, predict')
parser.add_argument('--src', type=str, default='radar', required=False,
help='Type of data: radar, pgm, wind')
parser.add_argument('save_mode', type=str, default='onlypng',
help='if save jsonfile: onlypng or both')
parser.add_argument('start', type=str,
default='201902192000', help='predict start time')
parser.add_argument('end', type=str,
default='201902192030', help='predict end time')
args = parser.parse_args()
mode = args.mode
src = args.src
save_mode = args.save_mode
start = args.start
end = args.end
if mode == 'train':
try:
train(src=src)
my_log.info('End of the training')
except Exception as e:
my_log.info('error2: ' + str(e))
elif mode == 'predict':
begin_date = datetime.datetime.strptime(start, "%Y%m%d%H%M")
end_date = datetime.datetime.strptime(end, "%Y%m%d%H%M")
predict(begin_date, end_date, save_mode, src=src)
elif mode == 'run':
run(src=src)
```
#### File: src/runtime/prediction_remover.py
```python
import os
import shutil
import sys
from datetime import datetime, timedelta
from settings import *
from mylog import mylog as lg
remove_log = lg.init_logger(remove_log_path)
# def calc_size():
# pass
def remove_all(path1):
try:
shutil.rmtree(path1)
remove_log.info(" Clean train data done !")
except Exception as e:
remove_log.error(" Clean error: %s", e)
def remove_folder(path1):
for dirpath, dirnames, filenames in os.walk(path1):
for filename in filenames:
if os.path.isfile(os.path.join(dirpath, filename)):
os.remove(os.path.join(dirpath, filename))
shutil.rmtree(dirpath)
def remove_part(path1, t):
for dir in os.listdir(path1):
if os.path.isfile(os.path.join(path1, dir)):
os.remove(os.path.join(path1, dir))
elif datetime.strptime(dir, "%Y%m%d%H%M") + timedelta(hours=7) < datetime.strptime(t, "%Y%m%d%H%M"):
try:
remove_folder(os.path.join(path1, dir))
remove_log.info(" Clean prediction folder [" + dir + "]")
except Exception as e:
#remove_log.error(" Clean prediction folder error : ", e)
print(str(e))
else:
pass
def main():
check1 = to_path
check2 = train_pool
check3 = radar_save_dir
check_time = datetime.now().strftime("%Y%m%d%H%M")
remove_part(check1, check_time)
remove_all(check2)
remove_part(check3, check_time)
if __name__ == '__main__':
main()
```
#### File: src/runtime/process_runtime_for_train.py
```python
from datetime import datetime, timedelta
import imghdr
import os
import shutil
from PIL import Image
import numpy as np
import sys
from settings import *
from mylog import mylog as lg
preprocess_log = lg.init_logger(preprocess_log_path)
def crop(input_dir, filename, output_dir):
img = Image.open(os.path.join(input_dir, filename))
img = img.convert('RGB')
q = np.asarray(img)
q = q[0:800, 224:1024]
q = q[100:500, 300:700]
final = Image.fromarray(q.astype('uint8')).convert('RGB')
final.save(os.path.join(output_dir, filename))
def wipeBaseMap(local_path, filename):
img = Image.open(os.path.join(local_path, filename))
width = img.size[0]
height = img.size[1]
for i in range(0, width):
for j in range(0, height):
data = (img.getpixel((i, j)))
if (data[0] == 1 and data[1] == 160 and data[2] == 246):
n = 1
elif (data[0] == 0 and data[1] == 236 and data[2] == 236):
n = 1
elif (data[0] == 0 and data[1] == 216 and data[2] == 0):
n = 1
elif (data[0] == 1 and data[1] == 144 and data[2] == 0):
n = 1
elif (data[0] == 255 and data[1] == 255 and data[2] == 0):
n = 1
elif (data[0] == 231 and data[1] == 192 and data[2] == 0):
n = 1
elif (data[0] == 255 and data[1] == 144 and data[2] == 0):
n = 1
elif (data[0] == 255 and data[1] == 0 and data[2] == 0):
n = 1
elif (data[0] == 214 and data[1] == 0 and data[2] == 0):
n = 1
elif (data[0] == 192 and data[1] == 0 and data[2] == 0):
n = 1
elif (data[0] == 255 and data[1] == 0 and data[2] == 240):
n = 1
elif (data[0] == 150 and data[1] == 0 and data[2] == 180):
n = 1
elif (data[0] == 173 and data[1] == 144 and data[2] == 240):
n = 1
else:
img.putpixel((i, j), (255, 255, 255))
n = 1
img = img.convert("RGB")
img.save(os.path.join(local_path, filename))
def format_img(path1, filename, path2):
name_new = filename[:12] + '.PNG' # local env
# name_new =
crop(path1, filename, path2)
os.rename(os.path.join(path2, filename), os.path.join(path2, name_new))
wipeBaseMap(local_path=path2, filename=name_new)
def check_dir(path0):
if not os.path.exists(path0):
os.makedirs(path0)
assert os.path.exists(path0)
def main():
raw_path = raw_image_path + '201904/'
to_fold = train_pool
check_dir(raw_path)
check_dir(to_fold)
time_now = datetime.now() - timedelta(hours=6)
time_start = time_now - timedelta(days=1)
while (time_start <= time_now):
target = time_start.strftime("%Y%m%d%H%M") + '.PNG'
if os.path.exists(os.path.join(raw_path, target)) and imghdr.what(os.path.join(raw_path, target)) != None:
format_img(path1=raw_path, filename=target, path2=to_fold)
else:
preprocess_log.info(target + " doesn't exist or was broken !!!")
pass
time_start += timedelta(minutes=6)
preprocess_log.info(time_now.strftime("%Y-%m-%d %H:%M") + " preprocess train data done !")
preprocess_log.info("------- ------- ------- ------ ------ -------\r")
if __name__ == '__main__':
print(datetime.now())
main()
print(datetime.now())
```
#### File: src/runtime/runtime_test.py
```python
from model_config import base_config
import os
from datetime import datetime, timedelta
import sys
sys.path.append(base_config['append_path'])
from mylog import mylog as lg
runtime_log = lg.init_logger(base_config['runtime_log_path'])
def main(t1):
#start_time = datetime(2018, 6, 30, 10, 0, 0)
#end_time = datetime(2018, 6, 30, 11, 0, 0)
start_time = t1
end_time = start_time
while start_time <= end_time:
now = start_time.strftime("%Y-%m-%d %H:%M")
print(start_time)
runtime_log.info(' Predict time: ' + now)
try:
os.system('/data/anaconda2/bin/python /data/python_scripts/Pre_processor.py ' +
start_time.strftime("%Y%m%d%H%M"))
runtime_log.info(' 1. Pre_precess Done!')
except Exception as e:
runtime_log.info(' Pre_process err: ' + str(e))
sys.exit(0)
try:
os.system('/data/anaconda2/bin/python /data/python_scripts/bg_Transparent.py ' +
start_time.strftime("%Y%m%d%H%M"))
runtime_log.info(' 2. Background Transparent!')
except Exception as e:
runtime_log.info(' Calc err: ' + str(e))
sys.exit(0)
try:
os.system('/data/anaconda2/bin/python /data/weather_update/src/runtime/deduce_runtime.py predict --src radar '+
start_time.strftime("%Y%m%d%H%M") + " " + start_time.strftime("%Y%m%d%H%M"))
runtime_log.info(' 3.Predict ' + now + ' Done!')
except Exception as e:
runtime_log.info(' Predict err: ' + str(e))
sys.exit(0)
print "- > -> -> End " + now
start_time = start_time + timedelta(minutes=6)
if __name__ == '__main__':
runtime_log.info(' Start...')
time_now = datetime.now()
t = datetime(time_now.date().year, time_now.date().month, time_now.date().day, time_now.time().hour, time_now.time().minute, 0)
main(t)
runtime_log.info(' End !!!')
runtime_log.info(' ========================================\n')
```
#### File: sparnn/iterators/imdb_iterator.py
```python
import numpy
import logging
import theano
import theano.tensor as TT
import theano.tensor.nnet
import random
import cPickle
from sparnn.utils import *
from sparnn.iterators import PklIterator
logger = logging.getLogger(__name__)
class IMDBIterator(PklIterator):
def __init__(self, iterator_param):
super(IMDBIterator, self).__init__(iterator_param)
self.vocabulary_size = 10000
```
#### File: layers/basic/conv_lstm_layer.py
```python
import numpy
import logging
import theano
import theano.tensor as TT
from theano.gradient import grad_clip
from sparnn.utils import *
from sparnn.layers import Layer
logger = logging.getLogger(__name__)
class ConvLSTMLayer(Layer):
def __init__(self, layer_param):
super(ConvLSTMLayer, self).__init__(layer_param)
if self.input is not None:
assert 5 == self.input.ndim
else:
assert ("init_hidden_state" in layer_param or "init_cell_state" in layer_param)
self.input_receptive_field = layer_param['input_receptive_field']
self.transition_receptive_field = layer_param['transition_receptive_field']
self.gate_activation = layer_param.get('gate_activation', 'sigmoid')
self.modular_activation = layer_param.get('modular_activation', 'tanh')
self.hidden_activation = layer_param.get('hidden_activation', 'tanh')
self.init_hidden_state = layer_param.get("init_hidden_state", quick_theano_zero((self.minibatch_size,) + self.dim_out))
self.init_cell_state = layer_param.get("init_cell_state", quick_theano_zero((self.minibatch_size,) + self.dim_out))
self.init_hidden_state = TT.unbroadcast(self.init_hidden_state, *range(self.init_hidden_state.ndim))
self.init_cell_state = TT.unbroadcast(self.init_cell_state, *range(self.init_cell_state.ndim))
self.learn_padding = layer_param.get('learn_padding', False)
self.input_padding = layer_param.get('input_padding', None)
if self.input is None:
assert 'n_steps' in layer_param
self.n_steps = layer_param['n_steps']
else:
self.n_steps = layer_param.get('n_steps', self.input.shape[0])
self.kernel_size = (self.feature_out, self.feature_in,
self.input_receptive_field[0], self.input_receptive_field[1])
self.transition_mat_size = (self.feature_out, self.feature_out,
self.transition_receptive_field[0], self.transition_receptive_field[1])
#print('ConvLSTMLayer', self.kernel_size, self.transition_mat_size)
self.W_hi = quick_init_xavier(self.rng, self.transition_mat_size, self._s("W_hi"))
self.W_hf = quick_init_xavier(self.rng, self.transition_mat_size, self._s("W_hf"))
self.W_ho = quick_init_xavier(self.rng, self.transition_mat_size, self._s("W_ho"))
self.W_hc = quick_init_xavier(self.rng, self.transition_mat_size, self._s("W_hc"))
if self.input is not None:
self.W_xi = quick_init_xavier(self.rng, self.kernel_size, self._s("W_xi"))
self.W_xf = quick_init_xavier(self.rng, self.kernel_size, self._s("W_xf"))
self.W_xo = quick_init_xavier(self.rng, self.kernel_size, self._s("W_xo"))
self.W_xc = quick_init_xavier(self.rng, self.kernel_size, self._s("W_xc"))
if self.learn_padding:
self.hidden_padding = quick_zero((self.feature_out, ), self._s("hidden_padding"))
else:
self.hidden_padding = None
self.b_i = quick_zero((self.feature_out, ), self._s("b_i"))
self.b_f = quick_zero((self.feature_out, ), self._s("b_f"))
self.b_o = quick_zero((self.feature_out, ), self._s("b_o"))
self.b_c = quick_zero((self.feature_out, ), self._s("b_c"))
self.W_ci = quick_zero((self.feature_out, ), self._s("W_ci"))
self.W_cf = quick_zero((self.feature_out, ), self._s("W_cf"))
self.W_co = quick_zero((self.feature_out, ), self._s("W_co"))
if self.input is not None:
self.param = [self.W_xi, self.W_hi, self.W_ci, self.b_i,
self.W_xf, self.W_hf, self.W_cf, self.b_f,
self.W_xo, self.W_ho, self.W_co, self.b_o,
self.W_xc, self.W_hc, self.b_c]
if self.learn_padding:
self.param.append(self.hidden_padding)
else:
self.param = [self.W_hi, self.W_ci, self.b_i,
self.W_hf, self.W_cf, self.b_f,
self.W_ho, self.W_co, self.b_o,
self.W_hc, self.b_c]
if self.learn_padding:
self.param.append(self.hidden_padding)
self.is_recurrent = True
self.fprop()
def set_name(self):
self.name = "ConvLSTMLayer-" + str(self.id)
def step_fprop(self, x_t, mask_t, h_tm1, c_tm1):
#print('step fprop in conv lstm layer:', self.dim_in, self.kernel_size)
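# Gate updates implemented below (with peephole connections; '*' denotes a
# same-padded convolution and 'o' elementwise multiplication):
#   i_t = sigmoid(W_xi * x_t + W_hi * h_{t-1} + W_ci o c_{t-1} + b_i)
#   f_t = sigmoid(W_xf * x_t + W_hf * h_{t-1} + W_cf o c_{t-1} + b_f)
#   c_t = f_t o c_{t-1} + i_t o tanh(W_xc * x_t + W_hc * h_{t-1} + b_c)
#   o_t = sigmoid(W_xo * x_t + W_ho * h_{t-1} + W_co o c_t + b_o)
#   h_t = o_t o tanh(c_t)
# (the x_t terms drop out in the input-less branch further down)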
if x_t is not None:
# input_gate = x_t*W + h_t*W + c_t W
input_gate = quick_activation(conv2d_same(x_t, self.W_xi, (None, ) + self.dim_in,
self.kernel_size, self.input_padding)
+ conv2d_same(h_tm1, self.W_hi, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ c_tm1 * self.W_ci.dimshuffle('x', 0, 'x', 'x')
+ self.b_i.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
forget_gate = quick_activation(conv2d_same(x_t, self.W_xf, (None, ) + self.dim_in,
self.kernel_size, self.input_padding)
+ conv2d_same(h_tm1, self.W_hf, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ c_tm1 * self.W_cf.dimshuffle('x', 0, 'x', 'x')
+ self.b_f.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
c_t = forget_gate * c_tm1 \
+ input_gate * quick_activation(conv2d_same(x_t, self.W_xc, (None, ) + self.dim_in,
self.kernel_size, self.input_padding)
+ conv2d_same(h_tm1, self.W_hc, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ self.b_c.dimshuffle('x', 0, 'x', 'x'), "tanh")
output_gate = quick_activation(conv2d_same(x_t, self.W_xo, (None, ) + self.dim_in,
self.kernel_size, self.input_padding)
+ conv2d_same(h_tm1, self.W_ho, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ c_t * self.W_co.dimshuffle('x', 0, 'x', 'x')
+ self.b_o.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
h_t = output_gate * quick_activation(c_t, "tanh")
else:
#input_gate = h_t * W
input_gate = quick_activation(
conv2d_same(h_tm1, self.W_hi, (None, ) + self.dim_out, self.transition_mat_size, self.hidden_padding)
+ c_tm1 * self.W_ci.dimshuffle('x', 0, 'x', 'x')
+ self.b_i.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
forget_gate = quick_activation(conv2d_same(h_tm1, self.W_hf, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ c_tm1 * self.W_cf.dimshuffle('x', 0, 'x', 'x')
+ self.b_f.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
c_t = forget_gate * c_tm1 \
+ input_gate * quick_activation(conv2d_same(h_tm1, self.W_hc, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ self.b_c.dimshuffle('x', 0, 'x', 'x'), "tanh")
output_gate = quick_activation(conv2d_same(h_tm1, self.W_ho, (None, ) + self.dim_out,
self.transition_mat_size, self.hidden_padding)
+ c_t * self.W_co.dimshuffle('x', 0, 'x', 'x')
+ self.b_o.dimshuffle('x', 0, 'x', 'x'), "sigmoid")
h_t = output_gate * quick_activation(c_t, "tanh")
if mask_t is not None:
h_t = mask_t * h_t + (1 - mask_t) * h_tm1
c_t = mask_t * c_t + (1 - mask_t) * c_tm1
#print h_t.ndim, c_t.ndim
#h_t = quick_aggregate_pooling(h_t, "max", mask=None)
#c_t = quick_aggregate_pooling(c_t, "max", mask=None)
return h_t, c_t
def init_states(self):
return self.init_hidden_state, self.init_cell_state
def fprop(self):
# The dimension of self.mask is (Timestep, Minibatch).
# We need to pad it to (Timestep, Minibatch, FeatureDim, Row, Col)
# and keep the last three added dimensions broadcastable. TT.shape_padright
# function is thus a good choice
if self.mask is None:
if self.input is not None:
scan_input = [self.input]
scan_fn = lambda x_t, h_tm1, c_tm1: self.step_fprop(x_t, None, h_tm1, c_tm1)
else:
scan_input = None
scan_fn = lambda h_tm1, c_tm1: self.step_fprop(None, None, h_tm1, c_tm1)
else:
if self.input is not None:
scan_input = [self.input, TT.shape_padright(self.mask, 3)]
scan_fn = lambda x_t, mask_t, h_tm1, c_tm1: self.step_fprop(x_t, mask_t, h_tm1, c_tm1)
else:
scan_input = [TT.shape_padright(self.mask, 3)]
scan_fn = lambda mask_t, h_tm1, c_tm1: self.step_fprop(None, mask_t, h_tm1, c_tm1)
#print('conv lstm output:', scan_fn, self.init_cell_state, scan_input, self.n_steps)
[self.output, self.cell_output], self.output_update = quick_scan(fn=scan_fn,
outputs_info=[self.init_hidden_state,
self.init_cell_state],
sequences=scan_input,
name=self._s("lstm_output_func"),
n_steps=self.n_steps
)
```
#### File: layers/basic/pooling_layer.py
```python
import numpy
import logging
import theano
import theano.tensor as TT
from sparnn.utils import *
from sparnn.layers import Layer
logger = logging.getLogger(__name__)
class PoolingLayer(Layer):
def __init__(self, layer_param):
super(PoolingLayer,self).__init__(layer_param)
def set_name(self):
self.name = "PoolingLayer-" + str(self.id)
```
#### File: sparnn/optimizers/rmsprop.py
```python
import numpy
import time
import theano
import logging
import theano.tensor as TT
from sparnn.utils import *
from sparnn.optimizers import Optimizer
logger = logging.getLogger(__name__)
from model_config import base_config
from mylog import mylog as lg
# opt_log = lg.init_logger(base_config['deduce_log_path'])
class RMSProp(Optimizer):
"""
RMSProp Introduced by Hinton(http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
"""
def __init__(self,
model,
train_data_iterator0,
train_data_iterator1,
train_data_iterator2,
train_data_iterator3,
train_data_iterator4,
train_data_iterator5,
valid_data_iterator,
test_data_iterator,
hyper_param,
wind_train_iterator=None, wind_valid_iterator=None, wind_test_iterator=None
):
super(RMSProp, self).__init__(model,
train_data_iterator0, train_data_iterator1,
train_data_iterator2, train_data_iterator3,
train_data_iterator4, train_data_iterator5,
valid_data_iterator, test_data_iterator, hyper_param,
wind_train_iterator=wind_train_iterator,
wind_valid_iterator=wind_valid_iterator,
wind_test_iterator=wind_test_iterator)
self.learning_rate = numpy_floatX(hyper_param["learning_rate"])
self.decay_rate = numpy_floatX(hyper_param["decay_rate"])
def set_name(self):
self.name = "RMSProp-" + self.id
def get_update_func(self):
print('*** Update Function of Rmsprop ......')
# opt_log.info("*** Update Function of Rmsprop ......")
updates = []
lr = TT.scalar(self._s("learning_rate"), dtype=theano.config.floatX)
rho = TT.scalar(self._s("decay_rate"), dtype=theano.config.floatX)
eps = numpy_floatX(1E-6)
self.meansquare = [theano.shared(p.get_value(
) * numpy_floatX(0.), name="%s.meansquare" % p.name) for p in self.model.param]
g_msnew_list = [rho * g_ms + (1 - rho) * (TT.square(g))
for g, g_ms in zip(self.grad, self.meansquare)]
updates += [(g_ms, g_msnew)
for g_ms, g_msnew in zip(self.meansquare, g_msnew_list)]
updates += [(p, p - lr * g / TT.sqrt(g_msnew + eps))
for p, g, g_msnew in zip(self.model.param, self.grad, g_msnew_list)]
return self.model.get_update_func(updates, [lr, rho])
def learning_param(self):
return [self.learning_rate, self.decay_rate]
def print_stat(self):
super(RMSProp, self).print_stat()
logger.info(" Learning Parameters:")
logger.info(" Clipping Threshold: " + str(self.clip_threshold))
logger.info(" Learning Rate: " + str(self.decay_rate))
logger.info(" Decay Rate: " + str(self.decay_rate))
```
#### File: sparnn/optimizers/sgd.py
```python
import numpy
import time
import theano
import logging
import theano.tensor as TT
import cPickle
from sparnn.utils import *
from sparnn.optimizers import Optimizer
logger = logging.getLogger(__name__)
class SGD(Optimizer):
"""
First Order Stochastic Gradient Descent With Momentum
The clipping strategy is the same as the ICML 2013 paper:On the difficulty of training recurrent neural networks
"""
def __init__(self,
model,
train_data_iterator,
valid_data_iterator,
test_data_iterator,
hyper_param
):
super(SGD, self).__init__(model, train_data_iterator, valid_data_iterator, test_data_iterator, hyper_param)
self.learning_rate = hyper_param["learning_rate"]
self.momentum = hyper_param["momentum"]
self.decay_rate = hyper_param.get("decay_rate", numpy_floatX(0.1))
self.decay_step = hyper_param.get("decay_step", (self.max_epoch - self.start_epoch) / 3 + 1)
self.decay_begin = hyper_param.get("decay_begin", 0)
def set_name(self):
self.name = "SGD-" + self.id
def get_update_func(self):
updates = []
lr = TT.scalar(self._s("learning_rate"), dtype=theano.config.floatX)
momentum = TT.scalar(self._s("SGD.momentum"), dtype=theano.config.floatX)
self.grad_last_update = [theano.shared(p.get_value() * numpy_floatX(0.), name="%s.grad_last_update" % p.name)
for p in self.model.param]
updates += [(p, p + momentum * p_last_update - lr * p_grad)
for p, p_grad, p_last_update in zip(self.model.param, self.grad, self.grad_last_update)]
updates += [(p_last_update, momentum * p_last_update - lr * p_grad)
for p_grad, p_last_update in zip(self.grad, self.grad_last_update)]
return self.model.get_update_func(updates, [lr, momentum])
def learning_param(self):
if (0 == (self.current_epoch - self.start_epoch + 1) % self.decay_step) and (
self.current_epoch - self.start_epoch) > self.decay_begin:
self.learning_rate *= self.decay_rate
return [self.learning_rate, self.momentum]
def print_stat(self):
super(SGD, self).print_stat()
logger.info(" Learning Parameters:")
logger.info(" Learning Rate: " + str(self.learning_rate))
logger.info(" Momentum: " + str(self.momentum))
logger.info(" Decay Rate: " + str(self.decay_rate))
logger.info(" Decay Step: " + str(self.decay_step))
```
#### File: src/tools/reader_backup.py
```python
from sparnn.iterators import DataIterator
import numpy as np
from utils import *
import theano
class WeatherIterator(DataIterator):
def __init__(self, iterator_param, mode='train'):
self.use_input_mask = iterator_param.get('use_input_mask', None)
self.use_output_mask = iterator_param.get('use_output_mask', None)
self.name = iterator_param['name']
self.input_data_type = iterator_param.get('input_data_type', theano.config.floatX)
self.output_data_type = iterator_param.get('output_data_type', theano.config.floatX)
self.minibatch_size = iterator_param['minibatch_size']
self.is_output_sequence = iterator_param['is_output_sequence']
self.data = {}
self.indices = {}
self.current_position = 0
self.current_batch_size = 0
self.current_batch_indices = []
self.current_input_length = 0
self.current_output_length = 0
self.config = iterator_param
self.mode = mode
self.load()
def load(self):
self.data = self.raw_input(self.config)
self.check_data()
def raw_input(self, config):
filenames = load_range_filenames(config)
print("image length: ", len(filenames))
X, X_last = [], []
for i, filename in enumerate(filenames): # filenames = [201811230524.png, ..., ]
#print(filename)
X_seq = filenames[i: i+config['input_seq_length']+config['output_seq_length']]
if len(X_seq) == (config['input_seq_length']+config['output_seq_length']):
print("X_seq: ", X_seq)
for filename1 in X_seq:
try:
X_hour = read_data(filename1, config) # X_hour is compressed point matrix
#print(X_hour)
if X_hour is not None:
X.append(X_hour)
X_last = X_hour
elif len(X_last) > 0:
X.append(X_last)
except IOError:
print(filename1 + ' does not exist!')
pass
else:
break
#if i % 240 == 0 and i > 1: # change day original:i % 240
#print('read to ', i, filename, len(filenames))
X = np.array(X, dtype=np.dtype("float32"))
# print('read X', X.shape)
X = np.reshape(X, (X.shape[0], 1, X[0].shape[0], X[0].shape[1])) # ???
#print('load ', X.shape, X.max(), X.min())
# normalize
X = (X+config['offset'])/config['max'] # todo:Normalization
#print('normalize ', X.shape, X.max(), X.min())
clips = [[] for i in range(2)] # [[],[]]
minibatch_size = config['minibatch_size']
input_seq_length = config['input_seq_length']
output_seq_length = config['output_seq_length']
# print('minibatch_size:', minibatch_size)
# print('input_seq_length:', input_seq_length)
# print('output_seq_length:', output_seq_length)
if self.mode == 'train': # load input+output
#print(X.shape)
#print('input+output:',input_seq_length+output_seq_length)
for x in range(X.shape[0]/(input_seq_length+output_seq_length)):
#print(x)
#print([(input_seq_length+output_seq_length)*x, input_seq_length])
clips[0].append([(input_seq_length+output_seq_length)*x, input_seq_length])
#print([(input_seq_length+output_seq_length)*x+input_seq_length, output_seq_length])
clips[1].append([(input_seq_length+output_seq_length)*x+input_seq_length, output_seq_length])
elif self.mode == 'predict': # load input only
for x in range(X.shape[0]/(input_seq_length)):
# print(x)
clips[0].append([(input_seq_length+output_seq_length)*x, input_seq_length])
clips[1].append([(input_seq_length+output_seq_length)*x+input_seq_length, 0])
clips = np.array(clips, dtype=np.dtype("int32"))
#print('clips:', clips)
dims = np.array([[1, X[0].shape[1], X[0].shape[2]]], dtype=np.dtype("int32"))
#print('dims:', dims)
return {'input_raw_data': X, 'dims': dims, "clips": clips}
def load_data(config, mode='predict'):
iterator = WeatherIterator(config, mode=mode)
iterator.begin(do_shuffle=True)
return iterator
```
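The iterator above pulls everything it needs from a flat config dict. A minimal sketch of that dict, reconstructed from the attribute lookups in `__init__` and `raw_input` (keys not read in the code above are assumptions), might look like:
```python
# Hypothetical config for WeatherIterator / load_data; values are illustrative only.
config = {
    "name": "radar-train",
    "minibatch_size": 8,
    "input_seq_length": 10,     # frames fed to the model
    "output_seq_length": 20,    # frames expected as output
    "max": 14.0,                # normalization: X = (X + offset) / max
    "offset": 0.0,
    "is_output_sequence": True,
    "use_input_mask": None,
}
# iterator = load_data(config, mode="train")  # also needs whatever keys
# load_range_filenames()/read_data() in utils expect, which are not shown here
```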
{
"source": "joeinus134131/Stoikometrik-Apps",
"score": 3
}
#### File: joeinus134131/Stoikometrik-Apps/stokiometric.py
```python
import math
import tkinter as tk
from tkinter import StringVar
import matplotlib.pyplot as plt
import numpy as np
from PIL import ImageTk, Image
# chemical composition
# CO2
# SO2
# O2
# N2
# Parameters
# carbon, hydrogen, sulfur, oxygen, nitrogen, ash, TM
rho = 1.2 # air density, 1.2 kg/m3
m = 500 # mass of air required per hour
aktualair = 11.25
#Gui Setting
gui = tk.Tk()
gui.title("Stokiometrik Apps Calculator")
gui.geometry("650x650")
gui.iconbitmap('E:\\PROJECT-KODINGAN\\1. PYTHON\\3. 2021 PENGUJIAN MODULE\\logo_itera_oke_bvD_icon.ico')
# Images
image=Image.open("E:\\PROJECT-KODINGAN\\1. PYTHON\\3. 2021 PENGUJIAN MODULE\\Logo_PLN.png")
image.thumbnail((100,300),Image.ANTIALIAS)
photo=ImageTk.PhotoImage(image)
label_image=tk.Label(image=photo)
label_image.place(x=430, y=50)
#gambar=Image.open("E:\\PROJECT-KODINGAN\\1. PYTHON\\3. 2021 PENGUJIAN MODULE\\logo itera oke.png")
#gambar.thumbnail((100,200),Image.ANTIALIAS)
#foto=ImageTk.PhotoImage(gambar)
#label_imager=tk.Label(gambar=foto)
#label_imager.place(x=750, y=50)
#Manual Input
#aktual = input("Masukan nilai aktual : ")
#stokiometri = input("Masukan data Stokiometrik : ")
aktualdata = float()
stokiometridata = float()
#Gui Input massa unsur
labelapp = tk.Label(gui, font="arial 20 bold", text="ITERA x PLTU UPK TARAHAN")
labelapp.pack()
labelkarbon = tk.Label(gui, font = "arial 12 bold", text = "Carbon: ")
labelkarbon.place(x=10, y=50)
karboninput = tk.Entry()
karboninput.place(x=200, y=50)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kg ")
labelhsd.place(x=330, y=50)
labelhidrogen = tk.Label(gui, font = "arial 12 bold", text = "Hydrogen : ")
labelhidrogen.place(x=10, y=80)
hidrogeninput = tk.Entry()
hidrogeninput.place(x=200, y=80)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kg ")
labelhsd.place(x=330, y=80)
labelsulfur = tk.Label(gui, font = "arial 12 bold", text = "Sulfur : ")
labelsulfur.place(x=10, y=110)
sulfurinput = tk.Entry()
sulfurinput.place(x=200, y=110)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kg ")
labelhsd.place(x=330, y=110)
labeloksigen = tk.Label(gui, font = "arial 12 bold", text = "Oxygen : ")
labeloksigen.place(x=10, y=140)
oksigeninput = tk.Entry()
oksigeninput.place(x=200, y=140)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kg ")
labelhsd.place(x=330, y=140)
labelnitrogen = tk.Label(gui, font = "arial 12 bold", text = "Nitrogen : ")
labelnitrogen.place(x=10, y=170)
nitrogeninput = tk.Entry()
nitrogeninput.place(x=200, y=170)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kg ")
labelhsd.place(x=330, y=170)
labelash = tk.Label(gui, font = "arial 12 bold", text = "Ash : ")
labelash.place(x=10, y=200)
ashinput = tk.Entry()
ashinput.place(x=200, y=200)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kg ")
labelhsd.place(x=330, y=200)
labeltm = tk.Label(gui, font = "arial 12 bold", text = "TM: ")
labeltm.place(x=10, y=230)
tminput = tk.Entry()
tminput.place(x=200, y=230)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kg ")
labelhsd.place(x=330, y=230)
labeltcf = tk.Label(gui, font = "arial 12 bold", text = "TCF : ")
labeltcf.place(x=10, y=260)
tcfinput = tk.Entry()
tcfinput.place(x=200, y=260)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kg ")
labelhsd.place(x=330, y=260)
labellhv = tk.Label(gui, font = "arial 12 bold", text = "LHV : ")
labellhv.place(x=10, y=290)
lhvinput = tk.Entry()
lhvinput.place(x=200, y=290)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kg ")
labelhsd.place(x=330, y=290)
labelhhv = tk.Label(gui, font = "arial 12 bold", text = "HHV : ")
labelhhv.place(x=10, y=320)
hhvinput = tk.Entry()
hhvinput.place(x=200, y=320)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kg ")
labelhsd.place(x=330, y=320)
labelo2 = tk.Label(gui, font = "arial 12 bold", text = "O2 : ")
labelo2.place(x=10, y=350)
o2input = tk.Entry()
o2input.place(x=200, y=350)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " % ")
labelhsd.place(x=330, y=350)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = "Flow HSD : ")
labelhsd.place(x=10, y=380)
hsdinput = tk.Entry()
hsdinput.place(x=200, y=380)
labelhsd = tk.Label(gui, font = "arial 12 bold", text = " kL/H ")
labelhsd.place(x=330, y=380)
# stoichiometry calculation function
def hitung_stokiometrik():
#pengurangan = (float(aktualinput.get())-float(stokiometriinput.get()))
#z = float(pengurangan)
#persentase = float(z/float(stokiometriinput.get()))*100
# O2 required per element (mass ratios from the combustion reactions:
# C + O2 -> CO2 needs 32/12 kg O2 per kg C, H2 + 1/2 O2 -> H2O needs 8 kg O2
# per kg H, S + O2 -> SO2 needs 1 kg O2 per kg S; fuel-bound oxygen is subtracted)
c = float(karboninput.get())*(32/12)
h = float(hidrogeninput.get())*(16/2)
s = float(sulfurinput.get())*(32/32)
o = -1*(float(oksigeninput.get()))
tcf = float(tcfinput.get())*(1000/3600)
hhv = float(hhvinput.get())*4.1868
# total O2 required
o2total = c+h+s+o
n2 = o2total*(76.7/23.3)
stoichair = o2total+n2
# products
#heat input
heatinput = tcf*hhv
stoichairflow = stoichair*tcf
thermalefisiensi = (100/heatinput)*100
# excess air
excessair = (float(o2input.get())/(21-float(o2input.get())))*100
#excessairflow
excessairflow = (excessair*stoichairflow)/100
#total air flow
totalairflow = stoichairflow + excessairflow
#primaryairflow
primaryairflow = 1.8*tcf
#secondaryairflow
secondaryairflow = stoichairflow-primaryairflow
#subtotalsaflow
subtotalsaairflow = secondaryairflow + excessairflow
textArea = tk.Text(gui,height=10,width=50)
textArea.place(x=200, y=470)
jawabano2total = " O2 : {o2} ,".format(o2=o2total)
jawabann2 = " N2 : {n2plus} ,".format(n2plus=n2)
jawabanstoichair = " Stoich Air : {stoich} ,".format(stoich=stoichair)
jawabanheatinput = " Heat input : {heatin} Watt, ".format(heatin=heatinput)
jawabanthermal = " Efisiensi Thermal : {thermal} tph,".format(thermal=thermalefisiensi)
jawabansaf = " Stoich Air Flow : {saf} tph,".format(saf=stoichairflow)
jawabanea = " Excess Air : {ea} %,".format(ea=excessair)
jawabaneaf = " Excess Air Flow : {eaf} tph,".format(eaf=excessairflow)
jawabanpaf = " Primary Air Flow : {paf} tph,".format(paf=primaryairflow)
jawabansndaf = " Secondary Air Flow : {sndaf} tph,".format(sndaf=secondaryairflow)
jawabanstaf = " subTotal Air Flow : {staf} tph,".format(staf=subtotalsaairflow)
textArea.insert(tk.END, jawabano2total)
textArea.insert(tk.END, jawabann2)
textArea.insert(tk.END, jawabanstoichair)
textArea.insert(tk.END, jawabanheatinput)
textArea.insert(tk.END, jawabanthermal)
textArea.insert(tk.END, jawabansaf)
textArea.insert(tk.END, jawabanea)
textArea.insert(tk.END, jawabaneaf)
textArea.insert(tk.END, jawabanpaf)
textArea.insert(tk.END, jawabansndaf)
textArea.insert(tk.END, jawabanstaf)
print("hasil total", o2total)
#print("nilai z = ", z)
#print("Hasil Persentase : ", persentase)
def printdata():
O2 = 18.5
hsd = float(hsdinput.get())
rasioO2 = hsd*O2
textArea = tk.Text(gui,height=10,width=50)
textArea.place(x=200, y=470)
perbandingan = " HSD : {hsd2} : ".format(hsd2=hsd)
rasioo2 = " O2 : {rasioo2}".format(rasioo2=rasioO2)
textArea.insert(tk.END, perbandingan)
textArea.insert(tk.END, rasioo2)
def flowrate_fungsi():
flowrate = aktualair*(m/(rho*3600)) #satuan m3/s
print(flowrate)
def tampilgrafik():
# the original np.array(4, 3) / np.array(5, 10, 1) calls are invalid;
# np.arange with matching lengths is assumed here so the plot can render
series = np.arange(0, 4)
x = np.arange(5, 9, 1)
plt.plot(series, x)
plt.show()
def thermal_efisiensi(outlet, inlet):
# outlet/inlet were undefined in the original; they are taken as parameters here
efisiensi = (outlet / inlet) * 100
print(efisiensi)
# data reset function
def reset():
karboninput.delete(0, tk.END)
hidrogeninput.delete(0, tk.END)
sulfurinput.delete(0, tk.END)
oksigeninput.delete(0, tk.END)
ashinput.delete(0, tk.END)
tminput.delete(0, tk.END)
tcfinput.delete(0, tk.END)
lhvinput.delete(0, tk.END)
hhvinput.delete(0, tk.END)
o2input.delete(0, tk.END)
hsdinput.delete(0, tk.END)
# calculate button
tombolhitung = tk.Button(gui, font="arial 13 bold", text = "Hitung", command = hitung_stokiometrik)
tombolhitung.place(x=200, y=420)
tombolreset = tk.Button(gui, font="arial 13 bold", text = "Reset", command = reset)
tombolreset.place(x=430, y=420)
#tombolhitungfr = tk.Button(gui, font="arial 13 bold", text = "Hitung Flow Rate", command = flowrate_fungsi)
#tombolhitungfr.place(x=220, y=350)
tampilkangrafik = tk.Button(gui, font="arial 13 bold", text = "Hitung Rasio", command = printdata)
tampilkangrafik.place(x=290, y=420)
# Results
labelhasil=tk.Label(gui, font="arial 12 bold", text="Hasil :")
labelhasil.place(x=10, y=470)
gui.mainloop()
```
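As a quick sanity check of the air requirement computed in hitung_stokiometrik, the same mass ratios applied to 1 kg of pure carbon give roughly 11.4 kg of stoichiometric air. This standalone sketch only mirrors the formula above; it is not part of the GUI:
```python
# Stoichiometric air for 1 kg of carbon, using the same ratios as the GUI code.
c_kg = 1.0
o2_needed = c_kg * (32 / 12)                 # C + O2 -> CO2: 2.667 kg O2 per kg C
n2_accompanying = o2_needed * (76.7 / 23.3)  # nitrogen carried along with that oxygen
stoich_air = o2_needed + n2_accompanying     # about 11.45 kg air per kg carbon
print(o2_needed, n2_accompanying, stoich_air)
```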
{
"source": "joein/zarnitsa",
"score": 3
}
#### File: zarnitsa/zarnitsa/DataAugmenterInternally.py
```python
from typing import Tuple
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from .DataAugmenter import AbstractDataAugmenter
class DataAugmenterInternally(AbstractDataAugmenter):
def __init__(self, n_jobs=1):
self.n_jobs = n_jobs
def augment_dataframe(
self,
df: pd.DataFrame,
aug_type="normal",
freq=0.2,
return_only_aug=False,
) -> pd.DataFrame:
"""Augment dataframe data. Pandas dataframe"""
augment_column_method = {
"normal": self.augment_column_norm,
"uniform": self.augment_column_uniform,
"permutations": self.augment_column_permut,
}
not_to_aug, to_aug = self._prepare_data_to_aug(df, freq=freq)
for col in df.columns:
to_aug[col] = augment_column_method[aug_type](
to_aug[col], freq=1.0
)
return to_aug if return_only_aug else pd.concat([not_to_aug, to_aug])
def augment_column(
self,
col: pd.Series,
aug_type="normal",
freq=0.2,
return_only_aug=False,
) -> pd.Series:
"""Augment Serial data. Pandas column"""
augment_column_method = {
"normal": self.augment_column_norm,
"uniform": self.augment_column_uniform,
"permutations": self.augment_column_permut,
}
not_to_aug, to_aug = self._prepare_data_to_aug(col, freq=freq)
to_aug = augment_column_method[aug_type](to_aug, freq=1.0)
return to_aug if return_only_aug else pd.concat([not_to_aug, to_aug])
def _prepare_data_to_aug(
self, data, freq=0.2
) -> Tuple[pd.Series, pd.Series]:
"""Get part of data. Not augment all of it excep case freq=1.0"""
data = (
pd.Series(data)
if type(data) is not pd.Series and type(data) is not pd.DataFrame
else data
)
if freq < 1:
not_to_aug, to_aug = train_test_split(data, test_size=freq)
return not_to_aug, to_aug
elif freq == 1:
return data.sample(0), data
elif freq == 0:
return data, data.sample(0)
def augment_column_permut(
self, col: pd.Series, freq=0.2, return_only_aug=False
) -> pd.Series:
"""Augment column data using permutations. Pandas column"""
not_to_aug, to_aug = self._prepare_data_to_aug(col, freq=freq)
indices_to_permute = to_aug.index
to_aug = to_aug.sample(frac=1.0)
to_aug.index = indices_to_permute
return to_aug if return_only_aug else pd.concat([not_to_aug, to_aug])
def augment_column_norm(
self, col: pd.Series, freq=0.2, return_only_aug=False
) -> pd.Series:
"""Augment column data using normal distribution. Pandas column"""
not_to_aug, to_aug = self._prepare_data_to_aug(col, freq=freq)
column_std = col.std()
to_aug = to_aug.apply(
lambda value: np.random.normal(value, column_std)
)
return to_aug if return_only_aug else pd.concat([not_to_aug, to_aug])
def augment_column_uniform(
self, col: pd.Series, freq=0.2, n_sigm=3, return_only_aug=False
) -> pd.Series:
"""Augment column data using uniform distribution. Pandas column"""
not_to_aug, to_aug = self._prepare_data_to_aug(col, freq=freq)
column_std = col.std()
to_aug = to_aug.apply(
lambda value: np.random.uniform(
value - n_sigm * column_std, value + n_sigm * column_std
)
)
return to_aug if return_only_aug else pd.concat([not_to_aug, to_aug])
```
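A minimal usage sketch for the class above. The import path assumes the package layout zarnitsa/DataAugmenterInternally.py; adjust it if the module is exposed differently:
```python
import pandas as pd
from zarnitsa.DataAugmenterInternally import DataAugmenterInternally

aug = DataAugmenterInternally()
col = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0], name="feature")

# "normal" perturbs the selected values with N(value, column_std),
# "uniform" samples from [value - 3*std, value + 3*std],
# "permutations" shuffles the selected values among themselves.
augmented = aug.augment_column(col, aug_type="normal", freq=0.4)
print(augmented.sort_index())
```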
{
"source": "JoeIOU/metedata_fusion_tools",
"score": 2
}
#### File: src/common/logger.py
```python
import logging
import logging.handlers
import datetime,os
_log = None
# initialize the logging facility
def init(log_format, log_format_error, log_level, log_path, env, date_format):
global _log
if _log is not None:
return _log
# dev environment: log to the console only
if env == "dev":
logging.basicConfig(level=log_level, format=log_format, datefmt=date_format)
_log = logging
return _log
# other environments: write to rotating log files
else:
logger = logging.getLogger('mylogger')
logger.setLevel(log_level)
#logging.basicConfig(filename=__C.LOG_FILE, level=__C.LOG_LEVEL, format=__C.LOG_FORMAT, datefmt=__C._TIME_FORMAT)
rf_handler = logging.handlers.TimedRotatingFileHandler(os.path.join(log_path, 'all.log'), when='midnight', interval=1, backupCount=7, atTime=datetime.time(0, 0, 0, 0), encoding='utf-8')
rf_handler.setFormatter(logging.Formatter(log_format))
#rf_handler.encoding="utf-8"
f_handler = logging.FileHandler(os.path.join(log_path, 'error.log'), encoding='utf-8')
f_handler.setLevel(logging.ERROR)
#f_handler.encoding="utf-8"
f_handler.setFormatter(logging.Formatter(log_format_error))
logger.addHandler(rf_handler)
logger.addHandler(f_handler)
_log = logger
return _log
```
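A hypothetical initialization call. Argument names follow the init() signature above; the format strings, path, and import path are illustrative, not taken from the project's config:
```python
import logging
from common import logger  # assumed import path for src/common/logger.py

log = logger.init(
    log_format="%(asctime)s %(levelname)s %(name)s %(message)s",
    log_format_error="%(asctime)s %(levelname)s %(name)s %(message)s [%(filename)s:%(lineno)d]",
    log_level=logging.INFO,
    log_path="/var/log/myapp",   # only used when env != "dev"
    env="dev",                   # "dev" logs to the console; anything else writes all.log / error.log
    date_format="%Y-%m-%d %H:%M:%S",
)
log.info("logger ready")
```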
#### File: src/db/neo4j_conn.py
```python
from py2neo import Graph
graph = None
def get_graph():
graph = Graph(uri="http://localhost:7474/db/data", username="neo4j", password="<PASSWORD>")
return graph
def neo4j_graph():
global graph
if graph is None:
graph = get_graph()
return graph
```
#### File: src/mdata/index_unique.py
```python
from db.db_conn import db_connection_metedata as db_md
from mdata import metadata as md
from privilege import user_mngt as ur
from config.config import cfg as config
logger = config.logger
FIELD_TYPE_TEXT = "Text"
FIELD_TYPE_CHAR = "Char"
FIELD_TYPE_INT = "Int"
FIELD_TYPE_DECIMAL = "Decimal"
FIELD_TYPE_DATE = "Date"
MD_INDEX_ENTITY_MAPPING_NAME = "index_mapping_t"
MD_ENTITY_INDEX_TEXT = "index_text_t"
MD_ENTITY_INDEX_INT = "index_int_t"
MD_ENTITY_INDEX_DEC = "index_decimal_t"
MD_ENTITY_INDEX_DATE = "index_date_t"
# MD_UNIQUE_ENTITY_MAPPING_NAME = "unique_mapping_t"
# MD_ENTITY_UNIQUE_TEXT = "unique_text_t"
# MD_ENTITY_UNIQUE_INT = "unique_int_t"
# MD_ENTITY_UNIQUE_DEC = "unique_decimal_t"
# MD_ENTITY_UNIQUE_DATE = "unique_date_t"
# fields that store the index values
MD_ENTITY_INDEX_FIELD_TEXT = "text_value"
MD_ENTITY_INDEX_FIELD_INT = "int_value"
MD_ENTITY_INDEX_FIELD_DEC = "num_value"
MD_ENTITY_INDEX_FIELD_DATE = "date_value"
# field that stores the data id (defaults to data_id)
MD_ENTITY_INDEX_DATA_FIELD = "data_id"
SQL_QUERY_INDEX_FORMAT = """
SELECT
t.index_mapping_id,
t.tenant_id,
t.md_entity_id,
t.md_fields_id,
t.mapping_type,
t.unique_flag,
f.md_fields_name,
f.md_columns_id
FROM
index_mapping_t AS t
INNER JOIN md_fields AS f ON f.tenant_id = t.tenant_id
AND f.active_flag = 'Y'
AND f.md_fields_id = t.md_fields_id
WHERE
t.tenant_id = %s
AND t.md_entity_id = %s
"""
def query_index_mapping(tenant_id, entity_id):
conn = db_md()
cursor = conn.cursor()
sql = SQL_QUERY_INDEX_FORMAT
cursor.execute(sql, args=(tenant_id, entity_id,))
result = cursor.fetchall()
logger.info("query_index_mapping,entity:{}".format(result))
conn.close() # does not really close; returns the connection to the pool
return result
def query_index_all_type_list(tenant_id):
code_list = []
code_list.append(MD_ENTITY_INDEX_TEXT)
code_list.append(MD_ENTITY_INDEX_DEC)
code_list.append(MD_ENTITY_INDEX_INT)
code_list.append(MD_ENTITY_INDEX_DATE)
# code_list.append(MD_ENTITY_UNIQUE_TEXT)
# code_list.append(MD_ENTITY_UNIQUE_DEC)
# code_list.append(MD_ENTITY_UNIQUE_INT)
# code_list.append(MD_ENTITY_UNIQUE_DATE)
index_list = md.get_md_entities_id_by_code(code_list)
return index_list
def get_mapping_table_fields(mapping_type):
mapping_code, value_field = None, None
if mapping_type is not None and (
mapping_type.upper() == FIELD_TYPE_TEXT.upper() or mapping_type.upper() == FIELD_TYPE_CHAR.upper()):
value_field = MD_ENTITY_INDEX_FIELD_TEXT
mapping_code = MD_ENTITY_INDEX_TEXT
elif mapping_type is not None and mapping_type.upper() == FIELD_TYPE_INT.upper():
value_field = MD_ENTITY_INDEX_FIELD_INT
mapping_code = MD_ENTITY_INDEX_INT
elif mapping_type is not None and mapping_type.upper() == FIELD_TYPE_DECIMAL.upper():
value_field = MD_ENTITY_INDEX_FIELD_DEC
mapping_code = MD_ENTITY_INDEX_DEC
elif mapping_type is not None and mapping_type.upper() == FIELD_TYPE_DATE.upper():
value_field = MD_ENTITY_INDEX_FIELD_DATE
mapping_code = MD_ENTITY_INDEX_DATE
return mapping_code, value_field
def gen_index_mapping(tenant_id, data_list):
index_list = query_index_all_type_list(tenant_id)
classify_dict = {}
for item in data_list:
# unique_flag = item.get("unique_flag")
mapping_type = item.get("mapping_type")
# remove fields that are not metadata storage fields so they do not affect the save
item.pop("mapping_type")
item.pop("unique_flag")
# it is an index, so a record is stored in the corresponding index table
mapping_code, value_field = get_mapping_table_fields(mapping_type)
for itm in index_list:
code = itm.get("md_entity_code")
if code == mapping_code:
entity_id = itm.get("md_entity_id")
obj_list = classify_dict.get(str(entity_id))
if obj_list is None:
tmp_li = []
tmp_li.append(item)
classify_dict[str(entity_id)] = tmp_li
else:
obj_list.append(item)
break
return classify_dict
def exec_index_action(user_id, tenant_id, new_data_list, delete_only=False):
classify_dict = gen_index_mapping(tenant_id, new_data_list)
re = None
for key in classify_dict.keys():
if key is not None:
entity_id1 = int(key)
ls_tmp = classify_dict.get(key)
ls_del = get_delete_list(ls_tmp)
re = md.delete_execute(user_id, tenant_id, entity_id1, ls_del)
if not delete_only:
re = md.insert_execute(user_id, tenant_id, entity_id1, ls_tmp)
return re
def get_delete_list(del_list):
if del_list is None:
return None
list_res = []
for item in del_list:
itm = {}
data_id = item.get(MD_ENTITY_INDEX_DATA_FIELD)
if data_id is None:
continue
itm[MD_ENTITY_INDEX_DATA_FIELD] = data_id
itm["md_entity_id"] = item.get("md_entity_id")
itm["md_fields_id"] = item.get("md_fields_id")
list_res.append(itm)
return list_res
def get_mapping_list(data_list, mapping_list, ids, delete_only=False):
data_list_new = []
if data_list is None or len(data_list) <= 0:
return None
if mapping_list is not None and len(mapping_list) >= 0:
for item in mapping_list:
mapping_entity_id = item.get("md_entity_id")
mapping_field_id = item.get("md_fields_id")
mapping_field_name = item.get("md_fields_name")
# unique_flag = item.get("unique_flag")
mapping_type = item.get("mapping_type")
mapping_code, value_field = get_mapping_table_fields(mapping_type)
i = 0
for itm in data_list:
id = ids[i]
i += 1
if mapping_field_name is not None and mapping_field_name in itm.keys():
item_new = {}
item_new["md_entity_id"] = mapping_entity_id
item_new["unique_flag"] = "N"
item_new["mapping_type"] = mapping_type
item_new["md_fields_id"] = mapping_field_id
if (not delete_only):
item_new[value_field] = itm.get(mapping_field_name)
item_new[MD_ENTITY_INDEX_DATA_FIELD] = id
data_list_new.append(item_new)
return data_list_new
def insert_index_data(user_id, tenant_id, md_entity_id, data_list, ids):
re = None
if ids is None:
logger.warning(
"insert_index_data,insert ids is None,entity_id=[{}],data:{}.".format(md_entity_id, data_list))
return None
new_list = None
if md_entity_id is not None:
mapping_list = query_index_mapping(tenant_id, md_entity_id)
new_list = get_mapping_list(data_list, mapping_list, ids)
if new_list is not None and len(new_list) > 0:
re = exec_index_action(user_id, tenant_id, new_list)
if new_list is not None and len(new_list) > 0 and re is None:
logger.warning(
"insert_index_data,insert nothing,entity_id=[{}],data:{}.".format(md_entity_id, data_list))
return re
def update_index_data(user_id, tenant_id, md_entity_id, data_list, ids):
re = None
new_list = None
if md_entity_id is not None:
mapping_list = query_index_mapping(tenant_id, md_entity_id)
new_list = get_mapping_list(data_list, mapping_list, ids)
if new_list is not None and len(new_list) > 0:
re = exec_index_action(user_id, tenant_id, new_list)
if new_list is not None and len(new_list) > 0 and re is None:
logger.warning(
"update_index_data,update nothing,entity_id=[{}],data:{}.".format(md_entity_id, data_list))
return re
def delete_index_data(user_id, tenant_id, md_entity_id, data_list, ids):
re, new_list = None, None
if md_entity_id is not None:
mapping_list = query_index_mapping(tenant_id, md_entity_id)
        new_list = get_mapping_list(data_list, mapping_list, ids, delete_only=True)
if new_list is not None:
re = exec_index_action(user_id, tenant_id, new_list, delete_only=True)
if re is None:
logger.warning(
"delete_index_data,delete nothing,entity_id=[{}],data:{}.".format(md_entity_id, new_list))
return re
if __name__ == '__main__':
# ##insert the index data
user = ur.get_user("test1")
user_id = user.get("user_id")
tenant_id = user.get("tenant_id")
# data_list = []
datas = [{
'test_fields': 'kiki-001',
'test_fields1': 'kiki',
'customer_name': "ABC.<EMAIL>",
"md_entity_id": 30001
}, {
'test_fields': 'goog-001',
'test_fields1': 'gogo',
'customer_name': "HW.com",
"md_entity_id": 30001
}]
md_entity_id = 30001
re = query_index_mapping(tenant_id, md_entity_id)
print(re)
ids = [12421421412, 12421421411]
re = insert_index_data(user_id, tenant_id, md_entity_id, datas, ids)
# re = update_index_data(user_id, tenant_id, md_entity_id, datas, ids)
# re = delete_index_data(user_id, tenant_id, md_entity_id, datas, ids)
```
#### File: src/mdata/metadata_initialize.py
```python
from config.config import cfg as config
from db.db_conn import db_connection_metedata as db_md
from mdata import metadata as md
from privilege import user_mngt as ur
from model import model_graph as mg
logger = config.logger
# Metadata table names for physical tables and columns
MD_TABLES_NAME = "md_tables"
MD_COLUMNS_NAME = "md_columns"
# Metadata table names for entities and fields
MD_ENTITY_NAME = "md_entities"
MD_FIELDS_NAME = "md_fields"
MD_ENTITY_ID = "md_entity_id"
MD_ENTITY_REL_NAME = "md_entities_rel"
SQL_QUERY_ENTITY_FORMAT = """
SELECT DISTINCT
e.md_entity_id,
e.md_entity_code,
e.md_entity_name,
f.md_fields_id,
f.md_fields_name,
tt.schema_code,
tt.database_id,
tt.md_tables_id,
tt.md_tables_name,
tt.md_tables_desc,
cc.md_columns_id,
cc.md_columns_name
FROM
md_entities e
INNER JOIN md_tables tt ON e.md_tables_id = tt.md_tables_id
INNER JOIN md_columns cc ON cc.md_tables_id = tt.md_tables_id
INNER JOIN md_fields f ON f.md_entity_id = e.md_entity_id
AND f.tenant_id = e.tenant_id
AND f.md_columns_id = cc.md_columns_id
WHERE
(e.tenant_id = %s or e.public_flag='Y')
AND tt.md_tables_name in %s
"""
SQL_QUERY_ENTITY_ALL_REL_FORMAT = """
SELECT DISTINCT r.md_entity_rel_id,r.rel_type,r.md_entity_rel_desc,
r.md_tables_id,r.from_columns_id,to_columns_id,
e.md_entity_id frm_md_entity_id,
e.md_entity_code frm_md_entity_code,
e.md_entity_name frm_md_entity_name,
e.md_entity_desc frm_md_entity_desc,
e.tenant_id frm_tenant_id,
e.public_flag frm_public_flag,
f.md_fields_id frm_md_fields_id,
f.md_fields_name frm_md_fields_name,
e1.md_entity_id to_md_entity_id,
e1.md_entity_code to_md_entity_code,
e1.md_entity_name to_md_entity_name,
e1.md_entity_desc to_md_entity_desc,
e1.tenant_id to_tenant_id,
e1.public_flag to_public_flag,
e1.sys_flag,
f1.md_fields_id to_md_fields_id,
f1.md_fields_name to_md_fields_name
FROM md_entities_rel r
INNER JOIN md_entities e on e.md_entity_id=r.from_entity_id
INNER JOIN md_fields f ON f.md_entity_id = e.md_entity_id
AND f.tenant_id = e.tenant_id and f.md_fields_id=r.from_field_id
INNER JOIN md_entities e1 on e1.md_entity_id=r.to_entity_id
INNER JOIN md_fields f1 ON f1.md_entity_id = e1.md_entity_id
AND f1.tenant_id = e1.tenant_id and f1.md_fields_id=r.to_field_id
WHERE
(e.tenant_id = %s or e.public_flag='Y')
"""
SQL_QUERY_ENTITY_REL_FORMAT = SQL_QUERY_ENTITY_ALL_REL_FORMAT + " AND (e.md_entity_code in %s or e1.md_entity_code in %s)"
SQL_QUERY_ENTITY_FIELDS_COLUMNS_FORMAT = """
SELECT
distinct *
FROM
(
SELECT 'N' blank_flag,
e.md_entity_id,
e.md_entity_code,
e.md_entity_name,
e.md_entity_name_en,
e.md_entity_desc,
e.public_flag entity_public_flag,
f.md_fields_id,
f.md_fields_name,
f.md_fields_name_cn,
f.md_fields_name_en,
f.md_fields_type,
f.md_fields_desc,
f.md_fields_length,
f.md_decimals_length,
f.lookup_flag,
f.lookup_entity,
f.lookup_type,
f.default_value,
f.is_null,
f.is_indexed,
f.is_unique,
f.public_flag,
f.active_flag,
tt.md_tables_id,
tt.md_tables_name,
tt.md_tables_desc,
cc.md_columns_id,
cc.md_columns_name,
cc.md_columns_type,
cc.md_columns_desc,
cc.md_columns_length,
cc.md_dec_length
FROM
md_entities e
INNER JOIN md_tables tt ON e.md_tables_id = tt.md_tables_id
AND tt.active_flag = 'Y'
LEFT JOIN md_fields f ON f.md_entity_id = e.md_entity_id
AND f.tenant_id = e.tenant_id
LEFT JOIN md_columns cc ON cc.md_tables_id = tt.md_tables_id
AND f.md_columns_id = cc.md_columns_id
AND cc.active_flag = 'Y'
WHERE
(e.tenant_id = %s or e.public_flag='Y')
AND e.md_entity_id = %s
AND e.active_flag = 'Y'
UNION
SELECT 'Y' blank_flag,
e.md_entity_id,
e.md_entity_code,
e.md_entity_name,
e.md_entity_name_en,
e.md_entity_desc,
e.public_flag entity_public_flag,
f.md_fields_id,
f.md_fields_name,
f.md_fields_name_cn,
f.md_fields_name_en,
f.md_fields_type,
f.md_fields_desc,
f.md_fields_length,
f.md_decimals_length,
f.lookup_flag,
f.lookup_entity,
f.lookup_type,
f.default_value,
f.is_null,
f.is_indexed,
f.is_unique,
f.public_flag,
f.active_flag,
tt.md_tables_id,
tt.md_tables_name,
tt.md_tables_desc,
cc.md_columns_id,
cc.md_columns_name,
cc.md_columns_type,
cc.md_columns_desc,
cc.md_columns_length,
cc.md_dec_length
FROM
md_tables tt
INNER JOIN md_entities e ON e.md_tables_id = tt.md_tables_id
AND e.active_flag = 'Y'
LEFT JOIN md_columns cc ON cc.md_tables_id = tt.md_tables_id
AND cc.active_flag = 'Y'
LEFT JOIN md_fields f ON f.md_entity_id = e.md_entity_id
AND f.md_columns_id = cc.md_columns_id
WHERE
(e.tenant_id = %s or e.public_flag='Y')
AND e.md_entity_id = %s
AND tt.active_flag = 'Y'
AND f.md_columns_id IS NULL
) aaa
ORDER BY
blank_flag,md_fields_id,md_columns_id
"""
# Query entity, table attribute, and field information
def query_entity_fields_columns(tenant_id, entity_id):
if entity_id is None:
logger.warning("query_entity_fields_columns,entity_id should not be None")
return None
sql = SQL_QUERY_ENTITY_FIELDS_COLUMNS_FORMAT
conn = db_md()
cursor = conn.cursor()
cursor.execute(sql, args=(tenant_id, entity_id, tenant_id, entity_id))
result = cursor.fetchall()
return result
# Query entity relationship information
def query_entity_rel_by_entity(tenant_id, entity_codes):
if entity_codes is None:
sql = SQL_QUERY_ENTITY_ALL_REL_FORMAT
else:
sql = SQL_QUERY_ENTITY_REL_FORMAT
conn = db_md()
cursor = conn.cursor()
if entity_codes is None:
        cursor.execute(sql, args=(tenant_id,))
else:
cursor.execute(sql, args=(tenant_id, entity_codes, entity_codes))
result = cursor.fetchall()
return result
def query_entity_by_tenant(tenant_id, table_names):
sql = SQL_QUERY_ENTITY_FORMAT
if table_names is None:
logger.warning("query_all_entity_in_schema,table_names should not be None")
return None
conn = db_md()
cursor = conn.cursor()
cursor.execute(sql, args=(tenant_id, table_names))
result = cursor.fetchall()
return result
def initialize_md_entities_from_tables(user_id, tenant_id, entity_list):
re = None
if entity_list is None or len(entity_list) <= 0:
return None
md_table_ids = []
table_name_list = []
entity_code_list = []
try:
for tabl in entity_list:
entity_code = tabl.get("entity_code")
table_name = tabl.get("table_name")
entity_code_list.append(entity_code)
table_name_list.append(table_name)
tables = md.get_md_tables_by_name(tenant_id, table_name_list)
for item in tables:
record = item.get("md_tables_id")
if md_table_ids.count(record) <= 0:
md_table_ids.append(record)
if tables is None:
return None
columns = md.get_md_columns_multi_table(tenant_id, md_table_ids)
i = 0
res_col = None
for code_ent in entity_code_list:
tab_name = table_name_list[i]
i += 1
for table in tables:
tab_name1 = table.get("md_tables_name")
if not (tab_name1 is not None and tab_name1 == tab_name):
continue
md_tables_id = table.get("md_tables_id")
cols_new = get_columns_by_tables_id(md_tables_id, columns)
is_sys_flag = True
if cols_new is not None and len(cols_new) > 0:
for cl in cols_new:
if cl is not None:
md_entity_id_new = cl.get("md_columns_name")
                            # A table that has an MD_ENTITY_ID column is not a system table but a tenant data table
if MD_ENTITY_ID == md_entity_id_new:
is_sys_flag = False
break
res = ini_entities(user_id, tenant_id, code_ent, table, is_sys_flag)
if res is None:
logger.warning(
"initialize_md_entities_from_tables,insert entity nothing ,user_id={},table:{}".format(user_id,
table))
continue
ids = res.get("data").get("ids")
entity_id = ids[0]
res_col = ini_fields(user_id, tenant_id, entity_id, cols_new)
return res_col
except Exception as e:
logger.error("initialize_md_entities_from_tables insert error,msg:{}".format(e))
raise e
def get_columns_by_tables_id(table_id, columns):
if columns is None or table_id is None:
return None
cols = []
for col in columns:
md_tables_id = col.get("md_tables_id")
is_cols_null = col.get("is_cols_null")
        # Exclude required columns, because they were already written when the entity object was created.
if md_tables_id == table_id and is_cols_null is not None and is_cols_null != 'N':
cols.append(col)
return cols
def ini_entities(user_id, tenant_id, new_entity_code, table_dict, is_sys_flag):
res = md.get_md_entities_id_by_code([MD_ENTITY_NAME])
if res is None:
logger.warning("ini_entities,entity is None ,name:{}".format(MD_ENTITY_NAME))
return None
md_entity_id = res[0].get("md_entity_id")
entity_list = []
table = table_dict
entity_dict = {}
name = table.get("md_tables_desc")
code = table.get("md_tables_name")
if name is None or len(name.strip()) <= 0:
name = code
if new_entity_code is not None and len(new_entity_code.strip()) > 0:
code = new_entity_code.strip()
if name is not None and len(name) > 100:
name = name.strip()
name = name[:100]
entity_dict["md_entity_name"] = name
entity_dict["md_entity_code"] = code
entity_dict["md_entity_name_en"] = code
entity_dict["md_entity_desc"] = name
entity_dict["tenant_id"] = tenant_id
entity_dict["md_tables_id"] = table.get("md_tables_id")
if is_sys_flag:
entity_dict["sys_flag"] = "Y"
else:
entity_dict["sys_flag"] = "N"
entity_list.append(entity_dict)
re = md.insert_execute(user_id, tenant_id, md_entity_id, entity_list)
logger.info("insert entity,name={}".format(code))
return re
def ini_fields(user_id, tenant_id, obj_id, columns):
res = md.get_md_entities_id_by_code([MD_FIELDS_NAME])
if res is None:
logger.warning("ini_fields,entity is None ,name:{}".format(MD_FIELDS_NAME))
return None
if columns is None or len(columns) <= 0:
logger.warning("ini_fields,columns is None ")
return None
md_entity_id = res[0].get("md_entity_id")
fields_list = []
for col in columns:
field_dict = {}
field_dict["md_entity_id"] = obj_id
field_dict["md_columns_id"] = col.get("md_columns_id")
field_dict["md_fields_name"] = col.get("md_columns_name")
field_dict["md_fields_name_en"] = col.get("md_columns_name")
type1 = col.get("md_columns_type")
field_dict["md_fields_type"] = field_type_mapping(type1, col.get("md_dec_length"))
field_dict["md_fields_length"] = col.get("md_columns_length")
field_dict["md_decimals_length"] = col.get("md_dec_length")
field_dict["is_null"] = col.get("is_cols_null")
field_dict["md_fields_desc"] = col.get("md_columns_desc")
field_dict["is_unique"] = "N"
field_dict["is_indexed"] = "N"
field_dict["tenant_id"] = col.get("tenant_id")
fields_list.append(field_dict)
re = md.insert_execute(user_id, tenant_id, md_entity_id, fields_list)
return re
def field_type_mapping(type, deci):
new_type = None
if (type is not None):
if type.lower() == 'number' or type.lower() == 'smallint' or type.lower() == 'integer' \
or type.lower() == 'numeric' or type.lower() == 'real' or type.lower() == 'money':
if deci is not None and int(deci) > 0:
new_type = 'decimal'
else:
new_type = 'bigint'
elif type.lower() == 'date' or type.lower() == 'datetime':
new_type = 'timestamp'
elif type.lower() == 'bit' or type.lower() == 'boolean':
new_type = 'char'
else:
new_type = type.lower()
return new_type
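# Illustrative examples of the mapping above (inputs are made up, not taken from
# the repository's data):
#   field_type_mapping("NUMERIC", 2)     -> 'decimal'
#   field_type_mapping("integer", None)  -> 'bigint'
#   field_type_mapping("datetime", None) -> 'timestamp'
#   field_type_mapping("varchar", None)  -> 'varchar'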
def insert_metadata_table(user_id, tenant_id, tables):
    # Metadata entity ID of the md_tables table
# entity_id = 30018
rr = md.get_md_entities_id_by_code([MD_TABLES_NAME])
re = None
if rr is not None and len(rr) > 0:
entity_id = rr[0].get("md_entity_id")
if entity_id is not None:
re = md.insert_execute(user_id, tenant_id, entity_id, tables)
if re is None:
logger.warning("insert_metadata_table,get_md_entities_by_code is Null,tables=[{}].".format(MD_TABLES_NAME))
return re
def insert_metadata_columns(user_id, tenant_id, columns):
    # Metadata entity ID of the md_columns table
# entity_id = 30014
rr = md.get_md_entities_id_by_code([MD_COLUMNS_NAME])
re = None
if rr is not None and len(rr) > 0:
entity_id = rr[0].get("md_entity_id")
if entity_id is not None:
re = md.insert_execute(user_id, tenant_id, entity_id, columns)
if re is None:
logger.warning("insert_metadata_columns,get_md_entities_by_code is Null,columns=[{}].".format(MD_COLUMNS_NAME))
return re
def insert_metadata_entities_rel(user_id, tenant_id, entities_rel):
rr = md.get_md_entities_id_by_code([MD_ENTITY_REL_NAME])
re = None
if rr is not None and len(rr) > 0:
entity_id = rr[0].get("md_entity_id")
if entity_id is not None:
re = md.insert_execute(user_id, tenant_id, entity_id, entities_rel)
if re is None:
logger.warning(
"insert_metadata_entities_rel,get_md_entities_by_code is Null,rel_table=[{}].".format(MD_ENTITY_REL_NAME))
return re
def graph_data_mapping(entities_rel_list):
    list_entity = []
    list_rel = []
    if entities_rel_list is not None:
        for item in entities_rel_list:
rel_id = item.get("md_entity_rel_id")
rel_type = item.get("rel_type")
rel_desc = item.get("md_entity_rel_desc")
frm_md_entity_id = item.get("frm_md_entity_id")
frm_md_entity_code = item.get("frm_md_entity_code")
frm_md_entity_name = item.get("frm_md_entity_name")
frm_md_entity_desc = item.get("frm_md_entity_desc")
frm_md_fields_id = item.get("frm_md_fields_id")
frm_md_fields_name = item.get("frm_md_fields_name")
frm_tenant_id = item.get("frm_tenant_id")
frm_public_flag = item.get("frm_public_flag")
to_md_entity_id = item.get("to_md_entity_id")
to_md_entity_code = item.get("to_md_entity_code")
to_md_entity_name = item.get("to_md_entity_name")
to_md_entity_desc = item.get("to_md_entity_desc")
to_md_fields_id = item.get("to_md_fields_id")
to_md_fields_name = item.get("to_md_fields_name")
to_tenant_id = item.get("to_tenant_id")
to_public_flag = item.get("to_public_flag")
rel_dict = {}
rel_dict["label"] = rel_type
rel_dict["name"] = rel_desc
rel_dict["relation_id"] = rel_id
rel_dict["relation_desc"] = rel_desc
rel_dict["relation_type"] = rel_type
rel_dict["from_entity_id"] = frm_md_entity_id
rel_dict["from_entity_name"] = frm_md_entity_name
rel_dict["from_entity_code"] = frm_md_entity_code
rel_dict["from_fields_id"] = frm_md_fields_id
rel_dict["from_fields_name"] = frm_md_fields_name
rel_dict["to_entity_id"] = to_md_entity_id
rel_dict["to_entity_name"] = to_md_entity_name
rel_dict["to_entity_code"] = to_md_entity_code
rel_dict["to_fields_id"] = to_md_fields_id
rel_dict["to_fields_name"] = to_md_fields_name
list_rel.append(rel_dict)
entity_dict = {}
entity_dict["label"] = frm_md_entity_code
entity_dict["name"] = frm_md_entity_name
entity_dict["tenant_id"] = frm_tenant_id
entity_dict["public_flag"] = frm_public_flag
entity_dict["entity_id"] = frm_md_entity_id
entity_dict["entity_code"] = frm_md_entity_code
entity_dict["entity_name"] = frm_md_entity_name
entity_dict["entity_desc"] = frm_md_entity_desc
if not is_entity_in_list("entity_id", frm_md_entity_id, list_entity):
list_entity.append(entity_dict)
entity_dict = {}
entity_dict["label"] = to_md_entity_code
entity_dict["name"] = to_md_entity_name
entity_dict["tenant_id"] = to_tenant_id
entity_dict["public_flag"] = to_public_flag
entity_dict["entity_id"] = to_md_entity_id
entity_dict["entity_code"] = to_md_entity_code
entity_dict["entity_name"] = to_md_entity_name
entity_dict["entity_desc"] = to_md_entity_desc
if not is_entity_in_list("entity_id", to_md_entity_id, list_entity):
list_entity.append(entity_dict)
return list_entity, list_rel
def is_entity_in_list(key, value, entity_list):
flag = False
if key is None:
return True
if entity_list is None or len(entity_list) <= 0:
return flag
for item in entity_list:
if item.get(key) == value:
flag = True
break
return flag
# Initialize the graph database with the data model relationship model
def ini_entity_model_graph(tenant_id, entity_codes, entity_catagory, schema):
# entity_codes = ["md_entities"]
result = query_entity_rel_by_entity(tenant_id, entity_codes)
entitie_model_list, rel_list = graph_data_mapping(result)
mg.create_object_from_metadata(entitie_model_list, entity_catagory, schema)
mg.create_object_rel_from_metadata(rel_list)
return (entitie_model_list, rel_list)
if __name__ == '__main__':
# [{"entity_code":"table_name"}]
entity_list = [{"entity_code": "Contract", "table_name": "data_t"}, {"entity_code": "BoQ", "table_name": "data_t"}]
user = ur.get_user("test1")
user_id = user.get("user_id")
tenant_id = user.get("tenant_id")
    # Reverse engineer from the database to initialize table and field metadata
# re = initialize_md_entities_from_tables(user_id, tenant_id, entity_list)
tables = ["t001"]
    # Query all entities and their corresponding primary keys for the given tenant.
# re = query_entity_by_tenant(tenant_id, tables)
# logger.info("query all tables ,re={}".format(re))
# entity_codes = ["md_entities"]
# re = query_entity_rel_by_entity(tenant_id, entity_codes)
# logger.info("query_entity_rel_by_entity ,re={}".format(re))
entity_codes = ["Part", "BOM"]
entity_catagory = "DEMO"
schema = "Test"
    # Neo4j model relationship initialization
# entity_codes = None
# re = ini_entity_model_graph(tenant_id, entity_codes, entity_catagory, schema)
# logger.info("ini_entity_model_graph ,re={}".format(re))
```
#### File: metedata_fusion_tools/test/test_lookup.py
```python
from data import lookup
from privilege import user_mngt as ur
def test_lookup():
# ##insert the lookup data
user = ur.get_user("test1")
user_id = user.get("user_id")
tenant_id = user.get("tenant_id")
data_list = []
data = {}
md_entity_id = 30001
md_field_id = 40005
data_id = 800001
data["data_id"] = data_id
data["md_entity_id"] = md_entity_id
data["lookup_classify_id"] = 123
data["lookup_key"] = md_field_id
data["lookup_value"] = "bbb"
data_list.append(data)
data = {}
data["data_id"] = data_id
data["md_entity_id"] = md_entity_id
data["lookup_classify_id"] = 123
data["lookup_key"] = md_field_id
data["lookup_value"] = "aaa"
data_list.append(data)
re= lookup.insert_lookup_data(user_id, tenant_id, data_list)
assert re is not None
re = lookup.update_lookup_data(user_id, tenant_id, md_entity_id, md_field_id, data_id, data_list)
assert re is not None
re = lookup.delete_lookup_data(user_id, tenant_id, md_entity_id, md_field_id, data_id)
assert re is not None
# ##query the lookup data
where_dict = {"md_entity_id": 30041, "lookup_key": 40005}
re = lookup.query_lookup_data(user_id, tenant_id, where_dict)
assert re is not None
test_lookup()
```
#### File: metedata_fusion_tools/test/test_privilege.py
```python
from mdata import metadata as md
from privilege import role_privilege as rp, data_privilege as dp, user_mngt as ur
from common import constants as const
def test_data_privilege():
user_account = "test1"
user = ur.get_user(user_account)
user_id = user.get("user_id")
tenant_id = user.get("tenant_id")
## Entity
md_entity_id = 30001
re = dp.query_data_privilege_info(tenant_id, user_id, md_entity_id, const.ENTITY_TYPE_ENTITY)
assert re is not None
## View
md_entity_id = 50001
re = dp.query_data_privilege_info(tenant_id, user_id, md_entity_id, const.ENTITY_TYPE_VIEW)
assert re is not None or re is None
def test_role_privilege():
user_account = "admin"
user = ur.get_user(user_account)
user_id = user.get("user_id")
tenant_id = user.get("tenant_id")
re = md.get_md_entities_id_by_code(["entity_privileges"])
if re is not None and len(re) > 0:
insert_entity_id = re[0].get("md_entity_id")
else:
        # Metadata entity ID of the entity_privileges entity table
insert_entity_id = 30029
    # #1. Import entity privileges
md_entity_ids = [30015]
re = rp.insert_entity_privilege(user_id, tenant_id, const.ENTITY_TYPE_ENTITY, md_entity_ids)
assert re is not None
    # #2. Write view privileges
view_ids = [50002]
re = rp.insert_entity_privilege(user_id, tenant_id, const.ENTITY_TYPE_VIEW, view_ids)
assert re is not None
# re = query_user_priv_by_user_account(user_account)
re = rp.query_user_privilege_by_userid(tenant_id, user_id)
assert re is not None
test_data_privilege()
test_role_privilege()
``` |
{
"source": "joeirigoyen/MultiAgent-Projects",
"score": 3
} |
#### File: ActividadIntegradora/Entregables/robot_server.py
```python
__author__ = "<NAME>"
from typing import Any
from flask import Flask, request, jsonify
from robot_agents import *
from robot_model import *
# Initial variables
agents = 5
width = 20
height = 20
boxes = 25
step = 0
depot_x = 2
depot_y = 2
max_steps = 0
robot_model = None
# App initialization
app = Flask("Robot example")
# Sort a list using a tuple's first element
def get_first_in(elem: tuple) -> Any:
return elem[0]
# Return a list with the second element of each tuple in a given list
def get_second_elems_from(arr: list) -> list[Any]:
return [elem[1] for elem in arr]
# Route to initialize model
@app.route("/init", methods=["POST", "GET"])
def init_model():
global agents, width, height, boxes, robot_model, step, depot_x, depot_y, max_steps
if request.method == "POST":
# Define model's initial variables
agents = int(request.form.get("agents"))
width = int(request.form.get("width"))
height = int(request.form.get("height"))
boxes = int(request.form.get("boxes"))
depot_x = int(request.form.get("depot_x"))
depot_y = int(request.form.get("depot_y"))
max_steps = int(request.form.get("max_steps"))
step = 0
# Initialize model
print(agents, boxes, width, height, depot_x, depot_y, max_steps)
print(request.form)
robot_model = RobotModel(agents, width, height, boxes, depot_x,
depot_y, max_steps)
# Return JSON message if post method was OK
return jsonify({"message": "accepted"})
# Get trajectories from robots
@app.route("/getRobotPath", methods=["GET"])
def get_robot_path():
global robot_model
robot_position_tuples, robot_positions = [], []
if request.method == "GET":
for (a, x, z) in robot_model.grid.coord_iter():
if len(a) > 0:
for agent in a:
if agent.type_id == agt.ROBOT:
print("Added robot to list")
robot_position_tuples.append((agent.unique_id, {
"x": x,
"y": 0,
"z": z
}))
robot_position_tuples.sort(key=get_first_in)
robot_positions = get_second_elems_from(robot_position_tuples)
print(robot_position_tuples)
return jsonify({"positions": robot_positions})
# Get trajectories from boxes
@app.route("/getBoxPath", methods=["GET"])
def get_box_path():
global robot_model
box_position_tuples, box_positions = [], []
if request.method == "GET":
for (a, x, z) in robot_model.grid.coord_iter():
if len(a) > 0:
for agent in a:
if agent.type_id == agt.BOX:
print("Added box to list")
y = agent.y_pos
box_position_tuples.append((agent.unique_id, {
"x": x,
"y": y,
"z": z
}))
box_position_tuples.sort(key=get_first_in)
print(box_position_tuples)
box_positions = get_second_elems_from(box_position_tuples)
return jsonify({"positions": box_positions})
# Get trajectories from depots
@app.route("/getDepotPath", methods=["GET"])
def get_depot_path():
global robot_model
depot_positions = []
if request.method == "GET":
for (a, x, z) in robot_model.grid.coord_iter():
if len(a) > 0:
for agent in a:
if agent.type_id == agt.DEPOT:
print("Added depot to list")
depot_positions.append({"x": x, "y": 0, "z": z})
print(depot_positions)
return jsonify({"positions": depot_positions})
# Make model advance one step further
@app.route("/step", methods=["GET"])
def update_model():
global robot_model, step
if request.method == "GET":
robot_model.step()
step += 1
return jsonify({"step": step})
if __name__ == "__main__":
app.run(host="localhost", port=8585, debug=True)
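# Minimal client sketch (illustrative only; assumes the server above is running on
# localhost:8585 and uses the form field names expected by /init):
#
#   import requests
#   requests.post("http://localhost:8585/init", data={
#       "agents": 5, "width": 20, "height": 20, "boxes": 25,
#       "depot_x": 2, "depot_y": 2, "max_steps": 200,
#   })
#   print(requests.get("http://localhost:8585/step").json())          # {"step": 1}
#   print(requests.get("http://localhost:8585/getRobotPath").json())  # {"positions": [...]}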
```
#### File: FinalProject/Backend/astar.py
```python
from mesa.model import Model
from grid_manager import NodeTypes
from grid_manager import Node, h
from queue import PriorityQueue
def get_nodes_in_path(came_from: dict, current: Node):
path = []
while current in came_from:
path.append((current.row, current.col))
current = came_from[current]
path.reverse()
return path
def get_shortest_path(grid: list, start: Node, end: Node, model: Model) -> list:
# Open destination Node momentarily so the algorithm can detect it
model.standard_map[end.row][end.col].state = NodeTypes.END
model.standard_map[end.row][end.col].update_adj()
# Initialize counter, queue and set
count = 0
open_set = PriorityQueue()
open_set.put((0, count, start))
came_from = {}
# Initialize each cell's f score and g score
g_score = {node: float("inf") for row in grid for node in row}
f_score = {node: float("inf") for row in grid for node in row}
# Declare start node's scores
g_score[start] = 0
f_score[start] = h(start, end)
# Returns nodes in priority queue
open_set_hash = {start}
# Run until the open set is empty
while not open_set.empty():
# Current node will be the start node
current = open_set.get()[2]
# Remove current node from the open set hash
open_set_hash.remove(current)
# Check if current node is already the destination
if current == end:
model.standard_map[end.row][end.col].state = NodeTypes.OBSTACLE
model.standard_map[end.row][end.col].update_adj()
return get_nodes_in_path(came_from, current)
# Check the neighbors of the current node and add a temporary g score
for neighbor in current.neighbors:
# Check each neighbor's g score and look for the smallest one
temp_g = g_score[current] + 1
if temp_g < g_score[neighbor]:
# Tell program that the current path comes from the current node
came_from[neighbor] = current
# Set the neighbor's g score the new g score
g_score[neighbor] = temp_g
f_score[neighbor] = temp_g + h(neighbor, end)
# If neighbor has not been visited, change it's state and add it to the priority queue
if neighbor not in open_set_hash:
count += 1
open_set.put((f_score[neighbor], count, neighbor))
open_set_hash.add(neighbor)
neighbor.state = NodeTypes.VISITED
if current != start:
current.state = NodeTypes.CLOSED
model.standard_map[end.row][end.col].state = NodeTypes.OBSTACLE
model.standard_map[end.row][end.col].update_adj()
return []
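# Minimal usage sketch (assumptions: `model` exposes `standard_map`, `rows` and
# `cols` as used above, and every node's neighbors have already been initialized):
#
#   grid = model.standard_map
#   start, end = grid[2][3], grid[10][14]
#   path = get_shortest_path(grid, start, end, model)  # list of (row, col) tuples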
```
#### File: FinalProject/Backend/grid_manager.py
```python
from enum import Enum, auto
from mesa import Model
from directions import Directions as dirs
class NodeTypes(Enum):
START = auto()
END = auto()
OBSTACLE = auto()
CLOSED = auto()
UNVISITED = auto()
VISITED = auto()
class Node:
def __init__(self, col: int, row: int, model: Model) -> None:
self.col = col
self.row = row
self.state = NodeTypes.UNVISITED
self.direction = None
self.model = model
self.neighbors = []
# Update the adjacent nodes of a node
def update_adj(self) -> None:
# Update every adjacent node
self.model.standard_map[self.row][self.col + 1].update_neighbors()
self.model.standard_map[self.row][self.col - 1].update_neighbors()
self.model.standard_map[self.row + 1][self.col].update_neighbors()
self.model.standard_map[self.row - 1][self.col].update_neighbors()
# Update the neighbor list of a node
def update_neighbors(self) -> None:
self.neighbors.clear()
if self.col < self.model.cols - 1 and not self.model.standard_map[self.row][self.col + 1].state == NodeTypes.OBSTACLE:
if self.model.standard_map[self.row][self.col].direction != dirs.LEFT:
if not (self.model.standard_map[self.row][self.col].direction == dirs.UP and self.model.standard_map[self.row][self.col + 1].direction == dirs.LEFT):
self.neighbors.append(self.model.standard_map[self.row][self.col + 1])
if self.col > 0 and not self.model.standard_map[self.row][self.col - 1].state == NodeTypes.OBSTACLE:
if self.model.standard_map[self.row][self.col].direction != dirs.RIGHT:
if not (self.model.standard_map[self.row][self.col].direction == dirs.DOWN and self.model.standard_map[self.row][self.col - 1].direction == dirs.RIGHT):
self.neighbors.append(self.model.standard_map[self.row][self.col - 1])
if self.row < self.model.rows - 1 and not self.model.standard_map[self.row + 1][self.col].state == NodeTypes.OBSTACLE:
if self.model.standard_map[self.row][self.col].direction != dirs.UP:
if not (self.model.standard_map[self.row][self.col].direction == dirs.RIGHT and self.model.standard_map[self.row + 1][self.col].direction == dirs.UP) and not (self.model.standard_map[self.row][self.col].direction == dirs.LEFT and self.model.standard_map[self.row + 1][self.col].direction == dirs.UP):
self.neighbors.append(self.model.standard_map[self.row + 1][self.col])
if self.row > 0 and not self.model.standard_map[self.row - 1][self.col].state == NodeTypes.OBSTACLE:
if self.model.standard_map[self.row][self.col].direction != dirs.DOWN:
if not (self.model.standard_map[self.row][self.col].direction == dirs.RIGHT and self.model.standard_map[self.row - 1][self.col].direction == dirs.DOWN) and not (self.model.standard_map[self.row][self.col].direction == dirs.LEFT and self.model.standard_map[self.row - 1][self.col].direction == dirs.DOWN):
self.neighbors.append(self.model.standard_map[self.row - 1][self.col])
# Update every node's neighbor list
def init_neighborhood(grid: list) -> None:
for i in range(len(grid)):
for j in range(len(grid[i])):
grid[i][j].update_neighbors()
# Create a grid with empty nodes
def make_grid(rows: int, cols: int, model: Model) -> list:
grid = []
for row in range(rows):
temp = []
for col in range(cols):
node = Node(col, row, model)
temp.append(node)
grid.append(temp)
return grid
# Get Manhattan distance from one node to another
def h(n1: Node, n2: Node):
return abs(n1.col - n2.col) + abs(n1.row - n2.row)
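# Illustrative example: for nodes at (row=2, col=3) and (row=5, col=7),
# h returns |3 - 7| + |2 - 5| = 7, the Manhattan distance between them.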
```
#### File: FinalProject/Backend/traffic_agents.py
```python
from mesa import Agent, Model
from directions import Directions
from agent_types import AgentTypes as agt
from directions import Directions as dirs
from grid_manager import NodeTypes
from astar import *
# Represents a building; not much happens with this agent since it is never added to the scheduler
class Building(Agent):
def __init__(self, unique_id: int, model: Model) -> None:
super().__init__(unique_id, model)
self.type_id = agt.BUILDING # Set the type of agent as Building
# Represents a traffic light, has a direction since it's part of the road, and a state for the cars to check and decide if they will advance
class Light(Agent):
def __init__(self, unique_id: int, model: Model, direction: Directions) -> None:
super().__init__(unique_id, model)
self.type_id = agt.LIGHT # Agent type
self.direction = direction # Direction of the road
self.state = False if direction == dirs.UP or direction == dirs.DOWN else True # Initial state depending on it's direction
# Represents the place where a car will try to get to
class Destination(Agent):
def __init__(self, unique_id: int, model: Model) -> None:
super().__init__(unique_id, model)
self.type_id = agt.DESTINATION # Agent type
self.occupied = False # Determines if a destination has a car inside it (obsolete after implementation of new car spawns)
# Represents a car within the model
class Car(Agent):
def __init__(self, unique_id: int, model: Model, start_pos: tuple) -> None:
super().__init__(unique_id, model)
# Set initial attributes
self.type_id = agt.CAR # Agent type
self.destination = self.random.choice(self.model.destinations) # Random destination from the model's destinations list
self.has_arrived = False # Contains if the car has arrived or not
# Set start and end in map
self.map = model.standard_map # A copy of the model's graph
self.map[start_pos[0]][start_pos[1]].state = NodeTypes.START
self.map[self.destination.pos[0]][self.destination.pos[1]].state = NodeTypes.END
# Find path to destination
self.path = get_shortest_path(self.map, self.map[start_pos[0]][start_pos[1]], self.map[self.destination.pos[0]][self.destination.pos[1]], model)
self.next_cell = None
self.last_dir = self.map[start_pos[0]][start_pos[1]].direction
self.turn_dir = None
self.main_av = False
# Check if destination cell is within the neighborhood
def check_destination(self, neighborhood: list) -> tuple:
# Check every cell in neighborhood
for cell in neighborhood:
# Check the content in every cell
content = self.model.grid.get_cell_list_contents(cell)
# Check every agent within the cell's content
for agent in content:
# Set next position as the destination cell
if agent.type_id == agt.DESTINATION:
return agent.pos
# If nothing was found, don't return anything
return None
# Return whether the car is on a main avenue or not
def is_in_main_av(self) -> bool:
# If car is in the first two columns or in the last two columns, return True
        if (self.pos[0] >= 0 and self.pos[0] < 2) or (self.pos[0] >= self.model.cols - 2 and self.pos[0] < self.model.cols):
return True
# If car is in the first two rows or in the last two rows, return True
        elif (self.pos[1] >= 0 and self.pos[1] < 2) or (self.pos[1] >= self.model.rows - 2 and self.pos[1] < self.model.rows):
return True
# Otherwise, return False
else:
return False
# Get the direction of the car's next move
def get_turn_dir(self) -> tuple:
return (self.next_cell[0] - self.pos[0], self.next_cell[1] - self.pos[1])
# Check if a car will let other car go first in a cell
def give_priority(self, other: Agent) -> bool:
# If the car is in the main avenue but the other isn't, don't give priority
if self.main_av and not other.main_av:
return False
# If this car is not in the main avenue but the other is, give priority
elif not self.main_av and other.main_av:
return True
# If this car is going straight and the other car is going to turn
elif self.turn_dir == self.last_dir and other.turn_dir != other.last_dir:
return False
# If both cars are going to turn
elif other.turn_dir != other.last_dir and self.turn_dir != self.last_dir:
return True
# If both cars are going straight
else:
return True
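    # Illustrative outcomes of the rules above:
    #   self on the main avenue, other not     -> False (self keeps priority)
    #   self not on the main avenue, other is  -> True  (yield)
    #   otherwise                              -> yield unless self goes straight
    #                                             while the other car turns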
# Check if next cell is being targeted by other agents and return if car can go to it
def can_get_to_next_cell(self) -> bool:
# If next cell is not the car's destination, check if it can move towards it
if self.next_cell != self.destination.pos:
# If next cell is empty:
if self.model.grid.is_cell_empty(self.next_cell):
# Get next cell's neighbors
next_neighbors = self.model.grid.get_neighborhood(self.next_cell, moore=False, include_center=False)
# Remove self position from the neighborhood
if self.pos in next_neighbors:
next_neighbors.remove(self.pos)
# Check each cell within the neighborhood
for cell in next_neighbors:
# Check each cell's contents
for agent in self.model.grid.get_cell_list_contents(cell):
# If there's a car in that cell, check if it's going to this cell too
if agent.type_id == agt.CAR:
if not agent.has_arrived:
# If they're going to the same cell, check if this car will let the other go first
if agent.next_cell == self.next_cell:
return not self.give_priority(agent)
# If no agents were found in that cell, let the car advance
return True
# Otherwise, check if the contents of the next cell contain a car
else:
contents = self.model.grid.get_cell_list_contents(self.next_cell)
for agent in contents:
# If agent is a car, don't let it advance
if agent.type_id == agt.CAR:
return False
# If no car was found, let the car advance
return True
# Else, just return True since it can move no matter what
else:
return True
# Check if there are cars or lights in front of the car
def has_green_light(self) -> bool:
# If there's a light in the next cell, check if it's a light
content = self.model.grid.get_cell_list_contents(self.next_cell)
for agent in content:
# If agent is a light, return it's state
if agent.type_id == agt.LIGHT:
return agent.state
# If there is no light, advance to next cell
return True
# Try to move to the next cell
def move_next(self) -> None:
# Check if any other car is directed to the next cell
if self.can_get_to_next_cell():
# If there's a green light, advance to it
if self.has_green_light():
# Remove waypoint from path
self.path.pop(0)
# Move agent towards the next cell
self.model.grid.move_agent(self, self.next_cell)
# Return a boolean value representing if two cars are in the same position
def check_crashes(self) -> bool:
# Get contents from current cell
cell_content = self.model.grid.get_cell_list_contents(self.pos)
# Remove self from content
cell_content.remove(self)
# Check if there's content in the cell other than self
if len(cell_content) > 0:
# Check every agent that is a car
for agent in cell_content:
if agent.type_id == agt.CAR:
# If the car's position is already in the car's definition, return false
if agent.pos == agent.destination.pos:
return False
# Otherwise, return true
else:
print(f"Crash in {self.pos}")
return True
return False
# Otherwise return false
else:
return False
def step(self) -> None:
# If car hasn't arrived to it's destination
if not self.has_arrived:
# Set car's current direction
self.last_dir = self.map[self.pos[0]][self.pos[1]].direction
# If path still has remaining cells, assign the first one to the car's next cell
if len(self.path) > 0:
# Remove the next cell from the path
self.next_cell = self.path[0]
self.turn_dir = self.get_turn_dir()
self.main_av = self.is_in_main_av()
self.turn_dir = self.map[self.next_cell[0]][self.next_cell[1]].direction
# Try to move to the next cell
self.move_next()
# Check if car is in the same position as another car
self.model.running = not self.check_crashes()
# Otherwise, set the car's has_arrived attribute to True
else:
self.model.arrivals += 1
self.has_arrived = True
```
#### File: FinalProject/Backend/traffic_visualizer.py
```python
from mesa import Agent
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.ModularVisualization import ModularServer
from agent_types import AgentTypes as agt
from traffic_model import TrafficModel
MAX_STEPS = 1000
CARS = 20
MODEL = TrafficModel(CARS, MAX_STEPS)
GRID_WIDTH = 500
GRID_HEIGHT = 500
def agent_portrayal(agent: Agent) -> dict[str, str]:
portrayal = {"Filled": "true",
"Shape": "circle"}
if agent.type_id == agt.CAR:
portrayal["Layer"] = 2
portrayal["r"] = 0.5
portrayal["Color"] = '#000000'
if agent.type_id == agt.BUILDING:
portrayal["Layer"] = 0
portrayal["r"] = 1.1
portrayal["Color"] = "#192b36"
if agent.type_id == agt.DESTINATION:
portrayal["Layer"] = 1
portrayal["r"] = 1
portrayal["Color"] = "#b600d6"
if agent.type_id == agt.LIGHT:
portrayal["Layer"] = 0
portrayal["r"] = 0.8
if agent.state:
portrayal["Color"] = "#00d62e"
else:
portrayal["Color"] = "#c20000"
return portrayal
if __name__ == "__main__":
grid = CanvasGrid(agent_portrayal, MODEL.cols, MODEL.rows, GRID_WIDTH, GRID_HEIGHT)
server = ModularServer(TrafficModel, [grid], "Traffic Model", {"cars": CARS, "max_steps": MAX_STEPS})
server.port = 8521
server.launch()
``` |
{
"source": "joeirigoyen/PathfindingVisualizer",
"score": 2
} |
#### File: PathfindingVisualizer/app/main.py
```python
from flask import *
app = Flask("Pathfinding Algorithms Visualizer")
@app.route('/')
def main():
return render_template('index.html')
``` |
{
"source": "joeirigoyen/PythonLexicon",
"score": 3
} |
#### File: PythonLexicon/source/reader.py
```python
import time
import os
import os.path
import re
import html_writer as hw
import pattern_finder as pf
# Directories
curr_state = 0
file_directory = ""
source_directory = os.path.abspath("C:\\Users\\Joe\\Documents\\TEC\\Materias\\4to\\IMC\\PythonHighlighter\\PythonLexicon\\source\\source.txt")
# Read full file by char
def read_file():
global curr_state
# Open file
f = open(source_directory, "r")
# Get lines
lines = f.readlines()
# Read lines by character
for line in lines:
# Add html paragraph
hw.write_to_file("\t<p>")
# Set state to 0
curr_state = 0
# Find regexps in line
# Comments
line = pf.find_matches(line, pf.comment_token, "comment")
# Decorators
line = pf.find_matches(line, pf.decorator_token, "decorator")
# Arguments
line = pf.find_matches(line, pf.arg_token, "argument")
# Find tabs
line = pf.find_matches(line, pf.tab_token, "tab")
# Find function calls
line = pf.find_matches(line, pf.function_token, "function")
# Find numbers
line = pf.find_matches(line, pf.number_token, "number")
# Find special elements
for word in line.split():
# Keywords
if word.replace("</span>", "") in pf.keywords:
line = pf.find_matches(line, word.replace("</span>", ""), "keyword")
# Strings
if "class=" not in word:
if "\"" in word or "\'" in word:
line = re.sub(word, "<span class=\"string\">" + word + "</span>", line)
# End html paragraph
if line != None:
hw.write_to_file(line)
hw.write_to_file("</p>\n\t\t\t")
# Run full program
if __name__ == '__main__':
init_time = time.time()
hw.init_file("test.html")
pf.assign_regex()
read_file()
hw.end_file()
finish_time = time.time() - init_time
print("Runtime: " + str(finish_time))
``` |
{
"source": "joeirimpan/git_kloc",
"score": 2
} |
#### File: git_kloc/user/views.py
```python
import os
from flask import (
request, session, Blueprint, render_template, jsonify,
url_for, redirect
)
from flask_login import login_user, logout_user, current_user
from requests_oauthlib import OAuth2Session
from .models import User
from ..extensions import login_manager
blueprint = Blueprint(
'user', __name__, static_folder='../static'
)
GITHUB_OAUTH_URL = 'https://github.com/login/oauth/%s'
@login_manager.user_loader
def user_loader(user_id):
if 'token' not in session:
return
return User(token=session['token'])
@blueprint.route('/')
def index():
"""Home page
"""
return render_template('index.html')
@blueprint.route('/is_logged_in')
def is_logged_in():
return jsonify(
isLoggedIn=current_user.is_authenticated
)
@blueprint.route('/login')
def login():
"""Login handler
"""
github = OAuth2Session(os.environ['CLIENT_ID'])
authorization_url, state = github.authorization_url(
GITHUB_OAUTH_URL % 'authorize'
)
session['oauth_state'] = state
return jsonify(auth_url=authorization_url)
@blueprint.route('/authorize')
def authorize():
"""Oauth2 callback route
"""
github = OAuth2Session(
os.environ['CLIENT_ID'],
state=session['oauth_state']
)
token = github.fetch_token(
GITHUB_OAUTH_URL % 'access_token',
client_secret=os.environ['CLIENT_SECRET'],
authorization_response=request.url
)
github.token = token
user = User(token=token)
session['token'] = token
login_user(user)
next_url = request.args.get('next') or url_for('user.index')
return redirect(next_url)
@blueprint.route('/logout')
def logout():
"""Logout handler
"""
session.clear()
logout_user()
return redirect(url_for('user.index'))
``` |
{
"source": "joeirimpan/nse_scraper",
"score": 2
} |
#### File: nse_scraper/nse_scraper/app.py
```python
import json
import cherrypy
from cherrypy.process.plugins import BackgroundTask
from .nse_api import NSE
from .extensions import redis_store
from .utils import fp
class App(object):
"""Cherrypy Application
"""
def __init__(self, *args, **kwargs):
self.nse = NSE()
# Start the cherrypy background cron-like task
BackgroundTask(
interval=5 * 60,
function=self.store_now,
bus=cherrypy.engine
).start()
@property
def stock_data_key(self):
"""Return the stock data key
"""
return 'nse:stock_info'
def store_now(self):
"""Persist data onto redis
"""
data = {}
for type in ['gainers', 'losers']:
# Serialize list of dict to string
data[type] = json.dumps(self.nse.get_stocks_data(type))
# Store as hashmap datastructure to improve memory usage
redis_store.hmset(self.stock_data_key, data)
@cherrypy.expose
def index(self):
return file(fp('templates/index.html'))
@cherrypy.expose(['stocks_info.json'])
@cherrypy.tools.json_out()
def get_stocks_info(self):
"""JSON endpoint which returns the stocks info
"""
data = redis_store.hgetall(self.stock_data_key)
# De-serialiaze the values
for key, value in data.iteritems():
data[key] = json.loads(value)
return data
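# Minimal serving sketch (an assumption for illustration, not part of the original module):
#
#   if __name__ == '__main__':
#       cherrypy.quickstart(App(), '/')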
``` |
{
"source": "joeirimpan/pyosticket",
"score": 3
} |
#### File: pyosticket/pyosticket/pyosticket.py
```python
import atexit
import requests
__all__ = ['OSTicketAPI', 'TicketModel']
class OSTicketAPI(object):
def __init__(self, url, api_key):
self.url = url
self.session = requests.Session()
atexit.register(self.session.close)
self.session.headers["X-API-Key"] = api_key
@property
def ticket(self):
return Ticket(connection=self)
class TicketModel(object):
__slots__ = [
"source", "name", "email", "ip",
"subject", "topic_id", "attachments"
]
def __init__(self, name, email, subject, topic_id, attachments, **kwargs):
self.source = kwargs.get("source", "API")
self.name = name
self.email = email
self.ip = kwargs.get("ip", "")
self.subject = subject
self.topic_id = topic_id
        self.message = kwargs.get("message", "")
        self.attachments = attachments
def to_dict(self):
return {
"source": self.source,
"name": self.name,
"email": self.email,
"ip": self.ip,
"subject": self.subject,
"topicId": self.topic_id,
"message": self.message,
"attachments": self.attachments
}
class Ticket(object):
def __init__(self, connection):
self.connection = connection
@property
def url(self):
return self.connection.url
def _raise_or_return_json(self, response):
"""Raise HTTPError before converting response to json
:param response: Request response object
"""
response.raise_for_status()
try:
json_value = response.json()
except ValueError:
return response.content, response.status_code
else:
return json_value, response.status_code
def create(self, ticket):
"""Create a ticket
:param ticket: An instance of `TicketModel`
"""
response = self.connection.session.post(
"%s/api/tickets.json" % self.url,
json=ticket.to_dict(),
)
return self._raise_or_return_json(response)
def all(self, email):
"""XXX: Custom API to fetch all tickets for this email
:param email: Email of the user
"""
response = self.connection.session.get(
"%s/api/tickets/all" % self.url,
params={"email": email},
)
return self._raise_or_return_json(response)
def get(self, ticket_number, email):
"""XXX: Custom API to fetch a specific ticket
:param ticket_number: Ticket number
:param email: Email of the user
"""
response = self.connection.session.get(
"%s/api/tickets/%s" % (self.url, ticket_number),
params={"email": email},
)
return self._raise_or_return_json(response)
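# Minimal usage sketch (URL, API key and ticket values are placeholders):
#
#   api = OSTicketAPI(url="https://support.example.com", api_key="YOUR-API-KEY")
#   ticket = TicketModel(
#       name="Jane Doe",
#       email="jane@example.com",
#       subject="Printer on fire",
#       topic_id=1,
#       attachments=[],
#       message="The office printer caught fire this morning.",
#   )
#   body, status = api.ticket.create(ticket)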
``` |
{
"source": "joeirimpan/pyparallel",
"score": 2
} |
#### File: pyparallel/pyparallel/cli.py
```python
import click
from pyparallel import Downloader
@click.command()
@click.option('--url', required=True, help='Download URL')
@click.option('--conns', required=True, help='Number of parts')
@click.option('--filename', required=True, help='Output filename')
def cli(url, conns, filename):
downloader = Downloader(
url=url,
conns=int(conns),
filename=filename
)
downloader.start()
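# Example invocation (assumed; the console-script name `pyparallel` is a guess and
# may differ from the project's actual entry point):
#
#   $ pyparallel --url https://example.com/file.zip --conns 4 --filename file.zip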
``` |
{
"source": "JoeIsHere/yakbarber",
"score": 2
} |
#### File: JoeIsHere/yakbarber/yakbarber.py
```python
from codecs import open
import os
import sys
import pystache
from itertools import islice
import argparse
import imp
import shutil
import re
import datetime
import time
import pytz
import BeautifulSoup
import markdown
import mdx_smartypants
import cProfile
# Settings Import
parser = argparse.ArgumentParser(description='Yak Barber is a fiddly little time sink, and blog system.')
parser.add_argument('-s','--settings',nargs=1,help='Specify a settings.py file to use.')
parser.add_argument('-c','--cprofile',action='store_true', default=False,help='Run cProfile to check run time and elements.')
args = parser.parse_args()
if args.settings is not None:
settingsPath = args.settings[0]
else:
settingsPath = './settings.py'
settings = imp.load_source('settings.py',settingsPath)
# Settings Local
root = settings.root
webRoot = settings.webRoot
contentDir = settings.contentDir
templateDir = settings.templateDir
outputDir = settings.outputDir
sitename = settings.sitename
author = settings.author
postsPerPage = settings.postsPerPage
typekitId = settings.typekitId
# 'meta','fenced_code','footnotes','smart_strong','smarty'
md = markdown.Markdown(extensions=['meta','smartypants','toc(anchorlink=True)'])
def safeMkDir(f):
d = f
if not os.path.exists(d):
os.makedirs(d)
def splitEvery(n, iterable):
i = iter(iterable)
piece = list(islice(i, n))
while piece:
yield piece
piece = list(islice(i, n))
def removePunctuation(text):
text = re.sub(r'\s[^a-zA-Z0-9]\s',' ',text)
text = re.sub(r'[^a-zA-Z0-9\s]+','',text)
text = text.encode('ascii','xmlcharrefreplace')
return text
def templateResources():
tList = os.listdir(templateDir)
  tList = [x for x in tList if '.html' not in x and '.xml' not in x]
for tr in tList:
fullPath = os.path.join(templateDir, tr)
if (os.path.isfile(fullPath)):
shutil.copy(fullPath, outputDir)
def openConvert(mdfile):
with open(mdfile, 'r', 'utf-8') as f:
rawfile = f.read()
converted = md.convert(rawfile)
try:
if re.match(r'[a-zA-Z0-9]+',md.Meta[u'title'][0]):
converted = converted
convertedMeta = [md.Meta, converted]
return convertedMeta
else:
return None
except:
return None
def aboutPage():
with open(contentDir + u'about.markdown', 'r', 'utf-8') as f:
rawfile = f.read()
converted = {'about': md.convert(rawfile),'sitename':sitename,'webRoot':webRoot}
converted[u'typekitId'] = typekitId
with open(templateDir + u'about.html','r','utf-8') as f:
aboutTemplate = f.read()
with open(outputDir + u'about.html', 'w', 'utf-8') as f:
aboutResult = pystache.render(aboutTemplate,converted)
return f.write(aboutResult)
def renderPost(post, posts):
metadata = {}
for k, v in post[0].iteritems():
metadata[k] = v[0]
metadata[u'content'] = post[1]
metadata[u'sitename'] = sitename
metadata[u'webRoot'] = webRoot
metadata[u'author'] = author
metadata[u'typekitId'] = typekitId
postName = removePunctuation(metadata[u'title'])
postName = metadata[u'date'].split(' ')[0] + '-' + postName.replace(u' ',u'-').replace(u'‑',u'-')
postName = u'-'.join(postName.split('-'))
postFileName = outputDir + postName + '.html'
metadata[u'postURL'] = webRoot + postName + '.html'
metadata[u'title'] = unicode(mdx_smartypants.unicode_entities(metadata[u'title']))
if u'link' in metadata:
templateType = u'/post-content-link.html'
else:
templateType = u'/post-content.html'
with open(templateDir + templateType,'r','utf-8') as f:
postContentTemplate = f.read()
postContent = pystache.render(postContentTemplate,metadata,decode_errors='ignore')
metadata['post-content'] = postContent
with open(templateDir + u'/post-page.html','r','utf-8') as f:
postPageTemplate = f.read()
postPageResult = pystache.render(postPageTemplate,metadata,decode_errors='ignore')
with open(postFileName,'w','utf-8') as f:
f.write(postPageResult)
return posts.append(metadata)
def extractTags(html,tag):
soup = BeautifulSoup.BeautifulSoup(html)
to_extract = soup.findAll(tag)
for item in to_extract:
item.extract()
return unicode(soup)
def RFC3339Convert(timeString):
strip = time.strptime(timeString, '%Y-%m-%d %H:%M:%S')
dt = datetime.datetime.fromtimestamp(time.mktime(strip))
pacific = pytz.timezone('US/Pacific')
ndt = dt.replace(tzinfo=pacific)
utc = pytz.utc
return ndt.astimezone(utc).isoformat().split('+')[0] + 'Z'
def feed(posts):
feedDict = posts[0]
entryList = str()
feedDict['gen-time'] = datetime.datetime.utcnow().isoformat('T') + 'Z'
with open(templateDir + u'/atom.xml','r','utf-8') as f:
atomTemplate = f.read()
with open(templateDir + u'/atom-entry.xml','r','utf-8') as f:
atomEntryTemplate = f.read()
for e,p in enumerate(posts):
p[u'date'] = RFC3339Convert(p[u'date'])
p[u'content'] = extractTags(p[u'content'],'script')
p[u'content'] = extractTags(p[u'content'],'object')
p[u'content'] = extractTags(p[u'content'],'iframe')
if e < 100:
atomEntryResult = pystache.render(atomEntryTemplate,p)
entryList += atomEntryResult
feedDict['atom-entry'] = entryList
feedResult = pystache.render(atomTemplate,feedDict,string_encode='utf-8')
with open(outputDir + 'feed','w','utf-8') as f:
f.write(feedResult)
def paginatedIndex(posts):
indexList = sorted(posts,key=lambda k: k[u'date'])[::-1]
feed(indexList)
postList = []
for i in indexList:
postList.append(i['post-content'])
indexOfPosts = splitEvery(postsPerPage,indexList)
with open(templateDir + u'/index.html','r','utf-8') as f:
indexTemplate = f.read()
indexDict = {}
indexDict[u'sitename'] = sitename
indexDict[u'typekitId'] = typekitId
for e,p in enumerate(indexOfPosts):
indexDict['post-content'] = p
print e
#for x in p:
#print x['title']
if e == 0:
fileName = u'index.html'
if len(indexList) > postsPerPage:
indexDict[u'previous'] = webRoot + u'index2.html'
else:
fileName = u'index' + str(e+1) + u'.html'
if e == 1:
indexDict[u'next'] = webRoot + u'index.html'
indexDict[u'previous'] = webRoot + u'index' + str(e+2) + u'.html'
else:
indexDict[u'previous'] = webRoot + u'index' + str(e+2) + u'.html'
if e < len(indexList):
indexDict[u'next'] = webRoot + u'index' + str(e-1) + u'.html'
indexPageResult = pystache.render(indexTemplate,indexDict)
with open(outputDir + fileName,'w','utf-8') as f:
f.write(indexPageResult)
def start():
convertedList = []
posts =[]
contentList = os.listdir(contentDir)
for c in contentList:
if c.endswith('.md') or c.endswith('.markdown'):
mdc = openConvert(contentDir+c)
if mdc is not None:
convertedList.append(mdc)
sortedList = sorted(convertedList, key=lambda x: x[0], reverse=True)
#pprint.pprint(convertedList, indent=1, depth=4)
aboutPage()
for post in sortedList:
renderPost(post,posts)
paginatedIndex(posts)
templateResources()
def main():
safeMkDir(contentDir)
safeMkDir(templateDir)
safeMkDir(outputDir)
start()
if __name__ == "__main__":
if args.cprofile:
cProfile.run('main()')
else:
main()
``` |
{
"source": "joeization/cv_finalproject",
"score": 3
} |
#### File: joeization/cv_finalproject/demo.py
```python
from imutils.video import VideoStream
from imutils import face_utils
import datetime
import argparse
import imutils
import time
import dlib
import cv2
import threading
import numpy as np
from makeup import *
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--shape-predictor", required=True,
help="path to facial landmark predictor")
args = vars(ap.parse_args())
# initialize dlib's face detector (HOG-based) and then create
# the facial landmark predictor
print("[INFO] loading facial landmark predictor...")
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(args["shape_predictor"])
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] camera sensor warming up...")
vs = cv2.VideoCapture(0)
time.sleep(2.0)
fill = False
blur = False
#alpha = 0.5
#minit()
a = mu()
lip_color = (255, 96, 96)
eyeshadow_color = (161, 125, 108)
blush_color = (255, 216, 226)
def nothing(x):
pass
lip_color = (lip_color[2], lip_color[1], lip_color[0])
eyeshadow_color = (eyeshadow_color[2], eyeshadow_color[1], eyeshadow_color[0])
blush_color = (blush_color[2], blush_color[1], blush_color[0])
cv2.namedWindow('frame')
cv2.createTrackbar('R','frame',255,255,nothing)
cv2.createTrackbar('G','frame',96,255,nothing)
cv2.createTrackbar('B','frame',96,255,nothing)
cv2.createTrackbar('R2','frame',161,255,nothing)
cv2.createTrackbar('G2','frame',125,255,nothing)
cv2.createTrackbar('B2','frame',108,255,nothing)
switch = '0 : OFF \n1 : ON'
cv2.createTrackbar(switch, 'frame',0,1,nothing)
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream, resize it to
# have a maximum width of 400 pixels, and convert it to
# grayscale
_, frame = vs.read()
#frame = imutils.resize(frame, width=400)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# detect faces in the grayscale frame
rects = detector(gray, 0)
# loop over the face detections
for rect in rects:
# determine the facial landmarks for the face region, then
# convert the facial landmark (x, y)-coordinates to a NumPy
# array
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
# loop over the (x, y)-coordinates for the facial landmarks
# and draw them on the image
#for (x, y) in shape:
# cv2.circle(frame, (x, y), 1, (0, 0, 255), -1)
#for i in range(16):
# cv2.line(frame, (shape[i][0], shape[i][1]), (shape[i+1][0], shape[i+1][1]), (0, 255, 0))
if fill:
a.add_lip(frame, gray, np.concatenate((shape[48:55], shape[60:65][::-1])), lip_color)
a.add_lip(frame, gray, np.concatenate((shape[54:60], [shape[48]], [shape[60]], shape[64:68][::-1])), lip_color)
a.add_eyeshadow(frame, gray, shape[36:40],
np.int32([np.int32((shape[40][0]+shape[41][0])/2), np.int32((shape[40][1]+shape[41][1])/2)]),
eyeshadow_color)
a.add_eyeshadow(frame, gray, shape[42:46],
np.int32([np.int32((shape[46][0]+shape[47][0])/2), np.int32((shape[46][1]+shape[47][1])/2)]),
eyeshadow_color)
#cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0, frame)
if blur:
dr = np.linalg.norm(shape[14]-shape[33])/3
dl = np.linalg.norm(shape[2]-shape[33])/3
d = np.linalg.norm(shape[14]-shape[35])/2.5
m = np.int32((shape[31][0]-d*1.5, shape[31][1]-d*dl/(dl+dr)))
a.add_blush(frame, gray, m, np.int32(d), blush_color)
#cv2.circle(frame, (shape[2][0], shape[2][1]), 1, (0, 0, 255), -1)
#cv2.circle(frame, (shape[31][0], shape[31][1]), 1, (0, 0, 255), -1)
m = np.int32((shape[35][0]+d*1.5, shape[35][1]-d*dr/(dl+dr)))
a.add_blush(frame, gray, m, np.int32(d), blush_color)
#cv2.circle(frame, (shape[14][0], shape[14][1]), 1, (0, 0, 255), -1)
#cv2.circle(frame, (shape[35][0], shape[35][1]), 1, (0, 0, 255), -1)
# show the frame
cv2.imshow("frame", frame)
r = cv2.getTrackbarPos('R','frame')
g = cv2.getTrackbarPos('G','frame')
b = cv2.getTrackbarPos('B','frame')
r2 = cv2.getTrackbarPos('R2','frame')
g2 = cv2.getTrackbarPos('G2','frame')
b2 = cv2.getTrackbarPos('B2','frame')
s = cv2.getTrackbarPos(switch,'frame')
if s>0:
lip_color = (b, g, r)
eyeshadow_color = (b2, g2, r2)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
elif key == ord("a"):
fill = not fill
elif key == ord("s"):
blur = not blur
# do a bit of cleanup
cv2.destroyAllWindows()
vs.release()
``` |
{
"source": "joeization/CycleGAN",
"score": 3
} |
#### File: joeization/CycleGAN/CustomDataset.py
```python
import random
import matplotlib.pyplot as plt
import numpy as np
import torchvision.transforms as transforms
from PIL import Image, ImageFilter
from scipy.ndimage import generate_binary_structure
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.morphology import binary_dilation
from scipy.signal import wiener
from utility import elastic_transform
class Affine(object):
'''PyTorch affine adapter
Args:
img (PIL image): image to be transformed
Usage:
set the class attributes (angle, translations, scale, shear), then call Affine(image)
Returns:
the affine-transformed image
'''
angle = None
translations = None
scale = None
shear = None
@staticmethod
def __call__(img):
return transforms.functional.affine(img, Affine.angle, Affine.translations, Affine.scale, Affine.shear)
class DatasetStorage():
storage = {}
label = {}
def __init__(self):
pass
class CustomDataset():
# initial logic happens like transform
def __init__(self, image_paths, fetch=False, f_size=0, train=True):
# DatasetStorage.storage = {}
# DatasetStorage.label = {}
if fetch:
ips = image_paths.copy()
random.shuffle(ips)
self.image_paths = ips[:f_size]
else:
self.image_paths = image_paths
self.train = train
self.transforms_distor = transforms.Compose([
transforms.Grayscale(),
Affine(),
transforms.ToTensor(),
])
self.transforms = transforms.Compose([
# transforms.Grayscale(),
transforms.Resize(size=(128, 128)), # Resize replaces the deprecated transforms.Scale
# transforms.RandomCrop((32, 32)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])
self.transforms_no_scale = transforms.Compose([
# transforms.Grayscale(),
# transforms.Scale(size=(256, 256)),
# transforms.RandomCrop((32, 32)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])
def __getitem__(self, index):
'''
if index in DatasetStorage.storage:
return DatasetStorage.storage[index].clone()
if self.train:
return DatasetStorage.storage[index].clone()
else:
return DatasetStorage.storage[index].clone()
else:
'''
# plt.ion()
image = Image.open(self.image_paths[index])
image = image.convert('RGB')
if self.train:
# Affine.angle, Affine.translations, Affine.scale, Affine.shear = transforms.RandomAffine.get_params(
# degrees=(-30, 30), translate=(0.1, 0.1), scale_ranges=(0.95, 1.05), shears=None, img_size=image.size)
# t_image = self.transforms_distor(image)
t_image = self.transforms(image)
# DatasetStorage.storage[index] = t_image.clone()
# return DatasetStorage.storage[self.image_paths[index]].clone(), DatasetStorage.label[self.image_paths[index]]
return t_image
else:
t_image = self.transforms_no_scale(image)
# DatasetStorage.storage[index] = t_image.clone()
# return DatasetStorage.storage[self.image_paths[index]].clone()
return t_image
def __len__(self): # return count of sample we have
return len(self.image_paths)
```
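The commented-out block in `__getitem__` hints at how the `Affine` adapter above is meant to be driven: its class attributes are filled from `transforms.RandomAffine.get_params` before the distortion pipeline runs. A minimal sketch of that usage, with the parameter ranges copied from the commented code (the image path is illustrative, not part of the repository):
```python
import torchvision.transforms as transforms
from PIL import Image

# Pipeline that includes the Affine adapter defined in CustomDataset.py.
distort = transforms.Compose([
    transforms.Grayscale(),
    Affine(),
    transforms.ToTensor(),
])

img = Image.open("example.jpg").convert("RGB")  # hypothetical input image

# Sample random affine parameters and store them on the adapter's class
# attributes, mirroring the commented-out lines in __getitem__.
Affine.angle, Affine.translations, Affine.scale, Affine.shear = \
    transforms.RandomAffine.get_params(
        degrees=(-30, 30), translate=(0.1, 0.1),
        scale_ranges=(0.95, 1.05), shears=None, img_size=img.size)

t_image = distort(img)  # grayscale, affine-warped tensor
```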
#### File: joeization/CycleGAN/cyclegan.py
```python
import glob
import math
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.distributions as tdist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.transforms.functional as TF
from PIL import Image
from skimage import feature
from tqdm import trange, tqdm
from CustomDataset import CustomDataset
from discriminator import Discriminator
from UNet import UNet
from utility import center_crop
def KL_loss(mu, logvar):
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
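# (note: torch.mean below averages the per-element terms rather than summing them as written above)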
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.mean(KLD_element).mul_(-0.5)
return KLD
class cropper(nn.Module):
def __init__(self, img_size=64, crop_size=32):
super(cropper, self).__init__()
self.isz = img_size
self.csz = crop_size
def forward(self, x):
sx = random.randint(0, self.isz-1-self.csz)
sy = random.randint(0, self.isz-1-self.csz)
return x[:, :, sx:sx+self.csz, sy:sy+self.csz]
def run():
print('loop')
# torch.backends.cudnn.enabled = False
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
# Assuming that we are on a CUDA machine, this should print a CUDA device:
print(device)
Dx = Discriminator().to(device)
Gx = UNet(3, 3).to(device)
Dy = Discriminator().to(device)
Gy = UNet(3, 3).to(device)
ld = False
if ld:
try:
Gx.load_state_dict(torch.load('./genx'))
Dx.load_state_dict(torch.load('./fcnx'))
Gy.load_state_dict(torch.load('./geny'))
Dy.load_state_dict(torch.load('./fcny'))
print('net loaded')
except Exception as e:
print(e)
dataset = 'ukiyoe2photo'
# trainA holds 562 images; trainB is subsampled to the same size below (f_size=562)
image_path_A = './datasets/'+dataset+'/trainA/*.jpg'
image_path_B = './datasets/'+dataset+'/trainB/*.jpg'
plt.ion()
train_image_paths_A = glob.glob(image_path_A)
train_image_paths_B = glob.glob(image_path_B)
print(len(train_image_paths_A), len(train_image_paths_B))
b_size = 8
train_dataset_A = CustomDataset(
train_image_paths_A, train=True)
train_loader_A = torch.utils.data.DataLoader(
train_dataset_A, batch_size=b_size, shuffle=True, num_workers=4, pin_memory=False, drop_last=True)
train_dataset_B = CustomDataset(
train_image_paths_B, True, 562, train=True)
train_loader_B = torch.utils.data.DataLoader(
train_dataset_B, batch_size=b_size, shuffle=True, num_workers=4, pin_memory=False, drop_last=True)
Gx.train()
Dx.train()
Gy.train()
Dy.train()
criterion = nn.BCEWithLogitsLoss().to(device)
# criterion2 = nn.SmoothL1Loss().to(device)
criterion2 = nn.L1Loss().to(device)
g_lr = 2e-4
d_lr = 2e-4
optimizer_x = optim.Adam(Gx.parameters(), lr=g_lr, betas=(0.5, 0.999))
optimizer_x_d = optim.Adam(Dx.parameters(), lr=d_lr, betas=(0.5, 0.999))
optimizer_y = optim.Adam(Gy.parameters(), lr=g_lr, betas=(0.5, 0.999))
optimizer_y_d = optim.Adam(Dy.parameters(), lr=d_lr, betas=(0.5, 0.999))
# cp = cropper().to(device)
_zero = torch.from_numpy(
np.zeros((b_size, 1))).float().to(device)
_zero.requires_grad = False
_one = torch.from_numpy(
np.ones((b_size, 1))).float().to(device)
_one.requires_grad = False
for epoch in trange(100, desc='epoch'):
# loop = tqdm(zip(train_loader_A, train_loader_B), desc='iteration')
loop = zip(tqdm(train_loader_A, desc='iteration'),
train_loader_B)
batch_idx = 0
for data_A, data_B in loop:
batch_idx += 1
zero = _zero
one = _one
_data_A = data_A.to(device)
_data_B = data_B.to(device)
# Dy loss (A -> B)
gen = Gy(_data_A)
optimizer_y_d.zero_grad()
output2_p = Dy(_data_B.detach())
output_p = Dy(gen.detach())
errD = (criterion(output2_p-torch.mean(output_p), one.detach()) +
criterion(output_p-torch.mean(output2_p), zero.detach()))/2
errD.backward()
optimizer_y_d.step()
# Dx loss (B -> A)
gen = Gx(_data_B)
optimizer_x_d.zero_grad()
output2_p = Dx(_data_A.detach())
output_p = Dx(gen.detach())
errD = (criterion(output2_p-torch.mean(output_p), one.detach()) +
criterion(output_p-torch.mean(output2_p), zero.detach()))/2
errD.backward()
optimizer_x_d.step()
# Gy loss (A -> B)
optimizer_y.zero_grad()
gen = Gy(_data_A)
output_p = Dy(gen)
output2_p = Dy(_data_B.detach())
g_loss = (criterion(output2_p-torch.mean(output_p), zero.detach()) +
criterion(output_p-torch.mean(output2_p), one.detach()))/2
# Gy cycle loss (B -> A -> B)
fA = Gx(_data_B)
gen = Gy(fA.detach())
c_loss = criterion2(gen, _data_B)
errG = g_loss + c_loss
errG.backward()
optimizer_y.step()
if batch_idx % 10 == 0:
fig = plt.figure(1)
fig.clf()
plt.imshow((np.transpose(
_data_B.detach().cpu().numpy()[0], (1, 2, 0))+1)/2)
fig.canvas.draw()
fig.canvas.flush_events()
fig = plt.figure(2)
fig.clf()
plt.imshow((np.transpose(
fA.detach().cpu().numpy()[0], (1, 2, 0))+1)/2)
fig.canvas.draw()
fig.canvas.flush_events()
fig = plt.figure(3)
fig.clf()
plt.imshow((np.transpose(
gen.detach().cpu().numpy()[0], (1, 2, 0))+1)/2)
fig.canvas.draw()
fig.canvas.flush_events()
# Gx loss (B -> A)
optimizer_x.zero_grad()
gen = Gx(_data_B)
output_p = Dx(gen)
output2_p = Dx(_data_A.detach())
g_loss = (criterion(output2_p-torch.mean(output_p), zero.detach()) +
criterion(output_p-torch.mean(output2_p), one.detach()))/2
# Gx cycle loss (A -> B -> A)
fB = Gy(_data_A)
gen = Gx(fB.detach())
c_loss = criterion2(gen, _data_A)
errG = g_loss + c_loss
errG.backward()
optimizer_x.step()
torch.save(Gx.state_dict(), './genx')
torch.save(Dx.state_dict(), './fcnx')
torch.save(Gy.state_dict(), './geny')
torch.save(Dy.state_dict(), './fcny')
print('\nFinished Training')
if __name__ == '__main__':
torch.backends.cudnn.benchmark = True
random.seed()
np.random.seed()
torch.multiprocessing.freeze_support()
run()
```
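Both the discriminator and generator updates in `run()` follow the relativistic average pattern: each logit is compared against the mean logit of the opposite batch before `BCEWithLogitsLoss` is applied. A small helper that factors out this pattern, shown only as an illustrative sketch (it is not part of the repository):
```python
import torch
import torch.nn as nn

bce = nn.BCEWithLogitsLoss()

def relativistic_average_loss(real_logits, fake_logits, one, zero, for_discriminator=True):
    """Relativistic average GAN loss in the form used by the training loop above.

    The discriminator wants real logits above the average fake logit and fake
    logits below the average real logit; the generator uses swapped targets.
    """
    if for_discriminator:
        return (bce(real_logits - torch.mean(fake_logits), one) +
                bce(fake_logits - torch.mean(real_logits), zero)) / 2
    return (bce(real_logits - torch.mean(fake_logits), zero) +
            bce(fake_logits - torch.mean(real_logits), one)) / 2
```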
#### File: joeization/CycleGAN/UNet.py
```python
import torch
import torch.nn as nn
from cbam import cbam_channel, cbam_spatial, cbam
from unet_blocks import unet_down_conv, unet_output_conv, unet_up_conv
from utility import center_crop
class UNet(nn.Module):
'''Implementation of U-Net
<NAME>, <NAME>, and <NAME>.
U-Net: Convolutional Networks for Biomedical Image Segmentation.
International Conference on Medical Image Computing and Computer-Assisted Intervention, 2015.
Args:
in_ch (int): The channels of input
n_classes (int): The output categories, default to 1
'''
def __init__(self, in_ch, n_classes=1, mode='add'):
super(UNet, self).__init__()
# self.drop = nn.Dropout2d(p=0.5)
base = 32
# self.inconv = unet_double_conv(in_ch, base)
self.down1 = unet_down_conv(in_ch, base*1)
self.down2 = unet_down_conv(base*1, base*2)
self.down3 = unet_down_conv(base*2, base*4)
self.down4 = unet_down_conv(base*4, base*8)
self.down5 = unet_down_conv(base*8, base*16)
if mode == 'cat':
self.up1 = unet_up_conv(
base*16, base*16, base*8, mode=mode)
self.up2 = unet_up_conv(
base*8, base*8, base*4, mode=mode)
self.up3 = unet_up_conv(
base*4, base*4, base*2, mode=mode)
self.up4 = unet_up_conv(
base*2, base*2, base*1, mode=mode)
else:
self.up1 = unet_up_conv(
base*16, base*8, base*8, mode=mode)
self.up2 = unet_up_conv(
base*8, base*4, base*4, mode=mode)
self.up3 = unet_up_conv(
base*4, base*2, base*2, mode=mode)
self.up4 = unet_up_conv(
base*2, base*1, base*1, mode=mode)
'''UNet++
self.up11 = unet_up_conv(base*2, base*1*2, base*1)
self.up21 = unet_up_conv(base*4, base*2*2, base*2)
self.up22 = unet_up_conv(base*2, base*1*3, base*1)
self.up31 = unet_up_conv(base*8, base*4*2, base*4)
self.up32 = unet_up_conv(base*4, base*2*3, base*2)
self.up33 = unet_up_conv(base*2, base*1*4, base*1)
self.up41 = unet_up_conv(base*16, base*8*2, base*8)
self.up42 = unet_up_conv(base*8, base*4*3, base*4)
self.up43 = unet_up_conv(base*4, base*2*4, base*2)
self.up44 = unet_up_conv(base*2, base*1*5, base*1)
self.ds1 = unet_output_conv(base, n_classes)
self.ds2 = unet_output_conv(base, n_classes)
self.ds3 = unet_output_conv(base, n_classes)
'''
self.outconv = unet_output_conv(base, n_classes)
def forward(self, x):
# x00 = self.inconv(x00)
x00 = self.down1(x)
x10 = self.down2(x00)
x20 = self.down3(x10)
x30 = self.down4(x20)
x40 = self.down5(x30)
# x30 = self.drop(x30)
# x40 = self.cbam1(x40)
# x30 = self.drop(x30)
x31 = self.up1(x30, x40)
x22 = self.up2(x20, x31)
# x22 = self.cbam2(x22)
x13 = self.up3(x10, x22)
# x13 = self.cbam3(x13)
x04 = self.up4(x00, x13)
'''UNet++
x01 = self.up11([x00], x10)
x11 = self.up21([x10], x20)
x02 = self.up22([x00, x01], x11)
x21 = self.up31([x20], x30)
x12 = self.up32([x10, x11], x21)
x03 = self.up33([x00, x01, x02], x12)
x31 = self.up41([x30], x40)
x22 = self.up42([x20, x21], x31)
x13 = self.up43([x10, x11, x12], x22)
x04 = self.up44([x00, x01, x02, x03], x13)
x01 = center_crop(x01, x04.size()[2], x04.size()[3])
x02 = center_crop(x02, x04.size()[2], x04.size()[3])
x03 = center_crop(x03, x04.size()[2], x04.size()[3])
x1 = self.ds1(x01)
x2 = self.ds2(x02)
x3 = self.ds3(x03)
'''
x = self.outconv(x04)
x = torch.tanh(x)
# x = torch.sigmoid(x)
return x
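# Usage sketch (not part of the original file): cyclegan.py builds the generators
# as UNet(3, 3), i.e. 3-channel images in and out; the final torch.tanh keeps the
# output in [-1, 1], matching the Normalize([0.5]*3, [0.5]*3) preprocessing, e.g.
# net = UNet(3, 3)
# out = net(torch.randn(1, 3, 128, 128))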
``` |
{
"source": "Joeization/pyGaen",
"score": 3
} |
#### File: Joeization/pyGaen/choice.py
```python
class Choice(object):
'''Choice
a selectable option shown to the player during a dialog
content = displayed text
ino = choice id
font = font used to render the text
value = tag of the dialog this choice leads to
weight = amount added to 'san' when this choice is picked
l = horizontal offset used to center the text
'''
def __init__(self, ct, font, ino, val, wei):
self.content = ct
self.ino = ino
self.font = font
self.value = val
self.weight = wei
self.l = (350 - len(self.content) * 15) / 2
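# centers the text horizontally, assuming roughly 15 px per character
# inside the 350 px-wide choice image drawn in blit()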
def blit(self, screen, pos, img):
screen.blit(img, pos)
text_surface = self.font.render(self.content, True, (0, 0, 0))
screen.blit(text_surface, (pos[0] + self.l, pos[1] + 15))
def id(self):
return self.ino
def to(self):
return self.value
def w(self):
return self.weight
def log(self):
s = ['<'+self.content+'>\n']
return s
def cgetpos(i):
'''
get position of the ith choice
'''
return (225, 150 + i * 62.5)
```
#### File: Joeization/pyGaen/main.py
```python
import pygame
try:
import pygame._view
except ImportError:
pass
from choice import *
from bgm import *
from dialog import *
from settings import *
from text import *
from log import *
def main():
pygame.init()
pygame.font.init()
screen = pygame.display.set_mode((800, 600), 0, 32)
pygame.display.set_caption('alpha')
imglib = {}
imglib['load'] = pygame.image.load(resource_path('img/load.png')).convert_alpha()
screen.blit(imglib['load'], (0, 0))
pygame.display.update()
imgres = open(resource_path('src/img.txt'), 'r')
for img in imgres:
tag, tar = map(str, img.strip().split(' '))
imglib[tag] = pygame.image.load(resource_path(tar)).convert_alpha()
sfxlib = {}
sfxres = open(resource_path('src/sfx.txt'), 'r')
for sfx in sfxres:
tag, tar = map(str, sfx.strip().split(' '))
sfxlib[tag] = resource_path(tar)
sfplayer = Bgm('')
ft18 = pygame.font.SysFont('simhei', 18)
ft24 = pygame.font.SysFont('simhei', 24)
ftpk = (ft24, ft18)
setting = Settings(ft18)
cho = Text(resource_path('src/cho.ga'))
dia = Text(resource_path('src/dia.ga'))
dialoglib = {}
choicelib = {}
dpos = 'main'
cpos = '-1'
pick = -1
vmode = 0
'''
0 = normal
1 = image
2 = log
3 = image only (toggled with right-click, hides the dialog UI)
'''
clock = pygame.time.Clock()
san = 0
ddone = False
if dia.has():
while True:
ne = dia.parse()
if ne[0] == -1:
break
elif ne[0] == 0:
dialoglib[ne[7]] = ne
ddone = True
del dia
if cho.has():
while True:
ne = cho.parse()
if ne[0] == -1:
break
elif ne[0] == 1:
choicelib[ne[2]] = ne
del cho
if not ddone:
pygame.quit()
sys.exit()
ddone = False
cdone = False
ce = []
log = Log()
while True:
if not ddone:
dg = Dialog(dialoglib[dpos][1], dialoglib[dpos][2], dialoglib[dpos][3],
dialoglib[dpos][4], dialoglib[dpos][5], dialoglib[dpos][6],
dialoglib[dpos][8], dialoglib[dpos][9])
log.add(dg.log())
ddone = True
cpos = dg.ask()
if not cdone:
if cpos != '-1':
ce = []
for chi in choicelib[cpos][1]:
ce.append(Choice(chi[0], ft18, chi[1], chi[2], chi[3]))
cdone = True
(x, y) = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 3:
if vmode == 0:
vmode = 3
elif vmode == 3:
vmode = 0
if event.button == 4:
if vmode == 0:
vmode = 2
if event.button == 5:
if vmode == 2:
vmode = 0
if event.button == 1:
scl = setting.click((x, y), dpos, cpos, san)
if scl[0] == 0:
#reverse show
pass
elif scl[0] == 1:
#save
pass
elif scl[0] == 2:
#load
dg.reset()
dpos = scl[1][0]
cpos = scl[1][1]
san = scl[1][2]
if vmode == 0 and scl[0] == -1:
if cpos != u'-1':
for c in ce:
(lx, ly) = cgetpos(c.id())
if (x >= lx and x <= lx + 350 and
y >= ly and y <= ly + 50):
pick = c.id()
if pick != -1:
pass
else:
if dg.check():
if dg.nxt() != '-1':
if dg.nxt() == '-2':
pygame.quit()
sys.exit()
dg.reset()
dpos = dg.next(san)
ce = []
ddone = False
cdone = False
screen.blit(imglib['bk'], (0, 0))
if vmode == 0:
dg.blit(screen, whe(dg.wh()), imglib,
sfxlib, sfplayer, pygame.time.get_ticks(), ftpk)
if len(ce) > 0:
for c in ce:
(lx, ly) = cgetpos(c.id())
if (x >= lx and x <= lx + 350 and
y >= ly and y <= ly + 50):
c.blit(screen, (lx, ly), imglib['chiy'])
else:
c.blit(screen, (lx, ly), imglib['chin'])
else:
dg.showimg(screen, whe(dg.wh()), imglib, False)
if vmode == 1:
dg.showimg(screen, whe(dg.wh()), imglib, False)
elif vmode == 2:
screen.blit(imglib['lg'], (200, 100))
log.blit(screen, ft24)
setting.blit(screen, imglib, (x, y))
pygame.display.update()
if pick != -1:
pygame.time.delay(300)
dg.reset()
log.add(ce[pick].log())
dpos = ce[pick].to()
san += ce[pick].w()
ddone = False
cdone = False
ce = []
cpos = -1
pick = -1
clock.tick(60)
#if python says run, then we should run
if __name__ == '__main__':
main()
```
#### File: Joeization/pyGaen/text.py
```python
class Text(object):
'''Load dialog and choice files
dialogs and choices do not have to live in separate files;
the constructor loads the raw text and parse() walks through it
'''
def __init__(self, tar):
f = open(tar, 'r')
self.content = []
for x in f:
sen = x.decode('utf-8').strip()
if len(sen) > 0:
'''
leading # is for comments
just like python
'''
if sen[0] != '#':
if sen[len(sen) - 1] == '\n':
sen = sen[:-1]
self.content.append(sen)
f.close()
self.pos = 0
self.len = 0
def parse(self):
'''
parse the loaded text into dialog or choice entries
the code is very dirty
'''
while self.pos < len(self.content) and len(self.content[self.pos]) == 0:
self.pos += 1
#EOF
if self.pos >= len(self.content):
n = (-1, u'')
# (type, content)
return n
else:
if self.content[self.pos].find('dialog') == 0:
'''
dialog
name = dialog tag
sfx = sfx
poi = san threshold
fi = jmp to fi if less than
se = jmp to se else
choi = choice tag
wh = dialog position
im = dialog image
na = talker's name
bk = background
'''
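# Inferred on-disk layout of one dialog block, based on the reads below
# (illustrative reconstruction, not copied from an actual .ga file):
# dialog <tag>
# <talker name>
# <sfx tag>
# <san threshold> <jump-if-below> <jump-otherwise>
# <choice tag>
# <position number>
# <image tags separated by spaces>
# <background tag>
# <content line(s)>
# end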
s = []
t, name = self.content[self.pos].strip().split(' ')
self.pos += 1
na = self.content[self.pos]
self.pos += 1
sfx = str(self.content[self.pos])
self.pos += 1
poi, fi, se = self.content[self.pos].strip().split(' ')
self.pos += 1
choi = str(self.content[self.pos])
self.pos += 1
wh = long(self.content[self.pos])
self.pos += 1
im = self.content[self.pos].strip().split(' ')
self.pos += 1
bk = self.content[self.pos]
self.pos += 1
'''
load content until end
'''
while not (self.content[self.pos].find('end') == 0 and len(self.content[self.pos]) == 3):
if len(self.content[self.pos]) != 0:
s.append(self.content[self.pos])
self.pos += 1
self.pos += 1
'''
create a dialog
'''
n = (0, s, choi, im, wh, sfx, (long(poi), fi, se), name, na, bk)
# (type, content, ask, image, where, sfx, (branch), name, talker, background)
return n
elif self.content[self.pos].find('choice') == 0:
'''
choice
name = choice tag
c = options
'''
c = []
t, name = map(str, self.content[self.pos].strip().split(' '))
self.pos += 1
cnt = 0
while not (self.content[self.pos].find('end') == 0 and len(self.content[self.pos]) == 3):
if len(self.content[self.pos]) != 0:
s = self.content[self.pos].strip()
self.pos += 1
v = self.content[self.pos].strip()
self.pos += 1
w = long(self.content[self.pos])
self.pos += 1
c.append((s, cnt, v, w))
# (content, id, value, weight)
cnt += 1
self.pos += 1
n = (1, c, name)
# (type, pack, name)
return n
else:
'''
other trash
'''
n = (-1, u'')
return n
def has(self):
return len(self.content) > 0
``` |
{
"source": "joej164/Meeting_Scheduler",
"score": 3
} |
#### File: joej164/Meeting_Scheduler/meeting.py
```python
from settings import settings
from surveymonkey import surveymonkey
def main():
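# End-to-end flow: build the settings (prompting for anything missing), create
# the survey, fetch its details, set the page title, add the questions from
# config.question_filenames, then create a collector and print its URL.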
config = settings.Settings()
survey = surveymonkey.Surveymonkey(config)
r = survey.create_new_survey()
print(r)
s = survey.get_survey_details()
print(s)
t = survey.update_survey_page()
print(t)
print(config.survey_href)
print(config.pages_href)
u = survey.add_survey_questions()
print(u)
v = survey.create_collector()
print(v)
print()
print(f'The survey url is: {v["url"]}')
if __name__ == '__main__':
main()
```
#### File: Meeting_Scheduler/settings/settings.py
```python
import yaml
from datetime import datetime
class SurveySettingsError(Exception):
pass
class Settings():
def __init__(self):
self.token = None
self.valid_months = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
self.valid_days = list(range(1, 32))
self.valid_weekdays = ['Friday', 'Saturday', 'Sunday']
self.valid_years = list(range(2019, 2025)) # end is exclusive, so 2024 is the last valid year; update before 2025
self.meetup_month = None
self.meetup_day = None
self.meetup_weekday = None
self.meetup_year = None
self.meetup_time = None
self.survey_api_url = "https://api.surveymonkey.com/v3/"
self.survey_month = None
self.survey_days = None
self.survey_year = None
self.survey_weekday = None
self.survey_title = None
self.survey_time = None
self.survey_page_title = None
self.question_filenames = ["question1.j2", "question2.j2", "question3.j2"]
self.load_settings()
self.prompt_for_missing_info()
self.calculate_and_validate_weekdays()
self.set_survey_names()
self.calculate_meeting_time()
# Load data from the settings.yml file
def load_settings(self):
with open("settings.yml", "r") as settings_file:
data = yaml.safe_load(settings_file)
for k, v in data.items():
setattr(self, k, v)
def set_survey_names(self):
if not self.survey_title:
self.survey_title = f"{self.survey_month} {self.survey_year} - PDT Alumni Meetup"
if not self.survey_page_title:
self.survey_page_title = f"{self.survey_month} {self.survey_year} - PDT Alumni Meetup Test"
def calculate_and_validate_weekdays(self):
meetup_weekday = self.calculate_weekday(self.meetup_year, self.meetup_month, self.meetup_day)
if meetup_weekday not in self.valid_weekdays:
raise ValueError(f"The day of the next meetup is not in {self.valid_weekdays}")
else:
self.meetup_weekday = meetup_weekday
survey_weekdays = []
for day in self.survey_days:
survey_weekdays.append(self.calculate_weekday(self.survey_year, self.survey_month, day))
if not all(x == survey_weekdays[0] for x in survey_weekdays):
raise ValueError(f"Not all of the survey days are on the same day")
else:
self.survey_weekday = survey_weekdays[0]
@staticmethod
def calculate_weekday(year, month, day):
# Month is the name of the Full Month
day = datetime.strptime(f"{year} {month} {day}", "%Y %B %d")
weekday = datetime.strftime(day, "%A")
return weekday
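# e.g. calculate_weekday(2020, "March", 13) returns "Friday"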
def calculate_meeting_time(self):
# Based on the day of week, return the time of the get together
day_to_time = {
"Friday": "7-9pm",
"Saturday": "6-8pm",
"Sunday": "1-3pm"
}
if self.survey_weekday not in day_to_time.keys():
raise SurveySettingsError(f"The value '{self.survey_weekday}' was not in the conversion dictionary")
else:
self.survey_time = day_to_time[self.survey_weekday]
def prompt_for_missing_info(self):
# Prompt the user for any data missing after the settings load
while self.meetup_month not in self.valid_months:
month = input("Enter the Month for the next Meetup: ")
if month not in self.valid_months:
print(f"Invalid Month! Enter a month in the following format: {self.valid_months}")
else:
self.meetup_month = month
while self.meetup_day not in self.valid_days:
try:
meetup_day = int(input("Enter the day of the month for the next meetup: "))
except ValueError:
meetup_day = None
if meetup_day not in self.valid_days:
print(f"Invalid Day! Enter a day in the following format: {self.valid_days}")
else:
self.meetup_day = meetup_day
while self.meetup_year not in self.valid_years:
try:
meetup_year = int(input("Enter the year for the next meetup: "))
except ValueError:
meetup_year = None
if meetup_year not in self.valid_years:
print(f"Enter a valid year from the list of valid years: {self.valid_years}")
else:
self.meetup_year = meetup_year
while self.survey_month not in self.valid_months:
month = input("Enter the Month for the next Survey: ")
if month not in self.valid_months:
print(f"Invalid Month! Enter a month in the following format: {self.valid_months}")
else:
self.survey_month = month
while not self.survey_days or not all(x in self.valid_days for x in self.survey_days):
survey_days = input("Enter the days of the month for the next meetup as numbers separated by spaces (Ex: 2 4 6): ")
days = [int(x) for x in survey_days.split() if x.isdigit() and int(x) in self.valid_days]
if not days:
print(f"No valid days. Enter a at least one valid day from the following list: {self.valid_days}")
else:
self.survey_days = days
while self.survey_year not in self.valid_years:
try:
survey_year = int(input("Enter the year for the next survey: "))
except ValueError:
survey_year = None
if survey_year not in self.valid_years:
print(f"Enter a valid year from the list of valid years: {self.valid_years}")
else:
self.survey_year = survey_year
``` |
{
"source": "joej164/molecule",
"score": 2
} |
#### File: unit/provisioner/test_ansible.py
```python
import collections
import os
import pytest
from molecule import config, util
from molecule.provisioner import ansible, ansible_playbooks
from molecule.test.conftest import is_subset
@pytest.fixture
def _patched_ansible_playbook(mocker):
m = mocker.patch("molecule.provisioner.ansible_playbook.AnsiblePlaybook")
m.return_value.execute.return_value = b"patched-ansible-playbook-stdout"
return m
@pytest.fixture
def _patched_write_inventory(mocker):
return mocker.patch("molecule.provisioner.ansible.Ansible._write_inventory")
@pytest.fixture
def _patched_remove_vars(mocker):
return mocker.patch("molecule.provisioner.ansible.Ansible._remove_vars")
@pytest.fixture
def _patched_link_or_update_vars(mocker):
return mocker.patch("molecule.provisioner.ansible.Ansible._link_or_update_vars")
@pytest.fixture
def _provisioner_section_data():
return {
"provisioner": {
"name": "ansible",
"config_options": {"defaults": {"foo": "bar"}},
"connection_options": {"foo": "bar"},
"options": {"foo": "bar", "become": True, "v": True},
"env": {
"FOO": "bar",
"ANSIBLE_ROLES_PATH": "foo/bar",
"ANSIBLE_LIBRARY": "foo/bar",
"ANSIBLE_FILTER_PLUGINS": "foo/bar",
},
"inventory": {
"hosts": {
"all": {
"hosts": {"extra-host-01": {}},
"children": {"extra-group": {"hosts": ["extra-host-01"]}},
}
},
"host_vars": {
"instance-1": [{"foo": "bar"}],
"localhost": [{"foo": "baz"}],
},
"group_vars": {
"example_group1": [{"foo": "bar"}],
"example_group2": [{"foo": "bar"}],
},
},
}
}
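# Used via indirect parametrization: tests marked with
# config_instance=["_provisioner_section_data"] get a scenario whose provisioner
# section contains the values above, and the assertions below check how those
# values merge with the provisioner's built-in defaults.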
@pytest.fixture
def _instance(_provisioner_section_data, config_instance):
return ansible.Ansible(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_config_options_property(_instance):
x = {
"defaults": {
"ansible_managed": "Ansible managed: Do NOT edit this file manually!",
"display_failed_stderr": True,
"forks": 50,
"host_key_checking": False,
"interpreter_python": "auto",
"nocows": 1,
"retry_files_enabled": False,
},
"ssh_connection": {
"control_path": "%(directory)s/%%h-%%p-%%r",
"scp_if_ssh": True,
},
}
assert x == _instance.default_config_options
def test_default_options_property(_instance):
assert {"skip-tags": "molecule-notest,notest"} == _instance.default_options
def test_default_env_property(_instance):
x = _instance._config.provisioner.config_file
assert x == _instance.default_env["ANSIBLE_CONFIG"]
assert "MOLECULE_FILE" in _instance.default_env
assert "MOLECULE_INVENTORY_FILE" in _instance.default_env
assert "MOLECULE_SCENARIO_DIRECTORY" in _instance.default_env
assert "MOLECULE_INSTANCE_CONFIG" in _instance.default_env
assert "ANSIBLE_CONFIG" in _instance.env
assert "ANSIBLE_ROLES_PATH" in _instance.env
assert "ANSIBLE_LIBRARY" in _instance.env
assert "ANSIBLE_FILTER_PLUGINS" in _instance.env
def test_name_property(_instance):
assert "ansible" == _instance.name
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_config_options_property(_instance):
x = {
"defaults": {
"ansible_managed": "Ansible managed: Do NOT edit this file manually!",
"display_failed_stderr": True,
"foo": "bar",
"forks": 50,
"host_key_checking": False,
"interpreter_python": "auto",
"nocows": 1,
"retry_files_enabled": False,
},
"ssh_connection": {
"control_path": "%(directory)s/%%h-%%p-%%r",
"scp_if_ssh": True,
},
}
assert x == _instance.config_options
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_options_property(_instance):
x = {"become": True, "foo": "bar", "v": True, "skip-tags": "molecule-notest,notest"}
assert x == _instance.options
def test_options_property_does_not_merge(_instance):
for action in ["create", "destroy"]:
_instance._config.action = action
assert {"skip-tags": "molecule-notest,notest"} == _instance.options
def test_options_property_handles_cli_args(_instance):
_instance._config.args = {"debug": True}
x = {
"vvv": True,
"become": True,
"diff": True,
"skip-tags": "molecule-notest,notest",
}
assert x == _instance.options
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_env_property(_instance):
x = _instance._config.provisioner.config_file
assert x == _instance.env["ANSIBLE_CONFIG"]
assert "bar" == _instance.env["FOO"]
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_env_appends_env_property(_instance):
x = [
util.abs_path(
os.path.join(_instance._config.scenario.ephemeral_directory, "roles")
),
util.abs_path(
os.path.join(_instance._config.project_directory, os.path.pardir)
),
util.abs_path(os.path.join(os.path.expanduser("~"), ".ansible", "roles")),
"/usr/share/ansible/roles",
"/etc/ansible/roles",
util.abs_path(os.path.join(_instance._config.scenario.directory, "foo", "bar")),
]
assert x == _instance.env["ANSIBLE_ROLES_PATH"].split(":")
x = _instance._get_modules_directories()
x.append(
util.abs_path(os.path.join(_instance._config.scenario.directory, "foo", "bar"))
)
assert x == _instance.env["ANSIBLE_LIBRARY"].split(":")
x = [
_instance._get_filter_plugin_directory(),
util.abs_path(
os.path.join(
_instance._config.scenario.ephemeral_directory, "plugins", "filter"
)
),
util.abs_path(
os.path.join(_instance._config.project_directory, "plugins", "filter")
),
util.abs_path(
os.path.join(os.path.expanduser("~"), ".ansible", "plugins", "filter")
),
"/usr/share/ansible/plugins/filter",
util.abs_path(os.path.join(_instance._config.scenario.directory, "foo", "bar")),
]
assert x == _instance.env["ANSIBLE_FILTER_PLUGINS"].split(":")
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_host_vars_property(_instance):
x = {"instance-1": [{"foo": "bar"}], "localhost": [{"foo": "baz"}]}
assert x == _instance.host_vars
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_group_vars_property(_instance):
x = {"example_group1": [{"foo": "bar"}], "example_group2": [{"foo": "bar"}]}
assert x == _instance.group_vars
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_hosts_property(_instance):
hosts = {
"all": {
"hosts": {"extra-host-01": {}},
"children": {"extra-group": {"hosts": ["extra-host-01"]}},
}
}
assert hosts == _instance.hosts
def test_links_property(_instance):
assert {} == _instance.links
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_inventory_property(_instance):
x = {
"ungrouped": {"vars": {}},
"bar": {
"hosts": {"instance-1": {"foo": "bar", "ansible_connection": "docker"}},
"children": {
"child1": {
"hosts": {
"instance-1": {"foo": "bar", "ansible_connection": "docker"}
}
}
},
"vars": {
"molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
"molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
"molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
"molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
"molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
"molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
},
},
"all": {
"hosts": {
"instance-1": {"foo": "bar", "ansible_connection": "docker"},
"instance-2": {"foo": "bar", "ansible_connection": "docker"},
},
"vars": {
"molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
"molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
"molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
"molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
"molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
"molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
},
},
"foo": {
"hosts": {
"instance-1": {"foo": "bar", "ansible_connection": "docker"},
"instance-2": {"foo": "bar", "ansible_connection": "docker"},
},
"children": {
"child1": {
"hosts": {
"instance-1": {"foo": "bar", "ansible_connection": "docker"}
}
},
"child2": {
"hosts": {
"instance-2": {"foo": "bar", "ansible_connection": "docker"}
}
},
},
"vars": {
"molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
"molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
"molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
"molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
"molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
"molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
},
},
"baz": {
"hosts": {"instance-2": {"foo": "bar", "ansible_connection": "docker"}},
"children": {
"child2": {
"hosts": {
"instance-2": {"foo": "bar", "ansible_connection": "docker"}
}
}
},
"vars": {
"molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
"molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
"molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
"molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
"molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
"molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
},
},
}
assert is_subset(x, _instance.inventory)
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_inventory_property_handles_missing_groups(temp_dir, _instance):
platforms = [{"name": "instance-1"}, {"name": "instance-2"}]
_instance._config.config["platforms"] = platforms
x = {
"ungrouped": {
"hosts": {
"instance-1": {"foo": "bar", "ansible_connection": "docker"},
"instance-2": {"foo": "bar", "ansible_connection": "docker"},
},
"vars": {},
},
"all": {
"hosts": {
"instance-1": {"foo": "bar", "ansible_connection": "docker"},
"instance-2": {"foo": "bar", "ansible_connection": "docker"},
},
"vars": {
"molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
"molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
"molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
"molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
"molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
"molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
},
},
}
assert is_subset(x, _instance.inventory)
def test_inventory_directory_property(_instance):
x = os.path.join(_instance._config.scenario.ephemeral_directory, "inventory")
assert x == _instance.inventory_directory
def test_inventory_file_property(_instance):
x = os.path.join(
_instance._config.scenario.inventory_directory, "ansible_inventory.yml"
)
assert x == _instance.inventory_file
def test_config_file_property(_instance):
x = os.path.join(_instance._config.scenario.ephemeral_directory, "ansible.cfg")
assert x == _instance.config_file
def test_playbooks_property(_instance):
assert isinstance(_instance.playbooks, ansible_playbooks.AnsiblePlaybooks)
def test_directory_property(_instance):
result = _instance.directory
parts = pytest.helpers.os_split(result)
assert ("molecule", "provisioner", "ansible") == parts[-3:]
def test_playbooks_cleaned_property_is_optional(_instance):
assert _instance.playbooks.cleanup is None
def test_playbooks_create_property(_instance):
x = os.path.join(
_instance._config.provisioner.playbooks._get_playbook_directory(),
"docker",
"create.yml",
)
assert x == _instance.playbooks.create
def test_playbooks_converge_property(_instance):
x = os.path.join(_instance._config.scenario.directory, "converge.yml")
assert x == _instance.playbooks.converge
def test_playbooks_destroy_property(_instance):
x = os.path.join(
_instance._config.provisioner.playbooks._get_playbook_directory(),
"docker",
"destroy.yml",
)
assert x == _instance.playbooks.destroy
def test_playbooks_side_effect_property(_instance):
assert _instance.playbooks.side_effect is None
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_connection_options(_instance):
x = {"ansible_connection": "docker", "foo": "bar"}
assert is_subset(x, _instance.connection_options("foo"))
def test_check(_instance, mocker, _patched_ansible_playbook):
_instance.check()
_patched_ansible_playbook.assert_called_once_with(
_instance._config.provisioner.playbooks.converge, _instance._config
)
_patched_ansible_playbook.return_value.add_cli_arg.assert_called_once_with(
"check", True
)
_patched_ansible_playbook.return_value.execute.assert_called_once_with()
def test_converge(_instance, mocker, _patched_ansible_playbook):
result = _instance.converge()
_patched_ansible_playbook.assert_called_once_with(
_instance._config.provisioner.playbooks.converge, _instance._config
)
# NOTE(retr0h): This is not the true return type. This is a mock return
# which didn't go through str.decode().
assert result == b"patched-ansible-playbook-stdout"
_patched_ansible_playbook.return_value.execute.assert_called_once_with()
def test_converge_with_playbook(_instance, mocker, _patched_ansible_playbook):
result = _instance.converge("playbook")
_patched_ansible_playbook.assert_called_once_with("playbook", _instance._config)
# NOTE(retr0h): This is not the true return type. This is a mock return
# which didn't go through str.decode().
assert result == b"patched-ansible-playbook-stdout"
_patched_ansible_playbook.return_value.execute.assert_called_once_with()
def test_cleanup(_instance, mocker, _patched_ansible_playbook):
_instance.cleanup()
_patched_ansible_playbook.assert_called_once_with(
_instance._config.provisioner.playbooks.cleanup, _instance._config
)
_patched_ansible_playbook.return_value.execute.assert_called_once_with()
def test_destroy(_instance, mocker, _patched_ansible_playbook):
_instance.destroy()
_patched_ansible_playbook.assert_called_once_with(
_instance._config.provisioner.playbooks.destroy, _instance._config
)
_patched_ansible_playbook.return_value.execute.assert_called_once_with()
def test_side_effect(_instance, mocker, _patched_ansible_playbook):
_instance.side_effect()
_patched_ansible_playbook.assert_called_once_with(
_instance._config.provisioner.playbooks.side_effect, _instance._config
)
_patched_ansible_playbook.return_value.execute.assert_called_once_with()
def test_create(_instance, mocker, _patched_ansible_playbook):
_instance.create()
_patched_ansible_playbook.assert_called_once_with(
_instance._config.provisioner.playbooks.create, _instance._config
)
_patched_ansible_playbook.return_value.execute.assert_called_once_with()
def test_prepare(_instance, mocker, _patched_ansible_playbook):
_instance.prepare()
_patched_ansible_playbook.assert_called_once_with(
_instance._config.provisioner.playbooks.prepare, _instance._config
)
_patched_ansible_playbook.return_value.execute.assert_called_once_with()
def test_syntax(_instance, mocker, _patched_ansible_playbook):
_instance.syntax()
_patched_ansible_playbook.assert_called_once_with(
_instance._config.provisioner.playbooks.converge, _instance._config
)
_patched_ansible_playbook.return_value.add_cli_arg.assert_called_once_with(
"syntax-check", True
)
_patched_ansible_playbook.return_value.execute.assert_called_once_with()
def test_verify(_instance, mocker, _patched_ansible_playbook):
_instance.verify()
if _instance._config.provisioner.playbooks.verify:
_patched_ansible_playbook.assert_called_once_with(
_instance._config.provisioner.playbooks.verify, _instance._config
)
_patched_ansible_playbook.return_value.execute.assert_called_once_with()
def test_write_config(temp_dir, _instance):
_instance.write_config()
assert os.path.isfile(_instance.config_file)
def test_manage_inventory(
_instance,
_patched_write_inventory,
_patched_remove_vars,
patched_add_or_update_vars,
_patched_link_or_update_vars,
):
_instance.manage_inventory()
_patched_write_inventory.assert_called_once_with()
_patched_remove_vars.assert_called_once_with()
patched_add_or_update_vars.assert_called_once_with()
assert not _patched_link_or_update_vars.called
def test_manage_inventory_with_links(
_instance,
_patched_write_inventory,
_patched_remove_vars,
patched_add_or_update_vars,
_patched_link_or_update_vars,
):
c = _instance._config.config
c["provisioner"]["inventory"]["links"] = {"foo": "bar"}
_instance.manage_inventory()
_patched_write_inventory.assert_called_once_with()
_patched_remove_vars.assert_called_once_with()
assert not patched_add_or_update_vars.called
_patched_link_or_update_vars.assert_called_once_with()
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_add_or_update_vars(_instance):
inventory_dir = _instance._config.scenario.inventory_directory
host_vars_directory = os.path.join(inventory_dir, "host_vars")
host_vars = os.path.join(host_vars_directory, "instance-1")
_instance._add_or_update_vars()
assert os.path.isdir(host_vars_directory)
assert os.path.isfile(host_vars)
host_vars_localhost = os.path.join(host_vars_directory, "localhost")
assert os.path.isfile(host_vars_localhost)
group_vars_directory = os.path.join(inventory_dir, "group_vars")
group_vars_1 = os.path.join(group_vars_directory, "example_group1")
group_vars_2 = os.path.join(group_vars_directory, "example_group2")
assert os.path.isdir(group_vars_directory)
assert os.path.isfile(group_vars_1)
assert os.path.isfile(group_vars_2)
hosts = os.path.join(inventory_dir, "hosts")
assert os.path.isfile(hosts)
assert util.safe_load_file(hosts) == _instance.hosts
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_add_or_update_vars_without_host_vars(_instance):
c = _instance._config.config
c["provisioner"]["inventory"]["host_vars"] = {}
inventory_dir = _instance._config.scenario.inventory_directory
host_vars_directory = os.path.join(inventory_dir, "host_vars")
host_vars = os.path.join(host_vars_directory, "instance-1")
_instance._add_or_update_vars()
assert not os.path.isdir(host_vars_directory)
assert not os.path.isfile(host_vars)
host_vars_localhost = os.path.join(host_vars_directory, "localhost")
assert not os.path.isfile(host_vars_localhost)
group_vars_directory = os.path.join(inventory_dir, "group_vars")
group_vars_1 = os.path.join(group_vars_directory, "example_group1")
group_vars_2 = os.path.join(group_vars_directory, "example_group2")
assert os.path.isdir(group_vars_directory)
assert os.path.isfile(group_vars_1)
assert os.path.isfile(group_vars_2)
hosts = os.path.join(inventory_dir, "hosts")
assert os.path.isfile(hosts)
assert util.safe_load_file(hosts) == _instance.hosts
def test_add_or_update_vars_does_not_create_vars(_instance):
c = _instance._config.config
c["provisioner"]["inventory"]["hosts"] = {}
c["provisioner"]["inventory"]["host_vars"] = {}
c["provisioner"]["inventory"]["group_vars"] = {}
inventory_dir = _instance._config.scenario.inventory_directory
hosts = os.path.join(inventory_dir, "hosts")
host_vars_directory = os.path.join(inventory_dir, "host_vars")
group_vars_directory = os.path.join(inventory_dir, "group_vars")
_instance._add_or_update_vars()
assert not os.path.isdir(host_vars_directory)
assert not os.path.isdir(group_vars_directory)
assert not os.path.isfile(hosts)
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_write_inventory(temp_dir, _instance):
_instance._write_inventory()
assert os.path.isfile(_instance.inventory_file)
data = util.safe_load_file(_instance.inventory_file)
x = {
"ungrouped": {"vars": {}},
"bar": {
"hosts": {"instance-1": {"foo": "bar", "ansible_connection": "docker"}},
"children": {
"child1": {
"hosts": {
"instance-1": {"foo": "bar", "ansible_connection": "docker"}
}
}
},
"vars": {
"molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
"molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
"molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
"molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
"molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
"molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
},
},
"all": {
"hosts": {
"instance-1": {"foo": "bar", "ansible_connection": "docker"},
"instance-2": {"foo": "bar", "ansible_connection": "docker"},
},
"vars": {
"molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
"molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
"molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
"molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
"molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
"molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
},
},
"foo": {
"hosts": {
"instance-1": {"foo": "bar", "ansible_connection": "docker"},
"instance-2": {"foo": "bar", "ansible_connection": "docker"},
},
"children": {
"child1": {
"hosts": {
"instance-1": {"foo": "bar", "ansible_connection": "docker"}
}
},
"child2": {
"hosts": {
"instance-2": {"foo": "bar", "ansible_connection": "docker"}
}
},
},
"vars": {
"molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
"molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
"molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
"molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
"molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
"molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
},
},
"baz": {
"hosts": {"instance-2": {"foo": "bar", "ansible_connection": "docker"}},
"children": {
"child2": {
"hosts": {
"instance-2": {"foo": "bar", "ansible_connection": "docker"}
}
}
},
"vars": {
"molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
"molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
"molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
"molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
"molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
"molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
"molecule_yml.provisioner.log|default(False) | bool }}",
},
},
}
assert is_subset(x, data)
@pytest.mark.parametrize(
"config_instance", ["_provisioner_section_data"], indirect=True
)
def test_remove_vars(_instance):
inventory_dir = _instance._config.scenario.inventory_directory
hosts = os.path.join(inventory_dir, "hosts")
host_vars_directory = os.path.join(inventory_dir, "host_vars")
host_vars = os.path.join(host_vars_directory, "instance-1")
_instance._add_or_update_vars()
assert os.path.isfile(hosts)
assert os.path.isdir(host_vars_directory)
assert os.path.isfile(host_vars)
host_vars_localhost = os.path.join(host_vars_directory, "localhost")
assert os.path.isfile(host_vars_localhost)
group_vars_directory = os.path.join(inventory_dir, "group_vars")
group_vars_1 = os.path.join(group_vars_directory, "example_group1")
group_vars_2 = os.path.join(group_vars_directory, "example_group2")
assert os.path.isdir(group_vars_directory)
assert os.path.isfile(group_vars_1)
assert os.path.isfile(group_vars_2)
_instance._remove_vars()
assert not os.path.isfile(hosts)
assert not os.path.isdir(host_vars_directory)
assert not os.path.isdir(group_vars_directory)
def test_remove_vars_symlinks(_instance):
inventory_dir = _instance._config.scenario.inventory_directory
source_group_vars = os.path.join(inventory_dir, os.path.pardir, "group_vars")
target_group_vars = os.path.join(inventory_dir, "group_vars")
os.mkdir(source_group_vars)
os.symlink(source_group_vars, target_group_vars)
_instance._remove_vars()
assert not os.path.lexists(target_group_vars)
def test_link_vars(_instance):
c = _instance._config.config
c["provisioner"]["inventory"]["links"] = {
"hosts": "../hosts",
"group_vars": "../group_vars",
"host_vars": "../host_vars",
}
inventory_dir = _instance._config.scenario.inventory_directory
scenario_dir = _instance._config.scenario.directory
source_hosts = os.path.join(scenario_dir, os.path.pardir, "hosts")
target_hosts = os.path.join(inventory_dir, "hosts")
source_group_vars = os.path.join(scenario_dir, os.path.pardir, "group_vars")
target_group_vars = os.path.join(inventory_dir, "group_vars")
source_host_vars = os.path.join(scenario_dir, os.path.pardir, "host_vars")
target_host_vars = os.path.join(inventory_dir, "host_vars")
open(source_hosts, "w").close()
os.mkdir(source_group_vars)
os.mkdir(source_host_vars)
_instance._link_or_update_vars()
assert os.path.lexists(target_hosts)
assert os.path.lexists(target_group_vars)
assert os.path.lexists(target_host_vars)
def test_link_vars_raises_when_source_not_found(_instance, patched_logger_critical):
c = _instance._config.config
c["provisioner"]["inventory"]["links"] = {"foo": "../bar"}
with pytest.raises(SystemExit) as e:
_instance._link_or_update_vars()
assert 1 == e.value.code
source = os.path.join(_instance._config.scenario.directory, os.path.pardir, "bar")
msg = "The source path '{}' does not exist.".format(source)
patched_logger_critical.assert_called_once_with(msg)
def test_verify_inventory(_instance):
_instance._verify_inventory()
def test_verify_inventory_raises_when_missing_hosts(
temp_dir, patched_logger_critical, _instance
):
_instance._config.config["platforms"] = []
with pytest.raises(SystemExit) as e:
_instance._verify_inventory()
assert 1 == e.value.code
msg = "Instances missing from the 'platform' section of molecule.yml."
patched_logger_critical.assert_called_once_with(msg)
def test_vivify(_instance):
d = _instance._vivify()
d["bar"]["baz"] = "qux"
assert "qux" == str(d["bar"]["baz"])
def test_default_to_regular(_instance):
d = collections.defaultdict()
assert isinstance(d, collections.defaultdict)
d = _instance._default_to_regular(d)
assert isinstance(d, dict)
def test_get_plugin_directory(_instance):
result = _instance._get_plugin_directory()
parts = pytest.helpers.os_split(result)
assert ("molecule", "provisioner", "ansible", "plugins") == parts[-4:]
def test_get_modules_directories(_instance, monkeypatch):
result = _instance._get_modules_directories()[0]
parts = pytest.helpers.os_split(result)
x = ("molecule", "provisioner", "ansible", "plugins", "modules")
assert x == parts[-5:]
lib_prev = os.environ.get("ANSIBLE_LIBRARY")
monkeypatch.setenv("ANSIBLE_LIBRARY", "/foo/bar")
result = _instance._get_modules_directories()[-1]
monkeypatch.setenv("ANSIBLE_LIBRARY", lib_prev if lib_prev else "")
env_lib_result_parts = pytest.helpers.os_split(result)
env_lib_expected_parts = ("foo", "bar")
assert env_lib_result_parts == env_lib_expected_parts[-2:]
def test_get_filter_plugin_directory(_instance):
result = _instance._get_filter_plugin_directory()
parts = pytest.helpers.os_split(result)
x = ("molecule", "provisioner", "ansible", "plugins", "filter")
assert x == parts[-5:]
def test_absolute_path_for(_instance):
env = {"foo": "foo:bar"}
x = ":".join(
[
os.path.join(_instance._config.scenario.directory, "foo"),
os.path.join(_instance._config.scenario.directory, "bar"),
]
)
assert x == _instance._absolute_path_for(env, "foo")
def test_absolute_path_for_raises_with_missing_key(_instance):
env = {"foo": "foo:bar"}
with pytest.raises(KeyError):
_instance._absolute_path_for(env, "invalid")
``` |
{
"source": "joejacobs/octodns",
"score": 2
} |
#### File: octodns/tests/test_octodns_provider_yaml.py
```python
from __future__ import absolute_import, division, print_function, \
unicode_literals
from os import makedirs
from os.path import basename, dirname, isdir, isfile, join
from unittest import TestCase
from yaml import safe_load
from yaml.constructor import ConstructorError
from octodns.record import Create
from octodns.provider.base import Plan
from octodns.provider.yaml import _list_all_yaml_files, \
SplitYamlProvider, YamlProvider
from octodns.zone import SubzoneRecordException, Zone
from helpers import TemporaryDirectory
class TestYamlProvider(TestCase):
def test_provider(self):
source = YamlProvider('test', join(dirname(__file__), 'config'))
zone = Zone('unit.tests.', [])
dynamic_zone = Zone('dynamic.tests.', [])
# With target we don't add anything
source.populate(zone, target=source)
self.assertEquals(0, len(zone.records))
# without it we see everything
source.populate(zone)
self.assertEquals(18, len(zone.records))
source.populate(dynamic_zone)
self.assertEquals(5, len(dynamic_zone.records))
# Assumption here is that a clean round-trip means that everything
# worked as expected, data that went in came back out and could be
# pulled in yet again and still match up. That assumes that the input
# data completely exercises things. This assumption can be tested
# relatively well by running
# ./script/coverage tests/test_octodns_provider_yaml.py and
# looking at the coverage file
# ./htmlcov/octodns_provider_yaml_py.html
with TemporaryDirectory() as td:
# Add some subdirs to make sure that it can create them
directory = join(td.dirname, 'sub', 'dir')
yaml_file = join(directory, 'unit.tests.yaml')
dynamic_yaml_file = join(directory, 'dynamic.tests.yaml')
target = YamlProvider('test', directory)
# We add everything
plan = target.plan(zone)
self.assertEquals(15, len(filter(lambda c: isinstance(c, Create),
plan.changes)))
self.assertFalse(isfile(yaml_file))
# Now actually do it
self.assertEquals(15, target.apply(plan))
self.assertTrue(isfile(yaml_file))
# Dynamic plan
plan = target.plan(dynamic_zone)
self.assertEquals(5, len(filter(lambda c: isinstance(c, Create),
plan.changes)))
self.assertFalse(isfile(dynamic_yaml_file))
# Apply it
self.assertEquals(5, target.apply(plan))
self.assertTrue(isfile(dynamic_yaml_file))
# There should be no changes after the round trip
reloaded = Zone('unit.tests.', [])
target.populate(reloaded)
self.assertDictEqual(
{'included': ['test']},
filter(
lambda x: x.name == 'included', reloaded.records
)[0]._octodns)
self.assertFalse(zone.changes(reloaded, target=source))
# A 2nd sync should still create everything
plan = target.plan(zone)
self.assertEquals(15, len(filter(lambda c: isinstance(c, Create),
plan.changes)))
with open(yaml_file) as fh:
data = safe_load(fh.read())
# '' has some of both
roots = sorted(data.pop(''), key=lambda r: r['type'])
self.assertTrue('values' in roots[0]) # A
self.assertTrue('geo' in roots[0]) # geo made the trip
self.assertTrue('value' in roots[1]) # CAA
self.assertTrue('values' in roots[2]) # SSHFP
# these are stored as plural 'values'
self.assertTrue('values' in data.pop('_srv._tcp'))
self.assertTrue('values' in data.pop('mx'))
self.assertTrue('values' in data.pop('naptr'))
self.assertTrue('values' in data.pop('sub'))
self.assertTrue('values' in data.pop('txt'))
# these are stored as singular 'value'
self.assertTrue('value' in data.pop('aaaa'))
self.assertTrue('value' in data.pop('cname'))
self.assertTrue('value' in data.pop('included'))
self.assertTrue('value' in data.pop('ptr'))
self.assertTrue('value' in data.pop('spf'))
self.assertTrue('value' in data.pop('www'))
self.assertTrue('value' in data.pop('www.sub'))
# make sure nothing is left
self.assertEquals([], data.keys())
with open(dynamic_yaml_file) as fh:
data = safe_load(fh.read())
# make sure new dynamic records made the trip
dyna = data.pop('a')
self.assertTrue('values' in dyna)
# self.assertTrue('dynamic' in dyna)
# TODO:
# make sure new dynamic records made the trip
dyna = data.pop('aaaa')
self.assertTrue('values' in dyna)
# self.assertTrue('dynamic' in dyna)
dyna = data.pop('cname')
self.assertTrue('value' in dyna)
# self.assertTrue('dynamic' in dyna)
dyna = data.pop('real-ish-a')
self.assertTrue('values' in dyna)
# self.assertTrue('dynamic' in dyna)
dyna = data.pop('simple-weighted')
self.assertTrue('value' in dyna)
# self.assertTrue('dynamic' in dyna)
# make sure nothing is left
self.assertEquals([], data.keys())
def test_empty(self):
source = YamlProvider('test', join(dirname(__file__), 'config'))
zone = Zone('empty.', [])
# without it we see everything
source.populate(zone)
self.assertEquals(0, len(zone.records))
def test_unsorted(self):
source = YamlProvider('test', join(dirname(__file__), 'config'))
zone = Zone('unordered.', [])
with self.assertRaises(ConstructorError):
source.populate(zone)
source = YamlProvider('test', join(dirname(__file__), 'config'),
enforce_order=False)
# no exception
source.populate(zone)
self.assertEqual(2, len(zone.records))
def test_subzone_handling(self):
source = YamlProvider('test', join(dirname(__file__), 'config'))
# If we add `sub` as a sub-zone we'll reject `www.sub`
zone = Zone('unit.tests.', ['sub'])
with self.assertRaises(SubzoneRecordException) as ctx:
source.populate(zone)
self.assertEquals('Record www.sub.unit.tests. is under a managed '
'subzone', ctx.exception.message)
class TestSplitYamlProvider(TestCase):
def test_list_all_yaml_files(self):
yaml_files = ('foo.yaml', '1.yaml', '$unit.tests.yaml')
all_files = ('something', 'else', '1', '$$', '-f') + yaml_files
all_dirs = ('dir1', 'dir2/sub', 'tricky.yaml')
with TemporaryDirectory() as td:
directory = join(td.dirname)
# Create some files, some of them with a .yaml extension, all of
# them empty.
for emptyfile in all_files:
open(join(directory, emptyfile), 'w').close()
# Do the same for some fake directories
for emptydir in all_dirs:
makedirs(join(directory, emptydir))
# This isn't great, but given the variable nature of the temp dir
# names, it's necessary.
self.assertItemsEqual(
yaml_files,
(basename(f) for f in _list_all_yaml_files(directory)))
def test_zone_directory(self):
source = SplitYamlProvider(
'test', join(dirname(__file__), 'config/split'))
zone = Zone('unit.tests.', [])
self.assertEqual(
join(dirname(__file__), 'config/split/unit.tests.'),
source._zone_directory(zone))
def test_apply_handles_existing_zone_directory(self):
with TemporaryDirectory() as td:
provider = SplitYamlProvider('test', join(td.dirname, 'config'))
makedirs(join(td.dirname, 'config', 'does.exist.'))
zone = Zone('does.exist.', [])
self.assertTrue(isdir(provider._zone_directory(zone)))
provider.apply(Plan(None, zone, [], True))
self.assertTrue(isdir(provider._zone_directory(zone)))
def test_provider(self):
source = SplitYamlProvider(
'test', join(dirname(__file__), 'config/split'))
zone = Zone('unit.tests.', [])
dynamic_zone = Zone('dynamic.tests.', [])
# With target we don't add anything
source.populate(zone, target=source)
self.assertEquals(0, len(zone.records))
# without it we see everything
source.populate(zone)
self.assertEquals(18, len(zone.records))
source.populate(dynamic_zone)
self.assertEquals(5, len(dynamic_zone.records))
with TemporaryDirectory() as td:
# Add some subdirs to make sure that it can create them
directory = join(td.dirname, 'sub', 'dir')
zone_dir = join(directory, 'unit.tests.')
dynamic_zone_dir = join(directory, 'dynamic.tests.')
target = SplitYamlProvider('test', directory)
# We add everything
plan = target.plan(zone)
self.assertEquals(15, len(filter(lambda c: isinstance(c, Create),
plan.changes)))
self.assertFalse(isdir(zone_dir))
# Now actually do it
self.assertEquals(15, target.apply(plan))
# Dynamic plan
plan = target.plan(dynamic_zone)
self.assertEquals(5, len(filter(lambda c: isinstance(c, Create),
plan.changes)))
self.assertFalse(isdir(dynamic_zone_dir))
# Apply it
self.assertEquals(5, target.apply(plan))
self.assertTrue(isdir(dynamic_zone_dir))
# There should be no changes after the round trip
reloaded = Zone('unit.tests.', [])
target.populate(reloaded)
self.assertDictEqual(
{'included': ['test']},
filter(
lambda x: x.name == 'included', reloaded.records
)[0]._octodns)
self.assertFalse(zone.changes(reloaded, target=source))
# A 2nd sync should still create everything
plan = target.plan(zone)
self.assertEquals(15, len(filter(lambda c: isinstance(c, Create),
plan.changes)))
yaml_file = join(zone_dir, '$unit.tests.yaml')
self.assertTrue(isfile(yaml_file))
with open(yaml_file) as fh:
data = safe_load(fh.read())
roots = sorted(data.pop(''), key=lambda r: r['type'])
self.assertTrue('values' in roots[0]) # A
self.assertTrue('geo' in roots[0]) # geo made the trip
self.assertTrue('value' in roots[1]) # CAA
self.assertTrue('values' in roots[2]) # SSHFP
# These records are stored as plural "values." Check each file to
# ensure correctness.
for record_name in ('_srv._tcp', 'mx', 'naptr', 'sub', 'txt'):
yaml_file = join(zone_dir, '{}.yaml'.format(record_name))
self.assertTrue(isfile(yaml_file))
with open(yaml_file) as fh:
data = safe_load(fh.read())
self.assertTrue('values' in data.pop(record_name))
# These are stored as singular "value." Again, check each file.
for record_name in ('aaaa', 'cname', 'included', 'ptr', 'spf',
'www.sub', 'www'):
yaml_file = join(zone_dir, '{}.yaml'.format(record_name))
self.assertTrue(isfile(yaml_file))
with open(yaml_file) as fh:
data = safe_load(fh.read())
self.assertTrue('value' in data.pop(record_name))
# Again with the plural, this time checking dynamic.tests.
for record_name in ('a', 'aaaa', 'real-ish-a'):
yaml_file = join(
dynamic_zone_dir, '{}.yaml'.format(record_name))
self.assertTrue(isfile(yaml_file))
with open(yaml_file) as fh:
data = safe_load(fh.read())
dyna = data.pop(record_name)
self.assertTrue('values' in dyna)
self.assertTrue('dynamic' in dyna)
# Singular again.
for record_name in ('cname', 'simple-weighted'):
yaml_file = join(
dynamic_zone_dir, '{}.yaml'.format(record_name))
self.assertTrue(isfile(yaml_file))
with open(yaml_file) as fh:
data = safe_load(fh.read())
dyna = data.pop(record_name)
self.assertTrue('value' in dyna)
self.assertTrue('dynamic' in dyna)
def test_empty(self):
source = SplitYamlProvider(
'test', join(dirname(__file__), 'config/split'))
zone = Zone('empty.', [])
# without it we see everything
source.populate(zone)
self.assertEquals(0, len(zone.records))
def test_unsorted(self):
source = SplitYamlProvider(
'test', join(dirname(__file__), 'config/split'))
zone = Zone('unordered.', [])
with self.assertRaises(ConstructorError):
source.populate(zone)
zone = Zone('unordered.', [])
source = SplitYamlProvider(
'test', join(dirname(__file__), 'config/split'),
enforce_order=False)
# no exception
source.populate(zone)
self.assertEqual(2, len(zone.records))
def test_subzone_handling(self):
source = SplitYamlProvider(
'test', join(dirname(__file__), 'config/split'))
# If we add `sub` as a sub-zone we'll reject `www.sub`
zone = Zone('unit.tests.', ['sub'])
with self.assertRaises(SubzoneRecordException) as ctx:
source.populate(zone)
self.assertEquals('Record www.sub.unit.tests. is under a managed '
'subzone', ctx.exception.message)
``` |
{
"source": "JoeJasinski/password-safe",
"score": 3
} |
#### File: password-safe/safe/crypto.py
```python
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from base64 import b64decode
from safe.exceptions import *
def get_key(text):
try:
key = RSA.importKey(text)
except ValueError, e:
raise EncryptionImportKeyException(e.message)
key_cipher = PKCS1_OAEP.new(key)
return key_cipher
def encrypt(public_key, clear_text):
"""
param: public_key cipher object returned by get_key() (built from a public key)
param: clear_text string to encrypt
return: base64-encoded ciphertext string
"""
ciphertext = public_key.encrypt(clear_text.encode("utf-8"))
return ciphertext.encode('base64')
def decrypt(private_key_string, ciphertext):
'''
param: private_key_string string of your private key
param: ciphertext String to be decrypted
return decrypted string
'''
try:
private_key = RSA.importKey(private_key_string)
except ValueError, e:
raise DecryptionImportKeyException(e.message)
private_key = PKCS1_OAEP.new(private_key)
try:
decrypted = private_key.decrypt(b64decode(ciphertext))
except TypeError, e:
raise DecryptionBase64Exception(e.message)
return decrypted
```
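A minimal round-trip sketch of the crypto helpers above, assuming the PyCrypto / Python 2 environment the module targets; the throwaway key pair below is generated purely for illustration.
```python
from Crypto.PublicKey import RSA
from safe import crypto

# Illustration only: generate a disposable RSA key pair in memory.
key = RSA.generate(2048)
pub_pem = key.publickey().exportKey()
priv_pem = key.exportKey()

cipher = crypto.get_key(pub_pem)            # PKCS1-OAEP cipher built from the public key
token = crypto.encrypt(cipher, u"s3cret")   # base64-encoded ciphertext
assert crypto.decrypt(priv_pem, token) == "s3cret"
```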
#### File: password-safe/safe/forms.py
```python
import hashlib
from django import forms
from safe import crypto
from safe.exceptions import *
from safe.models import Credential
class KeyField(forms.Field):
def __init__(self, *args, **kw):
kwargs = {'widget': forms.Textarea(attrs={'style': "width:100%;"})}
kwargs.update(kw)
super(KeyField, self).__init__(*args, **kwargs)
def to_python(self, value):
return value
def validate(self, value):
super(KeyField, self).validate(value)
# try to import it as an RSA key
try:
crypto.get_key(value)
except EncryptionImportKeyException as e:
raise forms.ValidationError("Failed to import key: %s" % (e.message))
return value
_hashit = lambda s: hashlib.sha1(s).hexdigest()
class CreatePublicKeyForm(forms.Form):
pubkey = KeyField()
def __init__(self, *args, **kw):
show_privkey = kw.pop('show_privkey', None)
super(CreatePublicKeyForm, self).__init__(*args, **kw)
self.fields['pubkey'].widget.attrs['rows'] = "6"
self.fields['pubkey'].widget.attrs['id'] = "pubkey"
if show_privkey:
self.fields['privkey'] = KeyField(required=False)
self.fields['privkey'].widget.attrs['rows'] = "15"
self.fields['privkey'].widget.attrs['id'] = "privkey"
self.fields.keyOrder = ['privkey', 'pubkey']
for name, field in self.fields.items():
if field.widget.attrs.has_key('class'):
field.widget.attrs['class'] += ' form-control'
else:
field.widget.attrs.update({'class': 'form-control'})
@classmethod
def calculate_hash(cls, value):
return _hashit(value)
def clean(self):
cleaned_data = super(CreatePublicKeyForm, self).clean()
if 'pubkey' in cleaned_data:
cleaned_data['hash'] = CreatePublicKeyForm.calculate_hash(
cleaned_data['pubkey'])
return cleaned_data
class CreateUpdateCredentialForm(forms.ModelForm):
class Meta:
model = Credential
widgets = {
'tags': forms.Textarea(attrs={'rows': 1, 'cols': 30}),
}
fields = '__all__'
def __init__(self, edit=False, *args, **kw):
super(CreateUpdateCredentialForm, self).__init__(*args, **kw)
if not edit:
self.fields['secret'] = forms.CharField()
for name, field in self.fields.items():
if field.widget.attrs.has_key('class'):
field.widget.attrs['class'] += ' form-control'
else:
field.widget.attrs.update({'class': 'form-control'})
self.fields.keyOrder = ['title', 'login_name'] + \
[x for x in ['secret'] if not edit] + \
['url', 'tags', 'notes']
class SearchTagField(forms.Form):
q = forms.CharField(required=False)
``` |
{
"source": "JoeJasinski/WindyTransit",
"score": 2
} |
#### File: mobiletrans/mtapi/views.py
```python
from django.contrib.gis.measure import D
from mobiletrans.mtlocation.models import (
TransitRoute, TransitStop, Location, Neighborhood, Zipcode,
TRANSIT_STOP_TYPE_STATION, CityBorder, CTARailLines )
from mobiletrans.mtcore import utils
from django.shortcuts import get_object_or_404
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import serializers
from .serializers import (CTARailLinesSerializer, CityBorderSerializer,
NeighborhoodSerializer, LocationSerializer)
class ListTransitRoutesSerializer(serializers.Serializer):
pass
class ListTransitRoutesView(generics.ListAPIView):
serializer_class = ListTransitRoutesSerializer
lookup_url_kwarg = "route_id"
def get_queryset(self):
kwargs = {}
type = self.request.GET.get('type','').lower()
if type in ['1','3','bus','train']:
if type == "bus":
type = 3
if type == "train":
type = 1
kwargs.update({'type':type})
return TransitRoute.objects.filter(**kwargs)
def get(self, request, *args, **kwargs):
transit_routes = []
self.object_list = self.filter_queryset(self.get_queryset())
for route in self.object_list:
transit_route = route.serialize()
transit_routes.append(transit_route)
return Response({'transit_routes':transit_routes})
class ListTransitRouteView(generics.ListAPIView):
methods_allowed = ('GET',)
def get(self, request, *args, **kwargs):
transit_route_dict = {}
if kwargs.get('uuid'):
kwargs = {'uuid':kwargs['uuid']}
elif kwargs.get('route_id'):
kwargs = {'route_id':kwargs['route_id']}
else:
kwargs = {}
try:
transit_route = TransitRoute.objects.get(**kwargs)
except:
transit_route = []
else:
transit_route_dict = transit_route.serialize()
stops = map(lambda x: x.serialize(), transit_route.transitstop_set.all() )
transit_route_dict.update({'stops':stops,})
return Response({'transit_route':transit_route_dict})
class LocationDataView(generics.ListAPIView):
methods_allowed = ('GET',)
def get(self, request, *args, **kwargs):
context = {}
params = utils.PrepParams(request)
location_objs = Location.objects.filter(
point__distance_lte=(params.ref_pnt, D(**params.d) )).distance(params.ref_pnt).order_by('distance')
if params.point_types_input:
location_objs = location_objs.filter(content_type__model__in=params.point_types)
if params.neighborhood:
neighborhood = map(lambda x: x.serialize(), Neighborhood.objects.filter(area__contains=params.ref_pnt))
context.update({'neighborhood':neighborhood})
if params.zipcode:
zipcode = map(lambda x: x.serialize(), Zipcode.objects.filter(area__contains=params.ref_pnt))
context.update({'zipcode':zipcode})
locations = []
for location, distance in [ (l, l.distance) for l in location_objs.distance(params.ref_pnt) ]:
distance = location.distance
location = location.as_leaf_class().serialize()
location.update({'distance':str(distance)})
locations.append(location,)
context.update({ 'locations': locations[:params.limit], })
return Response(context)
class TransitStopDataView(generics.ListAPIView):
methods_allowed = ('GET',)
def get(self, request, *args, **kwargs):
params = utils.PrepParams(request)
location_objs = TransitStop.orig_objects.filter(location_type=TRANSIT_STOP_TYPE_STATION,
point__distance_lte=(params.ref_pnt, D(**params.d) )).distance(params.ref_pnt).order_by('distance')
if params.point_types_input:
location_objs = location_objs.filter(content_type__model__in=params.point_types)
locations = []
for location, distance in [ (l, l.distance) for l in location_objs.distance(params.ref_pnt) ]:
distance = location.distance
location = location.as_leaf_class().serialize()
location.update({'distance':str(distance)})
locations.append(location,)
return Response({ 'locations': locations[:params.limit], })
class CTARailLinesRoutesView(generics.ListAPIView):
model = CTARailLines
methods_allowed = ('GET',)
serializer_class = CTARailLinesSerializer
lookup_url_kwarg = "objectid"
def get_queryset(self):
if self.kwargs.get('objectid'):
kwargs = {'objectid':self.kwargs['objectid']}
else:
kwargs = {}
return self.model.objects.filter(**kwargs)
class CityBorderView(generics.ListAPIView):
model = CityBorder
methods_allowed = ('GET',)
serializer_class = CityBorderSerializer
lookup_url_kwarg = "name"
def get_queryset(self):
if self.kwargs.get('name'):
kwargs = {'name':self.kwargs['name']}
else:
kwargs = {}
return self.model.objects.filter(**kwargs)
class NeighborhoodFromCoordView(generics.RetrieveAPIView):
model = Neighborhood
methods_allowed = ('GET',)
serializer_class = NeighborhoodSerializer
lookup_url_kwarg = "name"
def dispatch(self, request, *args, **kwargs):
self.params = utils.PrepParams(self.request)
return super(NeighborhoodFromCoordView, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
queryset = self.filter_queryset(self.get_queryset())
neighborhood = ""
if self.params.neighborhood:
try:
neighborhood = Neighborhood.sub_objects.filter(area__contains=self.params.ref_pnt)[0]
except IndexError:
neighborhood = ""
#neighborhood = get_object_or_404(queryset, **filter_kwargs)
self.check_object_permissions(self.request, neighborhood)
return neighborhood
def get(self, request, format=None):
placemarks = Location.objects.all().displayable().get_closest(from_point=self.params.ref_pnt, distance_dict=self.params.d )
neighborhood = self.get_object()
data = {'neighborhood':NeighborhoodSerializer(neighborhood).data,
'placemarks':LocationSerializer(placemarks).data,}
return Response(data)
class RailLinesRouteBorderView(APIView):
def get(self, request, *args, **kwargs):
routes = CTARailLines.objects.all()
name = self.kwargs.get('name')
border = get_object_or_404(CityBorder, name=name)
kwargs = {'name':name}
border = CityBorder.objects.filter(**kwargs)
data = {'border':CityBorderSerializer(border).data,
'routes':CTARailLinesSerializer(routes).data,}
return Response(data)
```
#### File: mobiletrans/mtdistmap/area_grid.py
```python
import logging, json
from Queue import Queue
from threading import Thread, Semaphore
from UserDict import DictMixin
#from mobiletrans.mtdistmap.utils import shapefile
import shapefile
from django.contrib.gis.geos import Point
from pyproj import Geod
logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
g = Geod(ellps='WGS84')
class GridPoint(object):
"""
This represents a geographic coordinate and
where it falls on the Grid object. The value
of x and y refer to the x or y distance from the
origin. The xint and yint are the spacings between
points.
"""
def __init__(self, x, y, point,):
self.x = x
self.y = y
self.point = point
@property
def grid_coords(self):
return (self.x, self.y)
@property
def geo_coords(self):
return self.point.x, self.point.y
@property
def geo_coords_r(self):
return self.point.y, self.point.x
def __repr__(self):
return "GridPoint(lng=%s (%s), lat=%s (%s))" % (
self.point.x, self.x, self.point.y, self.y)
"""
from mobiletrans.mtdistmap.area_grid import generate_grid, GridPoint
region = CityBorder.objects.all()[0]
gp = GridPoint(0, 0, region.area.centroid)
g = generate_grid(region, gp)
"""
class Grid(DictMixin):
"""
This represents a grid of GridPoints, the center of which
is at (0,0). The key to this dict-like object is an (x,y)
tuple representing the distance from the origin (0,0).
"""
def __init__(self, xint, yint, *args, **kwargs):
self.grid = {}
self.xint = xint
self.yint = yint
def __getitem__(self, key):
return self.grid[key]
def __setitem__(self, key, item):
self.grid[key] = item
def __delitem__(self, key):
del self.grid[key]
def keys(self):
return self.grid.keys()
def json(self):
features = []
for k, point in self.items():
d = { "type": "Feature",
"geometry": {"type": "Point", "coordinates": point.geo_coords},
}
features.append(d)
json_dict = { "type": "FeatureCollection",
"features": features
}
return json.dumps(json_dict)
def generate_shapefile(self, shape_file_name='shapefiles/test'):
"""
Exports the data grid into a set of shapefiles.
Input:
shape_file_name="/home/test/shapefiles/myshape"
Output:
In directory /home/test/shapefiles/
myshape.shp
myshape.shx
myshape.dbf
"""
w = shapefile.Writer(shapeType=shapefile.POINT)
w.autoBalance = 1
w.field('ID', 'N') # id field of type Number
w.field('x' , "N")
w.field('y', "N")
count = 1
for i in self.items():
graphpoint = i[1]
w.point(graphpoint.point.x, graphpoint.point.y)
w.record(count, graphpoint.x, graphpoint.y, )
count += 1
w.save(shape_file_name)
class GridGenerator(object):
"""
Generate a grid of evenly spaced points across the region
area geography object.
Executing the run() method will actually start the calculation.
It will create generated a grid by first taking a start
point and then visit points to the north, south, east, and
west of that point. This will continue until all points
in the region have been visited.
Inputs:
region - (required) the bounding region for the grid
grid - (required) the empty new Grid object
"""
grid = None
directions = ['north', 'south', 'east', 'west']
num_worker_threads=4
def __init__(self, region, grid, *args, **kwargs):
self.q = Queue()
self.region = region
self.grid = grid
@property
def grid_point_class(self):
return GridPoint
def transpose(self, grid_point, azimuth, distance):
return g.fwd(grid_point.point.x, grid_point.point.y, azimuth, distance)
def north(self, grid_point):
lng, lat, az = self.transpose(grid_point, 0, self.grid.yint)
return self.grid_point_class(grid_point.x, grid_point.y + self.grid.yint, Point(lng, lat))
def south(self, grid_point):
lng, lat, az = self.transpose(grid_point, 180, self.grid.yint)
return self.grid_point_class(grid_point.x, grid_point.y - self.grid.yint, Point(lng, lat), )
def east(self, grid_point):
lng, lat, az = self.transpose(grid_point, 90, self.grid.xint)
return self.grid_point_class(grid_point.x + self.grid.xint, grid_point.y, Point(lng, lat), )
def west(self, grid_point):
lng, lat, az = self.transpose(grid_point, 270, self.grid.xint)
return self.grid_point_class(grid_point.x - self.grid.xint, grid_point.y, Point(lng, lat),)
def create_grid(self, grid_point):
for direction in self.directions:
new_point = getattr(self, direction)(grid_point)
if not self.grid.has_key((new_point.x, new_point.y)):
self.grid[(new_point.x, new_point.y)] = new_point
if self.region.area.contains(new_point.point):
self.work(new_point)
self.q.put(new_point)
def work(self, new_point):
pass
def worker(self):
while True:
item = self.q.get()
self.create_grid(item)
self.q.task_done()
def run(self, start_grid_point):
for i in range(self.num_worker_threads):
t = Thread(target=self.worker)
t.daemon = True
t.start()
self.create_grid(start_grid_point)
self.q.join()
return self.grid
"""
#from mobiletrans.mtdistmap.area_grid import GridGenerator, GridPoint, Grid
region = CityBorder.objects.all()[0]
center = region.area.centroid
grid = Grid(xint=300, yint=300)
gridgen = GridGenerator(region, grid)
g = gridgen.run(GridPoint(0,0,center))
"""
from mobiletrans.mtdistmap.cta_conn import load_transitnetwork
from mobiletrans.mtdistmap.route_planner import RoutePlanner
from django.contrib.gis.geos import Point, fromstr
class RouteGridGenerator(GridGenerator):
"""
Generate a grid across the region area geography object
and calculate the routes to the given train stop (to_point) for each
grid point.
Inputs:
region - (required) the bounding region for the grid
grid - (required) the empty new RouteGrid object
to_point - (required) train stop id that every grid point routes to
max_distance - (default 1500) the max search radius around each search point in which
to search for a nearby start train stop.
num_routes - (default 2) perform a route calculation from each of the num_routes
closest start stops to a given grid point.
"""
def __init__(self, to_point, *args, **kwargs):
self.to_point = to_point
tn = load_transitnetwork()
self.t = RoutePlanner(tn, max_distance=kwargs.get('max_distance', 1500),
num_routes=kwargs.get('num_routes', 2))
super(RouteGridGenerator, self).__init__(*args, **kwargs)
@property
def grid_point_class(self):
return RouteGridPoint
def work(self, new_point):
from_point = new_point.point
p = self.t.fastest_route_from_point(from_point, self.to_point)
new_point.routes += p
class RouteGrid(Grid):
def generate_shapefile(self, shape_file_name='shapefiles/test'):
"""
Exports the data grid into a set of shapefiles.
Input:
shape_file_name="/home/test/shapefiles/myshape"
Output:
In directory /home/test/shapefiles/
myshape.shp
myshape.shx
myshape.dbf
"""
w = shapefile.Writer(shapeType=shapefile.POINT)
w.autoBalance = 1
w.field('ID', 'N') # id field of type Number
w.field('NUM_ROUTES', 'N') # route field of type Number
w.field('x' , "N")
w.field('y', "N")
w.field('total_time', 'N', decimal=4) # long
count = 1
for i in self.items():
graphpoint = i[1]
w.point(graphpoint.point.x, graphpoint.point.y)
w.record(count, len(graphpoint.routes), graphpoint.x, graphpoint.y, "%s" % graphpoint.shortest_time() )
count += 1
w.save(shape_file_name)
class RouteGridPoint(GridPoint):
def __init__(self, *args, **kwargs):
super(RouteGridPoint, self).__init__(*args, **kwargs)
self.routes = []
def __repr__(self):
return "Route" + super(RouteGridPoint, self).__repr__()
def shortest_route(self):
return_value = None
if self.routes:
return_value = sorted(self.routes, key=lambda x: x.total_time)[0]  # smallest total_time first
return return_value
def shortest_time(self, default=9999999):
return_value = default
route = self.shortest_route()
if route:
return_value = route.total_time
return return_value
"""
# generate the grid with routes
from mobiletrans.mtlocation.models import CityBorder
from mobiletrans.mtdistmap.area_grid import RouteGridGenerator, RouteGridPoint, RouteGrid
region = CityBorder.objects.all()[0]
center = region.area.centroid
grid = RouteGrid(xint=300, yint=300)
#gridgen = RouteGridGenerator('41320', region, grid, max_distance=2000, num_routes=3)
gridgen = RouteGridGenerator('40170', region, grid, max_distance=2000, num_routes=3)
g = gridgen.run(RouteGridPoint(0,0,center))
g.generate_shapefile('shapefiles/chicago_pts5')
"""
```
#### File: mobiletrans/mtdistmap/cta_heatmap.py
```python
import os
import mapnik
from django.conf import settings
DB_NAME = settings.DATABASES['default']['NAME']
DB_USER = settings.DATABASES['default']['USER']
DB_HOST = settings.DATABASES['default']['HOST']
DB_PORT = settings.DATABASES['default']['PORT']
DB_PASSWORD = settings.DATABASES['default']['PASSWORD']
class CTAHeatmap(object):
"""
Usage:
from mobiletrans.mtdistmap.cta_heatmap import CTAHeatmap
import mapnik
m = mapnik.Map(600,600)
cta_map = CTAHeatmap(m)
cta_map.render_image()
"""
def __init__(self, empty_map, *args, **kwargs):
self.define_map(empty_map)
def get_subqueries(self):
subquery={}
subquery[1] = """(SELECT * FROM public.mtlocation_region, public.mtlocation_neighborhood, public.django_content_type WHERE
mtlocation_region.id = mtlocation_neighborhood.region_ptr_id AND
mtlocation_region.content_type_id = django_content_type.id) as foo"""
subquery[2] = """(SELECT * FROM
mtlocation_location as loc
inner join django_content_type as ct on (loc.content_type_id = ct.id)
left outer join public.mtlocation_gplace as place on ( loc.id = place.location_ptr_id )
where ct.model not in ('transitstop')) as foo"""
subquery[4] = """(SELECT
l.id, l.created, l.modified, l.active, l.name,
s.location_type, s.url, s.description, s.stop_code, s.stop_id, s.location_ptr_id,
l.slug, l.uuid, l.content_type_id, l.point
FROM
mtlocation_location l,
mtlocation_transitstop s
WHERE
s.location_ptr_id = l.id
and s.location_type = 1) as bar
"""
return subquery
def get_stylesheet(self):
cta_heatmap_xml = os.path.join(settings.PROJECT_ROOT, 'mtdistmap', 'cta_heatmap.xml')
f = open(cta_heatmap_xml)
schema = f.read()
f.close()
return schema
def define_map(self, map):
subquery = self.get_subqueries()
schema = self.get_stylesheet()
mapnik.load_map_from_string(map, schema)
connection_params = dict(dbname=DB_NAME, user=DB_USER, host=DB_HOST, password=<PASSWORD>, port=DB_PORT, )
ds = mapnik.PostGIS(table='mtlocation_cityborder', **connection_params)
map.layers[0].datasource = ds
ds = mapnik.PostGIS(table=subquery[1], geometry_table="mtlocation_region", geometry_field='area', **connection_params)
map.layers[1].datasource = ds
ds = mapnik.PostGIS(table=subquery[2], geometry_table="mtlocation_location", geometry_field='point', **connection_params)
map.layers[2].datasource = ds
ds = mapnik.PostGIS(table='mtlocation_ctaraillines', **connection_params)
map.layers[3].datasource = ds
ds = mapnik.PostGIS(table=subquery[4], geometry_table="mtlocation_location", geometry_field='point', **connection_params)
map.layers[4].datasource = ds
self.map = map
def render_image(self):
map = self.map
map.zoom_all()
#map.zoom_to_box(bbox)
mapnik.render_to_file(map, 'chicago.png', 'png')
```
#### File: mtdistmap/dijkstra/graph_to_dot.py
```python
def graph_to_dot(G):
s = """digraph G {\nnode [width=.3,height=.3,shape=octagon,style=filled,color=skyblue];\noverlap="false";\nrankdir="LR";\n%s}"""
r = ''
for i in G:
for j in G[i]:
r+='%s -> %s [label="%s"];\n' % (i, j, str(G[i][j]))
return s % (r)
# http://graphviz-dev.appspot.com/
# http://ashitani.jp/gv/#
```
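A hedged usage sketch for graph_to_dot; the adjacency mapping is made up, but it matches the node -> {neighbour: weight} shape the function iterates over (import path assumed from the file layout).
```python
from mobiletrans.mtdistmap.dijkstra.graph_to_dot import graph_to_dot

# Hypothetical graph: each node maps to a dict of neighbour -> edge weight.
G = {'a': {'b': 2, 'c': 5}, 'b': {'c': 1}, 'c': {}}
print(graph_to_dot(G))  # paste the DOT output into a Graphviz renderer (see URLs above)
```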
#### File: management/commands/shortest_path.py
```python
import os
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from mobiletrans.mtdistmap.cta_conn import load_transitnetwork
class Command(BaseCommand):
args = '<stop_id stop_id >'
help = 'Calculates the shortest route between two transit stop ids.'
def handle(self, *args, **options):
if not len(args) == 2:
self.stdout.write("2 args required")
exit(1)
tn = load_transitnetwork()
sp = tn.shortest_path(args[0], args[1])
if sp:
self.stdout.write(str(sp.to_json()))
else:
self.stdout.write(str({'error': 'no route'}))
```
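A hedged invocation sketch for the command above; the two stop ids are borrowed from the area_grid usage notes in this repo and may not exist in every database.
```python
# Equivalent to `python manage.py shortest_path 40170 41320`.
from django.core.management import call_command
call_command('shortest_path', '40170', '41320')
```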
#### File: mobiletrans/mtimport/importer.py
```python
import json, csv, logging, traceback
from django.contrib.gis import gdal
from xml.dom import minidom
from mobiletrans.mtimport import models
from mobiletrans.mtimport.exceptions import *
import logging
logger = logging.getLogger(__name__)
class ImportBase(object):
def __init__(self, input_record, input_data):
self.stats = {'new':0, 'existing':0, 'errors':0}
self.input_record = input_record
self.input_data = input_data
@classmethod
def get_model_class(cls,):
raise NotImplementedError("implement this get_model_class method in a subclass")
@classmethod
def data_import(cls, *input_parameters):
data = []
status=None
input_record = models.InputRecord()
input_record.type = cls.get_model_class().__name__
input_record.save()
try:
data = cls.open_data(*input_parameters)
except ImportException, error:
stacktrace_text = traceback.format_exc(limit=50)
models.InputRecord.objects.make_note(
input_record=input_record,
note='Data Error: %s\n\n%s' % (error, stacktrace_text),
type=models.TRANSFER_NOTE_STATUS_ERROR,
exception=error.__class__.__name__,
)
models.InputRecord.objects.end_import(input_record, models.TRANSFER_STATUS_FAILED)
raise ImportException("Data load problem: %s" % (error))
import_class = cls(input_record, data, )
stats = import_class.process()
if not input_record.status == models.TRANSFER_STATUS_FAILED:
if input_record.inputnote_set.filter(type__in=[models.TRANSFER_NOTE_STATUS_ERROR,]):
status = models.TRANSFER_STATUS_PARTIAL
else:
status = models.TRANSFER_STATUS_SUCCESS
models.InputRecord.objects.make_note(
input_record=input_record,
note='# new records %s - # existing records %s - error records %s' % (
stats['new'], stats['existing'], stats['errors']),
type=models.TRANSFER_NOTE_STATUS_NOTE,
)
models.InputRecord.objects.end_import(input_record, status)
return input_record
def process(self):
count = 1
for row in self.get_iteration_root():
try:
import_object = self.parse_row(row)
except ValueError as error:
stacktrace_text = traceback.format_exc(limit=50)
models.InputRecord.objects.make_note(
input_record=self.input_record,
note="Row %s: ValueError Parse error: %s\n\n%s" % (count, error, stacktrace_text),
type=models.TRANSFER_NOTE_STATUS_ERROR,
exception=error.__class__.__name__,
)
self.stats['errors'] += 1
models.InputRecord.objects.end_import(self.input_record, models.TRANSFER_STATUS_FAILED)
except IndexError as error:
stacktrace_text = traceback.format_exc(limit=50)
models.InputRecord.objects.make_note(
input_record=self.input_record,
note="Row %s: IndexError Parse error: %s\n\n%s" % (count, error, stacktrace_text),
type=models.TRANSFER_NOTE_STATUS_ERROR,
exception=error.__class__.__name__,
)
self.stats['errors'] += 1
models.InputRecord.objects.end_import(self.input_record, models.TRANSFER_STATUS_FAILED)
except ImportException as error:
stacktrace_text = traceback.format_exc(limit=50)
models.InputRecord.objects.make_note(
input_record=self.input_record,
note="Row %s: Import Exception Parse error: %s\n\n%s" % (count, error, stacktrace_text),
type=models.TRANSFER_NOTE_STATUS_ERROR,
exception=error.__class__.__name__,
)
self.stats['errors'] += 1
models.InputRecord.objects.end_import(self.input_record, models.TRANSFER_STATUS_FAILED)
except Exception as error:
stacktrace_text = traceback.format_exc(limit=50)
models.InputRecord.objects.make_note(
input_record=self.input_record,
note="Row %s: Unknown Parse error: %s\n\n%s" % (count, error, stacktrace_text),
type=models.TRANSFER_NOTE_STATUS_ERROR,
exception=error.__class__.__name__,
)
self.stats['errors'] += 1
models.InputRecord.objects.end_import(self.input_record, models.TRANSFER_STATUS_FAILED)
else:
try:
logger.debug("%s" % vars(import_object))
import_object.full_clean()
import_object.save()
except Exception as error:
stacktrace_text = traceback.format_exc(limit=50)
models.InputRecord.objects.make_note(
input_record=self.input_record,
note="Row %s: Save Error: %s\n\n%s" % (count, error, stacktrace_text),
type=models.TRANSFER_NOTE_STATUS_ERROR,
exception=error.__class__.__name__,
)
self.stats['errors'] += 1
count += 1
return self.stats
def get_iteration_root(self):
raise NotImplementedError("implement this get_iteration_root method in a subclass")
def open_data(self, **input_parameters):
raise NotImplementedError("implement this open_data method in a subclass")
def parse_row(self, row):
raise NotImplementedError("implement this parse_row method in a subclass")
########################################
# Format-Specific loader base classes
########################################
class JSONImportBase(ImportBase):
@classmethod
def open_data(cls, input_file_path):
try:
input_file = open(input_file_path,'r')
input_data = json.load(input_file)
except IOError as error:
raise IOImportException("Error with file read: %s - %s" % (input_file_path, error, ))
except ValueError as error:
raise DataFormatImportException("Error with JSON read: %s - %s" % (input_file_path, error, ))
except Exception as error:
raise ImportException("Unknown import error: %s - %s" % (input_file_path, error, ))
else:
input_file.close()
return input_data
def get_iteration_root(self):
if self.input_data.has_key('data'):
data = self.input_data['data']
else:
models.InputRecord.objects.make_note(
input_record=self.input_record,
note="Input file missing 'data' element.",
type=models.TRANSFER_NOTE_STATUS_ERROR,
)
models.InputRecord.objects.end_import(self.input_record, models.TRANSFER_STATUS_FAILED)
raise ImportException("JSON missing 'data' element.")
return data
class CSVImportBase(ImportBase):
@classmethod
def open_data(cls, input_file_path):
try:
input_file = open(input_file_path,'r')
input_data = csv.reader(input_file, delimiter=',', quotechar='"')
data = []
for row in input_data:
data.append(row)
data = data[1:]  # drop the header row
except IOError as error:
raise IOImportException("Error with file load %s - %s" % (input_file_path, error))
except Exception, error:
raise ImportException("Unknown import error: %s - %s" % (input_file_path, error, ))
else:
input_file.close()
return data
def get_iteration_root(self):
return self.input_data
class KMLImportBase(ImportBase):
@classmethod
def open_data(cls, input_file_path):
try:
input_file = open(input_file_path,'r')
input_data = minidom.parse(input_file)
except IOError as error:
raise IOImportException("Error with file read: %s - %s" % (input_file_path, error, ))
except ValueError as error:
raise DataFormatImportException("Error with KML read: %s - %s" % (input_file_path, error, ))
except Exception as error:
raise ImportException("Unknown import error: %s - %s" % (input_file_path, error, ))
else:
input_file.close()
return input_data
def get_iteration_root(self):
placemarks = self.input_data.getElementsByTagName("Placemark")
if not placemarks:
models.InputRecord.objects.make_note(
input_record=self.input_record,
note="Missing 'Placemark' elements.",
type=models.TRANSFER_NOTE_STATUS_ERROR,
)
models.InputRecord.objects.end_import(self.input_record, models.TRANSFER_STATUS_FAILED)
raise ImportException("Missing 'Placemark' elements.")
return placemarks
from django.db import connections, router
class ShapeFileImportBase(ImportBase):
def get_geom_field(self):
raise NotImplementedError("subclasses must implement this to specify a field name string of the geography field.")
def prepare_srid_transform(self, source_srs, model_class, geom_field, ):
using = router.db_for_write(model_class)
spatial_backend = connections[using].ops
opts = model_class._meta
geo_field, model, direct, m2m = opts.get_field_by_name(geom_field)
SpatialRefSys = spatial_backend.spatial_ref_sys()
target_srs = SpatialRefSys.objects.using(using).get(srid=geo_field.srid).srs
return gdal.CoordTransform(source_srs, target_srs)
@classmethod
def open_data(cls, input_file_path):
try:
input_data = gdal.DataSource(input_file_path)
except IOError as error:
raise IOImportException("Error with ShapeFile read: %s - %s" % (input_file_path, error, ))
except gdal.error.OGRException as error:
raise DataFormatImportException("Error with ShapeFile read: %s - %s" % (input_file_path, error, ))
except Exception as error:
raise ImportException("Unknown import ShapeFile error: %s - %s" % (input_file_path, error, ))
return input_data
def get_iteration_root(self):
try:
input_data = self.input_data[0]
except IndexError as error:
raise ImportException("Root element not found: %s" % (error))
self.coord_transform = self.prepare_srid_transform(
source_srs=input_data.srs,
model_class=self.get_model_class(),
geom_field=self.get_geom_field(),
)
return input_data
########################################
```
#### File: mtimport/importers/importer_cityborder.py
```python
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.contrib.gis.geos import Point, fromstr, fromfile, GEOSGeometry, MultiPoint, MultiPolygon, Polygon
from django.contrib.gis.gdal.error import OGRIndexError
from autoslug.settings import slugify
from mobiletrans.mtimport.importer import ShapeFileImportBase
from mobiletrans.mtlocation import models as loc_models
from mobiletrans.mtimport import models
from mobiletrans.mtimport.exceptions import *
class CityBorder(ShapeFileImportBase):
@classmethod
def get_model_class(cls,):
return loc_models.CityBorder
def get_geom_field(self):
return "area"
def parse_row(self, row):
existing = False
try:
primary_key = row.get("OBJECTID")
except OGRIndexError as error:
raise ImportException("primary key 'OBJECTID' not available", error)
try:
cityborder = self.get_model_class().objects.get(objectid=primary_key)
existing = True
except ObjectDoesNotExist:
cityborder = self.get_model_class()(objectid=primary_key)
existing = False
except MultipleObjectsReturned:
raise ImportException("multiple objects returned with OBJECTID %s " % primary_key)
try:
cityborder.name = row.get("NAME")
except OGRIndexError as error:
raise ImportException("field 'NAME' not available", error)
try:
cityborder.shape_area = row.get("SHAPE_AREA")
except OGRIndexError as error:
raise ImportException("field 'SHAPE_AREA' not available", error)
try:
cityborder.shape_len = row.get("SHAPE_LEN")
except OGRIndexError as error:
raise ImportException("field 'SHAPE_LEN' not available", error)
try:
geom = row.geom
geom.transform(self.coord_transform)
cityborder.area = geom.wkt
except Exception, error:
raise ImportException("attribute 'geom' not available", error)
if existing:
self.stats['existing'] += 1
else:
self.stats['new'] += 1
return cityborder
```
#### File: mtimport/importers/importer_googleplace.py
```python
import decimal
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.contrib.gis.geos import Point, fromstr, fromfile, GEOSGeometry, MultiPoint, MultiPolygon, Polygon
from autoslug.settings import slugify
from mobiletrans.mtimport.importer import ImportBase
from mobiletrans.mtlocation import models as loc_models
from mobiletrans.mtimport import models
from mobiletrans.mtimport.exceptions import *
from django.conf import settings
from googleplaces import GooglePlaces, types, lang, GooglePlacesError
class GoogleIOImportException(IOImportException):
pass
def search_places_from_lat_long(lat_lng, radius=3200, keyword=None, types=[]):
google_places = GooglePlaces(settings.GOOGLE_PLACES_API_KEY)
query_result = google_places.query(
lat_lng=lat_lng, keyword=keyword,
radius=radius, types=types)
return query_result
class GPlaceLocation(ImportBase):
@classmethod
def get_model_class(cls,):
return loc_models.GPlace
@classmethod
def open_data(cls, lat_lng, radius):
try:
data = search_places_from_lat_long(lat_lng, radius=radius)
except GooglePlacesError as error:
raise GoogleIOImportException("GooglePlaces lookup error: %s" % (error))
except Exception as error:
raise ImportException("Unknown import error: lat_lng [%s] radius [%s]: %s" % (lat_lng, radius, error, ))
except:
raise ImportException("Unknown import error: lat_lng [%s] radius [%s]" % (lat_lng, radius, ))
return data
def get_iteration_root(self):
if hasattr(self.input_data, 'places'):
data = self.input_data.places
else:
models.InputRecord.objects.make_note(
input_record=self.input_record,
note="query data missing 'places' element.",
type=models.TRANSFER_NOTE_STATUS_ERROR,
)
models.InputRecord.objects.end_import(self.input_record, models.TRANSFER_STATUS_FAILED)
raise ImportException("query data missing 'places' element.")
return data
def parse_row(self, row):
existing = False
row.get_details()
try:
uuid = row.id
except AttributeError as error:
raise AttributeError("id %s: uuid %s" % (id, error))
try:
place = self.get_model_class().objects.get(uuid=uuid)
existing = True
except ObjectDoesNotExist:
place = self.get_model_class()(uuid=uuid)
existing = False
except MultipleObjectsReturned:
raise ImportException("multiple objects returned with uuid %s " % uuid)
try:
name = row.name
except AttributeError as error:
raise AttributeError("id %s: name %s" % (id, error))
place.name = name
try:
geo = row.geo_location
except AttributeError as error:
raise AttributeError("id %s: geo_location %s" % (id, error))
else:
if geo.has_key('lng'):
longitude = geo['lng']
else:
raise AttributeError("id %s: geo_location.lng" % (id))
if geo.has_key('lat'):
latitude = geo['lat']
else:
raise AttributeError("id %s: geo_location.lat " % (id ))
point = fromstr('POINT(%s %s)' % (longitude, latitude))
place.point = point
if hasattr(row, 'rating'):
try:
d = decimal.Decimal("%s" % row.rating)
except decimal.InvalidOperation:
pass
else:
place.rating = d
if hasattr(row, 'vicinity'):
place.vicinity = row.vicinity
if hasattr(row, 'types'):
place.types = row.types
if hasattr(row, 'reference'):
place.reference = row.reference
if hasattr(row, 'international_phone_number'):
place.international_phone_number = row.international_phone_number
if hasattr(row, 'local_phone_number'):
place.local_phone_number = row.local_phone_number
if hasattr(row, 'website'):
place.website = row.website
if hasattr(row, 'formatted_address'):
place.address = row.formatted_address
if hasattr(row, 'url'):
place.url = row.url
if existing:
self.stats['existing'] += 1
else:
self.stats['new'] += 1
return place
```
#### File: mtimport/importers/importer_transitroute.py
```python
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.contrib.gis.geos import Point, fromstr, fromfile, GEOSGeometry, MultiPoint, MultiPolygon, Polygon
from autoslug.settings import slugify
from mobiletrans.mtimport.importer import CSVImportBase
from mobiletrans.mtlocation import models as loc_models
from mobiletrans.mtimport import models
from mobiletrans.mtimport.exceptions import *
class TransitRoute(CSVImportBase):
@classmethod
def get_model_class(cls,):
return loc_models.TransitRoute
def parse_row(self, row):
row = list(row)
existing = False
pk = "route_id"
try:
pk_val = row[0]
except IndexError as error:
raise IndexError("%s %s" % (pk, error))
try:
transitroute = self.get_model_class().objects.get(route_id=pk_val)
existing = True
except ObjectDoesNotExist:
transitroute = self.get_model_class()(route_id=pk_val)
existing = False
except MultipleObjectsReturned:
raise ImportException("multiple objects returned with %s %s " % (pk, pk_val))
attr = (1, 'short_name')
try:
value = row[attr[0]]
except IndexError as error:
raise IndexError("%s %s: %s %s" % (pk, pk_val, attr[1], error))
setattr(transitroute, attr[1], value)
attr = (2, 'long_name')
try:
value = row[attr[0]]
except IndexError as error:
raise IndexError("%s %s: %s %s" % (pk, pk_val, attr[1], error))
setattr(transitroute, attr[1], value)
attr = (3, 'type')
try:
value = row[attr[0]]
except IndexError as error:
raise IndexError("%s %s: %s %s" % (pk, pk_val, attr[1], error))
setattr(transitroute, attr[1], value)
attr = (4, 'url')
try:
value = row[attr[0]]
except IndexError as error:
raise IndexError("%s %s: %s %s" % (pk, pk_val, attr[1], error))
setattr(transitroute, attr[1], value)
attr = (5, 'color')
try:
value = row[attr[0]]
except IndexError as error:
raise IndexError("%s %s: %s %s" % (pk, pk_val, attr[1], error))
setattr(transitroute, attr[1], value)
attr = (6, 'text_color')
try:
value = row[attr[0]]
except IndexError as error:
raise IndexError("%s %s: %s %s" % (pk, pk_val, attr[1], error))
setattr(transitroute, attr[1], value)
if existing:
self.stats['existing'] += 1
else:
self.stats['new'] += 1
return transitroute
```
#### File: mobiletrans/mtimport/models.py
```python
from django.db import models
import datetime
import logging
logger = logging.getLogger(__name__)
TRANSFER_STATUS_RUNNING=1
TRANSFER_STATUS_SUCCESS=2
TRANSFER_STATUS_FAILED=3
TRANSFER_STATUS_PARTIAL=4
TRANSFER_STATUS = (
(TRANSFER_STATUS_RUNNING, "Running"),
(TRANSFER_STATUS_FAILED, "Failed"),
(TRANSFER_STATUS_SUCCESS, "Complete"),
(TRANSFER_STATUS_PARTIAL, "Partial Import"),
)
TRANSFER_NOTE_STATUS_NOTE = "note"
TRANSFER_NOTE_STATUS_ERROR = "error"
TRANSFER_NOTE_STATUS_WARNING = "warning"
TRANSFER_NOTE_STATUS = (
(TRANSFER_NOTE_STATUS_NOTE, "Note"),
(TRANSFER_NOTE_STATUS_ERROR, "Error"),
(TRANSFER_NOTE_STATUS_WARNING, "Warning"),
)
class InputRecordManager(models.Manager):
def make_note(self, input_record, note, type, exception=None):
ir = InputNote(note=note, type=type)
ir.input_record = input_record
ir.exception = exception
getattr(logger, {'note':'info', 'error':'error', 'warning':'warning'}.get(type, 'debug'))("Transfer Note %s" % vars(ir))
#print "Transfer Note", vars(ir)
ir.save()
def end_import(self, input_record, status=None):
if status:
input_record.status = status
input_record.end = datetime.datetime.now()
input_record.save()
class InputRecord(models.Model):
start = models.DateTimeField(auto_now_add=True)
end = models.DateTimeField(blank=True, null=True)
type = models.CharField(max_length=64, blank=True, null=True)
exception = models.CharField(max_length=128, blank=True, null=True)
status = models.IntegerField(choices=TRANSFER_STATUS, default=TRANSFER_STATUS_RUNNING)
objects = InputRecordManager()
def __unicode__(self):
return "%s" % self.start
def get_notes(self):
return self.inputnote_set.all()
def get_errors(self):
return self.get_notes().filter(type=TRANSFER_NOTE_STATUS_ERROR)
def get_num_errors(self):
return self.get_errors().count()
get_num_errors.short_description = "Errors"
def get_warnings(self):
return self.get_notes().filter(type=TRANSFER_NOTE_STATUS_WARNING)
def get_num_warnings(self):
return self.get_warnings().count()
get_num_warnings.short_description = "Warnings"
class InputNote(models.Model):
input_record = models.ForeignKey(InputRecord)
note = models.TextField(blank=True, null=True)
exception = models.CharField(max_length=128, blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
type = models.CharField(choices=TRANSFER_NOTE_STATUS, max_length=24)
def __unicode__(self):
return "%s" % self.created
```
#### File: mobiletrans/mtlocation/fields.py
```python
from django.db import models
from south.modelsinspector import add_introspection_rules
from django_extensions.db.fields import UUIDField as ExtUUIDField
class UUIDField(ExtUUIDField):
def __init__(self, verbose_name=None, name=None, auto=True, version=1, node=None, clock_seq=None, namespace=None, **kwargs):
kwargs['max_length'] = 40
if auto:
self.empty_strings_allowed = False
kwargs['blank'] = True
kwargs.setdefault('editable', False)
self.auto = auto
self.version = version
if version == 1:
self.node, self.clock_seq = node, clock_seq
elif version == 3 or version == 5:
self.namespace, self.name = namespace, name
models.CharField.__init__(self, verbose_name, name, **kwargs)
class SeparatedValuesField(models.TextField):
__metaclass__ = models.SubfieldBase
def __init__(self, *args, **kwargs):
self.token = kwargs.pop('token', ',')
super(SeparatedValuesField, self).__init__(*args, **kwargs)
def to_python(self, value):
if not value: return
if isinstance(value, list):
return value
return value.split(self.token)
def get_db_prep_value(self, value, connection, *args, **kwargs):
if not value: return
assert(isinstance(value, list) or isinstance(value, tuple))
return self.token.join([unicode(s) for s in value])
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
# Join the list back into its delimited string form for serialization.
return self.token.join([unicode(s) for s in value]) if value else ''
add_introspection_rules([], ["^mobiletrans\.mtlocation\.fields\.SeparatedValuesField"])
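# Hedged usage sketch (added for illustration; not part of the original module).
# The field stores a Python list as one delimited string and splits it back:
#
#     f = SeparatedValuesField()                              # default token is ','
#     f.to_python("cafe,food")                                # -> ["cafe", "food"]
#     f.get_db_prep_value(["cafe", "food"], connection=None)  # -> u"cafe,food"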
``` |
{
"source": "joejcollins/BagbatchWebsite",
"score": 3
} |
#### File: BagbatchWebsite/web_app/main.py
```python
import logging
import sys
import traceback
import forms
from models import Settings
from flask import Flask, render_template
from google.appengine.api import app_identity # pylint: disable=E0401
from google.appengine.api import mail # pylint: disable=E0401
from google.appengine.api import users
import pdb
# Initialize the application with CSRF
app = Flask(__name__) # pylint: disable=invalid-name
# Set the Flask debug to false so you can use GAE debug
app.config.update(DEBUG=False)
app.secret_key = Settings.get('SECRET_KEY')
app.config['RECAPTCHA_USE_SSL'] = False
app.config['RECAPTCHA_PUBLIC_KEY'] = Settings.get('RECAPTCHA_PUBLIC_KEY')
app.config['RECAPTCHA_PRIVATE_KEY'] = Settings.get('RECAPTCHA_PRIVATE_KEY')
app.config['RECAPTCHA_OPTIONS'] = {'theme': 'white'}
@app.before_request
def enable_local_error_handling():
''' test of log '''
app.logger.addHandler(logging.StreamHandler())
app.logger.setLevel(logging.INFO)
@app.route('/', methods=['GET', 'POST'])
def form():
''' Show the message form for the user to fill in '''
message_form = forms.MessageForm()
if message_form.validate_on_submit():
send_mail(message_form.email.data, message_form.message.data)
return render_template('submitted_form.html', title="Thanks", form=message_form)
return render_template('form.html', title="Message", form=message_form)
def send_mail(their_email, their_message):
''' Send an email message to me '''
message = mail.EmailMessage(sender=app_identity.get_application_id() +
'@appspot.gserviceaccount.com')
message.subject = 'Message from Bagbatch Website'
message.to = Settings.get('EMAIL')
message.body = """From: {}\n\n<<BEGINS>>\n\n{}\n\n<<ENDS>>""".format(their_email, their_message)
message.send()
@app.errorhandler(500)
def server_error(error):
''' Log any errors to the browser because you are too lazy to look at the console
The Flask DEBUG setting must the set to false for this to work '''
exception_type, exception_value, trace_back = sys.exc_info()
no_limit = None
exception = ''.join(traceback.format_exception(exception_type, exception_value,
trace_back, no_limit))
logging.exception('An error occurred during a request. ' + str(error))
return render_template('500.html', title=error, exception=exception)
@app.route('/admin', methods=['GET'])
def admin_page():
''' Authentication required page '''
user = users.get_current_user()
return render_template('admin.html', email=user.email())
``` |
{
"source": "joejcollins/lieutenant-dean",
"score": 3
} |
#### File: data/models/application.py
```python
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, INTEGER
Base = declarative_base()
metadata = Base.metadata
class Application(Base):
__tablename__ = 'application'
id = Column(INTEGER, primary_key=True)
name = Column(String(255))
def __repr__(self):
return '<Application %r>' % self.name
```
#### File: lieutenant-dean/info_module/text.py
```python
def reverse_string(string_to_reverse):
""" Do the string reversal. """
reversed_string = string_to_reverse[::-1]
return reversed_string
```
#### File: lieutenant-dean/info_module/text_test.py
```python
import info_module.text as text
def test_method_is_present():
""" Confirm that the methods are present. """
assert "reverse_string" in dir(text)
def test_reverse_text():
""" Confirm the text is reversed. """
string_to_reverse = 'qwerty'
reversed_text = text.reverse_string(string_to_reverse)
assert reversed_text == 'ytrewq'
```
#### File: lieutenant-dean/rest_api/flask_app.py
```python
import flask
import flasgger
import flasgger.utils as swag_utils
import rest_api.flask_app_apidocs as apidocs
import rest_api.text as text
api = flask.Flask(__name__)
api.register_blueprint(text.text_api)
swagger_template = {
"swagger": "2.0",
"info": {
"title": "Celery Demo API",
"description": "Demo of Flask and Celery in action.",
"version": "0.0.1"
},
"basePath": "/"
}
swagger = flasgger.Swagger(api, template=swagger_template)
@api.route('/', methods=['GET'])
@swag_utils.swag_from(apidocs.INDEX)
def index():
""" Confirm that the flask app is running. """
greeting = {
'message': "Hello there",
'docs': '/apidocs/'
}
return flask.jsonify(greeting)
```
#### File: lieutenant-dean/rest_api/text.py
```python
import flask
import flasgger.utils as swag_utils
import rest_api.text_apidocs as apidocs
import info_module.text as info_text
import task_queuing.tasks.text as text_tasks
text_api = flask.Blueprint("text_api", __name__, url_prefix="/text/")
def _reverse_fast(string_to_reverse):
""" Provide a json response with the reversal. """
reversed_string = info_text.reverse_string(string_to_reverse)
message = {
'original': string_to_reverse,
'reversed': reversed_string
}
return message
@text_api.route('/reverse/fast/<string_to_reverse>', methods=['GET'])
@swag_utils.swag_from(apidocs.REVERSE_FAST_GET)
def reverse_fast_get(string_to_reverse):
""" Provide a json response with the reversal. """
message = _reverse_fast(string_to_reverse)
return flask.jsonify(message)
@text_api.route('/reverse/fast', methods=['POST'])
@swag_utils.swag_from(apidocs.REVERSE_FAST_POST)
def reverse_fast_post():
""" Provide a json response with the reversal. """
string_to_reverse = flask.request.json.get("string_to_reverse")
message = _reverse_fast(string_to_reverse)
return flask.jsonify(message)
@text_api.route('/reverse/slow/<string_to_reverse>', methods=['GET'])
@swag_utils.swag_from(apidocs.REVERSE_SLOW_GET)
def reverse_slow_get(string_to_reverse):
""" Provide a json response with the reversal. """
result = text_tasks.slowly_reverse_string.delay(
string_to_reverse=string_to_reverse
)
result_dict = vars(result)
# Filter the dictionary to send back.
result_dict = {
k: v for (k, v) in result_dict.items()
if isinstance(v, (str, bool, int))
}
return flask.jsonify(result_dict)
```
#### File: lieutenant-dean/task_queuing/consumers.py
```python
from celery import bootsteps
from kombu import Consumer
import time
from task_queuing.queues import my_queues
class SubstitutionCypher(bootsteps.ConsumerStep):
"""Custom handling of substitution cypher"""
def handle_message(self, body, message):
print('Received message: {0!r}'.format(body))
message.ack() # so it doesn't get redelivered.
def get_consumers(self, channel):
print(f"Registering custom consumer: {__class__}")
return [Consumer(channel,
queues=my_queues,
callbacks=[self.handle_message],
accept=['json'])]
```
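A hedged sketch of wiring the custom consumer step into the Celery app; queue_broker is the app object the other task_queuing modules import, and the bootsteps registration call is the standard Celery API for adding a ConsumerStep.
```python
import task_queuing.celery_app as app
from task_queuing.consumers import SubstitutionCypher

# Register the custom consumer step so the worker attaches it on startup.
app.queue_broker.steps['consumer'].add(SubstitutionCypher)
```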
#### File: lieutenant-dean/task_queuing/task_logging.py
```python
import logging
from celery.signals import task_prerun, task_postrun
# Attempt to set up logger as per the example at:
# https://stackoverflow.com/questions/25281612/celery-log-each-task-run-to-its-own-file
# I could not get the decorator `@task_prerun.connect(sender='slowly_reverse_string', weak=False)` to work, so
# the selection of which tasks to log is made with a conditional statement inside the @task_prerun.connect handler.
@task_prerun.connect
def prepare_logging(sender=None, task_id=None, **kwargs):
"""Log each task to a separate file."""
# Get a logger based on the unique name of the task.
logger = logging.getLogger(task_id)
logger.propagate = False
# Logging to the console.
stream_handler = logging.StreamHandler()
formatter = logging.Formatter('[%(asctime)s][%(levelname)s] %(message)s')
stream_handler.setFormatter(formatter)
stream_handler.setLevel(logging.INFO)
# @task_prerun.connect(sender='slowly_reverse_string', weak=False) did not work for me, but YMMV, so
# add the file log for some tasks based on the name of the task.
if sender.__name__ == 'slowly_reverse_string':
# Add a file handler whose filename is the task_id.
task_handler = logging.FileHandler(f'../logs/{task_id}.log')
task_handler.setLevel(logging.INFO)
logger.addHandler(task_handler)
logger.addHandler(stream_handler)
@task_postrun.connect
def close_logging(sender=None, task_id=None, **kwargs):
"""Close the task logger, even if it wasn't opened."""
logger = logging.getLogger(task_id)
for handler in logger.handlers:
handler.flush()
handler.close()
logger.handlers = []
```
#### File: task_queuing/tasks/custom.py
```python
import task_queuing.celery_app as app
class Capitalize(app.queue_broker.Task):
"""Custom task without the decorator"""
def run(self, text):
capitalized = text.upper()
return capitalized
@app.queue_broker.task(base=Capitalize)
def shit(x):
print('shit')
return "val"
# app.queues.tasks.register(Capitalize)
``` |
{
"source": "JoeJimFlood/NFLPrediction2014",
"score": 3
} |
#### File: JoeJimFlood/NFLPrediction2014/matchup.py
```python
import os
import sys
import pandas as pd
import numpy as np
from numpy.random import poisson, uniform
from numpy import mean
import time
import math
po = True
teamsheetpath = sys.path[0] + '/teamcsvs/'
compstat = {'TDF': 'TDA', 'TDA': 'TDF', #Dictionary to use to compare team stats with opponent stats
'FGF': 'FGA', 'FGA': 'FGF',
'SFF': 'SFA', 'SFA': 'SFF',
'PAT1%F': 'PAT1%A', 'PAT1%A': 'PAT1%F',
'PAT2%F': 'PAT2%A', 'PAT2%A': 'PAT2%F'}
def get_opponent_stats(opponent): #Gets summaries of statistics for opponent each week
opponent_stats = {}
global teamsheetpath
opp_stats = pd.DataFrame.from_csv(teamsheetpath + opponent + '.csv')
for stat in opp_stats.columns:
if stat in ['TDF', 'FGF', 'SFF', 'TDA', 'FGA', 'SFA']:
opponent_stats.update({stat: opp_stats[stat].mean()})
try:
opponent_stats.update({'PAT1%F': float(opp_stats['PAT1FS'].sum()) / opp_stats['PAT1FA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT1%F': .99})
try:
opponent_stats.update({'PAT2%F': float(opp_stats['PAT2FS'].sum()) / opp_stats['PAT2FA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT2%F': .5})
try:
opponent_stats.update({'PAT1%A': float(opp_stats['PAT1AS'].sum()) / opp_stats['PAT1AA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT1%A': .99})
try:
opponent_stats.update({'PAT2%A': float(opp_stats['PAT2AS'].sum()) / opp_stats['PAT2AA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT2%A': .5})
return opponent_stats
def get_residual_performance(team): #Get how each team has done compared to the average performance of their opponents
global teamsheetpath
score_df = pd.DataFrame.from_csv(teamsheetpath + team + '.csv')
residual_stats = {}
score_df['PAT1%F'] = np.nan
score_df['PAT2%F'] = np.nan
score_df['PAT1%A'] = np.nan
score_df['PAT2%A'] = np.nan
for week in score_df.index:
try:
score_df['PAT1%F'][week] = float(score_df['PAT1FS'][week]) / score_df['PAT1FA'][week]
except ZeroDivisionError:
score_df['PAT1%F'][week] = 0.99
#print ('For: ' + str(score_df['PAT1%F'][week]))
try:
score_df['PAT2%F'][week] = float(score_df['PAT2FS'][week]) / score_df['PAT2FA'][week]
except ZeroDivisionError:
score_df['PAT2%F'][week] = 0.5
try:
score_df['PAT1%A'][week] = float(score_df['PAT1AS'][week]) / score_df['PAT1AA'][week]
except ZeroDivisionError:
score_df['PAT1%A'][week] = 0.99
#print ('Against: ' + str(score_df['PAT1%F'][week]))
try:
score_df['PAT2%A'][week] = float(score_df['PAT2AS'][week]) / score_df['PAT2AA'][week]
except ZeroDivisionError:
score_df['PAT2%A'][week] = 0.5
opponent_stats = get_opponent_stats(score_df['OPP'][week])
for stat in opponent_stats:
if week == 1:
score_df['OPP_' + stat] = np.nan
score_df['OPP_' + stat][week] = opponent_stats[stat]
for stat in opponent_stats:
score_df['R_' + stat] = score_df[stat] - score_df['OPP_' + compstat[stat]]
if stat in ['TDF', 'FGF', 'SFF', 'TDA', 'FGA', 'SFA']:
residual_stats.update({stat: score_df['R_' + stat].mean()})
elif stat == 'PAT1%F':
residual_stats.update({stat: (score_df['R_PAT1%F'].multiply(score_df['PAT1FA'])).sum() / score_df['PAT1FA'].sum()})
elif stat == 'PAT2%F':
residual_stats.update({stat: (score_df['R_PAT2%F'].multiply(score_df['PAT2FA'])).sum() / score_df['PAT2FA'].sum()})
elif stat == 'PAT1%A':
residual_stats.update({stat: (score_df['R_PAT1%A'].multiply(score_df['PAT1AA'])).sum() / score_df['PAT1AA'].sum()})
elif stat == 'PAT2%A':
residual_stats.update({stat: (score_df['R_PAT2%A'].multiply(score_df['PAT2AA'])).sum() / score_df['PAT2AA'].sum()})
try:
residual_stats.update({'GOFOR2': float(score_df['PAT2FA'].sum()) / score_df['TDF'].sum()})
except ZeroDivisionError:
residual_stats.update({'GOFOR2': .1})
#print team
#print residual_stats
return residual_stats
def get_score(expected_scores): #Get the score for a team based on expected scores
score = 0
if expected_scores['TD'] > 0:
tds = poisson(expected_scores['TD'])
else:
tds = poisson(0.01)
score = score + 6 * tds
if expected_scores['FG'] > 0:
fgs = poisson(expected_scores['FG'])
else:
fgs = poisson(0.01)
score = score + 3 * fgs
if expected_scores['S'] > 0:
sfs = poisson(expected_scores['S'])
else:
sfs = poisson(0.01)
score = score + 2 * sfs
for td in range(tds):
go_for_2_determinant = uniform(0, 1)
if go_for_2_determinant <= expected_scores['GOFOR2']: #Going for 2
successful_pat_determinant = uniform(0, 1)
if successful_pat_determinant <= expected_scores['PAT2PROB']:
score = score + 2
else:
continue
else: #Going for 1
#print(expected_scores['PAT1PROB'])
successful_pat_determinant = uniform(0, 1)
if successful_pat_determinant <= expected_scores['PAT1PROB']:
score = score + 1
else:
continue
return score
def game(team_1, team_2,
expected_scores_1, expected_scores_2,
playoff): #Get two scores and determine a winner
score_1 = get_score(expected_scores_1)
score_2 = get_score(expected_scores_2)
if score_1 > score_2:
win_1 = 1
win_2 = 0
draw_1 = 0
draw_2 = 0
elif score_2 > score_1:
win_1 = 0
win_2 = 1
draw_1 = 0
draw_2 = 0
else:
if playoff:
win_1 = 0.5
win_2 = 0.5
draw_1 = 0
draw_2 = 0
else:
win_1 = 0
win_2 = 0
draw_1 = 1
draw_2 = 1
summary = {team_1: [win_1, draw_1, score_1]}
summary.update({team_2: [win_2, draw_2, score_2]})
return summary
def get_expected_scores(team_1_stats, team_2_stats, team_1_df, team_2_df): #Get the expected scores for a matchup based on the previous teams' performances
expected_scores = {}
for stat in team_1_stats:
expected_scores.update({'TD': mean([team_1_stats['TDF'] + team_2_df['TDA'].mean(),
team_2_stats['TDA'] + team_1_df['TDF'].mean()])})
expected_scores.update({'FG': mean([team_1_stats['FGF'] + team_2_df['FGA'].mean(),
team_2_stats['FGA'] + team_1_df['FGF'].mean()])})
expected_scores.update({'S': mean([team_1_stats['SFF'] + team_2_df['SFA'].mean(),
team_2_stats['SFA'] + team_1_df['SFF'].mean()])})
#print mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
# team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
expected_scores.update({'GOFOR2': team_1_stats['GOFOR2']})
pat1prob = mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
if not math.isnan(pat1prob):
expected_scores.update({'PAT1PROB': pat1prob})
else:
expected_scores.update({'PAT1PROB': 0.99})
#print(expected_scores['PAT1PROB'])
pat2prob = mean([team_1_stats['PAT2%F'] + team_2_df['PAT2AS'].astype('float').sum() / team_2_df['PAT2AA'].sum(),
team_2_stats['PAT2%A'] + team_1_df['PAT2FS'].astype('float').sum() / team_1_df['PAT2FA'].sum()])
if not math.isnan(pat2prob):
expected_scores.update({'PAT2PROB': pat2prob})
else:
expected_scores.update({'PAT2PROB': 0.5})
#print(expected_scores)
return expected_scores
def matchup(team_1, team_2):
ts = time.time()
team_1_season = pd.DataFrame.from_csv(teamsheetpath + team_1 + '.csv')
team_2_season = pd.DataFrame.from_csv(teamsheetpath + team_2 + '.csv')
stats_1 = get_residual_performance(team_1)
stats_2 = get_residual_performance(team_2)
expected_scores_1 = get_expected_scores(stats_1, stats_2, team_1_season, team_2_season)
expected_scores_2 = get_expected_scores(stats_2, stats_1, team_2_season, team_1_season)
team_1_wins = 0
team_2_wins = 0
team_1_draws = 0
team_2_draws = 0
team_1_scores = []
team_2_scores = []
i = 0
error = 1
    while error > 0.000001 or i < 5000000: #Run at least 5 million iterations, then continue until the win probability converges
summary = game(team_1, team_2,
expected_scores_1, expected_scores_2,
po)
team_1_prev_wins = team_1_wins
team_1_wins += summary[team_1][0]
team_2_wins += summary[team_2][0]
team_1_draws += summary[team_1][1]
team_2_draws += summary[team_2][1]
team_1_scores.append(summary[team_1][2])
team_2_scores.append(summary[team_2][2])
team_1_prob = float(team_1_wins) / len(team_1_scores)
team_2_prob = float(team_2_wins) / len(team_2_scores)
if i > 0:
team_1_prev_prob = float(team_1_prev_wins) / i
error = team_1_prob - team_1_prev_prob
i = i + 1
if i == 5000000:
print('Probability converged within 5 million iterations')
else:
print('Probability converged after ' + str(i) + ' iterations')
games = pd.DataFrame.from_items([(team_1, team_1_scores), (team_2, team_2_scores)])
summaries = games.describe(percentiles = [0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975])
output = {'ProbWin': {team_1: team_1_prob, team_2: team_2_prob}, 'Scores': summaries}
print(team_1 + '/' + team_2 + ' score distributions computed in ' + str(round(time.time() - ts, 1)) + ' seconds')
return output
```
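A minimal usage sketch of the simulator above; the team codes are hypothetical and only need to match CSV files in `teamcsvs/`.
```python
# 'SEA' and 'DEN' are hypothetical team codes with corresponding CSVs in teamcsvs/.
from matchup import matchup

result = matchup('SEA', 'DEN')
print(result['ProbWin'])   # e.g. {'SEA': 0.57, 'DEN': 0.43}
print(result['Scores'])    # per-team score distribution summaries (mean, percentiles, ...)
```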
#### File: JoeJimFlood/NFLPrediction2014/reorder_index.py
```python
import pandas as pd
import sys
import os
def reorder_index(team_csv):
location = sys.path[0] + '\\teamcsvs'
team_df = pd.DataFrame.from_csv(location + '\\' + team_csv)
cols_df = pd.DataFrame.from_csv(location + '\\ARI.csv')
cols = cols_df.columns
team_df = team_df[cols]
team_df.to_csv(location + '\\' + team_csv)
files = os.listdir(sys.path[0] + '\\teamcsvs')
for f in files:
reorder_index(f)
``` |
{
"source": "JoeJimFlood/RugbyPredictifier",
"score": 3
} |
#### File: 2017SuperRugby/Validation/travelWeightValidation.py
```python
import pandas as pd
import numpy as np
import os
import sys
def bin_scores(score, bins):
if score <= bins[0]:
return [1, 0, 0, 0]
elif score <= bins[1]:
return [0, 1, 0, 0]
elif score <= bins[2]:
return [0, 0, 1, 0]
else:
return [0, 0, 0, 1]
base_path = os.path.split(__file__)[0]
source_path = os.path.split(base_path)[0]
sys.path.append(base_path)
validation_file = os.path.join(base_path, 'ValidationData.xlsx')
import matchup as m0
import matchup_w_distance as m1
rounds = list(range(8, 18)) + ['QF', 'SF', 'F']
#rounds = [17, 'QF', 'SF', 'F']
outdata0 = pd.DataFrame(columns = ['ROUND', 'TEAM', 'OPP', 'VENUE', 'SCORE',
'EXPECTED', 'Q1', 'Q2', 'Q3',
'INQ1', 'INQ2', 'INQ3', 'INQ4'])
outdata1 = outdata0.copy()
counter = 0
for round in rounds:
print('ROUND {}'.format(round))
validation_data = pd.read_excel(validation_file, 'Round{}'.format(round))
for f in os.listdir(os.path.join(source_path, 'teamcsvs')):
data = pd.read_csv(os.path.join(source_path, 'teamcsvs', f))
data['ROUND'] = data['ROUND'].astype(str)
try:
data.set_index('ROUND').loc[:str(round)].iloc[:-1].to_csv(os.path.join(base_path, 'teamcsvs', f))
except KeyError:
try:
last_round = round - 1
except TypeError:
guide = {'QF': 17, 'SF': 'QF', 'F': 'SF'}
last_round = guide[round]
if last_round in data.index:
data.set_index('ROUND').loc[:str(last_round)].to_csv(os.path.join(base_path, 'teamcsvs', f))
else:
pass
for matchupno in validation_data.index:
(home_team, away_team, venue, hscore, ascore) = validation_data.loc[matchupno]
results0 = m0.matchup(home_team, away_team)
results1 = m1.matchup(home_team, away_team, venue)
hexpected0 = results0['Scores'][home_team]['mean']
aexpected0 = results0['Scores'][away_team]['mean']
hexpected1 = results1['Scores'][home_team]['mean']
aexpected1 = results1['Scores'][away_team]['mean']
hq0 = list(results0['Scores'][home_team][['25%', '50%', '75%']])
aq0 = list(results0['Scores'][away_team][['25%', '50%', '75%']])
hq1 = list(results1['Scores'][home_team][['25%', '50%', '75%']])
aq1 = list(results1['Scores'][away_team][['25%', '50%', '75%']])
hb0 = bin_scores(hscore, hq0)
ab0 = bin_scores(ascore, hq0)
hb1 = bin_scores(hscore, hq1)
ab1 = bin_scores(ascore, hq1)
outdata0.loc[counter] = [round, home_team, away_team, venue, hscore, hexpected0] + hq0 + hb0
outdata1.loc[counter] = [round, home_team, away_team, venue, hscore, hexpected1] + hq1 + hb1
counter += 1
outdata0.loc[counter] = [round, away_team, home_team, venue, ascore, aexpected0] + aq0 + ab0
outdata1.loc[counter] = [round, away_team, home_team, venue, ascore, aexpected1] + aq1 + ab1
counter += 1
pd.Panel({'NoWeights': outdata0, 'Weights': outdata1}).to_excel(os.path.join(base_path, 'ValidationResultsRaw.xlsx'))
print(outdata0)
print(outdata1)
```
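A quick worked example of the `bin_scores` helper above: a score equal to a quartile boundary falls into the lower of the two adjacent bins.
```python
# Assuming bin_scores from the script above is in scope; bins are the 25/50/75% quartiles.
bin_scores(27, [20, 27, 34])   # -> [0, 1, 0, 0]  (equal to the 50% boundary -> second bin)
bin_scores(40, [20, 27, 34])   # -> [0, 0, 0, 1]  (above the 75% boundary -> fourth bin)
```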
#### File: 2018SuperRugby/Validation/Validation.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from scipy.stats import pearsonr as correl
from scipy.special import erfinv
import os
import sys
def probit(p):
'''
    Probit function (inverse of the standard normal cumulative distribution function)
'''
return np.sqrt(2)*erfinv(2*p-1)
#Set paths and read in data
base_path = os.path.split(__file__)[0]
sys.path.append(base_path)
from validation_util import *
infile = os.path.join(base_path, 'ValidationData.csv')
data = pd.read_csv(infile)
#Define regression data as data from after international break (Round 17 onwards) and perform regression analysis
reg_data = data.query('Round >= 17')
reg_data['Constant'] = np.ones_like(reg_data.index)
reg = sm.OLS(reg_data['Actual Score'], reg_data[['Expected Score', 'Constant']])
res = reg.fit()
#Write regression results to file
regression_file = os.path.join(base_path, 'RegressionResults.txt')
f = open(regression_file, 'w')
f.write(str(res.summary()))
f.close()
#Obtain color gradient for plotting
color_gradient = color_interpolate('#ff0000', '#0000ff', 19)
#Create figure
plt.figure(figsize = (7.5, 7.5))
#For each round plot actual vs expected scores with the size and color based on the round number
for r in range(2, 21):
label = 'Round %s'%(r)
if r == 20:
label = 'Finals'
round_data = data.query('Round == @r')
plt.scatter(round_data['Expected Score'], round_data['Actual Score'],
s = r, color = color_gradient[r-2], alpha = 0.8, label = label)
#Plot regression line against the data
(xmin, xmax) = (reg_data['Expected Score'].min(), reg_data['Expected Score'].max())
(ymin, ymax) = (res.params[0]*xmin + res.params[1], res.params[0]*xmax + res.params[1])
plt.plot([xmin, xmax], [ymin, ymax], 'b--', label = 'Round 17+')
#Format plot
plt.xlabel('Expected Score')
plt.ylabel('Actual Score')
plt.xticks(range(-10, 90, 10))
plt.yticks(range(0, 90, 10))
plt.axis('equal')
plt.legend(loc = 'upper right')
title_lines = ['Rugby Predictifier Validation: 2018 Super Rugby Season',
'Actual Scores vs. Expected Scores (Average of 5,000,000 Simulations)',
'Round 17+: Actual = {0}\u00d7Expected - {1} + \u03b5, r\u00b2 = {2}'.format(round(res.params[0], 2),
abs(round(res.params[1], 2)),
round(res.rsquared_adj, 3))]
plt.title('\n'.join(title_lines))
ax = plt.gca()
ax.set_axisbelow(True)
plt.grid(True)
#Write plot to file
scatterplot_file = os.path.join(base_path, 'ScoreScatterplot.png')
plt.savefig(scatterplot_file)
plt.clf()
plt.close()
#Compute percentage of actual scores in forecast quartiles
#Scores on the boundary go half to lower and half to upper
N_reg = len(reg_data.index)
q1_l = (reg_data['Actual Score'] <= reg_data['25%']).sum() / N_reg
q2_l = ((reg_data['Actual Score'] <= reg_data['50%']) * (reg_data['Actual Score'] > reg_data['25%'])).sum() / N_reg
q3_l = ((reg_data['Actual Score'] <= reg_data['75%']) * (reg_data['Actual Score'] > reg_data['50%'])).sum() / N_reg
q4_l = (reg_data['Actual Score'] > reg_data['75%']).sum() / N_reg
q1_u = (reg_data['Actual Score'] < reg_data['25%']).sum() / N_reg
q2_u = ((reg_data['Actual Score'] < reg_data['50%']) * (reg_data['Actual Score'] >= reg_data['25%'])).sum() / N_reg
q3_u = ((reg_data['Actual Score'] < reg_data['75%']) * (reg_data['Actual Score'] >= reg_data['50%'])).sum() / N_reg
q4_u = (reg_data['Actual Score'] >= reg_data['75%']).sum() / N_reg
q1 = 0.5*(q1_l+q1_u)
q2 = 0.5*(q2_l+q2_u)
q3 = 0.5*(q3_l+q3_u)
q4 = 0.5*(q4_l+q4_u)
p = np.array([q1, q2, q3, q4])
n = np.array(4*[N_reg])
se = np.sqrt(p*(1-p)/n)
#Create bar plot
plt.figure(figsize = (7.5, 2.5))
plt.plot([0, 1], [0.25, 0.25], 'k--')
plt.bar([0, 0.25, 0.5, 0.75], p, 4*[0.25],
yerr = probit(0.975)*se, error_kw = {'capsize': 7},
align = 'edge', facecolor = '#00d3ca', edgecolor = 'k')
plt.xticks([0, 0.25, 0.5, 0.75, 1], ['', '1st Quartile', '2nd Quartile', '3rd Quartile', ''])
plt.yticks(np.arange(0, 0.6, 0.1), ['0%', '10%', '20%', '30%', '40%', '50%'])
plt.xlim(0, 1)
plt.title('Round 17+ Score Distribution Validation')
plt.ylabel('% of Scores within\nForecast Quartiles')
#Write plot to file
barplot_file = os.path.join(base_path, 'QuartileBarPlot.png')
plt.savefig(barplot_file)
plt.clf()
plt.close()
print('Done')
``` |
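The error bars on the quartile bar plot come from the normal approximation to a binomial proportion, scaled by probit(0.975) ≈ 1.96 for a 95% interval. A small numeric sketch follows; the sample size of 60 is an assumption for illustration only.
```python
import numpy as np
from scipy.special import erfinv

def probit(p):
    return np.sqrt(2) * erfinv(2 * p - 1)

# With a proportion p = 0.25 and, say, n = 60 observations, the 95% half-width is
# probit(0.975) * sqrt(p * (1 - p) / n) ~= 1.96 * 0.056 ~= 0.11.
p, n = 0.25, 60
half_width = probit(0.975) * np.sqrt(p * (1 - p) / n)
print(round(half_width, 3))   # ~0.11
```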
{
"source": "Joejiong/mindspore",
"score": 3
} |
#### File: nn/metrics/metric.py
```python
from abc import ABCMeta, abstractmethod
import numpy as np
from mindspore.common.tensor import Tensor
class Metric(metaclass=ABCMeta):
"""
Base class of metric.
Note:
        For examples of subclasses, please refer to the definitions of classes such as `MAE` and `Recall`.
"""
def __init__(self):
pass
def _convert_data(self, data):
"""
Convert data type to numpy array.
Args:
data (Object): Input data.
Returns:
Ndarray, data with `np.ndarray` type.
"""
if isinstance(data, Tensor):
data = data.asnumpy()
elif isinstance(data, list):
data = np.array(data)
elif isinstance(data, np.ndarray):
pass
else:
raise TypeError('Input data type must be tensor, list or numpy.ndarray')
return data
def _check_onehot_data(self, data):
"""
        Whether the input data are one-hot encoded.
Args:
data (numpy.array): Input data.
Returns:
            bool, True if the input data are one-hot encoded.
"""
if data.ndim > 1 and np.equal(data ** 2, data).all():
shp = (data.shape[0],) + data.shape[2:]
if np.equal(np.ones(shp), data.sum(axis=1)).all():
return True
return False
def __call__(self, *inputs):
"""
Evaluate input data once.
Args:
inputs (tuple): The first item is predict array, the second item is target array.
Returns:
Float, compute result.
"""
self.clear()
self.update(*inputs)
return self.eval()
@abstractmethod
def clear(self):
"""
An interface describes the behavior of clearing the internal evaluation result.
Note:
All subclasses should override this interface.
"""
raise NotImplementedError('Must define clear function to use this base class')
@abstractmethod
def eval(self):
"""
An interface describes the behavior of computing the evaluation result.
Note:
All subclasses should override this interface.
"""
raise NotImplementedError('Must define eval function to use this base class')
@abstractmethod
def update(self, *inputs):
"""
An interface describes the behavior of updating the internal evaluation result.
Note:
All subclasses should override this interface.
Args:
inputs: A variable-length input argument list.
"""
raise NotImplementedError('Must define update function to use this base class')
```
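To make the clear/update/eval contract concrete, here is a minimal illustrative subclass. It is a sketch in the spirit of MindSpore's built-in `MAE`, not the library's own implementation, and the import path is assumed from the file layout above.
```python
import numpy as np
from mindspore.nn.metrics import Metric  # import path assumed from the module above

class SimpleMAE(Metric):
    """Illustrative Metric subclass: mean absolute error over all updates."""
    def __init__(self):
        super(SimpleMAE, self).__init__()
        self.clear()

    def clear(self):
        self._abs_error_sum = 0.0
        self._samples_num = 0

    def update(self, *inputs):
        y_pred = self._convert_data(inputs[0])
        y = self._convert_data(inputs[1])
        self._abs_error_sum += np.abs(y_pred - y).sum()
        self._samples_num += y.shape[0]

    def eval(self):
        if self._samples_num == 0:
            raise RuntimeError('Call update() at least once before eval().')
        return self._abs_error_sum / self._samples_num

# One-shot evaluation goes through __call__, which chains clear/update/eval:
# SimpleMAE()([0.1, 0.2, 0.6], [0.1, 0.25, 0.5]) -> ~0.05
```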
#### File: infer/variational/svi.py
```python
import mindspore.common.dtype as mstype
from mindspore.common.tensor import Tensor
from ....wrap.cell_wrapper import TrainOneStepCell
class SVI:
r"""
    Stochastic Variational Inference (SVI).
    Variational inference casts the inference problem as an optimization: a family of distributions over the hidden
    variables, indexed by a set of free parameters, is posited, and the parameters are optimized to make the
    distribution closest to the posterior of interest.
    See more details in `Variational Inference: A Review for Statisticians <https://arxiv.org/abs/1601.00670>`_.
Args:
net_with_loss(Cell): Cell with loss function.
optimizer (Cell): Optimizer for updating the weights.
"""
def __init__(self, net_with_loss, optimizer):
self.net_with_loss = net_with_loss
self.optimizer = optimizer
self._loss = 0.0
def run(self, train_dataset, epochs=10):
"""
Optimize the parameters by training the probability network, and return the trained network.
Args:
epochs (int): Total number of iterations on the data. Default: 10.
train_dataset (Dataset): A training dataset iterator.
Outputs:
Cell, the trained probability network.
"""
train_net = TrainOneStepCell(self.net_with_loss, self.optimizer)
train_net.set_train()
for _ in range(1, epochs+1):
train_loss = 0
dataset_size = 0
for data in train_dataset.create_dict_iterator():
x = Tensor(data['image'], dtype=mstype.float32)
y = Tensor(data['label'], dtype=mstype.int32)
dataset_size += len(x)
loss = train_net(x, y).asnumpy()
train_loss += loss
self._loss = train_loss / dataset_size
model = self.net_with_loss.backbone_network
return model
def get_train_loss(self):
"""
Returns:
numpy.dtype, the loss after training.
"""
return self._loss
```
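A minimal driving sketch for `SVI`; the network-with-loss, optimizer, and dataset are placeholders for objects built elsewhere (for instance an ELBO-wrapped VAE, an `nn.Adam` optimizer, and a dataset whose dict iterator yields 'image' and 'label').
```python
# Sketch only: `vae_with_elbo_loss`, `optimizer` and `ds_train` are placeholders
# for objects constructed elsewhere, as described in the lead-in above.
svi = SVI(net_with_loss=vae_with_elbo_loss, optimizer=optimizer)
trained_network = svi.run(train_dataset=ds_train, epochs=10)
print('final training loss:', svi.get_train_loss())
```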
#### File: composite/multitype_ops/getitem_impl.py
```python
from . import _compile_utils as compile_utils
from .. import base
from ... import functional as F
getitem = base.MultitypeFuncGraph('getitem')
"""
getitem is a metafuncgraph object which will get item from an object according to input type
using ".register" decorator.
"""
class _TupleSlice(base.TupleSlice_):
"""
Slices a tuple.
Inputs:
data (tuple): A tuple to be sliced.
s (slice): The index to slice tuple data.
Outputs:
Tuple, consists of some elements of data.
"""
def __init__(self, name):
base.TupleSlice_.__init__(self, name)
def __call__(self, *args):
pass
_tuple_slice = _TupleSlice('tuple_slice')
"""_tuple_slice is an metafuncgraph object which will slice a tuple."""
class _TupleGetItemTensor(base.TupleGetItemTensor_):
"""
Getting item of tuple by tensor index.
Inputs:
data (tuple): A tuple of items.
index (Tensor): The index in tensor.
Outputs:
Type, is the same as the element type of data.
"""
def __init__(self, name):
base.TupleGetItemTensor_.__init__(self, name)
def __call__(self, *args):
pass
_tuple_get_item_tensor = _TupleGetItemTensor('tuple_get_item_tensor')
"""_tuple_get_item_tensor is an metafuncgraph object which will select indexed item."""
@getitem.register("Tuple", "Number")
def _tuple_getitem_by_number(data, number_index):
"""
Getting item of tuple by number index.
Inputs:
data (tuple): A tuple to be sliced.
number_index (Number): Index in scalar.
Outputs:
Type, is the same as the element type of data.
"""
return F.tuple_getitem(data, number_index)
@getitem.register("Tuple", "Slice")
def _tuple_getitem_by_slice(data, slice_index):
"""
Getting item of tuple by slice index.
Inputs:
data (tuple): data
slice_index (Slice): Index in slice.
Outputs:
Tuple, element type is the same as the element type of data.
"""
return _tuple_slice(data, slice_index)
@getitem.register("Tuple", "Tensor")
def _tuple_getitem_by_tensor(data, tensor_index):
"""
Getting item out of tuple by tensor index.
Inputs:
data (tuple): A tuple of items to index.
tensor_index (Tensor): Index to select item.
Outputs:
Type, is the same as the element type of data.
"""
return _tuple_get_item_tensor(data, tensor_index)
@getitem.register("List", "Number")
def _list_getitem_by_number(data, number_index):
"""
Getting item of list by number index.
Inputs:
data (list): data in list.
number_index (Number): Index in scalar.
Outputs:
Type is the same as the element type of data.
"""
return F.list_getitem(data, number_index)
@getitem.register("Dictionary", "String")
def _dict_getitem_by_key(data, key):
"""
Getting item of dictionary by key which is a string.
Inputs:
data (Dictionary): data
key (str): Key of the data.
Outputs:
        Type, is the same as the element type of data.
"""
return F.dict_getitem(data, key)
@getitem.register("Tensor", "Number")
def _tensor_getitem_by_number(data, number_index):
"""
Getting item of tensor by number index.
Inputs:
data (Tensor): A tensor.
number_index (Number): Index in scalar.
Outputs:
        Tensor, element type is the same as the element type of data.
"""
return compile_utils.tensor_index_by_number(data, number_index)
@getitem.register("Tensor", "None")
def _tensor_getitem_by_none(data, index):
"""
For none indexing , expand data with one dim.
Inputs:
data (Tensor): A tensor.
index (None): None.
Outputs:
        Tensor, element type is the same as the element type of data.
"""
return F.expand_dims(data, 0)
@getitem.register("Tensor", "Slice")
def _tensor_getitem_by_slice(data, slice_index):
"""
Getting item of tensor by slice.
Inputs:
data (Tensor): A tensor.
slice_index (Slice): Index in slice.
Outputs:
Tensor, element type is the same as the element type of data.
"""
return compile_utils.tensor_index_by_slice(data, slice_index)
@getitem.register("Tensor", "Tensor")
def _tensor_getitem_by_tensor(data, tensor_index):
"""
    Getting item of tensor by tensor index.
Inputs:
data (Tensor): A tensor.
tensor_index (Tensor): An index expressed by tensor.
Outputs:
Tensor, element type is the same as the element type of data.
"""
return compile_utils.tensor_index_by_tensor(data, tensor_index)
@getitem.register("Tensor", "Tuple")
def _tensor_getitem_by_tuple(data, tuple_index):
"""
Getting item of tensor by tuple.
Inputs:
data (Tensor): A tensor.
tuple_index (tuple): Index in tuple.
Outputs:
Tensor, element type is the same as the element type of data.
"""
return compile_utils.tensor_index_by_tuple(data, tuple_index)
@getitem.register("Tensor", "Ellipsis")
def _tensor_getitem_by_ellipsis(data, ellipsis_index):
"""
Getting item of tensor by Ellipsis.
Inputs:
data (Tensor): A tensor.
ellipsis (Ellipsis): A Ellipsis object.
Outputs:
Tensor, same as data.
"""
return data
```
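These registrations back tensor indexing in cell code; below is a short illustrative sketch of forms that route through the handlers above. It is a sketch only, since the exact dispatch path differs between graph and pynative execution.
```python
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor

class TakeSliceAndRow(nn.Cell):
    """Indexing inside construct() resolves through the `getitem` registrations above."""
    def construct(self, x):
        # ("Tensor", "Slice") and ("Tensor", "Number") registrations respectively.
        return x[0:2], x[1]

net = TakeSliceAndRow()
part, row = net(Tensor(np.arange(12).reshape(3, 4).astype(np.float32)))
```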
#### File: ops/operations/inner_ops.py
```python
import numbers
from ..._checkparam import Validator as validator
from ..._checkparam import Rel
from ...common.dtype import tensor, dtype_to_pytype
from ..primitive import prim_attr_register, PrimitiveWithInfer
class ScalarCast(PrimitiveWithInfer):
"""
Cast the input scalar to another type.
Inputs:
- **input_x** (scalar) - The input scalar. Only constant value is allowed.
        - **input_y** (mindspore.dtype) - The type to cast to. Only constant value is allowed.
Outputs:
Scalar. The type is the same as the python type corresponding to `input_y`.
Examples:
>>> scalar_cast = P.ScalarCast()
>>> output = scalar_cast(255.0, mindspore.int32)
"""
@prim_attr_register
def __init__(self):
pass
def __infer__(self, x, t):
validator.check_integer('x shape', len(x['shape']), 0, Rel.EQ, self.name)
value, to = x['value'], t['value']
if value is not None:
validator.check_value_type("value", value, [numbers.Number, bool], self.name)
if isinstance(to, type(tensor)):
to = to.element_type()
np_type = dtype_to_pytype(to)
value = np_type(value)
out = {'shape': x['shape'],
'dtype': t['value'],
'value': value}
return out
```
#### File: bert_thor/src/thor_layer.py
```python
import numpy as np
import mindspore.common.dtype as mstype
from mindspore._checkparam import check_bool, check_int_positive
from mindspore.common.initializer import TruncatedNormal, initializer
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.nn.cell import Cell
from mindspore.nn.layer.activation import get_activation
from mindspore.ops import operations as P
class Embedding_Thor(Cell):
"""
A embeddings lookup table with a fixed dictionary and size.
Args:
vocab_size (int): Size of the dictionary of embeddings.
embedding_size (int): The size of each embedding vector.
embedding_shape (list): [batch_size, seq_length, embedding_size], the shape of
each embedding vector.
use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False.
initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02.
"""
def __init__(self,
vocab_size,
embedding_size,
embedding_shape,
use_one_hot_embeddings=False,
initializer_range=0.02,
name='embedding_table',
batch_size=12,
damping=0.03,
loss_scale=1,
frequency=100,
):
super(Embedding_Thor, self).__init__()
self.vocab_size = vocab_size
self.use_one_hot_embeddings = use_one_hot_embeddings
self.embedding_table = Parameter(initializer
(TruncatedNormal(initializer_range),
[vocab_size, embedding_size]),
name=name)
self.thor = True
self.expand = P.ExpandDims()
self.shape_flat = (-1,)
self.gather = P.GatherV2()
self.one_hot = P.OneHot()
self.on_value = Tensor(1.0, mstype.float32)
self.off_value = Tensor(0.0, mstype.float32)
self.array_mul = P.MatMul()
self.reshape = P.Reshape()
self.em_shape = tuple(embedding_shape)
self.shape = P.Shape()
self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
self.matrix_A_inv = Parameter(Tensor(np.zeros([vocab_size]).astype(np.float16)),
name='matrix_A_inv', requires_grad=False)
self.matrix_G_inv = Parameter(Tensor(np.zeros([embedding_size, embedding_size]).astype(np.float16)),
name="matrix_G_inv", requires_grad=False)
self.fake_G = Tensor(np.zeros([embedding_size, embedding_size]).astype(np.float16))
self.dampingA = Tensor(np.ones([vocab_size]).astype(np.float32))
self.dampingG = Tensor(np.identity(embedding_size), mstype.float32)
self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
self.freq = Tensor(frequency, mstype.int32)
self.axis = 0
self.damping = damping
self.gather = P.GatherV2()
self.sqrt = P.Sqrt()
self.mul = P.Mul()
self.cast = P.Cast()
self.cube_matmul = P.CusMatMulCube(transpose_a=True)
self.vector_matmul = P.CusBatchMatMul()
self.cholesky = P.CusCholeskyTrsm()
self.matrix_combine = P.CusMatrixCombine()
self.reduce_sum = P.ReduceSum(keep_dims=False)
self.inv = P.Inv()
self.getG = P.InsertGradientOf(self.save_gradient)
self.batch_size = batch_size
def save_gradient(self, dout):
"""save_gradient"""
bs = self.batch_size
bs = self.cast(bs, mstype.float32)
out = dout
dout = self.mul(dout, self.loss_scale)
dout = self.mul(dout, bs)
shape = self.shape(dout)
normalizer = self.cast(shape[0], mstype.float32)
matrix_G = self.cube_matmul(dout, dout)
matrix_G = self.mul(matrix_G, 1.0 / normalizer)
damping_step = self.gather(self.damping, self.cov_step, 0)
damping_step = self.cast(damping_step, mstype.float32)
self.cov_step = self.cov_step + self.freq
damping = self.sqrt(damping_step)
dampingG = self.cast(self.dampingG, mstype.float32)
matrix_G = matrix_G + damping * dampingG
matrix_G_inv = self.cholesky(matrix_G)
matrix_G_inv = self.vector_matmul(matrix_G_inv, matrix_G_inv)
matrix_G_inv = self.matrix_combine(matrix_G_inv)
matrix_G_inv = self.cast(matrix_G_inv, mstype.float16)
self.matrix_G_inv = matrix_G_inv
return out
def construct(self, input_ids):
"""construct of Embedding_Thor"""
flat_ids = self.reshape(input_ids, self.shape_flat)
if self.use_one_hot_embeddings:
one_hot_ids = self.one_hot(flat_ids, self.vocab_size, self.on_value, self.off_value)
output_for_reshape = self.array_mul(one_hot_ids, self.embedding_table)
else:
if self.thor:
one_hot_ids = self.one_hot(flat_ids, self.vocab_size, self.on_value, self.off_value)
matrix_A = self.reduce_sum(one_hot_ids, 0)
normalizer = self.batch_size
normalizer = self.cast(normalizer, mstype.float32)
matrix_A = self.mul(matrix_A, 1.0 / normalizer)
damping_step = self.gather(self.damping, self.cov_step, self.axis)
damping_step = self.cast(damping_step, mstype.float32)
damping = self.sqrt(damping_step)
dampingA = self.cast(self.dampingA, mstype.float32)
matrix_A = matrix_A + damping * dampingA
matrix_A_inv = self.inv(matrix_A)
matrix_A_inv = self.cast(matrix_A_inv, mstype.float16)
self.matrix_A_inv = matrix_A_inv
self.matrix_G_inv = self.fake_G
output_for_reshape = self.gather(self.embedding_table, flat_ids, 0)
output_for_reshape = self.getG(output_for_reshape)
else:
output_for_reshape = self.gather(self.embedding_table, flat_ids, 0)
output = self.reshape(output_for_reshape, self.em_shape)
return output, self.embedding_table
class Dense_Thor(Cell):
"""Dense_Thor"""
def __init__(self,
in_channels,
out_channels,
weight_init='normal',
bias_init='zeros',
damping=0.03,
loss_scale=1,
frequency=100,
has_bias=False,
activation=None,
batch_size=12):
super(Dense_Thor, self).__init__()
self.in_channels = check_int_positive(in_channels)
self.out_channels = check_int_positive(out_channels)
self.has_bias = check_bool(has_bias)
self.thor = True
if isinstance(weight_init, Tensor):
if weight_init.dim() != 2 or weight_init.shape()[0] != out_channels or \
weight_init.shape()[1] != in_channels:
raise ValueError("weight_init shape error")
self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]), name="weight")
if self.has_bias:
if isinstance(bias_init, Tensor):
if bias_init.dim() != 1 or bias_init.shape()[0] != out_channels:
raise ValueError("bias_init shape error")
self.bias = Parameter(initializer(bias_init, [out_channels]), name="bias")
self.matmul = P.MatMul(transpose_b=True)
self.bias_add = P.BiasAdd()
self.activation = get_activation(activation)
self.activation_flag = self.activation is not None
self.matrix_A_inv = Parameter(Tensor(np.zeros([in_channels, in_channels]).astype(np.float16)),
name='matrix_A_inv', requires_grad=False)
self.matrix_G_inv = Parameter(Tensor(np.zeros([out_channels, out_channels]).astype(np.float16)),
name="matrix_G_inv", requires_grad=False)
self.fake_G = Tensor(np.zeros([out_channels, out_channels]).astype(np.float16))
self.matmul = P.MatMul(transpose_b=True)
self.cube_matmul = P.CusMatMulCube(transpose_a=True)
self.matrix_combine = P.CusMatrixCombine()
self.cholesky = P.CusCholeskyTrsm()
self.shape = P.Shape()
self.reshape = P.Reshape()
self.transpose = P.Transpose()
self.cov_step = Parameter(initializer(0, [1], mstype.int32), name="cov_step", requires_grad=False)
self.mul = P.Mul()
self.cast = P.Cast()
self.damping = damping
self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
self.vector_matmul = P.CusBatchMatMul()
self.gather = P.GatherV2()
self.assignadd = P.AssignAdd()
self.freq = Tensor(frequency, mstype.int32)
self.axis = 0
self.abs = P.Abs()
self.reduce_max = P.ReduceMax(keep_dims=False)
self.log = P.Log()
self.exp = P.Exp()
self.dampingA = Tensor(np.identity(in_channels), mstype.float32)
self.dampingG = Tensor(np.identity(out_channels), mstype.float32)
self.sqrt = P.Sqrt()
self.getG = P.InsertGradientOf(self.save_gradient)
self.batch_size = batch_size
def save_gradient(self, dout):
"""save_gradient"""
bs = self.cast(self.batch_size, mstype.float32)
out = dout
dout = self.mul(dout, self.loss_scale)
dout = self.mul(dout, bs)
shape = self.shape(dout)
normalizer = self.cast(shape[0], mstype.float32)
matrix_G = self.cube_matmul(dout, dout)
matrix_G = self.mul(matrix_G, 1.0 / normalizer)
damping_step = self.gather(self.damping, self.cov_step, 0)
damping_step = self.cast(damping_step, mstype.float32)
self.cov_step = self.cov_step + self.freq
damping = self.sqrt(damping_step)
dampingG = self.cast(self.dampingG, mstype.float32)
matrix_G = matrix_G + damping * dampingG
matrix_G_inv = self.cholesky(matrix_G)
matrix_G_inv = self.vector_matmul(matrix_G_inv, matrix_G_inv)
matrix_G_inv = self.matrix_combine(matrix_G_inv)
matrix_G_inv = self.cast(matrix_G_inv, mstype.float16)
self.matrix_G_inv = matrix_G_inv
return out
def construct(self, x):
"""construct"""
if self.thor:
inputs = self.cube_matmul(x, x)
shape = self.shape(x)
normalizer = self.cast(shape[0], mstype.float32)
matrix_A = self.mul(inputs, 1.0 / normalizer)
damping_step = self.gather(self.damping, self.cov_step, self.axis)
damping_step = self.cast(damping_step, mstype.float32)
damping = self.sqrt(damping_step)
dampingA = self.cast(self.dampingA, mstype.float32)
matrix_A = matrix_A + damping * dampingA
matrix_A_inv = self.cholesky(matrix_A)
matrix_A_inv = self.vector_matmul(matrix_A_inv, matrix_A_inv)
matrix_A_inv = self.matrix_combine(matrix_A_inv)
matrix_A_inv = self.cast(matrix_A_inv, mstype.float16)
self.matrix_A_inv = matrix_A_inv
self.matrix_G_inv = self.fake_G
output = self.matmul(x, self.weight)
output = self.getG(output)
else:
output = self.matmul(x, self.weight)
if self.has_bias:
output = self.bias_add(output, self.bias)
if self.activation_flag:
return self.activation(output)
return output
def extend_repr(self):
"""extend_repr"""
str_info = 'in_channels={}, out_channels={}, weight={}, has_bias={}' \
.format(self.in_channels, self.out_channels, self.weight, self.has_bias)
if self.has_bias:
str_info = str_info + ', bias={}'.format(self.bias)
if self.activation_flag:
str_info = str_info + ', activation={}'.format(self.activation)
return str_info
```
#### File: python/pynative_mode/test_implicit_conversion.py
```python
import numpy as np
import pytest
from mindspore import Tensor, nn
from mindspore.ops import composite as C
def test_float_tensor_and_int_add():
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
y = 2
ret_actual = x + y
ret_expect = Tensor(np.array([[2.1, 2.2, 2.3], [2.4, 2.5, 2.6]], dtype=np.float32))
assert ret_actual.dtype == ret_expect.dtype
assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
def test_bool_tensor_and_float_add():
x = Tensor(np.array([[True, False], [False, True]], dtype=np.bool_))
y = 3.3
ret_actual = x + y
ret_expect = Tensor(np.array([[4.3, 3.3], [3.3, 4.3]], dtype=np.float32))
assert ret_actual.dtype == ret_expect.dtype
assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
def test_bool_tensor_and_int_add():
x = Tensor(np.array([[True, False], [False, True]], dtype=np.bool_))
y = 3
ret_actual = x + y
ret_expect = Tensor(np.array([[4, 3], [3, 4]], dtype=np.int32))
assert ret_actual.dtype == ret_expect.dtype
assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
def test_bool_and_int_tensor_add():
x = True
y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
ret_actual = x + y
ret_expect = Tensor(np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32))
assert ret_actual.dtype == ret_expect.dtype
assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
def test_float_tensor_and_int_tensor_add():
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
ret_actual = x + y
ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]], dtype=np.float32))
assert ret_actual.dtype == ret_expect.dtype
assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
def test_float_tensor_and_float_tensor_add():
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
y = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float16))
ret_actual = x + y
ret_expect = Tensor(np.array([[1.1, 2.2, 3.3], [4.4, 5.5, 6.6]], dtype=np.float32))
assert ret_actual.dtype == ret_expect.dtype
assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
def test_int_tensor_and_int_tensor_add():
x = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int8))
y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
ret_actual = x + y
ret_expect = Tensor(np.array([[2, 4, 6], [8, 10, 12]], dtype=np.int32))
assert ret_actual.dtype == ret_expect.dtype
assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
def test_float_tensor_and_bool_tensors_add():
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
y = Tensor(np.array([[True, True, True], [False, False, False]], dtype=np.bool_))
ret_actual = x + y
ret_expect = Tensor(np.array([[1.1, 1.2, 1.3], [0.4, 0.5, 0.6]], dtype=np.float32))
assert (ret_actual.asnumpy() == ret_expect.asnumpy()).all()
def test_float_tensor_and_str_add():
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
y = "ok"
with pytest.raises(TypeError) as er:
ret = x + y
assert "For 'TensorAdd', the 1th input is a not support implicit conversion type: str" in str(er.value)
def test_float_tensor_and_tuple_add():
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
y = (1, 2, 3)
with pytest.raises(TypeError) as er:
ret = x + y
assert "For 'TensorAdd', the 1th input is a not support implicit conversion type: tuple" in str(er.value)
def test_float_tensor_and_list_add():
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
y = [1, 2, 3]
with pytest.raises(TypeError) as er:
ret = x + y
assert "For 'TensorAdd', the 1th input is a not support implicit conversion type: list" in str(er.value)
def test_float_tensor_and_bool_tensors_add_grad():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x, y):
return x + y
class GradNet(nn.Cell):
def __init__(self, net):
super(GradNet, self).__init__()
self.net = net
def construct(self, x, y, sens):
return C.grad_all_with_sens(self.net)(x, y, sens)
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
y = Tensor(np.array([[True, True, True], [False, False, False]], dtype=np.bool_))
sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
net = Net()
grad_net = GradNet(net)
ret = grad_net(x, y, sens)
assert ret[0].dtype == x.dtype
assert ret[1].dtype == y.dtype
assert (ret[0].asnumpy() == sens.asnumpy()).all()
assert (ret[1].asnumpy() == sens.asnumpy().astype(np.bool_)).all()
def test_float_tensor_and_int_tensors_sub_grad():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x, y):
return x - y
class GradNet(nn.Cell):
def __init__(self, net):
super(GradNet, self).__init__()
self.net = net
def construct(self, x, y, sens):
return C.grad_all_with_sens(self.net)(x, y, sens)
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
y = Tensor(np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32))
sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
net = Net()
grad_net = GradNet(net)
ret = grad_net(x, y, sens)
print(ret)
assert ret[0].dtype == x.dtype
assert ret[1].dtype == y.dtype
assert (ret[0].asnumpy() == sens.asnumpy()).all()
assert (ret[1].asnumpy() == sens.asnumpy() * -1).all()
def test_float16_tensor_and_float32_tensors_sub_grad():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x, y):
return x - y
class GradNet(nn.Cell):
def __init__(self, net):
super(GradNet, self).__init__()
self.net = net
def construct(self, x, y, sens):
return C.grad_all_with_sens(self.net)(x, y, sens)
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.int32))
y = Tensor(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32))
sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
net = Net()
grad_net = GradNet(net)
ret = grad_net(x, y, sens)
print(ret)
assert ret[0].dtype == x.dtype
assert ret[1].dtype == y.dtype
assert (ret[0].asnumpy() == sens.asnumpy()).all()
assert (ret[1].asnumpy() == sens.asnumpy() * -1).all()
def test_float_tensor_and_int_add_grad():
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x):
return x + 2
class GradNet(nn.Cell):
def __init__(self, net):
super(GradNet, self).__init__()
self.net = net
def construct(self, x, sens):
return C.grad_all_with_sens(self.net)(x, sens)
x = Tensor(np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float32))
sens = Tensor(np.array([[1.0, 2.0, 0.0], [0.0, 3.0, 4.0]], dtype=np.float32))
net = Net()
grad_net = GradNet(net)
ret = grad_net(x, sens)
assert ret[0].dtype == x.dtype
assert (ret[0].asnumpy() == sens.asnumpy()).all()
``` |
{
"source": "Joejiong/models-1",
"score": 2
} |
#### File: video/reader/kinetics_reader.py
```python
import os
import sys
import cv2
import math
import random
import functools
try:
import cPickle as pickle
from cStringIO import StringIO
except ImportError:
import pickle
from io import BytesIO
import numpy as np
import paddle
try:
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import tempfile
from nvidia.dali.plugin.paddle import DALIGenericIterator
except:
print("DALI is not installed, you can improve performance if use DALI")
from PIL import Image, ImageEnhance
import logging
from .reader_utils import DataReader
logger = logging.getLogger(__name__)
python_ver = sys.version_info
class KineticsReader(DataReader):
"""
Data reader for kinetics dataset of two format mp4 and pkl.
1. mp4, the original format of kinetics400
2. pkl, the mp4 was decoded previously and stored as pkl
    In both cases, load the data, and then get the frame data as numpy arrays and the label as an integer.
dataset cfg: format
num_classes
seg_num
short_size
target_size
num_reader_threads
buf_size
image_mean
image_std
batch_size
list
"""
def __init__(self, name, mode, cfg):
super(KineticsReader, self).__init__(name, mode, cfg)
self.format = cfg.MODEL.format
self.num_classes = self.get_config_from_sec('model', 'num_classes')
self.seg_num = self.get_config_from_sec('model', 'seg_num')
self.seglen = self.get_config_from_sec('model', 'seglen')
self.seg_num = self.get_config_from_sec(mode, 'seg_num', self.seg_num)
self.short_size = self.get_config_from_sec(mode, 'short_size')
self.target_size = self.get_config_from_sec(mode, 'target_size')
self.num_reader_threads = self.get_config_from_sec(mode,
'num_reader_threads')
self.buf_size = self.get_config_from_sec(mode, 'buf_size')
self.fix_random_seed = self.get_config_from_sec(mode, 'fix_random_seed')
self.img_mean = np.array(cfg.MODEL.image_mean).reshape(
[3, 1, 1]).astype(np.float32)
self.img_std = np.array(cfg.MODEL.image_std).reshape(
[3, 1, 1]).astype(np.float32)
# set batch size and file list
self.batch_size = cfg[mode.upper()]['batch_size']
self.filelist = cfg[mode.upper()]['filelist']
# set num_trainers and trainer_id when distributed training is implemented
self.num_trainers = self.get_config_from_sec(mode, 'num_trainers', 1)
self.trainer_id = self.get_config_from_sec(mode, 'trainer_id', 0)
self.use_dali = self.get_config_from_sec(mode, 'use_dali', False)
self.dali_mean = cfg.MODEL.image_mean * (self.seg_num * self.seglen)
self.dali_std = cfg.MODEL.image_std * (self.seg_num * self.seglen)
if self.mode == 'infer':
self.video_path = cfg[mode.upper()]['video_path']
else:
self.video_path = ''
if self.fix_random_seed:
random.seed(0)
np.random.seed(0)
self.num_reader_threads = 1
def create_reader(self):
# if use_dali to improve performance
if self.use_dali:
return self.build_dali_reader()
# if set video_path for inference mode, just load this single video
if (self.mode == 'infer') and (self.video_path != ''):
# load video from file stored at video_path
_reader = self._inference_reader_creator(
self.video_path,
self.mode,
seg_num=self.seg_num,
seglen=self.seglen,
short_size=self.short_size,
target_size=self.target_size,
img_mean=self.img_mean,
img_std=self.img_std)
else:
assert os.path.exists(self.filelist), \
'{} not exist, please check the data list'.format(self.filelist)
_reader = self._reader_creator(self.filelist, self.mode, seg_num=self.seg_num, seglen = self.seglen, \
short_size = self.short_size, target_size = self.target_size, \
img_mean = self.img_mean, img_std = self.img_std, \
shuffle = (self.mode == 'train'), \
num_threads = self.num_reader_threads, \
buf_size = self.buf_size, format = self.format)
def _batch_reader():
batch_out = []
for imgs, label in _reader():
if imgs is None:
continue
batch_out.append((imgs, label))
if len(batch_out) == self.batch_size:
yield batch_out
batch_out = []
return _batch_reader
def _inference_reader_creator(self, video_path, mode, seg_num, seglen,
short_size, target_size, img_mean, img_std):
def reader():
try:
imgs = mp4_loader(video_path, seg_num, seglen, mode)
if len(imgs) < 1:
logger.error('{} frame length {} less than 1.'.format(
video_path, len(imgs)))
yield None, None
except:
                logger.error('Error when loading {}'.format(video_path))
yield None, None
imgs_ret = imgs_transform(imgs, mode, seg_num, seglen, short_size,
target_size, img_mean, img_std)
label_ret = video_path
yield imgs_ret, label_ret
return reader
def _reader_creator(self,
pickle_list,
mode,
seg_num,
seglen,
short_size,
target_size,
img_mean,
img_std,
shuffle=False,
num_threads=1,
buf_size=1024,
format='pkl'):
def decode_mp4(sample, mode, seg_num, seglen, short_size, target_size,
img_mean, img_std):
sample = sample[0].split(' ')
mp4_path = sample[0]
# when infer, we store vid as label
label = int(sample[1])
try:
imgs = mp4_loader(mp4_path, seg_num, seglen, mode)
if len(imgs) < 1:
logger.error('{} frame length {} less than 1.'.format(
mp4_path, len(imgs)))
return None, None
except:
logger.error('Error when loading {}'.format(mp4_path))
return None, None
return imgs_transform(imgs, mode, seg_num, seglen, \
short_size, target_size, img_mean, img_std, name = self.name), label
def decode_pickle(sample, mode, seg_num, seglen, short_size,
target_size, img_mean, img_std):
pickle_path = sample[0]
try:
if python_ver < (3, 0):
data_loaded = pickle.load(open(pickle_path, 'rb'))
else:
data_loaded = pickle.load(
open(pickle_path, 'rb'), encoding='bytes')
vid, label, frames = data_loaded
if len(frames) < 1:
logger.error('{} frame length {} less than 1.'.format(
pickle_path, len(frames)))
return None, None
except:
logger.info('Error when loading {}'.format(pickle_path))
return None, None
if mode == 'train' or mode == 'valid' or mode == 'test':
ret_label = label
elif mode == 'infer':
ret_label = vid
imgs = video_loader(frames, seg_num, seglen, mode)
return imgs_transform(imgs, mode, seg_num, seglen, \
short_size, target_size, img_mean, img_std, name = self.name), ret_label
def reader_():
with open(pickle_list) as flist:
full_lines = [line.strip() for line in flist]
if self.mode == 'train':
if (not hasattr(reader_, 'seed')):
reader_.seed = 0
random.Random(reader_.seed).shuffle(full_lines)
print("reader shuffle seed", reader_.seed)
if reader_.seed is not None:
reader_.seed += 1
per_node_lines = int(
math.ceil(len(full_lines) * 1.0 / self.num_trainers))
total_lines = per_node_lines * self.num_trainers
                # pad full_lines so that the total is evenly divisible
full_lines += full_lines[:(total_lines - len(full_lines))]
assert len(full_lines) == total_lines
# trainer get own sample
lines = full_lines[self.trainer_id:total_lines:
self.num_trainers]
logger.info("trainerid %d, trainer_count %d" %
(self.trainer_id, self.num_trainers))
logger.info(
"read images from %d, length: %d, lines length: %d, total: %d"
% (self.trainer_id * per_node_lines, per_node_lines,
len(lines), len(full_lines)))
assert len(lines) == per_node_lines
for line in lines:
pickle_path = line.strip()
yield [pickle_path]
if format == 'pkl':
decode_func = decode_pickle
elif format == 'mp4':
decode_func = decode_mp4
else:
raise "Not implemented format {}".format(format)
mapper = functools.partial(
decode_func,
mode=mode,
seg_num=seg_num,
seglen=seglen,
short_size=short_size,
target_size=target_size,
img_mean=img_mean,
img_std=img_std)
return paddle.reader.xmap_readers(mapper, reader_, num_threads,
buf_size)
def build_dali_reader(self):
"""
build dali training reader
"""
def reader_():
with open(self.filelist) as flist:
full_lines = [line for line in flist]
if self.mode == 'train':
if (not hasattr(reader_, 'seed')):
reader_.seed = 0
random.Random(reader_.seed).shuffle(full_lines)
print("reader shuffle seed", reader_.seed)
if reader_.seed is not None:
reader_.seed += 1
per_node_lines = int(
math.ceil(len(full_lines) * 1.0 / self.num_trainers))
total_lines = per_node_lines * self.num_trainers
                # pad full_lines so that the total is evenly divisible
full_lines += full_lines[:(total_lines - len(full_lines))]
assert len(full_lines) == total_lines
# trainer get own sample
lines = full_lines[self.trainer_id:total_lines:
self.num_trainers]
assert len(lines) == per_node_lines
logger.info("trainerid %d, trainer_count %d" %
(self.trainer_id, self.num_trainers))
logger.info(
"read images from %d, length: %d, lines length: %d, total: %d"
% (self.trainer_id * per_node_lines, per_node_lines,
len(lines), len(full_lines)))
video_files = ''
for item in lines:
video_files += item
tf = tempfile.NamedTemporaryFile()
tf.write(str.encode(video_files))
tf.flush()
video_files = tf.name
device_id = int(os.getenv('FLAGS_selected_gpus', 0))
print('---------- device id -----------', device_id)
if self.mode == 'train':
pipe = VideoPipe(
batch_size=self.batch_size,
num_threads=1,
device_id=device_id,
file_list=video_files,
sequence_length=self.seg_num * self.seglen,
seg_num=self.seg_num,
seg_length=self.seglen,
resize_shorter_scale=self.short_size,
crop_target_size=self.target_size,
is_training=(self.mode == 'train'),
dali_mean=self.dali_mean,
dali_std=self.dali_std)
else:
pipe = VideoTestPipe(
batch_size=self.batch_size,
num_threads=1,
device_id=device_id,
file_list=video_files,
sequence_length=self.seg_num * self.seglen,
seg_num=self.seg_num,
seg_length=self.seglen,
resize_shorter_scale=self.short_size,
crop_target_size=self.target_size,
is_training=(self.mode == 'train'),
dali_mean=self.dali_mean,
dali_std=self.dali_std)
logger.info(
'initializing dataset, it will take several minutes if it is too large .... '
)
video_loader = DALIGenericIterator(
[pipe], ['image', 'label'],
len(lines),
dynamic_shape=True,
auto_reset=True)
return video_loader
dali_reader = reader_()
def ret_reader():
for data in dali_reader:
yield data[0]['image'], data[0]['label']
return ret_reader
class VideoPipe(Pipeline):
def __init__(self,
batch_size,
num_threads,
device_id,
file_list,
sequence_length,
seg_num,
seg_length,
resize_shorter_scale,
crop_target_size,
is_training=False,
initial_prefetch_size=10,
num_shards=1,
shard_id=0,
dali_mean=0.,
dali_std=1.0):
super(VideoPipe, self).__init__(batch_size, num_threads, device_id)
self.input = ops.VideoReader(
device="gpu",
file_list=file_list,
sequence_length=sequence_length,
seg_num=seg_num,
seg_length=seg_length,
is_training=is_training,
num_shards=num_shards,
shard_id=shard_id,
random_shuffle=is_training,
initial_fill=initial_prefetch_size)
        # the sequence data read by ops.VideoReader is of shape [F, H, W, C]
# Because the ops.Resize does not support sequence data,
# it will be transposed into [H, W, F, C],
# then reshaped to [H, W, FC], and then resized like a 2-D image.
self.transpose = ops.Transpose(device="gpu", perm=[1, 2, 0, 3])
self.reshape = ops.Reshape(
device="gpu", rel_shape=[1.0, 1.0, -1], layout='HWC')
self.resize = ops.Resize(
device="gpu", resize_shorter=resize_shorter_scale)
# crops and mirror are applied by ops.CropMirrorNormalize.
        # Normalization will be implemented in Paddle due to the difficulty of dimension broadcasting;
        # it is not certain that DALI broadcasts the dimensions correctly, so the Paddle op is used instead.
self.pos_rng_x = ops.Uniform(range=(0.0, 1.0))
self.pos_rng_y = ops.Uniform(range=(0.0, 1.0))
self.mirror_generator = ops.Uniform(range=(0.0, 1.0))
self.cast_mirror = ops.Cast(dtype=types.DALIDataType.INT32)
self.crop_mirror_norm = ops.CropMirrorNormalize(
device="gpu",
crop=[crop_target_size, crop_target_size],
mean=dali_mean,
std=dali_std)
self.reshape_back = ops.Reshape(
device="gpu",
shape=[
seg_num, seg_length * 3, crop_target_size, crop_target_size
],
layout='FCHW')
self.cast_label = ops.Cast(device="gpu", dtype=types.DALIDataType.INT64)
def define_graph(self):
output, label = self.input(name="Reader")
output = self.transpose(output)
output = self.reshape(output)
output = self.resize(output)
output = output / 255.
pos_x = self.pos_rng_x()
pos_y = self.pos_rng_y()
mirror_flag = self.mirror_generator()
mirror_flag = (mirror_flag > 0.5)
mirror_flag = self.cast_mirror(mirror_flag)
#output = self.crop(output, crop_pos_x=pos_x, crop_pos_y=pos_y)
output = self.crop_mirror_norm(
output, crop_pos_x=pos_x, crop_pos_y=pos_y, mirror=mirror_flag)
output = self.reshape_back(output)
label = self.cast_label(label)
return output, label
class VideoTestPipe(Pipeline):
def __init__(self,
batch_size,
num_threads,
device_id,
file_list,
sequence_length,
seg_num,
seg_length,
resize_shorter_scale,
crop_target_size,
is_training=False,
initial_prefetch_size=10,
num_shards=1,
shard_id=0,
dali_mean=0.,
dali_std=1.0):
super(VideoTestPipe, self).__init__(batch_size, num_threads, device_id)
self.input = ops.VideoReader(
device="gpu",
file_list=file_list,
sequence_length=sequence_length,
seg_num=seg_num,
seg_length=seg_length,
is_training=is_training,
num_shards=num_shards,
shard_id=shard_id,
random_shuffle=is_training,
initial_fill=initial_prefetch_size)
        # the sequence data read by ops.VideoReader is of shape [F, H, W, C]
# Because the ops.Resize does not support sequence data,
# it will be transposed into [H, W, F, C],
# then reshaped to [H, W, FC], and then resized like a 2-D image.
self.transpose = ops.Transpose(device="gpu", perm=[1, 2, 0, 3])
self.reshape = ops.Reshape(
device="gpu", rel_shape=[1.0, 1.0, -1], layout='HWC')
self.resize = ops.Resize(
device="gpu", resize_shorter=resize_shorter_scale)
# crops and mirror are applied by ops.CropMirrorNormalize.
        # Normalization will be implemented in Paddle due to the difficulty of dimension broadcasting;
        # it is not certain that DALI broadcasts the dimensions correctly, so the Paddle op is used instead.
self.crop_mirror_norm = ops.CropMirrorNormalize(
device="gpu",
crop=[crop_target_size, crop_target_size],
crop_pos_x=0.5,
crop_pos_y=0.5,
mirror=0,
mean=dali_mean,
std=dali_std)
self.reshape_back = ops.Reshape(
device="gpu",
shape=[
seg_num, seg_length * 3, crop_target_size, crop_target_size
],
layout='FCHW')
self.cast_label = ops.Cast(device="gpu", dtype=types.DALIDataType.INT64)
def define_graph(self):
output, label = self.input(name="Reader")
output = self.transpose(output)
output = self.reshape(output)
output = self.resize(output)
output = output / 255.
#output = self.crop(output, crop_pos_x=pos_x, crop_pos_y=pos_y)
output = self.crop_mirror_norm(output)
output = self.reshape_back(output)
label = self.cast_label(label)
return output, label
def imgs_transform(imgs,
mode,
seg_num,
seglen,
short_size,
target_size,
img_mean,
img_std,
name=''):
imgs = group_scale(imgs, short_size)
if mode == 'train':
if name == "TSM":
imgs = group_multi_scale_crop(imgs, short_size)
imgs = group_random_crop(imgs, target_size)
imgs = group_random_flip(imgs)
else:
imgs = group_center_crop(imgs, target_size)
np_imgs = (np.array(imgs[0]).astype('float32').transpose(
(2, 0, 1))).reshape(1, 3, target_size, target_size) / 255
for i in range(len(imgs) - 1):
img = (np.array(imgs[i + 1]).astype('float32').transpose(
(2, 0, 1))).reshape(1, 3, target_size, target_size) / 255
np_imgs = np.concatenate((np_imgs, img))
imgs = np_imgs
imgs -= img_mean
imgs /= img_std
imgs = np.reshape(imgs, (seg_num, seglen * 3, target_size, target_size))
return imgs
def group_multi_scale_crop(img_group, target_size, scales=None, \
max_distort=1, fix_crop=True, more_fix_crop=True):
scales = scales if scales is not None else [1, .875, .75, .66]
input_size = [target_size, target_size]
im_size = img_group[0].size
# get random crop offset
def _sample_crop_size(im_size):
image_w, image_h = im_size[0], im_size[1]
base_size = min(image_w, image_h)
crop_sizes = [int(base_size * x) for x in scales]
crop_h = [
input_size[1] if abs(x - input_size[1]) < 3 else x
for x in crop_sizes
]
crop_w = [
input_size[0] if abs(x - input_size[0]) < 3 else x
for x in crop_sizes
]
pairs = []
for i, h in enumerate(crop_h):
for j, w in enumerate(crop_w):
if abs(i - j) <= max_distort:
pairs.append((w, h))
crop_pair = random.choice(pairs)
if not fix_crop:
w_offset = random.randint(0, image_w - crop_pair[0])
h_offset = random.randint(0, image_h - crop_pair[1])
else:
w_step = (image_w - crop_pair[0]) / 4
h_step = (image_h - crop_pair[1]) / 4
ret = list()
ret.append((0, 0)) # upper left
if w_step != 0:
ret.append((4 * w_step, 0)) # upper right
if h_step != 0:
ret.append((0, 4 * h_step)) # lower left
if h_step != 0 and w_step != 0:
ret.append((4 * w_step, 4 * h_step)) # lower right
if h_step != 0 or w_step != 0:
ret.append((2 * w_step, 2 * h_step)) # center
if more_fix_crop:
ret.append((0, 2 * h_step)) # center left
ret.append((4 * w_step, 2 * h_step)) # center right
ret.append((2 * w_step, 4 * h_step)) # lower center
ret.append((2 * w_step, 0 * h_step)) # upper center
ret.append((1 * w_step, 1 * h_step)) # upper left quarter
ret.append((3 * w_step, 1 * h_step)) # upper right quarter
ret.append((1 * w_step, 3 * h_step)) # lower left quarter
                ret.append((3 * w_step, 3 * h_step))  # lower right quarter
w_offset, h_offset = random.choice(ret)
return crop_pair[0], crop_pair[1], w_offset, h_offset
crop_w, crop_h, offset_w, offset_h = _sample_crop_size(im_size)
crop_img_group = [
img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h))
for img in img_group
]
ret_img_group = [
img.resize((input_size[0], input_size[1]), Image.BILINEAR)
for img in crop_img_group
]
return ret_img_group
def group_random_crop(img_group, target_size):
w, h = img_group[0].size
th, tw = target_size, target_size
    assert (w >= target_size) and (h >= target_size), \
        "image width({}) and height({}) should be larger than crop size({})".format(w, h, target_size)
out_images = []
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
for img in img_group:
if w == tw and h == th:
out_images.append(img)
else:
out_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
return out_images
def group_random_flip(img_group):
v = random.random()
if v < 0.5:
ret = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in img_group]
return ret
else:
return img_group
def group_center_crop(img_group, target_size):
img_crop = []
for img in img_group:
w, h = img.size
th, tw = target_size, target_size
        assert (w >= target_size) and (h >= target_size), \
            "image width({}) and height({}) should be larger than crop size({})".format(w, h, target_size)
x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
img_crop.append(img.crop((x1, y1, x1 + tw, y1 + th)))
return img_crop
def group_scale(imgs, target_size):
resized_imgs = []
for i in range(len(imgs)):
img = imgs[i]
w, h = img.size
if (w <= h and w == target_size) or (h <= w and h == target_size):
resized_imgs.append(img)
continue
if w < h:
ow = target_size
oh = int(target_size * 4.0 / 3.0)
resized_imgs.append(img.resize((ow, oh), Image.BILINEAR))
else:
oh = target_size
ow = int(target_size * 4.0 / 3.0)
resized_imgs.append(img.resize((ow, oh), Image.BILINEAR))
return resized_imgs
def imageloader(buf):
if isinstance(buf, str):
img = Image.open(StringIO(buf))
else:
img = Image.open(BytesIO(buf))
return img.convert('RGB')
def video_loader(frames, nsample, seglen, mode):
videolen = len(frames)
average_dur = int(videolen / nsample)
imgs = []
for i in range(nsample):
idx = 0
if mode == 'train':
if average_dur >= seglen:
idx = random.randint(0, average_dur - seglen)
idx += i * average_dur
elif average_dur >= 1:
idx += i * average_dur
else:
idx = i
else:
if average_dur >= seglen:
idx = (average_dur - seglen) // 2
idx += i * average_dur
elif average_dur >= 1:
idx += i * average_dur
else:
idx = i
for jj in range(idx, idx + seglen):
imgbuf = frames[int(jj % videolen)]
img = imageloader(imgbuf)
imgs.append(img)
return imgs
def mp4_loader(filepath, nsample, seglen, mode):
cap = cv2.VideoCapture(filepath)
videolen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
sampledFrames = []
for i in range(videolen):
ret, frame = cap.read()
        # some frames may fail to decode (e.g., the first frame may be empty)
        if not ret:
continue
img = frame[:, :, ::-1]
sampledFrames.append(img)
average_dur = int(len(sampledFrames) / nsample)
imgs = []
for i in range(nsample):
idx = 0
if mode == 'train':
if average_dur >= seglen:
idx = random.randint(0, average_dur - seglen)
idx += i * average_dur
elif average_dur >= 1:
idx += i * average_dur
else:
idx = i
else:
if average_dur >= seglen:
idx = (average_dur - 1) // 2
idx += i * average_dur
elif average_dur >= 1:
idx += i * average_dur
else:
idx = i
for jj in range(idx, idx + seglen):
imgbuf = sampledFrames[int(jj % len(sampledFrames))]
img = Image.fromarray(imgbuf, mode='RGB')
imgs.append(img)
return imgs
``` |
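A minimal sketch of the CPU reader path above, using synthetic frames in place of decoded video; the ImageNet mean/std values are an assumption, as is treating the helper functions above as importable:

```python
import numpy as np
from PIL import Image

# Eight fake RGB frames of size 340x256 (width x height) stand in for a decoded clip.
frames = [Image.fromarray(np.random.randint(0, 255, (256, 340, 3), dtype=np.uint8))
          for _ in range(8)]
img_mean = np.array([0.485, 0.456, 0.406], dtype=np.float32).reshape([3, 1, 1])
img_std = np.array([0.229, 0.224, 0.225], dtype=np.float32).reshape([3, 1, 1])

out = imgs_transform(frames, mode='test', seg_num=8, seglen=1,
                     short_size=256, target_size=224,
                     img_mean=img_mean, img_std=img_std)
print(out.shape)  # (8, 3, 224, 224)
```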
{
"source": "Joejiong/Paddle_AST_Infrastructure",
"score": 2
} |
#### File: Paddle_AST_Infrastructure/api_upgrade_src/modify_transformer.py
```python
import astor
import gast
import inspect
from api_upgrade_src.node_operation import get_attr_full_name, delete_keywords_from, rename_keywords_to, add_keywords_to
from api_upgrade_src.upgrade_models_api_utils import print_info
class AddParamTransformer(gast.NodeTransformer):
def __init__(self, node):
assert isinstance(node, gast.AST)
self.root = node
self.modify_dict = ""
def add(self, modify_dict):
self.modify_dict = modify_dict
self.visit(self.root)
def visit_Call(self, node):
attribute_node = node.func
attr_full_name = get_attr_full_name(attribute_node)
if attr_full_name in self.modify_dict:
if "add" not in self.modify_dict[attr_full_name]:
return node
add_dict = self.modify_dict[attr_full_name]["add"]
add_keywords_to(node, add_dict)
for param in add_dict:
print_info("\033[1;33mAdd Params (%s) to API (%s)\033[0m" % (param, attr_full_name))
return node
class DelParamTransformer(gast.NodeTransformer):
def __init__(self, node):
assert isinstance(node, gast.AST)
self.root = node
self.modify_dict = ""
def delete(self, modify_dict):
self.modify_dict = modify_dict
self.visit(self.root)
def visit_Call(self, node):
attribute_node = node.func
attr_full_name = get_attr_full_name(attribute_node)
if attr_full_name in self.modify_dict:
if "delete" not in self.modify_dict[attr_full_name]:
return node
delete_dict = self.modify_dict[attr_full_name]["delete"]
delete_keywords_from(node, delete_dict)
for param in delete_dict:
print_info("\033[1;33mDelete Params (%s) from API (%s)\033[0m" % (param, attr_full_name))
return node
class RenameParamTransformer(gast.NodeTransformer):
def __init__(self, node):
assert isinstance(node, gast.AST)
self.root = node
self.modify_dict = ""
def replace(self, modify_dict):
self.modify_dict = modify_dict
self.visit(self.root)
def visit_Call(self, node):
attribute_node = node.func
attr_full_name = get_attr_full_name(attribute_node)
if attr_full_name in self.modify_dict:
if "rename" not in self.modify_dict[attr_full_name]:
return node
modify_dict = self.modify_dict[attr_full_name]["rename"]
rename_keywords_to(node, modify_dict)
for param in modify_dict:
print_info("\033[1;33mRename Params (%s->%s) in API (%s)\033[0m" % (param, modify_dict[param], attr_full_name))
return node
class RepAttributeTransformer(gast.NodeTransformer):
def __init__(self, node):
assert isinstance(node, gast.AST)
self.root = node
self.modify_dict = ""
def replace(self, modify_dict):
self.modify_dict = modify_dict
self.visit(self.root)
def visit_Attribute(self, node):
self.generic_visit(node)
attr_full_name = get_attr_full_name(node)
if attr_full_name in self.modify_dict:
new_api_name = self.modify_dict[attr_full_name]['name']
new_api_node = gast.parse(new_api_name).body[0].value
print_info("\033[1;33mUpgrade API (%s->%s)\033[0m" % (attr_full_name, new_api_name))
return new_api_node
return node
```
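A sketch of how these transformers might be driven end to end. The example source line, the modify_dict keys, and the round trip through astor are illustrative assumptions (the exact key format depends on get_attr_full_name, which lives in node_operation and is not shown here):

```python
import gast
import astor

source = "out = paddle.fluid.layers.fc(input=data, size=10)\n"
# Hypothetical upgrade rule: rename the `input` keyword to `x`.
modify_dict = {"paddle.fluid.layers.fc": {"rename": {"input": "x"}}}

tree = gast.parse(source)
RenameParamTransformer(tree).replace(modify_dict)

# Convert back to a plain ast.AST before pretty-printing with astor.
print(astor.to_source(gast.gast_to_ast(tree)))
```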
#### File: api_upgrade_src/script/convert_dict.py
```python
import sys
import json
def _num(s):
try:
return int(s)
except ValueError:
return float(s)
def _string2bool(string):
d = {'True': True, 'False': False}
return d.get(string, string)
def load_src_json(f):
try:
with open(f, 'r') as fr:
json_dict = json.load(fr)
except:
json_dict = dict()
return json_dict
def get_key_value(term):
value = term.strip("\"").strip("\'").split("|")
key_value = {}
for v in value:
prek,prev = v.split('=')
if prev == 'None':
prev = None
elif prev == 'False' or prev == 'True':
prev = _string2bool(prev)
elif prev.isnumeric():
prev = _num(prev)
key_value[prek] = prev
return key_value
def get_key(term):
value = term.strip("\"").strip("\'").split("|")
key_value = {v: None for v in value}
return key_value
def check_conflict(add_dict, rename_dict):
rename_list = rename_dict.values()
add_list = add_dict.keys()
inter = list(set(rename_list).intersection(set(add_list)))
if not inter:
return add_dict, rename_dict
else:
for c in inter:
add_dict.pop(c)
return add_dict, rename_dict
def convert_dict(f):
json_dict = load_src_json(f)
upgrade_api = dict()
if not json_dict:
print("parser json dict error")
exit(1)
json_dict_src = json_dict["Sheet1"]
for term in json_dict_src:
name = term.get("paddle.17", None)
upgrade_name = term.get("paddle2.0", None)
assert name, "old version doesn't exist"
assert upgrade_name, "new version doesn't exist"
add_dict = dict()
if "add" in term:
add_dict = get_key_value(term["add"])
rename_dict = dict()
if "rename" in term:
rename_dict = get_key_value(term["rename"])
delete_dict = dict()
if "delete" in term:
delete_dict = get_key(term["delete"])
if rename_dict and add_dict:
add_dict, rename_dict = check_conflict(add_dict, rename_dict)
upgrade_api[name] = dict()
upgrade_api[name]["name"] = upgrade_name
if delete_dict:
upgrade_api[name]["delete"] = delete_dict
if add_dict:
upgrade_api[name]["add"] = add_dict
if rename_dict:
upgrade_api[name]["rename"] = rename_dict
res = json.dumps(upgrade_api, sort_keys=True, indent=4)
return res
if __name__ == "__main__":
# ../dict/data.json
json_file = convert_dict(sys.argv[1])
with open("../dict/modify.dict", "w") as outfile:
outfile.write(json_file)
``` |
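For reference, a sketch of the per-row format this script expects and what the parsing helpers return; the API names and parameter strings are invented for illustration:

```python
# One row of the "Sheet1" list exported from the upgrade spreadsheet (illustrative).
row = {
    "paddle.17": "fluid.layers.fc",
    "paddle2.0": "paddle.static.nn.fc",
    "add": "name=None|num_flatten_dims=1",
    "rename": "input=x|param_attr=weight_attr",
    "delete": "act",
}

print(get_key_value(row["add"]))     # {'name': None, 'num_flatten_dims': 1}
print(get_key_value(row["rename"]))  # {'input': 'x', 'param_attr': 'weight_attr'}
print(get_key(row["delete"]))        # {'act': None}
```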
{
"source": "Joejiong/pytorch2paddle",
"score": 3
} |
#### File: pytorch2paddle/Joe_nn_transfer/paddle_leNet.py
```python
import paddle
import paddle.fluid as fluid
import numpy as np
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear, Conv2DTranspose
from paddle.fluid.dygraph.base import to_variable
# K.set_image_data_format('channels_first')
# Define the LeNet network structure
class LeNet(fluid.dygraph.Layer):
def __init__(self, name_scope, num_classes=1):
super(LeNet, self).__init__(name_scope)
self.conv1 = Conv2D(num_channels=1, num_filters=6, filter_size=5, act='relu')
self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
self.conv2 = Conv2D(num_channels=6, num_filters=16, filter_size=5, act='relu')
self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
# self.conv3 = Conv2D(num_channels=16, num_filters=120, filter_size=4, act='relu')
        # Fully connected layers; the last layer outputs num_classes scores
self.fc1 = Linear(input_dim=16*5*5, output_dim=120, act='relu')
self.fc2 = Linear(input_dim=120, output_dim=84, act='relu')
self.fc3 = Linear(input_dim=84, output_dim=num_classes)
    # Forward pass of the network
    def forward(self, x):
        x = self.conv1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        # note: the commented-out conv3 above is not part of this forward pass
        x = fluid.layers.reshape(x, [x.shape[0], -1])
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
```
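A quick forward-pass check of the network above under the dygraph guard; the 32x32 input size is an assumption chosen so the 16*5*5 flatten matches fc1, in the spirit of the classic LeNet:

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable

with fluid.dygraph.guard():
    model = LeNet("lenet", num_classes=10)
    x = to_variable(np.random.randn(4, 1, 32, 32).astype('float32'))
    logits = model(x)
    print(logits.shape)  # [4, 10] with the forward pass ending at fc3
```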
#### File: pytorch2paddle/Joe_nn_transfer/pytorch2fluid.py
```python
from __future__ import print_function
from collections import OrderedDict
import numpy as np
import h5py
import torch
import paddle
import paddle.fluid as fluid
from . import util
PADDLE_MOVING_MEAN_KEY = 'moving_mean'
PADDLE_MOVING_VARIANCE_KEY = 'moving_variance'
PADDLE_EPSILON = 1e-3
PYTORCH_EPSILON = 1e-5
def check_for_missing_layers(paddle_layer_names, pytorch_layer_names, verbose):
if verbose:
print("Layer names in PyTorch state_dict", pytorch_layer_names)
print("Layer names in paddle state_dict", paddle_layer_names)
if not all(x in paddle_layer_names for x in pytorch_layer_names):
missing_layers = list(set(pytorch_layer_names) - set(paddle_layer_names))
raise Exception("Missing layer(s) in paddle HDF5 that are present" +
" in state_dict: {}".format(missing_layers))
def pytorch_to_paddle(pytorch_model, paddle_model,
flip_filters=False, flip_channels=None, verbose=True):
paddle_dict = paddle_model.state_dict()
fluid.save_dygraph(paddle_dict, "save_temp")
pytorch_input_state_dict = pytorch_model.state_dict()
pytorch_layer_names = util.state_dict_layer_names(pytorch_input_state_dict)
    # NOTE: the code below indexes f like a Keras-style HDF5 file, so an HDF5
    # handle is assumed here; this converter is still an incomplete port (see TODOs).
    with h5py.File('save_temp.h5', 'a') as f:
model_weights = f['model_weights']
target_layer_names = list(map(str, model_weights.keys()))
check_for_missing_layers(
target_layer_names,
pytorch_layer_names,
verbose)
for layer in pytorch_layer_names:
paddle_h5_layer_param = util.dig_to_params_pf(model_weights[layer])
weight_key = layer + '.weight'
bias_key = layer + '.bias'
running_mean_key = layer + '.running_mean'
running_var_key = layer + '.running_var'
# Load weights (or other learned parameters)
if weight_key in pytorch_input_state_dict:
weights = pytorch_input_state_dict[weight_key].numpy()
weights = convert_weights(weights,
to_pytorch=False,
flip_filters=flip_filters,
flip_channels=flip_channels)
# Load bias
if bias_key in pytorch_input_state_dict:
bias = pytorch_input_state_dict[bias_key].numpy()
if running_var_key in pytorch_input_state_dict:
paddle_h5_layer_param[bias_key][:] = bias
else:
paddle_h5_layer_param[bias_key][:] = bias
# Load batch normalization running mean
if running_mean_key in pytorch_input_state_dict:
running_mean = pytorch_input_state_dict[running_mean_key].numpy()
paddle_h5_layer_param[PADDLE_MOVING_MEAN_KEY][:] = running_mean
# Load batch normalization running variance
if running_var_key in pytorch_input_state_dict:
running_var = pytorch_input_state_dict[running_var_key].numpy()
# account for difference in epsilon used
running_var += PYTORCH_EPSILON - PADDLE_EPSILON
paddle_h5_layer_param[PADDLE_MOVING_VARIANCE_KEY][:] = running_var
# pytorch_model.load_state_dict(state_dict)
    paddle_model.load_weights('temp.h5')  # FIXME: leftover Keras-style call; not a Paddle dygraph API
# TODO
def convert_weights(weights, to_pytorch=False, flip_filters=False, flip_channels=False):
if to_pytorch:
# TODO
weights = weights
else:
# TODO
weights = weights
return weights
``` |
{
"source": "joejnke/security",
"score": 4
} |
#### File: joejnke/security/caesar_cipher.py
```python
# A mapping from letter to integer
L2I = dict(zip("ABCDEFGHIJKLMNOPQRSTUVWXYZ", range(26)))
# A mapping from integer to letter
I2L = dict(zip(range(26), "ABCDEFGHIJKLMNOPQRSTUVWXYZ"))
'''-------------------------------------------------------------------------------
Function: Get the encryption or decryption key, if it is in the range 1 - 26
Returns: Key
Arguments: None
----------------------------------------------------------------------------------'''
def getKey():
    while True:
        print('Enter the key for encryption (between 1 - 26)')
        key = int(input())
        if (key >= 1 and key <= 26):
            return key
'''----------------------------------------------------------------------
Function: Encrypts a text input with a given key using caesar cipher
Returns: Ciphered Text
Arguments: None
-------------------------------------------------------------------------'''
def encryptCaesarCipher():
plainText = input("Enter text to be encrypted here:")
key = getKey()
cipherText =''
for letter in plainText.upper():
if letter.isalpha():
cipherText += I2L[ (L2I[letter] + key)%26 ]
else:
cipherText += letter
return cipherText
'''----------------------------------------------------------------------
Function: Decrypts a ciphered text with a given key using caesar cipher
Returns: Plain Text
Arguments: None
-------------------------------------------------------------------------'''
def decryptCaesarCipher():
cipherText = input("Enter text to be decrypted here:")
key = getKey()
print (key)
plainText = ''
for letter in cipherText.upper():
if letter.isalpha():
plainText += I2L[ (L2I[letter] - key)%26 ]
else:
plainText += letter
return plainText
``` |
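A non-interactive sketch of the same shift arithmetic, assuming the L2I/I2L tables above are importable; the helper name is hypothetical:

```python
def caesar_shift(text, key):
    # Same modular shift as encryptCaesarCipher, but taking arguments
    # instead of prompting; a negative key decrypts.
    out = ''
    for letter in text.upper():
        if letter.isalpha():
            out += I2L[(L2I[letter] + key) % 26]
        else:
            out += letter
    return out

print(caesar_shift("ATTACK AT DAWN", 3))   # DWWDFN DW GDZQ
print(caesar_shift("DWWDFN DW GDZQ", -3))  # ATTACK AT DAWN
```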
{
"source": "Joejn/myDrive",
"score": 2
} |
#### File: server/apis/users.py
```python
import os
import shutil
from datetime import datetime
from core.consts import DATA_PATH, HOME_DIR, TRASH_DIR
from core.utils import Admin, Database, Password
from flask import json, request
from flask_jwt_extended import jwt_required
from flask_jwt_extended.utils import get_jwt
from flask_restx import Namespace, Resource
api = Namespace("users", description="user related operations")
@api.route("/get_all_users")
class GetAllUsers(Resource):
@api.doc("return all users")
@jwt_required()
def get(self):
id = get_jwt()["id"]
isAdmin = Admin.checkIfAdmin(id)
if not isAdmin:
return "Unauthorized", 401
db = Database()
data = []
users = db.select(
"SELECT id, username, firstname, lastname, birthday, email FROM public.users ORDER BY id;")
for item in users:
user_id, username, firstname, lastname, birthday, email = item
data.append({
"id": user_id,
"username": username,
"firstname": firstname,
"lastname": lastname,
"birthday": birthday,
"email": email
})
return json.jsonify(data)
@api.route("/add_user")
class AddUser(Resource):
@api.doc("Add a user")
@jwt_required()
def post(self):
id = get_jwt()["id"]
isAdmin = Admin.checkIfAdmin(id)
if not isAdmin:
return "Unauthorized", 401
data = json.loads(request.data)
username, firstname, lastname, birthday, email, password = data.values()
hashed_password = Password.hash(password).decode("utf-8")
db = Database()
statement = """
INSERT INTO public.users(
username, firstname, lastname, birthday, email, password)
VALUES
(%(username)s , %(firstname)s, %(lastname)s, to_date(%(birthday)s, 'mm/dd/yyyy'), %(email)s, %(password)s);
"""
vars = {
"username": username,
"firstname": firstname,
"lastname": lastname,
"birthday": datetime.utcfromtimestamp(birthday).strftime("%m/%d/%y"),
"email": email,
"password": <PASSWORD>
}
db.exec(statement, vars)
# Create home directory for new User ##################
data_dir_of_user = os.path.join(DATA_PATH, username)
os.mkdir(data_dir_of_user)
os.mkdir(os.path.join(data_dir_of_user, HOME_DIR))
os.mkdir(os.path.join(data_dir_of_user, TRASH_DIR))
#######################################################
data = []
users = db.select(
"SELECT id, username, firstname, lastname, birthday, email FROM public.users ORDER BY id;")
for item in users:
user_id, username, firstname, lastname, birthday, email = item
data.append({
"id": user_id,
"username": username,
"firstname": firstname,
"lastname": lastname,
"birthday": birthday,
"email": email
})
return json.jsonify(data)
@api.route("/delete_user")
class DeleteUser(Resource):
@api.doc("Delete a user")
@jwt_required()
def delete(self):
id = get_jwt()["id"]
isAdmin = Admin.checkIfAdmin(id)
if not isAdmin:
return "Unauthorized", 401
data = json.loads(request.data)
user_id = data.get("id")
username = data.get("username")
db = Database()
statement = "DELETE FROM public.users WHERE id=%(id)s;"
db.exec(statement, {"id": user_id})
# Delete home directory from User ##################
data_dir_of_user = os.path.join(DATA_PATH, username)
shutil.rmtree(data_dir_of_user)
#######################################################
data = []
users = db.select(
"SELECT id, username, firstname, lastname, birthday, email FROM public.users ORDER BY id;")
for item in users:
user_id, username, firstname, lastname, birthday, email = item
data.append({
"id": user_id,
"username": username,
"firstname": firstname,
"lastname": lastname,
"birthday": birthday,
"email": email
})
return json.jsonify(data)
@api.route("/get_registerd_users_count")
class GetRegisterdUsersCount(Resource):
@api.doc("Get the count of the registered users")
@jwt_required()
def get(self):
id = get_jwt()["id"]
isAdmin = Admin.checkIfAdmin(id)
if not isAdmin:
return "Unauthorized", 401
db = Database()
user_count = db.select("SELECT COUNT(id) FROM public.users;")[0][0]
body = {
"user_count": user_count
}
return json.jsonify(body)
@api.route("/get_all_usernames_and_ids")
class GetAllUsernamesAndIds(Resource):
@api.doc("return all usernames and there ids")
@jwt_required()
def get(self):
db = Database()
data = []
users = db.select(
"SELECT id, username FROM public.users ORDER BY id;")
for item in users:
user_id, username = item
data.append({
"id": user_id,
"name": username
})
return json.jsonify(data)
``` |
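A hypothetical client-side call against the namespace above; the base URL, route prefix, and token are placeholders that depend on how the API is registered and configured:

```python
import requests

BASE = "http://localhost:5000/users"                  # placeholder
headers = {"Authorization": "Bearer <access_token>"}  # placeholder JWT

resp = requests.get(f"{BASE}/get_all_users", headers=headers)
print(resp.status_code)
print(resp.json())  # list of {id, username, firstname, lastname, birthday, email}
```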
{
"source": "joejoe233/-",
"score": 4
} |
#### File: joejoe233/-/SoftwareBlockchain-demo.py
```python
import hashlib as hasher
import datetime
def create_genesis_block():
'''Constructs a block with index 0 and arbitrary previous hash'''
return Block(0, datetime.datetime.now(), "Genesis Block" ,"0","0","0","0")
# the operations below act on Block objects
class Block():
def __init__(self, index, timestamp, software,company, addr,cite,previous_hash):
self.index = index
self.timestamp = timestamp
self.software = software
self.company = company
self.addr = addr
        self.cite = cite  # citation/reference field
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
sha = hasher.sha256()
sha.update(str(self.index).encode("utf-8")
+ str(self.timestamp).encode("utf-8")
+ str(self.software).encode("utf-8")
+ str(self.company).encode("utf-8")
+ str(self.addr).encode("utf-8")
+ str(self.cite).encode("utf-8")
+ str(self.previous_hash).encode("utf-8")
)
return sha.hexdigest()
# Create the blockchain and add genesis block.
blockchain = [create_genesis_block()]
#print(blockchain[0].cite)
previous_block = blockchain[0]
#print(previous_block)
# How many blocks to add after the genesis block?
#num_of_blocks_to_add = 10
def next_block(last_block):
this_index = last_block.index + 1
this_timestamp = datetime.datetime.now()
#this_data = "Hey! I'm block " + str(this_index)
    this_software = input("Enter the software name for block #{}: ".format(this_index))
    this_company = input("Enter the company name for block #{}: ".format(this_index))
    this_addr = input("Enter the software address for block #{}: ".format(this_index))
    this_cite = input("Enter the citation record for block #{}: ".format(this_index))
return Block(this_index, this_timestamp, this_software, this_company,this_addr,this_cite,last_block.hash)
# Add blocks to the chain.
#for i in range(num_of_blocks_to_add):
while 1:
block_to_add = next_block(previous_block)
blockchain.append(block_to_add)
previous_block = block_to_add
# Tell everyone about it!
print("区块 #{} 成功上链!".format(block_to_add.index))
print("软件名 :{}".format(block_to_add.software))
print("公司名 :{}".format(block_to_add.company))
print("软件地址 :{}".format(block_to_add.addr))
print("引用记录: {}".format(block_to_add.cite))
print("哈希值: {}".format(block_to_add.hash))
print("时间戳: {}\n".format(block_to_add.timestamp))
#存入数据库
# print(blockchain[0].previous_hash)
``` |
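A small sketch of how the chain's integrity could be checked after the fact; the verify helper is an illustration, not part of the demo above:

```python
def verify_chain(chain):
    # Recompute each block's hash and check the previous_hash links.
    for prev, curr in zip(chain, chain[1:]):
        if curr.previous_hash != prev.hash:
            return False
        if curr.hash != curr.hash_block():
            return False
    return True

print(verify_chain(blockchain))  # True for an untampered chain
```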
{
"source": "joejoe233/TrafficClassification",
"score": 3
} |
#### File: joejoe233/TrafficClassification/LoadImage.py
```python
import os
import random
import cv2 as cv
import numpy as np
import re
import copy
classkind= 10
labeldict = {}
def train_image(path,classname=None,labeldicts = None):
global labeldict
count = 0
templabels = [0 for i in range(classkind)]
images = []
labels = []
if labeldicts !=None:
labeldict=labeldicts
imagenamelist = []
if classname==None:
        imagenamelist = [path+"\\"+name for name in os.listdir(path) if name.lower().endswith('jpg')]  # build the list of .jpg files; lower() makes the check case-insensitive
else:
imagenamelist = [path+"\\"+name for name in os.listdir(path) if name.lower().endswith('jpg')and name.lower().startswith(classname)]
random.shuffle(imagenamelist)
    random.shuffle(imagenamelist)  # shuffle the file order randomly
for i in imagenamelist:
        image = cv.imread(i, flags=0)  # read the image as grayscale (flags=0)
        image = image[:, :, np.newaxis]  # add a channel dimension
images.append(image)
pattern = re.compile('^[a-z]+')
vpnpattern = re.compile('(vpn_[a-z]+)')
name = i.split('\\')[-1]
if name.startswith('vpn'):
name = vpnpattern.findall(name.lower())[0]
else:
name = pattern.findall(name.lower())[0]
if name in labeldict:
label = labeldict[name]
labels.append(label)
count +=1
else:
labellength = len(labeldict)
templabel = copy.deepcopy(templabels)
templabel[labellength] = 1
labeldict.update({name:templabel})
label = templabel
labels.append(label)
count += 1
images = np.array(images)
labels = np.array(labels)
if classname!=None:
return images, labels
else:
return images,labels,labeldict
``` |
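A filesystem-free sketch of the label bookkeeping in train_image; the file names are invented examples that match the regexes used above, and onehot_len plays the role of the module-level classkind:

```python
import copy
import re

names = ['chat_001.jpg', 'chat_002.jpg', 'vpn_chat_001.jpg', 'email_001.jpg']
onehot_len = 10  # same role as the module-level classkind
labels, template = {}, [0] * onehot_len
pattern = re.compile('^[a-z]+')
vpnpattern = re.compile('(vpn_[a-z]+)')

for fname in names:
    key = (vpnpattern if fname.startswith('vpn') else pattern).findall(fname.lower())[0]
    if key not in labels:
        onehot = copy.deepcopy(template)
        onehot[len(labels)] = 1
        labels[key] = onehot

print(labels)
# {'chat': [1, 0, ...], 'vpn_chat': [0, 1, 0, ...], 'email': [0, 0, 1, 0, ...]}
```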
{
"source": "joejoeyjoseph/mmctools",
"score": 3
} |
#### File: mmctools/mmctools/mmcdata.py
```python
from math import *
import sys
import collections
import numpy as np
import datetime as dt
import pandas as pd
import xarray
import pickle
from matplotlib import pyplot as plt
from matplotlib import rcParams, cycler
import matplotlib.dates as mdates
from matplotlib.ticker import AutoMinorLocator
# legacy file format
header = """INSTITUTION:{institution:s}
LOCATION:{location:s}
LATITUDE:{latitude:10.4f}
LONGITUDE:{longitude:10.4f}
CODENAME:{codename:s}
CODETYPE:{codetype:s}
CASENAME:{casename:s}
BENCHMARK:{benchmark:s}
LEVELS:{levels:7d}
"""
record = """
DATE:{date:s}
TIME:{time:s}
FRICTION VELOCITY [m/s] = {ustar:10.5f}
SURFACE ROUGHNESS [m] = {z0:10.5f}
SKIN TEMPERATURE [K] = {T0:10.5f}
SURFACE FLUX [Km/s] = {qwall:10.5f}
Z [m] U [m/s] V [m/s] W [m/s] TH [K] P [mbar] TKE [m^2/s^2] TAU11 [m^2/s^2] TAU12 [m^2/s^2] TAU13 [m^2/s^2] TAU22 [m^2/s^2] TAU23 [m^2/s^2] TAU33 [m^2/s^2] HFLUX [Km/s]
"""
datarow = 4*'{:18.3f}' + 2*'{:18.2f}' + '{:18.3f}' + 7*'{:18.5f}' + '\n'
class MMCData():
"""A given set of 'observed' (via instrument or model) timeseries of
U,V,W, and other state variables and any 'derived' (via calculation
methods) data like mean, perturbation, variance, correlations, etc...
that are attributes (could be defined or missing a value) in a given
MMCData instance
"""
def __init__(self,asciifile=None,pklfile=None,pkldata=None,**kwargs):
"""Read ascii data in the legacy MMC format from `asciifile` or
pickled data in list form from `pklfile`. **kwargs can include
convert_ft_to_m=True, or specified_date="YYYY-MM-DD", e.g.
specified_date='2013-11-08' if necessary for specific legacy data
files.
"""
self.description = None
self.records = []
self.dataDict = collections.defaultdict(list)
if asciifile:
with open(asciifile,'r') as f:
data = self._read_ascii(f)
if self.dataSetLength > 0:
self._process_data(data,**kwargs)
elif pklfile or pkldata:
if pkldata is None:
with open(pklfile,'rb') as f:
pkldata = pickle.load(f)
# first item is a dictionary with metadata
self.dataSetLength = len(pkldata) - 1
self.description = pkldata[0]
if self.dataSetLength > 0:
self._process_data(pkldata[1:],**kwargs)
else:
raise ValueError('Need to specify asciifile, pklfile, or pkldata')
def _read_ascii(self,f):
"""Read entire legacy MMC file"""
self.description = read_ascii_header(f)
self.dataSetLength = 0
data = []
while True:
            recordheader = read_ascii_recordheader(f)
            if (recordheader is None) or (len(recordheader) == 0):  # empty record header means end of file; otherwise read a record
break
else:
recordarray = read_ascii_records(f,self.description['levels'])
data.append([recordheader, recordarray])
self.dataSetLength += 1
return data
def _process_data(self,data,convert_ft_to_m=False,specified_date=None,map_to_met_coords=False):
"""Updates dataset description, records, and dataDict"""
time=[]
datetime=[]
z=[]
u=[]
v=[]
w=[]
theta=[]
pres=[]
tke=[]
tau11=[]
tau12=[]
tau13=[]
tau22=[]
tau23=[]
tau33=[]
hflux=[]
for record in data:
recordheader, recordarray = record
self.records.append(recordheader)
time.append(recordheader['time'].strip())
if specified_date is None:
dtstr = recordheader['date'] + "_" + recordheader['time'].strip()
else:
dtstr = '{:s}_{:s}'.format(specified_date,recordheader['time'].strip())
datetime.append(dt.datetime.strptime(dtstr, '%Y-%m-%d_%H:%M:%S'))
z.append(recordarray[:,0])
u.append(recordarray[:,1])
v.append(recordarray[:,2])
w.append(recordarray[:,3])
theta.append(recordarray[:,4])
pres.append(recordarray[:,5])
tke.append(recordarray[:,6])
tau11.append(recordarray[:,7])
tau12.append(recordarray[:,8])
tau13.append(recordarray[:,9])
tau22.append(recordarray[:,10])
tau23.append(recordarray[:,11])
tau33.append(recordarray[:,12])
hflux.append(recordarray[:,13])
assert len(z) == self.dataSetLength
# Re-cast fields as numpy arrays and add to 'dataDict' object attribute
self.dataDict['datetime'] = np.asarray(datetime)
if convert_ft_to_m:
# Convert TTU/SWiFT height to meters from feet ;-(
self.dataDict['z'] = np.asarray(z)*0.3048
else:
# Otherwise expect heights in meters as they should be
self.dataDict['z'] = np.asarray(z)
if map_to_met_coords: #map TTU-sonic (unorth, vwest) coords to standard meteorology coordinates
self.dataDict['u'] = np.asarray(v)
self.dataDict['v'] = -np.asarray(u)
else:
self.dataDict['u'] = np.asarray(u)
self.dataDict['v'] = np.asarray(v)
self.dataDict['w'] = np.asarray(w)
self.dataDict['theta'] = np.asarray(theta)
self.dataDict['pres'] = np.asarray(pres)
self.dataDict['tke'] = np.asarray(tke)
self.dataDict['tau11'] = np.asarray(tau11)
self.dataDict['tau12'] = np.asarray(tau12)
self.dataDict['tau13'] = np.asarray(tau13)
self.dataDict['tau22'] = np.asarray(tau22)
self.dataDict['tau23'] = np.asarray(tau23)
self.dataDict['tau33'] = np.asarray(tau33)
self.dataDict['hflux'] = np.asarray(hflux)
self.dataDict['wspd'] = np.sqrt(self.dataDict['u']**2
+ self.dataDict['v']**2)
self.dataDict['wdir'] = (270.0-np.arctan2(self.dataDict['v'],self.dataDict['u'])*180./np.pi)%360
### The follwing will yield correct results, but sneaky usage of arctan2 where first argument is defined as y-oriented
### self.dataDict['wdir'] = 180. + np.arctan2(self.dataDict['v'],self.dataDict['u'])*180./np.pi
#Declare and initialize to 0 the *_mean arrays
self.dataDict['u_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['v_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['w_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['theta_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['tke_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['hflux_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['uu_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['uv_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['uw_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['vv_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['vw_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['ww_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['wt_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['wspd_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['wdir_mean'] = np.zeros(self.dataDict['u'].shape)
self.dataDict['shear_mean'] = np.zeros(self.dataDict['u'].shape)
def to_pickle(self,pklfile):
"""pickle the entire class instance"""
with open(pklfile,'wb') as f:
pickle.dump(self,f)
def to_dataframe(self):
"""return a multi-indexed pandas dataframe with standard
variables
"""
df = self.to_xarray().to_dataframe()
# the resulting dataframe has an integer multiindex formed by
# range(num_samples) and range(num_levels)
df = df.reset_index().drop(columns=['Times','bottom_top'])
return df.set_index(['datetime','height'])
def to_xarray(self,timedim='Times',heightdim='bottom_top'):
"""return an xarray dataset with standard variables"""
coords = {
'datetime': xarray.DataArray(self.dataDict['datetime'],
name='datetime',
dims=[timedim]),
'height': xarray.DataArray(self.dataDict['z'],
name='height',
dims=[timedim, heightdim],
attrs={'units':'m'}),
}
data_vars = {
'u': xarray.DataArray(self.dataDict['u'],
name='west-east velocity',
dims=[timedim, heightdim],
attrs={'units':'m s-1'}),
'v': xarray.DataArray(self.dataDict['v'],
name='south-north velocity',
dims=[timedim, heightdim],
attrs={'units':'m s-1'}),
'w': xarray.DataArray(self.dataDict['w'],
name='vertical velocity',
dims=[timedim, heightdim],
attrs={'units':'m s-1'}),
'theta': xarray.DataArray(self.dataDict['theta'],
name='potential temperature',
dims=[timedim, heightdim],
attrs={'units':'K'}),
'pres': xarray.DataArray(self.dataDict['pres'],
name='pressure',
dims=[timedim, heightdim],
attrs={'units':'mbar'}),
}
#ds=xarray.decode_cf(xarray.Dataset(data_vars,coords))
ds=xarray.Dataset(data_vars,coords)
#Remove the time dependence of heights by setting heights from each level as their temporal mean
heights=ds['height'].groupby('bottom_top').mean()
Nt,Nz = ds['height'].shape
for i in range(Nt):
ds['height'][i,:]=heights
return ds
def getDataSetDict(self):
return self.description
def getDataSetFieldShape(self):
return self.dataDict['u'].shape
def getRecordDict(self,recNum):
return self.records[recNum]
def setRunningMeans(self,windowLength,levels):
#def getDataSetRunningMean(self,windowLength,levels, start_datetime,stop_datetime):
for k in range(levels):
#print("setRunningMeans: k = {:d}".format(k))
self.dataDict['u_mean'][:,k] = running_mean(self.dataDict['u'][:,k],windowLength)
self.dataDict['v_mean'][:,k] = running_mean(self.dataDict['v'][:,k],windowLength)
self.dataDict['w_mean'][:,k] = running_mean(self.dataDict['w'][:,k],windowLength)
self.dataDict['theta_mean'][:,k] = running_mean(self.dataDict['theta'][:,k],windowLength)
self.dataDict['tke_mean'][:,k] = running_mean(self.dataDict['tke'][:,k],windowLength)
self.dataDict['hflux_mean'][:,k] = running_mean(self.dataDict['hflux'][:,k],windowLength)
self.dataDict['uu_mean'][:,k] = running_mean( np.square(np.subtract(self.dataDict['u'][:,k],self.dataDict['u_mean'][:,k])),windowLength)
self.dataDict['uv_mean'][:,k] = running_mean( np.multiply(np.subtract(self.dataDict['u'][:,k],self.dataDict['u_mean'][:,k]),
np.subtract(self.dataDict['v'][:,k],self.dataDict['v_mean'][:,k])) ,windowLength)
self.dataDict['uw_mean'][:,k] = running_mean( np.multiply(np.subtract(self.dataDict['u'][:,k],self.dataDict['u_mean'][:,k]),
np.subtract(self.dataDict['w'][:,k],self.dataDict['w_mean'][:,k])) ,windowLength)
self.dataDict['vv_mean'][:,k] = running_mean( np.square(np.subtract(self.dataDict['v'][:,k],self.dataDict['v_mean'][:,k])),windowLength)
self.dataDict['vw_mean'][:,k] = running_mean( np.multiply(np.subtract(self.dataDict['v'][:,k],self.dataDict['v_mean'][:,k]),
np.subtract(self.dataDict['w'][:,k],self.dataDict['w_mean'][:,k])) ,windowLength)
self.dataDict['ww_mean'][:,k] = running_mean( np.square(np.subtract(self.dataDict['w'][:,k],self.dataDict['w_mean'][:,k])),windowLength)
self.dataDict['wt_mean'][:,k] = running_mean( np.multiply(np.subtract(self.dataDict['w'][:,k],self.dataDict['w_mean'][:,k]),
np.subtract(self.dataDict['theta'][:,k],self.dataDict['theta_mean'][:,k])) ,windowLength)
self.dataDict['wspd_mean'] = np.sqrt(np.square(self.dataDict['u_mean'])+np.square(self.dataDict['v_mean']))
#self.dataDict['wdir_mean'] = np.arctan2(self.dataDict['v_mean'],self.dataDict['u_mean'])*180./np.pi+180.0 #From Branko's original, but this seems incorrect...
self.dataDict['wdir_mean'] = (270.0-np.arctan2(self.dataDict['v_mean'],self.dataDict['u_mean'])*180./np.pi)%360
# self.dataDict['shear_mean']=np.sqrt(np.square(self.uw_mean)+np.square(self.vw_mean))
#
# Plotting functions (TODO: move to plotting.py)
#
def plotDataSetByKey(self,xVarKey,yVarKey):
plt.figure()
plt.plot(self.dataDict[xVarKey],self.dataDict[yVarKey],'bo-')
#plt.show(block=False)
plt.draw()
#plt.show()
#plt.pause(0.0001)
def plotObsVsModelProfileAsSubplot(self,fig,axs,fldString,obsData,obsIndepVar,obsLabel,modelData,modelIndepVar,modelLabel):
#Set the Marker styles
obs_marker_style = dict(color='r', linestyle='None', marker='s', markersize=5, markerfacecolor='None')
model_marker_style = dict(color='b', linestyle='--', marker='o', markersize=3, markerfacecolor='None')
#Setup up the shared y-axis ticks and labels
yticks=np.arange(0,251,50)
ylabels=[]
ylabels.extend(str(z) for z in range(0,251,50))
#Compute the standard deviation of obsField
obs_std = np.std(obsData,axis=0)
model_std = np.std(modelData,axis=0)
#Find the x-axis Min,Max, and Interval
deltaXp = np.nanmax(np.append(np.abs(np.nanmax(np.mean(obsData,axis=0))- \
np.nanmax(np.mean(modelData,axis=0))) \
,np.nanmax(obs_std)))
deltaXm = np.nanmax(np.append(np.abs(np.nanmin(np.mean(obsData,axis=0))- \
np.nanmin(np.mean(modelData,axis=0))) \
,np.nanmax(obs_std)))
deltaX = np.nanmax(np.append(deltaXp,deltaXm))
xTickMin = np.floor(np.nanmin(np.mean(obsData,axis=0))-deltaX)
xTickMax = np.ceil(np.nanmax(np.mean(obsData,axis=0))+deltaX)
xTickInterval = np.round_((xTickMax-xTickMin)/3.0,0)
#print the x-axis characteristics
print('{:s}'.format(fldString+" x-axis traits..."))
print('{:s}'.format("xTickMin ="+str(xTickMin)))
print('{:s}'.format("xTickMax ="+str(xTickMax)))
print('{:s}'.format("xTickInterval ="+str(xTickInterval)))
#Setup the x-axis ticks and labels
xticks=np.arange(xTickMin,xTickMax+xTickInterval,xTickInterval)
xlabels=[]
xlabels.extend(str(u).split('.')[0] for u in xticks.tolist())
#Plot the observations (and uncertainty via errorbars)
axs.errorbar(np.mean(obsData,axis=0), obsIndepVar, xerr=obs_std, capsize=2, \
label=obsLabel, **obs_marker_style)
axs.errorbar(np.mean(modelData,axis=0), modelIndepVar, xerr=model_std, capsize=2, \
label=modelLabel, **model_marker_style)
#axs.plot(np.mean(modelData,axis=0), modelIndepVar, \
# label=modelLabel, **model_marker_style)
axs.set(xticks=xticks,xticklabels=xlabels,yticks=yticks,yticklabels=ylabels)
#Format and beautify the axes ticks and limits
axs.yaxis.set_minor_locator(AutoMinorLocator(4))
axs.xaxis.set_minor_locator(AutoMinorLocator(4))
axs.tick_params(direction='in',top=True,right=True,length=10, width=2, which='major')
axs.tick_params(direction='in',top=True,right=True,length=5, width=1, which='minor')
axs.tick_params(axis='both', which='major', labelsize=8)
axs.tick_params(axis='both', which='minor', labelsize=6)
axs.set_ylim(0.0,np.max(obsIndepVar+50))
axs.set_xlim(xTickMin,xTickMax)
def plotObsVsModelTimeSeriesAsSubplot(self,fig,axs,fldString, \
obsData,obsIndepVar,obsLabel,obsLevs, \
modelData,modelIndepVar,modelLabel,modelLevs):
obs_marker_style = dict(linestyle='-')
model_marker_style = dict(linestyle=':')
for lev in range(obsData.shape[1]):
axs.plot(obsIndepVar,obsData[:,lev],label = obsLabel+": "+str(obsLevs[0,lev]),**obs_marker_style)
for lev in range(modelData.shape[1]):
axs.plot(modelIndepVar,modelData[:,lev],label = modelLabel+": "+str(modelLevs[0,lev]),**model_marker_style)
def plotSingleSourceTimeSeriesAsSubplot(self,fig,axs,fldString, \
fldData,fldIndepVar,fldLabel,fldLevs):
fld_marker_style = dict(linestyle='-')
for lev in range(fldData.shape[1]):
axs.plot(fldIndepVar,fldData[:,lev],label = fldLabel+": "+str(fldLevs[0,lev]),**fld_marker_style)
#####END OF the MMC_CLASS
### Readers for legacy MMC data
def read_ascii_header(f):
"""Read header from legacy MMC file, called by _read_ascii()"""
head1 = f.readline()
head2 = f.readline()
head3 = f.readline()
head4 = f.readline()
head5 = f.readline()
head6 = f.readline()
head7 = f.readline()
head8 = f.readline()
head9 = f.readline()
lab = head1[12:25].strip()
print("lab: {:s}".format(lab))
location = head2[12:25].strip()
latitude = float(head3[12:25].strip())
longitude = float(head4[12:25].strip())
codename = head5[12:25].strip()
print("codename: {:s}".format(codename))
codetype = head6[12:25].strip()
casename = head7[12:25].strip()
benchmark = head8[12:25].strip()
levels = int(head9[12:25].strip())
print("levels: {:d}".format(levels))
fileheader = {
'lab':lab,
'location':location,
'latitude':latitude,
'longitude':longitude,
'codename':codename,
'codetype':codetype,
'casename':casename,
'benchmark':benchmark,
'levels':levels,
}
return fileheader
def read_ascii_recordheader(f):
"""Read a record from legacy MMC file, called by _read_ascii()"""
try:
pos0=f.tell()
head0 = f.readline()
pos1=f.tell()
if pos1==pos0:
recordheader = {}
else:
head1 = f.readline()
head2 = f.readline()
head3 = f.readline()
head4 = f.readline()
head5 = f.readline()
head6 = f.readline()
head7 = f.readline()
date = head1[12:22]
time = head2[12:22]
ustar = float(head3[26:36].strip())
z0 = float(head4[26:36].strip())
tskin = float(head5[26:36])
hflux = float(head6[26:36])
varlist = head7.split()
varnames=[]
varunits=[]
for i in range(len(varlist)):
if (i % 2) == 0:
varnames.append(varlist[i])
if (i % 2) == 1:
varunits.append(varlist[i])
recordheader = {
'date':date,
'time':time,
'ustar':ustar,
'z0':z0,
'tskin':tskin,
'hflux':hflux,
'varnames':varnames,
'varunits':varunits,
}
except:
print("Error in readrecordheader... Check your datafile for bad records!!\n Lines read are")
print("head1 = ",head1)
print("head2 = ",head2)
print("head3 = ",head3)
print("head4 = ",head4)
print("head5 = ",head5)
print("head6 = ",head6)
print("head7 = ",head7)
return recordheader
def read_ascii_records(f,Nlevels):
"""Read specified number of records from legacy MMC file, called
by _read_ascii().
"""
record=[]
for i in range(Nlevels):
line = f.readline()
#data = map(float,line.split())
for data in map(float,line.split()):
record.append(data)
#print("len(data) = {:d}",len(data))
#record.append(data)
#print("len(record) = {:d}",len(record))
recordarray=np.array(record).reshape(Nlevels,floor(len(record)/Nlevels))
#print("recordarray.shape = ",recordarray.shape)
return recordarray
### Utility functions for MMC class
def linearly_interpolate_nans(y):
# Fit a linear regression to the non-nan y values
# Create X matrix for linreg with an intercept and an index
X = np.vstack((np.ones(len(y)), np.arange(len(y))))
# Get the non-NaN values of X and y
X_fit = X[:, ~np.isnan(y)]
y_fit = y[~np.isnan(y)].reshape(-1, 1)
# Estimate the coefficients of the linear regression
beta = np.linalg.lstsq(X_fit.T, y_fit, rcond=None)[0]
# Fill in all the nan values using the predicted coefficients
y.flat[np.isnan(y)] = np.dot(X[:, np.isnan(y)].T, beta)
return y
def running_mean(x, N):
M = len(x)
bad = np.where(np.isnan(x))
B = bad[0].size
if (B > 0) and (float(B)/float(M) <= 0.1):
x = linearly_interpolate_nans(x)
if (B > 0) and (float(B)/float(M) > 0.1):
sys.exit("More than 10% data is NaN!")
y = x
cumsum = np.cumsum(np.insert(x, 0, 0))
xavg = (cumsum[N:] - cumsum[:-N]) / N
for i in range(0,floor(N/2)):
xavg = np.insert(xavg,i,np.nanmean(y[i:i+N]))
for i in range(M-floor(N/2)+1,M):
xavg = np.append(xavg,np.nanmean(y[i-N:i]))
return xavg
def running_mean2(x,N):
xavg=[]
M=len(x)
for i in range(0,floor(N/2)):
xavg.append(np.nanmean(x[i:i+N]))
for i in range(floor(N/2),M-floor(N/2)):
xavg.append(np.nanmean(x[i-floor(N/2):i+floor(N/2)]))
for i in range(M-floor(N/2),M):
xavg.append(np.nanmean(x[i-N:i]))
return xavg
```
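A quick smoke test of the padded running mean above on synthetic data; note that an even window length (as assumed here) preserves the input length:

```python
import numpy as np

x = np.arange(10, dtype=float)
xavg = running_mean(x, 4)
print(len(xavg) == len(x))  # True
print(xavg[4])              # 3.5, i.e. the mean of x[2:6]
```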
#### File: mmctools/mmctools/similarity.py
```python
import numpy as np
def Paulson_m(x):
"""Momentum similarity function for unstable conditions
Ref: <NAME>., 1970: The mathematical representation of wind
speed and temperature in the unstable atmospheric surface layer.
J. Appl. Meteor., 9, 857-861.
"""
return np.pi/2 - 2*np.arctan(x) + np.log((1+x)**2 * (1 + x**2) / 8)
def Paulson_h(x):
"""Heat similarity function for unstable conditions
Ref: <NAME>., 1970: The mathematical representation of wind
speed and temperature in the unstable atmospheric surface layer.
J. Appl. Meteor., 9, 857-861.
"""
return 2 * np.log((1 + x**2) / 2)
def Jimenez_m(z_L, a=6.1, b=2.5, alpha_m=10.0):
"""Momentum similarity function used by WRF
Ref: <NAME>., <NAME>, <NAME>, <NAME>.
Montavez and <NAME>, 2012: A Revised Scheme for
the WRF Surface Layer Formulation. Mon. Weather Rev., 140, 898-918.
"""
zeta = np.array(z_L)
psi = np.zeros(zeta.shape)
# Unstable conditions (Eqn. 17)
uns = np.where(zeta < 0)
x = (1 - 16*zeta[uns])**0.25
paulson_func = Paulson_m(x) # "Kansas-type" functions
y = (1 - alpha_m*zeta[uns])**(1./3)
conv_func = 3./2 * np.log(y**2 + y + 1./3) \
- np.sqrt(3) * np.arctan(2*y + 1/np.sqrt(3)) \
+ np.pi/np.sqrt(3) # convective contribution
psi[uns] = (paulson_func + zeta[uns]**2 * conv_func) \
/ (1 + zeta[uns]**2)
# Stable conditions (Eqn. 18)
sta = np.where(zeta >= 0)
psi[sta] = -a * np.log(zeta[sta] + (1 + zeta[sta]**b)**(1./b))
return psi
def Jimenez_h(z_L, c=5.3, d=1.1, alpha_h=34.0):
"""Heat similarity function used by WRF
Ref: <NAME>., <NAME>, <NAME>, <NAME>, J.P.
Montavez and <NAME>, 2012: A Revised Scheme for
the WRF Surface Layer Formulation. Mon. Weather Rev., 140, 898-918.
"""
zeta = np.array(z_L)
psi = np.zeros(zeta.shape)
# Unstable conditions (Eqn. 17)
uns = np.where(zeta < 0)
x = (1 - 16*zeta[uns])**0.25
paulson_func = Paulson_h(x) # "Kansas-type" functions
y = (1 - alpha_h*zeta[uns])**(1./3)
conv_func = 3./2 * np.log(y**2 + y + 1./3) \
- np.sqrt(3) * np.arctan(2*y + 1/np.sqrt(3)) \
+ np.pi/np.sqrt(3) # convective contribution
psi[uns] = (paulson_func + zeta[uns]**2 * conv_func) \
/ (1 + zeta[uns]**2)
# Stable conditions (Eqn. 19)
sta = np.where(zeta >= 0)
psi[sta] = -c * np.log(zeta[sta] + (1 + zeta[sta]**d)**(1./d))
return psi
```
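A short numerical check of the stability functions above; the z/L sample points are arbitrary:

```python
import numpy as np

z_L = np.array([-1.0, -0.1, 0.0, 0.1, 1.0])
print(Jimenez_m(z_L))
print(Jimenez_h(z_L))
# At z/L = 0 both functions return 0, so neutral conditions recover the
# unmodified log law; unstable (negative z/L) gives positive psi, while
# stable (positive z/L) gives negative psi.
```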
#### File: windtools/windtools/openfoam.py
```python
import re
class InputFile(dict):
"""Object to parse and store openfoam input file data
Written by <NAME> (<EMAIL>)
Includes support for parsing:
- single values, with attempted cast to float/bool
- lists
- dictionaries
"""
DEBUG = False
block_defs = [
('{','}',dict),
('(',')',list),
('[',']',list),
]
true_values = [
'true',
'on',
'yes',
]
false_values = [
'false',
'off',
'no',
'none',
]
special_keywords = [
'uniform',
'nonuniform',
'table',
]
def __init__(self,fpath,nodef=False):
"""Create a dictionary of definitions from an OpenFOAM-style
input file.
Inputs
------
fpath : str
Path to OpenFOAM file
nodef : bool, optional
If the file only contains OpenFOAM data, e.g., a table of
vector values to be included from another OpenFOAM file,
then create a generic 'data' parent object to contain the
file data.
"""
# read full file
with open(fpath) as f:
lines = f.readlines()
if nodef:
lines = ['data ('] + lines + [')']
# trim single-line comments and remove directives
for i,line in enumerate(lines):
line = line.strip()
if line.startswith('#'):
if self.DEBUG:
print('Ignoring directive:',line)
lines[i] = ''
else:
idx = line.find('//')
if idx >= 0:
lines[i] = line[:idx].strip()
# trim multi-line comments
txt = '\n'.join(lines)
idx0 = txt.find('/*')
while idx0 >= 0:
idx1 = txt.find('*/',idx0+1)
assert (idx1 > idx0), 'Mismatched comment block'
if self.DEBUG:
print('Remove comment block:',txt[idx0:idx1])
txt = txt[:idx0] + txt[idx1+2:]
idx0 = txt.find('/*')
# consolidate definitions into single lines
txt = txt.replace('\n',' ')
txt = txt.replace('\t',' ')
txt = txt.strip()
# now parse each line
for name,line,containertype in self._split_defs(txt):
if self.DEBUG:
print('\nPARSING',name,'FROM',line,'of TYPE',containertype)
self._parse(name,line,containertype)
self._sanitycheck()
def _sanitycheck(self):
"""Make sure the InputFile was read properly"""
noparent = [key is None for key in self.keys()]
if any(noparent):
print('Definitions improperly read, some values without keys')
print('If you believe this is an error, then re-run with the nodef keyword')
def _format_item_str(self,val,maxstrlen=60):
printval = str(val)
if isinstance(val,list) and (len(printval) > maxstrlen):
printval = '[list of length {:d}]'.format(len(val))
return printval
def __repr__(self):
descstrs = [
'{:s} : {:s}'.format(key, self._format_item_str(val))
for key,val in self.items()
]
return '\n'.join(descstrs)
def _split_defs(self,txt):
"""Splits blocks of text into lines in the following forms:
key value;
key (values...)
key {values...}
(values...)
((values...) (values...))
where lists and dicts may be nested. The outlier case is the
(nested) list which takes on the key of its parent.
"""
names, lines, container = [], [], []
while len(txt) > 0:
if self.DEBUG:
print('current text:',txt)
if (txt[0] == '('):
# special treatment for lists, or lists within a list
name = None
else:
# - find first word (name)
idx = txt.find(' ')
name = txt[:idx]
if self.DEBUG: print('name=',name)
txt = txt[idx+1:].strip()
# - find next word (either a value/block)
idx = txt.find(' ')
if idx < 0:
# EOF
string = txt
txt = '' # to exit loop
if self.DEBUG: print('EOF',string)
else:
string = txt[:idx].strip()
if string in self.special_keywords:
# append special keyword to name and read the next word
name += '_'+string
txt = txt[idx+1:].strip()
idx = txt.find(' ')
assert (idx > 0), 'problem parsing '+string+' field'
string = txt[:idx].strip()
if string.endswith(';'):
# found single definition
if self.DEBUG: print('value=',string[:-1])
names.append(name)
lines.append(string[:-1]) # strip ;
container.append(None)
else:
# found block
if self.DEBUG: print('current string:',string)
blockstart = string[0]
blockend = None
blocktype = None
for block in self.block_defs:
if blockstart == block[0]:
blockend = block[1]
blocktype = block[2]
break
assert (blockend is not None), 'Unknown input block '+blockstart
# find end of block
idx = txt.find(blockend) + 1
assert (idx > 0), 'Mismatched input block'
# consolidate spaces
blockdef = re.sub(' +',' ',txt[:idx].strip())
Nopen = blockdef.count(blockstart)
Nclose = blockdef.count(blockend)
while Nopen != Nclose:
if self.DEBUG:
print(' incomplete:',blockdef)
idx = txt.find(blockend, idx) + 1
blockdef = txt[:idx].strip()
Nopen = blockdef.count(blockstart)
Nclose = blockdef.count(blockend)
# select block
if self.DEBUG: print('complete block=',blockdef)
names.append(name)
lines.append(blockdef)
container.append(blocktype)
if self.DEBUG: print('container type=',container[-1])
# trim text block
txt = txt[idx+1:].strip()
return zip(names, lines, container)
def _parse(self,name,defn,containertype,parent=None):
"""Parse values split up by _split_defs()
Casts to float and bool (the latter by checking against a list
of known true/false values, since bool(some_str) will return
True if the string has a nonzero length) will be attempted.
If the value is a container (i.e., list or dict), then
_split_defs() and _parse() will be called recursively.
"""
if self.DEBUG:
print('----------- parsing block -----------')
if parent is not None:
print('name:',name,'parent:',str(parent))
if containertype is not None:
print('container type:',containertype)
defn = defn.strip()
if containertype is None:
# set single value in parent
defn = self._try_cast(defn)
# SET VALUE HERE
if self.DEBUG:
print(name,'-->',defn)
if parent is None:
self.__setitem__(name, defn)
elif isinstance(parent, dict):
parent[name] = defn
else:
assert isinstance(parent, list)
parent.append(defn)
else:
# we have a subblock, create new container
if parent is None:
# parent is the InputFile object
if self.DEBUG:
print('CREATING',containertype,'named',name)
self.__setitem__(name, containertype())
newparent = self.__getitem__(name)
elif isinstance(parent, dict):
# parent is a dictionary
if self.DEBUG:
print('ADDING dictionary entry,',name)
parent[name] = containertype()
newparent = parent[name]
else:
assert isinstance(parent, list)
# parent is a list
if self.DEBUG:
print('ADDING list item, name=',name)
if name is not None:
                    # if we have nested lists with mixed types we could
# end up here...
parent.append(self._try_cast(name))
newparent = containertype()
parent.append(newparent)
newdefn = defn[1:-1].strip()
if (containertype is list) \
and ('(' not in newdefn) and (')' not in newdefn):
# special treatment for lists
for val in newdefn.split():
                    # recursively call parse without a name (None for
# list) and without a container type to indicate
# that a new value should be set
self._parse(None,val,None,parent=newparent)
else:
for newname,newdef,newcontainertype in self._split_defs(newdefn):
self._parse(newname,newdef,newcontainertype,parent=newparent)
def _try_cast(self,s):
assert(s.find(' ') < 0)
try:
# attempt float cast
s = float(s)
except ValueError:
            # Note: a bool() cast is a trap here -- bool(s) never raises
            # ValueError and returns True for any non-empty string, so boolean
            # values are matched against the known keyword lists below instead.
if s.lower() in self.true_values:
s = True
elif s.lower() in self.false_values:
s = False
else:
# default to string
s = s.strip('"')
s = s.strip('\'')
return s
```
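A self-contained sketch of the parser above in action; the dictionary contents are a made-up minimal example of the OpenFOAM-style syntax it handles:

```python
import os
import tempfile

foam_text = """
// minimal example
nstages   2;
writeNow  off;
velocity  uniform (10 0 0);
boundary
{
    inlet   { type fixedValue; }
}
"""

with tempfile.NamedTemporaryFile('w', suffix='.dict', delete=False) as f:
    f.write(foam_text)
    fpath = f.name

d = InputFile(fpath)
print(d['nstages'])                    # 2.0 (numbers are cast to float)
print(d['writeNow'])                   # False ('off' is a known false value)
print(d['velocity_uniform'])           # [10.0, 0.0, 0.0]
print(d['boundary']['inlet']['type'])  # fixedValue
os.remove(fpath)
```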
#### File: mmctools/wrf/ts.py
```python
import os, glob
import time
import numpy as np
import pandas as pd
import xarray as xr
import f90nml
from .utils import Tower
from .utils import combine_towers
def read_tslist(fpath,
snap_to_grid=None,grid_order='F',max_shift=1e-3,
convert_to_xy=None, latlon_ref=(0,0)):
"""Read the description of sampling locations
Parameters
----------
fpath : str
Path to tslist file
snap_to_grid : list or tuple, or None
If not None, then adjust the lat/lon coordinates so that they
lie on a regular grid with shape (Nlat, Nlon). Assume that the
sampling locations are regularly ordered.
grid_order : str
Either 'F' or 'C' for Fortran (axis=0 changes fastest) and C
ordering (axis=-1 changes fastest), respectively.
max_shift : float
If snap_to_grid is True, then this is the maximum amount (in
degrees) that a tower location will change in latitude or
longitude.
convert_to_xy : str or None
Mapping to use for converting from lat/lon to x/y coordinates.
If None, x and y are not calculated
latlon_ref : list or tuple
Latitude and longitude to use as a reference to determine the
zone number and relative distances x,y.
"""
df = pd.read_csv(fpath,comment='#',delim_whitespace=True,
names=['name','prefix','lat','lon'])
if snap_to_grid is not None:
        print('Attempting to adjust grid lat/lon')
assert (len(snap_to_grid) == 2), 'snap_to_grid should be (Nlat,Nlon)'
Nlat,Nlon = snap_to_grid
# original center of sampling grid
lat0 = df['lat'].mean()
lon0 = df['lon'].mean()
        # lat/lon correspond to the first and second dims, respectively
lat = np.reshape(df['lat'].values, snap_to_grid, order=grid_order)
lon = np.reshape(df['lon'].values, snap_to_grid, order=grid_order)
# calculate 1-d lat/lon vectors from average spacing
delta_lat = np.mean(np.diff(lat, axis=0))
delta_lon = np.mean(np.diff(lon, axis=1))
print(' lat/lon spacings:',delta_lat,delta_lon)
new_lat1 = np.linspace(lat[0,0], lat[0,0]+(Nlat-1)*delta_lat, Nlat)
new_lon1 = np.linspace(lon[0,0], lon[0,0]+(Nlon-1)*delta_lon, Nlon)
# calculate new lat/lon grid
new_lat, new_lon = np.meshgrid(new_lat1, new_lon1, indexing='ij')
# calculate new center
new_lat0 = np.mean(new_lat1)
new_lon0 = np.mean(new_lon1)
# shift
lat_shift = lat0 - new_lat0
lon_shift = lon0 - new_lon0
if (np.abs(lat_shift) < max_shift) and (np.abs(lon_shift) < max_shift):
print(' shifting lat/lon grid by ({:g}, {:g})'.format(lat_shift, lon_shift))
new_lat += lat_shift
new_lon += lon_shift
new_lat = new_lat.ravel(order=grid_order)
new_lon = new_lon.ravel(order=grid_order)
# one last sanity check, to make sure we didn't screw up
# anything during renumbering
assert np.all(np.abs(new_lat - df['lat']) < max_shift)
assert np.all(np.abs(new_lon - df['lon']) < max_shift)
# now update the df
df['lat'] = new_lat
df['lon'] = new_lon
else:
print(' grid NOT shifted, delta lat/lon ({:g}, {:g}) > {:g}'.format(
lat_shift, lon_shift, max_shift))
if convert_to_xy == 'utm':
import utm
x0,y0,zone0,_ = utm.from_latlon(*latlon_ref)
for prefix,row in df.iterrows():
x,y,_,_ = utm.from_latlon(row['lat'], row['lon'],
force_zone_number=zone0)
df.loc[prefix,'x'] = x - x0
df.loc[prefix,'y'] = y - y0
elif convert_to_xy is not None:
print('Unrecognized mapping:',convert_to_xy)
return df
class Toof(object):
"""Class for processing WRF outputs for coupling to microscale
solvers. The name toof stems from the original fortran
implementation of "wrftoof" (i.e., WRF to OpenFOAM) found at
https://github.com/NREL/SOWFA/tree/master/tools/WRFextraction.
"""
def __init__(self,dpath,
prefixes,
starttime,
targetdomain,
wrfdomain=-1,
namelist='namelist.input',
tsdir='tsout',
verbose=True):
"""Process a series of tsout files representing a grid of WRF
profiles from tslist sampling.
Parameters
----------
dpath : str
Path to wrf case directory
prefixes : list
List of tslist prefixes to use for constructing a WRF
subdomain; virtual towers should form an ordered grid
starttime : str or timestamp
Datetime to convert ts output (in hours) to timestamps
targetdomain : Domain object
Instance of mmctools.coupling.domain.Domain class describing
the microscale domain
wrfdomain : int, optional
Index (0-based) of wrf domain from which to sample (default:
-1, i.e., the innermost domain)
namelist : str, optional
Filename in `dpath` of wrf namelist input
tsdir : str, optional
Path to subdirectory containing tsout files
"""
self.dpath = dpath
self.prefixes = prefixes
self.starttime = starttime
self.domain = targetdomain
self.wrfdomain = wrfdomain
self.namelist = namelist
self.tsdir = os.path.join(dpath,tsdir)
self.verbose = verbose
self._setup()
self._read_towers()
def _setup(self):
# scrape WRF namelist for additional parameters
        nmlpath = os.path.join(self.dpath,self.namelist)
nml = f90nml.read(nmlpath)
self.max_dom = int(nml['domains']['max_dom'])
dxlist = nml['domains']['dx']
dylist = nml['domains']['dy']
if self.wrfdomain >= 0:
assert self.wrfdomain < self.max_dom,\
                'Requested domain {:d}, max_dom={:d}'.format(self.wrfdomain,self.max_dom)
idx = self.wrfdomain
else:
idx = self.max_dom + self.wrfdomain
# update prefixes
self.prefixes = [prefix+'.d{:02d}'.format(idx+1) for prefix in self.prefixes]
# get grid spacing
self.dx = float(dxlist[idx])
self.dy = float(dylist[idx])
if self.verbose:
print('Read',nmlpath)
print(' max_dom =',self.max_dom)
print(' dx,dy =',self.dx,self.dy)
# get lat/lon if needed
if not self.domain.have_latlon:
if self.verbose:
print('Calculating grid lat/lon')
self.domain.calc_latlon()
def _read_towers(self):
if self.verbose:
print('Calling combine_towers for',self.tsdir,'...')
print(' interpolating to z= [',
self.domain.z[0], self.domain.z[1], self.domain.z[2], '..',
self.domain.z[-2], self.domain.z[-1], ']')
self.ds = combine_towers(
self.tsdir,
restarts=None,
simulation_start=self.starttime,
fname=self.prefixes,
structure='ordered',
dx=self.dx, dy=self.dy,
heights=self.domain.z,
height_var='ph', # geopotential height
agl=True,
verbose=self.verbose
)
self.ds = self.ds.swap_dims({'nz':'height'}) # to facilitate ds.interp()
if self.verbose:
print('... done reading ts outputs')
def interp_to_latlon(self,latlon):
"""Get data column at specified latlon
Based on original wrftoof "cavalier approach" ignoring curvature
and assuming grid cells are square (i.e., WRF lat/lon are
        Cartesian).
"""
tgtlat,tgtlon = latlon
# Not guaranteed to find correct indices:
#lat1 = self.ds.coords['lat'].mean(dim='nx').values
#lon1 = self.ds.coords['lon'].mean(dim='ny').values
#dlat = np.mean(np.diff(lat1))
#dlon = np.mean(np.diff(lon1))
#j = int((tgtlat - lat1[0]) / dlat) # lat changes over ny dimension
#i = int((tgtlon - lon1[0]) / dlon) # lon changes over nx dimension
#if self.verbose:
# print('Interpolating',latlon,'from')
# print(' approx lat {:g} and {:g}'.format(lat1[j], lat1[j+1]))
# print(' approx lon {:g} and {:g}'.format(lon1[i], lon1[i+1]))
#assert (tgtlat >= lat1[j]) and (tgtlat < lat1[j+1])
#assert (tgtlon >= lon1[i]) and (tgtlon < lon1[i+1])
wrflat = self.ds.coords['lat'].transpose('nx','ny',transpose_coords=True).values
wrflon = self.ds.coords['lon'].transpose('nx','ny',transpose_coords=True).values
dmin = 9e9
i,j = None,None
for ii in range(self.ds.dims['nx']-1):
for jj in range(self.ds.dims['ny']-1):
# "error" in distances from 4 corners
d = ((tgtlat - wrflat[ii,jj])**2 + (tgtlon - wrflon[ii,jj])**2)**0.5
if (d < dmin) and (tgtlat >= wrflat[ii,jj]) and (tgtlon >= wrflon[ii,jj]):
dmin = d
i,j = ii,jj
#print('selected',i,j)
assert (tgtlat >= wrflat[i,j]) and (tgtlat < wrflat[i,j+1])
assert (tgtlon >= wrflon[i,j]) and (tgtlon < wrflon[i+1,j])
# bilinear interpolation
f00 = self.ds.sel(nx=i ,ny=j)
f10 = self.ds.sel(nx=i+1,ny=j)
f01 = self.ds.sel(nx=i ,ny=j+1)
f11 = self.ds.sel(nx=i+1,ny=j+1)
finterp = f00 * (wrflon[i+1,j] - tgtlon ) * (wrflat[i,j+1] - tgtlat ) + \
f10 * (tgtlon - wrflon[i,j]) * (wrflat[i,j+1] - tgtlat ) + \
f01 * (wrflon[i+1,j] - tgtlon ) * (tgtlat - wrflat[i,j]) + \
f11 * (tgtlon - wrflon[i,j]) * (tgtlat - wrflat[i,j])
finterp = finterp / ((wrflon[i+1,j] - wrflon[i,j]) * (wrflat[i,j+1] - wrflat[i,j]))
# note: y and z coordinates don't get interpolated
finterp = finterp.assign_coords({'lon':tgtlon,'lat':tgtlat})
return finterp.drop_vars(['y','z'])
def map_to_boundary(self,i=None,j=None,k=None,allpts=False):
"""Get boundary data over time on the specified boundary from
the target domain. Setting `allpts` to True will interpolate to
all points at the target domain resolution; otherwise,
interpolate to data columns at the domain corners.
"""
assert np.count_nonzero([idx is not None for idx in [i,j,k]])==1, \
'Specify i, j, or k'
if allpts:
print('WARNING: current implementation of allpts is likely to result in extreme memory usage and may crash')
# interpolate to selected lat/lon
selected_x, selected_y, selected_lat, selected_lon = \
self._select_boundary_latlon(i,j,k,allpts)
if self.verbose:
print('selected lat:',selected_lat)
print('selected lon:',selected_lon)
dslist = self._get_datasets_at_locations(
selected_x,selected_y,selected_lat,selected_lon)
# combine all interpolated profiles
boundarydata = self._create_dataset_from_list(i,j,k,allpts,dslist)
return boundarydata
def _get_datasets_at_locations(self,selected_x, selected_y, selected_lat, selected_lon):
dslist = []
for x,y,lat,lon in zip(selected_x, selected_y, selected_lat, selected_lon):
ds = self.interp_to_latlon((lat,lon))
ds = ds.expand_dims({'x':[x],'y':[y]})
dslist.append(ds)
return dslist
def _create_dataset_from_list(self,i,j,k,allpts,dslist):
ds = xr.combine_by_coords(dslist)
if (i is not None):
mydim = 'x'
idx = i
elif (j is not None):
mydim = 'y'
idx = j
elif (k is not None):
mydim = 'height'
idx = k
if ((i is not None) or (j is not None)) and allpts:
# if allpts, interpolate side boundary profiles to exact domain heights
ds = ds.interp(height=self.domain.z)
elif k is not None:
# if horizontal boundary, interpolate to constant z
if self.verbose:
print('interpolating to',self.domain.z[k])
ds = ds.interp(height=self.domain.z[k])
else:
ds = ds.sel({mydim: ds.coords[mydim][idx]})
return ds
def _select_boundary_latlon(self,i,j,k,allpts):
"""Helper function for map_to_boundary"""
selected_lat = None
selected_lon = None
if i is not None:
assert i in [0,-1]
selected_x = self.domain.x[i]
if allpts:
selected_lat = self.domain.lat[i,:]
selected_lon = self.domain.lon[i,:]
selected_y = self.domain.y
else:
selected_lat = self.domain.lat[i,::self.domain.ny]
selected_lon = self.domain.lon[i,::self.domain.ny]
selected_y = self.domain.y[::self.domain.ny]
selected_x = np.repeat(selected_x, len(selected_y))
elif j is not None:
assert j in [0,-1]
selected_y = self.domain.y[j]
if allpts:
selected_lat = self.domain.lat[:,j]
selected_lon = self.domain.lon[:,j]
selected_x = self.domain.x
else:
selected_lat = self.domain.lat[::self.domain.nx,j]
selected_lon = self.domain.lon[::self.domain.nx,j]
selected_x = self.domain.x[::self.domain.nx]
selected_y = np.repeat(selected_y, len(selected_x))
elif k is not None:
assert k in [0,-1]
if allpts:
raise NotImplementedError("I don't think there's a use case for this...")
else:
selected_lat = self.domain.lat[::self.domain.nx,::self.domain.ny]
selected_lon = self.domain.lon[::self.domain.nx,::self.domain.ny]
selected_x = self.domain.x[::self.domain.nx]
selected_y = self.domain.y[::self.domain.ny]
selected_lat = selected_lat.ravel()
selected_lon = selected_lon.ravel()
xx,yy = np.meshgrid(selected_x, selected_y, indexing='ij')
selected_x = xx.ravel()
selected_y = yy.ravel()
assert (selected_lat is not None) and (selected_lon is not None)
return selected_x, selected_y, selected_lat, selected_lon
def map_to_internal_field(self,datetime):
"""Get internal field by interpolating between profiles at
domain corners
"""
dslist = []
for i in [0,-1]:
for j in [0,-1]:
ds = self.interp_to_latlon((self.domain.lat[i,j],
self.domain.lon[i,j]))
ds = ds.expand_dims({'x': [self.domain.x[i]],
'y': [self.domain.y[j]]})
dslist.append(ds.sel(datetime=datetime))
# combine all interpolated profiles
internaldata = xr.combine_by_coords(dslist)
# interpolate to cell centers
xcc = (self.domain.x[1:] + self.domain.x[:-1]) / 2
ycc = (self.domain.y[1:] + self.domain.y[:-1]) / 2
zcc = (self.domain.z[1:] + self.domain.z[:-1]) / 2
internaldata = internaldata.interp(x=xcc, y=ycc, height=zcc)
return internaldata
def estimate_horizontal_gradient(self,i=1,j=1,k=1,field='p'):
"""Estimate horizontal gradients centered at the specified tower
(i,j,k).
"""
print('stub')
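# Hedged usage sketch for Toof (illustrative only; the case path, tower
# prefixes, start time, and Domain construction are assumptions):
#
#   from mmctools.coupling.domain import Domain   # class referenced in the docstring above
#   dom = Domain(...)                             # microscale target domain
#   toof = Toof('/path/to/wrf/case',
#               prefixes=['T01','T02','T03','T04','T05','T06','T07','T08','T09'],
#               starttime='2020-01-01 00:00', targetdomain=dom)
#   col  = toof.interp_to_latlon((45.6, 5.2))           # single interpolated column
#   west = toof.map_to_boundary(i=0)                    # corner columns on the i=0 face
#   init = toof.map_to_internal_field('2020-01-01 06:00')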
class TowerArray(object):
"""Read and store an array of Tower objects sampled from WRF using
the tslist
"""
varnames = ['uu','vv','ww','th','pr','ph','ts']
def __init__(self,outdir,towerdir,domain,
starttime,timestep=10.0,
tslistpath=None,
verbose=True,
**tslist_args):
"""Create a TowerArray object from a WRF simulation with tslist
output. DEPRECATED--use wrf.utils.combine_towers() instead
Parameters
----------
outdir : str
Directory path to where data products, e.g., tower output
converted into netcdf files, are to be stored.
towerdir : str
Path to directory where tslist sampling outputs are stored.
domain : int
WRF domain to use (domain >= 1)
starttime : str or Timestamp
The datetime at which the simulation was started
(corresponding to t=0 in the sampling output), which should
correspond to the start_* namelist parameters in the WRF
namelist.input.
timestep : float
The timestep for the selected WRF domain, in seconds.
tslistpath : str, optional
Path to tslist file, which explicitly specifies the names
and lat/lon values for each tower.
tslist_args : optional
Keyword arguments passed to read_tslist, e.g., `snap_to_grid`
to enforce a regular lat/lon grid.
"""
self.verbose = verbose # for debugging
self.outdir = outdir
self.towerdir = towerdir
self.domain = domain
self.starttime = pd.to_datetime(starttime)
self.timestep = timestep
self.tslistpath = tslistpath
self._check_inputs()
self._load_tslist(**tslist_args)
def _check_inputs(self):
if not os.path.isdir(self.outdir):
os.makedirs(self.outdir)
if self.tslistpath is not None:
assert os.path.isfile(self.tslistpath), 'tslist not found'
assert os.path.isdir(self.towerdir), 'tower directory not found'
def _load_tslist(self,**kwargs):
try:
self.tslist = read_tslist(self.tslistpath, **kwargs)
except (ValueError,IOError):
self.tslist = None
# manually determine list of available tower prefixes
files = glob.glob(
os.path.join(self.towerdir,
'*.d{:02d}.TS'.format(self.domain)))
self.prefixlist = [
os.path.split(fpath)[1].split('.d')[0] for fpath in files
]
else:
# tslist was read successfully
self.prefixlist = list(self.tslist['prefix'])
self.tslist.set_index('prefix',inplace=True)
self._catalog_files()
def _catalog_files(self):
"""Check availability of all towers and then create a list of
filepaths for each tower, domain, and output variable.
"""
self.tsfiles = {}
for prefix in self.prefixlist:
self.tsfiles[prefix] = {}
for varname in self.varnames:
fpath = os.path.join(self.towerdir,
'{:s}.d{:02d}.{:2s}'.format(prefix,
self.domain,
varname.upper()))
assert os.path.isfile(fpath), '{:s} not found'.format(fpath)
self.tsfiles[prefix][varname] = fpath
def load_data(self,
heights=None,height_var='height',approx_height=True,
overwrite=False):
"""Load ncfile(s) if they exist, or generate them using the
Tower class
Parameters
----------
heights : array-like or None
Interpolate to these heights at all times; ignored if data
are read from disk instead of processed from WRF output.
height_var : str
If `heights` is not None, this indicates how the height
values are determined:
- 'height': Tower.height has been loaded; no actions are
performed
- 'ph': The tower elevation has been stored in the geo-
potential variable; the height (above ground level) will
be automatically calculated as Tower.ph - Tower.stationz.
approx_height : bool
If `heights` is not None, then assume that height is
approximately constant in time. This speeds up the
interpolation because interpolation does not need to be
performed at each time step.
overwrite : bool
Generate new data (to be written as nc files).
"""
self.data = {}
for prefix in self.prefixlist:
fpath = os.path.join(self.outdir, prefix+'.nc')
if os.path.isfile(fpath) and not overwrite:
if self.verbose: print('Reading',fpath)
self.data[prefix] = xr.open_dataset(fpath)
else:
if self.verbose: print('Creating',fpath)
self.data[prefix] = self._process_tower(prefix,
heights=heights,
height_var=height_var,
approx_height=approx_height,
outfile=fpath)
def load_combined_data(self,fpath,chunks=None):
"""Load data generated with combine() to avoid having to reload
the combined dataset. The `chunks` kwarg may be used to load the
dataset with dask. Tips:
http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
Parameters
----------
        fpath : str
            Path to a previously saved combined dataset (netcdf file).
        chunks : int or dict, optional
            If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks={}`` loads the dataset with dask using a single
chunk for all arrays.
"""
self.ds = xr.open_dataset(fpath, chunks=chunks)
return self.ds
def _process_tower(self,prefix,
heights=None,height_var=None,approx_height=True,
outfile=None):
"""Use Tower.to_dataframe() to create a dataframe, to which we
add tower latitude/longitude. Setting them as indices makes the
recognizable as coordinates by xarray.
"""
towerfile = '{:s}.d{:02d}.*'.format(prefix, self.domain)
fpath = os.path.join(self.towerdir,towerfile)
# create Tower object
totaltime0 = time.time()
tow = Tower(fpath,varlist=self.varnames)
excludelist = ['ts'] # skip surface data
# set up height variable if needed
if heights is not None:
assert (height_var is not None), 'height attribute unknown'
if height_var == 'ph':
# this creates a time-heightlevel varying height
tow.height = getattr(tow,height_var) - tow.stationz
excludelist += [height_var]
if approx_height:
mean_height = np.mean(tow.height, axis=0)
if self.verbose:
# diagnostics
stdev0 = np.std(tow.height, axis=0)
kmax0 = np.argmax(stdev0)
print(' max stdev in height at (z~={:g}m) : {:g}'.format(
mean_height[kmax0], stdev0[kmax0]))
if heights is not None:
zmax = np.max(heights)
heights_within_micro_dom = np.ma.masked_array(
tow.height, tow.height > zmax)
stdev = np.std(heights_within_micro_dom, axis=0)
kmax = np.argmax(stdev)
print(' max stdev in height (up to z={:g} m) at (z~={:g} m) : {:g}'.format(
np.max(heights_within_micro_dom), mean_height[kmax], stdev[kmax]))
tow.height = mean_height
elif height_var != 'height':
raise ValueError('Unexpected height_var='+height_var+'; heights not calculated')
        # now convert to an xarray dataset (note that height interpolation
        # will optionally be performed here)
time0 = time.time()
ds = tow.to_xarray(start_time=self.starttime,
time_step=self.timestep,
heights=heights,
exclude=excludelist)
time1 = time.time()
if self.verbose: print(' to_xarray() time = {:g}s'.format(time1-time0))
# save
time0 = time.time()
if outfile is not None:
ds.to_netcdf(outfile)
totaltime1 = time.time()
if self.verbose:
print(' xarray output time = {:g}s'.format(totaltime1-time0))
print(' TOTAL time = {:g}s'.format(totaltime1-totaltime0))
return ds
def combine(self,cleanup=False):
"""Create volume data (excluding surface data) by combining
lat/lon coordinates across all datasets. Tested for data on a
regular grid.
Notes:
- This has a _very_ large memory overhead, i.e., need enough
memory to store and manipulate all of the tower data
simultaneously, otherwise it may hang.
- xarray.combine_by_coords fails with a cryptic "the supplied
objects do not form a hypercube" message if the lat/lon values
do not form a regular grid
"""
datalist = [ data for key,data in self.data.items() ]
self.ds = xr.combine_by_coords(datalist)
if cleanup is True:
import gc # garbage collector
try:
del self.data
except AttributeError:
pass
else:
if self.verbose:
print('Cleared data dict from memory')
finally:
gc.collect()
return self.ds
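# Hedged usage sketch for TowerArray (deprecated in favor of
# wrf.utils.combine_towers(); the paths and heights below are assumptions):
#
#   ta = TowerArray('processed/', 'tsout/', domain=3,
#                   starttime='2020-01-01 00:00',
#                   tslistpath='tslist', snap_to_grid=(3, 3))
#   ta.load_data(heights=[10., 40., 80.], height_var='ph')
#   ds = ta.combine(cleanup=True)   # single dataset over all towers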
``` |
{
"source": "joejoeyjoseph/playground",
"score": 3
} |
#### File: advance_py/05 Classes/enums_start.py
```python
def main():
pass
# TODO: enums have human-readable values and types
# TODO: enums have name and value properties
# TODO: print the auto-generated value
# TODO: enums are hashable - can be used as keys
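    # One possible solution sketch (assumed; not part of the original start file):
    #   from enum import Enum, auto
    #   class Fruit(Enum):
    #       APPLE = 1
    #       BANANA = 2
    #       TOMATO = auto()
    #   print(Fruit.APPLE)                          # human-readable value and type
    #   print(Fruit.APPLE.name, Fruit.APPLE.value)  # name and value properties
    #   print(Fruit.TOMATO.value)                   # auto-generated value
    #   lookup = {Fruit.APPLE: "red"}               # hashable, usable as dict keys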
if __name__ == "__main__":
main()
```
#### File: advance_py/06 Logging/basiclog_finished.py
```python
import logging
def main():
# Use basicConfig to configure logging
# this is only executed once, subsequent calls to
# basicConfig will have no effect
logging.basicConfig(level=logging.DEBUG,
filemode="w",
filename="output.log")
# Try out each of the log levels
logging.debug("This is a debug-level log message")
logging.info("This is an info-level log message")
logging.warning("This is a warning-level message")
logging.error("This is an error-level message")
logging.critical("This is a critical-level message")
# Output formatted string to the log
logging.info("Here's a {} variable and an int: {}".format("string", 10))
if __name__ == "__main__":
main()
```
#### File: advance_py/06 Logging/customlog_start.py
```python
import logging
# TODO: add another function to log from
def main():
# set the output file and debug level, and
# TODO: use a custom formatting specification
logging.basicConfig(filename="output.log",
level=logging.DEBUG)
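    # One possible formatting specification (assumed, for illustration only):
    #   fmt = "%(asctime)s: %(levelname)s: %(funcName)s Line:%(lineno)d %(message)s"
    #   logging.basicConfig(filename="output.log", level=logging.DEBUG, format=fmt)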
logging.info("This is an info-level log message")
logging.warning("This is a warning-level message")
if __name__ == "__main__":
main()
```
#### File: advance_py/07 Comprehensions/dictcomp_start.py
```python
def main():
# define a list of temperature values
ctemps = [0, 12, 34, 100]
# TODO: Use a comprehension to build a dictionary
# TODO: Merge two dictionaries with a comprehension
team1 = {"Jones": 24, "Jameson": 18, "Smith": 58, "Burns": 7}
team2 = {"White": 12, "Macke": 88, "Perce": 4}
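    # Possible solutions (assumed; not part of the original start file):
    #   temp_f = {t: (t * 9 / 5) + 32 for t in ctemps}                      # C -> F
    #   newteam = {k: v for team in (team1, team2) for k, v in team.items()}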
if __name__ == "__main__":
main()
``` |
{
"source": "joejohnston203/ricochet_cevns_spectra",
"score": 3
} |
#### File: bump/fit_bump/fit_neos.py
```python
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
data = np.loadtxt("neos_bump_ratio.csv", delimiter=",")
x = data[:, 0]
# x was from 0 to 8, but it should have been 1 to 8
x = 1+7./8.*x
y = data[:, 1]
x_fit = x[np.logical_and(0<x,x<8.)]
y_fit = y[np.logical_and(0<x,x<8.)]
def f(x, a0, b0, mu, sig):
return a0 + b0*np.exp(-(x-mu)**2/ (2*sig**2))
f = np.vectorize(f)
res = curve_fit(f, x, y, [1., 0.1, 4.5, 0.5])
plt.figure()
plt.plot(x, y, "k-", label="NEOS Ratio")
plt.plot(x_fit, f(x_fit, *res[0]), label="Gaussian Fit")
plt.title("Fit Res: a0=%.3f, b0=%.3f, mu=%.2f, sig=%.3f"%tuple(res[0]))
plt.xlabel("Prompt Energy (MeV)")
plt.ylabel("Ratio to Prediction")
plt.legend()
plt.savefig("neos_fit.png")
print("Fit results: %s"%res[0])
```
#### File: results/cevns_xsec/make_cevns_plots.py
```python
from reactor_tools import NeutrinoSpectrum
import cevns_spectra
from cevns_spectra import dsigmadT_cns, dsigmadT_cns_rate, dsigmadT_cns_rate_compound, total_cns_rate_an, total_cns_rate_an_compound, total_XSec_cns, total_XSec_cns_compound, total_XSec_cns_compound_in_bin, get_atomic_arrs
import numpy as np
from scipy.optimize import curve_fit
import scipy.integrate as spint
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import os
#plt.rcParams.update({'font.size': 18})
def plot_cevns_rate_fixed_T():
fig2 = plt.figure()
Tmin = 0.001
Z = 32
    N = 42.  # neutron number for Ge-74 (A=74, Z=32)
e_arr = np.linspace(0.,1e7, 10000)
fig2.patch.set_facecolor('white')
plt.ylim((1e-44, 1e-42))
plt.semilogy(e_arr*1e-6,dsigmadT_cns(10.,e_arr,Z,N),'k:',label='T=10 eV',linewidth=2)
plt.semilogy(e_arr*1e-6,dsigmadT_cns(50.,e_arr,Z,N),'b-',label='T=50 eV',linewidth=2)
plt.semilogy(e_arr*1e-6,dsigmadT_cns(100.,e_arr,Z,N),'r--',label='T=100 eV',linewidth=2)
plt.semilogy(e_arr*1e-6,dsigmadT_cns(200.,e_arr,Z,N),'g-.',label='T=200 eV',linewidth=2)
plt.legend(prop={'size':11})
plt.xlabel('Neutrino Energy (MeV)')
plt.ylabel('Differential XSec, cm^2/eV')
plt.title('Ge-74 Differential CEvNS XSec, Fixed T')
plt.savefig('plots/diff_xsec_fixed_T.png')
fig2.clf()
def plot_cevns_rate_fixed_Enu():
fig2 = plt.figure()
Tmin = 0.001
Z = 32
    N = 42  # neutron number for Ge-74 (A=74, Z=32)
t_arr = np.logspace(0, 4, 10000)
fig2.patch.set_facecolor('white')
plt.ylim((1e-44, 1e-42))
plt.loglog(t_arr,dsigmadT_cns(t_arr,1e6,Z,N),'k:',label='Enu = 1 MeV',linewidth=2)
plt.loglog(t_arr,dsigmadT_cns(t_arr,2e6,Z,N),'b-',label='Enu = 2 MeV',linewidth=2)
plt.loglog(t_arr,dsigmadT_cns(t_arr,4e6,Z,N),'r--',label='Enu = 4 MeV',linewidth=2)
plt.loglog(t_arr,dsigmadT_cns(t_arr,6e6,Z,N),'g-.',label='Enu = 6 MeV',linewidth=2)
plt.legend(prop={'size':11})
plt.xlabel('Recoil Energy (eV)')
plt.ylabel('Differential XSec, cm^2/eV')
plt.title('Ge-74 Differential CEvNS XSec, Fixed Enu')
plt.savefig('plots/diff_xsec_fixed_Enu.png')
fig2.clf()
def plot_cevns_rate_vs_T_Enu(nu_spec=None, nbins=1000):
fig = plt.figure()
fig.patch.set_facecolor('white')
e_arr = np.linspace(0.,1e7, nbins)
Tmin = 0.001
t_arr = np.logspace(0, 3, nbins)
Z = 32
    N = 42  # neutron number for Ge-74 (A=74, Z=32)
T, E = np.meshgrid(t_arr,e_arr)
spec = dsigmadT_cns(T,E,Z,N)
smax = spec.max()
smin = smax*1e-3
spec[spec<smin] = smin
im = plt.pcolor(T, E*1e-6, spec,
norm=LogNorm(vmin=smin, vmax=smax),
cmap='PuBu_r')
fig.colorbar(im)
plt.xlabel("Recoil Energy T (eV)")
plt.ylabel("Neutrino Energy Enu (MeV)")
plt.title("Ge-74 Differential XSec, cm^2/eV")
plt.savefig('plots/diff_xsec_vs_E_T.png')
if __name__ == "__main__":
try:
os.mkdir('plots')
except OSError as e:
pass
# CEvNS Differential Cross Section
plot_cevns_rate_fixed_T()
plot_cevns_rate_fixed_Enu()
plot_cevns_rate_vs_T_Enu(nbins=10)
labels=["Ge", "Zn", "Si",
"CaWO4",
"Al2O3"]
thresholds = [0., 10., 100.]
emin = 0.
emax = 12.e6
bin_size = 1.e4 # 10 keV
elbs = np.arange(emin, emax, bin_size)
for i in range(len(labels)):
(Z_arr, N_arr, atom_arr) = get_atomic_arrs(labels[i])
f = open("cevns_xsec_binned_%s.txt"%labels[i], 'w')
f.write("# "+labels[i]+"\n")
f.write("# Z=%s, N=%s, atoms=%s\n"%(Z_arr, N_arr, atom_arr))
f.write("# Columns 2-4 contain CEvNS xsec averaged over a %.2e keV bin\n"%(bin_size/1.e3))
f.write("# xsec units: cm^2\n")
f.write("#\n")
f.write("# Enu (keV) Thr=0 eV Thr=10 eV Thr=100 eV\n")
for elb in elbs:
line = "%.4e, "%(elb/1.e3)
for Tmin in thresholds:
line += "%.5e, "%\
(total_XSec_cns_compound_in_bin(Tmin,
elb, elb+bin_size,
Z_arr, N_arr,
atom_arr)/bin_size)
line = line[:-2]+"\n"
f.write(line)
f.close()
for i in range(len(labels)):
(Z_arr, N_arr, atom_arr) = get_atomic_arrs(labels[i])
f = open("cevns_xsec_%s.txt"%labels[i], 'w')
f.write("# "+labels[i]+"\n")
f.write("# Z=%s, N=%s, atoms=%s\n"%(Z_arr, N_arr, atom_arr))
f.write("# Columns 2-4 contain CEvNS xsec in cm^2\n")
f.write("#\n")
f.write("# Enu (keV) Thr=0 eV Thr=10 eV Thr=100 eV\n")
for elb in elbs:
line = "%.4e, "%(elb/1.e3)
for Tmin in thresholds:
line += "%.5e, "%\
total_XSec_cns_compound(Tmin, elb,
Z_arr, N_arr,
atom_arr,
form_factor=True, helm_ff=True)
line = line[:-2]+"\n"
f.write(line)
f.close()
```
#### File: results/commercial_reactor/make_commercial_reactor_plots.py
```python
from reactor_tools import NeutrinoSpectrum
import cevns_spectra
from cevns_spectra import dsigmadT_cns, dsigmadT_cns_rate, dsigmadT_cns_rate_compound, total_cns_rate_an, total_cns_rate_an_compound, cns_total_rate_integrated, cns_total_rate_integrated_compound, total_XSec_cns, total_XSec_cns_compound, cevns_yield_compound, ibd_yield, get_atomic_arrs
import numpy as np
from scipy.optimize import curve_fit
import scipy.integrate as spint
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import os
plt.rcParams.update({'font.size': 18})
Mn = cevns_spectra.Mn
Mn_eV = Mn*1e3
s_per_day = 60.0*60.0*24.0
def store_reactor_flux_kev(nu_spec1, outfile="flux.txt",
e_kev_lb=0.,
e_kev_ub=1.e4,
num_points=10000):
'''
Store the flux in a text file
Output is in two columns:
E_nu (keV), Flux (nu/(keV*day*cm^2))
The stored flux is always from 1 eV to 10 MeV. e_kev_lb
and e_kev_ub are used to set the stored flux to 0
outside of that region
'''
e_arr = np.linspace(1., 1.e7, num_points)
spec = s_per_day*nu_spec1.d_phi_d_enu_ev(e_arr)*1e3
spec[np.logical_or(e_arr<e_kev_lb*1000.,
e_arr>e_kev_ub*1000.)] = 0.
np.savetxt(outfile, np.stack((e_arr/1.e3, spec), 1),
header="E_nu (keV), Flux (nu/(keV*day*cm^2))")
def plot_neutrino_spectrum_comparison(nu_spec1, nu_spec_kopeikin,
num_points=1000):
'''
Make a plot of the comparison of two neutrino spectra
Args:
nu_spec1, nu_spec_kopeikin: Initialized NeutrinoSpectrum object
'''
# Plot neutrino spectrum + kopeikin spectrum
e_arr = np.linspace(0., 1e7, num_points)
fig0 = plt.figure()
fig0.patch.set_facecolor('white')
spec_tot1 = s_per_day*nu_spec1.d_phi_d_enu_ev(e_arr)
plt.plot(e_arr*1e-6,spec_tot1*1e6, "r-",linewidth=1, label="Bestiole")
spec_tot_kopeikin = s_per_day*nu_spec_kopeikin.d_phi_d_enu_ev(e_arr)
plt.plot(e_arr*1e-6,spec_tot_kopeikin*1e6, "b-", linewidth=1, label="Kopeikin")
plt.legend(prop={'size':11})
plt.xlabel('Neutrino Energy (MeV)')
plt.ylabel('Flux, nu/(MeV*day*cm^2)')
plt.title('Commercial Reactor (%s) Neutrino Flux'%
nu_spec1.get_settings_string())
plt.savefig('plots/commercial_reactor_neutrino_spectrum.png')
fig0.clf()
# Difference
fig0 = plt.figure()
fig0.patch.set_facecolor('white')
#diff = (spec_tot1-spec_tot_kopeikin)/spec_tot1
diff = spec_tot1/spec_tot_kopeikin
plt.plot(e_arr*1e-6,diff, "k-", linewidth=1)
plt.plot(e_arr*1e-6,0*e_arr+1.0, "r--", linewidth=1)
plt.legend(prop={'size':11})
plt.xlabel('Neutrino Energy (MeV)')
plt.ylabel('Bestiole/Kopeikin')
plt.ylim(0., 2.)
plt.xlim(0., 4.)
plt.title('Commercial Reactor Spectrum Comparison With Kopeikin')
plt.savefig('plots/commercial_reactor_kopeikin_comparison.png')
fig0.clf()
def plot_neutrino_spectrum_other(nu_spec, num_points=1000):
'''
Make a plot of the fission and other spectra
Args:
nu_spec: Initialized NeutrinoSpectrum object
'''
# Plot neutrino spectrum + kopeikin spectrum
e_arr = np.linspace(0., 10.e6, num_points)
fig0 = plt.figure()
fig0.patch.set_facecolor('white')
spec_tot = s_per_day*nu_spec.d_phi_d_enu_ev(e_arr)
plt.plot(e_arr*1e-6,spec_tot*1e6, "k-",linewidth=2, label="Total")
include_other = nu_spec.include_other
nu_spec.include_other = False
spec_fission = s_per_day*nu_spec.d_phi_d_enu_ev(e_arr)
plt.plot(e_arr*1e-6,spec_fission*1e6, 'r:', linewidth=2, label="Fission")
nu_spec.include_other = include_other
fractions = nu_spec.get_fractions()
nu_spec.set_fractions([0., 0., 0., 0.])
spec_other = s_per_day*nu_spec.d_phi_d_enu_ev(e_arr)
plt.plot(e_arr*1e-6,spec_other*1e6, 'b--', linewidth=2, label="Capture")
nu_spec.set_fractions(fractions)
plt.legend(prop={'size':11})
plt.xlabel('Neutrino Energy (MeV)')
plt.ylabel('Flux, nu/(MeV*day*cm^2)')
plt.xlim(0., 8.)
plt.ylim(0., 1.7e17)
plt.grid()
#plt.title('Commercial Reactor Neutrino Flux')
plt.savefig('plots/commercial_reactor_fission_vs_capture.png')
fig0.clf()
# Fractional Contribution
fig0 = plt.figure(figsize=(4., 3.))
fig0.patch.set_facecolor('white')
plt.plot(e_arr*1e-6, spec_tot/spec_tot, "k-", linewidth=1)
plt.plot(e_arr*1e-6, spec_fission/spec_tot, "r:", linewidth=1)
plt.plot(e_arr*1e-6, spec_other/spec_tot, "b--", linewidth=1)
#plt.xlabel('Neutrino Energy (MeV)')
#plt.ylabel('Fractional Contribution')
plt.xlim(0., 2.)
plt.ylim(0., 1.1)
plt.savefig('plots/commercial_reactor_fission_vs_capture_fraction.png')
fig0.clf()
def plot_dsigmadT_cns_rate(nu_spec,
bounds=[1e-4, 1e1],
num_points=100):
t_arr = np.logspace(0, 3, num=num_points)
fig3 = plt.figure()
fig3.patch.set_facecolor('white')
plt.ylim(bounds)
plt.xlim(1e0, 1e3)
labels = ["Si", "Zn", "Ge", "Al2O3", "CaWO4"]
lines = ['g-', 'b-', 'r-', 'c-.', 'm:']
widths = [1,1,1,2,2]
for i in range(len(labels)):
(Z_arr, N_arr, atom_arr) = get_atomic_arrs(labels[i])
plt.loglog(t_arr,dsigmadT_cns_rate_compound(t_arr, Z_arr, N_arr, atom_arr, nu_spec),lines[i],label=labels[i],linewidth=widths[i])
plt.legend(prop={'size':11})
plt.xlabel('Recoil Energy T (eV)')
plt.ylabel('Differential Event Rate (Events/kg/day/eV)')
plt.title("Commercial Reactor (%s) Differential Rate"%
nu_spec.get_settings_string())
plt.axvline(x=10.)
plt.axvline(x=100.)
plt.savefig('plots/commercial_reactor_dsigmadT_event_rate.png')
fig3.clf()
def plot_total_cns_rate(nu_spec, num_points=100):
# Make a plot of integrated event rate per eV vs threshold energy
t_arr = np.logspace(0, 3, num=num_points)
fig4 = plt.figure()
fig4.patch.set_facecolor('white')
labels = ["Si", "Zn", "Ge", "Al2O3", "CaWO4"]
lines = ['g-', 'b-', 'r-', 'c-.', 'm:']
widths = [1,1,1,2,2]
for i in range(len(labels)):
(Z_arr, N_arr, atom_arr) = get_atomic_arrs(labels[i])
plt.loglog(t_arr,total_cns_rate_an_compound(t_arr, 1e7, Z_arr, N_arr, atom_arr, nu_spec),lines[i],label=labels[i],linewidth=widths[i])
plt.legend(prop={'size':11})
plt.xlabel('Recoil Threshold (eV)')
plt.ylabel('Event Rate (Events/kg/day)')
plt.title("Commercial Reactor (%s) Total Rate"%
nu_spec.get_settings_string())
plt.axvline(x=10.)
plt.axvline(x=100.)
plt.savefig('plots/commercial_reactor_event_rate_integrated.png')
def lighten_color(color, amount=0.5):
"""
Lightens the given color by multiplying (1-luminosity) by the given amount.
Input can be matplotlib color string, hex string, or RGB tuple.
Examples:
>> lighten_color('g', 0.3)
>> lighten_color('#F034A3', 0.6)
>> lighten_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
def plot_flux_xsec(nu_spec):
# Ge
(Z_arr, N_arr, atom_arr) = get_atomic_arrs("Ge")
e_arr = np.linspace(0., 1e7, 100000)
# Plot neutrino flux
fig, host = plt.subplots(figsize=(7, 4))
fig.subplots_adjust(left=0.075, right=0.95, bottom=0.15, top=0.95)
fig.patch.set_facecolor('white')
par1 = host.twinx()
par2 = host.twinx()
# Offset the right spine of par2. The ticks and label have already been
# placed on the right by twinx above.
#par2.spines["right"].set_position(("axes", 1.2))
# Having been created by twinx, par2 has its frame off, so the line of its
# detached spine is invisible. First, activate the frame but make the patch
# and spines invisible.
#make_patch_spines_invisible(par2)
# Second, show the right spine.
#par2.spines["right"].set_visible(True)
lines = []
# Spectrum in nu/(MeV*day*cm^2)
spec_tot = s_per_day*nu_spec.d_phi_d_enu_ev(e_arr)*1e6
p_spec, = host.plot(e_arr*1e-6,spec_tot, "k-", label=r"$\nu$ Flux", linewidth=2.)
lines.append(p_spec)
xsec_0eV = total_XSec_cns_compound(0., e_arr, Z_arr, N_arr, atom_arr)
p_xsec_0, = par1.plot(e_arr*1e-6,xsec_0eV, color="#e41a1c", linestyle="-", label=r'T$_{Thr}$=0 eV')
lines.append(p_xsec_0)
prod_0eV = spec_tot*xsec_0eV
p_prod_0, = par2.plot(e_arr*1e-6,spec_tot*xsec_0eV, color=lighten_color("#e41a1c", 1.0), linestyle="-")
xsec_10eV = total_XSec_cns_compound(10., e_arr, Z_arr, N_arr, atom_arr)
p_xsec_10, = par1.plot(e_arr*1e-6,xsec_10eV, color="#377eb8", linestyle="--", label='T$_{Thr}$=10 eV')
lines.append(p_xsec_10)
prod_10eV = spec_tot*xsec_10eV
p_prod_10, = par2.plot(e_arr*1e-6,spec_tot*xsec_10eV, color=lighten_color("#377eb8", 1.0), linestyle="--")
xsec_50eV = total_XSec_cns_compound(50., e_arr, Z_arr, N_arr, atom_arr)
p_xsec_50, = par1.plot(e_arr*1e-6,xsec_50eV, color="#4daf4a", linestyle=":", label='T$_{Thr}$=50 eV')
lines.append(p_xsec_50)
prod_50eV = spec_tot*xsec_50eV
p_prod_50, = par2.plot(e_arr*1e-6,spec_tot*xsec_50eV, color=lighten_color('#4daf4a', 1.0), linestyle=":")
xsec_100eV = total_XSec_cns_compound(100., e_arr, Z_arr, N_arr, atom_arr)
p_xsec_100, = par1.plot(e_arr*1e-6,xsec_100eV, color="#984ea3", linestyle="-.", label='T$_{Thr}$=100 eV')
lines.append(p_xsec_100)
prod_100eV = spec_tot*xsec_100eV
p_prod_100, = par2.plot(e_arr*1e-6,spec_tot*xsec_100eV, color=lighten_color("#984ea3", 1.0), linestyle="-.")
host.set_xlim(0, 8)
'''host.set_xlim(0, 2)
host.set_ylim(0, 2)
par1.set_ylim(0, 4)
par2.set_ylim(1, 65)'''
host.set_xlabel("Neutrino Energy (MeV)")
host.set_ylabel("Arbitrary Units")
#par1.set_ylabel("CEvNS XSec [cm^2]")
#par2.set_ylabel("Product [nu/(MeV*day)]")
#plt.text(9.8, 1.96*1.e-24, "1.e-40", bbox=dict(facecolor='white', alpha=1.0))
#plt.text(12., 1.96*1.e-24, "1.e-24", bbox=dict(facecolor='white', alpha=1.0))
#host.yaxis.label.set_color('k')
#par1.yaxis.label.set_color('k')
#par2.yaxis.label.set_color('k')
tkw = dict(size=4, width=1.5)
#host.tick_params(axis='y', colors='k', **tkw)
#par1.tick_params(axis='y', colors='k', **tkw)
#par2.tick_params(axis='y', colors='k', **tkw)
host.tick_params(axis='x', **tkw)
host.tick_params(axis='y',
which='both', # both major and minor ticks are affected
left=True, # ticks along the bottom edge are off
right=False, # ticks along the top edge are off
labelleft=False, # labels along the bottom edge are off
labelcolor='white')
#host.get_yaxis().set_visible(False)
par1.get_yaxis().set_visible(False)
par2.get_yaxis().set_visible(False)
host.legend(lines, [l.get_label() for l in lines], loc=(0.585, 0.5), prop={'size':14}, framealpha=0.9)
#plt.legend(loc=4)
plt.axvline(1.8, color='k')
host.set_ylim(bottom=0)
par1.set_ylim(bottom=0)
par2.set_ylim(bottom=0)
plt.title('')
host.grid()
plt.savefig('plots/flux_xsec_product.pdf', bbox_inches='tight')
fig.clf()
# Save results to file
np.savetxt("plots/flux_xsec_product.txt",
np.column_stack((1e-6*e_arr,spec_tot,
xsec_0eV, prod_0eV,
xsec_10eV, prod_10eV,
xsec_50eV, prod_50eV,
xsec_100eV, prod_100eV)),
header="Neutrino Energies: MeV\n"+
"Neutrino Flux: nu/(MeV*day*cm^2)\n"+
"Cross Sections: cm^2\n"+
"Product: nu/(MeV*day)\n"+
"Neutrino Energy, Neutrino Flux, Ethr=0eV xsec, Ethr=0eV xsec,"+
"Ethr=10eV xsec, Ethr=10eV xsec, Ethr=50eV xsec, Ethr=50eV xsec,"+
"Ethr=100eV xsec, Ethr=100eV xsec, Ethr=200eV xsec, Ethr=200eV xsec")
def print_cevns_xsec(nu_spec):
print("CEvNS Yields per Average Atom (10^-43 cm^2/fission):")
labels = ["Al2O3", "Si", "Zn", "Ge", "CaWO4"]
for i in range(len(labels)):
(Z_arr, N_arr, atom_arr) = get_atomic_arrs(labels[i])
print("\t%s 10 eV: %.3e"%
(labels[i],
(cevns_yield_compound(10., 1.e8, Z_arr, N_arr, atom_arr, nu_spec)/1.e-43)))
print("\t%s 100 eV: %.3e"%
(labels[i],
(cevns_yield_compound(100., 1.e8, Z_arr, N_arr, atom_arr, nu_spec)/1.e-43)))
print("IBD Yield per Nucleon (10^-43 cm^2/fission): %.3e"%(ibd_yield(nu_spec)/1.e-43))
print("")
print("CEvNS Yields per Gram (10^-20 cm^2/fission/g):")
for i in range(len(labels)):
(Z_arr, N_arr, atom_arr) = get_atomic_arrs(labels[i])
print("\t%s 10 eV: %.3e"%
(labels[i],
               (cevns_yield_compound(10., 1.e8, Z_arr, N_arr, atom_arr, nu_spec, per_gram=True)/1.e-20)))
print("\t%s 100 eV: %.3e"%
(labels[i],
               (cevns_yield_compound(100., 1.e8, Z_arr, N_arr, atom_arr, nu_spec, per_gram=True)/1.e-20)))
print("IBD Yield (10^-20 cm^2/fission/g): %.3e"%(ibd_yield(nu_spec, per_gram=True)/1.e-20))
def plot_lowe_spectra(nu_spec,
output_path_prefix="plots/",
Z=32, A=74, isotope_name='Ge-74',
site_title="Commerical Reactor",
enu_low=1.8e6,
lt18=False, u238n=False,
neutron_shapes=True,
neutron_levels=True):
t_arr = np.logspace(0, 3, num=100)
fig3 = plt.figure(figsize=[8., 4.8])
fig3.patch.set_facecolor('white')
plt.loglog(t_arr*1.e-3,dsigmadT_cns_rate(t_arr, Z, A-Z, nu_spec)*1.e3,'k-',label='CEvNS Total',linewidth=1.)
if(lt18):
plt.loglog(t_arr*1.e-3,dsigmadT_cns_rate(t_arr, Z, A-Z, nu_spec, enu_min=enu_low)*1.e3, color="#e41a1c", linestyle="--", label='CEvNS %s>%.1f MeV'%(r'E$_\nu$', enu_low/1.e6), linewidth=2.)
plt.loglog(t_arr*1.e-3,dsigmadT_cns_rate(t_arr, Z, A-Z, nu_spec, enu_max=enu_low)*1.e3, color="#377eb8", linestyle=":", label='CEvNS %s<%.1f MeV'%(r'E$_\nu$', enu_low/1.e6), linewidth=2.)
if(u238n):
include_other = nu_spec.include_other
nu_spec.include_other = False
plt.loglog(t_arr*1.e-3,dsigmadT_cns_rate(t_arr, Z, A-Z, nu_spec)*1.e3, color="#e41a1c", linestyle="--", label='Fission', linewidth=2.)
nu_spec.include_other = include_other
fractions = nu_spec.get_fractions()
nu_spec.set_fractions([0., 0., 0., 0.])
plt.loglog(t_arr*1.e-3,dsigmadT_cns_rate(t_arr, Z, A-Z, nu_spec)*1.e3, color="#377eb8", linestyle=":", label='U-238 n', linewidth=2.)
nu_spec.set_fractions(fractions)
def n_back(T_keV, tau_1, tau_2, fac_2,
scale, norm=1., n_xsec=0.081):
# Returns rate in evts/kg/day/keV
rescale = n_xsec/0.081
return 1.e-3*norm*rescale*scale*\
(np.exp(-tau_1*T_keV)+fac_2*np.exp(-tau_2*T_keV))
n_back = np.vectorize(n_back)
n_cons_int = spint.quad(n_back, 0.01, 0.9,
args=(0.081*1.e3,
0.0086*1.e3, 0.23/0.38,
1.))[0]
n_cons_scale = 1/n_cons_int
if(neutron_levels):
plt.loglog(t_arr*1.e-3, n_back(t_arr*1.e-3,
0.081*1.e3,
0.0086*1.e3, 0.23/0.38,
n_cons_scale,
1000.*1.e-3)*1.e3,
':', color='darkorange', label="B=100., Cons")
plt.loglog(t_arr*1.e-3, n_back(t_arr*1.e-3,
0.081*1.e3,
0.0086*1.e3, 0.23/0.38,
n_cons_scale,
100.*1.e-3)*1.e3,
':', color='orange', label="B=10., Cons")
if(neutron_shapes or neutron_levels):
plt.loglog(t_arr*1.e-3, n_back(t_arr*1.e-3,
0.081*1.e3,
0.0086*1.e3, 0.23/0.38,
n_cons_scale,
1000.*1.e-3)*1.e3,
color="#4daf4a", linestyle='-.',
linewidth=1.,
label="B (Conservative)")
if(neutron_shapes):
n_med_int = spint.quad(n_back, 0.01, 0.9,
args=(0.004*1.e3,
0.0005*1.e3, 0.64,
1.))[0]
n_med_scale = 1./n_med_int
plt.loglog(t_arr*1.e-3, n_back(t_arr*1.e-3,
0.004*1.e3,
0.0005*1.e3, 0.64,
n_med_scale,
100.*1.e-3)*1.e3,
color="#984ea3", linestyle='-.',
linewidth=1.5,
label="B (Medium)")
n_opt_int = spint.quad(n_back, 0.01, 0.9,
args=(0.0004*1.e3,
0.00006*1.e3, 0.64,
1.))[0]
n_opt_scale = 1./n_opt_int
plt.loglog(t_arr*1.e-3, n_back(t_arr*1.e-3,
0.0004*1.e3,
0.00006*1.e3, 0.64,
n_opt_scale,
10.*1.e-3)*1.e3,
color="#ff7f00", linestyle='-.',
linewidth=2.,
label="B (Optimistic)")
ax = plt.gca()
plt.subplots_adjust(right=0.8)
ax.legend(bbox_to_anchor=(1.04,1), borderaxespad=0, prop={'size':14})
plt.xlabel('Recoil Energy (keV)')
plt.ylabel('Differential Event Rate (dru)')
pre_label = "%s (A=%.1f)"%(isotope_name, A)
#plt.title(site_title+" "+pre_label+" Differential Rates")
plt.xlim(1.e-3, 1.0)
plt.ylim(1e-1, 1.e5)
plt.axvline(x=1.e-3, color="k")
plt.axvline(x=10.e-3, color="k")
plt.axvline(x=50.e-3, color="k")
plt.grid()
filename = output_path_prefix+'lowe_'+isotope_name
if(lt18):
filename += "_lt18"
if(u238n):
filename += "_u238n"
if(neutron_shapes):
filename += "_nShapes"
if(neutron_levels):
filename += "_nLevels"
filename += '.pdf'
plt.savefig(filename, bbox_inches='tight')
fig3.clf()
def plot_lowe_spectra_isotopes(nu_spec,
output_path_prefix="plots/",
Z_arrs=[[32]], A_arrs=[[72.64]], weights=[[1]],
isotope_names=['Ge'],
site_title="Commerical Reactor",
enu_low=1.8e6,
lt18=False, u238n=False,
plot_total=False,
plot_low=True,
plot_high=False,
plot_back=True):
t_arr = np.logspace(0, 3, num=100)
fig3 = plt.figure()
fig3.patch.set_facecolor('white')
high_colors = ["#a6cee3", "#b2df8a", "#fb9a99", "#fdbf6f", "#cab2d6"]
low_colors = ["#1f78b4", "#33a02c", "#e31a1c", "#ff7f00", "#6a3d9a"]
for i in range(len(Z_arrs)):
Z_arr = np.array(Z_arrs[i])
A_arr = np.array(A_arrs[i])
weight_arr = weights[i]
name = isotope_names[i]
if(plot_total):
plt.loglog(t_arr*1.e-3,dsigmadT_cns_rate_compound(t_arr, Z_arr, A_arr-Z_arr, weight_arr, nu_spec)*1.e3,'-.', color=high_colors[i], label='%s Tot'%name,linewidth=float(i)/2.+0.5)
if(lt18):
if(plot_high):
plt.loglog(t_arr*1.e-3,dsigmadT_cns_rate_compound(t_arr, Z_arr, A_arr-Z_arr, weight_arr, nu_spec, enu_min=enu_low)*1.e3, color=high_colors[i], linestyle="--", label='%s enu>%.1f MeV'%(name, enu_low/1.e6), linewidth=float(i)/2.+0.5)
if(plot_low):
plt.loglog(t_arr*1.e-3,dsigmadT_cns_rate_compound(t_arr, Z_arr, A_arr-Z_arr, weight_arr, nu_spec, enu_max=enu_low)*1.e3, color=low_colors[i], linestyle="-", label='%s enu<%.1f MeV'%(name, enu_low/1.e6), linewidth=float(i)/2.+0.5)
if(u238n):
if(plot_high):
include_other = nu_spec.include_other
nu_spec.include_other = False
plt.loglog(t_arr*1.e-3,dsigmadT_cns_rate_compound(t_arr, Z_arr, A_arr-Z_arr, weight_arr, nu_spec)*1.e3, color=high_colors[i], linestyle="--", label='%s Fission'%name, linewidth=float(i)/2.+0.5)
nu_spec.include_other = include_other
if(plot_low):
fractions = nu_spec.get_fractions()
nu_spec.set_fractions([0., 0., 0., 0.])
plt.loglog(t_arr*1.e-3,dsigmadT_cns_rate_compound(t_arr, Z_arr, A_arr-Z_arr, weight_arr, nu_spec)*1.e3, color=low_colors[i], linestyle="-", label='%s U-238 n'%name, linewidth=float(i)/2.+0.5)
nu_spec.set_fractions(fractions)
if(plot_back):
def n_back(T_keV, tau_1, tau_2, fac_2,
scale, norm=1., n_xsec=0.081):
# Returns rate in evts/kg/day/keV
rescale = n_xsec/0.081
return 1.e-3*norm*rescale*scale*\
(np.exp(-tau_1*T_keV)+fac_2*np.exp(-tau_2*T_keV))
n_back = np.vectorize(n_back)
n_cons_int = spint.quad(n_back, 0.01, 0.9,
args=(0.081*1.e3,
0.0086*1.e3, 0.23/0.38,
1.))[0]
n_cons_scale = 1/n_cons_int
plt.loglog(t_arr*1.e-3, n_back(t_arr*1.e-3,
0.081*1.e3,
0.0086*1.e3, 0.23/0.38,
n_cons_scale,
10.*1.e-3)*1.e3,
color="lightgrey", linestyle=':',
linewidth=2.,
label="B=10, Conservative")
n_med_int = spint.quad(n_back, 0.01, 0.9,
args=(0.004*1.e3,
0.0005*1.e3, 0.64,
1.))[0]
n_med_scale = 1./n_med_int
plt.loglog(t_arr*1.e-3, n_back(t_arr*1.e-3,
0.004*1.e3,
0.0005*1.e3, 0.64,
n_med_scale,
10.*1.e-3)*1.e3,
color="grey", linestyle=':',
linewidth=1.5,
label="B=10, Medium")
n_opt_int = spint.quad(n_back, 0.01, 0.9,
args=(0.0004*1.e3,
0.00006*1.e3, 0.64,
1.))[0]
n_opt_scale = 1./n_opt_int
plt.loglog(t_arr*1.e-3, n_back(t_arr*1.e-3,
0.0004*1.e3,
0.00006*1.e3, 0.64,
n_opt_scale,
10.*1.e-3)*1.e3,
color="k", linestyle=':',
linewidth=1.,
label="B=10, Optimistic")
plt.legend(prop={'size':9})
plt.xlabel('Recoil Energy (keV)')
plt.ylabel('Differential Event Rate (dru)')
plt.ylim(1e-1, 1.e4)
plt.axvline(x=1.e-3, color="k")
plt.axvline(x=10.e-3, color="k")
plt.axvline(x=50.e-3, color="k")
title="CEvNS Spectrum for "
if(lt18):
title += "Enu<1.8/Enu>1.8 MeV"
if(u238n):
title += " and "
if(u238n):
title += "U-238 n Capture/Fission"
plt.title(title)
filename = output_path_prefix+'lowe'
for name in isotope_names:
filename += '_'+name
if(lt18):
filename += "_lt18"
if(u238n):
filename += "_u238n"
if(plot_total):
filename += "_tot"
if(plot_low):
filename += "_low"
if(plot_high):
filename += "_high"
if(plot_back):
filename += "_back"
filename += '.png'
plt.savefig(filename)
fig3.clf()
def calc_lowe_fraction(nu_spec,
output_path_prefix="",
Z_arr=[32], A_arr=[72.64],
weights_arr=[1],
isotope_name='Ge',
site_title="Commerical Reactor"):
enu_low = 1.8e6
t_arr = np.logspace(-2, 4, num=200)
N_arr = []
for i in range(len(Z_arr)):
N_arr.append(A_arr[i]-Z_arr[i])
A_sum = 0
weight_sum = 0
for j in range(len(Z_arr)):
A_sum += (Z_arr[j]+N_arr[j])*weights_arr[j]
weight_sum += weights_arr[j]
frac = total_cns_rate_an_compound(t_arr, enu_low, Z_arr, N_arr, weights_arr, nu_spec)/\
total_cns_rate_an_compound(t_arr, 1e7, Z_arr, N_arr, weights_arr, nu_spec)
frac[np.isnan(frac)] = 0
frac[frac>1.] = 1.1 # It's a fraction, so it should be <1.0
np.savetxt("%sthresh_vs_fraction_lt_1_8_%s.txt"%
(output_path_prefix,isotope_name),
np.column_stack((t_arr, frac)),
header="# T (eV), Fraction")
flux_lt_18 = spint.quad(lambda enu: nu_spec.d_phi_d_enu_ev(enu),
0., 1.8e6)[0]
flux_tot = spint.quad(lambda enu: nu_spec.d_phi_d_enu_ev(enu),
0., 20.e6)[0]
print("Isotope: %s"%isotope_name)
print("\tFraction of flux <1.8 MeV: %.4f"%(flux_lt_18/flux_tot))
for thresh in [0.0001, 1., 10., 50.]:
frac = total_cns_rate_an_compound(thresh, enu_low, Z_arr, N_arr, weights_arr, nu_spec)/\
total_cns_rate_an_compound(thresh, 1e7, Z_arr, N_arr, weights_arr, nu_spec)
print("\tT=%.2e, Frac=%.5f"%(thresh,frac))
if __name__ == "__main__":
try:
os.mkdir('plots')
except OSError as e:
pass
# The averaged spectrum is stored in U-235
fractions = [1.0, 0.0, 0.0, 0.0]
    # Chooz reactors are at 102 m and 72 m, each 4.25 GW
# With both on, this is equivalent to 58.82 m from one 4.25 GW reactor
power = 4250
distance = 5882 # cm
# The stored spectra are in neutrinos/MeV/s for a 4250 MW reactor
    # reactor_tools will multiply by: power/200./1.602176565e-19
# We need to rescale to undo this
scale = 1./(power/200.0/1.602176565e-19)
nu_spec = NeutrinoSpectrum(distance, power, False, *fractions,
include_other=True)
nu_spec.initialize_d_r_d_enu("u235", "root",
"../../../final_spectra/sum_U_Pu_20gspt_Tengblad-TAGSnew-ENSDF2020-Qbeta5br_FERMI.screen.QED.aW.root",
"nsim_Fission_avg",
scale=scale)
nu_spec.initialize_d_r_d_enu("u238", "zero")
nu_spec.initialize_d_r_d_enu("pu239", "zero")
nu_spec.initialize_d_r_d_enu("pu241", "zero")
nu_spec.initialize_d_r_d_enu("other", "root",
"../../../final_spectra/sum_U_Pu_20gspt_Tengblad-TAGSnew-ENSDF2020-Qbeta5br_FERMI.screen.QED.aW.root",
"nsim_U239_Np239_Pu239_avg",
scale=scale)
# Kopeikin spectra
nu_spec_kopeikin = NeutrinoSpectrum(nu_spec.distance, nu_spec.power, False,
*fractions)
nu_spec_kopeikin.initialize_d_r_d_enu("u235", "txt",
"../../data/kopeikin_spectrum.txt")
nu_spec_kopeikin.initialize_d_r_d_enu("u238", "zero")
nu_spec_kopeikin.initialize_d_r_d_enu("pu239", "zero")
nu_spec_kopeikin.initialize_d_r_d_enu("pu241", "zero")
nu_spec_kopeikin.initialize_d_r_d_enu("other", "zero")
# Mueller spectra
nu_spec_mueller = NeutrinoSpectrum(nu_spec.distance, nu_spec.power, True,
0.564, 0.076, 0.304, 0.056) # Daya Bay Numbers (10.1103/PhysRevD.100.052004)
nu_spec_mueller.initialize_d_r_d_enu("u235", "txt",
"../../data/huber/U235-anti-neutrino-flux-250keV.dat")
nu_spec_mueller.initialize_d_r_d_enu("u238", "mueller")
nu_spec_mueller.initialize_d_r_d_enu("pu239", "txt",
"../../data/huber/Pu239-anti-neutrino-flux-250keV.dat")
nu_spec_mueller.initialize_d_r_d_enu("pu241", "txt",
"../../data/huber/Pu241-anti-neutrino-flux-250keV.dat")
nu_spec_mueller.initialize_d_r_d_enu("other", "mueller")
# Store flux to file, for use by statistical code
store_reactor_flux_kev(nu_spec, "flux_commercial_reactor_all.txt")
store_reactor_flux_kev(nu_spec,
"flux_commercial_reactor_lt1800.txt",
0., 1800.)
store_reactor_flux_kev(nu_spec,
"flux_commercial_reactor_gt1800.txt",
1800., 1.e4)
nu_spec.include_other = False
nu_spec.set_fractions(fractions)
store_reactor_flux_kev(nu_spec, "flux_commercial_reactor_fission.txt")
store_reactor_flux_kev(nu_spec, "flux_commercial_reactor_fission_lt1800.txt",
0., 1800.)
store_reactor_flux_kev(nu_spec, "flux_commercial_reactor_fission_gt1800.txt",
1800., 1.e4)
nu_spec.include_other = True
nu_spec.set_fractions([0., 0., 0., 0.])
store_reactor_flux_kev(nu_spec, "flux_commercial_reactor_u238n.txt")
store_reactor_flux_kev(nu_spec, "flux_commercial_reactor_u238n_lt1800.txt",
0., 1800.)
store_reactor_flux_kev(nu_spec, "flux_commercial_reactor_u238n_gt1800.txt",
1800., 1.e4)
nu_spec.set_fractions(fractions)
# Plot neutrino spectrum and CEvNS Rates
plot_neutrino_spectrum_comparison(nu_spec, nu_spec_kopeikin, num_points=1000)
plot_dsigmadT_cns_rate(nu_spec, num_points=100)
plot_total_cns_rate(nu_spec, num_points=100)
# Plot flux spectrum CEvNS xsec, and product
plot_flux_xsec(nu_spec)
print_cevns_xsec(nu_spec)
print("IBD Yield Summation: %.3e [cm^2/fission]"%ibd_yield(nu_spec, per_gram=False))
print("IBD Yield Mueller: %.3e [cm^2/fission]"%ibd_yield(nu_spec_mueller, per_gram=False))
# Compare fission and n capture neutrino spectra
plot_neutrino_spectrum_other(nu_spec, num_points=1000)
plot_lowe_spectra(nu_spec, "plots/",
Z=32, A=74, isotope_name='Ge-74',
lt18=True, neutron_levels=False)
plot_lowe_spectra(nu_spec, "plots/",
Z=32, A=74, isotope_name='Ge-74',
u238n=True, neutron_levels=False)
labels = ["CaWO4",
"Ge", "Zn", "Si",
"Al2O3"]
Z_arrs = list()
A_arrs = list()
weight_arrs = list()
for i in range(len(labels)):
(Z_arr, N_arr, atom_arr) = get_atomic_arrs(labels[i])
Z_arrs.append(Z_arr)
A_arr = np.array(Z_arr)+np.array(N_arr)
A_arrs.append(A_arr)
weight_arrs.append(atom_arr)
plot_lowe_spectra_isotopes(nu_spec, "plots/",
Z_arrs=Z_arrs,
A_arrs=A_arrs,
weights=weight_arrs,
isotope_names=labels,
lt18=True)
plot_lowe_spectra_isotopes(nu_spec, "plots/",
Z_arrs=Z_arrs,
A_arrs=A_arrs,
weights=weight_arrs,
isotope_names=labels,
lt18=True, plot_high=True,
plot_back=False)
plot_lowe_spectra_isotopes(nu_spec, "plots/",
Z_arrs=Z_arrs,
A_arrs=A_arrs,
weights=weight_arrs,
isotope_names=labels,
u238n=True)
plot_lowe_spectra_isotopes(nu_spec, "plots/",
Z_arrs=Z_arrs,
A_arrs=A_arrs,
weights=weight_arrs,
isotope_names=labels,
u238n=True, plot_high=True,
plot_back=False)
# Store fraction of neutrinos below 1.8 MeV for various threshold
try:
os.mkdir("fractions")
except OSError:
pass
labels = ["CaWO4",
"Ge", "Zn", "Si",
"Al2O3"]
for i in range(len(labels)):
(Z_arr, N_arr, atom_arr) = get_atomic_arrs(labels[i])
A_arr = np.array(Z_arr)+np.array(N_arr)
calc_lowe_fraction(nu_spec, "fractions/",
Z_arr=Z_arr, A_arr=A_arr,
weights_arr=atom_arr,
isotope_name=labels[i])
```
#### File: results/precision_plot/make_precision_plot.py
```python
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.interpolate import make_interp_spline, BSpline
def comparison_plots(shape="medium", bkgd="1.0e+02", label="medium"):
# PAPER PLOT- Medium case, 5% signal uncertainty and 0% uncertainty
fig = plt.figure()
fig.patch.set_facecolor('white')
data_var = np.loadtxt("lowe_nu_commercial_allLowE_precision_varying/ge_bkgd_"+shape+"_"+bkgd+"_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
spl_var = make_interp_spline(np.log10(x_var), y_var, k=3)
x_var_new = np.logspace(np.log10(x_var[0]), np.log10(x_var[-1]))
y_var_new = spl_var(np.log10(x_var_new))
plt.semilogx(x_var_new, y_var_new, 'r-', label="5% Uncertainty, One Shape")
data_fix = np.loadtxt("lowe_nu_commercial_allLowE_precision_fixed/ge_bkgd_"+shape+"_"+bkgd+"_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
spl_fix = make_interp_spline(np.log10(x_fix), y_fix, k=3)
x_fix_new = np.logspace(np.log10(x_fix[0]), np.log10(x_fix[-1]))
y_fix_new = spl_fix(np.log10(x_fix_new))
plt.semilogx(x_fix_new, y_fix_new, 'b-.', label="No Uncertainty, One Shape")
plt.axhline(y=1., color='k', linestyle=":")
plt.axhline(y=5., color='k', linestyle=":")
plt.legend(prop={'size':11})
plt.gca().set_xlim(1.e-1, 100.)
plt.gca().set_ylim(0., 20.)
plt.xlabel("Exposure (kg*years)")
plt.ylabel("Precision (%)")
plt.title("Precision vs Exposure, %s Shape, Bkgd %s"%(shape, bkgd))
plt.savefig('plots/lowenu_comm_precision_'+label+'.png')
data_var = np.loadtxt("lowe_nu_commercial_allLowE_precision_summed_shapes_varying/ge_bkgd_"+shape+"_"+bkgd+"_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
spl_var = make_interp_spline(np.log10(x_var), y_var, k=3)
x_var_new = np.logspace(np.log10(x_var[0]), np.log10(x_var[-1]))
y_var_new = spl_var(np.log10(x_var_new))
plt.semilogx(x_var_new, y_var_new, 'k--', label="5% Uncertainty, Summed Shapes")
data_fix = np.loadtxt("lowe_nu_commercial_allLowE_precision_summed_shapes_fixed/ge_bkgd_"+shape+"_"+bkgd+"_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
spl_fix = make_interp_spline(np.log10(x_fix), y_fix, k=3)
x_fix_new = np.logspace(np.log10(x_fix[0]), np.log10(x_fix[-1]))
y_fix_new = spl_fix(np.log10(x_fix_new))
plt.semilogx(x_fix_new, y_fix_new, 'c:', label="No Uncertainty, Summed Shapes")
plt.legend(prop={'size':11})
plt.savefig('plots/lowenu_comm_precision_summed_shapes_'+label+'.png')
def debug_plots(dir_prefix="lowe_nu_commercial_allLowE_precision_",
label="one_shape"):
# DEBUG PLOT: Varying low e envelope
fig = plt.figure()
fig.patch.set_facecolor('white')
data_var = np.loadtxt(dir_prefix+"varying/ge_bkgd_conservative_1.0e+03_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
plt.semilogx(x_var, y_var, linestyle='-', label="B=1000, Conservative")
data_var = np.loadtxt(dir_prefix+"varying/ge_bkgd_medium_1.0e+03_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
plt.semilogx(x_var, y_var, linestyle='-', label="B=1000, Medium")
data_var = np.loadtxt(dir_prefix+"varying/ge_bkgd_optimistic_1.0e+03_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
plt.semilogx(x_var, y_var, linestyle='-', label="B=1000, Optimistic")
data_var = np.loadtxt(dir_prefix+"varying/ge_bkgd_conservative_1.0e+02_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
plt.semilogx(x_var, y_var, linestyle='--', label="B=100, Conservative")
data_var = np.loadtxt(dir_prefix+"varying/ge_bkgd_medium_1.0e+02_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
plt.semilogx(x_var, y_var, linestyle='--', label="B=100, Medium")
data_var = np.loadtxt(dir_prefix+"varying/ge_bkgd_optimistic_1.0e+02_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
plt.semilogx(x_var, y_var, linestyle='--', label="B=100, Optimistic")
data_var = np.loadtxt(dir_prefix+"varying/ge_bkgd_conservative_1.0e+01_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
plt.semilogx(x_var, y_var, linestyle='--', label="B=10, Conservative")
data_var = np.loadtxt(dir_prefix+"varying/ge_bkgd_medium_1.0e+01_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
plt.semilogx(x_var, y_var, linestyle='--', label="B=10, Medium")
data_var = np.loadtxt(dir_prefix+"varying/ge_bkgd_optimistic_1.0e+01_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_var = data_var[:,0]*data_var[:,1]/365.
# Precision in pct
y_var = data_var[:,4]*100.
plt.semilogx(x_var, y_var, linestyle='--', label="B=10, Optimistic")
plt.axhline(y=1., color='k', linestyle=":")
plt.axhline(y=5., color='k', linestyle=":")
plt.legend(prop={'size':11})
plt.gca().set_xlim(1.e-1, 100.)
plt.gca().set_ylim(0., 20.)
plt.xlabel("Exposure (kg*years)")
plt.ylabel("Precision (%)")
plt.title("Precision with 5% Uncertainty on Enu<1.8 MeV")
plt.savefig('plots/lowenu_comm_precision_debug_5'+label+'.png')
# DEBUG PLOT: Fixed low e envelope
fig = plt.figure()
fig.patch.set_facecolor('white')
data_fix = np.loadtxt(dir_prefix+"fixed/ge_bkgd_conservative_1.0e+03_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
plt.semilogx(x_fix, y_fix, linestyle='-', label="B=1000, Conservative")
data_fix = np.loadtxt(dir_prefix+"fixed/ge_bkgd_medium_1.0e+03_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
plt.semilogx(x_fix, y_fix, linestyle='-', label="B=1000, Medium")
data_fix = np.loadtxt(dir_prefix+"fixed/ge_bkgd_optimistic_1.0e+03_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
plt.semilogx(x_fix, y_fix, linestyle='-', label="B=1000, Optimistic")
data_fix = np.loadtxt(dir_prefix+"fixed/ge_bkgd_conservative_1.0e+02_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
plt.semilogx(x_fix, y_fix, linestyle='--', label="B=100, Conservative")
data_fix = np.loadtxt(dir_prefix+"fixed/ge_bkgd_medium_1.0e+02_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
plt.semilogx(x_fix, y_fix, linestyle='--', label="B=100, Medium")
data_fix = np.loadtxt(dir_prefix+"fixed/ge_bkgd_optimistic_1.0e+02_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
plt.semilogx(x_fix, y_fix, linestyle='--', label="B=100, Optimistic")
data_fix = np.loadtxt(dir_prefix+"fixed/ge_bkgd_conservative_1.0e+01_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
plt.semilogx(x_fix, y_fix, linestyle=':', label="B=10, Conservative")
data_fix = np.loadtxt(dir_prefix+"fixed/ge_bkgd_medium_1.0e+01_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
plt.semilogx(x_fix, y_fix, linestyle=':', label="B=10, Medium")
data_fix = np.loadtxt(dir_prefix+"fixed/ge_bkgd_optimistic_1.0e+01_thresh_1.0e-02/precision_ge.txt")
# Exposure in kg*years
x_fix = data_fix[:,0]*data_fix[:,1]/365.
# Precision in pct
y_fix = data_fix[:,4]*100.
plt.semilogx(x_fix, y_fix, linestyle=':', label="B=10, Optimistic")
plt.axhline(y=1., color='k', linestyle=":")
plt.axhline(y=5., color='k', linestyle=":")
plt.legend(prop={'size':11})
plt.gca().set_xlim(1.e-1, 100.)
plt.gca().set_ylim(0., 20.)
plt.xlabel("Exposure (kg*years)")
plt.ylabel("Precision (%)")
plt.title("Precision with No Uncertainty on Enu<1.8 MeV")
plt.savefig('plots/lowenu_comm_precision_debug_0'+label+'.png')
if __name__ == "__main__":
try:
os.mkdir('plots')
except OSError as e:
pass
comparison_plots("optimistic", "1.0e+01", "optimistic")
comparison_plots("medium", "1.0e+02", "medium")
comparison_plots()
debug_plots()
debug_plots("lowe_nu_commercial_allLowE_precision_summed_shapes_",
"summed_shapes")
``` |
{
"source": "joe-jordan/minimalisp",
"score": 3
} |
#### File: minimalisp/minimalisp/parse.py
```python
import re
class sexpr(list):
def __init__(self, *args, **kwargs):
self.quoted = False
super(sexpr, self).__init__(*args, **kwargs)
def __repr__(self):
outer = super(sexpr, self).__repr__()
if self.quoted:
return "'" + outer
return outer
def remove_comments(inp):
commentless_lines = []
# kill comment characters ; onwards, when not inside a string.
for l in inp.split("\n"):
splits = l.split(';')
if len(splits) == 1:
commentless_lines.append(l)
continue
previous_string = splits.pop(0)
while len(re.findall(r'(?<!\\)"', previous_string)) % 2 != 0:
            previous_string += ';' + splits.pop(0)
commentless_lines.append(previous_string)
return "\n".join(commentless_lines)
def clever_split(inp):
commentless_inp = remove_comments(inp)
# rather than splitting on all whitespace, we want to split only on
# whitespace not inside a string.
# strings cannot contain newlines, so start by splitting on those:
lines = commentless_inp.split("\n")
tokens = []
for i, l in enumerate(lines):
splits = [s for s in l.split('"')]
# if there were no "s in this line, split the whole thing by whitespace:
if len(splits) == 1:
tokens.extend(l.split())
continue
# first " can't be escaped.
tokens.extend(splits.pop(0).split())
while splits:
this_string = splits.pop(0)
# while the quotes are escaped:
while this_string[-1] == "\\":
assert len(splits) > 0, "string on line %d is not terminated correctly." % i
this_string = this_string[:-1] + '\\"' + splits.pop(0)
# we've got to the end of this string, add it as its own token:
tokens.append('"' + this_string + '"')
# the next tokens are not strings, so just extend them (if any):
if splits:
tokens.extend(splits.pop(0).split())
return tokens
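# Illustrative example (not from the original sources): whitespace inside a
# string literal does not split, everything else does.
#   clever_split('(bind x "hello world")')
#   -> ['(bind', 'x', '"hello world"', ')']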
def parse(inp):
toks = clever_split(inp)
parents = {}
output_list = sexpr()
current_list = output_list
while True:
try:
tok = toks.pop(0)
except IndexError:
break
while tok and tok[0] in "'(" and (tok[0] == '(' or tok[:2] == "'("):
current_list.append(sexpr())
parents[id(current_list[-1])] = current_list
current_list = current_list[-1]
if tok[0] == '(':
tok = tok[1:]
else:
tok = tok[2:]
current_list.quoted = True
end_here = 0
while tok and tok[-1] == ')':
end_here += 1
tok = tok[:-1]
if tok:
current_list.append(tok)
while end_here:
end_here -= 1
current_list = parents[id(current_list)]
assert id(current_list) == id(output_list), "s expressions not closed"
return output_list
PAIR_SEPARATOR = '.'
class PAIR_LITERALS(object):
def __repr__(self):
return '.'
PAIR_LITERAL = PAIR_LITERALS()
NUMERIC = "0123456789"
BEGIN_HEXANUMERIC = "#"
HEXANUMERIC = NUMERIC + "aAbBcCdDeEfF"
STRINGY = '"'
DUBIOUS = "+-"
ALLOWED_IN_NUMERIC = NUMERIC + DUBIOUS + "eE."
from values import Value, Symbol, NIL, Pair, LispType
def parse_token(t):
char1 = t[0]
# shift the decision onto the second char.
if char1 in DUBIOUS:
# check ahead to see what type we're dealing with.
if len(t) == 1:
# this is a single + or - as a symbol. do nothing.
pass
elif t[1] in NUMERIC:
# if numeric, just set char1 so that the next section categorises properly.
char1 = t[1]
if char1 in NUMERIC:
assert all([c in ALLOWED_IN_NUMERIC for c in t]), "token %s contains invalid characters to be a numeric literal." % t
v = Value(t)
elif char1 in STRINGY:
v = Value(t)
elif char1 in BEGIN_HEXANUMERIC:
assert all([c in HEXANUMERIC for c in t[1:]]), "token %s contains invalid characters for a hexadecimal literal." % t
t = t.replace('#', '0x').upper()
v = Value(t)
elif char1 in PAIR_SEPARATOR and len(t) == 1:
v = PAIR_LITERAL
else:
if t.upper() == "NIL":
v = NIL()
elif t[0] in "'":
v = Symbol(t[1:], True)
else:
v = Symbol(t)
return v
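# Illustrative classifications (not from the original sources):
#   parse_token('-12')   -> Value(-12)         (sign char, then numeric)
#   parse_token('#ff')   -> Value(255)         (hexadecimal literal)
#   parse_token("'foo")  -> quoted Symbol FOO
#   parse_token('nil')   -> NIL()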
def parse_token_prompt(t):
v = parse_token(t)
if v == PAIR_LITERAL:
return Value('"."')
elif isinstance(v, Symbol):
return Value('"%s"' % t)
return v
def parse_tokens(s):
for i, t in enumerate(s):
if type(t) == sexpr:
parse_tokens(t)
continue
s[i] = parse_token(t)
if s[i] == PAIR_LITERAL:
assert i == 1 and len(s) == 3, "incorrect context for a pair literal, %s." % repr(s)
def do_pair_literals(s):
for i, t in enumerate(s):
if type(t) == sexpr:
do_pair_literals(t)
if len(t) == 3 and t[1] == PAIR_LITERAL:
s[i] = Pair(t[0], t[2], t.quoted)
def sexprs_to_pairs(s):
if isinstance(s, LispType):
# value literal or pair literal at top level in source, not a problem.
return s
for i, t in enumerate(s):
if type(t) == sexpr:
s[i] = sexprs_to_pairs(t)
elif type(t) == Pair:
# was specified as a pair literal in the source, may contain s-expressions inside.
if type(t.left) == sexpr:
t.left = sexprs_to_pairs(t.left)
if type(t.right) == sexpr:
t.right = sexprs_to_pairs(t.right)
assert all([type(t) is not sexpr for t in s]), "programming error in parser."
return Pair.pair_list_from_sexpr(s, s.quoted)
def parse_program(inp):
prog = parse(inp)
# now recursively walk over all s-expressions, building tokens into Values, Symbols, and Pair Separator placeholders.
parse_tokens(prog)
# identify any pair literals and instantiate those first:
do_pair_literals(prog)
# finally, build actual Pairs for all the proper S-expressions:
prog = [sexprs_to_pairs(s) for s in prog]
return prog
```
#### File: minimalisp/minimalisp/values.py
```python
from __future__ import print_function
def crepr(s):
assert type(s) is str, "crepr is for strings, i.e. str() instances."
prepr = repr(s)
if prepr[:3] == '"""':
# anything goes, need to manually escape everything.
raise NotImplementedError("not implemented for this repr type.")
elif prepr[0] == '"':
return prepr
else:
assert prepr[0] == "'", "unknown repr output from python."
return '"' + prepr[1:-1].replace("\\'", "'").replace('"', '\\"') + '"'
def lrepr(s):
if type(s) is str:
return crepr(s)
return repr(s)
class LispType(object):
pass
class Symbol(LispType):
"""only stores its text representation as a python string in upper case
- can therefore be used as a dict key."""
def __init__(self, s, quoted=False):
self.s = s.upper()
self.quoted = quoted
def __eq__(self, other):
if type(other) is not Symbol:
return False
return self.s == other.s
def __ne__(self, other):
return not self == other
def __repr__(self):
if self.quoted:
return "'" + self.s
return self.s
def __hash__(self):
return hash(self.s)
class LispValue(LispType):
pass
class NIL(LispValue):
def __eq__(self, other):
return isinstance(other, NIL)
def __ne__(self, other):
return not self == other
def __repr__(self):
return 'NIL'
class Value(LispValue):
"""stored simply as the relevant python type (string, int or float)."""
def __init__(self, v, actual=False):
if actual:
self.v = v
else:
self.v = eval(v)
def __eq__(self, other):
if not isinstance(other, Value):
return False
return self.v == other.v
def __ne__(self, other):
return not self == other
def __repr__(self):
if isinstance(self.v, (str, unicode)):
return self.v
else:
return repr(self.v)
class Pair(LispType):
def __init__(self, left=None, right=None, quoted=False):
self.left = left
self.right = right
self.quoted = quoted
def __eq__(self, other):
if not isinstance(other, Pair):
return False
# This is recursive for large pair structures, and should test equality of all
# leaf values.
# We change the order to avoid making depth-first the enemy of speed (one side may be a
# single different value, the other may be a large, equal linked list.)
left_is_pair = isinstance(self.left, Pair) and isinstance(other.left, Pair)
right_is_pair = isinstance(self.right, Pair) and isinstance(other.right, Pair)
if left_is_pair and not right_is_pair:
return (self.right == other.right and self.left == other.left)
return (self.left == other.left and self.right == other.right)
def __ne__(self, other):
return not self == other
@classmethod
def pair_list_from_sexpr(cls, s, outermost_quoted = False):
right = NIL()
for v in reversed(s):
right = Pair(v, right)
if outermost_quoted:
right.quoted = True
return right
def __repr__(self):
q = ""
if self.quoted:
q = "'"
return "%s(%s . %s)" % (q, self.left, self.right)
```
#### File: minimalisp/minimalisp/vm.py
```python
from __future__ import print_function, division
from values import NIL, LispType, LispValue, Symbol, Value, Pair
from parse import parse_token_prompt, parse_program
from operator import mul
import random
random.seed()
import os.path
# overwritten in the executible
PERMISSIVE = False
class LispRuntimeError(BaseException):
pass
class UnboundSymbolError(LispRuntimeError):
pass
def sexpr_from_iterator(it):
pair = NIL()
for i in reversed(it):
pair = Pair(i, pair)
return pair
class Context(dict):
"""stack-like dictionary."""
def __init__(self, *args, **kwargs):
# default arg that can only be specified by keyword. Python 3 fixes this problem.
self.parent = kwargs.pop('parent', None)
self.env = kwargs.pop('environment', None)
if self.env is None:
try:
self.env = self.parent.env
except AttributeError:
raise LispRuntimeError('context constructor passed neither parent nor environment.')
super(Context, self).__init__(*args, **kwargs)
def __getitem__(self, key):
try:
return super(Context, self).__getitem__(key)
except KeyError:
if self.parent is None:
if PERMISSIVE:
return NIL()
raise UnboundSymbolError("symbol %r was used unbound." % key)
return self.parent[key]
def __contains__(self, key):
ret = super(Context, self).__contains__(key)
if ret is False and self.parent is not None:
ret = key in self.parent
return ret
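# A small usage sketch (assumed, not from the repository): lookups fall back
# through parent contexts, so nested calls can still see outer bindings.
#   outer = Context({Symbol('x'): Value('1')}, environment='prog')
#   inner = Context(parent=outer)
#   inner[Symbol('x')]   # -> the Value 1, resolved via the parent chain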
# evaluate - should be a Symbol, Value or a Pair.
def peval(context, o):
if not isinstance(o, LispType):
raise LispRuntimeError("peval was passed %r, which is not a LispType instance." % o)
# Value's and NIL
if isinstance(o, LispValue):
return o
# if object is quoted, un-quote it:
if isinstance(o, Pair) and o.quoted:
return Pair(o.left, o.right)
if isinstance(o, Symbol) and o.quoted:
return Symbol(o.s)
# if object is a bound symbol, substitute its value:
if isinstance(o, Symbol):
return context[o]
# if o is a function:
if hasattr(o, '__call__'):
raise LispRuntimeError("cannot evaluate a function")
# in other cases, o must be a pair.
if not isinstance(o, Pair):
raise LispRuntimeError("cannot evaluate %s", o)
pair = o
# in which case, if we have been asked to run a function!
if isinstance(pair.left, Symbol):
try:
function = context[pair.left]
except KeyError:
raise LispRuntimeError("unbound symbol %r" % pair.left)
if not hasattr(function, '__call__'):
raise LispRuntimeError("symbol %r is not bound to a function, but %r" % (pair.left, function))
elif isinstance(pair.left, Pair):
# pair.left is a pair. it is important to eval it here - this is the one context in which
# it won't be evaled by pre_execute_impl, which only acts on arguments - and check we get a
# function, rather than dying here.
function = peval(context, pair.left)
if not hasattr(function, '__call__'):
raise LispRuntimeError("result %r cannot be executed as a function" % function)
elif hasattr(pair.left, '__call__'):
# someone has got a function object in the right place for us. Go them!
function = pair.left
else:
# pair.left is a Value, or something.
raise LispRuntimeError("result %r cannot be executed as a function" % pair.left)
return function(context, pair.right)
def pre_execute_impl(context, arguments):
"""This function is run before ANY user or library function, and evaluates the arguments to
be passed in. This step cannot be skipped, by anyone."""
pair = arguments
evaled_args = []
while not isinstance(pair, NIL):
evaled_args.append(peval(context, pair.left))
pair = pair.right
return evaled_args
def pre_execute(method="", minc=0, maxc=float('inf')):
def inner_decorator(execute):
def actual_execute(context, arguments):
evaled_arguments = pre_execute_impl(context, arguments)
count = len(evaled_arguments)
if not PERMISSIVE and (count < minc or count > maxc):
raise LispRuntimeError("%s: incorrect number of arguments. accepts %r-%r, recieved %r." % (
method, minc, maxc, count))
return execute(*([context] + evaled_arguments))
return actual_execute
return inner_decorator
def instance_pre_execute(method=""):
def inner_decorator(execute):
def actual_execute(self, context, arguments):
evaled_arguments = pre_execute_impl(context, arguments)
count = len(evaled_arguments)
if not PERMISSIVE and (count < self.minc or count > self.maxc):
raise LispRuntimeError("%s: incorrect number of arguments. accepts %r-%r, recieved %r." % (
method, self.minc, self.maxc, count))
return execute(self, *([context] + evaled_arguments))
return actual_execute
return inner_decorator
# numeric functions use the static_validate_value_type decorator:
numbers = (int, long, float)
integers = (int, long)
floats = (float,)
strings = (str, unicode)
values = numbers + strings
def static_validate_value_type(method="", types=(object,)):
def inner_decorator(execute):
def actual_validate(context, *terms):
for t in terms:
if not isinstance(t, Value):
raise ValueError("%s: cannot compute with non-value %r" % (method, t))
if not isinstance(t.v, types):
raise ValueError("%s: expected %r, found %r" % (method, types, t))
# *args arrives as a tuple, not a list.
return execute(*([context] + list(terms)))
return actual_validate
return inner_decorator
@pre_execute("BIND", 2, 2)
def bind(context, symbol=None, value=NIL(), *args):
if not isinstance(symbol, Symbol):
if not PERMISSIVE:
raise LispRuntimeError('cannot BIND value %r to non-symbol %r' % (value, symbol))
else:
context[symbol] = value
return NIL()
def noop(*args):
pass
@pre_execute("WITH", 2)
def _with(context, arg_bindings=NIL(), *lines_of_function_body):
# unwind with's arguments; two pairs.
args_as_list = False
if not (isinstance(arg_bindings, Pair) or isinstance(arg_bindings, Symbol)):
if not PERMISSIVE:
raise LispRuntimeError('WITH: %r is not an argument list.' % arg_bindings)
else:
if isinstance(arg_bindings, Symbol):
args_as_list = True
if not lines_of_function_body:
if PERMISSIVE:
return noop
else:
raise LispRuntimeError('WITH: cannot define an empty function.')
# actually build the LispFunction object:
return UserLispFunction(arg_bindings, lines_of_function_body, context.env, args_as_list=args_as_list)
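# Illustrative language-level usage (a sketch, not taken from the repository):
#   (bind inc (with (x) (+ x 1)))
#   (puts (inc 41))   ; prints 42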
@pre_execute("EVAL", 1)
def _eval(context, *lines):
retval = NIL()
for l in lines:
retval = peval(context, l)
return retval
import_cache = {}
def eval_library(context, canonical_module_name, program):
fn = UserLispFunction(NIL(), program, canonical_module_name)
fn(Context(default_context_bindings(), environment=canonical_module_name), NIL())
import_cache[canonical_module_name] = fn.last_execute_context
def internal_import(context, canonical_module_name, program):
"""For importing names from within the minimalisp implementation"""
if canonical_module_name not in import_cache:
eval_library(context, canonical_module_name, program)
context.update(import_cache[canonical_module_name])
@pre_execute("IMPORT", 1)
@static_validate_value_type('IMPORT', strings)
def _import(context, source_file):
global import_cache
canonical_module_name = os.path.abspath(source_file.v)
if canonical_module_name not in import_cache:
try:
program = parse_program(open(canonical_module_name, 'r').read())
except IOError:
raise LispRuntimeError('IMPORT: invalid file to load "%s"' % canonical_module_name)
eval_library(context, canonical_module_name, program)
context.update(import_cache[canonical_module_name])
return NIL()
@pre_execute("PUTS")
def puts(context, *values):
if not PERMISSIVE and any([
not isinstance(v, (Value, Pair, Symbol, NIL)) and not hasattr(v, '__call__') for v in values
]):
raise LispRuntimeError("expected lisp objects, got %s" % repr(values))
print("".join([repr(value) for value in values]))
return NIL()
@pre_execute("GETS", 0)
def gets(context, *symbols_to_bind):
# if called with no arguments, returns a single gets.
if len(symbols_to_bind) == 0:
return parse_token_prompt(raw_input(">"))
# with arguments, binds N gets' to them.
for s in symbols_to_bind:
if not isinstance(s, Symbol):
if not PERMISSIVE:
raise LispRuntimeError("GETS: cannot bind to non-symbol %r." % s)
else:
context[s] = parse_token_prompt(raw_input("%s>" % repr(s)))
return NIL()
@pre_execute("CONS", 2, 2)
def cons(context, left=NIL(), right=NIL(), *args):
return Pair(left, right)
@pre_execute("CAR", 1, 1)
def car(context, pair=NIL(), *args):
if not isinstance(pair, Pair):
if PERMISSIVE:
return pair
raise LispRuntimeError('CAR: %r is not a pair.' % pair)
return pair.left
@pre_execute("CDR", 1, 1)
def cdr(context, pair=NIL(), *args):
if not isinstance(pair, Pair):
if PERMISSIVE:
return pair
raise LispRuntimeError('CDR: %r is not a pair.' % pair)
return pair.right
@pre_execute("+")
@static_validate_value_type("+", numbers)
def plus(context, *terms):
return Value(sum([i.v for i in terms]), actual=True)
@pre_execute("-", 1)
@static_validate_value_type("-", numbers)
def minus(context, *terms):
return Value(terms[0].v - sum([i.v for i in terms[1:]]), actual=True)
@pre_execute("*")
@static_validate_value_type("*", numbers)
def multiply(context, *terms):
return Value(reduce(mul, [i.v for i in terms], 1), actual=True)
@pre_execute("/", 1)
@static_validate_value_type("/", numbers)
def divide(context, *terms):
# We use python 3's "true division", which gives floats for two int arguments.
return Value(terms[0].v / reduce(mul, [i.v for i in terms[1:]], 1), actual=True)
@pre_execute("i/", 1)
@static_validate_value_type("i/", integers)
def idivide(context, *terms):
return Value(terms[0].v // reduce(mul, [i.v for i in terms[1:]], 1), actual=True)
@pre_execute("%", 2)
@static_validate_value_type("%", integers)
def modulo(context, *terms):
return Value(terms[0].v % reduce(mul, [i.v for i in terms[1:]], 1), actual=True)
@pre_execute("ROUND", 1, 1)
@static_validate_value_type("ROUND", floats)
def _round(context, f):
return Value(int(round(f.v)), actual=True)
@pre_execute("JOIN")
@static_validate_value_type("JOIN", strings)
def concatinate(context, *terms):
return Value("".join([t.v for t in terms]), actual=True)
@pre_execute('SPLIT', 1, 2)
@static_validate_value_type('SPLIT', strings)
def split(context, input, substring=None):
retvalue = NIL()
args = []
if substring:
args.append(substring)
for tok in reversed(input.v.split(*args)):
retvalue = Pair(Value(tok, actual=True), retvalue)
return retvalue
@pre_execute("RAND", 0, 0)
def rand(context):
return Value(random.random(), actual=True)
# Logical Functions:
# By convention we use NIL as false, as well as using 0, the empty string and unbound Symbols
# likewise. Thus, any other numeric value is true, as is a string, Pair or bound Symbol.
# We must choose a value to return from logical comparisons. The value that was compared is not
# sufficient, since this breaks (== 0 x), and so on. We also do not want to introduce another type
# (boolean) when we only want True but not False.
# So, we choose to return Value(1, actual=True). This means we can (+ test test2 test3) and see how
# many passed, among other things.
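# For example (illustrative only): (= 0 0) evaluates to 1 and (= 0 1)
# evaluates to NIL, so the result can be fed directly to IF as a test.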
@pre_execute("IF", 2, 3)
def _if(context, test, then_do, else_do=None):
retvalue = NIL()
if (isinstance(test, Pair) or
(isinstance(test, Symbol) and test in context) or
(isinstance(test, Value) and test.v)):
retvalue = peval(context, then_do)
elif else_do:
retvalue = peval(context, else_do)
return retvalue
@pre_execute("=", 2)
def equal(context, *terms):
retvalue = Value(1, actual=True)
lvalue = terms[0]
for rvalue in terms[1:]:
if lvalue != rvalue:
retvalue = NIL()
break
return retvalue
@pre_execute("==", 2)
def identical(context, *terms):
"""This function is not very useful, I think, but can't possibly be implemented in the language
without a minimalisp version of python's `id`, which is even worse."""
retvalue = Value(1, actual=True)
lvalue = terms[0]
for rvalue in terms[1:]:
if id(lvalue) != id(rvalue):
retvalue = NIL()
break
return retvalue
@pre_execute(">", 2)
@static_validate_value_type(">", values)
def greater_than(context, *terms):
retvalue = Value(1, actual=True)
lvalue = terms[0]
for rvalue in terms[1:]:
if lvalue.v <= rvalue.v:
retvalue = NIL()
break
return retvalue
@pre_execute("<", 2)
@static_validate_value_type("<", values)
def less_than(context, *terms):
retvalue = Value(1, actual=True)
lvalue = terms[0]
for rvalue in terms[1:]:
if lvalue.v >= rvalue.v:
retvalue = NIL()
break
return retvalue
@pre_execute("DOWHILE", 1)
def dowhile(context, *body):
"""works like EVAL, except it repeats the function body again and again until its return
value is NIL or 0. Always returns NIL, but leaks bindings."""
result = NIL()
for line in body:
result = peval(context, line)
while (isinstance(result, Pair) or
(isinstance(result, Symbol) and result in context) or
(isinstance(result, Value) and result.v)):
for line in body:
result = peval(context, line)
return NIL()
class UserLispFunction(object):
def __init__(self, argbindings, functionbody, definition_env, args_as_list=False):
# both are unquoted pairs, which WITH will check for us.
self.args_as_list = args_as_list
self.argbindings = argbindings
self.functionbody = functionbody
self.minc = 0
self.env = definition_env
if args_as_list:
# a list of arguments may be arbitrarily long.
self.maxc = float('inf')
else:
# find out how long argbindings is:
args = []
sargs = argbindings
while not isinstance(sargs, NIL):
args.append(sargs.left)
sargs = sargs.right
self.maxc = len(args)
self.argbindings = args
def __repr__(self):
return "(user function)"
@instance_pre_execute("(user function)")
def __call__(self, outer_context, *ap):
# initialise a new context, with arguments bound to names specified (or NIL if none passed):
if self.env == outer_context.env:
context = Context(parent=outer_context)
else:
# executing a function defined in a different file: go and retrieve
# the correct outer scope.
context = Context(parent=import_cache[self.env], environment=self.env)
ab = self.argbindings
# bind the arguments passed:
if self.args_as_list:
context[ab] = sexpr_from_iterator(ap)
else:
for i, arg_passed in enumerate(ap):
# instance_pre_execute should have checked we don't have too many args passed.
arg_binding = ab[i]
context[arg_binding] = arg_passed
retval = NIL()
for line in self.functionbody:
retval = peval(context, line)
self.last_execute_context = context
return retval
lib = {
Symbol('bind'): bind,
Symbol('with'): _with,
Symbol('eval'): _eval,
Symbol('import'): _import,
Symbol('puts'): puts,
Symbol('gets'): gets,
Symbol('cons'): cons,
Symbol('car'): car,
Symbol('cdr'): cdr,
Symbol('+'): plus,
Symbol('-'): minus,
Symbol('*'): multiply,
Symbol('/'): divide,
Symbol('i/'): idivide,
Symbol('%'): modulo,
Symbol('round'): _round,
Symbol('join'): concatinate,
Symbol('split'): split,
Symbol('rand'): rand,
Symbol('if'): _if,
Symbol('>'): greater_than,
Symbol('<'): less_than,
Symbol('='): equal,
Symbol('=='): identical,
Symbol('dowhile'): dowhile
}
def default_context_bindings():
return lib
def run(program, program_environment, with_math=False):
if with_math:
import maths
lib.update(maths.maths_functions)
# we initialise the functions not implemented in the language (who do not
# care about contexts) as being in the user's own environment.
context = Context(default_context_bindings(), environment=program_environment)
# before we execute the program, ensure it's context is registered in the
# import cache so that the functions can load an environment when run as
# callbacks.
import_cache[program_environment] = context
UserLispFunction(NIL(), program, program_environment)(context, NIL())
``` |
{
"source": "joe-jordan/picosdk-python-wrappers",
"score": 3
} |
#### File: picosdk-python-wrappers/test/test_open_close_unit.py
```python
from __future__ import print_function
from test.test_helpers import DriverTest, drivers_to_load, drivers_with_device_connected
from picosdk.library import DeviceNotFoundError
class OpenCloseTest(DriverTest):
def test_open_unit_failure(self):
"""test_open_unit_failure
        note: test assumes that at most one device is attached for each driver."""
drivers_to_use = drivers_to_load
def test(driver):
threw = False
devices = []
try:
devices.append(driver.open_unit())
devices.append(driver.open_unit())
except DeviceNotFoundError:
threw = True
finally:
for device in devices:
device.close()
if not threw:
return "didn't throw a DeviceNotFoundError."
self.run_snippet_and_count_problems(drivers_to_use, test)
def test_open_unit_success(self):
"""test_open_unit_success
note: test assumes you have set test_helpers.drivers_with_device_connected"""
if not drivers_with_device_connected:
return
drivers_to_use = drivers_with_device_connected[:]
def test(driver):
threw = False
devices = []
try:
devices.append(driver.open_unit())
except DeviceNotFoundError as e:
threw = e
finally:
for device in devices:
print("closing device %s" % device.handle)
device.close()
if threw is not False:
return "no device found (%s)." % threw
self.run_snippet_and_count_problems(drivers_to_use, test)
def test_close_unit_success(self):
"""test_close_unit_success
note: test assumes you have set test_helpers.drivers_with_device_connected"""
if not drivers_with_device_connected:
return
drivers_to_use = drivers_with_device_connected
def test(driver):
devices = []
try:
devices.append(driver.open_unit())
except DeviceNotFoundError as e:
return "no device found (%s)." % e
device = devices.pop()
info = device.info
device.close()
# To test the success of close(), we try to re-open the device.
# If we fail, then we have not closed it correctly.
try:
devices.append(driver.open_unit(serial=info.serial))
except DeviceNotFoundError as e:
return "Could not close and then re-open the device (%s)." % e
finally:
for device in devices:
device.close()
self.run_snippet_and_count_problems(drivers_to_use, test)
def test_with_statement_open_close(self):
"""test_with_statement_open_close
note: test assumes you have set test_helpers.drivers_with_device_connected"""
if not drivers_with_device_connected:
return
drivers_to_use = drivers_with_device_connected[:]
def test(driver):
threw = False
was_open = False
was_closed = False
outer_scope_device = None
try:
with driver.open_unit() as device:
was_open = device.is_open
outer_scope_device = device
was_closed = not outer_scope_device.is_open
except DeviceNotFoundError as e:
threw = e
finally:
if outer_scope_device is not None and not was_closed:
outer_scope_device.close()
if threw is not False:
return "no device found (%s)." % threw
elif not was_open:
return "device was not opened correctly"
elif not was_closed:
return "device was not closed after exiting the scope"
self.run_snippet_and_count_problems(drivers_to_use, test)
``` |
{
"source": "joejulian/saltstack-debug",
"score": 3
} |
#### File: saltstack-debug/_modules/logger_mod.py
```python
__virtualname__ = 'logger'
import logging as log
from pprint import pformat
import json
import yaml
def pretty(var):
return pformat(yaml.load(json.dumps(var)))
def __virtual__():
return __virtualname__
def debug(var, string = ""):
'''
Print a var to a debug output.
'''
ret = string + pretty(var)
log.debug(ret)
return ret
def info(var, string = ""):
'''
    Print a var to an info output.
'''
ret = string + pretty(var)
log.info(ret)
return ret
def warning(var, string = ""):
'''
Print a var to a warning output.
'''
ret = string + pretty(var)
log.warning(ret)
return ret
def error(var, string = ""):
'''
    Print a var to an error output.
'''
ret = string + pretty(var)
log.error(ret)
return ret
def critical(var, string = ""):
'''
Print a var to a critical output.
'''
ret = string + pretty(var)
log.critical(ret)
return ret
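# Example invocation from the salt CLI (illustrative; the module name comes
# from __virtualname__ above):
#   salt '*' logger.debug '{"a": 1}' "pillar dump: "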
``` |
{
"source": "joejuzl/pipenv",
"score": 2
} |
#### File: tests/integration/test_install_uri.py
```python
import pytest
from flaky import flaky
@pytest.mark.vcs
@pytest.mark.install
@pytest.mark.needs_internet
@flaky
def test_basic_vcs_install(PipenvInstance, pip_src_dir, pypi):
with PipenvInstance(pypi=pypi, chdir=True) as p:
c = p.pipenv('install git+https://github.com/benjaminp/six.git#egg=six')
assert c.return_code == 0
# edge case where normal package starts with VCS name shouldn't be flagged as vcs
c = p.pipenv('install gitdb2')
assert c.return_code == 0
assert all(package in p.pipfile['packages'] for package in ['six', 'gitdb2'])
assert 'git' in p.pipfile['packages']['six']
assert p.lockfile['default']['six'] == {"git": "https://github.com/benjaminp/six.git"}
assert 'gitdb2' in p.lockfile['default']
@pytest.mark.files
@pytest.mark.urls
@pytest.mark.needs_internet
@flaky
def test_urls_work(PipenvInstance, pypi, pip_src_dir):
with PipenvInstance(pypi=pypi) as p:
c = p.pipenv('install https://github.com/divio/django-cms/archive/release/3.4.x.zip')
assert c.return_code == 0
dep = list(p.pipfile['packages'].values())[0]
assert 'file' in dep, p.pipfile
dep = list(p.lockfile['default'].values())[0]
assert 'file' in dep, p.lockfile
@pytest.mark.files
@pytest.mark.urls
@pytest.mark.needs_internet
@flaky
def test_install_remote_requirements(PipenvInstance, pypi):
with PipenvInstance(pypi=pypi) as p:
# using a github hosted requirements.txt file
c = p.pipenv('install -r https://raw.githubusercontent.com/kennethreitz/pipenv/3688148ac7cfecefb085c474b092c31d791952c1/tests/test_artifacts/requirements.txt')
assert c.return_code == 0
# check Pipfile with versions
assert 'requests' in p.pipfile['packages']
assert p.pipfile['packages']['requests'] == u'==2.18.4'
assert 'records' in p.pipfile['packages']
assert p.pipfile['packages']['records'] == u'==0.5.2'
# check Pipfile.lock
assert 'requests' in p.lockfile['default']
assert 'records' in p.lockfile['default']
@pytest.mark.e
@pytest.mark.vcs
@pytest.mark.install
@pytest.mark.needs_internet
@flaky
def test_editable_vcs_install(PipenvInstance, pip_src_dir, pypi):
with PipenvInstance(pypi=pypi) as p:
c = p.pipenv('install -e git+https://github.com/requests/requests.git#egg=requests')
assert c.return_code == 0
assert 'requests' in p.pipfile['packages']
assert 'git' in p.pipfile['packages']['requests']
assert 'editable' in p.pipfile['packages']['requests']
assert 'editable' in p.lockfile['default']['requests']
assert 'chardet' in p.lockfile['default']
assert 'idna' in p.lockfile['default']
assert 'urllib3' in p.lockfile['default']
assert 'certifi' in p.lockfile['default']
@pytest.mark.install
@pytest.mark.vcs
@pytest.mark.tablib
@pytest.mark.needs_internet
@flaky
def test_install_editable_git_tag(PipenvInstance, pip_src_dir, pypi):
# This uses the real PyPI since we need Internet to access the Git
# dependency anyway.
with PipenvInstance(pypi=pypi) as p:
c = p.pipenv('install -e git+https://github.com/benjaminp/[email protected]#egg=six')
assert c.return_code == 0
assert 'six' in p.pipfile['packages']
assert 'six' in p.lockfile['default']
assert 'git' in p.lockfile['default']['six']
assert p.lockfile['default']['six']['git'] == 'https://github.com/benjaminp/six.git'
assert 'ref' in p.lockfile['default']['six']
``` |
{
"source": "joek295/brainfuck",
"score": 4
} |
#### File: brainfuck/tools/bfify.py
```python
import sys
from collections import Counter
try:
string = sys.argv[1]
except IndexError:
print "Fatal Error: Brainfuck expected a source file."
sys.exit(1)
try:
with open(string,"r") as srcfile:
string = srcfile.read()
except IOError:
pass
codes = [ord(c) for c in string]
def bfify(codes):
c = Counter(codes)
sorted_codes = []
for i in c.most_common():
sorted_codes.append(i[0])
source = ">"
prevcode = 0
change = ""
for i in sorted_codes:
codediff = (max(i,prevcode) - min(i,prevcode))
# if we would make a saving by copy and alter, rather than writing from scratch, do so...
alter = "<[->+>+<<]>>[-<<+>>]"
if i > prevcode:
alter += "<" + "+"*codediff + ">"
elif i < prevcode:
alter += "<" + "-"*codediff + ">"
tenth = i/10
new = ">" + "+"*10 + "[<" + "+" * tenth + ">-]<" + "+" * (i - 10*tenth) + ">"
prevcode = i
if len(new) < len(alter):
source += new
else:
source += alter
source += "<"
for i in codes:
if sorted_codes.index(i) < len(sorted_codes)/2:
if sorted_codes.index(prevcode) <= sorted_codes.index(i):
move = (sorted_codes.index(i) - sorted_codes.index(prevcode))*">" + "."
elif (sorted_codes.index(prevcode) - sorted_codes.index(i)) < sorted_codes.index(i):
move = (sorted_codes.index(prevcode) - sorted_codes.index(i))*"<" + "."
else:
move = "[<]>" + ">"*sorted_codes.index(i) + "."
else:
if sorted_codes.index(prevcode) >= sorted_codes.index(i):
move = (sorted_codes.index(prevcode) - sorted_codes.index(i))*"<" + "."
elif (sorted_codes.index(i) - sorted_codes.index(prevcode)) < len(sorted_codes) - sorted_codes.index(i):
move = (sorted_codes.index(i) - sorted_codes.index(prevcode))*">" + "."
else:
move = "[>]<" + "<"*(len(sorted_codes) - sorted_codes.index(i) - 1) + "."
source += move
prevcode = i
cleansource(source)
def cleansource(source):
cleansource = ""
last = ""
for i in source:
if i == "<" and last == ">":
cleansource = cleansource [0:-1]
last = ""
elif i == ">" and last == "<":
cleansource = cleansource [0:-1]
last = ""
else:
cleansource += i
last = i
print cleansource
bfify(codes)
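# Illustrative usage (not from the repository's docs): pass either a source
# file or a literal string; the emitted brainfuck program prints that text.
#   python bfify.py hello.txt > hello.bf
#   python bfify.py "hi there" > hi.bf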
``` |
{
"source": "joekabucho/ppe-detection-",
"score": 3
} |
#### File: ppe-detection-/backend/main.py
```python
from datetime import datetime
from flask import Flask, jsonify, request
from config import config
from notification.factory import NotificationFactory
notification_svc = NotificationFactory.instance(config)
MSG_TEMPLATE = """
### ppe demo
**Alert** at **{point}** at {time}
> total_person={total_person} without_hardhat={without_hardhat} without_vest={without_vest} without_both={without_both}
"""
def _construct_msg(ts, point, total_person, without_hardhat, without_vest, without_both):
t = datetime.utcfromtimestamp(ts / 1000).strftime("%Y-%m-%d %H:%M:%S UTC")
return MSG_TEMPLATE.format(
time=t, point=point, total_person=total_person, without_hardhat=without_hardhat, without_vest=without_vest, without_both=without_both)
class HttpError(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
app = Flask(__name__)
@app.errorhandler(HttpError)
def handle_http_error(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
print("[Error]:", error.message)
return response
@app.route("/")
def home():
return "ok"
# {
# "id": "331404be-7c57-11e9-a345-dca90488d3b9",
# "cameraId": "camera1",
# "timestamp": 1558506692,
# "persons": [
# {
# "hardhat": true,
# "vest": true
# },
# {
# "hardhat": true,
# "vest": true
# }
# ],
# "image": {
# "height": 200,
# "width": 300,
# "format": "jpeg",
# "raw": "base64 encoded data",
# "url": "http://ppe-backend:7200/images/uuid1"
# },
# "createdAt": 1558506697000,
# "updatedAt": 1558506697000
# }
@app.route("/v1/detections", methods=["POST"])
def create_detections_v1():
js = request.json
js["image"]["raw"] = "omited"
cameraId = js.get("cameraId")
if cameraId is None:
print("json field missing")
raise HttpError("cameraId missing", status_code=400)
print("[Info] recieved:", js["cameraId"], js["timestamp"])
without_hardhat = len(list(filter(lambda p: not p["hardhat"], js["persons"])))
without_vest = len(list(filter(lambda p: not p["vest"], js["persons"])))
without_both = len(list(filter(lambda p: not p["vest"] and not p["hardhat"], js["persons"])))
if without_hardhat > 0 or without_vest > 0 or without_both > 0:
print("[Warn]", "someone violate the rule")
msg = _construct_msg(js["timestamp"], js["cameraId"], len(js["persons"]), without_hardhat - without_both, without_vest - without_both, without_both)
notification_svc.send(msg)
else:
print("[Info]", "no one violate the rule the person is not properly equipped")
return jsonify(request.json), 201
if __name__ == "__main__":
app.run(host="0.0.0.0", port=config["port"])
``` |
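A client-side sketch for the endpoint above (assumed, not part of the repository): it posts the detection payload documented in the comments, assuming the service listens on `localhost:7200` as the sample image URL suggests.
```python
import time

import requests  # third-party HTTP client

payload = {
    "cameraId": "camera1",
    "timestamp": int(time.time() * 1000),  # milliseconds, as _construct_msg expects
    "persons": [
        {"hardhat": True, "vest": True},
        {"hardhat": False, "vest": True},  # triggers the notification branch
    ],
    "image": {"height": 200, "width": 300, "format": "jpeg", "raw": "", "url": ""},
}

resp = requests.post("http://localhost:7200/v1/detections", json=payload)
print(resp.status_code)  # 201 on success
```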
{
"source": "joekaiser/ulauncher-raindrop",
"score": 2
} |
#### File: ulauncher-raindrop/raindrop/extension.py
```python
import logging
from ulauncher.api.client.Extension import Extension
from ulauncher.api.shared.event import KeywordQueryEvent, PreferencesEvent, PreferencesUpdateEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.OpenUrlAction import OpenUrlAction
from raindrop.preferences import PreferencesEventListener, PreferencesUpdateEventListener
from raindropio import Raindrop
from raindrop.query_listener import KeywordQueryEventListener
logger = logging.getLogger(__name__)
class RaindropExtension(Extension):
""" Main Extension Class """
def __init__(self):
""" Initializes the extension """
super(RaindropExtension, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
self.subscribe(PreferencesEvent, PreferencesEventListener())
self.subscribe(PreferencesUpdateEvent,
PreferencesUpdateEventListener())
def get_keyword_id(self, keyword):
for key, value in self.preferences.items():
if value == keyword:
return key
return ""
def show_open_app_menu(self):
""" Shows the menu to Open Raindrop website """
return RenderResultListAction([
ExtensionResultItem(
icon='images/icon.png',
name='Open Raindrop Website',
on_enter=OpenUrlAction('https://app.raindrop.io'))
])
def search(self, query):
drops = Raindrop.search(self.rd_client, word=query, perpage=10)
if len(drops) == 0:
return RenderResultListAction([
ExtensionResultItem(
icon='images/icon.png',
                    name='No results found matching your criteria',
highlightable=False)
])
items = []
for drop in drops:
items.append(
ExtensionResultItem(icon='images/icon.png',
name=drop.title,
description=drop.excerpt,
on_enter=OpenUrlAction(drop.link)))
return RenderResultListAction(items)
``` |
{
"source": "joekakone/deputes-fr",
"score": 3
} |
#### File: deputes-fr/dashboard/main.py
```python
import pandas as pd
from bokeh.io import curdoc
# Charts module
from plot import draw_piechart, draw_barplot, draw_age_bar, create_table
# Paths & Config
DATA_PATH = "dashboard/data/deputes-active.csv"
##### title ####
curdoc().title = 'Tableau de bord | Assemblée Nationale'
# 1. Import data
data = pd.read_csv(DATA_PATH)
# 2. Prepare data
# Remove "ans" in the `experienceDepute` column
data["experienceDepute"] = data["experienceDepute"].apply(lambda x: int(x.split()[0]))
# Add Full name column
data["nomComplet"] = data["prenom"] + " " + data["nom"]
##### KPIS #####
# Number of deputies
total_deputies = len(data)
# Number of groups
total_groups = len(data["groupe"].unique())
# Average age
mean_age = data["age"].mean()
mean_age = round(mean_age, 1)
# Average experience as deputy
mean_experience = data["experienceDepute"].mean()
mean_experience = round(mean_experience, 1)
# make variables available in html templates
curdoc().template_variables['totalDeputies'] = str(total_deputies)
curdoc().template_variables['totalGroups'] = str(total_groups)
curdoc().template_variables['meanAge'] = str(mean_age)
curdoc().template_variables['meanExperienceDepute'] = str(mean_experience)
##### Bokeh Plots ####
### Start Pie Chart ###
piechart = draw_piechart(data, 'piechart')
curdoc().add_root(piechart)
### End Pie Chart ###
### Start Line Plot ###
bar = draw_barplot(data, "age_bar")
curdoc().add_root(bar)
### End Line Plot ###
##### Charts #####
barchart = draw_age_bar(data, 'barchart')
curdoc().add_root(barchart)
### End Bar Chart ###
# ### Map Plot ###
# def gen_geo_data():
# geodata = {
# 'city': ['Cotonou', 'Porto-Novo', 'Ouidah'],
# 'latitude': [6.366667, 6.497222, 6.366667],
# 'longitude': [2.433333, 2.605, 2.083333]
# }
# geodata = pd.DataFrame(geodata)
# return geodata
# geodata = gen_geo_data()
# def MapPlot(data):
# def wgs84_to_web_mercator(df, lon="longitude", lat="latitude"):
# """Converts decimal longitude/latitude to Web Mercator format"""
# k = 6378137
# df["x"] = df[lon] * (k * np.pi/180.0)
# df["y"] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k
# return df
# data = wgs84_to_web_mercator(data)
# x_range = (data['x'].min()-10000, data['x'].max()+10000)
# y_range = (data['y'].min() ,data['y'].max())
# # convert into ColumnDataSource
# source = ColumnDataSource(data)
# mapplot = figure(plot_width=540,
# plot_height=250,
# x_range=x_range,
# y_range=y_range,
# x_axis_type="mercator",
# y_axis_type="mercator",
# toolbar_location=None,
# tools='',
# name='geoplot')
# # credits
# MAP_URL = 'http://a.basemaps.cartocdn.com/rastertiles/voyager/{Z}/{X}/{Y}.png'
# attribution = "Tiles by Carto, under CC BY 3.0. Data by OSM, under ODbL"
# mapplot.add_tile(WMTSTileSource(url=MAP_URL, attribution=attribution))
# mapplot.circle(x='x', y='y', fill_color='pink', size=20, fill_alpha=0.3, line_color=None, source=source)
# # hover
# mapplot.add_tools(HoverTool(tooltips=[
# ('City', '@city'),
# ('Latitude', "@latitude"),
# ('Longitude', "@longitude")
# ]))
# # others params
# mapplot.axis.visible = False
# return mapplot
# mapplot = MapPlot(geodata)
# curdoc().add_root(mapplot)
# ### End Map Plot ###
### Start Table ###
def gen_client_top10():
clients = {
'client': [
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>ine',
'<NAME>'
],
'orders': [1200, 3750, 2500, 2080, 2275, 750, 2000, 6200, 4500, 4850]
}
clients = pd.DataFrame(clients)
clients = clients.sort_values(by='orders', ascending=False)
return clients
table = create_table(data, 'table')
curdoc().add_root(table)
### End Table ###
``` |
{
"source": "joekakone/image-similarity-detection",
"score": 3
} |
#### File: joekakone/image-similarity-detection/encode.py
```python
import os
import argparse
import json
import tqdm
import glob
import numpy as np
import pandas as pd
import tensorflow as tf
from functions import read_data, \
load_model, load_image, extract_image_id
def encode_image(image):
features = encoder(image)
features = np.squeeze(features)
return features
def main():
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--data_dir", required=True,
help="Path to the images directory")
ap.add_argument("-m", "--model_path", required=True,
help="Path to the the model")
ap.add_argument("-i", "--input", type=int, required=True, default=299,
help="The input size")
ap.add_argument("-o", "--output", required=True,
help="Path to the output file")
args = vars(ap.parse_args())
size = args['input']
# model
print("Loading model...")
subdir = args["model_path"]
model_path = glob.glob(subdir+'*.h5')[-1]
model = load_model(model_path)
# data
print("Reading data...")
filenames, _, _ = read_data(args["data_dir"])
n_files = len(filenames)
# encoding
print("Encoding images...")
index_to_filename = {}
filename_to_path = {}
features = np.zeros((n_files, model.output.shape[1]))
for i in tqdm.tqdm(range(n_files)):
image_id = extract_image_id(filenames[i])
index_to_filename[i] = image_id
filename_to_path[image_id] = filenames[i]
#print("->", image_id)
image = load_image(filenames[i], (size, size))
image = image.reshape((1,)+image.shape)
features[i] = np.squeeze(model(image))
# save transfer values
np.save(args["output"], features)
with open("index_to_filename.json", "w") as f:
json.dump(index_to_filename, f, indent=4, ensure_ascii=False)
with open("filename_to_path.json", "w") as f:
json.dump(filename_to_path, f, indent=4, ensure_ascii=False)
if __name__ == "__main__":
main()
``` |
{
"source": "joekallen/dotfiles",
"score": 2
} |
#### File: workflows/user.workflow.BC9837BA-F60E-475C-B524-E761FBD0E1EB/common.py
```python
import os
import argparse
import subprocess
from workflow import Workflow
def get_kubectl_cmd_path():
wf = Workflow()
return wf.settings.get("KUBECTL_CMD_PATH") or os.environ.get("KUBECTL_CMD_PATH", '/usr/local/bin/kubectl')
class KService:
def __init__(self, type, name, age, status):
self.type = type
self.name = name
self.age = age
self.status = status
def get_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('query', nargs='?', default="")
return parser.parse_args(args)
def get_pods():
res = []
pods = subprocess.Popen("%s get pods" % get_kubectl_cmd_path(), shell=True, stdout=subprocess.PIPE).stdout.read().split(
'\n')[
1:-1]
for pod_str in pods:
try:
dep_name, _, status, _, age = " ".join(pod_str.split()).split(' ')
res.append(KService("Pod", dep_name, age, status))
except:
print("ASd")
return res
def get_deployments():
res = []
deps = subprocess.Popen("%s get deploy" % get_kubectl_cmd_path(), shell=True, stdout=subprocess.PIPE).stdout.read().split(
'\n')[1:-1]
for dep_str in deps:
dep_name, _, current, _, _, age = " ".join(dep_str.split()).split(' ')
res.append(KService("Deploy", dep_name, age, current))
return res
def get_replica_sets():
res = []
deps = subprocess.Popen("%s get rs" % get_kubectl_cmd_path(), shell=True, stdout=subprocess.PIPE).stdout.read().split(
'\n')[1:-1]
for dep_str in deps:
dep_name, desired, current, _, age = " ".join(dep_str.split()).split(' ')
res.append(KService("Deploy", dep_name, age, "%s/%s" % (desired, current)))
return res
def get_services():
res = []
res += get_pods()
res += get_deployments()
return res
def search_key_for_service(service):
return u' '.join([
service.name
])
def process_and_feedback(wf, wf_cached_data_key, data_func, icon, include_type_in_arg=False):
args = get_args(wf.args)
data = wf.cached_data(wf_cached_data_key, data_func, max_age=60)
query = args.query.strip()
if query:
data = wf.filter(query, data, key=search_key_for_service, min_score=20)
for d in data:
if include_type_in_arg:
arg = "{type} {name}".format(type=d.type.lower(), name=d.name)
else:
arg = d.name
wf.add_item(title=d.name,
subtitle="%s - Age: %s | Extra: %s" % (d.type, d.age, d.status),
arg=arg,
valid=True,
icon=icon)
wf.send_feedback()
def update_local_path_vars(wf):
set_path_to = os.environ.get('set_path_to')
configured_path = os.environ.get('configured_path')
wf.settings[set_path_to] = configured_path
wf.settings.save()
print("Successfully set path to %s with %s" % (set_path_to, wf.settings[set_path_to]))
def _report_missing_var(wf, var_name):
print("Missing dashbaord url; use *ksetenv*")
"""
wf.add_item(title="Hit enter to set %s environment variable." % var_name,
arg="setenv",
valid=True)
wf.send_feedback()
"""
```
#### File: workflows/user.workflow.BC9837BA-F60E-475C-B524-E761FBD0E1EB/kdr.py
```python
import sys
from icons import SHELL
from workflow import Workflow
from common import process_and_feedback, get_replica_sets
def main(wf):
process_and_feedback(wf, 'kube_replica_sets', get_replica_sets, SHELL)
if __name__ == u"__main__":
wf = Workflow()
sys.exit(wf.run(main))
``` |
{
"source": "joekarasek/webfaction-deploy-tools",
"score": 3
} |
#### File: webfaction-deploy-tools/scripts/hwdevs.py
```python
class Webfaction(object):
def __init__(self, server, siteConfig):
self.siteConfig = siteConfig
self.server = server
self.session_id, self.account = server.login(
siteConfig.userName,
siteConfig.userPass,
siteConfig.machineName,
1)
def checkApp(self):
appList = self.server.list_apps(self.session_id)
for app in appList:
if self.siteConfig.appName == app['name']:
return True
return False
def createApp(self, appType='static_php56'):
self.server.create_app(
self.session_id,
self.siteConfig.appName,
appType,
False,
'',
False)
def checkLink(self):
appList = self.server.list_apps(self.session_id)
for app in appList:
if self.siteConfig.appLink == app['name']:
return True
return False
def createLink(self, appType='symlink56'):
self.server.create_app(
self.session_id,
self.siteConfig.appLink,
appType,
False,
"/home/danlinn/webapps/"+self.siteConfig.appName+"/build",
False)
def checkSite(self):
websiteList = self.server.list_websites(self.session_id)
for website in websiteList:
if self.siteConfig.websiteName == website['name']:
return website
return False
def createSite(self):
self.server.create_website(self.session_id,
self.siteConfig.websiteName,
self.siteConfig.ipAddress,
False,
[self.siteConfig.domainName],
[self.siteConfig.appLink, '/'])
def checkDomain(self):
domainList = self.server.list_domains(self.session_id)
for domain in domainList:
if domain['domain'] == self.siteConfig.domainName:
return True
return False
def createDomain(self):
self.server.create_domain(
self.session_id,
self.siteConfig.domainName)
def runCmd(self, command):
commandAtApp = "cd /home/danlinn/webapps/"+self.siteConfig.appName+" && "+command
self.server.system(self.session_id, commandAtApp)
def gitClone(self):
initGit = "cd /home/danlinn/webapps/"+self.siteConfig.appName+" && if [ -e index.html ]; then rm index.html; fi && git clone -q "+self.siteConfig.repoUrl+" ."
self.server.system(self.session_id, initGit)
def gitPull(self):
gitPull = "cd /home/danlinn/webapps/"+self.siteConfig.appName+" && git pull -q origin master"
self.server.system(self.session_id, gitPull)
def npmInstall(self):
installCmd = "cd /home/danlinn/webapps/"+self.siteConfig.appName+" && ~/bin/npm run install --silent"
self.server.system(self.session_id, installCmd)
def buildSite(self):
buildCmd = "cd /home/danlinn/webapps/"+self.siteConfig.appName+" && ~/bin/npm run build --silent"
self.server.system(self.session_id, buildCmd)
# def addHtaccess(server, session_id, siteConfig):
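# Hypothetical wiring (not part of this file): the class expects an XML-RPC
# proxy for the WebFaction API plus a siteConfig object exposing the fields
# used above (userName, userPass, machineName, appName, appLink, websiteName,
# domainName, ipAddress, repoUrl).
#   import xmlrpclib
#   server = xmlrpclib.ServerProxy('https://api.webfaction.com/')
#   wf = Webfaction(server, siteConfig)
#   if not wf.checkApp():
#       wf.createApp()
#       wf.gitClone()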
``` |
{
"source": "joekelley120/PyTorchTimeSeries",
"score": 3
} |
#### File: PyTorchTimeSeries/Optimizer/particle.py
```python
import torch
from torch.optim import Optimizer
class PARTICLE(Optimizer):
"""
Implements Gradient based Particle Optimization.
... warning::
This optimizer doesn't support per-parameter options and
parameters groups.
Arguments:
ds (float) - step size for particle swarm
"""
def __init__(self, params, ds=0.01, contraction=0.8, expansion=1.2, number_batches=1):
"""
Initializer for implementation of particle optimization.
:param params: model parameters
:param ds: step size for particle swarm
:param contraction: coefficient for contraction of swarm
:param expansion: coefficient for expansion of swarm
:param number_batches: number of batches during training
"""
defaults = dict(
ds=ds,
contraction=contraction,
expansion=expansion,
number_batches=number_batches,
)
super(PARTICLE, self).__init__(params, defaults)
if len(self.param_groups) != 1:
raise ValueError("Particle doesn't support per-parameter options "
"(parameter group)")
self._params = self.param_groups[0]['params']
self.state.setdefault('ds', ds)
self.state.setdefault('contraction', contraction)
self.state.setdefault('expansion', expansion)
self.state.setdefault('iteration', 0)
self.state.setdefault('ds_min', 1e-7)
self.state.setdefault('ds_max', 5)
self.state.setdefault('number_batches', number_batches)
self.state.setdefault('reset', False)
def _clone_param(self):
"""
Clone model parameters.
:return: cloned model parameters.
"""
return [p.clone() for p in self._params if p.requires_grad]
@staticmethod
def _clone_passed_params(params):
"""
Clone passed in parameters.
:param params: parameters
:return: cloned parameters
"""
return [p.clone() for p in params if p.requires_grad]
def _set_param(self, params_data):
"""
Set model parameters.
:param params_data: desired parameters for model.
:return: none
"""
i = 0
for p in self._params:
if p.requires_grad:
p.data.copy_(params_data[i].data)
i += 1
def _set_param_vector(self, vector):
"""
Set model parameters from a vector of parameters.
:param vector: parameters
:return: none
"""
new_param = self._clone_passed_params(self._params)
torch.nn.utils.vector_to_parameters(vector, new_param)
self._set_param(new_param)
def step(self, closure):
"""
Performs a single optimization step.
:param closure: A closure evaluates the model gradient and returns a loss.
:return: loss
"""
assert len(self.param_groups) == 1
# Model parameters
params = self._clone_param()
# Perform a step of particle
final_loss = self._particle(closure, params)
return final_loss
def _particle(self, closure, params):
"""
Perform a step of the particle optimization algorithm.
:param closure: forward pass though network
:return: loss
"""
contraction = self.state['contraction']
expansion = self.state['expansion']
xb = self.state['xb']
fb = self.state['fb']
ds = self.state['ds']
ds_min = self.state['ds_min']
ds_max = self.state['ds_max']
n = self.state['number_batches']
iteration = self.state['iteration']
reset = self.state['reset']
def grad():
"""
Calculate gradient.
:return: gradients
"""
# Calculate loss and gradients
_loss = closure()
# Create a list of gradients
g = [p.grad for p in self._params if p.requires_grad]
# return loss value and vector for the gradients
return _loss.item(), torch.nn.utils.parameters_to_vector(g)
# Convert parameters to vector
xo = torch.nn.utils.parameters_to_vector(params)
if iteration is 0 or reset:
xb = xo.clone()
fb, _ = grad()
reset = False
# Evaluate fitness
self._set_param_vector(xo)
_, gradient = grad()
# Calculate learning rate
step = ds / torch.sqrt(torch.sum(torch.pow(gradient, 2)))
# Update Particle
random = xo.new(xo.size(0)).uniform_()
x = xo + 2.0 * random * (xb - xo) - step * gradient
# Evaluate fitness
self._set_param_vector(x)
loss, gradient = grad()
# Determine if player has improved
if loss >= fb:
ds = max([contraction * ds, ds_min])
if ds is ds_min:
reset = True
elif loss <= fb:
ds = min([expansion * ds, ds_max])
fb = fb + (1 / n) * (loss - fb)
xb = xb + (1 / n) * (x - xb)
self._set_param_vector(x)
self.state['contraction'] = contraction
self.state['expansion'] = expansion
self.state['xb'] = xb
self.state['fb'] = fb
self.state['ds'] = ds
self.state['iteration'] = iteration + 1
self.state['reset'] = reset
return loss
``` |
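A minimal usage sketch (assumed, not from the repository): like `torch.optim.LBFGS`, `PARTICLE.step` expects a closure that zeroes the gradients, re-evaluates the loss, and calls `backward()`; the import path below is a guess based on the file location.
```python
import torch

from particle import PARTICLE  # assumed import path

model = torch.nn.Linear(4, 1)
criterion = torch.nn.MSELoss()
x, y = torch.randn(32, 4), torch.randn(32, 1)
optimizer = PARTICLE(model.parameters(), ds=0.01, number_batches=1)

def closure():
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    return loss

for _ in range(20):
    loss = optimizer.step(closure)  # returns the batch loss as a float
print(loss)
```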
{
"source": "joekendal/forex-arbitrage",
"score": 3
} |
#### File: joekendal/forex-arbitrage/main.py
```python
import requests, json, math
CURRENCIES = {'GBP', 'USD', 'JPY', 'EUR'}
API_KEY = ''
class Node(object):
def __init__(self, currency_id, CURRENCIES):
super(Node, self).__init__()
self.id = currency_id
self.childs = []
for id in CURRENCIES:
if id == self.id:
pass
else:
self.childs.append(id)
self.rates = []
for child in self.childs:
url = 'https://v3.exchangerate-api.com/pair/{}/{}/{}'.format(API_KEY, self.id, child)
response = requests.get(url)
data = response.json()
            # store -log(rate) so a profitable cycle (product of rates > 1)
            # shows up as a negative-weight cycle for Bellman-Ford
            self.rates.append((child, -math.log(data['rate'])))
self.rates = dict(self.rates)
def pull_data():
nodes = []
for currency_id in CURRENCIES:
node = Node(currency_id, CURRENCIES)
node = (node.id, node.rates)
nodes.append(node)
return nodes
currencies = pull_data()
currencies = dict(currencies)
def initialize(graph, source):
destination = {}
predecessor = {}
for node in graph:
destination[node] = float('Inf')
predecessor[node] = None
destination[source] = 0
return destination, predecessor
def relax(node, neighbour, graph, destination, predecessor):
if destination[neighbour] > destination[node] + graph[node][neighbour]:
destination[neighbour] = destination[node] + graph[node][neighbour]
predecessor[neighbour] = node
def retrace_negative_loop(p, start):
arbitrageLoop = [start]
next_node = start
while True:
next_node = p[next_node]
if next_node not in arbitrageLoop:
arbitrageLoop.append(next_node)
else:
arbitrageLoop.append(next_node)
arbitrageLoop = arbitrageLoop[arbitrageLoop.index(next_node):]
return arbitrageLoop
def bellman_ford(graph, source):
destination, predecessor = initialize(graph, source)
for i in range(len(graph)-1):
for u in graph:
for v in graph[u]:
relax(u, v, graph, destination, predecessor)
    # a negative-weight cycle exists iff some edge can still be relaxed
    for u in graph:
        for v in graph[u]:
            if destination[v] > destination[u] + graph[u][v]:
                return retrace_negative_loop(predecessor, v)
    return None
paths = []
graph = currencies
for key in graph:
path = bellman_ford(graph, key)
    if path is not None and path not in paths:
paths.append(path)
for path in paths:
if path == None:
pass
elif len(path) < 4:
pass
else:
x = 100000
print("x = {} {}".format(x, path[0]))
for i,value in enumerate(path):
if i+1 < len(path):
start = path[i]
end = path[i+1]
rate = math.exp(-graph[start][end])
x *= rate
print("\t{}->{} @{} = {} {}".format(start, end, rate, x, end))
print("\n")
``` |
{
"source": "joekickass/python-kinesis-logger",
"score": 3
} |
#### File: python-kinesis-logger/kinesishandler/kinesishandler.py
```python
from logging.handlers import BufferingHandler
class KinesisHandler(BufferingHandler):
"""
Sends logs in batches to Kinesis
Uses a queue to dispatch batched data to worker thread
"""
def __init__(self, capacity, queue):
"""
Initialize the handler with buffer size and queue
"""
BufferingHandler.__init__(self, capacity)
self.queue = queue
def prepare(self, records):
"""
Prepare data for queuing
TODO: Is self.format() keeping all info? what about errors?
"""
return [self.format(record) for record in records]
def flush(self):
"""
Put buffered data in queue and zap buffer.
"""
self.acquire()
try:
self.queue.put(self.prepare(self.buffer))
self.buffer = []
finally:
self.release()
``` |
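One possible way to drain the queue (a sketch under assumptions, not from the repository): a background thread ships each flushed batch with boto3's `put_records`; the stream name and region are placeholders, and a flush happens once `capacity` records have accumulated.
```python
import logging
import queue
import threading

import boto3

from kinesishandler.kinesishandler import KinesisHandler  # assumed import path

q = queue.Queue()

def worker():
    client = boto3.client("kinesis", region_name="us-east-1")  # placeholder region
    while True:
        batch = q.get()  # a list of formatted log lines from flush()
        records = [{"Data": line.encode("utf-8"), "PartitionKey": "logs"} for line in batch]
        client.put_records(StreamName="my-log-stream", Records=records)  # placeholder stream
        q.task_done()

threading.Thread(target=worker, daemon=True).start()

logger = logging.getLogger("app")
logger.addHandler(KinesisHandler(capacity=10, queue=q))
logger.warning("something happened")
```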
{
"source": "JoeKifle/ISPR-RBM-From-scratch-implementaion",
"score": 3
} |
#### File: JoeKifle/ISPR-RBM-From-scratch-implementaion/RBM.py
```python
import math
import numpy as np
from scipy.special import expit # sigmoid
import matplotlib.pyplot as plt
class RBM():
def __init__(self, visibleLayers=784, hiddenLayers=100):
        self.visibleLayers = visibleLayers
        self.hiddenLayers = hiddenLayers
# Parameters
self.vhW = 0.1 * np.random.randn(visibleLayers, hiddenLayers)
self.vlbias = np.zeros(visibleLayers)
self.hlbias = -4.0 * np.ones(hiddenLayers)
# Gradients
self.vhW_delta = np.zeros(self.vhW.shape) # W_gradient
self.vb_delta = np.zeros(visibleLayers) # visible unit bias gradient
self.hb_delta = np.zeros(hiddenLayers) # hidden unit bias gradient
def posetivePhase(self, visibleLayer):
# probability distribution of the hidden layer.
pdH = self.sigmoid(np.matmul(visibleLayer, self.vhW) + self.hlbias)
return (pdH, np.random.binomial(1, p=pdH))
def negativePhase(self, hiddenLayer):
# probability distribution of the visible layer.
pdV = self.sigmoid(np.matmul(hiddenLayer, self.vhW.T) + self.vlbias)
return (pdV, np.random.binomial(1, p=pdV))
def compute_error_and_grads(self, batch):
batchSize = batch.shape[0]
v0 = batch.reshape(batchSize, -1)
# Compute gradients - Positive Phase
ph0, h0 = self.posetivePhase(v0)
vhW_delta = np.matmul(v0.T, ph0)
vb_delta = np.sum(v0, axis=0)
hb_delta = np.sum(ph0, axis=0)
# Compute gradients - Negative Phase
pv1, v1 = self.negativePhase(h0)
ph1, h1 = self.posetivePhase(pv1)
vhW_delta -= np.matmul(pv1.T, ph1)
vb_delta -= np.sum(pv1, axis=0)
hb_delta -= np.sum(ph1, axis=0)
self.vhW_delta = vhW_delta/batchSize
self.hb_delta = hb_delta/batchSize
self.vb_delta = vb_delta/batchSize
recon_err = np.mean(np.sum((v0 - pv1)**2, axis=1), axis=0) # sum of squared error averaged over the batch
return recon_err
def update_params(self, eta):
self.vhW += (eta * self.vhW_delta)
self.vlbias +=(eta * self.vb_delta)
self.hlbias += (eta * self.hb_delta)
def plot_weights(self, weight, savefile=""):
plt.clf()
fig, axes = plt.subplots(10, 10, gridspec_kw = {'wspace':0.1, 'hspace':0.1}, figsize=(10, 10))
for i in range(10):
for j in range(10):
axes[i, j].imshow(weight[:,i*10+j].reshape(28, 28), cmap='gray')
axes[i, j].axis('off')
plt.savefig(savefile)
def reconstruct(self, V):
Hp, Hs = self.posetivePhase(V)
Vp, Vs = self.negativePhase(Hs) # reconstructionPhase
return Vp,Hs
def sigmoid(self,x):
return 1 / (1 + np.exp(-x))
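# --- Minimal training sketch (editor's addition, not part of the original repo) ---
# Random binary data stands in for a real dataset such as binarized MNIST (shape (N, 784)).
if __name__ == "__main__":
    data = (np.random.rand(512, 784) > 0.5).astype(float)  # placeholder inputs
    rbm = RBM(visibleLayers=784, hiddenLayers=100)
    batch_size, eta = 64, 0.01
    for epoch in range(3):
        errors = []
        for start in range(0, len(data), batch_size):
            errors.append(rbm.compute_error_and_grads(data[start:start + batch_size]))
            rbm.update_params(eta)
        print("epoch", epoch, "mean reconstruction error", np.mean(errors))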
``` |
{
"source": "Joe-Kileel/manifold-learning-arbitrary-norms",
"score": 3
} |
#### File: Joe-Kileel/manifold-learning-arbitrary-norms/pickler.py
```python
import os
import pickle
import gzip
import errno
import inspect
import time
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
PICKLES_PATH = os.path.join(CURRENT_DIR, 'pickles')
def _pickled_name(name):
return os.path.join(PICKLES_PATH, name) + '.pickle.gz'
def dump(filename, **kwargs):
os.makedirs(PICKLES_PATH, exist_ok=True)
filename = _pickled_name(filename)
metadata = {'date': time.ctime()}
print('Saving to', filename)
print("Saved fields: ", ', '.join(sorted(kwargs.keys())))
with gzip.GzipFile(filename, 'wb') as f:
pickle.dump({'metadata': metadata, 'data': kwargs}, f, 2)
class StructFromDict(object):
def __init__(self, d):
self.__dict__.update(d)
def __repr__(self):
return repr(self.__dict__)
def load(name):
filename = _pickled_name(name)
print('Loading', filename)
with gzip.GzipFile(filename, 'rb') as f:
d = pickle.load(f)
print('Creation time:', d['metadata']['date'])
return StructFromDict(d['data'])
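# --- Minimal usage sketch (editor's addition, not part of the original module) ---
# Round-trips a couple of fields through the gzip-pickle helpers; files land in ./pickles.
if __name__ == "__main__":
    dump("example_run", weights=[1.0, 2.0, 3.0], labels=["a", "b", "c"])
    result = load("example_run")
    print(result.weights, result.labels)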
```
#### File: Joe-Kileel/manifold-learning-arbitrary-norms/produce_all_figures.py
```python
import os
import matplotlib.pyplot as plt
import numpy as np
import ATP_synthase_datagen
import euclidean_vs_wemd_metrics
import euclidean_vs_wemd_embeddings
import weighted_L1_eigenfunctions_on_circle
WAVELET = 'sym3'
LEVEL = 6
FIGURES_DIR = 'figures'
def figure_rotor_slice(figures_dir):
VMIN =-0.06
VMAX = 0.150
(volumes, angles) = ATP_synthase_datagen.build_dataset(1, angles_deg=[0])
m = volumes[0]
plt.figure()
plt.imshow(m[25,::-1,:], vmin=VMIN, vmax=VMAX)
fn = os.path.join(figures_dir, 'slice_noiseless.png')
print('Saving', fn)
plt.savefig(fn, bbox_inches='tight', pad_inches=0)
plt.figure()
noisy_m = m + np.random.normal(loc=0, scale=euclidean_vs_wemd_embeddings.STD, size=m.shape)
plt.imshow(noisy_m[25,::-1,:], vmin=VMIN, vmax=VMAX)
fn = os.path.join(figures_dir, 'slice_noisy.png')
print('Saving', fn)
plt.savefig(fn, bbox_inches='tight', pad_inches=0)
def main():
os.makedirs(FIGURES_DIR, exist_ok=True)
print('==== Saving rotor slices =========================================================')
figure_rotor_slice(FIGURES_DIR)
print('= Computing and saving WEMD vs Euclidean figure')
euclidean_vs_wemd_metrics.compute_and_plot(WAVELET, LEVEL, FIGURES_DIR)
print(' ==== Running all WEMD embedding calculations ====')
euclidean_vs_wemd_embeddings.precalc_all(WAVELET, LEVEL)
print(' ==== Producing all WEMD embedding figures ====')
euclidean_vs_wemd_embeddings.plot_all_gaussian_kernel(FIGURES_DIR)
print(' ==== Computing and plotting all sanity check figures for the empirical L1 Laplacian ====')
weighted_L1_eigenfunctions_on_circle.plot_all_check_L1_laplacian()
print(' ==== Computing and plotting first eigenvectors of the weighted L1 norm Laplacian on the circle')
weighted_L1_eigenfunctions_on_circle.plot_all_first_eigenvectors()
if __name__ == '__main__':
main()
``` |
{
"source": "joekiller/moto",
"score": 2
} |
#### File: moto/dynamodb2/responses.py
```python
from __future__ import unicode_literals
import json
import six
from moto.core.responses import BaseResponse
from moto.core.utils import camelcase_to_underscores
from .models import dynamodb_backend2, dynamo_json_dump
GET_SESSION_TOKEN_RESULT = """
<GetSessionTokenResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
<GetSessionTokenResult>
<Credentials>
<SessionToken>
<KEY>
</SessionToken>
<SecretAccessKey>
<KEY>
</SecretAccessKey>
<Expiration>2011-07-11T19:55:29.611Z</Expiration>
<AccessKeyId>AKIAIOSFODNN7EXAMPLE</AccessKeyId>
</Credentials>
</GetSessionTokenResult>
<ResponseMetadata>
<RequestId>58c5dbae-abef-11e0-8cfe-09039844ac7d</RequestId>
</ResponseMetadata>
</GetSessionTokenResponse>"""
def sts_handler():
return GET_SESSION_TOKEN_RESULT
class DynamoHandler(BaseResponse):
def get_endpoint_name(self, headers):
"""Parses request headers and extracts part od the X-Amz-Target
that corresponds to a method of DynamoHandler
ie: X-Amz-Target: DynamoDB_20111205.ListTables -> ListTables
"""
# Headers are case-insensitive. Probably a better way to do this.
match = headers.get('x-amz-target') or headers.get('X-Amz-Target')
if match:
return match.split(".")[1]
def error(self, type_, status=400):
return status, self.response_headers, dynamo_json_dump({'__type': type_})
def call_action(self):
body = self.body.decode('utf-8')
if 'GetSessionToken' in body:
return 200, self.response_headers, sts_handler()
self.body = json.loads(body or '{}')
endpoint = self.get_endpoint_name(self.headers)
if endpoint:
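            # e.g. "ListTables" -> "list_tables", which is then looked up as a method on this handler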
endpoint = camelcase_to_underscores(endpoint)
response = getattr(self, endpoint)()
if isinstance(response, six.string_types):
return 200, self.response_headers, response
else:
status_code, new_headers, response_content = response
self.response_headers.update(new_headers)
return status_code, self.response_headers, response_content
else:
return 404, self.response_headers, ""
def list_tables(self):
body = self.body
limit = body.get('Limit')
if body.get("ExclusiveStartTableName"):
last = body.get("ExclusiveStartTableName")
start = list(dynamodb_backend2.tables.keys()).index(last) + 1
else:
start = 0
all_tables = list(dynamodb_backend2.tables.keys())
if limit:
tables = all_tables[start:start + limit]
else:
tables = all_tables[start:]
response = {"TableNames": tables}
if limit and len(all_tables) > start + limit:
response["LastEvaluatedTableName"] = tables[-1]
return dynamo_json_dump(response)
def create_table(self):
body = self.body
#get the table name
table_name = body['TableName']
#get the throughput
throughput = body["ProvisionedThroughput"]
#getting the schema
key_schema = body['KeySchema']
#getting attribute definition
attr = body["AttributeDefinitions"]
#getting the indexes
table = dynamodb_backend2.create_table(table_name,
schema = key_schema,
throughput = throughput,
attr = attr)
return dynamo_json_dump(table.describe)
def delete_table(self):
name = self.body['TableName']
table = dynamodb_backend2.delete_table(name)
if table is not None:
return dynamo_json_dump(table.describe)
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
def update_table(self):
name = self.body['TableName']
throughput = self.body["ProvisionedThroughput"]
table = dynamodb_backend2.update_table_throughput(name, throughput)
return dynamo_json_dump(table.describe)
def describe_table(self):
name = self.body['TableName']
try:
table = dynamodb_backend2.tables[name]
except KeyError:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
return dynamo_json_dump(table.describe)
def put_item(self):
name = self.body['TableName']
item = self.body['Item']
result = dynamodb_backend2.put_item(name, item)
if result:
item_dict = result.to_json()
item_dict['ConsumedCapacityUnits'] = 1
return dynamo_json_dump(item_dict)
else:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
def batch_write_item(self):
table_batches = self.body['RequestItems']
for table_name, table_requests in table_batches.items():
for table_request in table_requests:
request_type = list(table_request.keys())[0]
request = list(table_request.values())[0]
if request_type == 'PutRequest':
item = request['Item']
dynamodb_backend2.put_item(table_name, item)
elif request_type == 'DeleteRequest':
keys = request['Key']
item = dynamodb_backend2.delete_item(table_name, keys)
response = {
"Responses": {
"Thread": {
"ConsumedCapacityUnits": 1.0
},
"Reply": {
"ConsumedCapacityUnits": 1.0
}
},
"UnprocessedItems": {}
}
return dynamo_json_dump(response)
def get_item(self):
name = self.body['TableName']
key = self.body['Key']
try:
item = dynamodb_backend2.get_item(name, key)
except ValueError:
er = 'com.amazon.coral.validate#ValidationException'
return self.error(er, status=400)
if item:
item_dict = item.describe_attrs(attributes = None)
item_dict['ConsumedCapacityUnits'] = 0.5
return dynamo_json_dump(item_dict)
else:
# Item not found
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er, status=404)
def batch_get_item(self):
table_batches = self.body['RequestItems']
results = {
"ConsumedCapacity":[],
"Responses": {
},
"UnprocessedKeys": {
}
}
for table_name, table_request in table_batches.items():
items = []
keys = table_request['Keys']
attributes_to_get = table_request.get('AttributesToGet')
results["Responses"][table_name]=[]
for key in keys:
item = dynamodb_backend2.get_item(table_name, key)
if item:
item_describe = item.describe_attrs(attributes_to_get)
results["Responses"][table_name].append(item_describe["Item"])
results["ConsumedCapacity"].append({
"CapacityUnits": len(keys),
"TableName": table_name
})
return dynamo_json_dump(results)
def query(self):
name = self.body['TableName']
keys = self.body['KeyConditions']
hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name(name)
if hash_key_name is None:
er = "'com.amazonaws.dynamodb.v20120810#ResourceNotFoundException"
return self.error(er)
hash_key = keys[hash_key_name]['AttributeValueList'][0]
if len(keys) == 1:
range_comparison = None
range_values = []
else:
if range_key_name == None:
er = "com.amazon.coral.validate#ValidationException"
return self.error(er)
else:
range_condition = keys[range_key_name]
if range_condition:
range_comparison = range_condition['ComparisonOperator']
range_values = range_condition['AttributeValueList']
else:
range_comparison = None
range_values = []
items, last_page = dynamodb_backend2.query(name, hash_key, range_comparison, range_values)
if items is None:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
limit = self.body.get("Limit")
if limit:
items = items[:limit]
reversed = self.body.get("ScanIndexForward")
if reversed != False:
items.reverse()
result = {
"Count": len(items),
"Items": [item.attrs for item in items],
"ConsumedCapacityUnits": 1,
}
# Implement this when we do pagination
# if not last_page:
# result["LastEvaluatedKey"] = {
# "HashKeyElement": items[-1].hash_key,
# "RangeKeyElement": items[-1].range_key,
# }
return dynamo_json_dump(result)
def scan(self):
name = self.body['TableName']
filters = {}
scan_filters = self.body.get('ScanFilter', {})
for attribute_name, scan_filter in scan_filters.items():
# Keys are attribute names. Values are tuples of (comparison, comparison_value)
comparison_operator = scan_filter["ComparisonOperator"]
comparison_values = scan_filter.get("AttributeValueList", [])
filters[attribute_name] = (comparison_operator, comparison_values)
items, scanned_count, last_page = dynamodb_backend2.scan(name, filters)
if items is None:
er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException'
return self.error(er)
limit = self.body.get("Limit")
if limit:
items = items[:limit]
result = {
"Count": len(items),
"Items": [item.attrs for item in items],
"ConsumedCapacityUnits": 1,
"ScannedCount": scanned_count
}
# Implement this when we do pagination
# if not last_page:
# result["LastEvaluatedKey"] = {
# "HashKeyElement": items[-1].hash_key,
# "RangeKeyElement": items[-1].range_key,
# }
return dynamo_json_dump(result)
def delete_item(self):
name = self.body['TableName']
keys = self.body['Key']
return_values = self.body.get('ReturnValues', '')
item = dynamodb_backend2.delete_item(name, keys)
if item:
if return_values == 'ALL_OLD':
item_dict = item.to_json()
else:
item_dict = {'Attributes': []}
item_dict['ConsumedCapacityUnits'] = 0.5
return dynamo_json_dump(item_dict)
else:
er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException'
return self.error(er)
```
#### File: moto/s3/utils.py
```python
from __future__ import unicode_literals
import re
import sys
from six.moves.urllib.parse import urlparse, unquote
bucket_name_regex = re.compile("(.+).s3.amazonaws.com")
def bucket_name_from_url(url):
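    # e.g. "https://mybucket.s3.amazonaws.com/key" -> "mybucket"; non-AWS hosts fall back to
    # the first dotted label of the domain, and bare hostnames return None.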
domain = urlparse(url).netloc
if domain.startswith('www.'):
domain = domain[4:]
if 'amazonaws.com' in domain:
bucket_result = bucket_name_regex.search(domain)
if bucket_result:
return bucket_result.groups()[0]
else:
if '.' in domain:
return domain.split(".")[0]
else:
# No subdomain found.
return None
def clean_key_name(key_name):
return unquote(key_name)
class _VersionedKeyStore(dict):
""" A simplified/modified version of Django's `MultiValueDict` taken from:
https://github.com/django/django/blob/70576740b0bb5289873f5a9a9a4e1a26b2c330e5/django/utils/datastructures.py#L282
"""
def __sgetitem__(self, key):
return super(_VersionedKeyStore, self).__getitem__(key)
def __getitem__(self, key):
return self.__sgetitem__(key)[-1]
def __setitem__(self, key, value):
try:
current = self.__sgetitem__(key)
current.append(value)
except (KeyError, IndexError):
current = [value]
super(_VersionedKeyStore, self).__setitem__(key, current)
def get(self, key, default=None):
try:
return self[key]
except (KeyError, IndexError):
pass
return default
def getlist(self, key, default=None):
try:
return self.__sgetitem__(key)
except (KeyError, IndexError):
pass
return default
def setlist(self, key, list_):
if isinstance(list_, tuple):
list_ = list(list_)
elif not isinstance(list_, list):
list_ = [list_]
super(_VersionedKeyStore, self).__setitem__(key, list_)
def _iteritems(self):
for key in self:
yield key, self[key]
def _itervalues(self):
for key in self:
yield self[key]
def _iterlists(self):
for key in self:
yield key, self.getlist(key)
items = iteritems = _iteritems
lists = iterlists = _iterlists
values = itervalues = _itervalues
if sys.version_info[0] < 3:
def items(self):
return list(self.iteritems())
def values(self):
return list(self.itervalues())
def lists(self):
return list(self.iterlists())
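# --- Minimal usage sketch (editor's addition, not part of the original module) ---
# Repeated assignment keeps every "version" of a key; plain item access returns the latest one.
if __name__ == "__main__":
    store = _VersionedKeyStore()
    store["my-key"] = "v1"
    store["my-key"] = "v2"
    print(store["my-key"])          # 'v2' (latest version)
    print(store.getlist("my-key"))  # ['v1', 'v2'] (all versions)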
```
#### File: tests/test_sns/test_publishing.py
```python
from __future__ import unicode_literals
from six.moves.urllib.parse import parse_qs
import boto
from freezegun import freeze_time
import httpretty
import sure # noqa
from moto import mock_sns, mock_sqs
@mock_sqs
@mock_sns
def test_publish_to_sqs():
conn = boto.connect_sns()
conn.create_topic("some-topic")
topics_json = conn.get_all_topics()
topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn']
sqs_conn = boto.connect_sqs()
sqs_conn.create_queue("test-queue")
conn.subscribe(topic_arn, "sqs", "arn:aws:sqs:us-east-1:123456789012:test-queue")
conn.publish(topic=topic_arn, message="my message")
queue = sqs_conn.get_queue("test-queue")
message = queue.read(1)
message.get_body().should.equal('my message')
@freeze_time("2013-01-01")
@mock_sns
def test_publish_to_http():
httpretty.HTTPretty.register_uri(
method="POST",
uri="http://example.com/foobar",
)
conn = boto.connect_sns()
conn.create_topic("some-topic")
topics_json = conn.get_all_topics()
topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn']
conn.subscribe(topic_arn, "http", "http://example.com/foobar")
response = conn.publish(topic=topic_arn, message="my message", subject="my subject")
message_id = response['PublishResponse']['PublishResult']['MessageId']
last_request = httpretty.last_request()
last_request.method.should.equal("POST")
parse_qs(last_request.body.decode('utf-8')).should.equal({
"Type": ["Notification"],
"MessageId": [message_id],
"TopicArn": ["arn:aws:sns:us-east-1:123456789012:some-topic"],
"Subject": ["my subject"],
"Message": ["my message"],
"Timestamp": ["2013-01-01T00:00:00Z"],
"SignatureVersion": ["1"],
"Signature": ["<KEY>],
"SigningCertURL": ["https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem"],
"UnsubscribeURL": ["https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"],
})
```
#### File: tests/test_sns/test_subscriptions.py
```python
from __future__ import unicode_literals
import boto
import sure # noqa
from moto import mock_sns
@mock_sns
def test_creating_subscription():
conn = boto.connect_sns()
conn.create_topic("some-topic")
topics_json = conn.get_all_topics()
topic_arn = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"][0]['TopicArn']
conn.subscribe(topic_arn, "http", "http://example.com/")
subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"]
subscriptions.should.have.length_of(1)
subscription = subscriptions[0]
subscription["TopicArn"].should.equal(topic_arn)
subscription["Protocol"].should.equal("http")
subscription["SubscriptionArn"].should.contain(topic_arn)
subscription["Endpoint"].should.equal("http://example.com/")
# Now unsubscribe the subscription
conn.unsubscribe(subscription["SubscriptionArn"])
# And there should be zero subscriptions left
subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"]["ListSubscriptionsResult"]["Subscriptions"]
subscriptions.should.have.length_of(0)
@mock_sns
def test_getting_subscriptions_by_topic():
conn = boto.connect_sns()
conn.create_topic("topic1")
conn.create_topic("topic2")
topics_json = conn.get_all_topics()
topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"]
topic1_arn = topics[0]['TopicArn']
topic2_arn = topics[1]['TopicArn']
conn.subscribe(topic1_arn, "http", "http://example1.com/")
conn.subscribe(topic2_arn, "http", "http://example2.com/")
topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn)["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"]
topic1_subscriptions.should.have.length_of(1)
topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/")
``` |
{
"source": "joekir/crypt-van-impl",
"score": 3
} |
#### File: python/Cryptography/crypt.py
```python
import sys
from cryptography.hazmat.primitives.ciphers import (
Cipher, algorithms, modes
)
from cryptography.hazmat.backends import default_backend
def encrypt(ptext, iv, aad, key):
encryptor = Cipher(
algorithms.AES(key),
modes.GCM(iv),
backend=default_backend()
).encryptor()
encryptor.authenticate_additional_data(aad)
# Encrypt the plaintext and get the associated ciphertext.
# GCM does not require padding.
ciphertext = encryptor.update(ptext) + encryptor.finalize()
return (ciphertext.hex(), encryptor.tag.hex())
ptext = bytes(sys.argv[1],'utf8')
iv = bytes.fromhex(sys.argv[2])
aad = bytes(sys.argv[3],'utf8')
key = bytes.fromhex(sys.argv[4])
#print(ptext,iv,aad,key)
print(encrypt(ptext,iv,aad,key))
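# --- Decrypt counterpart (editor's sketch, not part of the original file) ---
# Shows how the (ciphertext, tag) pair printed above would be verified and decrypted with the
# same library; ct_hex and tag_hex are assumed to be the two hex strings returned by encrypt().
def decrypt(ct_hex, tag_hex, iv, aad, key):
    decryptor = Cipher(
        algorithms.AES(key),
        modes.GCM(iv, bytes.fromhex(tag_hex)),
        backend=default_backend()
    ).decryptor()
    decryptor.authenticate_additional_data(aad)
    # finalize() raises InvalidTag if the ciphertext, AAD or tag were tampered with.
    return decryptor.update(bytes.fromhex(ct_hex)) + decryptor.finalize()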
``` |
{
"source": "JoeKlemmer/sinktheship",
"score": 4
} |
#### File: JoeKlemmer/sinktheship/sinktheship.py
```python
import random
import os
random.seed()
# Variables
playing_the_game = True
ship_location = [[int(j) for j in range(25)] for _ in range(25)]
playing_grid = [[int(j) for j in range(25)] for _ in range(25)]
display_help_info = """
The object of the game is to sink the ship. There will be one ship
on the grid. The player will choose horizontal and virtical coordinates
and the game will calculate whether those coordiantes were a hit or a
miss. An \"X\" will mark a hit and \"*\" a miss. The difficulty levels
will be set as follows;
- Easy will be a grid of 10 x 10
- Normal will be a grid of 15 x 15
- Hard will be a grid of 20 x 20
You will have 15, 20, and 25 attempts, respectively, to sink the ship.
If you do not, the ship will escape and you lose.
You may exit the game at any time by hitting \"Ctrl-C\".
"""
# Build a 2 dimensional array of the size chosen
def build_grid(gsize):
for i in range(gsize):
for j in range(gsize):
playing_grid[i][j] = " ."
# Display the playing grid the size chosen
def display_grid(gsize):
os.system("clear")
for x in range(gsize):
if x < 10:
print(" " + str(x+1), end="")
else:
print(" " + str(x+1), end="")
print()
for y in range(gsize):
if y == 0:
print(" --", end="")
else:
print("---", end="")
if y == gsize - 1:
print("-\\")
for i in range(gsize):
for j in range(gsize):
print(playing_grid[i][j], end="")
print(" |" + str(i+1))
# Get the users coordinates
def get_users_coordinates(gsize):
while True:
try:
global acrossCoord
acrossCoord = int(input("Enter across coordinate: "))
except ValueError:
print("Value must be a number")
else:
if acrossCoord > gsize or acrossCoord <= 0:
print("Value must be greater than zero and less than or equal to " + str(gsize))
else:
break
while True:
try:
global downCoord
downCoord = int(input("Enter down coordinate: "))
except ValueError:
print("Value must be a number")
else:
if (downCoord > gsize or downCoord <= 0):
print("Value must be greater than zero and less than or equal to " + str(gsize))
else:
break
# Determine where and in what direction to place the ship
def place_ship(gsize):
direction = random.randint(1, 4)
# The limiter keeps the ship from being placed outside the grid
limiter = random.randint(0, 5)
gridsize = random.randint(0, (gsize - 1))
i = 0
while i < 4:
if direction == 1: # North
# playing_grid[limiter+i][gridsize] = " @" # <-- Used for debugging
ship_location[limiter+i][gridsize] = " @"
elif direction == 2: #South
# playing_grid[(limiter + 3) - i][gridsize] = " @" # <-- Used for debugging
ship_location[(limiter + 3) - i][gridsize] = " @"
elif direction == 3: #East
# playing_grid[gridsize][(limiter + 3) - i] = " @" # <-- Used for debugging
ship_location[gridsize][(limiter + 3) - i] = " @"
elif direction == 4: #West
# playing_grid[gridsize][limiter + i] = " @" # <-- Used for debugging
ship_location[gridsize][limiter + i] = " @"
else:
# If we end up here, something broke
print("Shit done did blowed up!")
i += 1
# Check whether the coordinates the user entered hit the ship
def calculate_hit():
hit_counter = 0
if playing_grid[downCoord - 1][acrossCoord - 1] == ship_location[downCoord - 1][acrossCoord - 1]:
playing_grid[downCoord-1][acrossCoord-1] = " X"
hit_counter += 1
else:
playing_grid[downCoord-1][acrossCoord-1] = " *"
return hit_counter
# Basically, the main game loop
def play_game(gsize):
is_it_sunk = 0
play_counter = 0
build_grid(gsize)
place_ship(gsize)
display_grid(gsize)
while is_it_sunk < 4 and play_counter < gsize + 5:
play_counter += 1
print("You have " + str((gsize + 6) - play_counter) + " attempts left")
get_users_coordinates(gsize)
is_it_sunk += calculate_hit()
display_grid(gsize)
if is_it_sunk == 4:
print("\n\"Hey! You sunk my ship!\"")
print("\"It took you " + str(play_counter) + " shots to sink it.\"")
else:
print("\n\"Ha! You took too long. I got away!\"")
print("Better luck next time.")
# Initial game menu
print("\nGreetings <NAME>.")
print("Welcome to \"Sink My Ship\"")
while playing_the_game:
print("\nPlease make a selection:")
print("1 - Easy")
print("2 - Normal")
print("3 - Hard")
print()
print("H - Help")
print("Q - Quit")
print("-------------------------")
choice = input("Enter your choice: ")
if str(choice) == "1":
play_game(10)
break
elif str(choice) == "2":
play_game(15)
break
elif str(choice) == "3":
play_game(20)
break
elif str(choice) == "H" or str(choice) == "h":
print(display_help_info)
elif str(choice) == "Q" or str(choice) == "q":
playing_the_game = False
print("\n\nThank you for playing.")
print("Goodbye.\n")
break
else:
print("\nError: You must enter one of the above options\n")
pause = input("Press any key to continue...")
``` |
{
"source": "Joeklepko/akshare",
"score": 2
} |
#### File: akshare/economic/macro_canada.py
```python
import pandas as pd
import requests
from akshare.utils import demjson
# New housing starts
def macro_canada_new_house_rate():
    """
    Eastmoney economic data - Canada - new housing starts
    http://data.eastmoney.com/cjsj/foreign_7_0.html
    :return: new housing starts
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "7",
"stat": "0",
"pageNo": "1",
"pageNum": "1",
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Unemployment rate
def macro_canada_unemployment_rate():
    """
    Eastmoney economic data - Canada - unemployment rate
    http://data.eastmoney.com/cjsj/foreign_7_1.html
    :return: unemployment rate
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "7",
"stat": "1",
"pageNo": "1",
"pageNum": "1",
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Trade balance
def macro_canada_trade():
    """
    Eastmoney economic data - Canada - trade balance
    http://data.eastmoney.com/cjsj/foreign_7_2.html
    :return: trade balance
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "7",
"stat": "2",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"]) / 100
temp_df["现值"] = pd.to_numeric(temp_df["现值"]) / 100
return temp_df
# Retail sales MoM
def macro_canada_retail_rate_monthly():
    """
    Eastmoney economic data - Canada - retail sales MoM
    http://data.eastmoney.com/cjsj/foreign_7_3.html
    :return: retail sales MoM
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "7",
"stat": "3",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Central bank interest rate decision
def macro_canada_bank_rate():
    """
    Eastmoney economic data - Canada - central bank interest rate decision
    http://data.eastmoney.com/cjsj/foreign_7_4.html
    :return: central bank interest rate decision
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "7",
"stat": "4",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Core CPI YoY
def macro_canada_core_cpi_yearly():
    """
    Eastmoney economic data - Canada - core CPI YoY
    http://data.eastmoney.com/cjsj/foreign_7_5.html
    :return: core CPI YoY
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "7",
"stat": "5",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# Core CPI MoM
def macro_canada_core_cpi_monthly():
    """
    Eastmoney economic data - Canada - core CPI MoM
    http://data.eastmoney.com/cjsj/foreign_7_6.html
    :return: core CPI MoM
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "7",
"stat": "6",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# CPI YoY
def macro_canada_cpi_yearly():
    """
    Eastmoney economic data - Canada - CPI YoY
    http://data.eastmoney.com/cjsj/foreign_7_7.html
    :return: CPI YoY
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "7",
"stat": "7",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# CPI MoM
def macro_canada_cpi_monthly():
    """
    Eastmoney economic data - Canada - CPI MoM
    http://data.eastmoney.com/cjsj/foreign_7_8.html
    :return: CPI MoM
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "7",
"stat": "8",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
# GDP MoM
def macro_canada_gdp_monthly():
    """
    Eastmoney economic data - Canada - GDP MoM
    http://data.eastmoney.com/cjsj/foreign_7_9.html
    :return: GDP MoM
:rtype: pandas.DataFrame
"""
url = "http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx"
params = {
"type": "GJZB",
"sty": "HKZB",
"js": "({data:[(x)],pages:(pc)})",
"p": "1",
"ps": "2000",
"mkt": "7",
"stat": "9",
'pageNo': '1',
'pageNum': '1',
"_": "1625474966006",
}
r = requests.get(url, params=params)
data_text = r.text
data_json = demjson.decode(data_text[1:-1])
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]])
temp_df.columns = [
"时间",
"前值",
"现值",
"发布日期",
]
temp_df["前值"] = pd.to_numeric(temp_df["前值"])
temp_df["现值"] = pd.to_numeric(temp_df["现值"])
return temp_df
if __name__ == '__main__':
macro_canada_new_house_rate_df = macro_canada_new_house_rate()
print(macro_canada_new_house_rate_df)
macro_canada_unemployment_rate_df = macro_canada_unemployment_rate()
print(macro_canada_unemployment_rate_df)
macro_canada_trade_df = macro_canada_trade()
print(macro_canada_trade_df)
macro_canada_retail_rate_monthly_df = macro_canada_retail_rate_monthly()
print(macro_canada_retail_rate_monthly_df)
macro_canada_bank_rate_df = macro_canada_bank_rate()
print(macro_canada_bank_rate_df)
macro_canada_core_cpi_yearly_df = macro_canada_core_cpi_yearly()
print(macro_canada_core_cpi_yearly_df)
macro_canada_core_cpi_monthly_df = macro_canada_core_cpi_monthly()
print(macro_canada_core_cpi_monthly_df)
macro_canada_cpi_yearly_df = macro_canada_cpi_yearly()
print(macro_canada_cpi_yearly_df)
macro_canada_cpi_monthly_df = macro_canada_cpi_monthly()
print(macro_canada_cpi_monthly_df)
macro_canada_gdp_monthly_df = macro_canada_gdp_monthly()
print(macro_canada_gdp_monthly_df)
```
#### File: akshare/futures_derivative/nh_index_return.py
```python
import time
import requests
import pandas as pd
def num_to_str_data(str_date: int) -> str:
"""
num to str format
:param str_date: time of int format
:type str_date: int
:return: format time
:rtype: str
"""
str_date = str_date / 1000
str_date = time.localtime(str_date)
strp_time = time.strftime("%Y-%m-%d %H:%M:%S", str_date)
return strp_time
def get_nh_list_table() -> pd.DataFrame:
"""
    Nanhua Futures - overview table of all Nanhua index products
    :return: overview table of all products
:rtype: pandas.DataFrame
"""
url_name = "http://www.nanhua.net/ianalysis/plate-variety.json"
res = requests.get(url_name)
futures_name = [item["name"] for item in res.json()]
futures_code = [item["code"] for item in res.json()]
futures_exchange = [item["exchange"] for item in res.json()]
futures_first_day = [item["firstday"] for item in res.json()]
futures_index_cat = [item["indexcategory"] for item in res.json()]
futures_df = pd.DataFrame(
[
futures_code,
futures_exchange,
futures_first_day,
futures_index_cat,
futures_name,
]
).T
futures_df.columns = ["code", "exchange", "start_date", "category", "name"]
return futures_df
def nh_return_index(code: str = "Y") -> pd.DataFrame:
"""
    Nanhua Futures - full history of a single Nanhua index product
    :param code: str, one of the codes listed by get_nh_list_table
    :return: pandas.DataFrame
"""
if code in get_nh_list_table()["code"].tolist():
t = time.time()
base_url = f"http://www.nanhua.net/ianalysis/varietyindex/index/{code}.json?t={int(round(t * 1000))}"
r = requests.get(base_url)
date = [num_to_str_data(item[0]).split(" ")[0] for item in r.json()]
data = [item[1] for item in r.json()]
df_all = pd.DataFrame([date, data]).T
df_all.columns = ["date", "value"]
df_all.index = pd.to_datetime(df_all["date"])
del df_all["date"]
return df_all
if __name__ == "__main__":
nh_return_index_df = nh_return_index()
print(nh_return_index_df)
```
#### File: akshare/futures/futures_inventory.py
```python
import os
import pandas as pd
import requests
from bs4 import BeautifulSoup
from akshare.futures.cons import (
qh_headers,
sample_headers,
inventory_temp_headers,
)
def futures_inventory_99(
exchange: int = 3, symbol: int = 11, plot: bool = False
) -> pd.DataFrame:
"""
调用此函数, 请调用 try except
# 交易所代码
{'1': '上海期货交易所', '2': '郑州商品交易所', '3': '大连商品交易所', '7': 'LME', '8': 'NYMEX', '9': 'CBOT', '11': 'NYBOT', '12': 'TOCOM', '14': '上海国际能源交易中心', '15': 'OSE'}
# 交易所对应合约代码
'上海期货交易所': {'6': '铜', '7': '铝', '8': '橡胶', '21': '燃料油', '54': '锌', '58': '黄金', '59': '螺纹钢', '62': '线材', '64': '铅', '69': '白银', '78': '石油沥青', '85': '热轧卷板', '93': '锡', '94': '镍', '103': '纸浆', '109': '不锈钢'},
'郑州商品交易所': {'9': '强麦', '10': '硬麦', '23': '一号棉', '51': '白糖', '53': 'PTA', '55': '菜籽油', '60': '早籼稻', '66': '甲醇',
'67': '普麦', '72': '玻璃', '73': '油菜籽', '74': '菜籽粕', '81': '粳稻', '88': '晚籼稻',
'90': '硅铁', '91': '锰硅', '99': '棉纱', '100': '苹果', '105': '红枣', '106': '尿素', '111': '纯碱'},
'大连商品交易所': {'11': '豆一', '12': '豆二', '16': '豆粕', '24': '玉米', '52': '豆油', '56': '聚乙烯',
'57': '棕榈油', '61': '聚氯乙烯', '65': '焦炭', '75': '焦煤', '79': '铁矿石', '80': '鸡蛋',
'82': '中密度纤维板', '83': '细木工板', '84': '聚丙烯', '92': '玉米淀粉', '104': '乙二醇', '108': '粳米', '110': '苯乙烯', '112': '纤维板', '113': '液化石油气'},
'上海国际能源交易中心': {'102': '原油', '107': '20号胶', '114': '低硫燃料油'}}
'LME': {'18': 'LME铜', '19': 'LME铝', '25': 'LME镍', '26': 'LME铅', '27': 'LME锌', '45': 'LME锡', '50': 'LME铝合金'},
'NYMEX': {'20': 'COMEX铜', '31': 'COMEX金', '32': 'COMEX银'},
'CBOT': {'22': 'CBOT大豆', '46': 'CBOT小麦', '47': 'CBOT玉米', '48': 'CBOT燕麦', '49': 'CBOT糙米'},
'NYBOT': {'30': 'NYBOT2号棉'}
'TOCOM': {'44': 'TOCOM橡胶'}
'OSE': {'44': 'OSE橡胶'}
:param exchange: int 交易所, 请对照 __doc__ 中的代码输入
:param symbol: int 品种, 请对照 __doc__ 中的代码输入对应交易所的品种
# :param dir_path: str 存放图片的目录
:param plot: Bool 画出历史库存曲线图
:return: pandas.DataFrame and picture
"""
data_code = {
"1": [
"6",
"7",
"8",
"21",
"54",
"58",
"59",
"62",
"64",
"69",
"78",
"85",
"93",
"94",
"103",
"109",
],
"2": [
"9",
"10",
"23",
"51",
"53",
"55",
"60",
"66",
"67",
"72",
"73",
"74",
"81",
"88",
"90",
"91",
"99",
"100",
"105",
"106",
"111",
],
"3": [
"11",
"12",
"16",
"24",
"52",
"56",
"57",
"61",
"65",
"75",
"79",
"80",
"82",
"83",
"84",
"92",
"104",
'108',
'110',
'112',
'113',
],
"7": ["18", "19", "25", "26", "27", "45", "50"],
"8": ["20", "31", "32"],
"9": ["22", "46", "47", "48", "49"],
"11": ["30"],
"12": ["44"],
"14": ["102",
'107',
'114'],
"15": ["6"],
}
data_name = {
"1": [
"铜",
"铝",
"橡胶",
"燃料油",
"锌",
"黄金",
"螺纹钢",
"线材",
"铅",
"白银",
"石油沥青",
"热轧卷板",
"锡",
"镍",
"纸浆",
"不锈钢",
],
"2": [
"强麦",
"硬麦",
"一号棉",
"白糖",
"PTA",
"菜籽油",
"早籼稻",
"甲醇",
"普麦",
"玻璃",
"油菜籽",
"菜籽粕",
"粳稻",
"晚籼稻",
"硅铁",
"锰硅",
"棉纱",
"苹果",
"红枣",
"尿素",
"纯碱",
],
"3": [
"豆一",
"豆二",
"豆粕",
"玉米",
"豆油",
"聚乙烯",
"棕榈油",
"聚氯乙烯",
"焦炭",
"焦煤",
"铁矿石",
"鸡蛋",
"中密度纤维板",
"细木工板",
"聚丙烯",
"玉米淀粉",
"乙二醇",
'粳米',
'苯乙烯',
'纤维板',
'液化石油气',
],
"7": ["LME铜", "LME铝", "LME镍", "LME铅", "LME锌", "LME锡", "LME铝合金"],
"8": ["COMEX铜", "COMEX金", "COMEX银"],
"9": ["CBOT大豆", "CBOT小麦", "CBOT玉米", "CBOT燕麦", "CBOT糙米"],
"11": ["NYBOT2号棉"],
"12": ["TOCOM橡胶"],
"14": ["原油", '20号胶', '低硫燃料油'],
"15": ["OSE橡胶"],
}
out_exchange_name = {
"1": "上海期货交易所",
"2": "郑州商品交易所",
"3": "大连商品交易所",
"7": "LME",
"8": "NYMEX",
"9": "CBOT",
"11": "NYBOT",
"12": "TOCOM",
"14": "上海国际能源交易中心",
"15": "OSE",
}
name_temp_dict = {}
code_temp_dict = {}
for num in data_code.keys():
name_temp_dict[out_exchange_name[num]] = dict(
zip(data_code[num], data_name[num])
)
code_temp_dict[num] = dict(zip(data_code[num], data_name[num]))
while True:
try:
if exchange != 1:
url = "http://service.99qh.com/Storage/Storage.aspx"
params = {
"page": "99qh"
}
res = requests.get(url, params=params, headers=sample_headers)
soup = BeautifulSoup(res.text, "lxml")
view_state = soup.find_all(attrs={"id": "__VIEWSTATE"})[0]["value"]
even_validation = soup.find_all(attrs={"id": "__EVENTVALIDATION"})[0][
"value"
]
# print(symbol)
payload = {
"__EVENTTARGET": "ddlExchName",
"__EVENTARGUMENT": "",
"__LASTFOCUS": "",
"__VIEWSTATE": view_state,
"__VIEWSTATEGENERATOR": "6EAC22FA",
"__EVENTVALIDATION": even_validation,
"ddlExchName": int(exchange),
"ddlGoodsName": 6,
}
res = requests.post(url, data=payload, headers=qh_headers)
soup = BeautifulSoup(res.text, "lxml")
exchange_name = (
soup.find_all("select")[0]
.find_all(attrs={"selected": "selected"})[0]
.get_text()
)
# print("切换后", exchange_name)
view_state = soup.find_all(attrs={"id": "__VIEWSTATE"})[0]["value"]
even_validation = soup.find_all(attrs={"id": "__EVENTVALIDATION"})[0][
"value"
]
payload = {
"__EVENTTARGET": "ddlGoodsName",
"__EVENTARGUMENT": "",
"__LASTFOCUS": "",
"__VIEWSTATE": view_state,
"__VIEWSTATEGENERATOR": "6EAC22FA",
"__EVENTVALIDATION": even_validation,
"ddlExchName": int(exchange),
"ddlGoodsName": int(symbol),
}
res = requests.post(url, data=payload, headers=qh_headers)
soup = BeautifulSoup(res.text, "lxml")
small_code = (
soup.find_all(attrs={"id": "chartData"})[0]["src"]
.split("&")[-2]
.split("=")[1]
)
# print(small_code)
payload = {
"__EVENTTARGET": "btnZoomAll",
"__EVENTARGUMENT": "",
"__LASTFOCUS": "",
"__VIEWSTATE": view_state,
"__VIEWSTATEGENERATOR": "6EAC22FA",
"__EVENTVALIDATION": even_validation,
"ddlExchName": int(exchange),
"ddlGoodsName": int(symbol),
}
res = requests.post(url, data=payload, headers=qh_headers)
soup = BeautifulSoup(res.text, "lxml")
inventory_table = pd.read_html(res.text)[-1].T
inventory_table.columns = inventory_table.iloc[0, :]
inventory_table = inventory_table.iloc[1:, :]
inventory_table.reset_index(inplace=True, drop=True)
inventory_table.columns.name = None
params = {
"ChartDirectorChartImage": "chart_chartData",
"cacheId": soup.find_all(attrs={"id": "chartData"})[0]["src"]
.split("&")[-2]
.split("=")[1],
"page": "99qh",
}
res = requests.get(
"http://service.99qh.com/Storage/Storage.aspx",
params=params,
headers=inventory_temp_headers,
)
if plot:
with open(
"{}_{}.jpg".format(
exchange_name, code_temp_dict[str(exchange)][str(symbol)]
),
"wb",
) as fs:
print("保存图片到本地: {}".format(os.getcwd()))
fs.write(res.content)
return inventory_table
else:
url = "http://service.99qh.com/Storage/Storage.aspx"
params = {
"page": "99qh"
}
res = requests.get(url, params=params, headers=sample_headers)
soup = BeautifulSoup(res.text, "lxml")
view_state = soup.find_all(attrs={"id": "__VIEWSTATE"})[0]["value"]
even_validation = soup.find_all(attrs={"id": "__EVENTVALIDATION"})[0][
"value"
]
url = "http://service.99qh.com/Storage/Storage.aspx"
params = {
"page": "99qh"
}
res = requests.get(url, params=params, headers=sample_headers)
soup = BeautifulSoup(res.text, "lxml")
view_state = soup.find_all(attrs={"id": "__VIEWSTATE"})[0]["value"]
even_validation = soup.find_all(attrs={"id": "__EVENTVALIDATION"})[0][
"value"
]
# print(symbol)
payload = {
"__EVENTTARGET": "btnZoomAll",
"__EVENTARGUMENT": "",
"__LASTFOCUS": "",
"__VIEWSTATE": view_state,
"__VIEWSTATEGENERATOR": "6EAC22FA",
"__EVENTVALIDATION": even_validation,
"ddlExchName": int(exchange),
"ddlGoodsName": int(symbol),
}
res = requests.post(url, data=payload, headers=qh_headers)
inventory_table = pd.read_html(res.text)[-1].T
inventory_table.columns = inventory_table.iloc[0, :]
inventory_table = inventory_table.iloc[1:, :]
inventory_table.reset_index(inplace=True, drop=True)
inventory_table.columns.name = None
soup = BeautifulSoup(res.text, "lxml")
exchange_name = (
soup.find_all("select")[0]
.find_all(attrs={"selected": "selected"})[0]
.get_text()
)
params = {
"ChartDirectorChartImage": "chart_chartData",
"cacheId": soup.find_all(attrs={"id": "chartData"})[0]["src"]
.split("&")[-2]
.split("=")[1],
"page": "99qh",
}
res = requests.get(
"http://service.99qh.com/Storage/Storage.aspx",
params=params,
headers=inventory_temp_headers,
)
if plot:
with open(
"{}_{}.jpg".format(
exchange_name, code_temp_dict[str(exchange)][str(symbol)]
),
"wb",
) as fs:
print("保存图片到本地: {}".format(os.getcwd()))
fs.write(res.content)
return inventory_table
except:
continue
if __name__ == "__main__":
futures_inventory_99_df = futures_inventory_99(exchange=1, symbol=6, plot=False)
print(futures_inventory_99_df)
```
#### File: akshare/stock_fundamental/stock_finance_hk.py
```python
import pandas as pd
import requests
def stock_financial_hk_report_em(
stock: str = "00700", symbol: str = "现金流量表", indicator: str = "年度"
) -> pd.DataFrame:
"""
    Eastmoney - Hong Kong stocks - financial reports - the three major statements
    https://emweb.securities.eastmoney.com/PC_HKF10/FinancialAnalysis/index?type=web&code=00700
    :param stock: stock code
    :type stock: str
    :param symbol: choice of {"资产负债表" (balance sheet), "利润表" (income statement), "现金流量表" (cash flow statement)}
    :type symbol: str
    :param indicator: choice of {"年度" (annual), "报告期" (reporting period)}
    :type indicator: str
    :return: the requested financial statement
:rtype: pandas.DataFrame
"""
if indicator == "年度":
rtype = 6
elif indicator == "报告期":
rtype = 0
else:
raise Exception("非法的关键字!", indicator)
if symbol == "资产负债表":
url = f"https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetZCFZB?code={stock}&startdate=&ctype=4&rtype={rtype}" # 资产负债表
elif symbol == "利润表":
url = f"https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetLRB?code={stock}&startdate=&ctype=4&rtype={rtype}" # 利润表
elif symbol == "现金流量表":
url = f"https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetXJLLB?code={stock}&startdate=&rtype={rtype}" # 现金流量表
r = requests.get(url)
temp_df = pd.DataFrame(eval(r.text)["data"])
temp_df.columns = temp_df.loc[0]
temp_df = temp_df.drop(0, axis=0)
temp_df['截止日期'] = pd.to_datetime(temp_df["截止日期"], format="%y-%m-%d").dt.date
temp_df.reset_index(drop=True, inplace=True)
temp_df.columns.name = None
return temp_df
def stock_financial_hk_analysis_indicator_em(
stock: str = "00700", indicator: str = "年度"
) -> pd.DataFrame:
"""
    Eastmoney - Hong Kong stocks - financial analysis - key indicators
    https://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/index?type=web&code=00700
    :param stock: stock code
    :type stock: str
    :param indicator: choice of {"年度" (annual), "报告期" (reporting period)}
    :type indicator: str
    :return: key financial indicators
:rtype: pandas.DataFrame
"""
if indicator == "年度":
key = "zyzb_an"
elif indicator == "报告期":
key = "zyzb_abgq"
else:
raise Exception("非法的关键字!", indicator)
url = f"http://emweb.securities.eastmoney.com/PC_HKF10/NewFinancialAnalysis/GetZYZB?code={stock}"
r = requests.get(url)
temp_df = pd.DataFrame.from_records(eval(r.text)["data"][key])
temp_df.columns = temp_df.loc[0]
temp_df = temp_df.drop(0, axis=0)
temp_df["周期"] = pd.to_datetime(temp_df["每股指标"], format="%y-%m-%d").dt.date
temp_df = temp_df.drop("每股指标", axis=1)
temp_df = temp_df[
[
"周期",
"基本每股收益(元)",
"稀释每股收益(元)",
"TTM每股收益(元)",
"每股净资产(元)",
"每股经营现金流(元)",
"每股营业收入(元)",
"成长能力指标",
"营业总收入(元)",
"毛利润",
"归母净利润",
"营业总收入同比增长(%)",
"毛利润同比增长(%)",
"归母净利润同比增长(%)",
"营业总收入滚动环比增长(%)",
"毛利润滚动环比增长(%)",
"归母净利润滚动环比增长(%)",
"盈利能力指标",
"平均净资产收益率(%)",
"年化净资产收益率(%)",
"总资产净利率(%)",
"毛利率(%)",
"净利率(%)",
"年化投资回报率(%)",
"盈利质量指标",
"所得税/利润总额(%)",
"经营现金流/营业收入(%)",
"财务风险指标",
"资产负债率(%)",
"流动负债/总负债(%)",
"流动比率",
]
]
temp_df.reset_index(drop=True, inplace=True)
temp_df.columns.name = None
temp_df['周期'] = pd.to_datetime(temp_df['周期']).dt.date
return temp_df
if __name__ == "__main__":
stock_financial_hk_analysis_indicator_em_df = (
stock_financial_hk_analysis_indicator_em(stock="00700", indicator="年度")
)
print(stock_financial_hk_analysis_indicator_em_df)
stock_financial_hk_analysis_indicator_em_df = (
stock_financial_hk_analysis_indicator_em(stock="00700", indicator="报告期")
)
print(stock_financial_hk_analysis_indicator_em_df)
stock_financial_hk_report_em_df = stock_financial_hk_report_em(
stock="00700", symbol="资产负债表", indicator="年度"
)
print(stock_financial_hk_report_em_df)
stock_financial_hk_report_em_df = stock_financial_hk_report_em(
stock="00700", symbol="资产负债表", indicator="报告期"
)
print(stock_financial_hk_report_em_df)
stock_financial_hk_report_em_df = stock_financial_hk_report_em(
stock="00700", symbol="利润表", indicator="年度"
)
print(stock_financial_hk_report_em_df)
stock_financial_hk_report_em_df = stock_financial_hk_report_em(
stock="00700", symbol="利润表", indicator="报告期"
)
print(stock_financial_hk_report_em_df)
stock_financial_hk_report_em_df = stock_financial_hk_report_em(
stock="00700", symbol="现金流量表", indicator="年度"
)
print(stock_financial_hk_report_em_df)
stock_financial_hk_report_em_df = stock_financial_hk_report_em(
stock="00700", symbol="现金流量表", indicator="报告期"
)
print(stock_financial_hk_report_em_df)
``` |