metadata | text
---|---|
{
"source": "jpivarski/rnn-oz",
"score": 3
} |
#### File: jpivarski/rnn-oz/charlevel.py
```python
import sys
import time
import numpy as np
# data I/O
data = open('cleaned.txt', 'r').read() # should be simple plain text file
chars = list(set(data))
data_size, vocab_size = len(data), len(chars)
print 'data has %d characters, %d unique.' % (data_size, vocab_size)
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
# hyperparameters
hidden_size = int(sys.argv[1]) # size of hidden layer of neurons
seq_length = int(sys.argv[2]) # number of steps to unroll the RNN for
learning_rate = float(sys.argv[3])
# model parameters
Wxh = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden
Whh = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
def lossFun(inputs, targets, hprev):
"""
inputs, targets are both lists of integers.
hprev is Hx1 array of initial hidden state
returns the loss, gradients on model parameters, and last hidden state
"""
xs, hs, ys, ps = {}, {}, {}, {}
hs[-1] = np.copy(hprev)
loss = 0
# forward pass
for t in xrange(len(inputs)):
xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
xs[t][inputs[t]] = 1
hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state
ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars
loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
# backward pass: compute gradients going backwards
dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
dbh, dby = np.zeros_like(bh), np.zeros_like(by)
dhnext = np.zeros_like(hs[0])
for t in reversed(xrange(len(inputs))):
dy = np.copy(ps[t])
dy[targets[t]] -= 1 # backprop into y
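# Illustrative note (added, not in the original): for softmax + cross-entropy,
# the gradient of the loss w.r.t. the logits is p - one_hot(target), so copying
# the probabilities and subtracting 1 at the target index is the whole derivative
# step, e.g. p = [0.2, 0.7, 0.1], target = 1  ->  dy = [0.2, -0.3, 0.1].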
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(Why.T, dy) + dhnext # backprop into h
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
dbh += dhraw
dWxh += np.dot(dhraw, xs[t].T)
dWhh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(Whh.T, dhraw)
for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
def sample(h, seed_ix, n):
"""
sample a sequence of integers from the model
h is memory state, seed_ix is seed letter for first time step
"""
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
ixes = []
for t in xrange(n):
h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
y = np.dot(Why, h) + by
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(vocab_size), p=p.ravel())
x = np.zeros((vocab_size, 1))
x[ix] = 1
ixes.append(ix)
return ixes
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0
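# Sanity check (illustrative, assuming e.g. vocab_size ~ 65 and seq_length = 25):
# a uniform guess costs -ln(1/65) ~ 4.17 nats per character, so the smoothed loss
# should start near 25 * 4.17 ~ 104 and decrease as training progresses.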
hprev = np.zeros((hidden_size, 1)) # reset RNN memory
while True:
try:
for n in xrange(data_size / seq_length):
p = n * seq_length
inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]
# sample from the model now and then
if n % 500 == 0:
sample_ix = sample(hprev, inputs[0], 200)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
print '----\n %s \n----' % (txt, )
sys.stdout.flush()
# forward seq_length characters through the net and fetch gradient
loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if n % 100 == 0:
print 'iter %d, loss: %f, through training: %f%%' % (n, smooth_loss, 100.0*n/(data_size / seq_length))
sys.stdout.flush()
# perform parameter update with Adagrad
for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
[dWxh, dWhh, dWhy, dbh, dby],
[mWxh, mWhh, mWhy, mbh, mby]):
mem += dparam * dparam
param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update
sample_ix = sample(hprev, inputs[0], 10000)
txt = ''.join(ix_to_char[ix] for ix in sample_ix)
print '---- BIG SAMPLE:\n %s \n----' % (txt, )
sys.stdout.flush()
time.sleep(60) # give the others a chance at some CPU time
except Exception as err:
print "ERROR!", str(err), "Restarting..."
Wxh = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden
Whh = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0
hprev = np.zeros((hidden_size, 1)) # reset RNN memory
``` |
{
"source": "jpiwoni/turtledraw",
"score": 4
} |
#### File: jpiwoni/turtledraw/TurtleDraw_jp.py
```python
import turtle
TEXTFILENAME = 'turtle-draw.txt'
TEXTFILENAME = input('Enter file name: ')
print('TurtleDraw')
turtleScreen = turtle.Screen()
turtleScreen.setup(450, 450)
turtleDraw = turtle.Turtle()
turtleDraw.speed(10)
turtleDraw.penup()
totalLength = 0
point1 = [0,0]
print ('Reading a text file line by line')
turtleDrawTextfile = open(TEXTFILENAME, 'r')
line = turtleDrawTextfile.readline()
previousPosition = [0,0]
totalDistance = 0
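# Assumed input format (inferred from the parsing below, not documented in the
# original): each drawing line is "<color> <x> <y>", e.g. "red 100 -50", and a
# line holding a single word (e.g. "stop") lifts the pen before the next segment.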
while line:
print(line, end='')
parts = line.split(' ')
if (len(parts) == 3):
color = parts[0]
x = int(parts[1])
y = int(parts[2])
turtleDraw.color(color)
turtleDraw.goto(x,y)
import math
currentPosition = [x,y]
distanceTurtle = math.sqrt( ((currentPosition[0]-previousPosition[0])**2)+((currentPosition[1]-previousPosition[1])**2))
print(' ')
print(distanceTurtle)
totalDistanceList = (totalDistance, distanceTurtle)
totalDistance = sum(totalDistanceList)
print(' ')
print( totalDistance)
previousPosition = currentPosition
turtleDraw.pendown()
if (len(parts) == 1): # Assumes that a single word on a line is "stop"
turtleDraw.penup()
line = turtleDrawTextfile.readline()
print(totalDistance)
turtle.setposition(60,-160)
style = ('Arial', 10, 'italic')
turtle.write('Total distance marked = ' + str(totalDistance), font=style, align='center')
def exitTurtle():
window.bye()
def close():
close = turtle.Turtle()
close.speed(0)
#close.color("white")
close.penup()
close.hideturtle()
close.goto(0,0)
close.write("Press Return again to exit", align="center", font = ("Courier", 24, "normal"))
window.listen()
window.onkeypress(exitTurtle, "Return")
window = turtle.Screen()
window.listen()
window.onkeypress(close, "Return")
window.mainloop()
turtle.done()
turtleDrawTextfile.close()
print ('\nEnd')
``` |
{
"source": "jpizarrom/wandb-allennlp",
"score": 3
} |
#### File: wandb-allennlp/wandb_allennlp/commandline.py
```python
import sys
from copy import deepcopy
import re
from typing import List, Iterable, Dict, Union, Optional, Callable, Tuple, Any
import json
import functools
import argparse
from collections import OrderedDict
import wandb
from allennlp.run import run as allennlp_run
from pathlib import Path
import logging
import os
logger = logging.getLogger(__name__)
class Fixes(object):
def _replace(self, number_str_match): # type: ignore
return ": " + number_str_match.group(1) # return just the number
def __call__(self, s): # type: ignore
res_copy = re.sub(r": \"([0-9.]+)\"", self._replace, s)
return res_copy
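# Illustrative example (hypothetical input): Fixes unquotes numeric values in a
# JSON-like string, e.g.
#   Fixes()('{"lr": "0.001", "name": "adam"}')  ->  '{"lr": 0.001, "name": "adam"}'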
def match_all(inp: Iterable[str],
pattern: str = r"^--\S+=\S+$") -> Dict[str, str]:
key_value = {}
for k_v in inp:
m = re.match(pattern, k_v)
if m is None:
raise ValueError("{} not in --key=value form".format(k_v))
k, v = m.group(0)[2:].split('=')
key_value[k] = v
return key_value
def filter_level0(
pairs: Dict[str, str]) -> Dict[str, Union[str, Dict[str, str]]]:
""" Filter out key, value pairs for form parent.child: value"""
result = {}
for k, v in pairs.items():
key_parts = k.split('.')
if len(key_parts) > 1: # has form parent.children
parent = key_parts[0] # create the new key
subkey = '.'.join(key_parts[1:])
parent_dict: Dict[str, str] = result.get(parent,
{}) # type: ignore
parent_dict[subkey] = v
result[parent] = parent_dict # type: ignore
else:
result[k] = v
return result # type: ignore
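# Illustrative example (hypothetical input): dotted keys are nested one level,
#   {"trainer.lr": "0.1", "seed": "1"}  ->  {"trainer": {"lr": "0.1"}, "seed": "1"}
# and deeper keys keep the remainder joined, e.g. "a.b.c" becomes {"a": {"b.c": ...}}.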
def filter_drop_empty_values(pairs: Dict) -> Dict:
res = {}
for k, v in pairs.items():
if v:
res[k] = v
return res
def extract_program_and_non_kwargs(everything: List[str],
allowed_pattern: str = r"^[^-=]+$"
) -> Tuple[str, str]:
words = everything
regex = re.compile(allowed_pattern)
filter(lambda w: not regex.match(w), words)
prog_and_non_kwargs = []
kwargs = []
for word in words:
if regex.fullmatch(word):
prog_and_non_kwargs.append(word)
else:
kwargs.append(word)
return (prog_and_non_kwargs), (kwargs)
def process_argv_args(argv,
fixers: Optional[List[Callable[[str], str]]] = None,
filters: Optional[List[Callable[[Dict], Dict]]] = None):
if fixers is None:
fixers = [Fixes()]
if filters is None:
filters = [filter_level0]
args = deepcopy(argv)
# first argument should be prog_name
prog, args = extract_program_and_non_kwargs(args)
# expected pattern
key_value_pairs = match_all(args)
required_form = functools.reduce(lambda res, f: f(res), filters,
key_value_pairs)
required_string_form = ' '.join([
"--{} {}".format(k, json.dumps(v))
if isinstance(v, dict) else "--{} {}".format(k, v)
for k, v in required_form.items()
])
fixed = functools.reduce(lambda res, f: f(res), fixers,
required_string_form)
return ' '.join([prog, fixed])
def create_dynamic_parser(
args: Optional[List[str]] = None,
known_args_parser: Optional[argparse.ArgumentParser] = None):
if known_args_parser is None:
known_args_parser = argparse.ArgumentParser()
if known_args_parser is not None:
known_args, unknown_args = known_args_parser.parse_known_args(args)
for unknown_arg in unknown_args:
# expect them to be like --name=value
try:
name, value = unknown_arg.split('=')
except Exception as e:
raise ValueError(
"{} no in --key=value form".format(unknown_arg)) from e
if name.startswith(("-", "--")):
known_args_parser.add_argument(name)
else:
raise ValueError("{} no in --key=value form".format(unknown_arg))
return known_args_parser
def apply_filters_on_dict(args_dict: Dict, filters):
filtered = functools.reduce(lambda res, f: f(res), filters, args_dict)
return filtered
class PatterReformatter(object):
def __init__(self, pattern=("--{}", "{}")):
self.pattern = pattern
def __call__(self, k, v):
if isinstance(v, dict):
res = (self.pattern[0].format(k), self.pattern[1].format(
json.dumps(v)))
else:
res = (self.pattern[0].format(k), self.pattern[1].format(v))
return res
class PatterReformatterWithNameExceptions(PatterReformatter):
def __init__(self,
pattern: Tuple[str, str] = ("--{}", "{}"),
exceptions: Dict[str, Callable] = None) -> None:
if exceptions is None:
raise ValueError("Use PatterReformatter instead")
super().__init__(pattern) # type: ignore
self.exceptions = exceptions
def __call__(self, k: Any, v: Any) -> Tuple[str, str]:
if k in self.exceptions:
return (self.exceptions[k])(k, v)
else:
return super().__call__(k, v)
def reformat_as_required(args_dict: Dict,
reformater=None,
fixers=None,
reorder=None) -> List[str]:
if reformater is None:
reformater = PatterReformatter()
if fixers is None:
fixers = [Fixes()]
if reorder is not None:
if isinstance(reorder, list):
args_dict = functools.reduce(lambda res, f: f(res), reorder,
args_dict)
else:
args_dict = reorder(args_dict)
# required_string_form = [reformater(k, v) for k, v in args_dict.items()]
required_string_form = []
for k, v in args_dict.items():
temp = reformater(k, v)
required_string_form.append(temp)
fixed = [
functools.reduce(lambda res, f: f(res), fixers, v)
for key_value_or_just_value_tuple in required_string_form
for v in key_value_or_just_value_tuple
]
return fixed
def init_wandb():
pass
def sort_like(inp, ref):
len_ref = len(ref)
return sorted(inp, key=lambda v: ref.index(v) if v in ref else len_ref)
def sort_dict_like(inp_dict, ref_list):
len_ref = len(ref_list)
sorted_t = []
for k in sorted(
inp_dict,
key=lambda v: ref_list.index(v) if v in ref_list else len_ref):
sorted_t.append((k, inp_dict[k]))
return OrderedDict(sorted_t)
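# Illustrative example (hypothetical input): keys found in ref_list come first in
# ref_list order, everything else keeps a stable position at the end, e.g.
#   sort_dict_like({"b": 2, "x": 9, "a": 1}, ["a", "b"])
#   -> OrderedDict([("a", 1), ("b", 2), ("x", 9)])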
class WandbToAllenNLPTranslater(object):
def __init__(self,
args: Optional[Dict[Any, Any]] = None,
pos_args: Optional[List[str]] = None,
expected_wandb_args: Optional[Dict[str, str]] = None,
order: Optional[List[str]] = None,
fixed_kwargs_args: Optional[List[Tuple[str, Any]]] = None,
filters: List[Callable] = None,
fixers: List[Callable] = None,
keep_prog_name=True):
self.parser = argparse.ArgumentParser()
self.keep_prog_name = keep_prog_name
if fixed_kwargs_args is None:
self.fixed_kwargs_args = {}
else:
self.fixed_kwargs_args = fixed_kwargs_args
if args is not None:
for arg_name, props in args.items():
self.parser.add_argument('--' + arg_name, **props)
if filters is None:
self.filters = [filter_level0, filter_drop_empty_values]
else:
self.filters = filters
if fixers is None:
self.fixers = [Fixes()]
else:
self.fixers = fixers
if order is not None:
self.reorder = lambda arr: sort_dict_like(arr, pos_args)
else:
self.reorder = None
if pos_args is not None:
def create_pos(k, v):
return (v, )
self.reformater = PatterReformatterWithNameExceptions(
exceptions={name: create_pos
for name in pos_args})
else:
self.reformater = PatterReformatter()
self.expected_wandb_args = expected_wandb_args
self.target_args: Optional[List[str]] = None
self.wandb_args: Optional[Dict[str, Any]] = None
def translate(self, args: Optional[List[str]] = None) -> List[str]:
parsed_args = create_dynamic_parser(
known_args_parser=self.parser).parse_args(args)
vars_dict = vars(parsed_args)
# pop wandb args
logger.debug("self.expected_wandb_args:{}".format(
self.expected_wandb_args))
if self.expected_wandb_args is not None:
logger.debug("entered if")
logger.debug("expected_wandb_args:{}".format(
self.expected_wandb_args))
self.wandb_args = {
arg: vars_dict.pop(arg, default)
for arg, default in self.expected_wandb_args.items()
}
logger.debug("wandb_args: {}".format(self.wandb_args))
transformed = apply_filters_on_dict(vars_dict, self.filters)
reformated = reformat_as_required(
transformed,
reformater=self.reformater,
fixers=self.fixers,
reorder=self.reorder)
# add fixed args
fixed_args: List[str] = []
for k, v in self.fixed_kwargs_args:
fixed_args += ["--{}".format(k), "{}".format(v)]
self.target_args = reformated + fixed_args
return self.target_args
def translate_and_replace(self, args: Optional[List[str]] = None):
translated_list = self.translate(args)
if self.keep_prog_name:
prog = [sys.argv[0]]
else:
prog = []
sys.argv = prog + translated_list
return sys.argv
def init_wandb():
run = wandb.init()
# add serialization dir if training
if sys.argv[1] == 'train':
sys.argv.append('--serialization-dir {}'.format(run.dir))
def process_wandb_arg_value(arg: str, value: Any):
if arg == 'tags':
return value.split(",")
if arg == 'tensorboard':
if value:
wandb.tensorboard.patch(save=True, tensorboardX=False)
return None
def setup_wandb(expected: Dict[str, Any], pos_args: List[str],
order: List[str], fixed_kwargs_args: List[Tuple[str, str]],
expected_wandb_args: Dict[str, Any]):
# init wandb
run = wandb.init()
serialization_dir = Path('/tmp', run.id) / 'allennlp_serialization_dir'
fixed_kwargs_args.append(('serialization-dir', str(serialization_dir)))
translater = WandbToAllenNLPTranslater(
expected,
pos_args,
expected_wandb_args=expected_wandb_args,
order=order,
fixed_kwargs_args=fixed_kwargs_args)
translater.translate_and_replace()
# use the wandb args on run
for wandb_arg, value in translater.wandb_args.items():
processed_val = process_wandb_arg_value(wandb_arg, value)
logger.debug("setting {} on run object to {}".format(
wandb_arg, processed_val))
#setattr(run, wandb_arg, processed_val)
# logger.debug("wandb.run object's {} attribute is {}".format(
# wandb_arg, getattr(wandb.run, wandb_arg)))
#logger.debug("type of run: {}".format(type(run)))
#logger.debug("dir of run: {}".format(dir(run)))
return run
def run():
# arguments which SHOULD be present
expected = {'local_config_file': dict(type=str)}
# All the arguments from wandb server will be
# named arguments. Following specifies which of these
# to convert to positional arguments while passing them to allennlp
# specify the arguments which are to be converted
# example below will create:
# $ allennlp <subcommand> <local_config_file> ...
# concretely:
# $ allennlp train configs/lstm_nli.jsonnet
pos_args = ['subcommand', 'local_config_file']
# if arguments have to follow a strict order, specify it here
# Most likely, positions arguments should be required to follow a specific order
order = ['subcommand', 'local_config_file', 'serialization-dir']
# These are arguments which will not be sent by the wandb server
# but allennlp always needs these
fixed_kwargs_args = [('include-package', 'models'),
('include-package', 'wandb_allennlp')]
expected_wandb_args = {'tags': 'unspecified', 'tensorboard': False}
setup_wandb(expected, pos_args, order, fixed_kwargs_args,
expected_wandb_args)
allennlp_run()
``` |
{
"source": "Jpizza99/DiscordRoleBot",
"score": 3
} |
#### File: Jpizza99/DiscordRoleBot/bot.py
```python
import discord
KEY = '' # Not Today!
client = discord.Client()
@client.event
async def on_ready():
print('Client Connected')
@client.event
async def on_raw_reaction_add(payload):
message_id = payload.message_id
if message_id == 943925071712493679:
guild_id = payload.guild_id
guild = discord.utils.find(lambda g : g.id == guild_id, client.guilds)
role = discord.utils.get(guild.roles, name=payload.emoji.name)
# This is if emojiName != the Role name
# if payload.emoji.name == 'GasAlert':
# role = discord.utils.get(guild.roles, name='GasAlert')
# elif payload.emoji.name == 'Whatever':
# do anything
if role is not None:
member = await(await client.fetch_guild(payload.guild_id)).fetch_member(payload.user_id)
if member is not None:
await member.add_roles(role)
print('Done!')
else: print('Member not found')
else: print('Role not found')
@client.event
async def on_raw_reaction_remove(payload):
message_id = payload.message_id
if message_id == 943925071712493679:
guild_id = payload.guild_id
guild = discord.utils.find(lambda g : g.id == guild_id, client.guilds)
role = discord.utils.get(guild.roles, name=payload.emoji.name)
# This is if emojiName != the Role name
# if payload.emoji.name == 'GasAlert':
# role = discord.utils.get(guild.roles, name='GasAlert')
# elif payload.emoji.name == 'Whatever':
# do anything
if role is not None:
member = await(await client.fetch_guild(payload.guild_id)).fetch_member(payload.user_id)
if member is not None:
await member.remove_roles(role)
print('Done!')
else: print('Member not found')
else: print('Role not found')
client.run(KEY)
``` |
{
"source": "jpj625/ha-modernforms",
"score": 2
} |
#### File: custom_components/modernforms/config_flow.py
```python
from homeassistant import config_entries
import voluptuous as vol
from .const import \
DOMAIN, \
CONF_ENABLE_LIGHT, CONF_FAN_HOST, CONF_FAN_NAME
def get_schema(user_input=None):
data = {}
if user_input is not None:
data = user_input
def default(key, default_value=None):
kwargs = {}
if bool(data.get(key)):
kwargs['default'] = data[key]
elif default_value:
kwargs['default'] = default_value
return kwargs
return vol.Schema({
vol.Required(CONF_FAN_HOST, **default(CONF_FAN_HOST)): str,
vol.Required(CONF_FAN_NAME, **default(CONF_FAN_NAME)): str,
vol.Required(CONF_ENABLE_LIGHT, **default(CONF_ENABLE_LIGHT, True)): bool
})
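# Note (illustrative): the default() helper only injects a voluptuous "default"
# keyword when a previous user_input value (or the hard-coded True for the light
# toggle) exists, so re-showing the form preserves what the user already typed.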
class ModernFormsConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
VERSION = 1
async def async_step_user(self, user_input=None):
errors = {}
if user_input is not None:
if len(errors) == 0:
await self.async_set_unique_id(
user_input[CONF_FAN_HOST], raise_on_progress=False
)
return self.async_create_entry(
title=user_input[CONF_FAN_NAME],
data=user_input,
)
return self.async_show_form(
step_id="user", data_schema=get_schema(user_input), errors=errors)
``` |
{
"source": "jpjanet/citsampler",
"score": 3
} |
#### File: citsampler/citsampler/sampler.py
```python
import os
import sys
import time
import numpy as np
# load the constraint class handler
from citsampler.constraints import Constraint
from pyclustering.cluster.kmedoids import kmedoids # load the clustering alg
def outputWriter(filepath, data):
"""
Write an array of sampled points to a text file,
one space-separated row per line
:param filepath: string, path to output file
:param data: np array, data to write
"""
with open(filepath, 'w') as f:
for row in data:
f.write(" ".join([str(i) for i in row])+'\n')
class constrainedMCMC:
"""
Defines a Monte Carlo sampler for constrained
high-dimensional integration using rejection sampling
:param constraint: constraint class object defining the task
:param stepsize: float in [0,1], starting stepsize
"""
def __init__(self, constraint, stepsize):
"""
Constructor, binds the constraint object and starting stepsize
to the sampler instance
"""
# bind attributes
self.constraint = constraint
self.state = np.array(self.constraint.get_example())
self.dim = self.constraint.get_ndim()
self.stateHistory = [self.state]
self.stepsize = stepsize
# initialize trajectory
self.steps = 0
self.ar = 1 # total acceptance ratio
self.ar100 = 1 # acceptance ratio, last 100 samples
def proposalDistribution(self):
"""
Proposal distribution for MCMC, generates new sample
based on the current state and stepsize by combination
with a random vector on the self.dim hypercube
"""
return (1.0-self.stepsize)*self.state + self.stepsize*(np.random.rand(self.dim))
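# Geometric note (illustrative): the proposal is a convex blend of the current
# state and a uniform random point in the unit hypercube, so stepsize = 1 amounts
# to uniform sampling while a small stepsize keeps proposals near the current state.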
def step(self):
"""
Take one step of rejection-sampling MCMC and update
the resulting acceptance ratios
"""
# generate proposal
proposal = self.proposalDistribution()
# rejection sampling
if self.constraint.apply(proposal): # accept
# note, we save only valid states in history
self.stateHistory.append(self.state)
# move to new state
self.state = proposal
# update accept rates
self.ar = (self.ar*self.steps + 1)/(self.steps+1)
self.ar100 = (self.ar100*99 + 1)/(100)
else: # reject
# update accept rates, state does not move
self.ar = (self.ar*self.steps)/(self.steps+1)
self.ar100 = (self.ar100*99)/(100)
# update counter
self.steps += 1
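# Bookkeeping note (illustrative): ar is the exact acceptance rate over all steps,
# while ar100 = 0.99*ar100 + 0.01*accept is an exponential moving average with an
# effective window of roughly 100 steps, used by rsMcmcPhase to adapt the stepsize.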
def rsMcmcPhase(constraint, targetSamples, numChains=5,
maxIts=False, minIts=False,
targetArLow=0.05, targetArHigh=0.25,
stepModifier=1.05):
"""
Perform multi-chain rejection-sampling MCMC on the provided constraint object
:param constraint: constraint class object defining the task
:param targetSamples: int, number of valid points to seek per chain
:param maxIts: int, maximum iterations per chain
:param minIts: int, minimum iterations per chain
:param targetArHigh: float in [0,1], maximum 100-step ar before step is increased
:param targetArLow: float in [0,1], minimum 100-step ar before step is reduced
:param stepModifier: float >1, amount to modify stepsize by
:return: np.ndarray, valid points from all chains (may contain duplicates)
"""
# set iteration control if not provided
if not maxIts:
# max number of chain iterations to perform
# 500k or 100 per sample, at least
maxIts = max(5e5, 1e2*targetSamples)
if not minIts:
# minimum number of chain iterations to perform
# between 5 x target samples or 5000
# cannot be more than maxIts,
minIts = min(max(5*targetSamples, 5000), maxIts)
# repeat numChains times:
for cn in range(0, numChains):
# time per chain
chainTime = time.time()
# create the MCMC chain
mc = constrainedMCMC(constraint, 1.0)
# loop control
terminate = False
while not terminate:
# advance mcmc
mc.step()
# mixing control by checking acceptance rate over last 100 steps
if not mc.steps % 100 and mc.steps > 0:
if mc.ar100 < targetArLow:
# if local ar is too low, make step smaller
mc.stepsize = max(mc.stepsize/stepModifier, 1e-18)
elif mc.ar100 > targetArHigh:
# if local ar is too high, make step bigger
mc.stepsize = min(stepModifier*mc.stepsize, 1)
# check for max iterations reached
if mc.steps >= maxIts:
print('Could not find enough samples in ' +
str(maxIts) + ' iterations.')
terminate = True
# check if targetSamples and min iteration criteria are reached
elif len(mc.stateHistory) >= targetSamples and mc.steps >= minIts:
terminate = True
# save sampled points:
if not cn:
mcdata = np.array(mc.stateHistory)
else:
mcdata = np.row_stack([mcdata, np.array(mc.stateHistory)])
# print status of chain
msg = 'mc chain ' + str(cn) + ': found ' + str(len(mc.stateHistory)) + ' valid points in '\
+ str(mc.steps) + ' iterations taking ' \
+ str(round(time.time() - chainTime, 2)) + ' seconds'
print(msg)
return(mcdata)
def sampler(inputFile, outputFile, nResults):
"""
Function to find nResults spanning points satisfying the constraints given
in the inputFile, and write them to outputFile
:param inputFile: string, path to input file
:param outputFile: string, target output file
:param nResults: int, number of results to find
:return: int, outcome flag (0=normal, 1=error)
"""
# start total timer
startTime = time.time()
# test the file exists
try:
constraintInstance = Constraint(fname=inputFile)
except FileNotFoundError as fnf_error:
print(fnf_error)
sys.exit(1)
except:
print('Uncaught error in importing constraint file')
sys.exit(1)
# params for MCMC run, could be read in as input instead
# number of chains to run
numChains = 6
# number of results to seek per chain
targetSamples = 5*nResults
# start mcmc clock
mcStart = time.time()
# do mcmc operation
mcdata = rsMcmcPhase(constraint=constraintInstance,
targetSamples=targetSamples, numChains=numChains)
# trim out any duplicates
mcdata = np.unique(mcdata, axis=0)
# end clock and print mcmc status
msg = 'MC phase complete, found a total of ' + str(mcdata.shape[0]) + \
' unique interior points in ' + \
str(round(time.time() - mcStart, 0)) + ' seconds'
print(msg)
# determine if the correct number of points have been found
if mcdata.shape[0] <= nResults:
print('could not find target number of points, writing what is available')
outputWriter(filepath=outputFile, data=mcdata)
exitStatus = 1 # error
else:
clusterStart = time.time()
print('starting clustering phase, seeking ' + str(nResults) + ' medoids')
# random initial medoids
startMeds = np.random.randint(
low=0, high=mcdata.shape[0], size=nResults)
# create cluster instance
kmed = kmedoids(
data=mcdata, initial_index_medoids=startMeds, tolerance=1e-10)
# run cluster
kmed.process()
# return centers
medoids = mcdata[kmed.get_medoids(), :]
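# Note (illustrative): k-medoids returns indices of actual sampled points, so each
# returned "center" is itself a constraint-satisfying interior point, unlike
# k-means centroids, which could fall outside the feasible region.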
msg = 'cluster phase complete, found a total of ' + str(medoids.shape[0]) \
+ ' interior points from ' + str(mcdata.shape[0]) \
+ ' in ' + str(round(time.time()-clusterStart, 0)) + ' seconds'
print(msg)
# test if the correct number of results are returned
if medoids.shape[0] == nResults:
exitStatus = 0
else:
msg = 'incorrect number of results obtained from clustering, writing ' \
+ str(medoids.shape[0]) + ' points to ' + outputFile
print(msg)
exitStatus = 1
# write results
outputWriter(filepath=outputFile, data=medoids)
# print final time
msg = 'routine complete in ' + \
str(round(time.time() - startTime, 0)) + ' seconds'
print(msg)
return(exitStatus)
``` |
{
"source": "jpjarvinen/cloud-services",
"score": 3
} |
#### File: jpjarvinen/cloud-services/app.py
```python
from flask import Flask
from flask import render_template, request, redirect
from database.operations import insert_db_data
from api import get_json
app = Flask(__name__)
""" Flask instance """
@app.route('/')
def index():
"""
Display homepage template
"""
return render_template('front-page.html')
@app.route('/form')
def form():
"""
Display form for content insertion
"""
return render_template('form.html')
@app.route('/submit', methods=['POST', 'GET'])
def submit():
"""
Process form's data or redirect if data is not correct.
Also redirects if methods!=POST
"""
if request.method != 'POST':
return redirect('/form')
if request.form['name'] and request.form['address'] and request.form['postcode'] and request.form['city'] and request.form['businessid'] and request.form['date'] and request.form['type']:
if insert_db_data(request.form['name'], request.form['address'], request.form['postcode'], request.form['city'], request.form['date'], request.form['type'], request.form['businessid']):
return "Data inserted"
else:
return "Data NOT INSERTED"
else:
return redirect('/form')
@app.route('/api/search/')
@app.route('/api/search/<keyword>')
def search(keyword=None):
"""
Display search results in JSON format
Parameters
----------
keyword : str
Search keyword. Default None
"""
return get_json(False, keyword)
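# Illustrative usage (hypothetical host/port and keyword):
#   curl http://localhost:5000/api/search/helsinki
# returns matching rows as JSON; /api/all below returns every row.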
@app.route('/api/all')
def api():
"""
Return all rows found in database in JSON format
"""
return get_json()
if __name__ == '__main__':
app.config['DEBUG'] = True
""" Set debugging on when using localhost """
app.run(threaded=False, port=5000)
""" Run Flask dev server on port 5000 """
``` |
{
"source": "jpjenk/data_archiver",
"score": 2
} |
#### File: jpjenk/data_archiver/archiver.py
```python
from flask import Flask, request
from flask_cors import CORS, cross_origin
import logging
import traceback
import aux
# WSGI application name
app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
# Data archive storage location
datapath = aux.get_config('storage')
# Configure the log file
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
log_handle = logging.FileHandler('logs/archiver.log')
log_handle.setLevel(logging.INFO)
log_format = logging.Formatter('%(asctime)s - %(message)s')
log_handle.setFormatter(log_format)
logger.addHandler(log_handle)
@app.errorhandler(404)
@cross_origin()
def not_found(error):
"""Return an error if route does not exist."""
logger.info('404 Not found')
return aux.responder('Not found', 404)
@app.route('/')
@cross_origin()
def index():
"""Base path, server listening check."""
logger.info('Base path access')
return aux.responder('PBDB data archive API running', 200)
@app.route('/archives/test', methods=['POST'])
@cross_origin()
def test():
"""Dev path."""
logger.info(request.json)
return aux.responder('test endpoint', 200)
@app.route('/schema')
@cross_origin()
def schema():
"""Base path, server listening check."""
logger.info('Database schema access')
return aux.schema_read()
@app.route('/archives/list')
@cross_origin()
def info():
"""Return information about existing data archives."""
logger.info('List path access')
return aux.archive_summary()
@app.route('/archives/retrieve/<int:archive_no>', methods=['GET'])
@cross_origin()
def retrieve(archive_no):
"""Retrieve an existing archive given an archive number."""
from flask import send_from_directory
try:
filename = ''.join([str(archive_no), '.bz2'])
file_type = aux.get_file_type(archive_no)
attachment_filename = ''.join(['pbdb_archive_',
str(archive_no),
file_type,
'.bz2'])
if archive_no:
try:
logger.info('Retrieving archive {0:d}'.format(archive_no))
return send_from_directory(datapath,
filename,
as_attachment=True,
attachment_filename=attachment_filename,
mimetype='application/x-compressed')
except Exception as e:
logger.info('Retrieval error archive {0:d}'.format(archive_no))
logger.info(e)
return aux.responder('Retrieval error', 500, archive_no)
else:
logger.info('Unspecified archive number')
return aux.responder('Unspecified archive number', 400, archive_no)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
return aux.responder('Error', 509)
@app.route('/archives/view/<int:archive_no>', methods=['GET'])
@cross_origin()
def view(archive_no):
"""Retrieve details on a single archive."""
logger.info('View path access')
return aux.view_archive(archive_no)
@app.route('/archives/delete/<int:archive_no>', methods=['GET'])
@cross_origin()
def delete(archive_no):
"""Delete a archive record from the system table."""
import subprocess
try:
# Read cookie
session_id = request.cookies.get('session_id')
# Credential check
try:
admin_user = aux.admin_check(session_id)
except Exception as e:
logger.info(e)
return aux.responder('Client error - Invalid credentials(1)', 400)
if not admin_user:
return aux.responder('Client error - Invalid credentials(2)', 400)
try:
# Remove DB record
aux.delete_archive(archive_no)
# Remove actual data file and data service response header
realpath = '/'.join([datapath, str(archive_no)])
headerpath = f'{realpath}.header'
archivepath = f'{realpath}.bz2'
syscall = subprocess.run(['rm', headerpath, archivepath])
logger.info(f'Files deleted: {headerpath}, {archivepath}')
return aux.responder('Success', 200, archive_no)
except Exception as e:
logger.info('Deletion error: {0:s}'.format(str(e)))
return aux.responder('Deletion error', 500, archive_no)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
return aux.responder('Error', 509)
@app.route('/archives/update/<int:archive_no>', methods=['POST', 'GET'])
@cross_origin()
def update(archive_no):
"""Update the archive metadata."""
try:
# Read cookie
if request.json.get('session_id'):
session_id = request.json['session_id']
else:
session_id = request.cookies.get('session_id')
# Credential check
try:
admin_user = aux.admin_check(session_id)
except Exception as e:
logger.info(e)
return aux.responder('Client error - Invalid credentials(1)', 400)
if not admin_user:
return aux.responder('Client error - Invalid credentials(2)', 400)
title = request.json.get('title')
desc = request.json.get('description')
authors = request.json.get('authors')
doi = request.json.get('doi')
if title or desc or authors or doi:
try:
aux.update_record(archive_no, title, desc, authors, doi)
except Exception as e:
logger.info(e)
return aux.responder('Server error - record update', 500, archive_no)
logger.info('Updated {0:d}'.format(archive_no))
return aux.responder('Success', 200, archive_no)
else:
logger.info('ERROR: Unsupported parameters for update')
return aux.responder('Parameter error', 400, archive_no)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
return aux.responder('Error', 509)
@app.route('/archives/create', methods=['POST'])
@cross_origin()
def create():
"""Create an archive file on disk."""
import subprocess
import os
from datetime import datetime as dt
import time
try:
# Attempt to find session_id in the payload (testing only)
if request.json.get('session_id'):
session_id = request.json['session_id']
# Otherwise pull it out of the browser cookie (normal functionality)
else:
session_id = request.cookies.get('session_id')
# Determine authorizer and enterer numbers from the session_id
try:
auth, ent = aux.user_info(session_id)
except Exception as e:
logger.info(e)
return aux.responder('Client error - Invalid session ID', 400)
# Determine if the user has an ORCID
has_orcid = aux.check_for_orcid(ent)
logger.info(f'Enter ID {ent} has ORCID {has_orcid}')
if not has_orcid:
return aux.responder('Missing ORCID', 403)
# Extract user entered metadata from payload
authors = request.json.get('authors', 'Enter No. ' + str(ent))
title = request.json.get('title')
desc = request.json.get('description', 'No description')
# Extract components of data service call from payload
path = request.json.get('uri_path')
args = request.json.get('uri_args')
# Parameter checks
if not title:
return aux.responder('Missing title', 400)
if not args:
return aux.responder('Missing uri_args', 400)
if path:
if path[0] != '/':
return aux.responder('uri_path not preceded by "/"', 400)
else:
return aux.responder('Missing uri_path', 400)
# Build data service URI
base = aux.get_config('dataservice')
uri = ''.join([base, path, '?', args])
uri = uri.replace(' ', '%20')
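# Illustrative example (hypothetical values, not from the original): with
# base = 'https://paleobiodb.org/data1.2', path = '/occs/list.csv' and
# args = 'base_name=Canis', the assembled uri is
# 'https://paleobiodb.org/data1.2/occs/list.csv?base_name=Canis'.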
# Initiate new record in database
try:
aux.create_record(auth, ent, authors, title, desc, path, args)
logger.info('Record created. Enterer No: {0:d}'.format(ent))
except Exception as e:
logger.info(e)
return aux.responder('Server error - Record creation', 500)
time.sleep(2)
# Read archive_no back from the table and create filename
try:
archive_no = aux.get_archive_no(ent)
logger.info('Record created. Archive No: {0:d}'.format(archive_no))
except Exception as e:
logger.info(e)
aux.archive_status(archive_no, success=False)
return aux.responder('Server error - Archive number not found', 500, archive_no)
# Append the data path and remove extra "/" if one was added in config
realpath = '/'.join([datapath, str(archive_no)])
realpath = realpath.replace('//', '/')
# Use cURL to retrieve the dataset
token = '='.join(['session_id', session_id])
headerpath = realpath + '.header'
syscall = subprocess.run(['curl', '-s', '--cookie', token,
'-o', realpath, '-D', headerpath, uri])
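# Roughly equivalent shell command (illustrative):
#   curl -s --cookie 'session_id=<id>' -o <archive_no> -D <archive_no>.header '<uri>'
# i.e. the dataset body lands in realpath and the response headers in headerpath.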
# Check to see that there were no errors in the data service return
if syscall.returncode != 0 or not os.path.exists(headerpath):
logger.info('Archive download error')
aux.archive_status(archive_no, success=False)
return aux.responder('Server error - File retrieval', 500, archive_no)
with open(headerpath, 'r') as f:
content = f.readlines()
if '200' not in content[0]:
logger.info('Data service error')
aux.archive_status(archive_no, success=False)
return aux.responder('Server error - Data service', 500, archive_no)
# Compress and replace the retrieved dataset on disk
syscall = subprocess.run(['bzip2', '-f', realpath])
if syscall.returncode != 0:
logger.info('Archive compression error')
aux.archive_status(archive_no=archive_no, success=False)
return aux.responder('Server error - File compression', 500, archive_no)
# Archive was successfully created on disk
logger.info('Created archive number: {0:d}'.format(archive_no))
aux.archive_status(archive_no=archive_no, success=True)
# Determine the current year
yr = dt.now().year
# Dispatch email requesting DOI
result = aux.request_doi(archive_no, title, yr, authors, ent)
if result == 0:
logger.info('DOI email sent')
else:
logger.info(f'Server error - Email: {result}')
# Return 200 OK
return aux.responder('success', 200, archive_no)
except Exception as e:
logger.error(e)
logger.error(traceback.format_exc())
return aux.responder('Error', 509)
```
#### File: jpjenk/data_archiver/aux.py
```python
from flask import make_response, jsonify
def responder(msg, status, pbdb_id=None):
"""Format a JSON response."""
if pbdb_id:
return make_response(jsonify({'message': msg,
'status': status,
'pbdb_id': pbdb_id}), status)
else:
return make_response(jsonify({'message': msg,
'status': status}), status)
def get_config(setting):
"""Retrive archive storage path from settings file."""
import configparser
config = configparser.ConfigParser()
config.read('settings.cnf')
return str(config['environment'][setting])
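# Assumed settings.cnf layout (illustrative, inferred from the lookups in this
# module; not a documented schema, values hypothetical):
#   [environment]
#   storage = /path/to/archives
#   dataservice = https://paleobiodb.org/data1.2
#   base = https://paleobiodb.org
#   email = doi-requests@example.org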
def request_doi(archive_no, title, yr, authors, ent):
"""Dispatch email to specified addresses using UNIX sendmail."""
from email.mime.text import MIMEText
from subprocess import Popen, PIPE
try:
ent_email = get_ent_email(ent)
base = get_config('base')
default_emails = get_config('email')
email_addr = ','.join([default_emails, ent_email])
body = f'URL: {base}/classic/app/archive/view?id={archive_no}\n'
body += f'Creators: {authors}\n'
body += f'Title: {title}\n'
body += 'Publisher: Paleobiology Database\n'
body += f'Publication Year: {yr}\n'
body += 'Resource Type: Dataset\n'
body += '========\n'
body += f'PBDB Archive ID Number: {archive_no}\n'
body += 'DOI: Pending\n'
msg = MIMEText(body)
msg['From'] = '<EMAIL>'
msg['To'] = email_addr
msg['Subject'] = 'PBDB archive DOI request'
p = Popen(['/usr/sbin/sendmail', '-t', '-oi'], stdin=PIPE)
return p.communicate(msg.as_bytes())
except Exception as e:
return e
def check_for_orcid(ent):
"""Check to see if a user has a stored ORCID."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """SELECT orcid
FROM pbdb_wing.users
WHERE person_no = {0:d}
""".format(ent)
cursor.execute(sql)
for orcid in cursor:
orcid = orcid[0]
return False if orcid == '' else True
def get_ent_email(ent):
"""Retrieve user email from the database."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """SELECT email
FROM pbdb_wing.users
WHERE person_no = {0:d}
""".format(ent)
cursor.execute(sql)
for email in cursor:
ent_email = email[0]
return ent_email
def admin_check(session_id):
"""Validate credentials for update and create."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """SELECT user_id
FROM session_data
WHERE session_id = '{0:s}'
""".format(session_id)
cursor.execute(sql)
for user_id in cursor:
user_id = user_id[0]
cursor = db.cursor()
sql = """SELECT admin
FROM pbdb_wing.users
WHERE id = '{0:s}'
""".format(user_id)
cursor.execute(sql)
for admin in cursor:
admin = admin[0]
return False if admin == 0 else True
def user_info(session_id):
"""Retrieve authorizer and enterer numbers based on browser cookie."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """SELECT authorizer_no, enterer_no
FROM session_data
WHERE session_id = '{0:s}'
""".format(session_id)
cursor.execute(sql)
for authorizer_no, enterer_no in cursor:
auth = authorizer_no
ent = enterer_no
return auth, ent
def view_archive(archive_no):
"""Retrieve metadata for a single record."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """SELECT archive_no, title, doi, authors, created,
description, uri_path, uri_args
FROM data_archives
WHERE archive_no = {0:d}
LIMIT 1
""".format(archive_no)
cursor.execute(sql)
archives = list()
for archive_no, title, doi, authors, created, description, \
uri_path, uri_base in cursor:
archives.append({'archive_no': archive_no,
'title': title,
'doi': doi,
'authors': authors,
'created': created,
'description': description,
'uri_path': uri_path,
'uri_base': uri_base})
db.close()
return jsonify(archives)
def delete_archive(archive_no):
"""Permanently remove a dataset from the system."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """DELETE FROM data_archives
WHERE archive_no = {0:d}
LIMIT 1
""".format(archive_no)
try:
cursor.execute(sql)
db.commit()
except Exception as e:
db.rollback()
db.close()
# TODO: delete from file system?
def archive_names():
"""Return a hash of DOIs and actual filenames."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """SELECT doi, filename
FROM data_archives
"""
cursor.execute(sql)
doi_map = dict()
for doi, filename in cursor:
doi_map[doi.lower()] = filename
return doi_map
def schema_read():
"""Dump the header info to check db connector."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """SHOW COLUMNS
FROM data_archives
"""
cursor.execute(sql)
schema = list()
for row in cursor:
schema.append(row)
return make_response(jsonify(schema))
def archive_summary():
"""Load archive information from database."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """SELECT archive_no, title, doi, authors, created,
description, uri_path, uri_args
FROM data_archives
"""
cursor.execute(sql)
archives = list()
for archive_no, title, doi, authors, created, description, \
uri_path, uri_base in cursor:
archives.append({'archive_no': archive_no,
'title': title,
'doi': doi,
'authors': authors,
'created': created,
'description': description,
'uri_path': uri_path,
'uri_base': uri_base})
db.close()
return jsonify(archives)
def archive_status(archive_no, success):
"""Set the archive creation status in the table."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
if success:
sql = """UPDATE data_archives
SET status = '{0:s}'
WHERE archive_no = {1:d}
""".format('complete', archive_no)
else:
sql = """UPDATE data_archives
SET status = '{0:s}'
WHERE archive_no = {1:d}
""".format('fail', archive_no)
try:
cursor.execute(sql)
db.commit()
except Exception as e:
db.rollback()
db.close()
def get_archive_no(ent):
"""Determine the last incremented number generated by the active user."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """SELECT archive_no
FROM data_archives
WHERE enterer_no = {0:d}
ORDER BY created DESC
LIMIT 1
""".format(ent)
cursor.execute(sql)
for archive_no in cursor:
current_archive = archive_no[0]
db.close()
return current_archive
def get_file_type(archive_no):
"""Determine file type of the archive and return an extension."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """SELECT uri_path
FROM data_archives
WHERE archive_no = {0:d}
LIMIT 1
""".format(archive_no)
cursor.execute(sql)
for uri_path in cursor:
uri_path = uri_path[0]
db.close()
return uri_path[uri_path.rfind('.'):]
def create_record(auth, ent, authors, title, desc, path, args):
"""Create new record in database."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
sql = """INSERT INTO data_archives
(authorizer_no, enterer_no, authors, title, description,
uri_path, uri_args)
VALUES ({0:d}, {1:d}, '{2:s}', '{3:s}', '{4:s}', '{5:s}', '{6:s}')
""".format(auth, ent, authors, title, desc, path, args)
try:
cursor.execute(sql)
db.commit()
except Exception as e:
db.rollback()
raise ValueError(e)
db.close()
def update_record(archive_no, title, desc, authors, doi):
"""Add metadata to the archive table in database."""
import MySQLdb
db = MySQLdb.connect(read_default_file='./settings.cnf')
cursor = db.cursor()
if title:
title = title[:255]
sql = """UPDATE data_archives
SET title = '{0:s}', modified = now()
WHERE archive_no = {1:d}
""".format(title, archive_no)
cursor.execute(sql)
if desc:
desc = desc[:5000]
sql = """UPDATE data_archives
SET description = '{0:s}', modified = now()
WHERE archive_no = {1:d}
""".format(desc, archive_no)
cursor.execute(sql)
if authors:
authors = authors[:255]
sql = """UPDATE data_archives
SET authors = '{0:s}', modified = now()
WHERE archive_no = {1:d}
""".format(authors, archive_no)
cursor.execute(sql)
if doi:
doi = doi[:100]
sql = """UPDATE data_archives
SET doi = '{0:s}', modified = now()
WHERE archive_no = {1:d}
""".format(doi, archive_no)
cursor.execute(sql)
try:
# each UPDATE above was already executed; just commit them
db.commit()
except Exception as e:
db.rollback()
db.close()
```
#### File: jpjenk/data_archiver/test_archiver.py
```python
import time
import csv
import sys
import json
import pprint
import argparse
import pytest
import requests
SESSION = ''  # Browser session ID (value redacted in the original)
REC = 1
SERVER = 'https://paleobiodb.org'
INFILE = 'archive_drivers.csv'
@pytest.fixture()
def load_metadata():
"""Load metadata for the archives from disk."""
archives = []
n_archives = 0
with open(INFILE, 'r') as f:
reader = csv.reader(f)
for i, row in enumerate(reader):
print()
if row[0][0] == '#':
print(f'Skipping archive {i+1}')
continue
for bad_text in ['opinions', 'taxa', 'occs']:
if row[1].find(bad_text) != -1:
print(f'Bad data on row {i}')
print(row)
raise ValueError(f'Bad data on row {i}')
if len(row) != 3:
print(f'Missing data row {i+1}')
print(row)
raise ValueError(f'Missing data on row {i+1}')
archive = {'title': row[0],
'description': 'Legacy PBDB data archive',
'authors': row[1]}
uri = row[2]
archive.update(uri_path=f'/{uri[uri.find("org/")+4:uri.find("?")]}')
archive.update(uri_args=uri[uri.find('?')+1:])
archive.update(session_id=SESSION)
archives.append(archive)
return archives
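# Assumed archive_drivers.csv row format (illustrative, inferred from the indexing
# above): title,authors,data-service-URL, for example
#   Canis occurrences,J. Smith,https://paleobiodb.org/data1.2/occs/list.csv?base_name=Canis
# Rows whose first field starts with '#' are skipped.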
def test_create_archives(load_metadata):
"""Create, view, list and download each archive."""
archives = load_metadata
error_rows = []
for i, archive in enumerate(archives):
if (i + 1) < REC:
continue
print(f'Metadata row {i+1}')
pprint.pprint(archive)
try:
# Create archive
rc = requests.post(f'{SERVER}/archives/create', json=archive)
assert rc.status_code == 200
resp_c = rc.json()
assert rc.status_code == 200
assert resp_c.get('message') == 'success'
assert 'pbdb_id' in resp_c
pbdb_id = resp_c.get('pbdb_id')
print(f'Created Archive ID {pbdb_id}')
print('========')
assert pbdb_id is not None
assert pbdb_id > 0
# View archive
rv = requests.get(f'{SERVER}/archives/view/{pbdb_id}')
assert rv.status_code == 200
resp_v = rv.json()[0]
assert 'title' in resp_v
assert 'doi' in resp_v
assert 'authors' in resp_v
assert 'created' in resp_v
assert 'description' in resp_v
assert 'uri_path' in resp_v
assert 'uri_base' in resp_v
assert 'archive_no' in resp_v
assert resp_v.get('archive_no') == pbdb_id
# List archives
rl = requests.get(f'{SERVER}/archives/list')
assert rl.status_code == 200
archive_list = rl.json()
assert archive_list is not []
assert len(archive_list) > 0
current_found = False
for a in archive_list:
if a['archive_no'] == pbdb_id:
current_found = True
assert current_found
# Retrieve archive
rr = requests.get(f'{SERVER}/archives/retrieve/{pbdb_id}')
assert rr.status_code == 200
assert 'Content-Length' in rr.headers
assert int(rr.headers['Content-Length']) > 512
assert 'Content-Disposition' in rr.headers
assert f'pbdb_archive_{pbdb_id}' in rr.headers['Content-Disposition']
except Exception as e:
print(f'ERROR - archive {i+1}')
print(e)
error_rows.append(i+1)
pass
print('-----------------------')
print('Errors Rows:')
print(error_rows)
print('-----------------------')
``` |
{
"source": "jpjenk/elc_api",
"score": 4
} |
#### File: swagger_server/elc/taxa.py
```python
def set_taxon(taxon, subtax, db):
"""Return a database specific key-val pair for taxon."""
import requests
# Parse incomming taxon name string for errors and reassemble
taxon_list = taxon.split(',')
taxon_list = [x.strip() for x in taxon_list]
taxon_list = [x.capitalize() for x in taxon_list]
clean_list = list()
for item in taxon_list:
if len(item.split()) > 3 or len(item.split()) == 0:
msg = 'Unsupported taxon name length: {0:s}'.format(item)
raise ValueError(400, msg)
if '^' in item:
parts = [x.strip() for x in item.split('^')]
if len(parts) != 2:
msg = 'Incorrect usage of "not" caret: {0:s}'.format(item)
raise ValueError(400, msg)
# NEW RESOURCE: Add to list below if DB supports not (^) notation
if db in ['pbdb']:
parts = [x.capitalize() for x in parts]
clean_list.append('^'.join(parts))
else:
# DB does not support caret so remove the "not" portion
clean_list.append(parts[0])
else:
clean_list.append(item)
taxon = ','.join(clean_list)
# Format for specific database API parameter payloads
if db == 'neotoma':
if subtax:
return {'taxonname': taxon,
'lower': 'true'}
else:
return {'taxonname': taxon}
elif db == 'pbdb':
if subtax:
return {'base_name': taxon}
else:
return {'taxon_name': taxon}
elif db == 'sead':
# Currently SEAD does not support general taxa searching.
# An external service must be used to resolve the taxon rank of
# the first name in a list of taxa prior to parameterizing the query
single_taxon = taxon.split(',')[0]
if len(single_taxon.split()) == 2:
# Consider this to be a 'Genus Species' name
query = 'ilike.*{0:s}'.format(single_taxon)
return {'taxon': query}
else:
url = 'https://paleobiodb.org/data1.2/taxa/single.json'
payload = {'taxon_name': single_taxon}
rank = requests.get(url, payload).json()['records'][0]['rnk']
if rank == 9:
# Rank of Family
query = 'ilike.{0:s}'.format(single_taxon)
return {'family_name': query}
elif rank == 5:
# Rank of Genus
query = 'ilike.{0:s}'.format(single_taxon)
return {'genus_name': query}
# NEW RESOURCE: Add another database-specific taxon name mapping here
else:
return {}
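# Illustrative examples of the returned payloads (values hypothetical):
#   set_taxon('Canis', subtax=True,  db='pbdb')       -> {'base_name': 'Canis'}
#   set_taxon('Canis', subtax=False, db='neotoma')    -> {'taxonname': 'Canis'}
#   set_taxon('Canis lupus', subtax=False, db='sead') -> {'taxon': 'ilike.*Canis lupus'}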
def get_subtaxa(taxon, inc_syn=True):
"""
Query PBDB for all lower order relatives of a specified taxa.
:arg taxon: Taxonomic name to query
:type taxon: str
:arg inc_syn: Include recognized synonyms in the return
:type inc_syn: bool
"""
import requests
from ..elc import config
subtaxa = set()
url = ''.join([config.get('resource_api', 'pbdb'), 'taxa/list.json'])
payload = {'rel': 'all_children', 'name': taxon}
try:
r = requests.get(url=url,
params=payload,
timeout=config.get('default', 'timeout'))
r.raise_for_status()
except requests.exceptions.HTTPError as e:
msg = r.json().get('warnings')
raise ValueError(r.status_code, msg)
data = r.json()
for rec in data['records']:
if rec.get('tdf') and not inc_syn:
subtaxa.add(rec.get('acn'))
else:
subtaxa.add(rec.get('nam'))
return list(subtaxa)
def get_parents(taxon):
"""
Query PBDB for parent taxonomic groups.
:arg taxon: Taxonomic name to query
:type taxon: str
"""
import requests
from collections import OrderedDict
from ..elc import config
parents = dict()
base_url = config.get('resource_api', 'pbdb') + 'taxa/list.json'
tax_sys = ['kingdom', 'phylum', 'class', 'order',
'family', 'genus', 'species']
payload = dict()
payload.update(vocab='pbdb', rel='all_parents',
order='hierarchy', name=taxon)
resp = requests.get(base_url, params=payload, timeout=None)
if resp.status_code == 200:
resp_json = resp.json()
if 'warnings' in resp_json:
raise ValueError(400, 'Bad Request',
str(resp_json['warnings'][0]))
else:
for rec in resp_json['records']:
for rank in tax_sys:
if rec.get('taxon_rank') == rank:
parents.update({rank: rec.get('taxon_name')})
return OrderedDict(parents)
else:
raise ValueError(resp.status_code, resp.reason,
'Server error or bad URL')
``` |
{
"source": "jpjllorente/tacticalrmm",
"score": 2
} |
#### File: agents/tests/test_mgmt_commands.py
```python
from typing import TYPE_CHECKING
from unittest.mock import call, patch
from django.core.management import call_command
from model_bakery import baker
from tacticalrmm.constants import AgentMonType, AgentPlat
from tacticalrmm.test import TacticalTestCase
if TYPE_CHECKING:
from clients.models import Client, Site
class TestBulkRestartAgents(TacticalTestCase):
def setUp(self) -> None:
self.authenticate()
self.setup_coresettings()
self.client1: "Client" = baker.make("clients.Client")
self.site1: "Site" = baker.make("clients.Site", client=self.client1)
@patch("core.management.commands.bulk_restart_agents.sleep")
@patch("agents.models.Agent.recover")
@patch("core.management.commands.bulk_restart_agents.get_mesh_ws_url")
def test_bulk_restart_agents_mgmt_cmd(
self, get_mesh_ws_url, recover, mock_sleep
) -> None:
get_mesh_ws_url.return_value = "https://mesh.example.com/test"
baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.WINDOWS,
)
baker.make_recipe(
"agents.online_agent",
site=self.site1,
monitoring_type=AgentMonType.SERVER,
plat=AgentPlat.LINUX,
)
calls = [
call("tacagent", "https://mesh.example.com/test", wait=False),
call("mesh", "", wait=False),
]
call_command("bulk_restart_agents")
recover.assert_has_calls(calls)
mock_sleep.assert_called_with(10)
```
#### File: tacticalrmm/core/tests.py
```python
from unittest.mock import patch
import requests
from channels.db import database_sync_to_async
from channels.testing import WebsocketCommunicator
from django.conf import settings
from django.core.management import call_command
from model_bakery import baker
from rest_framework.authtoken.models import Token
from agents.models import Agent
from core.utils import get_core_settings
from logs.models import PendingAction
from tacticalrmm.constants import CustomFieldModel, PAAction, PAStatus, CONFIG_MGMT_CMDS
from tacticalrmm.test import TacticalTestCase
from .consumers import DashInfo
from .models import CustomField, GlobalKVStore, URLAction
from .serializers import CustomFieldSerializer, KeyStoreSerializer, URLActionSerializer
from .tasks import core_maintenance_tasks, handle_resolved_stuff
class TestCodeSign(TacticalTestCase):
def setUp(self):
self.setup_coresettings()
self.authenticate()
self.url = "/core/codesign/"
def test_get_codesign(self):
r = self.client.get(self.url)
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("get", self.url)
@patch("requests.post")
def test_edit_codesign_timeout(self, mock_post):
mock_post.side_effect = requests.exceptions.ConnectionError()
data = {"token": "<PASSWORD>"}
r = self.client.patch(self.url, data, format="json")
self.assertEqual(r.status_code, 400)
self.check_not_authenticated("patch", self.url)
class TestConsumers(TacticalTestCase):
def setUp(self):
self.setup_coresettings()
self.authenticate()
@database_sync_to_async
def get_token(self):
token = Token.objects.create(user=self.john)
return token.key
async def test_dash_info(self):
key = await self.get_token()
communicator = WebsocketCommunicator(
DashInfo.as_asgi(), f"/ws/dashinfo/?access_token={key}"
)
communicator.scope["user"] = self.john
connected, _ = await communicator.connect()
assert connected
await communicator.disconnect()
class TestCoreTasks(TacticalTestCase):
def setUp(self):
self.setup_coresettings()
self.authenticate()
def test_core_maintenance_tasks(self):
core_maintenance_tasks()
self.assertTrue(True)
def test_dashboard_info(self):
url = "/core/dashinfo/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("get", url)
def test_vue_version(self):
url = "/core/version/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("get", url)
def test_get_core_settings(self):
url = "/core/settings/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("get", url)
def test_edit_coresettings(self):
url = "/core/settings/"
# setup
policies = baker.make("automation.Policy", _quantity=2)
# test normal request
data = {
"smtp_from_email": "<EMAIL>",
"mesh_token": "<PASSWORD>",
}
r = self.client.put(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(get_core_settings().smtp_from_email, data["smtp_from_email"])
self.assertEqual(get_core_settings().mesh_token, data["mesh_token"])
self.check_not_authenticated("put", url)
@patch("tacticalrmm.utils.reload_nats")
@patch("autotasks.tasks.remove_orphaned_win_tasks.delay")
def test_ui_maintenance_actions(self, remove_orphaned_win_tasks, reload_nats):
url = "/core/servermaintenance/"
agents = baker.make_recipe("agents.online_agent", _quantity=3)
# test with empty data
r = self.client.post(url, {})
self.assertEqual(r.status_code, 400)
# test with invalid action
data = {"action": "invalid_action"}
r = self.client.post(url, data)
self.assertEqual(r.status_code, 400)
# test reload nats action
data = {"action": "reload_nats"}
r = self.client.post(url, data)
self.assertEqual(r.status_code, 200)
reload_nats.assert_called_once()
# test prune db with no tables
data = {"action": "prune_db"}
r = self.client.post(url, data)
self.assertEqual(r.status_code, 400)
# test prune db with tables
data = {
"action": "prune_db",
"prune_tables": ["audit_logs", "alerts", "pending_actions"],
}
r = self.client.post(url, data)
self.assertEqual(r.status_code, 200)
# test remove orphaned tasks
data = {"action": "rm_orphaned_tasks"}
r = self.client.post(url, data)
self.assertEqual(r.status_code, 200)
remove_orphaned_win_tasks.assert_called()
self.check_not_authenticated("post", url)
def test_get_custom_fields(self):
url = "/core/customfields/"
# setup
custom_fields = baker.make("core.CustomField", _quantity=2)
r = self.client.get(url)
serializer = CustomFieldSerializer(custom_fields, many=True)
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data), 2)
self.assertEqual(r.data, serializer.data)
self.check_not_authenticated("get", url)
def test_get_custom_fields_by_model(self):
url = "/core/customfields/"
# setup
custom_fields = baker.make(
"core.CustomField", model=CustomFieldModel.AGENT, _quantity=5
)
baker.make("core.CustomField", model="client", _quantity=5)
# will error if request invalid
r = self.client.patch(url, {"invalid": ""})
self.assertEqual(r.status_code, 400)
data = {"model": "agent"}
r = self.client.patch(url, data)
serializer = CustomFieldSerializer(custom_fields, many=True)
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data), 5)
self.check_not_authenticated("patch", url)
def test_add_custom_field(self):
url = "/core/customfields/"
data = {"model": "client", "type": "text", "name": "Field"}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("post", url)
def test_get_custom_field(self):
# setup
custom_field = baker.make("core.CustomField")
# test not found
r = self.client.get("/core/customfields/500/")
self.assertEqual(r.status_code, 404)
url = f"/core/customfields/{custom_field.id}/"
r = self.client.get(url)
serializer = CustomFieldSerializer(custom_field)
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data, serializer.data)
self.check_not_authenticated("get", url)
def test_update_custom_field(self):
# setup
custom_field = baker.make("core.CustomField")
# test not found
r = self.client.put("/core/customfields/500/")
self.assertEqual(r.status_code, 404)
url = f"/core/customfields/{custom_field.id}/"
data = {"type": "single", "options": ["ione", "two", "three"]}
r = self.client.put(url, data)
self.assertEqual(r.status_code, 200)
new_field = CustomField.objects.get(pk=custom_field.id)
self.assertEqual(new_field.type, data["type"])
self.assertEqual(new_field.options, data["options"])
self.check_not_authenticated("put", url)
def test_delete_custom_field(self):
# setup
custom_field = baker.make("core.CustomField")
# test not found
r = self.client.delete("/core/customfields/500/")
self.assertEqual(r.status_code, 404)
url = f"/core/customfields/{custom_field.id}/"
r = self.client.delete(url)
self.assertEqual(r.status_code, 200)
self.assertFalse(CustomField.objects.filter(pk=custom_field.id).exists())
self.check_not_authenticated("delete", url)
def test_get_keystore(self):
url = "/core/keystore/"
# setup
keys = baker.make("core.GlobalKVStore", _quantity=2)
r = self.client.get(url)
serializer = KeyStoreSerializer(keys, many=True)
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data), 2)
self.assertEqual(r.data, serializer.data)
self.check_not_authenticated("get", url)
def test_add_keystore(self):
url = "/core/keystore/"
data = {"name": "test", "value": "text"}
r = self.client.post(url, data)
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("post", url)
def test_update_keystore(self):
# setup
key = baker.make("core.GlobalKVStore")
# test not found
r = self.client.put("/core/keystore/500/")
self.assertEqual(r.status_code, 404)
url = f"/core/keystore/{key.id}/"
data = {"name": "test", "value": "text"}
r = self.client.put(url, data)
self.assertEqual(r.status_code, 200)
new_key = GlobalKVStore.objects.get(pk=key.id)
self.assertEqual(new_key.name, data["name"])
self.assertEqual(new_key.value, data["value"])
self.check_not_authenticated("put", url)
def test_delete_keystore(self):
# setup
key = baker.make("core.GlobalKVStore")
# test not found
r = self.client.delete("/core/keystore/500/")
self.assertEqual(r.status_code, 404)
url = f"/core/keystore/{key.id}/"
r = self.client.delete(url)
self.assertEqual(r.status_code, 200)
self.assertFalse(GlobalKVStore.objects.filter(pk=key.id).exists())
self.check_not_authenticated("delete", url)
def test_get_urlaction(self):
url = "/core/urlaction/"
# setup
action = baker.make("core.URLAction", _quantity=2)
r = self.client.get(url)
serializer = URLActionSerializer(action, many=True)
self.assertEqual(r.status_code, 200)
self.assertEqual(len(r.data), 2)
self.assertEqual(r.data, serializer.data)
self.check_not_authenticated("get", url)
def test_add_urlaction(self):
url = "/core/urlaction/"
data = {"name": "name", "desc": "desc", "pattern": "pattern"}
r = self.client.post(url, data)
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("post", url)
def test_update_urlaction(self):
# setup
action = baker.make("core.URLAction")
# test not found
r = self.client.put("/core/urlaction/500/")
self.assertEqual(r.status_code, 404)
url = f"/core/urlaction/{action.id}/"
data = {"name": "test", "pattern": "text"}
r = self.client.put(url, data)
self.assertEqual(r.status_code, 200)
new_action = URLAction.objects.get(pk=action.id)
self.assertEqual(new_action.name, data["name"])
self.assertEqual(new_action.pattern, data["pattern"])
self.check_not_authenticated("put", url)
def test_delete_urlaction(self):
# setup
action = baker.make("core.URLAction")
# test not found
r = self.client.delete("/core/urlaction/500/")
self.assertEqual(r.status_code, 404)
url = f"/core/urlaction/{action.id}/"
r = self.client.delete(url)
self.assertEqual(r.status_code, 200)
self.assertFalse(URLAction.objects.filter(pk=action.id).exists())
self.check_not_authenticated("delete", url)
def test_run_url_action(self):
self.maxDiff = None
# setup
agent = baker.make_recipe(
"agents.agent", agent_id="123123-assdss4s-343-sds545-45dfdf|DESKTOP"
)
baker.make("core.GlobalKVStore", name="Test Name", value="value with space")
action = baker.make(
"core.URLAction",
pattern="https://remote.example.com/connect?globalstore={{global.Test Name}}&client_name={{client.name}}&site id={{site.id}}&agent_id={{agent.agent_id}}",
)
url = "/core/urlaction/run/"
# test not found
r = self.client.patch(url, {"agent_id": 500, "action": 500})
self.assertEqual(r.status_code, 404)
data = {"agent_id": agent.agent_id, "action": action.id}
r = self.client.patch(url, data)
self.assertEqual(r.status_code, 200)
self.assertEqual(
r.data,
f"https://remote.example.com/connect?globalstore=value%20with%20space&client_name={agent.client.name}&site%20id={agent.site.id}&agent_id=123123-assdss4s-343-sds545-45dfdf%7CDESKTOP",
)
self.check_not_authenticated("patch", url)
def test_clear_cache(self):
url = "/core/clearcache/"
r = self.client.get(url)
self.assertEqual(r.status_code, 200)
self.check_not_authenticated("get", url)
def test_resolved_pending_agentupdate_task(self):
online = baker.make_recipe("agents.online_agent", version="2.0.0", _quantity=20)
offline = baker.make_recipe(
"agents.offline_agent", version="2.0.0", _quantity=20
)
agents = online + offline
for agent in agents:
baker.make_recipe("logs.pending_agentupdate_action", agent=agent)
Agent.objects.update(version=settings.LATEST_AGENT_VER)
handle_resolved_stuff()
complete = PendingAction.objects.filter(
action_type=PAAction.AGENT_UPDATE, status=PAStatus.COMPLETED
).count()
old = PendingAction.objects.filter(
action_type=PAAction.AGENT_UPDATE, status=PAStatus.PENDING
).count()
self.assertEqual(complete, 20)
self.assertEqual(old, 20)
class TestCoreMgmtCommands(TacticalTestCase):
def setUp(self):
self.setup_coresettings()
def test_get_config(self):
for cmd in CONFIG_MGMT_CMDS:
call_command("get_config", cmd)
class TestCorePermissions(TacticalTestCase):
def setUp(self):
self.setup_client()
self.setup_coresettings()
```
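Every endpoint test above ends with `self.check_not_authenticated(method, url)`, a helper inherited from `TacticalTestCase` whose implementation is not part of this excerpt. The sketch below is only an assumption of what such a helper might look like (the mixin name `PermissionCheckMixin` is hypothetical): issue the request with a client that carries no credentials and assert that the API rejects it.
```python
# Hypothetical sketch of a check_not_authenticated helper (not the actual
# TacticalTestCase implementation, which is outside this excerpt).
from rest_framework.test import APIClient, APITestCase


class PermissionCheckMixin(APITestCase):
    def check_not_authenticated(self, method: str, url: str) -> None:
        """Assert that an unauthenticated client cannot reach the endpoint."""
        anon = APIClient()  # fresh client, no credentials attached
        response = getattr(anon, method)(url)
        # DRF answers 401 (no credentials) or 403 (no permission) here
        self.assertIn(response.status_code, (401, 403))
```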
#### File: tacticalrmm/tacticalrmm/constants.py
```python
from enum import Enum
from django.db import models
class MeshAgentIdent(Enum):
WIN32 = 3
WIN64 = 4
LINUX32 = 5
LINUX64 = 6
LINUX_ARM_64 = 26
LINUX_ARM_HF = 25
def __str__(self):
return str(self.value)
CORESETTINGS_CACHE_KEY = "core_settings"
ROLE_CACHE_PREFIX = "role_"
AGENT_STATUS_ONLINE = "online"
AGENT_STATUS_OFFLINE = "offline"
AGENT_STATUS_OVERDUE = "overdue"
class GoArch(models.TextChoices):
AMD64 = "amd64", "amd64"
i386 = "386", "386"
ARM64 = "arm64", "arm64"
ARM32 = "arm", "arm"
class CustomFieldModel(models.TextChoices):
CLIENT = "client", "Client"
SITE = "site", "Site"
AGENT = "agent", "Agent"
class CustomFieldType(models.TextChoices):
TEXT = "text", "Text"
NUMBER = "number", "Number"
SINGLE = "single", "Single"
MULTIPLE = "multiple", "Multiple"
CHECKBOX = "checkbox", "Checkbox"
DATETIME = "datetime", "DateTime"
class TaskSyncStatus(models.TextChoices):
SYNCED = "synced", "Synced With Agent"
NOT_SYNCED = "notsynced", "Waiting On Agent Checkin"
PENDING_DELETION = "pendingdeletion", "Pending Deletion on Agent"
INITIAL = "initial", "Initial Task Sync"
class TaskStatus(models.TextChoices):
PASSING = "passing", "Passing"
FAILING = "failing", "Failing"
PENDING = "pending", "Pending"
class TaskType(models.TextChoices):
DAILY = "daily", "Daily"
WEEKLY = "weekly", "Weekly"
MONTHLY = "monthly", "Monthly"
MONTHLY_DOW = "monthlydow", "Monthly Day of Week"
CHECK_FAILURE = "checkfailure", "On Check Failure"
MANUAL = "manual", "Manual"
RUN_ONCE = "runonce", "Run Once"
SCHEDULED = "scheduled", "Scheduled" # deprecated
class AlertSeverity(models.TextChoices):
INFO = "info", "Informational"
WARNING = "warning", "Warning"
ERROR = "error", "Error"
class AlertType(models.TextChoices):
AVAILABILITY = "availability", "Availability"
CHECK = "check", "Check"
TASK = "task", "Task"
CUSTOM = "custom", "Custom"
class AgentHistoryType(models.TextChoices):
TASK_RUN = "task_run", "Task Run"
SCRIPT_RUN = "script_run", "Script Run"
CMD_RUN = "cmd_run", "CMD Run"
class AgentMonType(models.TextChoices):
SERVER = "server", "Server"
WORKSTATION = "workstation", "Workstation"
class AgentPlat(models.TextChoices):
WINDOWS = "windows", "Windows"
LINUX = "linux", "Linux"
DARWIN = "darwin", "macOS"
class ClientTreeSort(models.TextChoices):
ALPHA_FAIL = "alphafail", "Move failing clients to the top"
ALPHA = "alpha", "Sort alphabetically"
class AgentTableTabs(models.TextChoices):
SERVER = "server", "Servers"
WORKSTATION = "workstation", "Workstations"
MIXED = "mixed", "Mixed"
class AgentDblClick(models.TextChoices):
EDIT_AGENT = "editagent", "Edit Agent"
TAKE_CONTROL = "takecontrol", "Take Control"
REMOTE_BG = "remotebg", "Remote Background"
URL_ACTION = "urlaction", "URL Action"
class ScriptShell(models.TextChoices):
POWERSHELL = "powershell", "Powershell"
CMD = "cmd", "Batch (CMD)"
PYTHON = "python", "Python"
SHELL = "shell", "Shell"
class ScriptType(models.TextChoices):
USER_DEFINED = "userdefined", "User Defined"
BUILT_IN = "builtin", "Built In"
class EvtLogNames(models.TextChoices):
APPLICATION = "Application", "Application"
SYSTEM = "System", "System"
SECURITY = "Security", "Security"
class EvtLogTypes(models.TextChoices):
INFO = "INFO", "Information"
WARNING = "WARNING", "Warning"
ERROR = "ERROR", "Error"
AUDIT_SUCCESS = "AUDIT_SUCCESS", "Success Audit"
AUDIT_FAILURE = "AUDIT_FAILURE", "Failure Audit"
class EvtLogFailWhen(models.TextChoices):
CONTAINS = "contains", "Log contains"
NOT_CONTAINS = "not_contains", "Log does not contain"
class CheckStatus(models.TextChoices):
PASSING = "passing", "Passing"
FAILING = "failing", "Failing"
PENDING = "pending", "Pending"
class PAStatus(models.TextChoices):
PENDING = "pending", "Pending"
COMPLETED = "completed", "Completed"
class PAAction(models.TextChoices):
SCHED_REBOOT = "schedreboot", "Scheduled Reboot"
AGENT_UPDATE = "agentupdate", "Agent Update"
CHOCO_INSTALL = "chocoinstall", "Chocolatey Software Install"
RUN_CMD = "runcmd", "Run Command"
RUN_SCRIPT = "runscript", "Run Script"
RUN_PATCH_SCAN = "runpatchscan", "Run Patch Scan"
RUN_PATCH_INSTALL = "runpatchinstall", "Run Patch Install"
class CheckType(models.TextChoices):
DISK_SPACE = "diskspace", "Disk Space Check"
PING = "ping", "Ping Check"
CPU_LOAD = "cpuload", "CPU Load Check"
MEMORY = "memory", "Memory Check"
WINSVC = "winsvc", "Service Check"
SCRIPT = "script", "Script Check"
EVENT_LOG = "eventlog", "Event Log Check"
class AuditActionType(models.TextChoices):
LOGIN = "login", "User Login"
FAILED_LOGIN = "failed_login", "Failed User Login"
DELETE = "delete", "Delete Object"
MODIFY = "modify", "Modify Object"
ADD = "add", "Add Object"
VIEW = "view", "View Object"
CHECK_RUN = "check_run", "Check Run"
TASK_RUN = "task_run", "Task Run"
AGENT_INSTALL = "agent_install", "Agent Install"
REMOTE_SESSION = "remote_session", "Remote Session"
EXEC_SCRIPT = "execute_script", "Execute Script"
EXEC_COMMAND = "execute_command", "Execute Command"
BULK_ACTION = "bulk_action", "Bulk Action"
URL_ACTION = "url_action", "URL Action"
class AuditObjType(models.TextChoices):
USER = "user", "User"
SCRIPT = "script", "Script"
AGENT = "agent", "Agent"
POLICY = "policy", "Policy"
WINUPDATE = "winupdatepolicy", "Patch Policy"
CLIENT = "client", "Client"
SITE = "site", "Site"
CHECK = "check", "Check"
AUTOTASK = "automatedtask", "Automated Task"
CORE = "coresettings", "Core Settings"
BULK = "bulk", "Bulk"
ALERT_TEMPLATE = "alerttemplate", "Alert Template"
ROLE = "role", "Role"
URL_ACTION = "urlaction", "URL Action"
KEYSTORE = "keystore", "Global Key Store"
CUSTOM_FIELD = "customfield", "Custom Field"
class DebugLogLevel(models.TextChoices):
INFO = "info", "Info"
WARN = "warning", "Warning"
ERROR = "error", "Error"
CRITICAL = "critical", "Critical"
class DebugLogType(models.TextChoices):
AGENT_UPDATE = "agent_update", "Agent Update"
AGENT_ISSUES = "agent_issues", "Agent Issues"
WIN_UPDATES = "win_updates", "Windows Updates"
SYSTEM_ISSUES = "system_issues", "System Issues"
SCRIPTING = "scripting", "Scripting"
# Agent db fields that are not needed for most queries; deferring them speeds up queries
AGENT_DEFER = (
"wmi_detail",
"services",
"created_by",
"created_time",
"modified_by",
"modified_time",
)
ONLINE_AGENTS = (
"pk",
"agent_id",
"last_seen",
"overdue_time",
"offline_time",
"version",
)
FIELDS_TRIGGER_TASK_UPDATE_AGENT = [
"run_time_bit_weekdays",
"run_time_date",
"expire_date",
"daily_interval",
"weekly_interval",
"enabled",
"remove_if_not_scheduled",
"run_asap_after_missed",
"monthly_days_of_month",
"monthly_months_of_year",
"monthly_weeks_of_month",
"task_repetition_duration",
"task_repetition_interval",
"stop_task_at_duration_end",
"random_task_delay",
"run_asap_after_missed",
"task_instance_policy",
]
POLICY_TASK_FIELDS_TO_COPY = [
"alert_severity",
"email_alert",
"text_alert",
"dashboard_alert",
"name",
"actions",
"run_time_bit_weekdays",
"run_time_date",
"expire_date",
"daily_interval",
"weekly_interval",
"task_type",
"enabled",
"remove_if_not_scheduled",
"run_asap_after_missed",
"custom_field",
"collector_all_output",
"monthly_days_of_month",
"monthly_months_of_year",
"monthly_weeks_of_month",
"task_repetition_duration",
"task_repetition_interval",
"stop_task_at_duration_end",
"random_task_delay",
"run_asap_after_missed",
"task_instance_policy",
"continue_on_error",
]
CHECKS_NON_EDITABLE_FIELDS = [
"check_type",
"overridden_by_policy",
"created_by",
"created_time",
"modified_by",
"modified_time",
]
POLICY_CHECK_FIELDS_TO_COPY = [
"check_type",
"warning_threshold",
"error_threshold",
"alert_severity",
"name",
"run_interval",
"disk",
"fails_b4_alert",
"ip",
"script",
"script_args",
"info_return_codes",
"warning_return_codes",
"timeout",
"svc_name",
"svc_display_name",
"svc_policy_mode",
"pass_if_start_pending",
"pass_if_svc_not_exist",
"restart_if_stopped",
"log_name",
"event_id",
"event_id_is_wildcard",
"event_type",
"event_source",
"event_message",
"fail_when",
"search_last_days",
"number_of_events_b4_alert",
"email_alert",
"text_alert",
"dashboard_alert",
]
WEEK_DAYS = {
"Sunday": 0x1,
"Monday": 0x2,
"Tuesday": 0x4,
"Wednesday": 0x8,
"Thursday": 0x10,
"Friday": 0x20,
"Saturday": 0x40,
}
MONTHS = {
"January": 0x1,
"February": 0x2,
"March": 0x4,
"April": 0x8,
"May": 0x10,
"June": 0x20,
"July": 0x40,
"August": 0x80,
"September": 0x100,
"October": 0x200,
"November": 0x400,
"December": 0x800,
}
WEEKS = {
"First Week": 0x1,
"Second Week": 0x2,
"Third Week": 0x4,
"Fourth Week": 0x8,
"Last Week": 0x10,
}
MONTH_DAYS = {f"{b}": 0x1 << a for a, b in enumerate(range(1, 32))}
MONTH_DAYS["Last Day"] = 0x80000000
DEMO_NOT_ALLOWED = [
{"name": "AgentProcesses", "methods": ["DELETE"]},
{"name": "AgentMeshCentral", "methods": ["GET", "POST"]},
{"name": "update_agents", "methods": ["POST"]},
{"name": "send_raw_cmd", "methods": ["POST"]},
{"name": "install_agent", "methods": ["POST"]},
{"name": "GenerateAgent", "methods": ["GET"]},
{"name": "email_test", "methods": ["POST"]},
{"name": "server_maintenance", "methods": ["POST"]},
{"name": "CodeSign", "methods": ["PATCH", "POST"]},
{"name": "TwilioSMSTest", "methods": ["POST"]},
{"name": "GetEditActionService", "methods": ["PUT", "POST"]},
{"name": "TestScript", "methods": ["POST"]},
{"name": "GetUpdateDeleteAgent", "methods": ["DELETE"]},
{"name": "Reboot", "methods": ["POST", "PATCH"]},
{"name": "recover", "methods": ["POST"]},
{"name": "run_script", "methods": ["POST"]},
{"name": "bulk", "methods": ["POST"]},
{"name": "WMI", "methods": ["POST"]},
{"name": "PolicyAutoTask", "methods": ["POST"]},
{"name": "RunAutoTask", "methods": ["POST"]},
{"name": "run_checks", "methods": ["POST"]},
{"name": "GetSoftware", "methods": ["POST", "PUT"]},
{"name": "ScanWindowsUpdates", "methods": ["POST"]},
{"name": "InstallWindowsUpdates", "methods": ["POST"]},
{"name": "PendingActions", "methods": ["DELETE"]},
{"name": "clear_cache", "methods": ["GET"]},
]
CONFIG_MGMT_CMDS = (
"api",
"version",
"webversion",
"meshver",
"natsver",
"frontend",
"djangoadmin",
"setuptoolsver",
"wheelver",
"dbname",
"dbuser",
"dbhost",
"dbpw",
"dbport",
"meshsite",
"meshuser",
"meshtoken",
)
```
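The `WEEK_DAYS`, `MONTHS`, `WEEKS` and `MONTH_DAYS` tables in `constants.py` above assign one bit per selection, so a set of schedule choices collapses into a single integer by OR-ing the flags. A minimal standalone sketch (not code from the repository) of how that encoding composes and decodes:
```python
# Minimal standalone sketch (not repository code) of the bit-flag scheme used
# by WEEK_DAYS/MONTHS/WEEKS/MONTH_DAYS: one bit per selection, OR-ed together.
WEEK_DAYS = {"Sunday": 0x1, "Monday": 0x2, "Tuesday": 0x4, "Wednesday": 0x8,
             "Thursday": 0x10, "Friday": 0x20, "Saturday": 0x40}


def encode_days(days):
    """OR the flags of the selected weekday names into one bitmask."""
    mask = 0
    for day in days:
        mask |= WEEK_DAYS[day]
    return mask


def decode_days(mask):
    """Recover the weekday names whose bits are set in the mask."""
    return [name for name, bit in WEEK_DAYS.items() if mask & bit]


mask = encode_days(["Monday", "Wednesday", "Friday"])
print(hex(mask))          # 0x2a
print(decode_days(mask))  # ['Monday', 'Wednesday', 'Friday']
```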
{
"source": "jpJuni0r/nightmare",
"score": 2
} |
#### File: jpJuni0r/nightmare/pyTestMain.py
```python
import os
import sys
from pyTest import TestState
from pyTestSuite import TestSuite
from pyTestRunner import TestRunner
from pyTestUtils import TermColor
def main():
# Check whether wxpython is installed or not
try:
import wx
# Has the exe been double clicked? -> Try GUI
# Allow at most one parameter in case a testbench file
# has been dropped onto the exe.
if sys.argv[0].endswith(".exe") and len(sys.argv) < 2:
sys.argv.append("--gui")
except ImportError:
if "--no-gui" not in sys.argv:
sys.argv.append("--no-gui")
if "--no-color" in sys.argv:
TermColor.active = False
if "--version" in sys.argv:
runner = TestRunner(flush=True)
elif "--gui" in sys.argv:
# Capt. Obvious: We're running the GUI
from pyTestGui import TestRunnerGui
if len(sys.argv) > 1 and not sys.argv[1].startswith("-") and os.path.exists(sys.argv[1]):
sys.argv[1] = '--bench=' + sys.argv[1]
gui = TestRunnerGui()
gui.buildWindow()
gui.show()
else:
# Capt. Obvious: We're running in console mode
runner = TestRunner()
runner.parseArgv()
suite = runner.loadSuite()
if suite is not None:
for testcase in runner.run():
pass
if not runner.options['info'] and not runner.options['length'] and not runner.options['quiet']:
print "{:2.2f}%".format(suite.getRate())
sys.exit(suite.lastResult if suite.lastResult not in [TestState.Waiting, TestState.InfoOnly] else 0)
else:
sys.exit(1)
if __name__ == "__main__":
main()
```
#### File: jpJuni0r/nightmare/pyTestRunner.py
```python
import os
import re
import sys
import time
import math
import argparse
import itertools
import struct
import subprocess
import collections
import fractions
try:
import pyparsing
except ImportError:
pyparsing = None
# from threading import Thread
from pyTestUtils import TermColor, logger
from pyTest import Test, TestState
from pyTest import Expectation, ExpectFile, Stringifier, StringifiedFile, CompareFiles
from pyTestSuite import TestSuite, TestSuiteMode
from arnold_converter import syntax, buildTestList
import version
class TestRunner(object):
"""Testrunner. Reads a testbench file and executes the testrun"""
def __init__(self, flush=False):
"""Initialises the test runner"""
# Thread.__init__(self)
logger.log(
TermColor.colorText("NIGHTMARE I", TermColor.Red, style=TermColor.Bold) +
TermColor.colorText("s of ", TermColor.White) +
TermColor.colorText("G", TermColor.Red, style=TermColor.Bold) +
TermColor.colorText("enerous ", TermColor.White) +
TermColor.colorText("H", TermColor.Red, style=TermColor.Bold) +
TermColor.colorText("elp when ", TermColor.White) +
TermColor.colorText("T", TermColor.Red, style=TermColor.Bold) +
TermColor.colorText("esting; ", TermColor.White) +
TermColor.colorText("M", TermColor.Red, style=TermColor.Bold) +
TermColor.colorText("ay ", TermColor.White) +
TermColor.colorText("A", TermColor.Red, style=TermColor.Bold) +
TermColor.colorText("rnold be ", TermColor.White) +
TermColor.colorText("R", TermColor.Red, style=TermColor.Bold) +
TermColor.colorText("emembered ", TermColor.White) +
TermColor.colorText("E", TermColor.Red, style=TermColor.Bold) +
TermColor.colorText("ternally", TermColor.White)
)
logger.log("Welcome to nightmare Version {}".format(version.Version))
if flush:
logger.flush(quiet=False)
self.options = dict()
self.testCount = 0
self.runsuite = None
self.finished = None
def setDUT(self, DUT):
"""
set the Device under Test
@type DUT: String
@param DUT: Device Under Test
"""
self.options['dut'] = DUT
if self.runsuite is not None:
self.runsuite.setDUT(DUT)
def getSuite(self):
"""Returns the suite. If none is loaded a new one will be created"""
if self.runsuite is None:
self.runsuite = TestSuite(DUT=self.options['dut'], mode=self.options['mode'])
return self.runsuite
def parseArgv(self):
"""Parses the argument vector"""
args = argparse.ArgumentParser(description="A test tool for non-interactive command line programs")
group = args.add_argument_group("Test selection")
group.add_argument("--bench", action="store", nargs=1, help="File which contains the testbench.")
group.add_argument("--suite", action="store", nargs=1, help="Use testsuite SUITE from the testbench.", metavar="SUITE")
group.add_argument("--dut", "--DUT", action="store", nargs=1, help="Set the device under test.")
group.add_argument("--test", action="store", nargs="+", type=int, help="Run only the specified tests")
group.add_argument("--timeout", action="store", nargs=1, type=float, help="Set a global timeout for all tests.")
group.add_argument("--arnold", "-a", action="store_true", default=False, dest="arnold", help="Use the arnold mode (requires pyparsing module)")
group.add_argument("--save", action="store", nargs=1, help="Save the testsuite as FILE", metavar="FILE")
group = args.add_argument_group("Output Control")
group.add_argument("--limit", action="store", nargs=1, type=int, default=2000, help="Set a (soft) limit for a number of Bytes, after which output piping will we stopped. Checks are made after each line.")
group.add_argument("--quiet", "-q", action="store_const", const=True, default=False, dest="quiet", help="Quiet mode. There will be no output except results.")
group.add_argument("--verbose", "-v", action="store_const", const=False, dest="quiet", help="Verbose mode. The program gets chatty (default).")
group.add_argument("--commands", "-C", action="store_true", default=False, dest="commands", help="Show the command executed for each test.")
group.add_argument("--length", "-l", action="store_true", default=False, dest="length", help="Print only the number of tests in the suite.")
group.add_argument("--info-only", "-i", action="store_true", default=False, dest="info", help="Display only test information, but don't run them.")
group.add_argument("--pipe-streams", "-p", action="store_true", default=None, dest="pipe", help="Redirect DUT output to their respective streams.")
group.add_argument("--output-fails", "-o", action="store_true", default=None, dest="output", help="Redirect DUT output from failed tests to their respective streams.")
group.add_argument("--unify-fails", "-u", action="store_true", default=None, dest="diff", help="Display the unified diff of output and expectation.")
group.add_argument("--no-color", action="store_false", default=True, dest="color", help="Don't use any colored output.")
group = args.add_argument_group("Test Flow")
group.add_argument("--continue", "-c", action="store_const", const=TestSuiteMode.Continuous, dest="mode", help="Continuous mode (Don't halt on failed tests).")
group.add_argument("--error", "-e", action="store_const", const=TestSuiteMode.BreakOnError, dest="mode", help="Same as '-c', but will halt if an error occurs.")
group.add_argument("--ignoreEmptyLines", "-L", action="store_true", default=None, dest="ignoreEmptyLines", help="Ignore empty lines")
group.add_argument("--relative", "-r", action="store_true", default=False, dest="relative", help="Use a path relative to the testbench path.")
group.add_argument("--cr", action="store_const", const="\r", dest="linesep", help="Force the line separation character (Mac OS).")
group.add_argument("--ln", action="store_const", const="\n", dest="linesep", help="Force the line separation character (Unix / Mac OS-X).")
group.add_argument("--crln", action="store_const", const="\r\n", dest="linesep", help="Force the line separation character (Windows).")
args.add_argument("--gui", action="store_true", default=False, dest="gui", help="Use the GUI (experimental and unstable).")
args.add_argument("--no-gui", action="store_true", default=False, dest="gui", help="Don't use the GUI.")
args.add_argument("--version", action="store_const", const=True, default=False, help="Display version information")
args.set_defaults(linesep=os.linesep, bench=[""], save=[], suite=["suite"], dut=[None], timeout=[None], test=[])
self.options.update(vars(args.parse_args()))
self.options['bench'] = self.options['bench'][0]
self.options['suite'] = self.options['suite'][0]
self.options['dut'] = self.options['dut'][0]
self.options['timeout'] = self.options['timeout'][0]
logMessages = [
('mode', lambda v: "I'm running in continuous mode now"
if v == TestSuiteMode.Continuous
else "I'm running in continuous mode now, but will halt if an error occurs"
if v == TestSuiteMode.BreakOnError
else "I will halt on first fail."),
('suite', lambda v: "I'm using the testsuite '{}'".format(v)),
('test', lambda v: "I'm only running test {}".format(v) if len(v) > 0 else ""),
('bench', lambda v: "I'm using testbench '{}'".format(v)),
('timeout', lambda v: "Setting global timeout to {}".format(v)),
('dut', lambda v: "Device under Test is: {}".format(v)),
('commands', lambda v: "I will print every command I execute." if v else ""),
('length', lambda v: "I will only print the number of tests" if v else ""),
('info', lambda v: "I will only print the test information." if v else ""),
('pipe', lambda v: "I will pipe all test outputs to their respective streams" if v else ""),
('output', lambda v: "I will pipe the outputs of failed tests to their respective streams" if v else ""),
('diff', lambda v: "I will show the differences in output and expectations" if v else ""),
]
for option, msgFunc in logMessages:
if self.options[option] is not None:
msg = msgFunc(self.options[option])
if len(msg) > 0:
logger.log("\t{}".format(msg))
logger.flush(self.options['quiet'])
def addTest(self):
test = Test(name="New Test", description="Add a description", DUT=self.options['dut'])
test.pipe = self.options['pipe']
test.outputOnFail = self.options['output']
test.linesep = self.options['linesep']
self.getSuite().addTest(test)
return test
def loadArnold(self):
if syntax is not None:
logger.log("\t...using Arnold-Mode")
syn = syntax()
fileHnd = open(self.options['bench'])
content = []
for line in fileHnd:
if not line.startswith("#") and not line.strip() == "":
content.append(line.replace("ä", "ae").replace("Ä", "Ae").replace("ö", "oe").replace("Ö", "Oe").replace("ü", "ue").replace("Ü", "Ue").replace("ß", "ss"))
s = "".join(content)
ast = syn.parseString(s)
testList = buildTestList(ast)
suite = TestSuite(*testList)
suite.setDUT(self.options['dut'])
else:
logger.log("\t ... could not init arnold mode due to missing pyparsing package")
suite = None
return suite
def loadPython(self):
glb = {"__builtins__": __builtins__,
# External / Standard libraries
"parser": pyparsing,
"os": os,
"regex": re,
"math": math,
"itertools": itertools,
"struct": struct,
"collections": collections,
"fractions": fractions,
# nightmare specific things
"Test": Test,
"Suite": TestSuite,
"Mode": TestSuiteMode,
"State": TestState,
"Expectation": Expectation,
"ExpectFile": ExpectFile,
"Stringifier": Stringifier,
"StringifiedFile": StringifiedFile,
"CompareFiles": CompareFiles,
# Helping functions
"readFile": lambda fname: open(fname).read().rstrip() if os.path.exists(fname) else "File not found",
}
ctx = {self.options['suite']: None, "DUT": None}
execfile(self.options['bench'], glb, ctx)
if (self.options['suite'] in ctx):
suite = None
if 'DUT' in ctx and ctx['DUT'] is not None and self.options['dut'] is None:
self.setDUT(ctx['DUT'])
if (ctx[self.options['suite']] != None):
if ctx[self.options['suite']].__class__ == TestSuite:
suite = ctx[self.options['suite']]
if suite.DUT is None:
suite.setDUT(self.options['dut'])
if self.options['mode'] is None:
self.options['mode'] = suite.mode
elif suite.mode is None:
suite.mode = self.options['mode']
else:
suite = TestSuite(*ctx[self.options['suite']], **{'DUT': self.options['dut'], 'mode': self.options['mode']})
else:
logger.log("Sorry, but I can't find any tests inside the suite '{}'".format(self.options['suite']))
else:
logger.log("Sorry, but there was no test-suite in the file")
return suite
def loadSuite(self, fname=None):
"""Loads a python based suite from a file"""
if fname is not None:
self.options['bench'] = fname
if self.options['bench'] is not None and self.options['bench'] != "" and os.path.exists(self.options['bench']):
logger.log("\nReading testfile ...")
if self.options['relative']:
os.chdir(os.path.dirname(os.path.abspath(self.options['bench'])))
logger.log("Current Working Dir is: {}".format(os.getcwd()))
self.options['bench'] = os.path.basename(self.options['bench'])
if self.options['arnold']:
self.runsuite = self.loadArnold()
else:
self.runsuite = self.loadPython()
if self.runsuite is not None:
self.runsuite.options['commands'] = self.options['commands']
self.runsuite.setAll(
state=TestState.InfoOnly if self.options['info'] else TestState.Waiting,
pipe=self.options['pipe'],
out=self.options['output'],
diff=self.options['diff'],
timeout=self.options['timeout'],
linesep=self.options['linesep'],
ignoreEmptyLines=self.options['ignoreEmptyLines']
)
self.testCount = len(self.runsuite.testList)
logger.log("I have loaded {} Testcase{}".format(self.testCount, "s" if self.testCount > 0 else ""))
else:
logger.log("Sorry, but I failed to load the requested suite")
else:
logger.log("Sorry, but I couldn't find the file '{}'".format(self.options['bench']))
logger.flush(self.options['quiet'])
return self.runsuite
# def start(self, finished=None, test=-1):
# """start the runner-thread"""
# self.finished = finished
# self.options['test'] = test
# Thread.start(self)
def run(self):
"""Thread run function"""
if self.options['length']:
print len(self.runsuite.getTests())
elif len(self.options['save']) == 1:
logger.log("Saving Suite to {}".format(self.options['save'][0]))
self.saveToFile(self.options['save'][0])
else:
logger.flush(self.options['quiet'])
self.runsuite.setMode(self.options['mode'])
for test in self.runsuite.run(self.options['quiet'], tests=self.options['test']):
yield test
self.runsuite.stats(self.options['quiet'])
if self.finished is not None:
self.finished()
logger.flush(self.options['quiet'])
return  # end the generator; raising StopIteration here breaks under PEP 479
def countTests(self):
return len(self.runsuite.testList)
def __str__(self):
return self.toString()
def toString(self):
return self.options['suite'] + ' = ' + self.runsuite.toString()
def saveToFile(self, fn):
"""
Save the testsuite into a file
@type fn: String
@param fn: The filename
"""
fHnd = open(fn, "w")
fHnd.write("#!/usr/bin/env python\n\n")
fHnd.write("# nightmare - Testbench\n")
fHnd.write("# Saved at {}\n".format(time.strftime("%H:%M:%S")))
fHnd.write("# \n\n")
# fHnd.write("# Author: {}\n".format())
if self.options['dut'] is not None:
fHnd.write("# Device Under Test\n")
fHnd.write("DUT = \"{}\"\n\n".format(os.path.relpath(self.options['dut'])))
fHnd.write("# Test definitions\n")
fHnd.write("{} = [\n".format(self.options['suite']))
tests = []
for test in self.getSuite().getTests():
tests.append("\t{}".format(test.toString()))
fHnd.write(",\n".join(tests))
fHnd.write("\n]\n")
fHnd.close()
```
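`loadPython()` above executes the testbench with `execfile`, injecting `Test`, `Suite` and friends into its globals, and then looks for a module-level `DUT` plus a list bound to the suite name (default `suite`) — the same layout that `saveToFile()` writes. The following is a hedged sketch of such a testbench file; the `command` and `stdout` keyword arguments are guesses made for illustration, since the `Test` signature beyond `name`, `description` and `DUT` (which appear in `addTest()` above) is not shown in this excerpt.
```python
#!/usr/bin/env python
# Hypothetical nightmare testbench in the shape loadPython()/saveToFile()
# expect: a module-level DUT and a list bound to the suite name ("suite" by
# default). The `command` and `stdout` arguments are illustrative guesses,
# not the documented Test API; `Test` itself is injected by loadPython(), so
# no import is required inside the testbench.

# Device Under Test
DUT = "./my_program"

# Test definitions
suite = [
    Test(name="prints version",
         description="the tool reports a version string",
         command="$DUT --version",          # guessed argument name
         stdout=lambda out: "1.0" in out),  # guessed argument name
    Test(name="rejects unknown flags",
         description="an unknown flag must not succeed",
         command="$DUT --no-such-flag"),    # guessed argument name
]
```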
#### File: jpJuni0r/nightmare/setup.py
```python
import os
from setuptools import setup
try:
import py2exe
except ImportError:
py2exe = None
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
from version import *
Name = "nightmare"
Description = "NIGHTMARE is of Generous Help when Testing; May Arnold be Remembered Eternally"
Author = "<NAME>"
Mail = "<EMAIL>"
Url = 'https://github.com/hastern/nightmare'
Company = ''
Copyright = ''
MANIFEST_TEMPLATE = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(descr)s</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="<KEY>">
</assemblyIdentity>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="<KEY>"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
"""
includes = ['pyTest','pyTestMain','pyTestSuite','pyTestRunner','pyTestGui','pyTestEditForm','pyTestUtils','__main__','arnold_converter','version', 'pyparsing']
excludes = ['pyreadline','pyreadline.console', 'pyreadline.rlmain','unittest','email', 'email.Utils','calendar','_ssl','Tkinter',"Tkconstants", "tcl"]
packages = ['pyparsing']
icon_resources = [(1,"resource/nightmare.ico")]
bitmap_resources = []
other_resources = [(24, 1, MANIFEST_TEMPLATE % dict(prog=Name, descr=Description))]
dll_excludes = ['w9xpopen.exe',"MSVCP90.dll"]
mainScript = 'pyTestMain'
if py2exe is not None:
options = {"py2exe": {
"compressed": 1,
"optimize": 2,
"bundle_files": 1,
"includes": includes,
"excludes": excludes,
"packages": packages,
"dll_excludes": dll_excludes,
"dist_dir": "dist",
"custom_boot_script": '',
"unbuffered": True,
}
}
else:
options = {}
GUI2Exe_Target = {
'script': mainScript + '.py',
'icon_resources': icon_resources,
'bitmap_resources': bitmap_resources,
'other_resources': other_resources,
'dest_base': Name,
'version': Version,
'company_name': Company,
'copyright': Copyright,
'name': Name
}
setup(
name=Name, version=Version, description=Description,
author=Author, author_email=Mail, url=Url,
py_modules=includes,
license=read('LICENSE'),
long_description=read('README.md'),
entry_points={
'setuptools.installation':[
"eggsecutable = {}:main".format(mainScript)
]
},
install_requires=['pyparsing'],
options = options,
#console=[mainScript+'.py'],
#windows=[GUI2Exe_Target],
console=[GUI2Exe_Target],
data_files=[('',['resource/nightmare.ico'])],
#scripts=['pyTest.py'],
zipfile=None,
zip_safe=True,
)
```
{
"source": "jpjuvo/arduino-gpu-monitor",
"score": 2
} |
#### File: src/arduino_gpu_monitor/nvidia_utils.py
```python
import subprocess
import pynvml
def read_gpu_log():
"""Run `nvidia-smi -q` and parse its output into a flat key/value dict."""
out_dict = {}
try:
sp = subprocess.Popen(
['nvidia-smi', '-q'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
out_str = sp.communicate()[0]
if isinstance(out_str, bytes):
out_str = out_str.decode('utf-8')
out_list = out_str.split('\n')
for item in out_list:
try:
key, val = item.split(':')
key, val = key.strip(), val.strip()
out_dict[key] = val
except:
pass
except:
pass
return out_dict
def gpu_status(gpu_id=0):
"""Return (used MB, total MB, GPU util %, memory util %) for the given GPU via NVML; -1 values on failure."""
total_megs, used_megs = -1,-1
gpu_use_p, gpu_mem_p = -1,-1
try:
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
MB_SIZE = 1024*1024
total_megs = int(meminfo.total/MB_SIZE)
used_megs = int(meminfo.used/MB_SIZE)
utilization_rates = pynvml.nvmlDeviceGetUtilizationRates(handle)
gpu_use_p = utilization_rates.gpu
gpu_mem_p = utilization_rates.memory
pynvml.nvmlShutdown()
except:
pass
return (used_megs, total_megs, gpu_use_p, gpu_mem_p)
def compact_gpu_stats():
"""Combine the parsed nvidia-smi log and the NVML queries into one compact stats dict."""
gpu_log = read_gpu_log()
used_megs, total_megs, gpu_use_p, gpu_mem_p = gpu_status()
current_temperature, max_temperature = -1,-1
if 'GPU Current Temp' in gpu_log and 'GPU Max Operating Temp' in gpu_log:
current_temp_str = gpu_log['GPU Current Temp']
max_temp_str = gpu_log['GPU Max Operating Temp']
# strip the trailing 'C' unit and whitespace before converting to int
try:
current_temperature = int(current_temp_str.replace('C','').replace(' ',''))
max_temperature = int(max_temp_str.replace('C','').replace(' ',''))
except:
pass
product_name = gpu_log['Product Name'] if 'Product Name' in gpu_log else "-"
gpu_dict = {
'name':product_name,
'curr_temp': current_temperature,
'max_temp': max_temperature
}
if gpu_use_p > 0:
gpu_dict['gpu_util_p'] = gpu_use_p
if used_megs > 0 and total_megs > 0:
# get mem_p from values
gpu_mem_p = int(100 * (float(used_megs) / float(total_megs)))
if gpu_mem_p > 0:
gpu_dict['mem_util_p'] = gpu_mem_p
if used_megs > 0:
gpu_dict['used_mem'] = used_megs
if total_megs > 0:
gpu_dict['total_mem'] = total_megs
return gpu_dict
```
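`compact_gpu_stats()` folds the parsed `nvidia-smi -q` output and the NVML queries into one small dict keyed by `name`, `curr_temp`, `max_temp` and, when available, `gpu_util_p`, `mem_util_p`, `used_mem` and `total_mem`. Below is a hedged usage sketch; the import path `arduino_gpu_monitor.nvidia_utils` is assumed from the file layout, and the serial link to the Arduino is part of the wider project and not shown here, so the sketch only prints.
```python
# Hedged usage sketch for compact_gpu_stats(): poll once per second and build
# a compact one-line status string. Sending it to the Arduino over a serial
# port is outside this excerpt, so the sketch only prints the line.
import time

from arduino_gpu_monitor.nvidia_utils import compact_gpu_stats  # assumed import path


def status_line(stats):
    """Format the stats dict as e.g. 'GPU 43% MEM 61% 55/83C'."""
    return "GPU {0}% MEM {1}% {2}/{3}C".format(
        stats.get("gpu_util_p", -1),
        stats.get("mem_util_p", -1),
        stats.get("curr_temp", -1),
        stats.get("max_temp", -1),
    )


if __name__ == "__main__":
    while True:
        print(status_line(compact_gpu_stats()))
        time.sleep(1)
```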
{
"source": "jpjuvo/deepfake-video-detector",
"score": 2
} |
#### File: deepfake-video-detector/detector/DeepFakeDetector.py
```python
import os
import sys
import glob
import cv2
from PIL import Image
import numpy as np
from pathlib import Path
import random
import torch
from FaceDetection.FaceDetector import FaceDetector
from FaceDetection.FaceDetector import FaceDetectorError
from FaceDetection.FaceDetection import FaceDetection
from Util.VideoFrameSampler import VideoFrameSampler
from PersonTracking.MultiPersonFaceTracker import MultiPersonFaceTracker
from PersonTracking.TrackedPerson import TrackedPerson
from PersonTracking.util import extractPersons
from FeatureGenerators.FaceEmbeddings import FaceEmbeddings
from FeatureGenerators.FaceClassifier import FaceClassifier
from FeatureGenerators.PowerSpectrumClassifier import PowerSpectrumClassifier
from SecondLevelClassifier import SecondLevelClassifier
from FeatureGenerators.FaceSequenceClassifier import FaceSequenceClassifier
from Util.ImageUtil import JPEGCompression, ResizeImage
class VideoAugmentation:
""" Augmentations intented for second level model training. """
NONE = None
HALF_FPS = 'fps_15'
FOURTH_OF_SIZE = 'resize_smaller'
COMPRESS = "jpeg_compression"
class DeepFakeDetector:
def __version__(self):
return "0.9.0"
def __init__(self,
deepfake_models_directory,
third_party_models_directory,
n_first_frames=10,
n_spaced_frames=10,
downsampling=2,
small_face_size=(160,160),
large_face_size=(299,299),
face_padding=0.15,
max_retries=4,
predict_on_error=0.5,
low_light_th=60,
face_discard_percentage=0.0,
use_power_spectrum_clf=False,
verbose=0):
"""
Note, FaceNet expects to find pretrained models from /tmp/.cache/torch/checkpoints/ and downloads the weights if missing.
To get the weights without internet, copy the weights manually by running in jupyter cell:
!mkdir -p /tmp/.cache/torch/checkpoints/
!cp [weight folder]/20180402-114759-vggface2-logits.pth /tmp/.cache/torch/checkpoints/vggface2_DG3kwML46X.pt
!cp [weight folder]/20180402-114759-vggface2-features.pth /tmp/.cache/torch/checkpoints/vggface2_G5aNV2VSMn.pt
Parameters:
deepfake_models_directory (str): model folder of trained classifiers
third_party_models_directory (str): model folder of third party files such as blazeface weights
n_first_frames (int): Number of consecutive frames to sample (affects the output feature qualities and processing times). Default=10.
n_spaced_frames (int): Number of equally spaced frames to sample from the rest of the video after n_first_frames. Default=10.
downsampling (int): Video downsampling factor for the face detection model for faster processing (2 works well with HD videos but higher factors may miss more faces). Doesn't affect anything else. Default=2.
small_face_size (size int tuple): FaceNet face recognition model. Pretrained model is trained with (160,160) size, default=(160,160).
large_face_size (size int tuple): Default=(299,299).
face_padding (float): x and y padding percentage of width or height that is added on both sides. Default=0.15.
max_retries (int): Number of times to retry if less than min_faces faces are detected from processed frames. Each retry samples from the following frames. Default=4.
predict_on_error (float): This value gets predicted on Predict method's error. default=0.5.
low_light_th (int): If the average brightness from the sampled frames goes below this value, all frames are brightened, range [0,255], default=60.
face_discard_percentage (float): Percentage (0-1) of faces to drop. Dropping order comes from face detector confidence so that the least confident are dropped. Default=0.0.
use_power_spectrum_clf (bool): If powerspectrum classifier is used (https://arxiv.org/abs/1911.00686). By default, this is False to save inference time.
verbose (int): 0 = silent, 1 = print errors and warnings, 2 = print processing times of components and all errors and warnings. default=0.
"""
self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
if(self.device == 'cpu'):
raise Exception('Cuda is required')
print(self.device)
# locate model paths
(small_face_model_dirs,
large_face_model_dirs,
second_level_xgb_paths,
second_level_logreg_paths,
second_level_lgb_paths,
power_spectrum_path,
recurrent_model_dirs) = self._getModelPaths(deepfake_models_directory)
blazeface_path, blazeface_anchors = self._getThirdPartyModelPaths(third_party_models_directory)
self.frameSampler = VideoFrameSampler(n_first_frames=n_first_frames,
n_spaced_frames=n_spaced_frames,
low_light_th=low_light_th,
verbose=verbose)
self.faceDetector = FaceDetector(self.device,
blazeface_device='cpu',
blazeface_path=blazeface_path,
blazeface_anchors=blazeface_anchors,
mtcnn_downsampling=downsampling,
verbose=verbose)
self.faceEmbeddings = FaceEmbeddings(self.device,
n_first_frames=n_first_frames,
verbose=verbose)
self.faceClassifier = FaceClassifier(small_face_model_dirs,
large_face_model_dirs,
n_first_frames,
n_spaced_frames,
verbose=verbose)
self.faceSequenceClassifier = FaceSequenceClassifier(recurrent_model_dirs,
n_first_frames,
n_spaced_frames,
verbose=verbose)
if use_power_spectrum_clf:
self.powerSpectrumClassifier = PowerSpectrumClassifier(power_spectrum_path,
verbose=verbose)
self.secondLevelClassifier = SecondLevelClassifier(second_level_xgb_paths,
second_level_logreg_paths,
second_level_lgb_paths,
verbose=verbose)
self.n_first_frames = n_first_frames
self.n_spaced_frames = n_spaced_frames
self.large_face_size = large_face_size
self.small_face_size = small_face_size
self.face_padding = face_padding
self.max_retries = max_retries
self.predict_on_error = predict_on_error
self.face_discard_percentage = face_discard_percentage
self.use_power_spectrum_clf = use_power_spectrum_clf
self.verbose = verbose
self._printInfo()
def _printInfo(self):
print("#"*50)
print("DeepFakeDetector v." + self.__version__())
print("Sample {0} first frames and {1} spaced frames.".format(self.n_first_frames, self.n_spaced_frames))
print("Number of max retries is {0}".format(self.max_retries))
print("On error cases, predict {0}".format(self.predict_on_error))
print("#"*50)
def _getSubDirectories(self, dir_path):
return [os.path.join(dir_path, o) for o in os.listdir(dir_path) if os.path.isdir(os.path.join(dir_path,o))]
def _getSubFiles(self, dir_path, suffix=None):
return [os.path.join(dir_path, o) for o in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path,o)) and (suffix in o if suffix is not None else True)]
def _getModelPaths(self, dir_path):
small_face_model_dirs = []
large_face_model_dirs = []
second_level_xgb_paths = []
second_level_logreg_paths = []
second_level_lgb_paths = []
recurrent_model_dirs = []
power_spectrum_path = None
if dir_path is not None:
sub_dirs = self._getSubDirectories(dir_path)
for sub_dir in sub_dirs:
if "small_face" in sub_dir:
small_face_model_dirs = self._getSubDirectories(sub_dir)
elif "large_face" in sub_dir:
large_face_model_dirs = self._getSubDirectories(sub_dir)
elif "second_level" in sub_dir:
date_dirs = self._getSubDirectories(sub_dir)
if len(date_dirs) == 0:
continue
date_dir = date_dirs[0]
second_level_xgb_paths = self._getSubFiles(date_dir, '.bin')
second_level_logreg_paths = self._getSubFiles(date_dir, '.sav')
second_level_lgb_paths = self._getSubFiles(date_dir, '.txt')
elif "power_spectrum" in sub_dir:
date_dirs = self._getSubDirectories(sub_dir)
if len(date_dirs) == 0:
continue
date_dir = date_dirs[0]
if len(self._getSubFiles(date_dir, '.bin'))==0:
continue
power_spectrum_path = self._getSubFiles(date_dir, '.bin')[0]
elif "recurrent_cnn" in sub_dir:
recurrent_model_dirs = self._getSubDirectories(sub_dir)
return (small_face_model_dirs,
large_face_model_dirs,
second_level_xgb_paths,
second_level_logreg_paths,
second_level_lgb_paths,
power_spectrum_path,
recurrent_model_dirs)
def _getThirdPartyModelPaths(self, dir_path):
blazeface_path = None
blazeface_anchors = None
if dir_path is not None:
sub_dirs = self._getSubDirectories(dir_path)
for sub_dir in sub_dirs:
if "blazeface" in sub_dir:
blazeface_files = self._getSubFiles(sub_dir)
for bf_file in blazeface_files:
if "blazeface.pth" in bf_file:
blazeface_path = bf_file
elif "anchors.npy" in bf_file:
blazeface_anchors = bf_file
return (blazeface_path, blazeface_anchors)
def _getRandomAugmentation(self):
"""
According to this article: https://arxiv.org/abs/1910.08854
DFDC testing is done with augmentations.
This function returns
p=2/9, HALF FPS
p=2/9, FOURTH OF SIZE
p=2/9, COMPRESS
p=3/9, No augmentation
"""
selector = random.randint(1,9)
if self.verbose > 0:
print("Augmentation selector: {0}".format(selector))
if selector < 3:
return VideoAugmentation.HALF_FPS
elif selector < 5:
return VideoAugmentation.FOURTH_OF_SIZE
elif selector < 7:
return VideoAugmentation.COMPRESS
else:
return VideoAugmentation.NONE
def _augmentPILImages(self, imgs, augmentation):
if augmentation is None or augmentation == VideoAugmentation.HALF_FPS:
return imgs
if augmentation == VideoAugmentation.COMPRESS:
return [JPEGCompression(img) for img in imgs]
if augmentation == VideoAugmentation.FOURTH_OF_SIZE:
return [ResizeImage(img) for img in imgs]
raise NameError("unknown augmentation {0}".format(augmentation))
def GetFeatures(self,
videoPath,
frame_offset=0,
frame_end_offset=0,
retries=0,
return_data=False,
replicate_videoPaths=[],
apply_augmentations=False,
predetermined_augmentation=None):
"""
Return second level features for DeepFake classification.
Parameters:
videoPath (string): Path to .mp4 video.
frame_offset (int): Start sampling from this frame. Default=0.
frame_end_offset (int): Offset for end of video. This is automatically increased when faces are not detected from the last sampled frame
retries (int): Number of retries done already. This is incremented when calling recursively on face detection errors until max_retries is reached. Default=0.
apply_augmentations (bool): augmentation to face images. This is intended for training second level models, default=False.
predetermined_augmentation (str) : One of fps_15, resize_smaller, jpeg_compression or None, default=None.
Throws: Exception if max_retries is reached.
"""
# Augmentations are None by default
augmentation = predetermined_augmentation if not apply_augmentations else self._getRandomAugmentation()
imgs, brightness_factor = self.frameSampler.getFrames(videoPath,
frame_offset=frame_offset, # read starting frame, this is increased on every retry
frame_end_offset=frame_end_offset,
first_frame_step=2 if (augmentation == VideoAugmentation.HALF_FPS) else 1,
increase_lightness=retries>=2 # force brightness increase after second try
)
replicate_imgs = [self.frameSampler.getFrames(path,
frame_offset,
frame_end_offset=frame_end_offset,
first_frame_step=2 if (augmentation == VideoAugmentation.HALF_FPS) else 1,
override_brightness_fc=brightness_factor # use the same brightnesses in replicate videos as in original
)[0] for path in replicate_videoPaths]
# apply augmentation - this is an identity function if augmentation is None
imgs = self._augmentPILImages(imgs, augmentation)
replicate_imgs = self._augmentPILImages(replicate_imgs, augmentation)
min_image_side = min(imgs[0].width, imgs[0].height)
face_dets_list, average_person_count, faceDetectionError = self.faceDetector.getFaceBoundingBoxes(imgs,
use_search_limits=retries<2 and min_image_side >= 480, # use search limits on the first two tries if the image has a decent resolution
speedOverAccuracy=retries==0 # try the first frame with only mtcnn model
)
# retry if faces were not found from all frames
if len(face_dets_list) < (self.n_first_frames + self.n_spaced_frames):
if retries < self.max_retries:
skip_frames_for_next_try = self.n_first_frames * 2 * (retries+1) # if first_frames=10, skips are 20, 20+40=60, 60+60=120, 120+80=200
skip_end_frames_for_next_try = 10 if (faceDetectionError == FaceDetectorError.MISSING_LAST) else 0
return self.GetFeatures(videoPath,
frame_offset = frame_offset + skip_frames_for_next_try,
frame_end_offset = frame_end_offset + skip_end_frames_for_next_try,
retries = retries+1,
return_data = return_data,
replicate_videoPaths = replicate_videoPaths,
predetermined_augmentation=augmentation)
else:
raise Exception("Maximum retries with " + str(videoPath))
# create tracked persons out of facedetections
def __PILs2Numpys(pil_imgs):
return [np.array(img, dtype=np.uint8) for img in pil_imgs]
trackedPersons = extractPersons(face_dets_list,
average_person_count=average_person_count,
np_imgs=__PILs2Numpys(imgs),
small_face_size=self.small_face_size,
large_face_size=self.large_face_size,
face_padding=self.face_padding,
n_first_images=self.n_first_frames,
face_discard_percentage=self.face_discard_percentage)
trackedReplicatePersons = [extractPersons(face_dets_list, average_person_count, __PILs2Numpys(imgs)) for imgs in replicate_imgs]
# refine tracked person's faces to embeddings
def __faces2embeddings(trackedPerson):
trackedPerson.faceEmbeddings = self.faceEmbeddings.getEmbeddings(trackedPerson.small_faces_array)
def __listOfFaces2embeddings(trackedPersonsList):
for trackedPerson in trackedPersonsList:
__faces2embeddings(trackedPerson)
__listOfFaces2embeddings(trackedPersons)
for replicates in trackedReplicatePersons:
__listOfFaces2embeddings(replicates)
# if return data instead of features
if return_data:
allPersons = trackedPersons
for replicatePersons in trackedReplicatePersons:
allPersons += replicatePersons
return [person.getData() for person in allPersons]
def __collectPersonFeatures(trackedPerson):
return np.array(
self.faceClassifier.getFaceClassifierFeats(trackedPerson.small_faces_array, isSmall=True, weights=trackedPerson.getWeights())+
self.faceClassifier.getFaceClassifierFeats(trackedPerson.large_faces_array, isSmall=False, weights=trackedPerson.getWeights())+
self.faceEmbeddings.getFaceEmbeddingFeatures(trackedPerson.faceEmbeddings)+
self.faceSequenceClassifier.getFaceClassifierFeats(trackedPerson.large_faces_array) +
(self.powerSpectrumClassifier.getFeatures(trackedPerson.raw_faces_list) if self.use_power_spectrum_clf else [])
)
return [__collectPersonFeatures(trackedPerson) for trackedPerson in trackedPersons]
def GetFeatureNames(self):
names = []
names += self.faceClassifier.getFeatNames()
names += self.faceEmbeddings.getFeatNames()
names += self.faceSequenceClassifier.getFeatNames()
if self.use_power_spectrum_clf:
names += self.powerSpectrumClassifier.getFeatNames()
return np.array(names)
def Predict(self, videoPath, frame_offset=0,
handleErrors=True, apply_augmentations=False,
featureClassifiers=['xgb','logreg','lightgbm'], multiPersonMode='max'):
"""
Prediction for any fake persons in the video. Returns confidence for a fake [0-1].
videoPath (str): Videofile path
frame_offset (int): start processing video from this frame, default=0
handleErrors (bool): If True (default), the method handles exceptions and outputs predict_on_error. If False, the exception is passed to caller.
featureClassifiers (list of str): List of feature classifiers to combine. Available options are: xgb, logreg and lightgbm. All are included by default.
multiPersonMode (str): How to combine predictions of multiple persons. One of max, avg, weighted-avg. In weighted avg, weights are 1 and 2 for <0.5 and >=0.5 predictions. Default=max
"""
def __predict():
# collect features from each person
feats_list = self.GetFeatures(videoPath, frame_offset=frame_offset, apply_augmentations=apply_augmentations)
person_preds = [self.secondLevelClassifier.predict(feats, featureClassifiers=featureClassifiers) for feats in feats_list]
if self.verbose > 1:
print("Person predictions: {0}".format(person_preds))
# one of the persons can be fake so take max because 0=real and 1=fake
if multiPersonMode == 'max':
return max(person_preds)
if multiPersonMode == 'avg':
return np.mean(np.array(person_preds))
if multiPersonMode == 'weighted-avg':
return np.average(np.array(person_preds),weights=np.where(np.array(person_preds) < 0.5,1,2))
if(handleErrors):
try:
return __predict()
except:
print("Could not predict " + str(videoPath) + ". Predicting {0}.".format(self.predict_on_error))
return self.predict_on_error
else:
# Allow to crash for debugging purposes or for external error handling
return __predict()
```
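A hedged usage sketch for the class above, using only parameters and methods documented in its docstrings; the directory and video paths are placeholders, and the module import path is assumed from the file layout.
```python
# Hedged usage sketch for DeepFakeDetector; directory and video paths are
# placeholders and all other constructor arguments keep their defaults.
from DeepFakeDetector import DeepFakeDetector

detector = DeepFakeDetector(
    deepfake_models_directory="/models/deepfake",        # trained classifier folders
    third_party_models_directory="/models/third_party",  # e.g. blazeface weights
    verbose=1,
)

# Predict() returns a fake-confidence in [0, 1]; with handleErrors=True
# (the default) it returns predict_on_error (0.5) if anything goes wrong.
score = detector.Predict("/videos/sample.mp4")
print("fake confidence: {0:.3f}".format(score))
```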
#### File: detector/FeatureGenerators/FaceClassifier.py
```python
import numpy as np
import cv2
import torch
import fastai
from fastai.vision import *
from Util.Timer import Timer
from Util.FeatureStats import preds2features, getStatFeatNames
class ArrayImageList(ImageList):
"""Custom Fastai ImageList that is constructed from a numpy image array."""
@classmethod
def from_numpy(cls, numpy_array):
return cls(items=numpy_array)
def label_from_array(self, array, label_cls=None, **kwargs):
return self._label_from_list(array,label_cls=label_cls,**kwargs)
def get(self, i):
n = self.items[i]
def _numpy2fastaiImage(img_arr):
return fastai.vision.Image(pil2tensor(img_arr, dtype=np.float32).div_(255))
return _numpy2fastaiImage(n)
class TTA:
NONE = 'original'
BRIGHT = 'bright'
ZOOM = 'zoom'
class FaceClassifier:
def __init__(self,
small_face_model_dirs,
large_face_model_dirs,
n_first_frames,
n_spaced_frames,
verbose=0):
self.learn_small_faces = [load_learner(path=small_face_model_dir) for small_face_model_dir in small_face_model_dirs]
self.learn_large_faces = [load_learner(path=large_face_model_dir) for large_face_model_dir in large_face_model_dirs]
self.n_first_frames = n_first_frames
self.n_spaced_frames = n_spaced_frames
self.TTAs = [TTA.NONE, TTA.BRIGHT, TTA.ZOOM] # TTA variants to run; drop TTA.ZOOM from this list to save inference time
self.verbose = verbose
print("Loaded {0} small face classifier and {1} large face classifier models.".format(self.size()[0],self.size()[1]))
# print out paths
for i,path in enumerate(small_face_model_dirs):
print("{0} - Small face model: {1}".format(i,path))
for i,path in enumerate(large_face_model_dirs):
print("{0} - Large face model: {1}".format(i,path))
def size(self):
return (len(self.learn_small_faces),len(self.learn_large_faces))
def getAugmentations(self, tta=TTA.NONE):
if tta == TTA.NONE:
return []
elif tta == TTA.BRIGHT:
return get_transforms(do_flip=True,
flip_vert=False,
max_rotate=0,
max_zoom=1.0,
max_lighting=0,
max_warp=0.0,
p_affine=1,
p_lighting=1,
xtra_tfms=[brightness(change=0.7)])
elif tta == TTA.ZOOM:
return get_transforms(do_flip=True,
flip_vert=False,
max_rotate=0,
max_zoom=1.0,
max_lighting=0,
max_warp=0.0,
p_affine=1,
p_lighting=1,
xtra_tfms=zoom_crop(scale=1.2))
else:
raise "Unrecognized TTA - {0}".format(tta)
def getFeatNames(self):
"""Returns a list of feature names"""
FF_names = []
statFeatNames = getStatFeatNames()
for size in ['small','large']:
for tta in self.TTAs:
for i in range(self.size()[0] if size=='small' else self.size()[1]):
for mode in ['first','spaced']:
FF_names += ["{0}_face_clf_{2}_{1}_{3}_{4}".format(size,i,statName, tta, mode) for statName in statFeatNames]
return FF_names
def getFaceClassifierFeats(self, faces_array, isSmall, weights=None):
timer = Timer()
n_total_frames = faces_array.shape[0]
feats_list = []
def isSoftmaxOutput(preds, eps=1e-6):
mean = torch.mean(torch.sum(preds,dim=1))
return (torch.abs(mean-1.0) < eps).item()
for tta in self.TTAs:
testDataSet = (ArrayImageList.from_numpy(faces_array)
.split_none()
.label_empty()
.transform(self.getAugmentations(tta))
.databunch(bs=n_total_frames)).normalize(imagenet_stats)
testDataSet.train_dl = testDataSet.train_dl.new(shuffle=False) # set shuffle off to keep the order
test_batch = None
for learn in self.learn_small_faces if isSmall else self.learn_large_faces:
if test_batch is None:
# one batch is the whole test set
test_batch = learn.data.norm(testDataSet.one_batch())[0].cuda()
# Get predictions and check if the model outputs softmax preds. If not, apply softmax.
raw_preds = learn.pred_batch(ds_type=DatasetType.Test,
batch=(test_batch,None))
if not isSoftmaxOutput(raw_preds):
raw_preds = torch.softmax(raw_preds, dim=1)
# get Fake category preds from test set
# The models are trained with 0:'FAKE' 1:'REAL' labels so first softmax index is the fake
preds = raw_preds.numpy()[:,0]
if self.verbose > 1:
print("Face classifier {0} preds: {1}".format("small" if isSmall else "large", preds))
# get statistical features from the preds
feats_list += preds2features(preds[:self.n_first_frames],
weights=None if weights is None else weights[:self.n_first_frames],
remove_n_outliers=0)
feats_list += preds2features(preds[self.n_first_frames:],
weights=None if weights is None else weights[self.n_first_frames:],
remove_n_outliers=0)
del raw_preds
del test_batch
del testDataSet
timer.print_elapsed(self.__class__.__name__, verbose=self.verbose)
return feats_list
```
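The softmax check above (`isSoftmaxOutput`) guards against models that return raw logits instead of probabilities. A small self-contained sketch of the same idea (a slightly stricter all-rows variant; `ensure_softmax` is a hypothetical helper name):
```python
import torch

def ensure_softmax(raw_preds, eps=1e-6):
    # If the rows do not already sum to ~1, treat them as logits and apply softmax.
    row_sums = torch.sum(raw_preds, dim=1)
    if not bool(torch.all(torch.abs(row_sums - 1.0) < eps)):
        return torch.softmax(raw_preds, dim=1)
    return raw_preds

logits = torch.tensor([[2.0, 0.5], [0.1, 1.2]])
probs = ensure_softmax(logits)
print(probs.sum(dim=1))  # tensor([1., 1.])
```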
#### File: detector/FeatureGenerators/FaceSequenceClassifier.py
```python
import numpy as np
import os
import cv2
import torch
import fastai
from fastai.vision import *
from Util.Timer import Timer
from Util.FeatureStats import preds2features, getStatFeatNames
from RecurrentModel.RecurrentCNN import MixedVideoSequenceModel, VideoSequenceModel
from RecurrentModel.RecurrentModelConfig import RecurrentModelConfig
from RecurrentModel.ImageSequence import ImageSequence
from RecurrentModel.ImageSequenceList import ImageSequenceList
class ArrayImageSequenceList(ImageList):
"""Custom Fastai ImageList that is constructed from a numpy image array."""
@classmethod
def from_numpy(cls, numpy_array):
return cls(items=numpy_array)
def label_from_array(self, array, label_cls=None, **kwargs):
return self._label_from_list(array,label_cls=label_cls,**kwargs)
def get(self, i):
n = self.items[i]
return ImageSequence(list(n))
class TTA:
NONE = 'original'
BRIGHT = 'bright'
ZOOM = 'zoom'
class FaceSequenceClassifier:
def __init__(self,
sequence_model_dirs,
n_first_frames,
n_spaced_frames,
verbose=0):
self.learn_sequence_models = [load_learner(path=model_dir) for model_dir in sequence_model_dirs]
self.configs = [RecurrentModelConfig.fromDir(model_dir) for model_dir in sequence_model_dirs]
self.n_first_frames = n_first_frames
self.n_spaced_frames = n_spaced_frames
self.TTAs = [TTA.NONE, TTA.BRIGHT, TTA.ZOOM]
# number of sequence batches is low (1-3 batches)
# if set, this tells the statistics function not to return std and median
self.low_sample_count = False #True
self.verbose = verbose
print("Loaded {0} face sequence classifier models.".format(self.size()))
def size(self):
return len(self.learn_sequence_models)
def getAugmentations(self, tta=TTA.NONE):
if tta == TTA.NONE:
return []
elif tta == TTA.BRIGHT:
return get_transforms(do_flip=True,
flip_vert=False,
max_rotate=0,
max_zoom=1.0,
max_lighting=0,
max_warp=0.0,
p_affine=1,
p_lighting=1,
xtra_tfms=[brightness(change=0.7)])
elif tta == TTA.ZOOM:
return get_transforms(do_flip=True,
flip_vert=False,
max_rotate=0,
max_zoom=1.0,
max_lighting=0,
max_warp=0.0,
p_affine=1,
p_lighting=1,
xtra_tfms=zoom_crop(scale=1.2))
else:
raise "Unrecognized TTA - {0}".format(tta)
def getFeatNames(self):
"""Returns a list of feature names"""
FF_names = []
statFeatNames = getStatFeatNames(low_sample_count=self.low_sample_count)
for tta in self.TTAs:
for i in range(self.size()):
FF_names += ["seq_clf_len-{0}_start-{1}_{2}_{3}_{4}".format(self.configs[i].getLenSequence(),
self.configs[i].getMinFrameIndex(),
i,
tta,
statName) for statName in statFeatNames]
return FF_names
def getFaceClassifierFeats(self, faces_array):
timer = Timer()
n_total_frames = faces_array.shape[0]
feats_list = []
def isSoftmaxOutput(preds, eps=1e-6):
mean = torch.mean(torch.sum(preds,dim=1))
return (torch.abs(mean-1.0) < eps).item()
for learn, config in zip(self.learn_sequence_models, self.configs):
len_seq = config.getLenSequence()
min_frame = config.getMinFrameIndex()
max_frame = config.getMaxFrameIndex()
# there can be more samples than defined in config
if max_frame < n_total_frames - 1 and min_frame >= self.n_first_frames:
max_frame = n_total_frames - 1
total_frames = (max_frame - min_frame)
step_size = len_seq // 2
bs = 1 + ((total_frames - len_seq) // step_size)
batch = np.zeros((bs,config.getLenSequence(),*faces_array[0].shape),np.float32)
for i in range(bs):
start = min_frame + i * step_size
batch[i] = faces_array[start:start+len_seq]
for tta in self.TTAs:
testDataSet = (ArrayImageSequenceList.from_numpy(batch)
.split_none()
.label_empty()
.transform(self.getAugmentations(tta))
.databunch(bs=bs))
testDataSet.train_dl = testDataSet.train_dl.new(shuffle=False) # set shuffle off to keep the order
test_batch = None
if test_batch is None:
# one batch is the whole test set
test_batch = testDataSet.one_batch()[0].cuda()
# Get predictions and check if the model outputs softmax preds. If not, apply softmax.
raw_preds = learn.pred_batch(ds_type=DatasetType.Test,
batch=(test_batch,None))
if not isSoftmaxOutput(raw_preds):
raw_preds = torch.softmax(raw_preds, dim=1)
# get Fake category preds from test set
# The models are trained with 0:'FAKE' 1:'REAL' labels so first softmax index is the fake
preds = raw_preds.numpy()[:,0]
if self.verbose > 1:
print("Face sequence clf preds: {0}".format(preds))
# get statistical features from the preds
feats_list += preds2features(preds,
remove_n_outliers=0,
low_sample_count=self.low_sample_count)
del raw_preds
del test_batch
del testDataSet
del batch
timer.print_elapsed(self.__class__.__name__, verbose=self.verbose)
return feats_list
```
#### File: detector/RecurrentModel/ImageSequence.py
```python
from fastai import *
from fastai.vision import *
from fastai.callbacks import *
import random
import cv2
import numpy as np
class ImageSequence(ItemBase):
def __init__(self, np_imgs):
self.fastai_imgs = [vision.Image(px=pil2tensor(np_img/255., np.float32)) for np_img in np_imgs]
# we still keep track of the initial object (usually in an obj attribute)
# to be able to show nice representations later on.
self.obj = np_imgs
# the data attribute is what will be given to the model
self.data = self.convertImagesToData()
def apply_tfms(self, tfms,*args, **kwargs):
# keep random state to apply the same augmentations for all images
random_state = random.getstate()
for i in range(len(self.fastai_imgs)):
random.setstate(random_state)
self.fastai_imgs[i] = self.fastai_imgs[i].apply_tfms(tfms, *args, **kwargs)
self.data = self.convertImagesToData()
return self
def __repr__(self):
return f'{self.__class__.__name__}'
def to_one(self):
return Image(torch.cat([img.data for img in self.fastai_imgs],dim=0)
.transpose(1,0)
.transpose(1,2))
def convertImagesToData(self):
# returns torch.tensor of shape [sequence_length, c, h, w]
imagenet_mean = torch.tensor(0.45)
imagenet_std = torch.tensor(0.225)
return torch.cat([((img.data - imagenet_mean)/imagenet_std).unsqueeze(dim=0) for img in self.fastai_imgs],dim=0)
```
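`apply_tfms` above re-seeds the random state before each frame so the whole sequence receives identical augmentations. The same trick in isolation, with a toy transform (`apply_same_augmentation` is a hypothetical name):
```python
import random

def apply_same_augmentation(items, tfm):
    # Re-seed Python's RNG before each item so every frame in the sequence
    # receives the identical random augmentation (as in ImageSequence.apply_tfms).
    state = random.getstate()
    out = []
    for item in items:
        random.setstate(state)
        out.append(tfm(item))
    return out

jitter = lambda x: x + random.uniform(-1, 1)
print(apply_same_augmentation([0.0, 10.0, 20.0], jitter))  # the same offset is added to each value
```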
#### File: training/selfsupervised/ContrastiveImageTuple.py
```python
from fastai import *
from fastai.vision import *
import torch
class ContrastiveImageTuple(ItemBase):
def __init__(self, img1, img2):
self.img1,self.img2 = img1,img2
self.obj = (img1,img2)
self.data = self.convertImagesToData()
def apply_tfms(self, tfms,*args, **kwargs):
self.img1 = self.img1.apply_tfms(tfms, *args, **kwargs)
self.img2 = self.img2.apply_tfms(tfms, *args, **kwargs)
self.data = self.convertImagesToData()
return self
def __repr__(self):
return f'{self.__class__.__name__}'
def to_one(self):
return Image(torch.cat([img.data for img in [self.img1, self.img2]],dim=2))
def convertImagesToData(self):
# returns torch.tensor of shape [2, c, h, w] (the two contrastive views stacked)
imagenet_mean = torch.tensor(0.45)
imagenet_std = torch.tensor(0.225)
return torch.cat([((img.data - imagenet_mean)/imagenet_std).unsqueeze(dim=0) for img in [self.img1,self.img2]],dim=0)
```
#### File: training/util/fastaiAugmentations.py
```python
import albumentations as albu
import PIL
import numpy as np
import fastai
from fastai.vision import *
"""
Fastai wrappers for Albumentation pixel level augmentations.
Fastai applies augmentations on tensors, so to include a pixel-level augmentation
we have to convert the image back to numpy, apply the transform, and then convert back to a tensor.
"""
def JPEGAugment(quality_lower=60, quality_upper=100, always_apply=False, p=0.5):
return alb_tfm2fastai(albu.JpegCompression(quality_lower=quality_lower,
quality_upper=quality_upper,
always_apply=always_apply,
p=p))
def HueSaturationValueAugment(hue_shift_limit=20,sat_shift_limit=30,val_shift_limit=20,always_apply=False,p=0.5):
return alb_tfm2fastai(albu.HueSaturationValue(hue_shift_limit=hue_shift_limit,
sat_shift_limit=sat_shift_limit,
val_shift_limit=val_shift_limit,
always_apply=always_apply,
p=p))
def BlurAugment(blur_limit=7,always_apply=False,p=0.5):
return alb_tfm2fastai(albu.Blur(blur_limit=blur_limit,
always_apply=always_apply,
p=p))
def DownscaleAugment(scale_min=0.5,scale_max=0.9,interpolation=0,always_apply=False,p=0.65):
return alb_tfm2fastai(albu.Downscale(always_apply=always_apply,
p=p,
scale_min=scale_min,
scale_max=scale_max,
interpolation=interpolation))
def tensor2np(x):
np_image = x.cpu().permute(1, 2, 0).numpy()
np_image = (np_image * 255).astype(np.uint8)
return np_image
def alb_tfm2fastai(alb_tfm):
def _alb_transformer(x):
# tensor to numpy
np_image = tensor2np(x)
# apply albumentations
transformed = alb_tfm(image=np_image)['image']
# back to tensor
tensor_image = pil2tensor(transformed, np.float32)
tensor_image.div_(255)
return tensor_image
transformer = fastai.vision.transform.TfmPixel(_alb_transformer)
return transformer()
```
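A hypothetical usage sketch: the wrapped albumentations transforms can be appended to a fastai v1 transform list via `xtra_tfms`. The import of `fastaiAugmentations` assumes the file above is on the path; parameter values are illustrative.
```python
from fastai.vision import get_transforms
# Module name assumed from the file path above:
from fastaiAugmentations import JPEGAugment, BlurAugment

extra_tfms = [
    JPEGAugment(quality_lower=60, quality_upper=100, p=0.5),
    BlurAugment(blur_limit=5, p=0.25),
]
tfms = get_transforms(do_flip=True, max_rotate=10, xtra_tfms=extra_tfms)
# tfms can then be passed to ImageList(...).transform(tfms, size=...) as usual.
```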
#### File: deepfake-video-detector/util/FFFimagesampler.py
```python
import os
import glob
import torch
import cv2
from PIL import Image
import numpy as np
import pandas as pd
from pathlib import Path
import math
import torch
import torch.nn as nn
from torchvision import transforms
# See github.com/timesler/facenet-pytorch:
from facenet_pytorch import MTCNN, InceptionResnetV1
class FFFimagesampler:
def __init__(self, faceforensics_model_paths, n_frames=10, downsampling=2, face_embedding_size=(160,160), faceforensics_size=(299,299), face_padding=0.15, max_retries=5, min_frames=None):
"""
Process first n consecutive frames of a video and output high-level features for a second level DeepFake classifier.
This class detects and tracks faces of 1-2 persons and processes each person's faces for DeepFake features.
Features include max deviation from a centroid face embedding during the sampled frames and max consecutive frame embedding distances.
FaceForensics features include mean and max DeepFake classifier outputs for each person's faces.
In two-person cases, maximum feature values are returned.
Note, FaceNet expects to find pretrained models from /tmp/.cache/torch/checkpoints/ and downloads the weights if missing.
To get the weights without internet, copy the weights manually by running in jupyter cell:
!mkdir -p /tmp/.cache/torch/checkpoints/
!cp [weight folder]/20180402-114759-vggface2-logits.pth /tmp/.cache/torch/checkpoints/vggface2_DG3kwML46X.pt
!cp [weight folder]/20180402-114759-vggface2-features.pth /tmp/.cache/torch/checkpoints/vggface2_G5aNV2VSMn.pt
Parameters:
faceforensics_model_paths (string): list of paths to pretrained faceforensics models. Download pretrained models from http://kaldir.vc.in.tum.de/FaceForensics/models/faceforensics++_models.zip
n_frames (int): Number of consecutive frames to sample (affects the output feature qualities and processing times). Default=10.
downsampling (int): Video downsampling factor for the Face detection model for faster processing (2 works well with HD videos but higher factors may miss more faces). Doesn't affect anything else. Default=2.
face_embedding_size (size int tuple): FaceNet face recognition model. Pretrained model is trained with (160,160) size, default=(160,160).
faceforensics_size (size int tuple): FaceForensics++ model's input size. Default=(299,299).
face_padding (float): x and y padding percentage of width or height that is added on both sides for the FaceForensics++ face inputs. Faceforensics++ paper enlarged face crops 1.3 times. Default=0.15.
max_retries (int): Number of times to retry if less than min_faces faces are detected from processed frames. Each retry samples from the following frames. Default=5.
min_frames (int): If not None, min frames before retry. If None, min_frames is n_frames.
"""
self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
if(self.device == 'cpu'):
raise Exception('Cuda is required')
# Face detector model - finds face bounding boxes
self.mtcnn = MTCNN(keep_all=True, select_largest=False, device=self.device).eval()
# FaceForensics face image preprocessing
self.xception_default_data_transforms = {
'test': transforms.Compose([
transforms.Resize(faceforensics_size),
transforms.ToTensor(),
transforms.Normalize([0.5] * 3, [0.5] * 3)
]),
}
self.n_frames = n_frames
self.downsampling = downsampling
self.post_function = nn.Softmax(dim=1)
self.faceforensics_size = faceforensics_size
self.face_embedding_size = face_embedding_size
self.face_padding = face_padding
self.max_retries = max_retries
self.min_frames = min_frames if min_frames is not None else n_frames
#############################################################
################### Face Detection #########################
def _getFaceBBs(self, imgs):
full_size = imgs[0].size
ds_imgs = [img.resize((full_size[0]// self.downsampling, full_size[1]// self.downsampling)) for img in imgs]
face_dets_list, scores_list = self.mtcnn.detect(ds_imgs)
try:
face_dets_list = [face_det * self.downsampling for face_det in face_dets_list]
except:
return [],[]
return face_dets_list, scores_list
#############################################################
############### Face tracking and postprocessing ############
def _removeOverlappingDetections(self, bbs, scores, n_max=2):
"""Filter out the less confident overlapping bounding boxes
and discard the least confident extra detections (>n_max)"""
# don't do anything with one or less detection
if(len(bbs)<=1):
return bbs
remove_list = []
#remove overlapping
for i in range(len(bbs)):
for j in range(i + 1,len(bbs)):
# detections are ordered the largest first
# get leftmost, rightmost
LM,RM = (i,j) if (bbs[i][0] < bbs[j][0]) else (j,i)
if (bbs[RM][0] < bbs[LM][2]): # overlapping horizontally
# get topmost bottommost
TM, BM = (i,j) if (bbs[i][1] < bbs[j][1]) else (j,i)
if (bbs[BM][1] < bbs[TM][3]): # overlapping vertically
remove_list.append(j) # use the original order to judge importance
# return filtered and only n_max bbs at maximum
keep_bbs = [bbs[i] for i in range(len(bbs)) if i not in remove_list]
return keep_bbs[:min(len(keep_bbs),n_max)]
def _getCenter(self, bb):
return ((bb[0]+bb[2])//2, (bb[1]+bb[3])//2)
def _getDistance(self, bb1,bb2):
c1 = self._getCenter(bb1)
c2 = self._getCenter(bb2)
return math.sqrt(pow(c1[0]-c2[0],2) + pow(c1[1]-c2[1],2))
def _faceTracking(self, bbs, prev_bbs):
# match bbs to prev_bbs
# len(bbs) <= prev_bbs
if(len(prev_bbs)<=1):
return bbs
new_bbs = []
bbs_indices = [0,1] if self._getDistance(bbs[0], prev_bbs[0]) < self._getDistance(bbs[0], prev_bbs[1]) else [1,0]
bbs_indices = [bbs_indices[i] for i in range(len(bbs))]
for i in range(len(prev_bbs)):
if i in bbs_indices:
new_bbs.append(bbs[bbs_indices.index(i)])
else:
new_bbs.append(prev_bbs[i])
return new_bbs
def _getFaceCrops(self, img, bbs, padding_percentage=0, face_size=(160,160), aspect_resize=False):
"""
Parameters:
img (PIL image): image
bbs (numpy array): bounding boxes
face_size (size tuple of ints): returned face image size
aspect_resize (boolean): Resize keeping the aspect ratio. Gets more padding to the smaller dimension. Default=False.
Returns:
faces (numpy arrays)
"""
imgs = []
np_img = np.array(img, dtype=np.uint8)
for bb in bbs:
w = bb[2]-bb[0]
h = bb[3]-bb[1]
pad_0 = int(round(w*padding_percentage))
pad_1 = int(round(h*padding_percentage))
if aspect_resize:
if (w > h): # pad more height
pad_1 += (w-h)//2
else:
pad_0 += (h-w)//2
imgs.append(cv2.resize(np_img[
max(0,int(bb[1] - pad_1)):min(np_img.shape[0], int(bb[3] + pad_1)),
max(0,int(bb[0] - pad_0)):min(np_img.shape[1],int(bb[2] + pad_0)),
:],(face_size)))
return imgs
#############################################################
####################### MAIN ################################
def Predict(self, videoPath, n_frames, return_embeddings=False, frame_offset=0, retries=0):
"""
Return second level features for DeepFake classification.
Parameters:
videoPath (string): Path to .mp4 video.
return_embeddings (boolean): Return face embedding features. Default=True
frame_offset (int): Start sampling from this frame. Default=0.
retries (int): Number of retries done already. This is incremented when calling recursively on face detection errors until max_retries is reached. Default=0.
Throws: Exception if max_retries is reached.
Returns:
faceforensics_features (float array of shape (2 * number of FF models)): FaceForensics++ predictions for faces 0=real 1=fake. Mean and Max of all frames and max of all persons.
embedding_feature (float array of shape (2)): Face embedding max centroid deviation and max consecutive frame difference. Max values of all persons.
"""
# Create video reader and find length
v_cap = cv2.VideoCapture(str(videoPath))
v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
imgs = []
for i,j in enumerate(range(frame_offset,v_len)):
success, vframe = v_cap.read()
vframe = cv2.cvtColor(vframe, cv2.COLOR_BGR2RGB)
imgs.append(Image.fromarray(vframe))
if i>=n_frames-1:
break
v_cap.release()
n_imgs = n_frames
num_persons = 2
prev_bbs = []
face_dets_list, scores_list = self._getFaceBBs(imgs)
# retry if less than n_frames faces
if len(face_dets_list) < n_frames:
if retries < self.max_retries:
return self.Predict(videoPath, n_frames, return_embeddings=return_embeddings, frame_offset=frame_offset+n_imgs, retries=retries+1)
else:
raise Exception("Maximum retries with " + videoPath)
padded_faces_array = np.zeros((2 * n_imgs, self.faceforensics_size[0], self.faceforensics_size[1],3), dtype=np.uint8)
for i in range(len(imgs)):
face_dets, scores = face_dets_list[i], scores_list[i]
bbs = self._removeOverlappingDetections(face_dets, scores, n_max=num_persons)
# set and keep the num_persons from the first frame
if(i==0):
num_persons = max(len(bbs),1)
else:
# keep the same face order and always find the same num_persons
bbs = self._faceTracking(bbs, prev_bbs)
prev_bbs = bbs
padded_faces = self._getFaceCrops(imgs[i], bbs, padding_percentage=self.face_padding, face_size=self.faceforensics_size, aspect_resize=True)
# add to the per-person face array so embeddings can be computed later
for j, face in enumerate(padded_faces):
padded_faces_array[i + j *(n_imgs)] = np.array(padded_faces[j])
return padded_faces_array[:num_persons*n_imgs]
```
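`_removeOverlappingDetections` keeps the more confident of any two overlapping boxes. The overlap test itself, extracted into a runnable sketch (`boxes_overlap` is a hypothetical name; boxes are `[x1, y1, x2, y2]`):
```python
def boxes_overlap(bb1, bb2):
    # Axis-aligned overlap test mirroring the horizontal/vertical checks
    # in _removeOverlappingDetections.
    left, right = (bb1, bb2) if bb1[0] < bb2[0] else (bb2, bb1)
    if right[0] >= left[2]:        # no horizontal overlap
        return False
    top, bottom = (bb1, bb2) if bb1[1] < bb2[1] else (bb2, bb1)
    return bottom[1] < top[3]      # vertical overlap

print(boxes_overlap([0, 0, 100, 100], [50, 50, 150, 150]))   # True
print(boxes_overlap([0, 0, 100, 100], [120, 0, 200, 100]))   # False
```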
#### File: deepfake-video-detector/util/wav2spektrogram.py
```python
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import specgram
import librosa
import librosa.display
from pathlib import Path
import warnings
import json
with open("../user_config.json") as config:
path_dict = json.load(config)['data_paths']
SAVE_DIR = path_dict['spectrogram_path']
def wav2spectrogram(filename, SAVE_DIR=SAVE_DIR):
AUDIO_FN_EXTENSION = ".wav"
spectogram_sample = os.path.join(SAVE_DIR, str(filename).split("/")[-1].replace(".wav",".png"))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
samples, sample_rate = librosa.load(str(filename))
fig = plt.figure(figsize=[0.72,0.72])
ax = fig.add_subplot(111)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.set_frame_on(False)
S = librosa.feature.melspectrogram(y=samples, sr=sample_rate)
librosa.display.specshow(librosa.power_to_db(S, ref=np.max))
plt.savefig(spectogram_sample, dpi=400, bbox_inches='tight', pad_inches=0)
plt.close('all')
except:
print("Except in " + spectogram_sample)
```
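A minimal sketch of the core computation in `wav2spectrogram` without the matplotlib plumbing; `"example.wav"` is a placeholder path.
```python
import numpy as np
import librosa

# Compute the mel spectrogram in dB, which is what the plotting code above visualizes.
samples, sample_rate = librosa.load("example.wav")
S = librosa.feature.melspectrogram(y=samples, sr=sample_rate)
S_db = librosa.power_to_db(S, ref=np.max)
print(S_db.shape)  # (n_mels, n_frames)
```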
{
"source": "jpjuvo/PANDA-challenge-raehmae",
"score": 2
} |
#### File: training/data/multitask.py
```python
import fastai
from fastai.vision import *
import random
import matplotlib.pyplot as plt
import PIL
from fastai.vision import Image
import random
import torch
import numpy as np
# Multitask implementation taken from
# https://nbviewer.jupyter.org/gist/denisvlr/802f980ff6b6296beaaea1a592724a51
def label_from_mt_project(self, multitask_project):
mt_train_list = MultitaskItemList(
[task['label_lists'].train.y for task in multitask_project.values()],
mt_names=list(multitask_project.keys())
)
mt_valid_list = MultitaskItemList(
[task['label_lists'].valid.y for task in multitask_project.values()],
mt_names=list(multitask_project.keys())
)
self.train = self.train._label_list(x=self.train, y=mt_train_list)
self.valid = self.valid._label_list(x=self.valid, y=mt_valid_list)
self.__class__ = MultitaskLabelLists # TODO: Class morphing should be avoided, to be improved.
self.train.__class__ = MultitaskLabelList
self.valid.__class__ = MultitaskLabelList
return self
class MultitaskItem(MixedItem):
def __init__(self, *args, mt_names=None, **kwargs):
super().__init__(*args,**kwargs)
self.mt_names = mt_names
def __repr__(self):
return '|'.join([f'{self.mt_names[i]}:{item}' for i, item in enumerate(self.obj)])
class MultitaskItemList(MixedItemList):
def __init__(self, *args, mt_names=None, **kwargs):
super().__init__(*args,**kwargs)
self.mt_classes = [getattr(il, 'classes', None) for il in self.item_lists]
self.mt_types = [type(il) for il in self.item_lists]
self.mt_lengths = [len(i) if i else 1 for i in self.mt_classes]
self.mt_names = mt_names
def get(self, i):
return MultitaskItem([il.get(i) for il in self.item_lists], mt_names=self.mt_names)
def reconstruct(self, t_list):
items = []
t_list = self.unprocess_one(t_list)
for i,t in enumerate(t_list):
if self.mt_types[i] == CategoryList:
items.append(Category(t, self.mt_classes[i][t]))
elif issubclass(self.mt_types[i], FloatList):
items.append(FloatItem(t))
return MultitaskItem(items, mt_names=self.mt_names)
def analyze_pred(self, pred, thresh:float=0.5):
predictions = []
start = 0
for length, mt_type in zip(self.mt_lengths, self.mt_types):
if mt_type == CategoryList:
predictions.append(pred[start: start + length].argmax())
elif issubclass(mt_type, FloatList):
predictions.append(pred[start: start + length][0])
start += length
return predictions
def unprocess_one(self, item, processor=None):
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor:
item = _processor_unprocess_one(p, item)
return item
def _processor_unprocess_one(self, item:Any): # TODO: global function to avoid subclassing MixedProcessor. To be cleaned.
res = []
for procs, i in zip(self.procs, item):
for p in procs:
if hasattr(p, 'unprocess_one'):
i = p.unprocess_one(i)
res.append(i)
return res
class MultitaskLabelList(LabelList):
def get_state(self, **kwargs):
kwargs.update({
'mt_classes': self.mt_classes,
'mt_types': self.mt_types,
'mt_lengths': self.mt_lengths,
'mt_names': self.mt_names
})
return super().get_state(**kwargs)
@classmethod
def load_state(cls, path:PathOrStr, state:dict) -> 'LabelList':
res = super().load_state(path, state)
res.mt_classes = state['mt_classes']
res.mt_types = state['mt_types']
res.mt_lengths = state['mt_lengths']
res.mt_names = state['mt_names']
return res
class MultitaskLabelLists(LabelLists):
@classmethod
def load_state(cls, path:PathOrStr, state:dict):
path = Path(path)
train_ds = MultitaskLabelList.load_state(path, state)
valid_ds = MultitaskLabelList.load_state(path, state)
return MultitaskLabelLists(path, train=train_ds, valid=valid_ds)
```
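`analyze_pred` splits one concatenated prediction vector back into per-task outputs (argmax for categorical heads, raw value for float heads). The same idea as a standalone sketch with hypothetical task sizes:
```python
import numpy as np

mt_lengths = [6, 2, 2]                 # e.g. an ISUP head plus two binary tile heads (made up)
mt_is_category = [True, True, True]
pred = np.random.rand(sum(mt_lengths))

start, predictions = 0, []
for length, is_cat in zip(mt_lengths, mt_is_category):
    chunk = pred[start:start + length]
    predictions.append(int(chunk.argmax()) if is_cat else float(chunk[0]))
    start += length
print(predictions)
```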
#### File: training/data/sampler.py
```python
import torch
import os
import numpy as np
import random
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from data.tileimages import *
from data.multitask import *
import fastai
from fastai.vision import *
class FoldSampler:
def __init__(self, TRAIN, LABELS,
mean, std, N,
tfms=[], sz=128,bs=16,
n_folds=4, uniform_augmentations=False,
shuffle_nonempty_imgs=False,
model_name=None,
is_train=True,
is_ordinal=False,
SEED=2020,
num_workers=4):
self._seed_everything(SEED)
self.SEED = SEED
self.tfms = tfms
self.mean = mean
self.std = std
self.N = N
self.nfolds = n_folds
self.TRAIN = TRAIN
self.sz = sz
self.bs = bs
self.is_ordinal = is_ordinal
self.is_train=is_train
self.num_workers=num_workers
self.model_name = model_name
self.uniform_augmentations = uniform_augmentations
self.shuffle_nonempty_imgs = shuffle_nonempty_imgs
self._prepare_data(TRAIN, LABELS)
self.df.head()
def _seed_everything(self, seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
def _cats4slide(self, image_id):
fn_cats = os.path.join(self.TRAIN, f'{image_id}_mask.txt')
if os.path.isfile(fn_cats):
with open(fn_cats) as f:
return [int(int(l)>1) for l in f.readlines()]
else:
raise Exception("File not found", str(fn_cats))
def _findAllReplicates(self, pairs, seed):
replicates = [seed]
nodes = [seed]
def addReplicate(n):
if n not in replicates:
replicates.append(n)
nodes.append(n)
# while there are nodes left
while len(nodes) > 0:
this_node = nodes[0]
for i,j in pairs:
if i==this_node:
# match - add j to replicates
addReplicate(j)
elif j==this_node:
# match - add i to replicates
addReplicate(i)
nodes.pop(0)
return replicates
def _pairs2sameFolds(self, df,pairs):
replicate_indices = np.unique(pairs)
split_values = df.split.values
for ind in replicate_indices:
allReps = self._findAllReplicates(list(pairs), ind)
# set all to the same fold as the minimum index
min_rep = min(allReps)
target_fold = split_values[min_rep]
for rep in allReps:
split_values[rep] = target_fold
df.split = split_values
return df
def _prepare_data(self, TRAIN, LABELS):
df = pd.read_csv(LABELS).set_index('image_id')
files = set([p[:32] for p in os.listdir(TRAIN)])
df = df.loc[files]
df = df.reset_index()
df['stratify'] = df.data_provider.map(str) + '-' + df.isup_grade.map(str)
splits = StratifiedKFold(n_splits=self.nfolds, random_state=self.SEED, shuffle=True)
splits = list(splits.split(df,df.stratify))
folds_splits = np.zeros(len(df)).astype(int)
for i in range(self.nfolds): folds_splits[splits[i][1]] = i
df['split'] = folds_splits
if self.is_ordinal:
def _transform_ordinal(label):
#return ','.join([str(i) for i in range(int(label) + 1)])
return ','.join([str(i) for i in range(int(label))])
df.isup_grade = df.isup_grade.apply(_transform_ordinal)
# add tile cancer categories if present in train data
if self.model_name in ["multihead_tilecat", "multihead_tilecat_attention"]:
cancer_labels = np.array([np.array(self._cats4slide(image_id)) for image_id in df.image_id.values])
for i in range(cancer_labels.shape[1]):
df[f'cancer_status_{i}'] = list(cancer_labels[:,i])
# set serial section replicates to same folds
pairs_fn = os.path.join('../','pair_indices.npy')
if os.path.exists(pairs_fn):
pairs = np.load(pairs_fn)
print(f'Setting {np.array(pairs).shape[0]} serial section replicates to same folds')
df = self._pairs2sameFolds(df, pairs)
self.df = df
def get_data(self,fold=0, **kwargs):
model_name = "iafoss" if self.model_name is None else self.model_name
regr = "regr" in model_name
def __MImageItemList():
""" This returns MImageItemList with specified defaults """
return MImageItemList.from_df(self.df,
path='.',
folder=self.TRAIN,
cols='image_id',
sz=self.sz,
N=self.N,
mean=self.mean,
std=self.std,
uniform_augmentations=self.uniform_augmentations,
shuffle_nonempty_imgs=self.shuffle_nonempty_imgs
)
if model_name in ["multihead_tilecat", "multihead_tilecat_attention"] and self.is_train:
# create isup LabelItemList
isup_labels = (
(__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=['isup_grade']))
)
# create the dict to hold all LabelItemLists
multitask_project = {
'isup': {
'label_lists': isup_labels,
}
}
# add tile cancer categories to the dict
for i in range(self.N):
tilecat = (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=[f'cancer_status_{i}']))
multitask_project[f'tilecat_{i}'] = {
'label_lists': tilecat,
}
ItemLists.label_from_mt_project = label_from_mt_project
return (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_mt_project(multitask_project)
.transform(self.tfms,
size=self.sz,
padding_mode='zeros')
.databunch(bs=self.bs,
num_workers=self.num_workers)
)
else: # Defaults to Iafoss
if self.is_ordinal:
return (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=['isup_grade'], label_cls=None, label_delim=',')
.transform(self.tfms,
size=self.sz,
padding_mode='zeros')
.databunch(bs=self.bs,
num_workers=self.num_workers)
)
else:
return (__MImageItemList()
.split_by_idx(self.df.index[self.df.split == fold].tolist())
.label_from_df(cols=['isup_grade'], label_cls=FloatList if regr==True else None)
.transform(self.tfms,
size=self.sz,
padding_mode='zeros')
.databunch(bs=self.bs,
num_workers=self.num_workers)
)
```
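The fold assignment in `_prepare_data` stratifies on data provider + ISUP grade. A self-contained sketch with a toy dataframe (column values are made up):
```python
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold

df = pd.DataFrame({
    'data_provider': ['a'] * 20 + ['b'] * 20,
    'isup_grade': [0, 1, 2, 3, 4] * 8,
})
df['stratify'] = df.data_provider.map(str) + '-' + df.isup_grade.map(str)

splits = StratifiedKFold(n_splits=4, random_state=2020, shuffle=True)
folds = np.zeros(len(df), dtype=int)
for fold, (_, valid_idx) in enumerate(splits.split(df, df.stratify)):
    folds[valid_idx] = fold   # each row gets the index of the fold it validates in
df['split'] = folds
print(df.split.value_counts())
```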
#### File: training/data/tileimages.py
```python
import fastai
from fastai.vision import *
import random
import matplotlib.pyplot as plt
import PIL
from fastai.vision import Image
import random
import torch
import numpy as np
def open_image(fn:PathOrStr, div:bool=True, convert_mode:str='RGB', cls:type=Image, after_open:Callable=None)->Image:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning) # EXIF warning from TiffPlugin
x = PIL.Image.open(fn).convert(convert_mode)
if after_open: x = after_open(x)
x = pil2tensor(x,np.float32)
if div: x.div_(255)
return cls(1.0-x) #invert image for zero padding
class MImage(ItemBase):
def __init__(self, imgs, N, mean, std, sz, uniform_augmentations=False):
self.N = N
self.mean = mean
self.std = std
self.sz = sz
self.uniform_augmentations = uniform_augmentations
self.obj, self.data = \
(imgs), [(imgs[i].data - mean[...,None,None])/std[...,None,None] for i in range(len(imgs))]
def apply_tfms(self, tfms,*args, **kwargs):
random_int = random.randint(0, 10000000) # for uniform augmentations
random_state = random.getstate()
for i in range(len(self.obj)):
if self.uniform_augmentations:
random.setstate(random_state)
torch.manual_seed(random_int)
self.obj[i] = self.obj[i].apply_tfms(tfms, *args, **kwargs)
self.data[i] = (self.obj[i].data - self.mean[...,None,None])/self.std[...,None,None]
return self
def __repr__(self): return f'{self.__class__.__name__} {img.shape for img in self.obj}'
def to_one(self):
img = torch.stack(self.data,1)
img = img.view(3,-1,self.N,self.sz,self.sz).permute(0,1,3,2,4).contiguous().view(3,-1,self.sz*self.N)
return Image(1.0 - (self.mean[...,None,None]+img*self.std[...,None,None]))
class MImageItemList(ImageList):
def __init__(self, N, sz, mean, std, uniform_augmentations=False,shuffle_nonempty_imgs=False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.N = N
self.sz = sz
self.mean = mean
self.std = std
self.uniform_augmentations = uniform_augmentations
self.shuffle_nonempty_imgs = shuffle_nonempty_imgs
self.get_iters = 0
self.copy_new.append('N')
self.copy_new.append('sz')
self.copy_new.append('mean')
self.copy_new.append('std')
self.copy_new.append('uniform_augmentations')
self.copy_new.append('shuffle_nonempty_imgs')
def __len__(self)->int: return len(self.items) or 1
def get(self, i):
fn = Path(self.items[i])
fnames = [Path(str(fn)+'_'+str(i)+'.png') for i in range(self.N)]
random.shuffle(fnames)
imgs = [open_image(fname, convert_mode=self.convert_mode, after_open=self.after_open)
for fname in fnames]
if self.shuffle_nonempty_imgs:
nonempty = [idx for idx,img in enumerate(imgs)
if not np.all(np.equal(np.array(img.data),
np.zeros_like(np.array(img.data), dtype='float32'))
)]
empty = [k for k in range(len(imgs)) if not k in nonempty]
self.get_iters +=1
np.random.seed(self.get_iters)
np.random.shuffle(nonempty)
imgs = list(np.array(imgs)[nonempty]) + list(np.array(imgs)[empty])
return MImage(imgs, self.N, self.mean, self.std, self.sz, self.uniform_augmentations)
def reconstruct(self, t):
return MImage([self.mean[...,None,None]+_t*self.std[...,None,None] for _t in t], self.N, self.mean, self.std, self.sz, self.uniform_augmentations)
def show_xys(self, xs, ys, figsize:Tuple[int,int]=(300,50), **kwargs):
rows = min(len(xs),8)
fig, axs = plt.subplots(rows,1,figsize=figsize)
for i, ax in enumerate(axs.flatten() if rows > 1 else [axs]):
xs[i].to_one().show(ax=ax, y=ys[i], **kwargs)
plt.tight_layout()
```
#### File: training/model/conv_branch_regr_model.py
```python
import fastai
from fastai.vision import *
import torch
import torch.nn as nn
from train.mish_activation import *
class Model(nn.Module):
def __init__(self, enc_func, n=6, final_dropout=0.5, pre=True, num_tiles=12, f_conv_out=128, tile_list_input=True):
super().__init__()
self.num_tiles = num_tiles
self.f_conv_out = f_conv_out
m = enc_func()
self.tile_list_input = tile_list_input
# drop the final avgpool and fc layers
self.enc = nn.Sequential(*list(m.children())[:-2])
# get the output channels of the encoder by checking the input size of the fc layer
# nc = 2048 with resnext50_32x4d_ssl
nc = list(m.children())[-1].in_features
# Adaptive concat branch
self.feat_adaptiveconcat = nn.Sequential(AdaptiveConcatPool2d(),
)
# Conv branch
self.feat_conv = nn.Sequential(nn.Conv2d(nc, self.f_conv_out, (self.num_tiles,1), stride=(self.num_tiles,1)),
Mish(),
nn.Dropout(0.7),
nn.BatchNorm2d(self.f_conv_out),
AdaptiveConcatPool2d(), # this will duplicate self.f_conv_out
)
# create the classification head, where input size is 2*(nc+self.f_conv_out) and output n
# input size is 2*(nc+self.f_conv_out) because we use AdaptiveConcatPool
# and it's a layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`, thus 2*(nc+self.f_conv_out)
# n=6 with isup_grades from 0 to 5
self.head = nn.Sequential(
Flatten(),
nn.Linear(2*(nc+self.f_conv_out),512),
nn.BatchNorm1d(512),
Mish(),
nn.Dropout(final_dropout),
nn.Linear(512,1)
)
def forward(self, *x):
if self.tile_list_input:
# x.shape = (N,bs,3,sz,sz)
shape = x[0].shape
# n is number of tiles per slide
n = len(x)
# reshape x to a large batch of images
x = torch.stack(x,1).view(-1,shape[1],shape[2],shape[3])
else:
x=x[0] #untuple
shape = x.shape
n = shape[1]
x = x.view(-1,shape[2],shape[3],shape[4])
# x: bs*N x 3 x 128 x 128
x = self.enc(x)
# x: bs*N x C x 4 x 4
shape = x.shape
# concatenate the output for tiles into a single map - no matter how many tile images we have,
# each slide is compressed to a max and avg of all tiles
x = x.view(-1,n,shape[1],shape[2],shape[3]).permute(0,2,1,3,4).contiguous()\
.view(-1,shape[1],shape[2]*n,shape[3])
# x: bs x C x N*4 x 4
x1 = self.feat_conv(x) # x1: bs x 2*f_conv_out
x2 = self.feat_adaptiveconcat(x) # x2: bs x 2*nc
#print(x1.shape, x2.shape)
x = torch.cat([x1, x2], axis=1)
x = self.head(x)
return x
```
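A quick shape walk-through of the tile concatenation in `forward`, using hypothetical sizes (batch 2, N=12 tiles, 2048 encoder channels, 4x4 feature maps):
```python
import torch

bs, n, c, h, w = 2, 12, 2048, 4, 4
x = torch.rand(bs * n, c, h, w)                       # encoder output for all tiles
x = x.view(-1, n, c, h, w).permute(0, 2, 1, 3, 4).contiguous()
x = x.view(-1, c, h * n, w)
print(x.shape)  # torch.Size([2, 2048, 48, 4]) -> one tall feature map per slide
```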
#### File: training/model/multihead_tilecat.py
```python
import fastai
from fastai.vision import *
import torch
import torch.nn as nn
from train.mish_activation import *
class Model(nn.Module):
def __init__(self, enc_func, n=6,final_dropout=0.5, num_tiles=16, cancer_categories=3, is_train=True, tile_list_input=True, pre=True):
super().__init__()
self.is_train = is_train
self.tile_list_input = tile_list_input
m = enc_func()
# drop the final avgpool and fc layers
self.enc = nn.Sequential(*list(m.children())[:-2])
# get the output channels of the encoder by checking the input size of the fc layer
# nc = 2048 with resnext50_32x4d_ssl
nc = list(m.children())[-1].in_features
# create the classification head, where input size is 2*nc and output n
# input size is 2*nc because we use AdaptiveConcatPool
# and it's a layer that concats `AdaptiveAvgPool2d` and `AdaptiveMaxPool2d`, thus 2*nc
# n=6 with isup_grades from 0 to 5
self.head = nn.Sequential(
AdaptiveConcatPool2d(),
Flatten(),
nn.Linear(2*nc,512),
nn.BatchNorm1d(512),
Mish(),
nn.Dropout(final_dropout),
nn.Linear(512,n)
)
self.cancer_head = nn.Sequential(
AdaptiveConcatPool2d(),
Flatten(),
nn.Linear(2*nc,512),
nn.BatchNorm1d(512),
Mish(),
nn.Dropout(final_dropout),
nn.Linear(512,cancer_categories),
nn.Softmax(dim=-1)
)
def forward(self, *x):
if self.tile_list_input:
# x.shape = (N,bs,3,sz,sz)
shape = x[0].shape
# n is number of tiles per slide
n = len(x)
# reshape x to a large batch of images
x = torch.stack(x,1).view(-1,shape[1],shape[2],shape[3])
else:
x=x[0] #untuple
shape = x.shape
n = shape[1]
x = x.view(-1,shape[2],shape[3],shape[4])
# x: bs*N x 3 x 128 x 128
x = self.enc(x)
# x: bs*N x C x 4 x 4
shape = x.shape
# concatenate the output for tiles into a single map - no matter how many tile images we have,
# each slide is compressed to a max and avg of all tiles
x1 = x.view(-1,n,shape[1],shape[2],shape[3]).permute(0,2,1,3,4).contiguous()\
.view(-1,shape[1],shape[2]*n,shape[3])
# x: bs x C x N*4 x 4
x1 = self.head(x1)
x2 = self.cancer_head(x) #(bs*N, 3)
x2 = x2.view(-1, n, x2.shape[-1]) # bs, N, 3
outlist = [x1] + [x2[:,i,:] for i in range(x2.shape[1])]
return outlist if self.is_train else x1
```
{
"source": "jpjuvo/RSNA-MICCAI-Brain-Tumor-Classification",
"score": 3
} |
#### File: src/clf_model_utils/miccai_2d_dataset.py
```python
import numpy as np
import torch
import torchio as tio
from torch.utils.data import Dataset
import cv2
import SimpleITK as sitk
import fastai
from fastai.vision.all import *
class MICCAI2DDataset(Dataset):
"""
Dataset that aligns T2W stack and normalizes slice thickness to 1.
Axial 2D images are MIPed (max. intensity projection) from tumor region height.
"""
def __init__(self,
df_features,
image_dir=None,
npy_dir=None,
image_size=(256,256),
tio_augmentations=None,
is_train=True,
mip_window=0.1 # maximum intensity projection window for height slicing
):
self.image_size = image_size
self.image_dir = image_dir
self.npy_dir = npy_dir
self.df_features = df_features
self.tio_augmentations = tio_augmentations
self.is_train = is_train
self.mip_window = mip_window
# We use ToCanonical for a consistent orientation and Resample the images to 1 mm isotropic spacing
preprocessing_transforms = (
tio.ToCanonical(),
tio.Resample(1, image_interpolation='bspline'),
)
self.preprocess = tio.Compose(preprocessing_transforms)
if is_train:
# shuffle
self.df_features = self.df_features.sample(frac=1)
# hook for oversampling; not implemented in the base dataset
self._sample_data()
def _sample_data(self):
pass
@contextmanager
def set_split_idx(self, i):
""" Used by fastai's tta, when activating test time augs """
if i == 0:
self.tio_augmentations = tio.Compose([
tio.RandomAffine(p=0.5),
tio.RandomFlip(axes=(1,2), p=0.5)
])
for _ in range(8):
pass
try: yield self
finally:
pass
@staticmethod
def _normalize(image, min_arr, max_arr):
""" To [-1,1] range """
image = (image.astype("float32", copy=False) - min_arr) / (max_arr - min_arr + 1e-6)
image = image * 2 - 1
return image
@staticmethod
def _random_noise(image):
""" To [-1,1] range noise """
image = np.random.standard_normal(image.shape).astype(np.float32)
image = (image - np.min(image)) / (np.max(image) - np.min(image) + 1e-6)
image = image * 2 - 1
return image
def _resize(self, image):
image = cv2.resize(image, self.image_size, cv2.INTER_LINEAR)
return image
def _get_crop_bb(self, image):
inside_value = 0
outside_value = 255
label_shape_filter = sitk.LabelShapeStatisticsImageFilter()
label_shape_filter.Execute( sitk.OtsuThreshold(image, inside_value, outside_value) )
bounding_box = label_shape_filter.GetBoundingBox(outside_value)
return bounding_box
def _crop_with_bb(self, image, bounding_box):
# The bounding box's first "dim" entries are the starting index and last "dim" entries the size
return sitk.RegionOfInterest(image, bounding_box[int(len(bounding_box)/2):], bounding_box[0:int(len(bounding_box)/2)])
def _crop_tio_image(self, tio_image):
sitk_image = tio_image.as_sitk()
sitk_image = self._crop_with_bb(sitk_image, self._get_crop_bb(sitk_image))
arr = sitk.GetArrayFromImage(sitk_image)
arr = np.swapaxes(arr, 0,2)
return tio.ScalarImage(tensor=np.expand_dims(arr, axis=0))
def _extract_tumor_height(self, arr, features):
""" MIP tumor slice """
min_tumor_height = features['percentile10_ax_2'] / 100
max_tumor_height = features['percentile90_ax_2'] / 100
max_tumor_height -= self.mip_window
if features['percentile10_ax_2'] == features['percentile90_ax_2']:
tumor_height = 0.5
elif min_tumor_height >= max_tumor_height:
tumor_height = min(min_tumor_height, 1. - self.mip_window)
else:
tumor_height = np.random.random() * (max_tumor_height - min_tumor_height) + min_tumor_height
tumor_height_start = int(tumor_height*arr.shape[3])
tumor_height_end = int((tumor_height + self.mip_window)*arr.shape[3])
return np.max(arr[:,:,:,tumor_height_start:tumor_height_end], axis=3)
def __len__(self):
return len(self.df_features)
def _create_label(self, row):
return float(row.MGMT_value)
def __getitem__(self, idx):
row = self.df_features.iloc[idx]
bratsid = f'{int(row.BraTS21ID):05d}'
# load image from preprocessed numpy or from dicom
if self.npy_dir is not None:
crop_image = tio.ScalarImage(tensor=np.load(os.path.join(self.npy_dir, f'{bratsid}.npy')))
else:
tio_image = tio.ScalarImage(os.path.join(self.image_dir, bratsid, 'T2w'))
tio_image = self.preprocess.apply_transform(tio_image)
crop_image = self._crop_tio_image(tio_image)
# get min and max values before slicing - this way the normalization will maintain global range better
tio_arr = crop_image.numpy().astype(np.float32)
max_arr = np.max(tio_arr)
min_arr = np.min(tio_arr)
if self.tio_augmentations is not None:
crop_image = self.tio_augmentations(crop_image)
image = self._extract_tumor_height(crop_image.numpy(), row)
image = image.astype(np.float32)
# reduce one dimension in case it's (1,128,281) instead of (128,281)
if len(image.shape) == 3:
image = image[0]
# resize
image = self._resize(image)
# normalize each patient
if min_arr == max_arr:
image = self._random_noise(image)
else:
image = self._normalize(image, min_arr, max_arr)
image = np.nan_to_num(image)
# to 3chan rgb
channels = [image, image, image]
image = np.stack(channels)
label = self._create_label(row)
return image, label
```
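A small sketch of the maximum-intensity-projection step in `_extract_tumor_height`, on a random volume with a hypothetical tumor height of 0.45:
```python
import numpy as np

arr = np.random.rand(1, 128, 128, 120)     # (c, w, h, d) volume (dummy data)
mip_window = 0.1
start = int(0.45 * arr.shape[3])
end = int((0.45 + mip_window) * arr.shape[3])
mip = np.max(arr[:, :, :, start:end], axis=3)   # max over the 10% slab of axial slices
print(mip.shape)  # (1, 128, 128)
```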
#### File: RSNA-MICCAI-Brain-Tumor-Classification/src/dicom_utils.py
```python
import numpy as np
import cv2
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut
__all__ = [
"get_clahe",
"get_uint8_rgb"
]
def _read_dicom_image(
path,
voi_lut=True,
fix_monochrome=True,
do_norm=True):
# Original from: https://www.kaggle.com/raddar/convert-dicom-to-np-array-the-correct-way
dicom = pydicom.read_file(path)
# VOI LUT (if available by DICOM device) is used to transform raw DICOM data to
# "human-friendly" view
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
# depending on this value, X-ray may look inverted - fix that:
if fix_monochrome and dicom.PhotometricInterpretation == "MONOCHROME1":
data = np.max(data) - data
if do_norm:
data = data - np.min(data)
data = (data / np.max(data)) if np.max(data) > 0 else data
return data.astype(np.float32)
def get_clahe():
return cv2.createCLAHE(clipLimit=2.0, tileGridSize=(10,10))
def _calc_image_features(image, clahe=None):
# from https://www.kaggle.com/socom20/effdet-v2
img_uint = (255 * image).astype(np.uint8)
clahe = clahe or get_clahe()
try:
img_equ = cv2.equalizeHist(img_uint) if np.max(img_uint) > 0 else img_uint
img_clahe = clahe.apply(img_uint) if np.max(img_uint) > 0 else img_uint
img_ret = np.concatenate(
[
image[:,:,None],
img_clahe[:,:,None].astype(np.float32) / 255,
img_equ[:,:,None].astype(np.float32) / 255,
],
axis=-1)
except:
print('exception')
img_ret = np.concatenate(
[
image[:,:,None],
image[:,:,None],
image[:,:,None],
],
axis=-1)
return img_ret
def get_uint8_rgb(dicom_path):
"""
Reads dicom from path and returns rgb uint8 array
where R: min-max normalized, G: CLAHE, B: histogram equalized.
Image size remains original.
"""
dcm = _read_dicom_image(dicom_path)
feats = _calc_image_features(dcm)
return (feats*255).astype(np.uint8)
```
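A minimal sketch of the three-channel encoding produced by `get_uint8_rgb`, using a synthetic grayscale array instead of reading a DICOM file:
```python
import numpy as np
import cv2

gray = np.random.rand(128, 128).astype(np.float32)          # stand-in for a min-max normalized slice
img_uint = (255 * gray).astype(np.uint8)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(10, 10))
# R: normalized image, G: CLAHE, B: histogram equalized
rgb = np.stack([img_uint, clahe.apply(img_uint), cv2.equalizeHist(img_uint)], axis=-1)
print(rgb.shape, rgb.dtype)  # (128, 128, 3) uint8
```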
#### File: src/seg_model_utils/augmentations3d.py
```python
import random
import numpy as np
import cv2
import scipy.ndimage as ndimage
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage.filters import gaussian_filter
class RandomChoice(object):
"""
choose a random tranform from list an apply
transforms: tranforms to apply
p: probability
"""
def __init__(self, transforms=[],
p=0.5):
self.transforms = transforms
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
t = random.choice(self.transforms)
return t(sample)
class ComposeTransforms(object):
"""
Composes several transforms together.
"""
def __init__(self, transforms=[],
p=0.9):
self.transforms = transforms
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
for t in self.transforms:
sample = t(sample)
return sample
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage.filters import gaussian_filter
def stack_seg_2_image(sample):
image = sample['image']
seg = sample['segmentation']
channels = [chan for chan in image]
channels.append(seg)
return np.stack(channels, axis=3)
def elastic_transform_3d(sample, alpha=1, sigma=20, c_val=0.0, method="linear"):
"""
:param sample: dict of image and seg
:param alpha: scaling factor of gaussian filter
:param sigma: standard deviation of random gaussian filter
:param c_val: fill value
:param method: interpolation method. supported methods : ("linear", "nearest")
:return: deformed image and/or label
"""
img_numpy = sample['image'].copy()
label = sample['segmentation'] if 'segmentation' in sample else None
shape = img_numpy.shape
# Define 3D coordinate system
coords = np.arange(shape[0]), np.arange(shape[1]), np.arange(shape[2])
# Interpolated images
chan_intrps = [RegularGridInterpolator(coords, img_numpy[:,:,:,chan],
method=method,
bounds_error=False,
fill_value=c_val) for chan in range(shape[3])]
#Get random elastic deformations
dx = gaussian_filter((np.random.rand(shape[0],shape[1],shape[2]) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
dy = gaussian_filter((np.random.rand(shape[0],shape[1],shape[2]) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
dz = gaussian_filter((np.random.rand(shape[0],shape[1],shape[2]) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
# Define sample points
x, y, z = np.mgrid[0:shape[0], 0:shape[1], 0:shape[2]]
indices = np.reshape(x + dx, (-1, 1)), \
np.reshape(y + dy, (-1, 1)), \
np.reshape(z + dz, (-1, 1))
# Interpolate 3D image image
img_numpy = np.stack([chan_intrp(indices).reshape((shape[0],shape[1],shape[2]))
for chan_intrp in chan_intrps], axis=3).astype(np.float32)
# Interpolate labels
if label is not None:
lab_intrp = RegularGridInterpolator(coords, label,
method="nearest",
bounds_error=False,
fill_value=0)
label = lab_intrp(indices).reshape(shape[0],shape[1],shape[2]).astype(label.dtype)
sample['segmentation'] = label
sample['image'] = img_numpy
return sample
class ElasticTransform(object):
def __init__(self, p=0.5, alpha=1, sigma=20, c_val=0.0, method="linear"):
self.p = p
self.alpha = alpha
self.sigma = sigma
self.c_val = c_val
self.method = method
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
return elastic_transform_3d(sample, self.alpha, self.sigma, self.c_val, self.method)
def random_noise(sample, mean=0, std=0.001, eps=1e-6):
im = sample['image'].copy()
noise = np.random.normal(mean, std, im.shape)
sample['image'] = np.where(im > eps, im + noise, im)
return sample
class GaussianNoise(object):
def __init__(self, p=0.5, mean=0, std=0.001):
self.mean = mean
self.std = std
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
return random_noise(sample, self.mean, self.std)
def random_crop_to_size(sample, crop_sz):
im = sample['image'].copy()
shape = im.shape
if 'segmentation' in sample:
seg = sample['segmentation'].copy()
else:
seg = None
# choose randomly but check that at least one tumor pixel is included
width, height, depth = crop_sz
sum_tumor = 0
n_round = 0
d,x,y = 0,0,0
while sum_tumor == 0 and n_round < 1000:
n_round += 1
d = np.random.randint(0, shape[0] - depth - 1)
x = np.random.randint(0, shape[1] - width - 1)
y = np.random.randint(0, shape[2] - height - 1)
if seg is not None:
check = seg[d:d+depth, x:x+width, y:y+height]
sum_tumor = np.sum(check)
else:
sum_tumor = 1
assert n_round < 1000, f'no segmentation found in {sample["BraTSID"]}'
im = im[d:d+depth, x:x+width, y:y+height,:]
sample['image'] = im
if seg is not None:
seg = check
sample['segmentation'] = seg
return sample
class RandomCropToSize(object):
def __init__(self, crop_sz=(200,200,95)):
self.crop_sz = crop_sz
def __call__(self, sample):
return random_crop_to_size(sample, self.crop_sz)
def random_flip_lr(sample):
im = sample['image'].copy()
seg = sample['segmentation'].copy()
im = im[:,:,::-1,:]
seg = seg[:,:,::-1]
sample['image'] = im
sample['segmentation'] = seg
return sample
class RandomFlipLR(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
return random_flip_lr(sample)
def random_channel_drop(sample):
im = sample['image'].copy()
c = im.shape[3]
drop_ch = random.randint(0, c-1)
im[:,:,:,drop_ch] = 0. if random.random() > 0.5 else 1.0
sample['image'] = im
return sample
class RandomChannelDrop(object):
def __init__(self, p=0.05):
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
return random_channel_drop(sample)
def random_rotate3D(sample, min_angle, max_angle):
"""
Returns a random rotated image and seg map in sample dict
:param sample: ds sample dict
:param min_angle: in degrees
:param max_angle: in degrees
:return: sample
"""
im = sample['image'].copy()
seg = sample['segmentation'].copy()
assert min_angle < max_angle, "min should be less than max val"
assert min_angle > -360 and max_angle < 360
all_axes = [(1, 0), (1, 2), (0, 2)]
angle = np.random.randint(low=min_angle, high=max_angle + 1)
axes_random_id = np.random.randint(low=0, high=len(all_axes))
axes = all_axes[axes_random_id]
im = ndimage.rotate(im, angle, axes=axes, reshape=False)
seg = ndimage.rotate(seg.astype(np.float32), angle, axes=axes, reshape=False)
# seg back to binary float values
seg = np.where(seg < 0.5, 0, 1.)
sample['image'] = im
sample['segmentation'] = seg
return sample
class RandomRotation(object):
def __init__(self, min_angle=-10, max_angle=10, p=0.5):
self.min_angle = min_angle
self.max_angle = max_angle
self.p = p
def __call__(self, sample):
augment = np.random.random(1) < self.p
if not augment:
return sample
return random_rotate3D(sample, self.min_angle, self.max_angle)
class DownSampleSegmentation(object):
def __init__(self, ds=4):
self.ds = ds
def __call__(self, sample):
if 'segmentation' in sample:
seg = sample['segmentation']
seg = seg[::self.ds, ::self.ds, ::self.ds]
sample['segmentation'] = seg
return sample
```
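A hypothetical usage sketch, assuming the transform classes defined above are in scope: compose a few 3D augmentations and apply them to a dummy sample dict.
```python
import numpy as np

# The sample layout matches what the transforms expect: a channels-last 4D image
# and a binary segmentation mask over the first three dimensions.
sample = {
    'image': np.random.rand(32, 64, 64, 4).astype(np.float32),
    'segmentation': (np.random.rand(32, 64, 64) > 0.95).astype(np.float32),
}
tfms = ComposeTransforms([
    RandomFlipLR(p=0.5),
    GaussianNoise(p=0.5, std=0.01),
    RandomRotation(min_angle=-10, max_angle=10, p=0.5),
], p=0.9)
out = tfms(sample)
print(out['image'].shape, out['segmentation'].shape)  # shapes are unchanged
```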
#### File: src/seg_model_utils/torchio_transforms.py
```python
import torch
import torchio as tio
import numpy as np
import os
def load_tio_image(fn):
"""
ScalarImage(shape: (c, w, h, d))
dtype: torch.DoubleTensor
"""
arr = np.load(fn).swapaxes(0,3)
return tio.ScalarImage(tensor=arr)
def arr_2_tio_image(arr):
"""
ScalarImage(shape: (c, w, h, d))
dtype: torch.DoubleTensor
"""
arr = arr.swapaxes(0,3)
return tio.ScalarImage(tensor=arr)
def load_tio_seg_image(fn):
"""
LabelMap(shape: (c, w, h, d))
dtype: torch.FloatTensor
Intensity transforms are not applied to these images.
Nearest neighbor interpolation is always used to resample label maps.
"""
if fn is None:
return None
if not os.path.exists(fn):
return None
arr = (np.expand_dims(np.load(fn),3).swapaxes(0,3) > 0).astype(np.float32)
return tio.LabelMap(tensor=arr)
def arr_2_tio_seg_image(arr):
"""
LabelMap(shape: (c, w, h, d))
dtype: torch.FloatTensor
Intensity transforms are not applied to these images.
Nearest neighbor interpolation is always used to resample label maps.
"""
if arr is None:
return None
arr = (np.expand_dims(arr,3).swapaxes(0,3) > 0).astype(np.float32)
return tio.LabelMap(tensor=arr)
def load_tio_subject(image_fn:str, label:int, seg_fn=None):
return tio.Subject(
rgb_image=load_tio_image(image_fn),
segmentation=load_tio_seg_image(seg_fn),
label=int(label),
name=os.path.basename(image_fn).split('.')[0])
```
{
"source": "jpk0727/growApp",
"score": 2
} |
#### File: apps/base/views.py
```python
import time
"""Views for the base app"""
from django.shortcuts import render, render_to_response
from django.template import RequestContext
from django.shortcuts import render, redirect
from django.views.decorators.cache import cache_page
from django.views.generic.base import TemplateView
from django.contrib import messages
from base.models import sensors, controller_setpoints
from base.forms import DateForm, ControlForm
from base.models import water_amount
from graphos.sources.model import ModelDataSource
from django.http import HttpResponse, HttpResponseRedirect
from graphos.renderers import flot
from graphos.views import FlotAsJson, RendererAsJson
from datetime import datetime
from datetime import timedelta
import json
import calendar
from django.http import Http404, HttpResponse
def home(request):
""" Default view for the root """
query = sensors.objects.latest('time')
query.time = time.ctime(int(query.time))
query.temp = round(query.temp, 2)
query.hum = round(query.hum, 2)
#for i in query:
# i.time = time.ctime(int(i.time))
return render(request, 'base/home.html', {'query':query})
def monitor(request):
if request.method == 'GET':
form = DateForm()
else:
form = DateForm(request.POST)
end = sensors.objects.latest('time')
end_time = datetime.fromtimestamp(
int(end.time)).strftime('%Y-%m-%d %H:%M')
start = sensors.objects.earliest('time')
start_time = datetime.fromtimestamp(
int(start.time)).strftime('%Y-%m-%d %H:%M')
query = sensors.objects.latest('time')
query.time = time.ctime(int(query.time))
yesterday = datetime.now() - timedelta(days = 1)
yesterday_time = yesterday.strftime("%Y-%m-%d %H:%M")
dates = request.POST
start_date = dates.get('start_date',yesterday_time)
end_date = dates.get('end_date',end_time)
start_stamp = time.mktime(time.strptime(start_date, "%Y-%m-%d %H:%M"))
end_stamp = time.mktime(time.strptime(end_date, "%Y-%m-%d %H:%M"))
queryset = sensors.objects.filter(time__gte = start_stamp,
time__lt = end_stamp).order_by('time')
queryset1 = water_amount.objects.filter(time__gte = start_stamp,
time__lt = end_stamp).order_by('time')
data_source1 = ModelDataSource(queryset, fields=['java_time','temp'])
data_source2 = ModelDataSource(queryset, fields=['java_time','hum'])
data_source3 = ModelDataSource(queryset, fields=['java_time','light'])
data_source4 = ModelDataSource(queryset, fields=['java_time','lux'])
data_source5 = ModelDataSource(queryset1, fields=['java_time','liters_total_r1','liters_total_r2','liters_total_r3'])
line_chart1 = flot.LineChart(data_source1,options = {'series': {'lines': {'fill':'true'}, 'color':'blue'}, 'xaxis':{'mode': 'time', 'timeformat': '%m/%e %I:%M %P', "timezone":"browser"}})
line_chart2 = flot.LineChart(data_source2,options = {'series': {'lines': {'fill':'true'}, 'color':'red'}, 'xaxis':{'mode': 'time', 'timeformat': '%m/%e %I:%M %P','timezone':'browser'}})
line_chart3 = flot.LineChart(data_source3,options = {'series': {'lines': {'fill':'true'}, 'color':'green'}, 'xaxis':{'mode': 'time', 'timeformat': '%m/%e %I:%M %P','timezone':'browser'}})
line_chart4 = flot.LineChart(data_source4,options = {'series': {'lines': {'fill':'true'}, 'color':'purple'}, 'xaxis':{'mode': 'time', 'timeformat': '%m/%e %I:%M %P','timezone':'browser'}})
line_chart5 = flot.BarChart(data_source5, options = {'series': {'lines': {'steps':'boolean'}}, 'xaxis':{'mode': 'time', 'timeformat': '%m/%e %I:%M %P', "timezone":"browser"}})
context = {
"line_chart1": line_chart1,
"line_chart2": line_chart2,
"line_chart3": line_chart3,
"line_chart4": line_chart4,
"line_chart5": line_chart5,
"form": form,
"dates":dates,
"start_stamp":start_stamp,
}
return render(request, 'base/monitor.html', context)
def about(request):
return render(request, 'base/about.html',{})
def control(request):
if request.method == 'GET':
cur = controller_setpoints.objects.latest('id')
data = {'humidity':cur.humidity,'r1_water':cur.r1_water,'r2_water':cur.r2_water,'r3_water':cur.r3_water,'water_frequency':cur.water_frequency,'lights_on':cur.lights_on,'lights_off':cur.lights_off}
form = ControlForm(initial=data)
else:
form = ControlForm(request.POST)
if form.is_valid():
form.time = time.time()
form.save()
return HttpResponseRedirect('/grow/control')
context = {
"form":form
}
return render(request, 'base/control.html',context)
``` |
{
"source": "jpk15211/py-metric-temporal-logic",
"score": 2
} |
#### File: py-metric-temporal-logic/mtl/sugar.py
```python
from mtl import ast
def alw(phi, *, lo=0, hi=float('inf')):
return ast.G(ast.Interval(lo, hi), phi)
def env(phi, *, lo=0, hi=float('inf')):
return ~alw(~phi, lo=lo, hi=hi)
def implies(x, y):
return ~x | y
def xor(x, y):
return (x | y) & ~(x & y)
def iff(x, y):
return (x & y) | (~x & ~y)
def until(phi, psi):
return ast.WeakUntil(phi, psi) & env(psi)
def timed_until(phi, psi, lo, hi):
return env(psi, lo=lo, hi=hi) & alw(until(phi, psi), lo=0, hi=lo)
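# Illustrative composition of these helpers (the atomic propositions 'req' and 'ack' are made up;
# parsing them assumes the package's top-level mtl.parse helper):
#   import mtl
#   req, ack = mtl.parse('req'), mtl.parse('ack')
#   spec = alw(implies(req, env(ack, lo=0, hi=5)))  # every request is acknowledged within 5 time units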
``` |
{
"source": "JP-Kabs/PythonStdioGames",
"score": 3
} |
#### File: src/gamesbyexample/fishtank.py
```python
__version__ = 0
import random, sys, time
try:
import bext
except ImportError:
print('This program requires the bext module, which you')
print('can install by following the instructions at')
print('https://pypi.org/project/Bext/')
sys.exit()
# Set up the constants:
WIDTH, HEIGHT = bext.size()
# We can't print to the last column on Windows without it adding a
# newline automatically, so reduce the width by one:
WIDTH -= 1
NUM_KELP = 2 # (!) Try changing this to 10.
NUM_FISH = 10 # (!) Try changing this to 2 or 100.
NUM_BUBBLERS = 1 # (!) Try changing this to 0 or 10.
FRAMES_PER_SECOND = 4 # (!) Try changing this number to 1 or 60.
# (!) Try changing the constants to create a fish tank with only kelp,
# or only bubblers.
# NOTE: Every string in a fish dictionary should be the same length.
FISH_TYPES = [
{'right': ['><>'], 'left': ['<><']},
{'right': ['>||>'], 'left': ['<||<']},
{'right': ['>))>'], 'left': ['<[[<']},
{'right': ['>||o', '>||.'], 'left': ['o||<', '.||<']},
{'right': ['>))o', '>)).'], 'left': ['o[[<', '.[[<']},
{'right': ['>-==>'], 'left': ['<==-<']},
{'right': [r'>\\>'], 'left': ['<//<']},
{'right': ['><)))*>'], 'left': ['<*(((><']},
{'right': ['}-[[[*>'], 'left': ['<*]]]-{']},
{'right': [']-<)))b>'], 'left': ['<d(((>-[']},
{'right': ['><XXX*>'], 'left': ['<*XXX><']},
{'right': ['_.-._.-^=>', '.-._.-.^=>',
'-._.-._^=>', '._.-._.^=>'],
'left': ['<=^-._.-._', '<=^.-._.-.',
'<=^_.-._.-', '<=^._.-._.']},
] # (!) Try adding your own fish to FISH_TYPES.
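# A hypothetical extra entry could look like the line below; remember that every
# string inside a single fish dictionary must have the same length:
#     {'right': ['>=#=>'], 'left': ['<=#=<']},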
LONGEST_FISH_LENGTH = 10 # Longest single string in FISH_TYPES.
# The x and y positions where a fish runs into the edge of the screen:
LEFT_EDGE = 0
RIGHT_EDGE = WIDTH - 1 - LONGEST_FISH_LENGTH
TOP_EDGE = 0
BOTTOM_EDGE = HEIGHT - 2
def main():
global FISHES, BUBBLERS, BUBBLES, KELPS, STEP
bext.bg('black')
bext.clear()
# Generate the global variables:
FISHES = []
for i in range(NUM_FISH):
FISHES.append(generateFish())
# NOTE: Bubbles are drawn, but not the bubblers themselves.
BUBBLERS = []
for i in range(NUM_BUBBLERS):
# Each bubbler starts at a random position.
BUBBLERS.append(random.randint(LEFT_EDGE, RIGHT_EDGE))
BUBBLES = []
KELPS = []
for i in range(NUM_KELP):
kelpx = random.randint(LEFT_EDGE, RIGHT_EDGE)
kelp = {'x': kelpx, 'segments': []}
# Generate each segment of the kelp:
for i in range(random.randint(6, HEIGHT - 1)):
kelp['segments'].append(random.choice(['(', ')']))
KELPS.append(kelp)
# Run the simulation:
STEP = 1
while True:
simulateAquarium()
drawAquarium()
time.sleep(1 / FRAMES_PER_SECOND)
clearAquarium()
STEP += 1
def getRandomColor():
"""Return a string of a random color."""
return random.choice(('black', 'red', 'green', 'yellow', 'blue',
'purple', 'cyan', 'white'))
def generateFish():
"""Return a dictionary that represents a fish."""
fishType = random.choice(FISH_TYPES)
# Set up colors for each character in the fish text:
colorPattern = random.choice(('random', 'head-tail', 'single'))
fishLength = len(fishType['right'][0])
if colorPattern == 'random': # All parts are randomly colored.
colors = []
for i in range(fishLength):
colors.append(getRandomColor())
if colorPattern == 'single' or colorPattern == 'head-tail':
colors = [getRandomColor()] * fishLength # All the same color.
if colorPattern == 'head-tail': # Head/tail different from body.
headTailColor = getRandomColor()
colors[0] = headTailColor # Set head color.
colors[-1] = headTailColor # Set tail color.
# Set up the rest of fish data structure:
fish = {'right': fishType['right'],
'left': fishType['left'],
'colors': colors,
'hSpeed': random.randint(1, 6),
'vSpeed': random.randint(5, 15),
'timeToHDirChange': random.randint(10, 60),
'timeToVDirChange': random.randint(2, 20),
'goingRight': random.choice([True, False]),
'goingDown': random.choice([True, False])}
# 'x' is always the leftmost side of the fish body:
fish['x'] = random.randint(0, WIDTH - 1 - LONGEST_FISH_LENGTH)
fish['y'] = random.randint(0, HEIGHT - 2)
return fish
def simulateAquarium():
"""Simulate the movements in the aquarium for one step."""
    global FISHES, BUBBLERS, BUBBLES, KELPS, STEP
# Simulate the fish for one step:
for fish in FISHES:
# Move the fish horizontally:
if STEP % fish['hSpeed'] == 0:
if fish['goingRight']:
if fish['x'] != RIGHT_EDGE:
fish['x'] += 1 # Move the fish right.
else:
fish['goingRight'] = False # Turn the fish around.
fish['colors'].reverse() # Turn the colors around.
else:
if fish['x'] != LEFT_EDGE:
fish['x'] -= 1 # Move the fish left.
else:
fish['goingRight'] = True # Turn the fish around.
fish['colors'].reverse() # Turn the colors around.
# Fish can randomly change their horizontal direction:
fish['timeToHDirChange'] -= 1
if fish['timeToHDirChange'] == 0:
fish['timeToHDirChange'] = random.randint(10, 60)
# Turn the fish around:
fish['goingRight'] = not fish['goingRight']
# Move the fish vertically:
if STEP % fish['vSpeed'] == 0:
if fish['goingDown']:
if fish['y'] != BOTTOM_EDGE:
fish['y'] += 1 # Move the fish down.
else:
fish['goingDown'] = False # Turn the fish around.
else:
if fish['y'] != TOP_EDGE:
fish['y'] -= 1 # Move the fish up.
else:
fish['goingDown'] = True # Turn the fish around.
# Fish can randomly change their vertical direction:
fish['timeToVDirChange'] -= 1
if fish['timeToVDirChange'] == 0:
fish['timeToVDirChange'] = random.randint(2, 20)
# Turn the fish around:
fish['goingDown'] = not fish['goingDown']
# Generate bubbles from bubblers:
for bubbler in BUBBLERS:
# There is a 1 in 5 chance of making a bubble:
if random.randint(1, 5) == 1:
BUBBLES.append({'x': bubbler, 'y': HEIGHT - 2})
# Move the bubbles:
for bubble in BUBBLES:
diceRoll = random.randint(1, 6)
if (diceRoll == 1) and (bubble['x'] != LEFT_EDGE):
bubble['x'] -= 1 # Bubble goes left.
elif (diceRoll == 2) and (bubble['x'] != RIGHT_EDGE):
bubble['x'] += 1 # Bubble goes right.
bubble['y'] -= 1 # The bubble always goes up.
# Iterate over BUBBLES in reverse because I'm deleting from BUBBLES
# while iterating over it.
for i in range(len(BUBBLES) - 1, -1, -1):
if BUBBLES[i]['y'] == TOP_EDGE: # Delete bubbles that reach the top.
del BUBBLES[i]
# Simulate the kelp waving for one step:
for kelp in KELPS:
for i, kelpSegment in enumerate(kelp['segments']):
# 1 in 20 chance to change waving:
if random.randint(1, 20) == 1:
if kelpSegment == '(':
kelp['segments'][i] = ')'
elif kelpSegment == ')':
kelp['segments'][i] = '('
def drawAquarium():
"""Draw the aquarium on the screen."""
    global FISHES, BUBBLERS, BUBBLES, KELPS, STEP
# Draw quit message.
bext.fg('white')
bext.goto(0, 0)
print('Fish Tank, by <NAME> Ctrl-C to quit.', end='')
# Draw the bubbles:
bext.fg('white')
for bubble in BUBBLES:
bext.goto(bubble['x'], bubble['y'])
print(random.choice(('o', 'O')), end='')
# Draw the fish:
for fish in FISHES:
bext.goto(fish['x'], fish['y'])
# Get the correct right- or left-facing fish text.
if fish['goingRight']:
fishText = fish['right'][STEP % len(fish['right'])]
else:
fishText = fish['left'][STEP % len(fish['left'])]
# Draw each character of the fish text in the right color.
for i, fishPart in enumerate(fishText):
bext.fg(fish['colors'][i])
print(fishPart, end='')
# Draw the kelp:
bext.fg('green')
for kelp in KELPS:
for i, kelpSegment in enumerate(kelp['segments']):
if kelpSegment == '(':
bext.goto(kelp['x'], BOTTOM_EDGE - i)
elif kelpSegment == ')':
bext.goto(kelp['x'] + 1, BOTTOM_EDGE - i)
print(kelpSegment, end='')
# Draw the sand on the bottom:
bext.fg('yellow')
bext.goto(0, HEIGHT - 1)
print(chr(9617) * (WIDTH - 1), end='') # Draws '░' characters.
sys.stdout.flush() # (Required for bext-using programs.)
def clearAquarium():
"""Draw empty spaces over everything on the screen."""
    global FISHES, BUBBLERS, BUBBLES, KELPS
# Draw the bubbles:
for bubble in BUBBLES:
bext.goto(bubble['x'], bubble['y'])
print(' ', end='')
# Draw the fish:
for fish in FISHES:
bext.goto(fish['x'], fish['y'])
# Draw each character of the fish text in the right color.
print(' ' * len(fish['left'][0]), end='')
# Draw the kelp:
for kelp in KELPS:
for i, kelpSegment in enumerate(kelp['segments']):
bext.goto(kelp['x'], HEIGHT - 2 - i)
print(' ', end='')
sys.stdout.flush() # (Required for bext-using programs.)
# If this program was run (instead of imported), run the game:
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit() # When Ctrl-C is pressed, end the program.
``` |
{
"source": "jpkanter/efre-lod-elasticsearch-tools",
"score": 2
} |
#### File: Spcht/Core/WorkOrder.py
```python
import errno
import json
import logging
import math
import multiprocessing
import os
import shutil
import subprocess
import sys
import time
import traceback
import xml
from datetime import timedelta, datetime
import rdflib
from . import SpchtErrors as SpchtErrors
from .SpchtCore import Spcht
from Spcht.Utils.SpchtConstants import WORK_ORDER_STATUS
from .SpchtUtility import process2RDF
from Spcht.Utils.local_tools import load_from_json, sparqlQuery, delta_now, test_json, \
load_remote_content, solr_handle_return
logger = logging.getLogger(__name__)
def UpdateWorkOrder(file_path: str, force=False, **kwargs: tuple or list) -> dict:
"""
    kwarg Modes:
    * update - updates a key, needs at least 2 indices
    * insert - inserts a key, needs at least 3 indices
    * delete - deletes a key, needs at least 1 index
    Updates a work order file and does some sanity checks around the whole thing, the sanity checks
    involve:
    * checking that a new status is not lower than the old one
    * making sure that the file paths for the original json or turtle files are not overwritten
    :param str file_path: file path to a valid work-order.json
    :param bool force: ignores checks like a new status being lower than the old one
    :param tuple or list kwargs: 'insert' and/or 'update' and/or 'delete' as tuple(s); the last value is the value for the nested dictionary keys, when using 'insert' the n-1th entry is the new key and the nth entry the value
    :return dict: returns the updated work order dictionary
"""
# ! i actively decided against writing a file class for work order
work_order = load_from_json(file_path)
if work_order is not None:
if "update" in kwargs:
if isinstance(kwargs['update'], tuple):
kwargs['update'] = [kwargs['update']]
for update in kwargs['update']:
if len(update) < 2:
raise SpchtErrors.ParameterError("Not enough parameters")
old_value = UpdateNestedDictionaryKey(work_order, *update)
if old_value is None:
raise SpchtErrors.ParameterError("Couldnt update key")
# * sanity check
if update[len(update) - 2] == "status" and not force:
if old_value > update[len(update) - 1]:
raise SpchtErrors.WorkOrderInconsitencyError("New status higher than old one")
if "insert" in kwargs:
protected_entries = ("file", "rdf_file")
if isinstance(kwargs['insert'], tuple):
kwargs['insert'] = [kwargs['insert']]
for insert in kwargs['insert']:
if len(insert) < 3:
raise SpchtErrors.ParameterError("Not enough parameters")
overwritten = AddNestedDictionaryKey(work_order, *insert)
# * sanity check for certain fields
field_type = insert[len(insert) - 1]
if overwritten and field_type in protected_entries and not force:
raise SpchtErrors.WorkOrderInconsitencyError("Cannot overwrite any one file path")
# ? file entries are linked to somewhere, we dont want to overwrite those
if "delete" in kwargs:
if isinstance(kwargs['delete'], tuple):
kwargs['delete'] = [kwargs['delete']]
for deletion in kwargs['delete']:
DeleteNestedDictionaryKey(work_order, *deletion)
with open(file_path, "w") as work_order_file:
json.dump(work_order, work_order_file, indent=4)
return work_order
else:
raise SpchtErrors.WorkOrderError
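# Illustrative call (file name and keys are made up), marking chunk "0" as processed and recording
# the finishing time, mirroring how the functions below use it:
#   UpdateWorkOrder("./order.json",
#                   update=("file_list", "0", "status", 4),
#                   insert=("file_list", "0", "processing_finish", datetime.now().isoformat()))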
def UpdateNestedDictionaryKey(dictionary: dict, *args) -> None or any:
"""
    Changes the content of a dictionary key at any depth that is specified; the last argument will be the new value.
    Raises a ParameterError if the 'path' of keys does not exist.
    :param dict dictionary:
    :param str args:
    :return: the previous value of the updated key
"""
old_value = None
try:
keys = len(args)
_ = 0
value = dictionary
for key in args:
_ += 1
if _ + 1 >= keys:
old_value = value[key]
                value[key] = args[_]  # * mutable dictionary objects are passed by reference
# * therefore i change the original object here which is precisely what i want
break # * one more round to come..which we dont want
else:
value = value.get(key)
if value is None:
raise SpchtErrors.ParameterError(key)
return old_value
except KeyError as key:
raise SpchtErrors.ParameterError(key)
def AddNestedDictionaryKey(dictionary: dict, *args) -> bool:
"""
    Adds an arbitrary key with the value of the last argument to a dictionary, will not create the pathway to that
    parameter; if the previous keys do not exist a ParameterError is raised
    :param dict dictionary:
    :param str args:
    :return: True if an existing value was overwritten, otherwise False
"""
overwritten = False
try:
keys = len(args)
_ = 0
value = dictionary
for key in args:
_ += 1
if _ + 2 >= keys:
if value[key].get(args[_]) is not None:
overwritten = True
value[key][args[_]] = args[_ + 1]
break
else:
value = value.get(key)
if value is None:
raise SpchtErrors.ParameterError(key)
return overwritten
except KeyError as key:
raise SpchtErrors.ParameterError(key)
def DeleteNestedDictionaryKey(dictionary: dict, *args) -> bool:
"""
    Deletes the specified key of the dictionary, which may be nested at any depth; does nothing if the final key does not exist
    :param dict dictionary:
    :param str args:
    :return: True if the key was found and deleted, otherwise False
"""
try:
keys = len(args)
_ = 0
value = dictionary
for key in args:
_ += 1
if _ >= keys:
if value.pop(key, None) is not None: # pop returns the popped value which should be truthy
return True
else:
return False
else:
value = value.get(key)
if value is None:
raise SpchtErrors.ParameterError(key)
except KeyError as key:
raise SpchtErrors.ParameterError(key)
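# Sketch of how the three nested-dictionary helpers behave (the dictionary content is a made-up example):
#   d = {"meta": {"status": 2}, "file_list": {}}
#   UpdateNestedDictionaryKey(d, "meta", "status", 3)      # returns the old value 2, d["meta"]["status"] is now 3
#   AddNestedDictionaryKey(d, "file_list", "0", {"x": 1})  # returns False because nothing was overwritten
#   DeleteNestedDictionaryKey(d, "meta", "status")         # returns True, the key is removed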
def CheckForParameters(expectations: tuple, **kwargs):
"""
    Checks if all of the expected parameters are present in the keyword arguments of this function and returns those that are not
:param tuple expectations:
:type expectations:
:param kwargs:
:return: a list of missing parameters
:rtype: list
"""
missing = []
for argument in expectations:
if argument not in kwargs:
missing.append(argument)
return missing
# ? a list with len > 0 is unfortunately truthy so that i have to violate proper protocol a bit here
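# Example (made-up parameter names): CheckForParameters(("user", "password"), user="dba") returns ["password"]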
def CheckWorkOrder(work_order_file: str):
"""
    Crawls all available data in a work order file and writes a summary to stdout, also creates some statistics
    about how long things took to process and what the current status of the file is.
:param str work_order_file: file path to a work order file
:return: Nothing, only displays to console
"""
print(work_order_file)
work_order = load_from_json(work_order_file)
if work_order is None:
return False
# ? surely this could have been a dictionary but it isn't
time_infos = ("processing", "insert", "deletion", "solr")
try:
extremes = {"min_processing": None, "max_processing": None,
"min_insert": None, "max_insert": None,
"min_all": None, "max_all": None,
"min_solr": None, "max_solr": None,
"min_deletion": None, "max_deletion": None}
linear_delta = timedelta(
            seconds=0)  # the linear time needed to execute everything, due to parallel processing this can be longer than the actual time that was needed
if 'solr_start' in work_order['meta']:
extremes['min_all'] = datetime.fromisoformat(work_order['meta']['solr_start'])
extremes['min_solr'] = datetime.fromisoformat(work_order['meta']['solr_start'])
        if 'solr_finish' in work_order['meta']:
            extremes['max_all'] = datetime.fromisoformat(work_order['meta']['solr_finish'])
            extremes['max_solr'] = datetime.fromisoformat(work_order['meta']['solr_finish'])
        if 'solr_start' in work_order['meta'] and 'solr_finish' in work_order['meta']:
linear_delta += extremes['max_solr'] - extremes['min_solr']
counts = {'rdf_files': 0, 'files': 0, 'un_processing': 0, 'un_insert': 0,
'un_intermediate': 0} # occasions of something
counters = {"elements": 0, "triples": 0} # adding counts of individual fields
for key in work_order['file_list']:
# ? why, yes 'for key, item in dict.items()' is a thing
for method in time_infos:
if f'{method}_start' in work_order['file_list'][key]:
temp = datetime.fromisoformat(work_order['file_list'][key][f'{method}_start'])
if extremes[f'min_{method}'] is None or extremes[f'min_{method}'] > temp:
extremes[f'min_{method}'] = temp
if extremes[f'min_all'] is None or extremes['min_all'] > temp:
extremes[f'min_all'] = temp
if f'{method}_finish' in work_order['file_list'][key]:
temp = datetime.fromisoformat(work_order['file_list'][key][f'{method}_finish'])
if extremes[f'max_{method}'] is None or extremes[f'max_{method}'] < temp:
extremes[f'max_{method}'] = temp
if extremes[f'max_all'] is None or extremes['max_all'] < temp:
extremes[f'max_all'] = temp
if f'{method}_start' in work_order['file_list'][key] and f'{method}_finish' in work_order['file_list'][key]:
linear_delta += extremes[f'max_{method}'] - extremes[f'min_{method}']
for prop in counters.keys():
if prop in work_order['file_list'][key]:
if isinstance(work_order['file_list'][key][prop], int):
counters[prop] += work_order['file_list'][key][prop]
if 'rdf_file' in work_order['file_list'][key]:
counts['rdf_files'] += 1
if 'file' in work_order['file_list'][key]:
counts['files'] += 1
status = work_order['file_list'][key]['status']
if status == 3 or status == 2:
counts['un_processing'] += 1
if status == 5 or status == 4:
counts['un_intermediate'] += 1
if status == 7 or status == 6:
counts['un_insert'] += 1
print("+++++++++++++++++++WORK ORDER INFO++++++++++++++++++")
print(f"Current status: {WORK_ORDER_STATUS[work_order['meta']['status']]}")
if counts['un_processing'] > 0:
print(f"Unfinished processing: {counts['un_processing']}")
if counts['un_insert'] > 0:
print(f"Unfinished inserts: {counts['un_insert']}")
if counts['un_intermediate'] > 0:
print(f"Unfinished intermediate: {counts['un_intermediate']}")
print(f"Data retrieval: {work_order['meta']['fetch']}")
if work_order['meta'].get("chunk_size") and work_order['meta'].get("total_rows"):
print(
f"DL Parameters: {work_order['meta']['total_rows']} @ {work_order['meta']['chunk_size']} chunks")
print(f"Processing type: {work_order['meta']['type']}")
if 'processing' in work_order['meta']:
print(f"Multiprocessing, threads: {work_order['meta']['processing']}")
if counters['elements'] > 0:
print(f"Processed elements: {counters['elements']}")
if counters['triples'] > 0:
print(f"Resulting triples: {counters['triples']}")
print(f"Insert method: {work_order['meta']['method']}")
if counts['files'] > 0:
print(f"Downloaded files: {counts['files']}")
if counts['rdf_files'] > 0:
print(f"Processed files: {counts['rdf_files']}")
if extremes['min_all'] is not None and extremes['max_all'] is not None:
delta = extremes['max_all'] - extremes['min_all']
delta2 = None
for averice in time_infos:
if extremes[f'min_{averice}'] is not None and extremes[f'max_{averice}'] is not None:
if delta2 is None:
delta2 = extremes[f'max_{averice}'] - extremes[f'min_{averice}']
else: # ? this insulates against some weird edge case i dont even know, why i am doing this?
delta2 += extremes[f'max_{averice}'] - extremes[f'min_{averice}']
print(f"Total execution time: {delta}")
if delta2:
print(f"Relative execution time: {delta2}")
print(f"Linear execution time: {linear_delta}")
print(f"From: {extremes['min_all']}")
print(f"To: {extremes['max_all']}")
if extremes['min_solr'] is not None and extremes['max_solr'] is not None:
delta = extremes['max_solr'] - extremes['min_solr']
print(f"Download time: {delta}")
print(f"From: {extremes['min_solr']}")
print(f"To: {extremes['max_solr']}")
if extremes['min_deletion'] is not None and extremes['max_deletion'] is not None:
delta = extremes['max_deletion'] - extremes['min_deletion']
print(f"Deletion time: {delta}")
print(f"From: {extremes['min_deletion']}")
print(f"To: {extremes['max_deletion']}")
if extremes['min_processing'] is not None and extremes['max_processing'] is not None:
delta = extremes['max_processing'] - extremes['min_processing']
print(f"Processing time: {delta}")
print(f"From: {extremes['min_processing']}")
print(f"To: {extremes['max_processing']}")
if extremes['min_insert'] is not None and extremes['max_insert'] is not None:
delta = extremes['max_insert'] - extremes['min_insert']
print(f"Inserting time: {delta}")
print(f"From: {extremes['min_insert']}")
print(f"To: {extremes['max_insert']}")
print("++++++++++++++++++++END OF REPORT+++++++++++++++++++")
except KeyError as key:
print("####WORK ORDER LACKS KEYS, ERROR#####")
print(f"Missing Key {key}")
return True
def UseWorkOrder(work_order_file, **kwargs) -> list or int:
"""
    :param str work_order_file: file path of the work order
    :param kwargs: parameters needed by the individual steps, e.g. endpoints, credentials and a Spcht object
    :return: a list of missing parameters for the current step, the reached status as an int, or False on failure
"""
# ? from CheckWorkOrder
# ? ("Freshly created", "fetch started", "fetch completed", "processing started", "processing completed", "inserting started", "insert completed/finished", "fullfilled")
# ? Status, first index is 0
boiler_print = ", check log files for details"
if 'work_order_file' not in kwargs: # ? for manual use cause the checks were build for cli
kwargs['work_order_file'] = work_order_file
if 'spcht_descriptor' not in kwargs and 'spcht_object' in kwargs:
kwargs['spcht_descriptor'] = "dummy, dont need this anymore"
if 'spcht_descriptor' in kwargs and 'spcht_object' not in kwargs:
specht = Spcht(kwargs['spcht_descriptor'])
kwargs['spcht_object'] = specht
work_order = load_from_json(work_order_file)
if work_order is not None:
try:
if work_order['meta']['status'] == 1: # fetching started
logger.debug(f"Order {work_order_file}: Status 1 detected, reseting")
# fetch process is not recoverable, need to reset to zero state and start anew
HardResetWorkOrder(work_order_file)
if work_order['meta']['status'] == 0 or work_order['meta']['status'] == 1: # freshly created
logger.debug(f"Order {work_order_file}: Status 0 detected")
if work_order['meta']['fetch'] == "solr":
# ! checks
if work_order['meta']['type'] == "update":
logger.debug(f"Order {work_order_file}: Status 0 sorted into update download/insert")
expected = (
"work_order_file", "solr_url", "query", "total_rows", "chunk_size", "spcht_descriptor",
"save_folder", "max_age")
else:
logger.debug(f"Order {work_order_file}: Status 0 sorted into normal insert")
expected = (
"work_order_file", "solr_url", "query", "total_rows", "chunk_size", "spcht_descriptor",
"save_folder")
missing = CheckForParameters(expected, **kwargs)
if missing:
logger.info(f"WorkOrder File '{work_order_file}' couldnt not be processed because parameters {str(missing)} were missing.")
return missing
# ! process
UpdateWorkOrder(work_order_file, update=("meta", "status", 1))
if FetchWorkOrderSolr(**kwargs):
logger.debug(f"Order {work_order_file}: Solr fetching finished successful")
UpdateWorkOrder(work_order_file, update=("meta", "status", 2))
return 2
else:
msg = "Solr fetching failed, process now in 'inbetween' status"
logging.error(msg)
print(f"{msg}{boiler_print}")
return 1
if work_order['meta']['status'] == 3: # processing started
logger.debug(f"Order {work_order_file}: Status 3 detected")
print(f"Pickuping the order in an 'inbetween' status - {WORK_ORDER_STATUS[work_order['meta']['status']]}")
if not SoftResetWorkOrder(work_order_file):
msg = f"Reseting work order to state {WORK_ORDER_STATUS[work_order['meta']['status']] - 1} failed"
print(msg)
logger.critical(f"UseWorkOrder > {msg}")
return 3
if work_order['meta']['status'] == 2 or work_order['meta']['status'] == 3: # fetching completed
logger.debug(f"Order {work_order_file}: Status 2 detected")
# ! checks
expected = ("work_order_file", "spcht_descriptor", "subject")
missing = CheckForParameters(expected, **kwargs)
if missing:
return missing
# ! process
logger.info(f"Sorted order '{os.path.basename(work_order_file)}' as method 'insert'")
UpdateWorkOrder(work_order_file, update=("meta", "status", 3))
if 'processes' in kwargs:
ProcessOrderMultiCore(**kwargs)
else:
FulfillProcessingOrder(**kwargs)
# ! TODO: need checkup function here
UpdateWorkOrder(work_order_file, update=("meta", "status", 4))
logger.info(f"Turtle Files created, commencing to next step")
return 4
if work_order['meta']['status'] == 5: # intermediate processing started
logger.debug(f"Order {work_order_file}: Status 5 detected")
print(f"Pickuping the order in an 'inbetween' status - {WORK_ORDER_STATUS[work_order['meta']['status']]}")
if not SoftResetWorkOrder(work_order_file):
msg = f"Reseting work order to state {WORK_ORDER_STATUS[work_order['meta']['status']] - 1} failed"
print(msg)
logger.critical(f"UseWorkOrder > {msg}")
return 3
if work_order['meta']['status'] == 4 or work_order['meta']['status'] == 5: # processing done
logger.debug(f"Order {work_order_file}: Status 4 detected")
if work_order['meta']['type'] == "insert":
UpdateWorkOrder(work_order_file, update=("meta", "status", 6))
return UseWorkOrder(**kwargs) # jumps to the next step, a bit dirty this solution
if work_order['meta']['type'] == "update":
# ? isql emulates sparql queries in the interface
if work_order['meta']['method'] == "sparql":
# ! checks
expected = ("work_order_file", "named_graph", "sparql_endpoint", "user", "password")
missing = CheckForParameters(expected, **kwargs)
if missing:
return missing
logger.info(f"Scanned order '{os.path.basename(work_order_file)}' as type 'update', deletion process..")
UpdateWorkOrder(work_order_file, update=("meta", "status", 5))
# ! process
if IntermediateStepSparqlDelete(**kwargs):
UpdateWorkOrder(work_order_file, update=("meta", "status", 6))
return 6
else:
msg = "Intermediate deletion step failed"
logging.error(msg)
print(f"{msg}{boiler_print}")
return 5
elif work_order['meta']['method'] == "isql":
# ! checks
expected = ("work_order_file", "named_graph", "isql_path", "user", "password")
missing = CheckForParameters(expected, **kwargs)
if missing:
return missing
logger.info(f"Scanned order '{os.path.basename(work_order_file)}' as type 'update', deletion process..")
UpdateWorkOrder(work_order_file, update=("meta", "status", 5))
# ! process
if IntermediateStepISQLDelete(**kwargs):
UpdateWorkOrder(work_order_file, update=("meta", "status", 6))
return 6
else:
msg = "Intermediate deletion step failed"
logging.error(msg)
print(f"{msg}{boiler_print}")
return 5
else:
logger.critical(
f"Unknown method '{work_order['meta']['method']}' in work order file {work_order_file}")
if work_order['meta']['status'] == 7: # inserting started
logger.debug(f"Order {work_order_file}: Status 7 detected")
print(f"Pickuping the order in an 'inbetween' status - {WORK_ORDER_STATUS[work_order['meta']['status']]}")
if not SoftResetWorkOrder(work_order_file):
msg = f"Reseting work order to state {WORK_ORDER_STATUS[work_order['meta']['status']] - 1} failed"
print(msg)
logger.critical(f"UseWorkOrder > {msg}")
return 3
if work_order['meta']['status'] == 6 or work_order['meta']['status'] == 7: # intermediate processing done
logger.debug(f"Order {work_order_file}: Status 6 detected")
if work_order['meta']['method'] == "isql":
logger.debug(f"Order {work_order_file}: Status 6 sorted into isql insert")
# ! checks
expected = ("work_order_file", "named_graph", "isql_path", "user", "password", "virt_folder")
missing = CheckForParameters(expected, **kwargs)
if missing:
return missing
# ! process
logger.info(f"Sorted order '{os.path.basename(work_order_file)}' with method 'isql'")
UpdateWorkOrder(work_order_file, update=("meta", "status", 7))
if FulfillISqlInsertOrder(**kwargs):
UpdateWorkOrder(work_order_file, update=("meta", "status", 8))
return 8
else:
msg = "ISQL insert failed"
logging.error(msg)
print(f"{msg}{boiler_print}")
return 7
elif work_order['meta']['method'] == "sparql":
logger.debug(f"Order {work_order_file}: Status 6 sorted into sparql insert")
# ! checks
expected = ("work_order_file", "named_graph", "sparql_endpoint", "user", "password")
missing = CheckForParameters(expected, **kwargs)
if missing:
return missing
# ! process
logger.info(f"Sorted order '{os.path.basename(work_order_file)}' with method 'sparql'")
UpdateWorkOrder(work_order_file, update=("meta", "status", 7))
if FulfillSparqlInsertOrder(**kwargs):
UpdateWorkOrder(work_order_file, update=("meta", "status", 8))
return 8
else:
msg = "Sparql based insert operation failed"
logger.critical(msg)
print(f"{msg}{boiler_print}")
return 7
if work_order['meta']['status'] == 8: # inserting completed
logger.debug(f"Order {work_order_file}: Status 8 detected")
if CleanUpWorkOrder(work_order_file, **kwargs):
UpdateWorkOrder(work_order_file, update=("meta", "status", 9))
return 9
else:
return 8 # ! this is not all that helpful, like you "cast" this on status 8 and get back status 8, wow
if work_order['meta']['status'] == 9: # fulfilled, cleanup done
logger.debug(f"Order {work_order_file}: Status 9 detected - nothing to do")
# * do nothing, order finished
return 9
except KeyError as key:
logger.critical(f"The supplied json file doesnt appear to have the needed data, '{key}' was missing")
except TypeError as e:
fnc = "UseWorkOrder"
if e == "'NoneType' object is not subscriptable": # feels brittle
msg = "Could not properly load work order file"
print(msg)
logging.critical(f"{fnc} > {msg}")
else:
msg = f"{e.__class__.__name__}: {e}"
logging.critical(f"{fnc} > {msg}")
print(msg)
return False
def ProcessOrderMultiCore(work_order_file: str, **kwargs):
"""
Spawns multiple instances of FulfillProcessingOrder to utilize multiple cores when processing data
:param str work_order_file:
:type work_order_file:
:param kwargs:
:rtype: returns nothing ever
"""
if 'processes' not in kwargs:
raise SpchtErrors.WorkOrderInconsitencyError(
"Cannot call multi core process without defined 'processes' parameter")
if not isinstance(kwargs['processes'], int):
raise SpchtErrors.WorkOrderTypeError("Processes must be defined as integer")
logger.info(
"Started MultiProcess function, duplicated entries might appear in the log files for each instance of the processing procedure")
processes = []
# mod_kwargs = {}
# ? RE: the zombie commented out parts for copies of kwargs, in the parameters there is this innocent
# ? looking object called spcht_object which is an entire class with some data, thing of it as enzyme
# ? in theory nothing the processing does should change anything about the spcht itself, it does it
# ? it thing and once initiated it shall just process data from one state into another
# ? if any problems with the finished data arise i would definitely first check if the multiprocessing
# ? causes problems due some kind of racing condition
if kwargs['processes'] < 1:
kwargs['processes'] = 1
logger.info("Number of processes set to 1, config file review is advised")
else:
UpdateWorkOrder(work_order_file, insert=("meta", "processes", kwargs['processes']))
for i in range(0, kwargs['processes']):
# del mod_kwargs
# mod_kwargs = copy.copy(kwargs)
# mod_kwargs['spcht_object'] = Spcht(kwargs['spcht_object'].descriptor_file)
time.sleep(
1) # ! this all is file based, this is the sledgehammer way of avoiding problems with race conditions
p = multiprocessing.Process(target=FulfillProcessingOrder, args=(work_order_file,), kwargs=kwargs)
processes.append(p)
p.start()
for process in processes:
process.join()
def CreateWorkOrder(order_name, fetch: str, typus: str, method: str, **kwargs):
"""
    Creates a basic work order file that serves as the origin for all further operations and describes
    the steps necessary to fulfill the order
    :param str order_name: name of the order, the file name will be generated from this
    :param str fetch: method of data retrieval, either 'solr' or a list of plain 'file's
    :param str typus: type of work order, either 'insert' or 'update'; update deletes triples with the subject of the new data first
    :param str method: method of inserting the data into a triplestore, 'sparql', 'isql' or 'odbc', also 'none' if no such operation should take place
    :return str: the final name / file path of the work order file with all suffixes
"""
allowed = {
"fetch": ["file", "solr"],
"typus": ["insert", "update"],
"method": ["sparql", "isql", "odbc", "none"]
}
if fetch not in allowed['fetch']:
print(f"Fetch method {fetch} not available, must be {allowed['fetch']}")
return False # or raise work order Exception?
if typus not in allowed['typus']:
print(f"Operation type {typus} unknown, must be {allowed['typus']}")
return False
if method not in allowed['method']:
print(f"Insert method '{method}' not available, must be {allowed['method']}")
return False
logger.info("Starting Process of creating a new work order")
if order_name == "":
order_name = "work_order"
work_order = {"meta":
{
"status": 0,
"fetch": fetch,
"type": typus,
"method": method,
},
"file_list": {}
}
work_order_filename = os.path.join(os.getcwd(),
f"{order_name}-{datetime.now().isoformat().replace(':', '-')}.json")
logger.info(f"attempting to write order file to {work_order_filename}")
try:
with open(work_order_filename, "w") as order_file:
json.dump(work_order, order_file, indent=4)
return work_order_filename
except OSError as e:
logger.info(f"Encountered OSError {e}")
return False
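# Illustrative call (the order name is arbitrary); this would create something like
# './my_update-2021-01-01T12-00-00.json' in the current working directory and return that path:
#   order_file = CreateWorkOrder("my_update", fetch="solr", typus="update", method="sparql")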
def FetchWorkOrderSolr(work_order_file: str,
solr_url: str,
query="*:*",
total_rows=50000,
chunk_size=10000,
spcht_object=None,
save_folder="./",
force=False,
**kwargs):
"""
Utilizes the solr api interface to download data in bulk, uses cursors to continue further into the data
:param str work_order_file: filename of a work order file
:param str solr_url: url to an apache solr endpoint, for example: http://<fqdn>/solr/biblio/select
:param str query: query for the solr '*:*' fetches everything
    :param int total_rows: total amount of fetchable rows; if rows > available rows it will gracefully exit after exhausting the database
:param int chunk_size: size per chunk and file in entries
:param Spcht spcht_object: ready loaded Spcht object, optional, used to limit amount of fetched data
:param str save_folder: folder where temporary files are saved
:param bool force: if true, will ignore security checks like order status
:param kwargs:
:return: True if everything went well, False if something happened
:rtype: bool
"""
# ! some checks for the cli usage
if not os.path.exists(work_order_file):
print("Work order does not exists")
return False
if not isinstance(spcht_object, Spcht):
print("Provided Spcht Object is not a genuine Spcht Object")
return False
if not isinstance(total_rows, int) or not isinstance(chunk_size, int):
print("The *Number* of rows and chunk_size must be an integer number")
return False
n = math.floor(int(total_rows) / int(chunk_size)) + 1
# Work Order things:
work_order = load_from_json(work_order_file)
# ! Check meta status informations
if work_order['meta']['status'] > 1 and not force:
logging.error(
"Status of work order file is not 1, file is beyond first step and cannot be processed by Solr order")
return False
try:
if work_order['meta']['fetch'] != "solr":
logging.error("Provided work order does not use fetch method solr")
return False
work_order['meta']['max_chunks'] = n
work_order['meta']['chunk_size'] = chunk_size
work_order['meta']['total_rows'] = total_rows
work_order['meta']['spcht_user'] = spcht_object is not None
work_order['meta']['full_download'] = False
with open(work_order_file, "w") as order_file:
json.dump(work_order, order_file, indent=4)
except KeyError as key:
logging.error(f"Expected Key {key} is not around in the work order file")
print("Work Order error, aborting", file=sys.stderr)
parameters = {'q': query, 'rows': total_rows, 'wt': "json", "cursorMark": "*", "sort": "id asc"}
# you can specify a Spcht with loaded descriptor to filter field list
if isinstance(spcht_object, Spcht):
parameters['fl'] = ""
for each in spcht_object.get_node_fields():
parameters['fl'] += f"{each} "
parameters['fl'] = parameters['fl'][:-1]
logger.info(f"Using filtered field list: {parameters['fl']}")
if 'max_age' in kwargs: # this means you could technically run a normal insert with max_age
past_time = datetime.now() - timedelta(minutes=kwargs['max_age'])
logging.info(
f"maximum age parameter detected in solr fetch, limiting age of entries to everything younger than {past_time.strftime('%Y-%m-%dT%H:%M:%SZ')}")
searchtime = "last_indexed:[" + past_time.strftime("%Y-%m-%dT%H:%M:%SZ") + " TO *]"
parameters['q'] = f"{parameters['q']} {searchtime}"
base_path = os.path.join(os.getcwd(), save_folder)
start_time = time.time()
logger.info(f"Starting solrdump-like process - Time Zero: {start_time}")
logger.info(f"Solr Source is {solr_url}")
# logger.info(f"Solr query is {parameters['q']}")
logger.info(f"Calculated {n} chunks of a total of {total_rows} entries with a chunk size of {chunk_size}")
logger.info(f"Start Loading Remote chunks - {delta_now(start_time)}")
UpdateWorkOrder(work_order_file, insert=("meta", "solr_start", datetime.now().isoformat()))
try:
for i in range(0, n):
logger.info(f"Solr Download - New Chunk started: [{i + 1}/{n}] - {delta_now(start_time)} ms")
if i + 1 != n:
parameters['rows'] = chunk_size
else: # the rest in the last chunk
parameters['rows'] = int(int(total_rows) % int(chunk_size))
if i == 0: # only first run, no sense in clogging the log files with duplicated stuff
logger.info(f"\tUsing request URL: {solr_url}/{parameters}")
# ! call to solr for data
data = test_json(load_remote_content(solr_url, parameters))
if data is not None:
file_path = f"{os.path.basename(work_order_file)}_{hash(start_time)}_{i+1}-{n}.json"
filename = os.path.join(base_path, file_path)
try:
extracted_data = solr_handle_return(data)
except SpchtErrors.ParsingError as e:
logging.error(f"Error while parsing solr return: {e}")
return False
with open(filename, "w") as dumpfile:
json.dump(extracted_data, dumpfile)
file_spec = {"file": os.path.relpath(filename), "status": 2}
# ? to bring file status in line with order status, files start with 2, logically file_status 1 would be
# ? 'currently downloading' but this is a closed process so there will be never a partial file with
# ? status 1
UpdateWorkOrder(work_order_file, insert=("file_list", i, file_spec))
if data.get("nextCursorMark", "*") != "*" and data['nextCursorMark'] != parameters['cursorMark']:
parameters['cursorMark'] = data['nextCursorMark']
else:
logger.info(
f"{delta_now(start_time)}\tNo further CursorMark was received, therefore there are less results than expected rows. Aborting cycles")
break
else:
logger.info(f"Error in chunk {i + 1} of {n}, no actual data was received, aborting process")
return False
logger.info(f"Download finished, FullDownload successfull")
UpdateWorkOrder(work_order_file, update=("meta", "full_download", True),
insert=("meta", "solr_finish", datetime.now().isoformat()))
# except KeyboardInterrupt:
# print(f"Process was interrupted by user interaction")
# UpdateWorkOrder(work_order_file, insert=("meta", "completed_chunks", n))
# logger.info(f"Process was interrupted by user interaction")
# raise KeyboardInterrupt # necessary cause otherwise the process will be looped when used in main.py
except TypeError as e:
if e == "'NoneType' object is not subscriptable": # feels brittle
msg = "Could not properly load work order file"
fnc = "FetchWorkOrderSolr"
print(msg)
logging.error(f"{fnc} > {msg}")
return False
except OSError as e:
if e.errno == errno.ENOSPC: # ! i am quite sure that i could not even write log files in this case
logging.critical("Device Disc reached its limits")
print("Disc space full", file=sys.stderr)
exit(9)
else:
logger.info(f"Encountered OSError {e.errno}")
return False
print(f"Overall Solr fetch executiontime was {delta_now(start_time, 3)} seconds")
logger.info(f"Overall Executiontime was {delta_now(start_time, 3)} seconds")
return True
def FulfillProcessingOrder(work_order_file: str, subject: str, spcht_object: Spcht, force=False, **kwargs):
"""
Processes all raw data files specified in the work order file list
:param str work_order_file: filename of a work order file
:param str subject: a part of the subject without identifier in the <subject> <predicate> <object> chain
:param Spcht spcht_object: ready loaded Spcht object
:param bool force: if true, will ignore security checks like order status
:return: True if everything worked, False if something is not working
:rtype: boolean
"""
# * there was some mental discussion whether i use fulfill or fulfil (murican vs british), i opted for the american
# * way despite my education being british english because programming english is burger english
# ! checks cause this gets more or less directly called via cli
if not os.path.exists(work_order_file):
print("Work order does not exists")
return False
if not isinstance(subject, str):
print("Subject mst be a string")
return False
if not isinstance(spcht_object, Spcht):
print("Provided Spcht Object is not a genuine Spcht Object")
return False
if spcht_object.descriptor_file is None:
print("Spcht object must be succesfully loaded")
return False
try:
# when traversing a list/iterable we cannot change the iterable while doing so
# but for proper use i need to periodically check if something has changed, as the program
# does not change the number of keys or the keys itself this should work well enough, although
# i question my decision to actually use files of any kind as transaction log
work_order0 = load_from_json(work_order_file)
# ! work file specific parameter check
if work_order0['meta']['status'] < 1 and not force:
logging.error("Given order file is below status 0, probably lacks data anyway, cannot proceed")
return False
if work_order0['meta']['status'] > 3 and not force:
logging.error("Given order file is above status 3, is already fully processed, cannot proceed")
return False
work_order = work_order0
logger.info(
f"Starting processing on files of work order '{os.path.basename(work_order_file)}', detected {len(work_order['file_list'])} Files")
print(f"Start of Spcht Processing - {os.getpid()}")
_ = 0
for key in work_order0['file_list']:
_ += 1
if work_order['file_list'][key]['status'] == 2: # Status 2 - Downloaded, not processed
work_order = UpdateWorkOrder(work_order_file,
update=('file_list', key, 'status', 3),
insert=('file_list', key, 'processing_start', datetime.now().isoformat()))
mapping_data = load_from_json(work_order['file_list'][key]['file'])
quadros = []
elements = 0
for entry in mapping_data:
try:
quader = spcht_object.process_data(entry, subject)
elements += 1
quadros += quader
except SpchtErrors.MandatoryError:
logger.info(
f"Mandatory field was not found in entry {elements} of file {work_order['file_list'][key]['file']}")
logger.info(f"Finished file {_} of {len(work_order['file_list'])}, {len(quadros)} triples")
rdf_dump = f"{work_order['file_list'][key]['file'][:-4]}_rdf.ttl"
with open(rdf_dump, "w") as rdf_file:
rdf_file.write(process2RDF(quadros)) # ? avoiding circular imports
work_order = UpdateWorkOrder(work_order_file,
update=('file_list', key, 'status', 4),
insert=[('file_list', key, 'rdf_file', rdf_dump),
(
'file_list', key, 'processing_finish', datetime.now().isoformat()),
('file_list', key, 'elements', elements),
('file_list', key, 'triples', len(quadros))
])
logger.info(f"Finished processing {len(work_order['file_list'])} files and creating turtle files")
print(f"End of Spcht Processing - {os.getpid()}")
return True
except KeyError as key:
logger.critical(f"The supplied work order doesnt appear to have the needed data, '{key}' was missing")
return False
except TypeError as e:
if e == "'NoneType' object is not subscriptable": # feels brittle
msg = "Could not properly load work order file"
fnc = "FulFillProcessingOrder"
print(msg)
logging.error(f"{fnc} > {msg}")
return False
except Exception as e:
traceback.print_exc()
logger.error(f"Unknown type of exception: '{e}'")
return False
def IntermediateStepSparqlDelete(work_order_file: str, sparql_endpoint: str, user: str, password: str, named_graph: str,
force=False, **kwargs):
"""
    Deletes all data that has the same subject as any data found in the processed turtle files, uses the sparql interface directly
:param str work_order_file: file path to a work order file
:param str sparql_endpoint: endpoint for an authenticated sparql interface, the one of virtuoso is /sparql-auth
:param str user: user for the sparql interface
:param str password: plain text password for the sparql interface, deleting things is an elevated process
:param str named_graph: fourth part of the triple where the data resides and will be removed
:param bool force: if true, will ignore security checks like status
:param kwargs: additional parameters that all will be ignored but allow the function of the work order principle
:return: True if everything went well and False if something happened
:rtype: bool
"""
# f"WITH <named_graph> DELETE { <subject> ?p ?o } WHERE { <subject> ?p ?o }
try:
work_order0 = load_from_json(work_order_file)
if work_order0['meta']['status'] != 5 and not force:
logging.error("Order has to be on status 5 when using IntermediateStepSparqlDelete")
return False
if work_order0['meta']['type'] != "update":
logging.error(
f"Insert type must be 'update' for IntermediateStepSparqlDelete, but is '{work_order0['meta']['type']}'")
return False
# raise SpchtErrors.WorkOrderError(f"Insert type must be 'update' for IntermediateStepSparqlDelete, but is '{work_order0['meta']['type']}'")
work_order = work_order0
for key in work_order0['file_list']:
if work_order['file_list'][key]['status'] == 4:
logging.info(
f"Deleting old entries that match new entries of {work_order['file_list'][key]['rdf_file']}")
work_order = UpdateWorkOrder(work_order_file,
update=('file_list', key, 'status', 5),
insert=('file_list', key, 'deletion_start', datetime.now().isoformat()))
f_path = work_order['file_list'][key]['rdf_file']
that_graph = rdflib.Graph()
that_graph.parse(f_path, format="turtle")
for evelyn in that_graph.subjects(): # the every word plays continue
triples = f"<{evelyn}> ?p ?o. "
query = f"WITH <{named_graph}> DELETE {{ {triples} }} WHERE {{ {triples} }}"
# * this poses as a major bottleneck as the separate http requests take most of the time for this
# * process, i looked into it and apparently there is no easy way to delete a lot of lines with
# * sparql cause its technically a read-only language and this whole update/delete shebang seems
# * to be an afterthought, you could chain where clauses but that apparent processing time for
# * that scales with U^x which seems to be not very desirable
status, discard = sparqlQuery(query,
sparql_endpoint,
auth=user,
pwd=password,
named_graph=named_graph)
if not status:
return False
work_order = UpdateWorkOrder(work_order_file, update=('file_list', key, 'status', 6),
insert=('file_list', key, 'deletion_finish', datetime.now().isoformat()))
return True
# ? boilerplate code from sparql insert
except KeyError as foreign_key:
logger.critical(f"Missing key in work order: '{foreign_key}'")
return False
except FileNotFoundError as file:
logger.critical(f"Cannot find file {file}")
return False
except TypeError as e:
if e == "'NoneType' object is not subscriptable": # feels brittle
msg = "Could not properly load work order file"
fnc = "IntermediateSparqlDelete"
print(msg)
logging.error(f"{fnc} > {msg}")
return False
def IntermediateStepISQLDelete(work_order_file: str, isql_path: str, user: str, password: str, named_graph: str,
isql_port=1111, force=False, **kwargs):
"""
    Deletes all data that has the same subject as any data found in the processed turtle files. Actually uses sparql
    to fulfill its function but issues those queries via the isql interface of virtuoso, which needs appropriate rights.
:param str work_order_file: file path to a work order file
:param str isql_path: path to the isql interface of virtuoso
:param str user: user for the isql interface
:param str password: plain text password for the isql interface, deleting things is an elevated process
:param str named_graph: fourth part of the triple where the data resides and will be removed
    :param int isql_port: port on which the isql interface of the database server listens, usually 1111
:param bool force: if true, will ignore security checks like status
:param kwargs: additional parameters that all will be ignored but allow the function of the work order principle
:return: True if everything went well and False if something happened
:rtype: bool
"""
try:
work_order0 = load_from_json(work_order_file)
if work_order0['meta']['status'] != 5 and not force:
logging.error("Order has to be on status 5 when using IntermediateStepSparqlDelete")
return False
if work_order0['meta']['type'] != "update":
logging.error(f"Insert type must be 'update' for IntermediateStepSparqlDelete, but is '{work_order0['meta']['type']}'")
return False
work_order = work_order0
for key in work_order0['file_list']:
if work_order['file_list'][key]['status'] == 4:
logging.info(f"Deleting old entries that match new entries of {work_order['file_list'][key]['rdf_file']}")
work_order = UpdateWorkOrder(work_order_file,
update=('file_list', key, 'status', 5),
insert=('file_list', key, 'deletion_start', datetime.now().isoformat()))
f_path = work_order['file_list'][key]['rdf_file']
that_graph = rdflib.Graph()
that_graph.parse(f_path, format="turtle")
for evelyn in that_graph.subjects(): # the every word plays continue
triples = f"<{evelyn}> ?p ?o. "
query = f"WITH <{named_graph}> DELETE WHERE {{ {triples} }}"
# ? when using this you can actually delete without specifying after the DELETE clause, weird
subprocess.run([isql_path, str(isql_port), user, password, "VERBOSE=OFF", f"EXEC=sparql {query};"], capture_output=True, check=True)
# ? i dont capture any output for subprocess.run cause all that i am interested in is exit code
# ? non-zero which will be captures by the thrown exception
work_order = UpdateWorkOrder(work_order_file, update=('file_list', key, 'status', 6),
insert=('file_list', key, 'deletion_finish', datetime.now().isoformat()))
return True
# ? boilerplate code from sparql insert
except KeyError as foreign_key:
logger.critical(f"Missing key in work order: '{foreign_key}'")
return False
except FileNotFoundError as file:
logger.critical(f"Cannot find file {file}")
return False
except subprocess.CalledProcessError as e:
logger.error(f"Error while running isql interface, exited with non-zero exit-code {e.returncode}\n"
f"Message from the program: {e.stderr.decode('ascii').strip()}")
return False
except TypeError as e:
if e == "'NoneType' object is not subscriptable": # feels brittle
msg = "Could not properly load work order file"
fnc = "IntermediateStepISQLDelete"
print(msg)
logger.critical(f"{fnc} > {msg}")
return False
def FulfillSparqlInsertOrder(work_order_file: str,
sparql_endpoint: str,
user: str,
password: str,
named_graph: str,
force=False,
**kwargs):
"""
Inserts read data from the processed turtle files in the triplestore, this should work regardless of what kind of
triplestore you are utilising.
:param str work_order_file: file path to a work order file
:param str sparql_endpoint: endpoint for an authenticated sparql interface, the one of virtuoso is /sparql-auth
:param str user: user for the sparql interface
:param str password: plain text password for the sparql interface, inserting things is an elevated process
:param str named_graph: fourth part of the triple where the data resides and will be removed
:param bool force: if true, will ignore security checks like status
:param kwargs: arbitary, additional parameters that all will be ignored
:return: True if everything went well and False if something happened
:rtype: bool
"""
# WITH GRAPH_IRI INSERT { bla } WHERE {};
SPARQL_CHUNK = 50
try:
work_order0 = load_from_json(work_order_file)
if work_order0['meta']['status'] < 4 and not force:
logger.error("Order hast a status below 4 and might be not fully procssed or fetch, aborting")
return False
if work_order0['meta']['status'] > 8 and not force:
logger.error("This work orders status indicates that its already done, aborting.")
return False
if work_order0['meta']['method'] != "sparql":
raise SpchtErrors.WorkOrderError(
f"Method in work order file is {work_order0['meta']['method']} but must be 'sparql' for this method")
work_order = work_order0
for key in work_order0['file_list']:
if work_order['file_list'][key]['status'] == 4 or work_order['file_list'][key]['status'] == 6:
work_order = UpdateWorkOrder(work_order_file,
update=('file_list', key, 'status', 7),
insert=('file_list', key, 'insert_start', datetime.now().isoformat()))
f_path = work_order['file_list'][key]['rdf_file']
this_graph = rdflib.Graph()
this_graph.parse(f_path, format="turtle")
triples = ""
rounds = 0
for sub, pred, obj in this_graph:
rounds += 1
if isinstance(obj, rdflib.term.URIRef):
triples += f"<{sub.toPython()}> <{pred.toPython()}> <{obj.toPython()}> . \n"
else:
if obj.language:
annotation = "@" + obj.language
elif obj.datatype:
annotation = "^^" + obj.datatype
else:
annotation = ""
triples += f"<{sub.toPython()}> <{pred.toPython()}> \"{obj.toPython()}\"{annotation} . \n"
# ! TODO: can optimize here, grouped queries
if rounds > SPARQL_CHUNK:
query = f"""WITH <{named_graph}> INSERT {{ {triples} }}"""
# * i have the sneaking suspicion that i defined the named graph twice
status, discard = sparqlQuery(query,
sparql_endpoint,
auth=user,
pwd=password,
named_graph=named_graph)
if not status:
return False
triples = ""
rounds = 0
# END OF FOR LOOP
if rounds > 0 and triples != "":
query = f"""WITH <{named_graph}> INSERT {{ {triples}}}"""
status, discard = sparqlQuery(query,
sparql_endpoint,
auth=user,
pwd=password,
named_graph=named_graph)
if not status:
return False
work_order = UpdateWorkOrder(work_order_file, update=('file_list', key, 'status', 8),
insert=('file_list', key, 'insert_finish', datetime.now().isoformat()))
return True
except KeyError as foreign_key:
logger.critical(f"Missing key in work order: '{foreign_key}'")
return False
except FileNotFoundError as file:
logger.critical(f"Cannot find file {file}")
return False
except TypeError as e:
if e == "'NoneType' object is not subscriptable": # feels brittle
msg = "Could not properly load work order file"
fnc = "FulFillSparqlInsertOrder"
print(msg)
logger.critical(f"{fnc} > {msg}")
return False
except xml.parsers.expat.ExpatError as e:
logger.error(f"Parsing of triple file failed: {e}")
return False
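# A hypothetical usage sketch (endpoint, credentials and file name below are placeholders,
# not taken from any real configuration):
#
#   ok = FulfillSparqlInsertOrder("./order.json",
#                                 "http://localhost:8890/sparql-auth",
#                                 user="dba",
#                                 password="secret",
#                                 named_graph="http://example.org/graph")
#   if not ok:
#       logger.error("sparql insert order could not be fulfilled")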
def FulfillISqlInsertOrder(work_order_file: str,
isql_path: str,
user: str,
password: str,
named_graph: str,
isql_port=1111,
virt_folder="/tmp/",
force=False,
**kwargs): # ! random kwarg is so i can enter more stuff than necessary that can be ignored
"""
    This utilizes the virtuoso bulk loader engine to insert the previously processed data into the
    virtuoso triplestore. For that it copies the files with the triples into a folder that virtuoso
    accepts for this kind of input; those folders are usually defined in the virtuoso.ini. It then
    manually calls the isql interface to put the file into the bulk loader scheduler and, once that
    is done, deletes the copied file. For now the script has no real way of knowing if the operation
    actually succeeds. Only the execution time might be a hint, but that might vary depending on
    system load and overall resources.
    :param str work_order_file: filename of the work order that is to be fulfilled, gets overwritten often
:param str isql_path: path to the virtuoso isql-v/isql executable
:param str user: name of a virtuoso user with enough rights to insert
:param str password: clear text password of the user from above
:param str named_graph: named graph the data is to be inserted into
:param int isql_port: port of the virtuoso sql database, usually 1111
    :param str virt_folder: folder that virtuoso accepts as input for files, must have write permissions
:param bool force: if true, will ignore security checks like status
:return: True if everything went "great"
    :rtype: bool
"""
try:
work_order0 = load_from_json(work_order_file)
if work_order0 is None:
return False
if work_order0['meta']['status'] < 4 and not force:
            logger.error("Order has a status below 4 and might not be fully processed or fetched, aborting")
return False
if work_order0['meta']['status'] > 8 and not force:
            logger.error("This work order's status indicates that it is already done, aborting.")
return False
work_order = work_order0
for key in work_order0['file_list']:
if work_order['file_list'][key]['status'] == 4 or work_order['file_list'][key]['status'] == 6:
work_order = UpdateWorkOrder(work_order_file,
update=('file_list', key, 'status', 7),
insert=('file_list', key, 'insert_start', datetime.now().isoformat()))
f_path = work_order['file_list'][key]['rdf_file']
f_path = shutil.copy(f_path, virt_folder)
command = f"EXEC=ld_add('{f_path}', '{named_graph}');"
zero_time = time.time()
subprocess.run([isql_path, str(isql_port), user, password, "VERBOSE=OFF", command,
"EXEC=rdf_loader_run();","EXEC=checkpoint;"],
capture_output=True, check=True)
# ? see IntermediateISQLDelete for decision process about this
logger.debug(f"Executed ld_add command via isql, execution time was {delta_now(zero_time)}")
# ? apparently i cannot really tell if the isql stuff actually works
if os.path.exists(f_path):
os.remove(f_path)
# reloading work order in case something has changed since then
work_order = UpdateWorkOrder(work_order_file, update=('file_list', key, 'status', 8),
insert=('file_list', key, 'insert_finish', datetime.now().isoformat()))
        logger.info(f"Successfully called the bulk loader {len(work_order['file_list'])} times")
return True
except KeyError as foreign_key:
logger.critical(f"Missing key in work order: '{foreign_key}'")
return False
except PermissionError as folder:
logger.critical(f"Cannot access folder {folder} to copy turtle into.")
return False
except TypeError as e:
        if str(e) == "'NoneType' object is not subscriptable":  # feels brittle
            msg = "Could not properly load work order file"
            fnc = "FulfillISqlInsertOrder"
print(msg)
logger.critical(f"{fnc} > {msg}")
return False
except subprocess.CalledProcessError as e:
logger.error(f"Error while running isql interface, exited with non-zero exit-code {e.returncode}\n"
f"Message from the program: {e.stderr.decode('ascii').strip()}")
return False
except FileNotFoundError as file:
logger.critical(f"Cannot find file {file}")
return False
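# A hypothetical usage sketch (paths, port and credentials are placeholders); it assumes a
# local virtuoso installation whose virtuoso.ini lists /tmp/ as an allowed bulk load folder:
#
#   ok = FulfillISqlInsertOrder("./order.json",
#                               isql_path="/usr/local/virtuoso-opensource/bin/isql",
#                               user="dba",
#                               password="secret",
#                               named_graph="http://example.org/graph",
#                               isql_port=1111,
#                               virt_folder="/tmp/")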
def CleanUpWorkOrder(work_order_filename: str, force=False, files=('file', 'rdf_file'), **kwargs):
"""
    Removes all files referenced in the work order file from the filesystem if the necessary
    processing state has been reached
:param str work_order_filename: file path to a work order file
:param bool force: If set to true disregards meta status checks and deletes everything it touches
    :param files: keys in the 'file_list' entries whose referenced files get deleted
:return: True if everything went smoothly, False if not.
"""
if force:
logger.info("Force mode detected, will delete everything regardless of status")
try:
work_order_0 = load_from_json(work_order_filename)
# ? Work Order Status 8 - Inserting done, this achieves Step 9 "Clean up done"
if work_order_0['meta']['status'] < 7 and not force:
            logger.error("Status of order indicates it is not yet ready for cleanup, aborting.")
return False
if work_order_0['meta']['status'] > 8 and not force:
            logger.error("Status of order indicates cleanup is already done, aborting.")
return False
_ = 0
for key in work_order_0['file_list']:
# * for each entry in the part list
if work_order_0['file_list'][key]['status'] == 8 or force:
wo_update = [] # i could just call update work order twice
for fileattr in files:
# * slightly complicated method to allow for more files (for whatever reason)
one_file = work_order_0['file_list'][key].get(fileattr)
if one_file:
try:
os.remove(one_file)
wo_update.append(('file_list', key, fileattr))
except OSError as e:
                            if e.errno == errno.ENOENT:
                                logger.info(
                                    f"Removing of '{fileattr}' failed because the referenced file '{one_file}' is already gone")
                            elif e.errno == errno.EPERM or e.errno == errno.EACCES:
                                logger.info(
                                    f"Removing of '{fileattr}' failed because the referenced file '{one_file}' cannot be accessed")
                            elif e.errno == errno.EISDIR:
                                logger.info(
                                    f"Removing of '{fileattr}' failed because the referenced file '{one_file}' is a directory")
                            else:
                                logger.error(
                                    f"Generic, unexpected error while deleting '{fileattr}', filename '{one_file}'")
if len(wo_update) > 0:
UpdateWorkOrder(work_order_filename, delete=wo_update, update=('file_list', key, 'status', 9))
else:
logger.error(
                        f"On Cleanup of {work_order_filename}:{key} nothing could be deleted, status remained on 8")
# funny, in the end i do nothing with the work order cause the action was to handle the work order, not
# doing things. Twisted sense of humour
return True
# ? it might be an idea to actually update the meta data status as well but i did in all the other functions so
# ? that that value is explicitly handled by another function
except KeyError as key:
print(f"Key missing {key}")
logger.critical(f"Missing key in work order: '{key}'")
return False
except TypeError as e:
        if str(e) == "'NoneType' object is not subscriptable":  # feels brittle
msg = "Could not properly load work order file"
fnc = "CleanUpWorkOrder"
print(msg)
logger.critical(f"{fnc} > {msg}")
return False
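# A hypothetical usage sketch (file name is a placeholder): clean up the files of a fully
# inserted order, or force-delete everything regardless of status:
#
#   CleanUpWorkOrder("./order.json")
#   CleanUpWorkOrder("./order.json", force=True)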
def HardResetWorkOrder(work_order_file: str, **kwargs):
"""
Resets the work order to the last stable status according to the meta>status position, deletes files and entries
relative to that. Like deleting processed files and the timings of that processing
:param str work_order_file: file path to a work order file
:param kwargs:
:return: True if everything was successful
:rtype: bool
"""
work_order = load_from_json(work_order_file)
try:
status = work_order['meta']['status'] # prevents me from writing this a thousand time over..and its cleaner
if status == 1: # downloads are basically unrecoverable cause we dont know how much is missing
CleanUpWorkOrder(work_order_file, force=True, files=('rdf_file', 'file'))
UpdateWorkOrder(work_order_file,
update=[('file_list', {}), ('meta', 'status', 0)],
delete=[('meta', 'solr_start'), ('meta', 'solr_finish'), ('meta', 'full_download'),
('meta', 'spcht_user')],
force=True) # sets to empty list
# what i dont like is that i have like X+1 file operations but in the grand scheme of things it probably
# doesnt matter. There is some thinking of just doing all this in an sqlite database
return True
if status == 3: # processing started
            CleanUpWorkOrder(work_order_file, force=True, files=('rdf_file',))
UpdateWorkOrder(work_order_file, update=('meta', 'status', 2))
fields = ('processing_start', 'processing_finish', 'elements', 'triples')
elif status == 5: # post-processing started
UpdateWorkOrder(work_order_file, update=('meta', 'status', 4))
fields = ('deletion_start', 'deletion_finish')
elif status == 7: # inserting started
UpdateWorkOrder(work_order_file, update=('meta', 'status', 6))
fields = ('insert_start', 'insert_finish')
else:
            print("No resettable status, this defaults to a success")
return True
# * generic field purge
if status == 3 or status == 5 or status == 7:
work_order0 = load_from_json(work_order_file) # reload after deleting things
work_order = work_order0.copy()
for each in work_order['file_list']:
for that_field in fields:
                    work_order['file_list'][each].pop(that_field, None)
with open(work_order_file, "w") as open_file:
json.dump(work_order, open_file, indent=4)
return True
except KeyError as key:
print(f"Key missing {key}")
logger.critical(f"Missing key in work order: '{key}'")
return False
except TypeError as e:
        if str(e) == "'NoneType' object is not subscriptable":  # feels brittle
msg = "Could not properly load work order file"
fnc = "HardResetWorkOrder"
print(msg)
logger.critical(f"{fnc} > {msg}")
else:
print(f"Generic TypeError: {e}")
return False
except OSError:
        print("Generic OSError while resetting work order file")
return False
def SoftResetWorkOrder(work_order_file: str, **kwargs):
"""
    Instead of resetting to the "big" status like HardResetWorkOrder, this only goes through the file list and resets
    the individual files to their previous file-wise state. This function is a recipe for disaster if some other
    process is still working on the file; this is probably the point where a file lock should have been utilized.
    :param str work_order_file: file path to a work order file
    :param kwargs: arbitrary additional parameters that will all be ignored
    :return: True if everything went alright
    :rtype: bool
"""
# ? i thought about how fine this function should be, what if somewhen in the future some weirdo fucks up a work
# ? order that big time that there are like all status that are possible in one single file? But actually, that
# ? should not happen under normal circumstances, you only advance if all sub-status are done for that meta
# ? status, so why worry about something that someone probably jerry-rigged to death anyway, that person can write
# ? their own function to fix those cases. This only fixes according to meta status and that's it
# * as a side note: 'Jury-rigged' assembled quickly with the materials at hand
# * 'Jerry-built' = cheaply or poorly built, comes from nautical term 'jury' = 'makeshift', 'temporary'
# * a not-one amount of status is status, statuses would be also correct
# english is fascinating me every waking hour, such an inconsistent language
work_order = load_from_json(work_order_file)
try:
status = work_order['meta']['status']
list_of_updates = []
list_of_deletes = []
if status == 1: # fetch state cannot be repaired in a meaningful way:
return HardResetWorkOrder(work_order_file,
**kwargs) # ? i dont know why i even bother to push the kwargs with it
elif status == 3: # processing started, resets unfinished processes
for each in work_order['file_list']:
if work_order['file_list'][each]['status'] == 3:
# of all the deletes only processing_start makes sense, but i skirt around some freak case here
# so i rather do it properly, the time lost _should_ never matter for 3 dict operations
list_of_deletes.append(('file_list', each, 'processing_start'))
list_of_deletes.append(('file_list', each,
'processing_finish')) # why should it be finished but status 3? anyway, away with that
list_of_deletes.append(('file_list', each, 'elements'))
list_of_deletes.append(('file_list', each, 'triples'))
list_of_updates.append(('file_list', each, 'status', 2))
elif status == 5: # intermediate, post-processing was started but not finished
for each in work_order['file_list']:
if work_order['file_list'][each]['status'] == 5:
list_of_deletes.append(('file_list', each, 'deletion_start'))
list_of_deletes.append(('file_list', each, 'deletion_finish'))
list_of_updates.append(('file_list', each, 'status', 4))
elif status == 7:
for each in work_order['file_list']:
if work_order['file_list'][each]['status'] == 7:
list_of_deletes.append(('file_list', each, 'insert_start'))
list_of_deletes.append(('file_list', each, 'insert_finish'))
list_of_updates.append(('file_list', each, 'status', 6))
else:
logger.info("Cannot soft reset anything.")
return True
UpdateWorkOrder(work_order_file, update=list_of_updates, delete=list_of_deletes, force=True)
return True
except TypeError as e:
        if str(e) == "'NoneType' object is not subscriptable":  # feels brittle
msg = "Could not properly load work order file"
fnc = "SoftResetWorkOrder"
print(msg)
logger.critical(f"{fnc} > {msg}")
else:
            logger.error(f"SoftResetWorkOrder: Generic TypeError occurred {e}")
print(f"Generic TypeError: {e}")
return False
except Exception as e:
        logger.critical(f"unexpected, uncaught exception happened, {e.__class__.__name__}: '{e}'")
        print(e)  # this text lies, technically it was of course caught, otherwise there would be no log of it
        return False
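# A hypothetical usage sketch contrasting the two reset flavours (file name is a placeholder):
# a hard reset rolls the whole order back to the previous meta status, while a soft reset only
# rolls back the unfinished entries inside 'file_list':
#
#   HardResetWorkOrder("./order.json")
#   SoftResetWorkOrder("./order.json")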
def PurgeWorkOrder(work_order_file: str, **kwargs):
"""
Simply resets an existing work order to status 0 by rewriting it
:param str work_order_file: file path to a work order file
    :param kwargs: various parameters that all get ignored. Just there for compatibility reasons
    :return: True if file writing succeeded
    :rtype: bool
"""
old_work_order = load_from_json(work_order_file)
try:
meta = old_work_order['meta']
return CreateWorkOrder(work_order_file, meta['fetch'], meta['type'], meta['method'])
except KeyError as key:
print(f"Key missing {key}")
return False
except TypeError as e:
        if str(e) == "'NoneType' object is not subscriptable":  # feels brittle
            msg = "Could not properly load work order file"
            fnc = "PurgeWorkOrder"
print(msg)
logger.critical(f"{fnc} > {msg}")
return False
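# A hypothetical usage sketch (file name is a placeholder): recreate the order from its own
# fetch/type/method metadata, dropping all progress:
#
#   PurgeWorkOrder("./order.json")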
```
#### File: efre-lod-elasticsearch-tools/Spcht/folio2triplestore.py
```python
import logging
import sys
import re
import copy
import os
import argparse
import json
from datetime import datetime, timedelta
import traceback
# import internal modules
from Spcht.Core import SpchtUtility
from Spcht.Core import WorkOrder
from Spcht.Core.SpchtCore import Spcht, SpchtTriple, SpchtThird
from Spcht.Utils.local_tools import sizeof_fmt
from Spcht.foliotools.foliotools import part1_folio_workings, grab, create_single_location, check_location_changes, \
check_opening_changes, create_location_node, sparql_delete_node_plus1
import Spcht.foliotools.folio2triplestore_config as secret
logging.basicConfig(filename=secret.log_file, format='[%(asctime)s] %(levelname)s:%(message)s', level=logging.INFO)
#logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
append = "?limit=1000"
__version__ = 0.7
def crawl_location(location_hashes, opening_hashes, location_objects, opening_objects):
global append
locations = part1_folio_workings(secret.endpoints['locations'], "location", append)
found_locations = {}
for each in locations['locations']:
if re.search(secret.name, each['code']):
found_locations[each['id']] = copy.deepcopy(each)
new_locations = {}
for key, each in found_locations.items():
if key not in location_hashes:
one_loc, loc_hash, open_hash = create_single_location(each)
location_hashes.update({key: loc_hash})
opening_hashes.update(open_hash)
new_locations[key] = one_loc
if not new_locations:
return []
logging.info(f"Found {len(new_locations)} new locations")
triples, anti_triple, anti_opening = part3_spcht_workings(new_locations, secret.folio_spcht,
secret.anti_folio_spcht,
secret.anti_opening_spcht)
opening_objects.update({k: v[0] for k, v in anti_opening.items()})
location_objects.update(anti_triple)
if part4_work_order(triples):
return [location for location in new_locations.keys()]
else:
return None
def location_update(location_hashes, opening_hashes, location_objects, opening_objects):
changed = check_location_changes(location_hashes)
if not changed:
logging.info("Check completed without any found changes, hibernating...")
return []
changedLocs = {k: v['location'] for k, v in changed.items() if 'location' in v}
location_hashes.update({k: v['location_hash'] for k, v in changed.items() if 'location_hash' in v})
for dic in changed.values():
if 'opening_hash' in dic:
opening_hashes.update(dic['opening_hash'])
# * opening_hashes.update({dic['opening_hash'] for dic in changed.values()})
# ? double dictionary comprehension, the way 'create_node' works is that it has to transport the id of the
# ? opening hour somehow, this it does by nesting the key one layer deeper, so that the result of 'create_one_node'
# ? that is used in location changes gives us {location}, str_hash, {uuid_str: str_hash}
# ? to get the actual opening hour uuid we therefore have to go two layers deep, in this case there should always
# ? be only one key for opening_hour hashes but this method would even work with more, no clue how 'expensive'
# ? this is but it should not matter a lot
for hash_key in changed:
for node in location_objects[hash_key]:
sparql_delete_node_plus1(secret.named_graph, node, secret.sparql_url, secret.triple_user, secret.triple_password)
sparql_delete_node_plus1(secret.named_graph, "?s", secret.sparql_url, secret.triple_user, secret.triple_password, sobject=node)
        if not changed[hash_key]:  # delete disappeared entries
del location_objects[hash_key]
del location_hashes[hash_key]
triples, anti_triple, anti_opening = part3_spcht_workings(changedLocs, secret.folio_spcht, secret.anti_folio_spcht, secret.anti_opening_spcht)
opening_objects.update({k: v[0] for k, v in anti_opening.items()})
location_objects.update(anti_triple)
if part4_work_order(triples):
return [hash_key for hash_key in changedLocs.keys()]
else:
return None
def opening_update(opening_hashes: dict, opening_object: dict):
changed = check_opening_changes(opening_hashes)
if not changed:
logging.info("Check completed without any found changes, hibernating...")
return {}
# delete old entries, create anew
changedOpenings = {k: v['hours'] for k, v in changed.items()}
heron = Spcht(secret.delta_opening_spcht)
all_triples = []
for key, value in changedOpenings.items():
triples = heron.process_data(value, "https://dUckGoOse")
other_triples = []
for third in triples:
if re.match(r"^https://dUckGoOse", third.subject.content):
continue
other_triples.append(
SpchtTriple(
SpchtThird(opening_object[key][:-1][1:], uri=True),
SpchtThird(secret.openingRDF, uri=True),
third.subject
)
)
all_triples.append(third)
all_triples += other_triples
opening_hashes.update({k: v['hash'] for k, v in changed.items()})
# ! discard processing
for key in changed.keys():
sobject = opening_object[key]
status, discard = sparql_delete_node_plus1(secret.named_graph,
sobject,
secret.sparql_url,
secret.triple_user,
secret.triple_password,
"<https://schema.org/openingHoursSpecification>"
)
if part4_work_order(all_triples):
return [key for key in changed.keys()]
else:
return None # failed inserts
def part3_spcht_workings(extracted_dicts: dict, main_spcht: str, anti_spcht=None, anti_spcht2=None):
# * this can definitely be called janky as heck
duck = Spcht(main_spcht)
duck.name = "Duck"
goose = None
swane = None
if anti_spcht:
goose = Spcht(anti_spcht)
goose.name = "Goose"
if anti_spcht2:
swane = Spcht(anti_spcht2)
swane.name = "Swane"
triples = []
anti_triples = {}
anti_triples2 = {}
for key, each_entry in extracted_dicts.items():
triples += duck.process_data(each_entry, secret.subject)
if goose:
anti_triples[key] = SpchtTriple.extract_subjects(goose.process_data(each_entry, "https://x.y"))
if swane:
anti_triples2[each_entry['loc_main_service_id']] = SpchtTriple.extract_subjects(swane.process_data(each_entry, "https://z.a"))
return triples, anti_triples, anti_triples2
def part4_work_order(triples: list):
with open(secret.turtle_file, "w") as rdf_file:
rdf_file.write(SpchtUtility.process2RDF(triples))
work_order = {
"meta": {
"status": 4,
"fetch": "local",
"type": "insert",
"method": secret.processing,
"full_download": True
},
"file_list": {
"0": {
"rdf_file": secret.turtle_file,
"status": 4
}
}
}
# TODO: we have here a usecase for workorder fileIO, like not writing a file at all would be useful wouldnt it?
with open(secret.workorder_file, "w") as work_order_file:
json.dump(work_order, work_order_file)
res = WorkOrder.FulfillSparqlInsertOrder(secret.workorder_file, secret.sparql_url, secret.triple_user,
secret.triple_password, secret.named_graph)
    logging.info(f"WorkOrder Fulfillment, now status: {res}")
return res
def full_update():
# create new main_file
# ? general structure:
init_now = datetime.now().isoformat()
main_file = {
"meta": {
"last_opening": init_now,
"last_location": init_now,
"last_crawl": init_now,
"last_call": init_now,
"log_file": secret.log_file,
"first_call": init_now,
"counter": 0,
"avg_cal_intervall": ""
},
"hashes": {
"location": {},
"opening": {}
},
"triples": {
"location": {},
"opening": {}
}
}
# ? end of structure
# ! part 1 - download of raw data
raw_info = {}
for key, endpoint in secret.endpoints.items():
temp_data = part1_folio_workings(endpoint, key, append)
if temp_data:
raw_info.update(temp_data)
# ! part 2 - packing data
if raw_info:
extracted_dicts = {}
for each in raw_info['locations']:
if re.search(secret.name, each['code']):
inst = grab(raw_info['locinsts'], "id", each['institutionId'])
lib = grab(raw_info['loclibs'], "id", each['libraryId'])
one_node, location_hash, opening_hash = create_location_node(each, inst, lib)
extracted_dicts[each['id']] = one_node
main_file['hashes']['location'][each['id']] = location_hash
main_file['hashes']['opening'].update(opening_hash)
else:
logging.warning("No data to work on")
print("Loading failed, cannot create what is needed")
exit(0)
# ! part 3 - SpchtWorkings
triples, anti_triple, anti_opening = part3_spcht_workings(extracted_dicts, secret.folio_spcht, secret.anti_folio_spcht, secret.anti_opening_spcht)
main_file['triples']['location'] = anti_triple
main_file['triples']['opening'] = {k: v[0] for k, v in anti_opening.items()}
part4_work_order(triples)
with open(secret.main_file, "w") as big_file:
json.dump(main_file, big_file, indent=3)
if __name__ == "__main__":
# * Argparse Init
parser = argparse.ArgumentParser(
description="Folio2Triplestore Tool - Converts Folio Data into triples",
usage="folio2triplestore.py [--info][--opening][--location][--crawl]",
epilog="All Settings are to be done in './foliotools/folio2triplestore_config.py'",
prefix_chars="-")
parser.add_argument("-i", "--info", action="store_true", help="Shows info about the current file if it exists")
parser.add_argument("-c", "--crawl", action="store_true", help="Crawls for new locations regardless of time since last crawl")
parser.add_argument("-l", "--location", action="store_true", help="Checks all known location for updates, ignores cooldown")
parser.add_argument("-o", "--opening", action="store_true", help="Checks all opening hours for changes, ignores cooldown")
args = parser.parse_args()
# This seems like the most inelegant way to trigger the processes by multiple, exclusive conditions
do_crawl = False
do_location = False
do_opening = False
no_arguments = False
if len(sys.argv) == 1:
no_arguments = True
if args.info:
pass
if args.crawl:
do_crawl = True
if args.location:
do_location = True
if args.opening:
do_opening = True
try:
with open(secret.main_file, "r") as big_file:
try:
main_file = json.load(big_file)
main_file_bck = copy.deepcopy(main_file)
except json.JSONDecodeError:
logging.error("'big_file' could not be read, apparently json interpreting failed. Start anew?")
exit(1)
ahuit = datetime.now()
insert_failure = False
time_switch = {
'opening': datetime.fromisoformat(main_file['meta']['last_opening']),
'location': datetime.fromisoformat(main_file['meta']['last_location']),
'crawl': datetime.fromisoformat(main_file['meta']['last_crawl']),
'last_call': datetime.fromisoformat(main_file['meta']['last_call'])
}
# * Time Switch
if no_arguments: # when no argument are given use the normal timer events to trigger
if (ahuit - time_switch['crawl']).total_seconds() > secret.interval_all:
do_crawl = True
if (ahuit - time_switch['opening']).total_seconds() > secret.interval_opening:
do_opening = True
if (ahuit - time_switch['location']).total_seconds() > secret.interval_location:
do_location = True
if args.info:
print(f"Folio2Triplestore Tool Version {__version__}")
print(f" Locations: {len(main_file['hashes']['location'])}")
print(f" Last call: {main_file['meta']['last_call']}")
print(f" Total calls: {main_file['meta']['counter']}")
print(f" Avg. time btw. calls: {main_file['meta']['avg_cal_intervall_human']}")
print(f" Log file size: {sizeof_fmt(os.stat(secret.log_file).st_size)}")
if len(sys.argv) == 2 and args.info:
exit(0) # no changes written if only info was called
if do_crawl:
logging.info(f"Crawling for Locations triggered - now: '{ahuit.isoformat()}', last call: '{main_file['meta']['last_crawl']}'")
main_file['meta']['last_crawl'] = ahuit.isoformat()
crawl_return = crawl_location(main_file['hashes']['location'],
main_file['hashes']['opening'],
main_file['triples']['location'],
main_file['triples']['opening'])
if crawl_return:
logging.info("New Locations inserted:" + str(crawl_return))
elif crawl_return is None:
insert_failure = True
if do_location:
logging.info(f"Location update triggered - now: '{ahuit.isoformat()}', last call: '{main_file['meta']['last_location']}'")
main_file['meta']['last_location'] = ahuit.isoformat()
update_return = location_update(main_file['hashes']['location'],
main_file['hashes']['opening'],
main_file['triples']['location'],
main_file['triples']['opening'] )
if update_return:
logging.info("Updated locations:" + str(update_return))
elif update_return is None:
insert_failure = True
if do_opening:
logging.info(f"Opening update triggered - now: '{ahuit.isoformat()}', last call: '{main_file['meta']['last_opening']}'")
main_file['meta']['last_opening'] = ahuit.isoformat()
update_return = opening_update(main_file['hashes']['opening'], main_file['triples']['opening'])
if update_return:
logging.info("Updated opening hours")
main_file['hashes']['opening'] = update_return
if insert_failure:
            # ? due to my stupidity the update/crawl functions update referenced dicts, the easiest solution for me
            # ? is to just replace the change with the former state
main_file = copy.deepcopy(main_file_bck)
        try:  # this is a try block because i fear it might fail for stupid reasons and i don't want the entire
            # procedure to crash because of that screwing around with deltatimes
if main_file['meta']['avg_cal_intervall']:
old_delta = timedelta(seconds=int(main_file['meta']['avg_cal_intervall']))
relative_delta = ahuit - time_switch['last_call']
                # nd = new_delta  # it was just too long to write 4 times in one line
nd = (old_delta + relative_delta) / 2 # ! hail to datetime library for doing the heavy lifting
else:
nd = ahuit - time_switch['last_call']
main_file['meta']['avg_cal_intervall_human'] = \
f"{str(nd.days):0>3}d-{str(nd.seconds//3600):0>2}h:{str((nd.seconds//60)%60):0>2}m:{str((nd.seconds%60)%60):0>2}s"
main_file['meta']['avg_cal_intervall'] = (nd.days * 60 * 60 * 24) + nd.seconds
except Exception as e:
logging.debug(f"Updating of average call intervall failed somehow: {e.__class__.__name__}: {e}")
traceback.print_exc()
logging.debug(f"Call finished, last call was: {main_file['meta']['last_call']}, average time between calls is now: {main_file['meta']['avg_cal_intervall_human']}")
print("Call to folio2triplestore finished, times updated") # print so at least something shows up in the console if manually called
main_file['meta']['last_call'] = ahuit.isoformat()
main_file['meta']['counter'] += 1
with open(secret.main_file, "w") as big_file:
json.dump(main_file, big_file, indent=3)
except FileNotFoundError:
full_update()
exit(0)
except Exception as e:
logging.critical(f"MAIN::Unexpected exception {e.__class__.__name__} occured, message '{e}'")
traceback.print_exc()
exit(9)
```
#### File: efre-lod-elasticsearch-tools/tests/demo_spcht_processing.py
```python
import os
import sys
import inspect
import logging
import json
import re
from rdflib import Graph, Literal, URIRef
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from Spcht.Core.SpchtCore import Spcht
from Spcht.Utils.local_tools import load_from_json
import Spcht.Core.SpchtUtility as SpchtUtility
"""
This only tests whether the actual processing is still working and actually takes place; it does not replace a proper
testing suite that tests every function by itself. The featuretest.spcht.json is just feature-complete and should
utilize every single datafield that is around, which makes it useful to find faults in the programming, but it is not
fit for deeper diagnostics or for checking whether the data was actually processed the right way."""
TEST_DATA = "thetestset.json"
#TEST_DATA = "./../folio_extract.json"
try:
os.remove("./test_processing.log")
except FileNotFoundError:
print("No previous log file")
logging.basicConfig(filename='test_processing.log', format='[%(asctime)s] %(levelname)s:%(message)s', level=logging.DEBUG)
def quadro_console_out(quadro_list: list):
previous = ""
previous_length = 0
len_map = {}
for each in quadro_list:
if str(each[0]) not in len_map:
len_map[str(each[0])] = len(str(each[1]))
if len(str(each[1])) > len_map[str(each[0])]:
len_map[str(each[0])] = len(str(each[1]))+3
for each in quadro_list:
this_line = ""
if str(each[0]) != previous:
previous = f"{str(each[0])}" # tuples cannot be changed
previous_length = len(previous)
this_line += f"{previous} "
else:
this_line += f"{' '*previous_length} "
tmp = f"{str(each[1])}"
this_line += f"{tmp:{len_map[str(each[0])]}}"
this_line += f"{str(each[2])}"
print(this_line)
if __name__ == "__main__":
spcht_path = "featuretest.spcht.json"
#spcht_path = "./../folio.spcht.json"
NormalBird = Spcht(spcht_path, schema_path="./../Spcht/SpchtSchema.json", debug=True, log_debug=False)
my_data = load_from_json(TEST_DATA)
if not my_data:
print("Test failed while loading testdata")
exit(1)
lines = []
for every in my_data:
lines.extend(NormalBird.process_data(every, "https://ressources.info/"))
quadro_console_out(lines)
export = SpchtUtility.process2RDF(lines, export=False)
with open("processing_turtle.ttl", "w") as turtle_file:
turtle_file.write(export.serialize(format="turtle"))
```
#### File: efre-lod-elasticsearch-tools/tests/test_spcht_utilities.py
```python
import json
import unittest
import Spcht.Core.SpchtUtility as SpchtUtility
from Spcht.Core.SpchtCore import Spcht, SpchtThird, SpchtTriple
from Spcht.Core.SpchtUtility import list_wrapper, insert_list_into_str, is_dictkey, list_has_elements, all_variants, \
match_positions, fill_var
class TestFunc(unittest.TestCase):
def test_listwrapper1(self):
self.assertEqual(list_wrapper(["OneElementList"]), ["OneElementList"])
def test_listwrapper2(self):
self.assertEqual(list_wrapper("OneElement"), ["OneElement"])
def test_listwrapper3(self):
self.assertEqual(list_wrapper(["Element1", "Element2"]), ["Element1", "Element2"])
def test_listwrapper4(self):
self.assertEqual(list_wrapper(None), [None])
    def test_insert_into1(self):
        """normal test"""
inserts = ["one", "two", "three"]
sentence = "{} entry, {} variants and {} things"
goal = "one entry, two variants and three things"
trial = insert_list_into_str(inserts, sentence)
self.assertEqual(trial, goal)
def test_insert_into2(self):
"""test with changed placeholder and new regex length"""
inserts = ["one", "two", "three"]
sentence = "[--] entry, [--] variants and [--] things"
goal = "one entry, two variants and three things"
trial = insert_list_into_str(inserts, sentence, regex_pattern=r'\[--\]', pattern_len=4)
self.assertEqual(trial, goal)
def test_insert_into3(self):
"""test with only two inserts"""
inserts = ["one", "two"]
sentence = "{} and {}"
goal = "one and two"
trial = insert_list_into_str(inserts, sentence)
self.assertEqual(trial, goal)
    def test_insert_into4(self):
        """test with more inserts than spaces"""
inserts = ["one", "two", "three"]
sentence = "Space1: {}, Space2 {}."
        self.assertRaises(TypeError, insert_list_into_str, inserts, sentence)
def test_insert_into5(self):
"""test with less inserts than slots"""
inserts = ["one", "two"]
sentence = "Space1: {}, Space2 {}, Space3 {}"
print(insert_list_into_str(inserts, sentence))
        self.assertRaises(TypeError, insert_list_into_str, inserts, sentence)
def test_is_dictkey1(self):
"""tests with one key that is actually there"""
dictionary = {1: 42, 2: 67, 3: 99}
key = 1
self.assertEqual(True, is_dictkey(dictionary, key))
def test_is_dictkey2(self):
"""tests with one key that is not there"""
dictionary = {1: 42, 2: 67, 3: 99}
key = 5
self.assertEqual(False, is_dictkey(dictionary, key))
def test_is_dictkey3(self):
"""tests with keys that are all there"""
dictionary = {1: 42, 2: 67, 3: 99}
key = [1, 2]
self.assertEqual(True, is_dictkey(dictionary, key))
def test_is_dictkey4(self):
"""tests with keys of which some are there"""
dictionary = {1: 42, 2: 67, 3: 99}
key = [1, 5]
self.assertEqual(False, is_dictkey(dictionary, key))
    def test_is_dictkey5(self):
        """tests with keys of which none are there"""
dictionary = {1: 42, 2: 67, 3: 99}
key = [5, 7, 9]
self.assertEqual(False, is_dictkey(dictionary, key))
def test_list_has_elements1(self):
self.assertEqual(True, list_has_elements([1, 2]))
def test_list_has_elements2(self):
self.assertEqual(False, list_has_elements([]))
def test_all_variants1(self):
listed = [[1]]
expected = [[1]]
self.assertEqual(expected, all_variants(listed))
def test_all_variants2(self):
listed = [[1], [2]]
expected = [[1, 2]]
self.assertEqual(expected, all_variants(listed))
def test_all_variants3(self):
listed = [[1], [2], [3]]
expected = [[1, 2, 3]]
self.assertEqual(expected, all_variants(listed))
def test_all_variants4(self):
listed = [[1, 2]]
expected = [[1], [2]]
self.assertEqual(expected, all_variants(listed))
def test_all_variants5(self):
listed = [[1, 2], [3]]
expected = [[1, 3], [2, 3]]
self.assertEqual(expected, all_variants(listed))
def test_all_variants6(self):
listed = [[1, 2], [3, 4]]
expected = [[1, 3], [1, 4], [2, 3], [2, 4]]
self.assertEqual(expected, all_variants(listed))
def test_all_variants7(self):
listed = [[1, 2], [3], [4]]
expected = [[1, 3, 4], [2, 3, 4]]
self.assertEqual(expected, all_variants(listed))
def test_all_variants8(self):
listed = [[1, 2], [3, 4], [5]]
expected = [[1, 3, 5], [1, 4, 5], [2, 3, 5], [2, 4, 5]]
self.assertEqual(expected, all_variants(listed))
def test_match_positions1(self):
regex = r"\{\}"
stringchain = "bla {} fasel {}"
expected = [(4, 6), (13, 15)]
self.assertEqual(expected, match_positions(regex, stringchain))
def test_match_positions2(self):
regex = r"\[\]"
stringchain = "bla {} fasel {}"
expected = []
self.assertEqual(expected, match_positions(regex, stringchain))
def test_fill_var1(self):
exist = 1
input = 5
expected = [1, 5]
self.assertEqual(expected, fill_var(exist, input))
def test_fill_var2(self):
exist = [1, 2]
input = 5
expected = [1, 2, 5]
self.assertEqual(expected, fill_var(exist, input))
def test_fill_var3(self):
exist = {1: 2, 3: 5}
input = 5
expected = [{1: 2, 3: 5}, 5]
self.assertEqual(expected, fill_var(exist, input))
def test_fill_var4(self):
exist = [1, 2]
input = [5, 6]
expected = [1, 2, [5, 6]]
self.assertEqual(expected, fill_var(exist, input))
def test_fill_var5(self):
exist = None
input = 5
expected = 5
self.assertEqual(expected, fill_var(exist, input))
def test_fill_var6(self):
exist = []
input = 5
expected = [5]
self.assertEqual(expected, fill_var(exist, input))
def test_fill_var7(self):
exist = ""
input = 5
expected = 5
self.assertEqual(expected, fill_var(exist, input))
def test_fill_var8(self):
exist = None
input = ""
expected = ""
self.assertEqual(expected, fill_var(exist, input))
def test_extract_dictmarc(self):
with open("thetestset.json", "r") as json_file:
thetestset = json.load(json_file)
fake_node = {"source": "marc", "field": "951:a"}
expected = ["MV", "XA-DE", "XA-PL"]
empty_spcht = Spcht()
empty_spcht._m21_dict = SpchtUtility.marc2list(thetestset[0]['fullrecord'])
with self.subTest("Extract dictmarc list: dictionary"):
expected = ["MV", "XA-DE", "XA-PL"]
computed = [x.content for x in empty_spcht.extract_dictmarc_value(fake_node)]
self.assertEqual(expected, computed)
with self.subTest("Extract dictmarc dictionary: list"):
expected = ["(DE-627)1270642103", "(DE-625)rvk/96225:", "(DE-576)200642103"]
fake_node['field'] = "936:0"
computed = [x.content for x in empty_spcht.extract_dictmarc_value(fake_node)]
self.assertEqual(expected, computed)
def test_spcht_triple_serialize(self):
one_uri = SpchtThird("https://schema.org/adress", uri=True)
snd_uri = SpchtThird("https://schema.org/cat", uri=True)
one_literal = SpchtThird("Miau", tag="xsd:integer")
snd_literal = SpchtThird("english", language="en")
triple_1 = SpchtTriple(one_uri, snd_uri, snd_literal)
triple_2 = SpchtTriple(one_uri, snd_uri, one_literal)
expected = """@prefix ns1: <https://schema.org/> .
ns1:adress ns1:cat "Miau",
"english"@en .
"""
computed = SpchtUtility.process2RDF([triple_1, triple_2])
self.assertEqual(expected, computed)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpkarlsberg/readux",
"score": 2
} |
#### File: readux/annotations/admin.py
```python
from django import forms
from django.contrib import admin
from django.contrib.auth.models import User, Group
from readux.annotations.models import Annotation, AnnotationGroup
class AnnotationAdmin(admin.ModelAdmin):
# uuid is kind of ugly and probably not useful for display,
# so omitting it here
list_display = ('user', 'text_preview', 'created', 'updated',
'uri_link')
date_hierarchy = 'created'
search_fields = ['id', 'text', 'quote', 'uri', 'extra_data',
'user__username', 'user__email']
# NOTE: searching on uuid with dashes doesn't seem to work,
# but searching on a portion of the uuid without dashes does
# (possibly DB dependent?)
# for now, make these fields read-only, so annotations can only
# be deleted or have user modified via admin; the rest should
# be handled via the annotator interface.
# readonly_fields = ('text', 'quote', 'extra_data', 'uri_link')
readonly_fields = ('text', 'quote', 'uri_link')
# custom model form to allow users to be edited on the group
# edit form using the admin horizontal widget; based on
# http://stackoverflow.com/questions/9879687/adding-a-manytomanywidget-to-the-reverse-of-a-manytomanyfield-in-the-django-admi
class AnnotationGroupForm(forms.ModelForm):
users = forms.ModelMultipleChoiceField(
User.objects.all(),
widget=admin.widgets.FilteredSelectMultiple('Users', False),
required=False
)
def __init__(self, *args, **kwargs):
super(AnnotationGroupForm, self).__init__(*args, **kwargs)
if self.instance.pk:
initial_users = self.instance.user_set.values_list('pk', flat=True)
self.initial['users'] = initial_users
def save(self, *args, **kwargs):
kwargs['commit'] = True
return super(AnnotationGroupForm, self).save(*args, **kwargs)
def save_m2m(self):
self.instance.user_set.clear()
self.instance.user_set.add(*self.cleaned_data['users'])
class AnnotationGroupAdmin(admin.ModelAdmin):
list_display = ('name', 'num_members', 'created', 'updated')
date_hierarchy = 'created'
exclude = ('permissions',)
form = AnnotationGroupForm
# customize default group display to indicate annotation groups #
# patch in a boolean property field to indicate annotation groups
def is_annotationgroup(obj):
return hasattr(obj, 'annotationgroup')
is_annotationgroup.boolean = True
is_annotationgroup.short_description = 'Annotation group'
Group.is_annotationgroup = is_annotationgroup
class GroupAdmin(admin.ModelAdmin):
list_display = ('name', 'is_annotationgroup')
admin.site.register(Annotation, AnnotationAdmin)
admin.site.register(AnnotationGroup, AnnotationGroupAdmin)
admin.site.unregister(Group)
admin.site.register(Group, GroupAdmin)
```
#### File: readux/books/abbyyocr.py
```python
from eulxml import xmlmap
class Base(xmlmap.XmlObject):
'''Base :class:`eulxml.xmlmap.XmlObject` for ABBYY OCR XML with
common namespace declarations.
'''
ROOT_NAMESPACES = {
'fr6v1': 'http://www.abbyy.com/FineReader_xml/FineReader6-schema-v1.xml',
'fr8v2': 'http://www.abbyy.com/FineReader_xml/FineReader8-schema-v2.xml'
}
'namespaces for supported versions of FineReader xml'
id = xmlmap.StringField('@xml:id')
def frns(xpath):
'''Utility function to convert a simple xpath to match any of the
configured versions of ABBYY FineReader XML namespaces. Example
conversions:
    * ``page`` becomes ``fr6v1:page|fr8v2:page``
    * ``text/par`` becomes ``fr6v1:text/fr6v1:par|fr8v2:text/fr8v2:par``
Uses all declared namespace prefixes from
:attr:`Base.ROOT_NAMESPACES`
'''
namespaces = Base.ROOT_NAMESPACES.keys()
return '|'.join('/'.join('%s:%s' % (ns, el) for el in xpath.split('/'))
for ns in namespaces)
class Formatting(Base):
'''A group of characters in a single :class:`Line` with uniform
formatting.'''
ROOT_NAME = 'formatting'
language = xmlmap.StringField('@lang')
'language of this formatted section'
text = xmlmap.StringField('text()')
'text value'
# char params ?
# boolean attributes for: ff, fs, bold, italic, subscript, superscript,
# smallcaps, underline, strikeout, color, scaling, spacing
class Line(Base):
'''A single line of text in a :class:`Paragraph`.'''
ROOT_NAME = 'line'
baseline = xmlmap.IntegerField('@baseline')
'integer baseline'
left = xmlmap.IntegerField('@l')
'integer left'
top = xmlmap.IntegerField('@t')
'integer top'
right = xmlmap.IntegerField('@r')
'integer right'
bottom = xmlmap.IntegerField('@b')
'integer bottom'
formatted_text = xmlmap.NodeListField(frns('formatting'),
Formatting)
'list of :class:`Formatting` elements'
class Paragraph(Base):
'''A single paragraph of text somewhere in a :class:`Document`.'''
ROOT_NAME = 'par'
align = xmlmap.StringField('@align') # default is Left; Center, Right, Justified
'text alignment (Left, Center, Right, Justified)'
left_indent = xmlmap.IntegerField('@leftIndent')
'integer left indent'
right_indent = xmlmap.IntegerField('@rightIndent')
'integer right indent'
start_indent = xmlmap.IntegerField('@startIndent')
'integer start indent'
line_spacing = xmlmap.IntegerField('@lineSpacing')
'integer line spacing'
# dropChars stuff ?
lines = xmlmap.NodeListField(frns('line'), Line)
'list of :class:`Line` elements'
class Block(Base):
    '''A single block of content on a :class:`Page`.'''
    ROOT_NAME = 'block'
type = xmlmap.StringField('@blockType') # Text, Table, Picture, Barcode
'type of block (Text, Table, Picture, Barcode)'
left = xmlmap.IntegerField('@l')
'integer left'
top = xmlmap.IntegerField('@t')
'integer top'
right = xmlmap.IntegerField('@r')
'integer right'
bottom = xmlmap.IntegerField('@b')
'integer bottom'
# must have one & only one region;
# region/rect dimensions appears to be redundant...
paragraphs = xmlmap.NodeListField(frns('text/par'), Paragraph)
'list of :class:`Paragraph` elements'
class Page(Base):
'''A single page of a :class:`Document`.'''
ROOT_NAME = 'page'
width = xmlmap.IntegerField('@width')
'integer width'
height = xmlmap.IntegerField('@height')
'integer height'
resolution = xmlmap.IntegerField('@resolution')
'integer resolution'
blocks = xmlmap.NodeListField(frns('block'), Block)
'list of :class:`Block` elements in this page'
text_blocks = xmlmap.NodeListField(frns('block[@blockType="Text"]'),
Block)
'text :class:`Block` elements (where type is "Text")'
picture_blocks = xmlmap.NodeListField(frns('block[@blockType="Picture"]'),
Block)
'picture :class:`Block` elements (where type is "Picture")'
# block position info possibly redundant? map paragraphs directly
paragraphs = xmlmap.NodeListField(frns('block/text/par'),
Paragraph)
'list of :class:`Paragraph` elements in any of the blocks on this page'
class Document(Base):
''':class:`~eulxml.xmlmap.XmlObject` class for an ABBYY
OCR XML Document.
.. Note::
Currently there is no support for tabular formatting elements.
'''
ROOT_NAME ='document'
pages = xmlmap.NodeListField(frns('page'), Page)
'pages as :class:`Page`'
page_count = xmlmap.IntegerField('@pagesCount')
'integer page_count (document ``@pagesCount``)'
language = xmlmap.StringField('@mainLanguage')
'main language of the document'
languages = xmlmap.StringField('@languages')
'all languages included in the document'
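# A small usage sketch (the file name is a placeholder, not part of the original module):
# load a FineReader export with eulxml and walk its text content.
#
#   from eulxml.xmlmap import load_xmlobject_from_file
#   ocr = load_xmlobject_from_file("ocr_output.xml", Document)
#   for ocr_page in ocr.pages:
#       for par in ocr_page.paragraphs:
#           text = ''.join(fmt.text or '' for line in par.lines
#                          for fmt in line.formatted_text)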
```
#### File: readux/books/annotate.py
```python
from bs4 import BeautifulSoup
from datetime import datetime
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db.models import Q
from django.utils.text import slugify
from eulxml.xmlmap import load_xmlobject_from_string, teimap, \
load_xmlobject_from_file, XmlObject
import logging
from lxml import etree
import mistune
import os
from readux import __version__
from readux.books import tei, markdown_tei
from readux.utils import absolutize_url
logger = logging.getLogger(__name__)
TEI_ENCODING_DESCRIPTION = os.path.join(
os.path.dirname(__file__),
'annotated_tei_encodingDesc.xml')
def annotated_tei(teivol, annotations):
'''Takes a TEI :class:`~readux.books.tei.Facsimile` document and an
:class:`~readux.annotation.models.Annotation` queryset,
and generates an annotated TEI Facsimile document. Tei header is
updated to reflect annotated edition, with a responsibility based
on the users associated with the annotations passed in. Annotation
references are added to the facsimile data in the form of start and
end anchors for text annotations and new zones for image annotations.
Annotation content is added to the body of the TEI, and annotation
tags, if any, are added as an interpGrp.
:param teivol: tei document to be annotated; expects
:class:`~readux.books.tei.Facsimile`
:param annotations: :class:`~readux.annotations.models.Annotation`
queryset to be added into the TEI for export
:returns: :class:`~readux.books.tei.AnnotatedFacsimile` with annotation
information
'''
    # iterate through the annotations associated with this volume
# and insert them into the tei based on the content they reference
# could do some sanity-checking: compare annotation total vs
# number actually added as we go page-by-page?
tags = set()
# make sure tei xml is using the xmlobject we need to add
# annotation data and tags
if not isinstance(teivol, tei.AnnotatedFacsimile):
teivol = tei.AnnotatedFacsimile(teivol.node)
# update title to reflect annotated version being exported
teivol.main_title = teivol.title
teivol.subtitle = ", an annotated digital edition"
del teivol.title # delete old, unqualified title
# update responsibility statement
teivol.responsibility = 'annotated by'
# get a distinct list of all annotation authors
# NOTE: this will add users even if the annotations don't get
# successfully added to the output
user_ids = annotations.only('user').order_by('user')\
.values_list('user', flat=True).distinct()
users = get_user_model().objects.filter(id__in=user_ids)
for user in users:
teivol.responsible_names.append(tei.Name(id=user.username,
value=user.get_full_name()))
# publication statement - main info should already be set
# update to reflect annotated tei and ensure date is current
teivol.create_pubstmt() # make sure publicationStmt exists
teivol.pubstmt.desc = 'Annotated TEI generated by Readux version %s' % __version__
export_date = datetime.now()
teivol.pubstmt.date = export_date
teivol.pubstmt.date_normal = export_date
# add stock encoding description
teivol.encoding_desc = load_xmlobject_from_file(TEI_ENCODING_DESCRIPTION,
XmlObject)
for page in teivol.page_list:
# use page.href to find annotations for this page
# if for some reason href is not set, skip this page
if not page.href:
continue
# page.href should either be local readux uri OR ARK uri;
# local uri is stored as annotation uri, but ark is in extra data
page_annotations = annotations.filter(Q(uri=page.href)|Q(extra_data__contains=page.href))
if page_annotations.exists():
for note in page_annotations:
# possible to get extra matches for page url in related pages,
# so skip any notes where ark doesn't match page url
if note.extra_data.get('ark', '') != page.href and not settings.DEV_ENV:
# NOTE: allow without ark in dev, since test page records
# may not have ARKs
continue
insert_note(teivol, page, note)
# collect a list of unique tags as we work through the notes
if 'tags' in note.info():
tags |= set(t.strip() for t in note.info()['tags'])
consolidate_bibliography(teivol)
# tags are included in the back matter as an interpGrp
if tags:
# create back matter interpgrp for annotation tags
teivol.create_tags()
for tag in tags:
# NOTE: our tag implementation currently does not allow spaces,
# but using slugify to generate ids to avoid any issues with spaces
# and variation in capitalization or punctuation
teivol.tags.interp.append(tei.Interp(id=slugify(tag), value=tag))
return teivol
def annotation_to_tei(annotation, teivol):
'''Generate a tei note from an annotation. Sets annotation id,
slugified tags as ana attribute, username as resp attribute, and
annotation content is converted from markdown to TEI.
:param annotation: :class:`~readux.annotations.models.Annotation`
:param teivol: :class:`~readux.books.tei.AnnotatedFacsimile` tei
document, for converting related page ARK uris into TEI ids
:returns: :class:`readux.books.tei.Note`
'''
# NOTE: annotation created/edited dates are not included here
# because they were determined not to be relevant for our purposes
# sample note provided by Alice
# <note resp="JPK" xml:id="oshnp50n1" n="1"><p>This is an example note.</p></note>
# convert markdown-formatted text content to tei
note_content = markdown_tei.convert(annotation.text)
# markdown results could be a list of paragraphs, and not a proper
# xml tree; also, pags do not include namespace
# wrap in a note element and set the default namespace as tei
teinote = load_xmlobject_from_string('<note xmlns="%s">%s</note>' % \
(teimap.TEI_NAMESPACE, note_content),
tei.Note)
# what id do we want? annotation uuid? url?
teinote.id = 'annotation-%s' % annotation.id # can't start with numeric
teinote.href = absolutize_url(annotation.get_absolute_url())
teinote.type = 'annotation'
# if an annotation includes tags, reference them by slugified id in @ana
if 'tags' in annotation.info() and annotation.info()['tags']:
tags = ' '.join(set('#%s' % slugify(t.strip())
for t in annotation.info()['tags']))
teinote.ana = tags
# if the annotation has an associated user, mark the author
# as responsible for the note
if annotation.user:
teinote.resp = annotation.user.username
# include full markdown of the annotation, as a backup for losing
# content converting from markdown to tei, and for easy display
teinote.markdown = annotation.text
# if annotation contains related pages, generate a link group
if annotation.related_pages:
for rel_page in annotation.related_pages:
page_ref = tei.Ref(text=rel_page, type='related page')
# find tei page identifier from the page ark
target = teivol.page_id_by_xlink(rel_page)
if target is not None:
page_ref.target = '#%s' % target
teinote.related_pages.append(page_ref)
# if annotation includes citations, add them to the tei
# NOTE: expects these citations to be TEI encoded already (generated
# by the zotero api and added via meltdown-zotero annotator plugin)
if annotation.extra_data.get('citations', None):
for bibl in annotation.extra_data['citations']:
# zotero tei export currently includes an id that is not
# a valid ncname (contains : and /)
bibsoup = BeautifulSoup(bibl, 'xml')
# convert xml id into the format we want:
# zotero-#### (zotero item id)
for bibl_struct in bibsoup.find_all('biblStruct'):
bibl_struct['xml:id'] = 'zotero-%s' % \
bibl_struct['xml:id'].split('/')[-1]
teibibl = load_xmlobject_from_string(bibsoup.biblStruct.prettify(),
tei.BiblStruct)
teinote.citations.append(teibibl)
return teinote
def html_xpath_to_tei(xpath):
'''Convert xpaths generated on the readux site to the
equivalent xpaths for the corresponding TEI content,
so that annotations created against the HTML can be matched to
the corresponding TEI.'''
# NOTE: span could match either line in abbyy ocr or word in mets/alto
return xpath.replace('div', 'tei:zone') \
.replace('span', 'node()[local-name()="line" or local-name()="w"]') \
.replace('@id', '@xml:id')
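# An illustrative sketch with a made-up id (not taken from readux markup): an annotator
# xpath such as
#   /div[@id="fnstr.idm123"]/span[2]
# is rewritten by html_xpath_to_tei to
#   /tei:zone[@xml:id="fnstr.idm123"]/node()[local-name()="line" or local-name()="w"][2]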
def insert_note(teivol, teipage, annotation):
'''Insert an annotation and highlight reference into a tei document
and tei facsimile page.
:param teivol: :class:`~readux.books.tei.AnnotatedFacsimile` tei
document where the tei note should be added
:param teipage: :class:`~readux.books.tei.Zone` page zone where
annotation highlight references should be added
:param annotation: :class:`~readux.annotations.models.Annotation`
to add the document
'''
info = annotation.info()
# convert html xpaths to tei
if 'ranges' in info and info['ranges']:
# NOTE: assuming a single range selection for now
# the annotator model supports multiple, but UI does not currently
# support it.
selection_range = info['ranges'][0]
# convert html xpaths from readux website to equivalent tei xpaths
# for selection within the facsimile document
# either of start or end xpaths could be empty; if so, assume
# starting at the beginning of the page or end at the end
start_xpath = html_xpath_to_tei(selection_range['start']) or '//tei:zone[1]'
end_xpath = html_xpath_to_tei(selection_range['end']) or '//tei:zone[last()]'
# insert references using start and end xpaths & offsets
start = teipage.node.xpath(start_xpath, namespaces=tei.Zone.ROOT_NAMESPACES)
end = teipage.node.xpath(end_xpath, namespaces=tei.Zone.ROOT_NAMESPACES)
if not start or not end:
logger.warn('Could not find start or end xpath for annotation %s' % annotation.id)
return
else:
# xpath returns a list of matches; we only want the first one
start = start[0]
end = end[0]
start_anchor = tei.Anchor(type='text-annotation-highlight-start',
id='highlight-start-%s' % annotation.id,
next='highlight-end-%s' % annotation.id)
end_anchor = tei.Anchor(type='text-annotation-highlight-end',
id='highlight-end-%s' % annotation.id)
# insert the end *first* in case start and end are in the
# same element; otherwise, the offsets get mixed up
insert_anchor(end, end_anchor.node, selection_range['endOffset'])
insert_anchor(start, start_anchor.node, selection_range['startOffset'])
# generate range target for the note element
target = '#range(#%s, #%s)' % (start_anchor.id, end_anchor.id)
elif 'image_selection' in info:
# for readux, image annotation can *only* be the page image
# so not checking image uri
page_width = teipage.lrx - teipage.ulx
page_height = teipage.lry - teipage.uly
# create a new zone for the image highlight
image_highlight = tei.Zone(type="image-annotation-highlight")
# image selection in annotation stored as percentages
# convert ##% into a float that can be multiplied by page dimensions
selection = {
'x': float(info['image_selection']['x'].rstrip('%')) / 100,
'y': float(info['image_selection']['y'].rstrip('%')) / 100,
'w': float(info['image_selection']['w'].rstrip('%')) / 100,
'h': float(info['image_selection']['h'].rstrip('%')) / 100
}
# convert percentages into upper left and lower right coordinates
# relative to the page
image_highlight.ulx = selection['x'] * float(page_width)
image_highlight.uly = selection['y'] * float(page_height)
image_highlight.lrx = image_highlight.ulx + (selection['w'] * page_width)
image_highlight.lry = image_highlight.uly + (selection['h'] * page_height)
image_highlight.id = 'highlight-%s' % annotation.id
target = '#%s' % image_highlight.id
teipage.node.append(image_highlight.node)
# call annotation_to_tei and insert the resulting note into
# the appropriate part of the document
teinote = annotation_to_tei(annotation, teivol)
teinote.target = target
# append actual annotation to tei annotations div
teivol.annotations.append(teinote)
def insert_anchor(element, anchor, offset):
'''Insert a TEI anchor into an element at a given offset. If
offset is zero, anchor is inserted just before the element. If
offset is length of element text, anchor is inserted immediately
after the element.
:param element: node for the element relative to which the
anchor will be added
:param anchor: node for the anchor element
:param offset: numeric offset into the element
'''
if offset == 0:
# offset zero - insert directly before this element
element.addprevious(anchor)
elif offset >= len(element.text):
# offset at end of this element - insert directly after
element.addnext(anchor)
else:
# offset somewhere inside the text of this element
# insert the element after the text and then break up
# the lxml text and "tail" so that text after the offset
# comes after the inserted anchor
el_text = element.text
element.insert(0, anchor)
element.text = el_text[:offset]
anchor.tail = el_text[offset:]
def consolidate_bibliography(teivol):
'''Clean up redundant bibliographic elements in individual works cited
and consolidate them into a single bibliography at the end of the document,
so that id references in the annotations still match the consolidated entries.'''
# - remove works cited & milestones from individual notes
# - generate biblStruct entries at the end of the document, one for
# each citation
# - make sure note references and bibl citations match
for note in teivol.annotations:
# clean up any note that has citations
if note.citations:
# move all citations to the bibliography
for cit in note.citations:
# if citation id is not already present, add to
# the main document bibliography
if cit.id not in teivol.citation_ids:
teivol.citations.append(cit)
# remove all citations from the note
note.citations = []
# remove unstructured citation information generated from markdown
if note.works_cited:
del note.works_cited_milestone
del note.zotero_items
del note.works_cited
del note.list_bibl
```
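A minimal sketch of the offset handling in `insert_anchor` above. It assumes the module shown above is importable as `readux.books.annotate` (as the `web_export` command below suggests) and that lxml is installed; the element names and offset are made up for illustration.
```python
from lxml import etree
from readux.books.annotate import insert_anchor  # assumed module path

# the target element needs a parent, since insert_anchor may call
# addprevious()/addnext() on it for offsets at the start or end
parent = etree.fromstring('<p><seg>hello world</seg></p>')
seg = parent[0]
anchor = etree.Element('anchor')

# offset falls inside the text, so the text is split around the anchor
insert_anchor(seg, anchor, 5)
print etree.tostring(parent)
# expected output, roughly: <p><seg>hello<anchor/> world</seg></p>
```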
#### File: management/commands/detect_blank.py
```python
import logging
from readux.books.management.page_import import BasePageImport
logger = logging.getLogger(__name__)
class Command(BasePageImport):
'''Utility script to check if an image or images would be identified as
blank using the same logic as import_covers and import_pages scripts.
Takes a list of image file paths.'''
help = __doc__
def handle(self, *images, **options):
self.setup(**options)
for imgfile in images:
try:
blank = self.is_blank_page(imgfile)
print '%s is%s blank' % (imgfile, '' if blank else ' not')
except Exception as err:
print 'Error reading %s: %s' % (imgfile, err)
```
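A hypothetical way to exercise the command above from Python (equivalent to running `python manage.py detect_blank <paths>`), assuming a configured Django environment for the readux project; the image paths are placeholders.
```python
from django.core.management import call_command

# positional arguments are passed through to Command.handle() as *images
call_command('detect_blank', '/tmp/page-001.tif', '/tmp/page-002.tif')
```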
#### File: management/commands/suppress_yearbook.py
```python
import re
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from readux.utils import solr_interface
from django.conf import settings
from django.core.paginator import Paginator
class Command(BaseCommand):
'''Utility script to suppress Emory Yearbooks published after 1922. It removes
the matching records from the Solr index so that the items will not show up in
Readux search results. The items themselves remain available, but are not very
visible unless the URL is known.
Can run in dry-run mode to get a summary without actually making any changes.
'''
option_list = BaseCommand.option_list + (
make_option('--dry-run', '-n',
action='store_true',
default=False,
help='Don\'t make any changes; just report on what would be done'),
)
def handle(self, **options):
dry_run = options.get('dry_run', False)
collection_id = "emory-control:LSDI-EmoryYearbooks"
year_threshold = 1922
solr = solr_interface()
page_interval = 10
paginator = Paginator(solr.query(collection_id=collection_id), page_interval)
# Announcements
print "\n"
print "###################### Important ######################"
if dry_run: print "*********************** DRY RUN ***********************"
print "environmental varaibles configured as follows"
print "collection_id to match: {}".format(collection_id)
print "year threshold: {} (not including {})".format(year_threadhold, year_threadhold)
print "solr env: {}".format(settings.SOLR_SERVER_URL)
print "#######################################################"
print "\n"
# When there are results returned
if paginator.count > 0:
summary = [] # store index to be purged
# Print summary on top
print "Records with collection_id {} found: {}, listing: ".format(collection_id, paginator.count)
# Regex to match "_yyyy"
regex = r"(\_\d{4})"
# Counter of the currently processed pid
current = 1
# Iterate through search results
for page in range(1, paginator.num_pages + 1):
for i in range(0, len(paginator.page(page))):
if paginator.page(page)[i]:
result = paginator.page(page)[i]
output = "{}/{}: {}, title: {}, label: {}".format(\
current, paginator.count, result["pid"], result["title"], result["label"])
# Match "_yyyy", ask if to delete
if re.search(regex, result["label"]):
match = re.search(regex, result["label"])
year = int(match.group(0)[1:])
if year > year_threshold:
# dry run - not remove item
if dry_run:
output += " - matched with year {} and can be removed from solr index - dry run!".format(year)
else:
# actually remove the record
solr.delete(queries=solr.Q(pid=result["pid"]))
solr.commit()
output += " - matched with year {} and is removed from solr index".format(year)
record = {"pid": result["pid"], "title": result["title"], "label": result["label"], "year": year}
summary.append(record)
print output
current += 1 # increment for the next item
# Print summary when there is one
if len(summary) > 0:
if dry_run:
print "Dry run summary (these will be removed):"
else:
print "Index deletion summary:"
for record in summary:
print record
# When there is no matching result
else:
print "No matching condition found. Aborted."
```
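A small sketch of the label parsing used above: the `_yyyy` suffix is matched with a regular expression and the year compared against the threshold. The label value here is hypothetical.
```python
import re

regex = r"(\_\d{4})"
year_threshold = 1922

label = 'emoryyearbook_1923'  # hypothetical Solr label value
match = re.search(regex, label)
if match:
    year = int(match.group(0)[1:])  # strip the leading underscore -> 1923
    print year > year_threshold     # True, so this record would be suppressed
```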
#### File: management/commands/web_export.py
```python
from eulfedora.server import Repository
from eulxml.xmlmap import load_xmlobject_from_file
from django.core.management.base import BaseCommand, CommandError
import shutil
from readux.books import annotate, export
from readux.books.models import Volume
from readux.books.tei import Facsimile
class Command(BaseCommand):
help = 'Construct web export of an annotated volume'
def add_arguments(self, parser):
parser.add_argument('pid', nargs='+', type=str)
parser.add_argument('--tei',
help='Use the specified TEI file instead of generating it')
def handle(self, *args, **options):
repo = Repository()
for pid in options['pid']:
vol = repo.get_object(pid, type=Volume)
if options['tei']:
tei = load_xmlobject_from_file(options['tei'], Facsimile)
else:
tei = annotate.annotated_tei(vol.generate_volume_tei(),
vol.annotations())
try:
zipfile = export.website(vol, tei)
except export.ExportException as err:
raise CommandError(err)
zipfilename = '%s-annotated-site.zip' % vol.noid
shutil.copyfile(zipfile.name, zipfilename)
print 'Export for %s complete, zipfile is %s' % (vol.noid, zipfilename)
```
#### File: readux/books/sitemaps.py
```python
from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import reverse
from readux.utils import solr_interface
from readux.books.models import Volume, Page
class _BaseVolumeSitemap(Sitemap):
# common items/lastmodification logic for volumes and volume pdfs
# volume change frequency unknown
def items(self):
solr = solr_interface()
return solr.query(content_model=Volume.VOLUME_CMODEL_PATTERN) \
.field_limit(['pid', 'last_modified'])
def lastmod(self, item):
return item['last_modified']
class VolumePdfSitemap(_BaseVolumeSitemap):
# priority uncertain;
# default priority is 0.5; set PDFs slightly lower
priority = 0.4
def location(self, item):
return reverse('books:pdf', kwargs={'pid': item['pid']})
class VolumeSitemap(_BaseVolumeSitemap):
# priority unknown
def location(self, item):
return reverse('books:volume', kwargs={'pid': item['pid']})
class VolumePageSitemap(Sitemap):
'Sitemap for individual pages'
# default priority is 0.5; set pages slightly lower
priority = 0.4
def items(self):
solr = solr_interface()
return solr.query(content_model=Page.PAGE_CMODEL_PATTERN) \
.field_limit(['pid', 'last_modified',
'isConstituentOf'])
def lastmod(self, item):
return item['last_modified']
def location(self, item):
# volume page belongs to is indexed based on fedora relation
vol_pid = item['isConstituentOf'][0].replace('info:fedora/', '')
return reverse('books:page',
kwargs={'pid': item['pid'], 'vol_pid': vol_pid})
```
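A hypothetical URL configuration for the sitemap classes above; the actual readux urls.py may differ, but the tests below reverse `sitemap-index` and `sitemap` (with a `section` kwarg), which corresponds to wiring like this.
```python
from django.conf.urls import url
from django.contrib.sitemaps import views as sitemap_views
from readux.books import sitemaps

# section names are placeholders
sitemap_cfg = {
    'volumes': sitemaps.VolumeSitemap,
    'volume-pdfs': sitemaps.VolumePdfSitemap,
    'volume-pages': sitemaps.VolumePageSitemap,
}

urlpatterns = [
    url(r'^sitemap\.xml$', sitemap_views.index,
        {'sitemaps': sitemap_cfg, 'sitemap_url_name': 'sitemap'},
        name='sitemap-index'),
    url(r'^sitemap-(?P<section>.+)\.xml$', sitemap_views.sitemap,
        {'sitemaps': sitemap_cfg}, name='sitemap'),
]
```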
#### File: readux/books/tei.py
```python
from django.conf import settings
from eulxml import xmlmap
from eulxml.xmlmap import teimap
from lxml import etree
import os
'''
:class:`eulxml.xmlmap.XmlObject` subclasses for dealing with TEI,
particularly for the TEI facsimile used for positional OCR data for
readux pages and for generating annotated TEI for export.
'''
class TeiBase(teimap.Tei):
'Base class for all TEI objects, with all namespaces'
ROOT_NS = teimap.TEI_NAMESPACE
ROOT_NAMESPACES = {
'tei' : ROOT_NS,
'xml': 'http://www.w3.org/XML/1998/namespace',
'xlink': 'http://www.w3.org/TR/xlink/',
}
class Graphic(TeiBase):
'TEI Graphic'
ROOT_NAME = 'graphic'
#: url
url = xmlmap.StringField('@url')
#: rend
rend = xmlmap.StringField('@rend')
class Zone(TeiBase):
'XmlObject for a zone in a TEI facsimile document'
ROOT_NAME = 'zone'
#: xml id
id = xmlmap.StringField('@xml:id')
#: n attribute
n = xmlmap.StringField('@n')
#: type attribute
type = xmlmap.StringField('@type')
#: upper left x coord
ulx = xmlmap.FloatField('@ulx')
#: upper left y coord
uly = xmlmap.FloatField('@uly')
#: lower right x coord
lrx = xmlmap.FloatField('@lrx')
#: lower right y coord
lry = xmlmap.FloatField('@lry')
#: xlink href
href = xmlmap.StringField('@xlink:href')
#: text content
text = xmlmap.StringField('tei:line|tei:w')
#: list of word zones contained in this zone (e.g., within a textLine zone)
word_zones = xmlmap.NodeListField('.//tei:zone[@type="string"]', 'self')
#: nearest preceding sibling word zone (e.g., previous word in this line), if any)
preceding = xmlmap.NodeField('preceding-sibling::tei:zone[1]', 'self')
#: nearest ancestor zone
parent = xmlmap.NodeField('ancestor::tei:zone[1]', 'self')
#: containing page
page = xmlmap.NodeField('ancestor::tei:surface[@type="page"]', 'self')
# not exactly a zone, but same attributes we care about (type, id, ulx/y, lrx/y)
#: list of graphic elements (i.e. page images)
graphics = xmlmap.NodeListField('tei:graphic', Graphic)
# convenience mappings to specific sizes of page image
#: full size image (tei:graphic with type "full")
full_image = xmlmap.NodeField('tei:graphic[@type="full"]', Graphic)
#: page size image (tei:graphic with type "page")
page_image = xmlmap.NodeField('tei:graphic[@type="page"]', Graphic)
#: thumbnail image (tei:graphic with type "thumbnail")
thumbnail = xmlmap.NodeField('tei:graphic[@type="thumbnail"]', Graphic)
#: small thumbnail image (tei:graphic with type "small-thumbnail")
small_thumbnail = xmlmap.NodeField('tei:graphic[@type="small-thumbnail"]', Graphic)
#: image info as provided by IIIF (tei:graphic with type "info")
image_info = xmlmap.NodeField('tei:graphic[@type="info"]', Graphic)
@property
def width(self):
'zone width'
return self.lrx - self.ulx
@property
def height(self):
'zone height'
return self.lry - self.uly
@property
def avg_height(self):
'''Calculated average height of word zones in the current zone
(i.e. in a text line)'''
if self.word_zones:
word_heights = [w.height for w in self.word_zones]
return sum(word_heights) / float(len(word_heights))
class Ref(TeiBase):
'Tei reference'
ROOT_NAME = 'ref'
#: target
target = xmlmap.StringField('@target')
#: type
type = xmlmap.StringField('@type')
#: text
text = xmlmap.StringField('text()')
class BiblStruct(TeiBase):
'Structured Bibliographic citation'
# minimal mappings for now
ROOT_NAME = 'BiblStruct'
#: xml id
id = xmlmap.StringField('@xml:id')
#: corresp
corresp = xmlmap.StringField('@corresp')
#: type
type = xmlmap.StringField('@type')
class AnnotationWorksCited(TeiBase):
milestone = xmlmap.NodeField('preceding-sibling::tei:milestone',
xmlmap.XmlObject)
ref_list = xmlmap.NodeField(
'parent::tei:list[contains(tei:item/tei:anchor/@xml:id, "zotero")]',
xmlmap.XmlObject)
class Note(TeiBase):
'Tei Note, used here to contain an annotation'
ROOT_NAME = 'note'
#: xml id
id = xmlmap.StringField('@xml:id')
#: responsibility
resp = xmlmap.StringField('@resp')
#: target
target = xmlmap.StringField('@target')
#: type
type = xmlmap.StringField('@type')
#: ana attribute, e.g. for tag identifiers
ana = xmlmap.StringField('@ana')
#: xlink href
href = xmlmap.StringField('@xlink:href')
#: list of paragraphs as strings
paragraphs = xmlmap.StringListField('tei:p')
#: code for the markdown used in the original annotation
markdown = xmlmap.StringField('tei:code[@lang="markdown"]')
#: links to related pages
related_pages = xmlmap.NodeListField('tei:ref[@type="related page"]',
Ref)
#: list of bibliographic citations/works cited
citations = xmlmap.NodeListField('tei:listBibl/tei:biblStruct', BiblStruct)
# in-text citation generated from markdown; these fields
# are mapped so they can be removed from the annotated tei document
works_cited = xmlmap.NodeField(
'tei:head[text() = "Works Cited"]',
xmlmap.XmlObject)
zotero_items = xmlmap.NodeField(
'tei:list[contains(tei:item/tei:anchor/@xml:id, "zotero")]',
xmlmap.XmlObject)
works_cited_milestone = xmlmap.NodeField(
'tei:milestone[following-sibling::tei:head/text() = "Works Cited"]',
xmlmap.XmlObject)
# mapped to remove empty list bibl element
list_bibl = xmlmap.NodeField('tei:listBibl', xmlmap.XmlObject)
class Bibl(TeiBase):
'TEI Bibl, with mappings for digital edition and pdf urls'
#: type
type = xmlmap.StringField('@type')
#: title
title = xmlmap.StringField('tei:title')
#: author
authors = xmlmap.StringListField('tei:author')
#: date
date = xmlmap.StringField('tei:date')
#: url to digital edition
url = xmlmap.StringField('tei:ref[@type="digital-edition"]/@target')
#: url to pdf of digital edition
pdf_url = xmlmap.StringField('tei:ref[@type="pdf"]/@target')
class PublicationStatement(TeiBase):
'Publication statement, with mapping for readux distributor'
#: descriptive statement (paragraph)
desc = xmlmap.StringField('tei:p')
#: date in human-readable display format
date = xmlmap.DateField('tei:date', '%B %d, %Y')
#: normalized date
date_normal = xmlmap.DateField('tei:date/@when', '%Y-%m-%d')
#: readux distributor reference (includes ref with target of readux.library.emory.edu)
distributor_readux = xmlmap.StringField('tei:distributor[@xml:id="readux"]/tei:ref[@target="http://readux.library.emory.edu"]')
class Facsimile(TeiBase):
'''Extension of :class:`eulxml.xmlmap.teimap.TEI` to provide access
to TEI facsimile elements'''
#: local xsd schema
XSD_SCHEMA = 'file://%s' % os.path.join(os.path.abspath(os.path.dirname(__file__)),
'schema', 'TEIPageView.xsd')
# NOTE: using absolute path for schema to avoid path issues when
# building documentation on readthedocs.org
ROOT_NAME = 'TEI'
xmlschema = etree.XMLSchema(etree.parse(XSD_SCHEMA))
# NOTE: not using xmlmap.loadSchema because it doesn't correctly load
# referenced files in the same directory
#: surface with type page, as :class:`Zone`
page = xmlmap.NodeField('tei:facsimile/tei:surface[@type="page"]', Zone)
#: list of pages (surface with type page)
page_list = xmlmap.NodeListField('tei:facsimile/tei:surface[@type="page"]', Zone)
# NOTE: tei facsimile could include illustrations, but ignoring those for now
#: list of zones with type textLine or line as :class:`Zone`
lines = xmlmap.NodeListField('tei:facsimile//tei:zone[@type="textLine" or @type="line"]', Zone)
#: list of word zones (type string) as :class:`Zone`
word_zones = xmlmap.NodeListField('tei:facsimile//tei:zone[@type="string"]', Zone)
#: publication statement distributor
distributor = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:publicationStmt/tei:distributor')
#: publication statement as :class:`PublicationStatement`
pubstmt = xmlmap.NodeField('tei:teiHeader/tei:fileDesc/tei:publicationStmt',
PublicationStatement)
#: encoding description
encoding_desc = xmlmap.NodeField('tei:teiHeader/tei:encodingDesc',
xmlmap.XmlObject)
#: source description for the original volume
original_source = xmlmap.NodeField('tei:teiHeader/tei:fileDesc/tei:sourceDesc/tei:bibl[@type="original"]',
Bibl)
#: source description for the readux digital edition
digital_source = xmlmap.NodeField('tei:teiHeader/tei:fileDesc/tei:sourceDesc/tei:bibl[@type="digital"]',
Bibl)
class Name(TeiBase):
'TEI name element, with id attribute and value'
ROOT_NAME = 'name'
#: xml id
id = xmlmap.StringField('@xml:id')
#: full name
value = xmlmap.StringField('.')
class Interp(TeiBase, teimap.TeiInterp):
# extend eulxml.xmlmap.teimap version because it does not include
# the xml namespace for setting xml:id
ROOT_NAME = 'interp'
value = xmlmap.StringField('.')
class InterpGroup(teimap.TeiInterpGroup):
# extend eulxml.xmlmap.teimap version to map our local interp
interp = xmlmap.NodeListField("tei:interp", Interp)
class AnnotatedFacsimile(Facsimile):
'''Annotated Tei facsimile, with mappings needed to generate
TEI with annotations.
'''
#: main tei title
main_title = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:title[@type="full"]/tei:title[@type="main"]')
#: tei subtitle (e.g., annotated edition)
subtitle = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:title[@type="full"]/tei:title[@type="sub"]')
#: responsibility statement text
responsibility = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:respStmt/tei:resp')
#: responsibility statement names
responsible_names = xmlmap.NodeListField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:respStmt/tei:name',
Name)
# additional mappings for annotation data
#: list of annotations at body/div[@type="annotations"]/note[@type="annotation"], as :class:`Note`
annotations = xmlmap.NodeListField('tei:text/tei:body/tei:div[@type="annotations"]/tei:note[@type="annotation"]',
Note)
#: list of bibliographic citations/works cited
citations = xmlmap.NodeListField('tei:text/tei:body/tei:div[@type="works-cited"]/tei:listBibl/tei:biblStruct', BiblStruct)
#: list of bibliographic citation ids
citation_ids = xmlmap.StringListField('tei:text/tei:body/tei:div[@type="works-cited"]/tei:listBibl/tei:biblStruct/@xml:id')
#: annotation tags, as :class:`~eulxml.xmlmap.teimap.TeiInterpGroup`
tags = xmlmap.NodeField('tei:text/tei:back/tei:interpGrp[@type="tags"]',
InterpGroup)
def page_id_by_xlink(self, link):
results = self.node.xpath('//tei:surface[@type="page"][@xlink:href="%s"]/@xml:id' \
% link, namespaces=self.ROOT_NAMESPACES)
if results:
return results[0]
class Anchor(TeiBase):
'TEI Anchor, for marking start and end of text annotation highlights'
ROOT_NAME = 'anchor'
#: xml id
id = xmlmap.StringField('@xml:id')
#: type
type = xmlmap.StringField('@type')
#: next attribute
next = xmlmap.StringField('@next')
```
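A brief sketch of how these xmlmap classes can be instantiated with field values and serialized, mirroring the `tei.Anchor(...)` construction used in the annotation code earlier; the id values are placeholders.
```python
from readux.books import tei

anchor = tei.Anchor(type='text-annotation-highlight-start',
                    id='highlight-start-example',
                    next='highlight-end-example')
# serialize() returns the XML for the newly built node, roughly:
# <anchor xmlns="http://www.tei-c.org/ns/1.0" xml:id="highlight-start-example"
#         type="text-annotation-highlight-start" next="highlight-end-example"/>
print anchor.serialize()
```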
#### File: books/templatetags/teifacsimile.py
```python
from django import template
from django.utils.safestring import mark_safe
from readux.books import tei
register = template.Library()
def percent(a, b):
# a as percentage of b
# ensure both are cast to float, divide, then multiply by 100
return (float(a) / float(b)) * 100
@register.filter
def zone_style(zone, scale):
'''Generate HTML style and data attributes for a
:class:`readux.books.tei.Zone` element so that the text can be
scaled and positioned for display on a resized page image.
Takes a zone element (expected to have x/y coordinate attributes) and
a scale, which is used for fallback sizing where percentages cannot
be used. Sets a vhfontsize data attribute for use with javascript
to adjust font sizes relative to the viewport.'''
styles = {}
data = {}
if isinstance(zone, tei.Zone):
if zone.type in ['textLine', 'line']:
# text lines are absolutely positioned boxes
styles['left'] = '%.2f%%' % percent(zone.ulx, zone.page.width)
styles['top'] = '%.2f%%' % percent(zone.uly, zone.page.height)
# width relative to page size
styles['width'] = '%.2f%%' % percent(zone.width, zone.page.width)
styles['height'] = '%.2f%%' % percent(zone.height, zone.page.height)
# TODO: figure out how to determine this from ocr/teifacsimile
# rather than assuming
styles['text-align'] = 'left'
# set pixel-based font size for browsers that don't support viewport based sizes.
# for mets-alto, use average height of words in the line to calculate font size
# for abbyy ocr, no word zones exist, so just use line height
styles['font-size'] = '%.2fpx' % ((zone.avg_height or zone.height) * scale)
# calculate font size as percentage of page height;
# this will be used by javascript to calculate as % of viewport height
data['vhfontsize'] = '%.2f' % percent(zone.lry - zone.uly, zone.page.height)
elif zone.type == 'string':
# set width & height relative to *parent* line, not the whole page
styles['width'] = '%.2f%%' % percent(zone.width, zone.parent.width)
styles['height'] = '%.2f%%' % percent(zone.height, zone.parent.height)
# position words absolutely within the line
styles['left'] = '%.2f%%' % percent(zone.ulx - zone.parent.ulx, zone.parent.width)
attrs = []
if styles:
attrs.append('style="%s"' % ';'.join(['%s:%s' % (k, v) for k, v in styles.iteritems()]))
if data:
attrs.append(' '.join(['data-%s="%s"' % (k, v) for k, v in data.iteritems()]))
if zone.id:
attrs.append('id="%s"' % zone.id)
return mark_safe(' '.join(attrs))
```
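A sketch of the filter in use outside a template, assuming readux and its dependencies (eulxml, Django) are importable; the TEI fragment and scale value are made up for illustration.
```python
from eulxml.xmlmap import load_xmlobject_from_string
from readux.books import tei
from readux.books.templatetags.teifacsimile import zone_style

TEI_XML = '''<TEI xmlns="http://www.tei-c.org/ns/1.0">
  <facsimile>
    <surface type="page" ulx="0" uly="0" lrx="2000" lry="3000">
      <zone type="textLine" ulx="100" uly="150" lrx="1100" lry="200"/>
    </surface>
  </facsimile>
</TEI>'''

doc = load_xmlobject_from_string(TEI_XML, tei.Facsimile)
line = doc.lines[0]
# produces style/data attributes such as left:5.00%, top:5.00%, width:50.00%,
# height:1.67%, font-size:25.00px and data-vhfontsize="1.67"
print zone_style(line, 0.5)
```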
#### File: books/tests/views.py
```python
from datetime import datetime, timedelta
from django.contrib.auth import get_user_model
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.template.defaultfilters import filesizeformat
from django.test import TestCase
import json
from mock import Mock, patch, NonCallableMock, NonCallableMagicMock, \
MagicMock, call
from urllib import unquote
from readux.annotations.models import Annotation
from readux.books.models import SolrVolume, Volume, Page, SolrPage
from readux.books import sitemaps, views, view_helpers, forms
from readux.utils import absolutize_url
class BookViewsTest(TestCase):
# borrowing fixture & test accounts from readux.annotations.tests
fixtures = ['test_annotation_data.json']
user_credentials = {
'user': {'username': 'testuser', 'password': '<PASSWORD>'},
'superuser': {'username': 'testsuper', 'password': '<PASSWORD>'}
}
# sample datastream profile xml
xml_profile = '''<datastreamProfile xmlns="http://www.fedora.info/definitions/1/0/management/" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.fedora.info/definitions/1/0/management/ http://www.fedora.info/definitions/1/0/datastreamProfile.xsd" pid="synctest:1" dsID="DC">
<dsLabel>Dublin Core Record for this object</dsLabel>
<dsVersionID>DC1.0</dsVersionID>
<dsCreateDate>2016-02-10T16:36:25.913Z</dsCreateDate>
<dsState>A</dsState>
<dsMIME>text/xml</dsMIME>
<dsFormatURI>http://www.openarchives.org/OAI/2.0/oai_dc/</dsFormatURI>
<dsControlGroup>X</dsControlGroup>
<dsSize>379</dsSize>
<dsVersionable>true</dsVersionable>
<dsInfoType/>
<dsLocation>synctest:1+DC+DC1.0</dsLocation>
<dsLocationType/>
<dsChecksumType>MD5</dsChecksumType>
<dsChecksum>cc0db0ef0fcd559065a788a22442d3c7</dsChecksum>
</datastreamProfile>'''
@patch('readux.books.views.VolumePdf.repository_class')
@patch('eulfedora.views._raw_datastream')
def test_pdf(self, mockraw_ds, mockrepo_class):
mockobj = Mock()
mockobj.pid = 'vol:1'
mockobj.label = 'ocm30452349_1908'
mockrepo = mockrepo_class.return_value
mockrepo.get_object.return_value = mockobj
# get datastream called by etag/last-modified methods
mockrepo.api.getDatastream.return_value.content = self.xml_profile
mockrepo.api.getDatastream.return_value.url = 'http://fedora.co/objects/ds'
# to support for last modified conditional
mockobj.pdf.created = datetime.now()
# mockrepo.api.getDatastreamDissemination.return_value =
# mockobj.getDatastreamObject.return_value.created = mockobj.pdf.created
# class-based view handling requires an actual response
mockraw_ds.return_value = HttpResponse()
pdf_url = reverse('books:pdf', kwargs={'pid': mockobj.pid})
response = self.client.get(pdf_url)
# only check custom logic implemented here, via mocks
# (not testing eulfedora.views.raw_datastream logic)
self.assertEqual(mockraw_ds.return_value, response,
'result of fedora raw_datastream should be returned')
# can't check full call args because we can't match request
args, kwargs = mockraw_ds.call_args
# second arg should be pid
self.assertEqual(mockobj.pid, args[1])
# third arg should be datastream id
self.assertEqual(Volume.pdf.id, args[2])
# digital object class should be specified
self.assertEqual(mockrepo, kwargs['repo'])
self.assertEqual({'Content-Disposition': 'filename="%s.pdf"' % mockobj.label},
kwargs['headers'])
# volume with a space in the label
mockobj.label = 'ocm30452349_1908 V0.1'
response = self.client.get(pdf_url)
args, kwargs = mockraw_ds.call_args
content_disposition = kwargs['headers']['Content-Disposition']
self.assertEqual('filename="%s.pdf"' % mockobj.label.replace(' ', '-'),
content_disposition,
'content disposition filename should not include spaces even if label does')
# if object doesn't exist, 404 (don't error on generating headers)
mockobj.exists = False
response = self.client.get(pdf_url)
expected, got = 404, response.status_code
self.assertEqual(expected, got,
'expected %s for %s when there is a fedora error, got %s' % \
(expected, pdf_url, got))
@patch('readux.books.views.Paginator', spec=Paginator)
@patch('readux.books.views.solr_interface')
@patch('readux.books.views.VolumeSearch.paginate_queryset')
def test_search(self, mockqs_paginate, mocksolr_interface, mockpaginator):
mockpage = NonCallableMock()
search_url = reverse('books:search')
# NOTE: pagination now happens in django's class-based view,
# so must be mocked there
mockqs_paginate.return_value = (mockpaginator.return_value,
mockpage, [], False)
# no search terms - invalid form
response = self.client.get(search_url)
self.assertContains(response, 'Please enter one or more search terms')
mocksolr = mocksolr_interface.return_value
# simulate sunburnt's fluid interface
mocksolr.query.return_value = mocksolr.query
for method in ['query', 'facet_by', 'sort_by', 'field_limit',
'exclude', 'filter', 'join', 'paginate', 'results_as',
'facet_query']:
getattr(mocksolr.query, method).return_value = mocksolr.query
# set up mock results for collection query and facet counts
solr_result = NonCallableMagicMock(spec_set=['__iter__', 'facet_counts'])
# *only* mock iter, to avoid weirdness with django templates & callables
solr_result.__iter__.return_value = [
SolrVolume(**{'pid': 'vol:1', 'title': 'Lecoq, the detective', 'pdf_size': 1024}),
SolrVolume(**{'pid': 'vol:2', 'title': '<NAME>', 'pdf_size': 34665}),
]
mocksolr.query.__iter__.return_value = iter(solr_result)
mocksolr.count.return_value = 2
# mock facets
# solr_result.facet_counts.facet_fields = {
# 'collection_label_facet': [('Civil War Literature', 2), ('Yellowbacks', 4)]
# }
# use a noncallable for the pagination result that is used in the template
# because passing callables into django templates does weird things
mockpaginator.return_value.page.return_value = mockpage
results = NonCallableMagicMock(spec=['__iter__', 'facet_counts', '__len__'])
results.__iter__.return_value = iter(solr_result)
results.facet_counts.facet_fields = {
'collection_label_facet': [('Emory Yearbooks', 1), ('Yellowbacks', 4)]
}
results.__len__.return_value = 2
mockpage.object_list = results
mockpage.has_other_pages = False
mockpage.paginator.count = 2
mockpage.paginator.page_range = [1]
mockpaginator.return_value.count = 2
mockpaginator.return_value.page_range = [1]
mockqs_paginate.return_value = (mockpaginator.return_value, mockpage, results, True)
# query with search terms
response = self.client.get(search_url, {'keyword': 'yellowbacks'})
mocksolr.query.filter.assert_called_with(content_model=Volume.VOLUME_CMODEL_PATTERN)
# because of creator/title search boosting, actual query is a little difficult to test
mocksolr.Q.assert_any_call('yellowbacks')
mocksolr.Q.assert_any_call(creator='yellowbacks')
mocksolr.Q.assert_any_call(title='yellowbacks')
# not sure how to test query on Q|Q**3|Q**3
mocksolr.query.field_limit.assert_called_with(SolrVolume.necessary_fields,
score=True)
# check that unapi / zotero harvest is enabled
self.assertContains(response,
'<link rel="unapi-server" type="application/xml" title="unAPI" href="%s" />' % \
reverse('books:unapi'),
html=True,
msg_prefix='link to unAPI server URL should be specified in header')
# check that items are displayed
for item in solr_result:
self.assertContains(response, item['title'],
msg_prefix='title should be displayed')
self.assertContains(response, unquote(reverse('books:pdf', kwargs={'pid': item['pid']})),
msg_prefix='link to pdf should be included in response')
self.assertContains(response,
'<abbr class="unapi-id" title="%s"></abbr>' % item['pid'],
msg_prefix='unapi item id for %s should be included to allow zotero harvest' \
% item['pid'])
# pdf size
self.assertContains(response, filesizeformat(item['pdf_size']),
msg_prefix='PDF size should be displayed in human-readable format')
# check that collection facets are displayed / linked
for coll, count in results.facet_counts.facet_fields['collection_label_facet']:
self.assertContains(response, coll,
msg_prefix='collection facet label should be displayed on search results page')
# not a very definitive test, but at least check the number is displayed
self.assertContains(response, count,
msg_prefix='collection facet count should be displayed on search results page')
self.assertContains(response,
'?keyword=yellowbacks&collection=%s' % coll.replace(' ', '%20'),
msg_prefix='response should include link to search filtered by collection facet')
# multiple terms and phrase
response = self.client.get(search_url, {'keyword': 'yellowbacks "lecoq the detective" mystery'})
for term in ['yellowbacks', 'lecoq the detective', 'mystery']:
mocksolr.Q.assert_any_call(term)
# filtered by collection
response = self.client.get(search_url, {'keyword': 'lecoq', 'collection': 'Yellowbacks'})
mocksolr.query.query.assert_any_call(collection_label='"%s"' % 'Yellowbacks')
## annotation totals
# empty annotation total in context for anonymous user
self.assertEqual({}, response.context['annotated_volumes'])
# check that annotation total is retrieved for ONLY logged in users
with patch('readux.books.views.Volume') as mockvolclass:
response = self.client.get(search_url, {'keyword': 'lecoq', 'collection': 'Yellowbacks'})
mockvolclass.volume_annotation_count.assert_not_called()
User = get_user_model()
testuser = User.objects.get(username=self.user_credentials['user']['username'])
self.client.login(**self.user_credentials['user'])
response = self.client.get(search_url, {'keyword': 'lecoq', 'collection': 'Yellowbacks'})
mockvolclass.volume_annotation_count.assert_called_with(testuser)
@patch('readux.books.views.VolumeText.repository_class') #TypeInferringRepository')
def test_text(self, mockrepo_class):
mockobj = Mock()
mockobj.pid = 'vol:1'
mockobj.label = 'ocm30452349_1908'
# has to return a datetime (and not a mock) for last-modified conditional
mockobj.getDatastreamObject.return_value.created = datetime.now()
mockrepo = mockrepo_class.return_value
mockrepo.get_object.return_value = mockobj
# get datastream called by etag/last-modified methods
mockrepo.api.getDatastream.return_value.content = self.xml_profile
mockrepo.api.getDatastream.return_value.url = 'http://fedora.co/objects/ds'
mockobj.get_fulltext.return_value = 'sample text content'
# to support for last modified conditional
mockobj.ocr.created = datetime.now()
text_url = reverse('books:text', kwargs={'pid': mockobj.pid})
response = self.client.get(text_url)
self.assertEqual(mockobj.get_fulltext.return_value, response.content,
'volume full text should be returned as response content')
self.assertEqual(response['Content-Type'], "text/plain")
self.assertEqual(response['Content-Disposition'],
'filename="%s.txt"' % mockobj.label)
# various 404 conditions
# - no ocr
mockobj.fulltext_available = False
response = self.client.get(text_url)
self.assertEqual(404, response.status_code,
'text view should 404 if fulltext is not available')
# - not a volume
mockobj.has_requisite_content_models = False
mockobj.ocr.exists = True
response = self.client.get(text_url)
self.assertEqual(404, response.status_code,
'text view should 404 if object is not a Volume')
# - object doesn't exist
mockobj.exists = False
mockobj.has_requisite_content_models = True
response = self.client.get(text_url)
self.assertEqual(404, response.status_code,
'text view should 404 if object does not exist')
@patch('readux.books.views.Repository')
def test_unapi(self, mockrepo):
unapi_url = reverse('books:unapi')
# no params - should list available formats
response = self.client.get(unapi_url)
self.assertEqual('application/xml', response['content-type'],
'response should be returned as xml')
self.assertContains(response, '<formats>',
msg_prefix='request with no parameters should return all formats')
# volume formats only for now
formats = Volume.unapi_formats
for fmt_name, fmt_info in formats.iteritems():
self.assertContains(response, '<format name="%s" type="%s"' \
% (fmt_name, fmt_info['type']),
msg_prefix='formats should include %s' % fmt_name)
mockobj = Mock()
mockobj.pid = 'vol:1'
mockobj.label = 'ocm30452349_1908'
mockobj.unapi_formats = Volume.unapi_formats
# actual rdf dc logic tested elsewhere
mockobj.rdf_dc.return_value = 'sample bogus rdf for testing purposes'
mockrepo.return_value.get_object.return_value = mockobj
# request with id but no format
response = self.client.get(unapi_url, {'id': mockobj.pid})
self.assertEqual('application/xml', response['content-type'],
'response should be returned as xml')
self.assertContains(response, '<formats id="%s">' % mockobj.pid,
msg_prefix='request with id specified should return formats for that id')
# volume formats only for now
for fmt_name, fmt_info in formats.iteritems():
self.assertContains(response, '<format name="%s" type="%s"' \
% (fmt_name, fmt_info['type']),
msg_prefix='formats should include %s' % fmt_name)
# request with id and format
response = self.client.get(unapi_url, {'id': mockobj.pid, 'format': 'rdf_dc'})
self.assertEqual(formats['rdf_dc']['type'], response['content-type'],
'response content-type should be set based on requested format')
self.assertEqual(mockobj.rdf_dc.return_value, response.content,
'response content should be set based on result of method corresponding to requested format')
@patch('readux.books.views.Repository')
def test_volume(self, mockrepo):
mockobj = NonCallableMock()
mockobj.pid = 'vol:1'
mockobj.title = 'Lecoq, the detective'
mockobj.volume = 'V.1'
mockobj.date = ['1801']
mockobj.creator = ['<NAME>']
mockobj.book.dc.content.description_list = [
'Translation of: Monsieur Lecoq.',
'Victorian yellowbacks + paperbacks, 1849-1905'
]
mockobj.book.dc.content.publisher = 'London : Vizetelly'
mockobj.book.volume_set = [mockobj, NonCallableMock(pid='vol:2')]
mockobj.pdf_size = 1024
mockobj.has_pages = False
mockobj.is_a_volume = True
mockrepo.return_value.get_object.return_value = mockobj
# to support for last modified conditional
mockobj.ocr.created = datetime.now()
vol_url = reverse('books:volume', kwargs={'pid': mockobj.pid})
response = self.client.get(vol_url)
self.assertContains(response, mockobj.title,
msg_prefix='response should include title')
self.assertContains(response, mockobj.volume,
msg_prefix='response should include volume label')
self.assertContains(response, mockobj.date[0],
msg_prefix='response should include date')
self.assertContains(response, mockobj.creator[0],
msg_prefix='response should include creator')
for desc in mockobj.book.dc.content.description_list:
self.assertContains(response, desc,
msg_prefix='response should include dc:description')
self.assertContains(response, mockobj.book.dc.content.publisher,
msg_prefix='response should include publisher')
self.assertContains(response, reverse('books:pdf', kwargs={'pid': mockobj.pid}),
msg_prefix='response should include link to pdf')
# related volumes
self.assertContains(response, 'Related volumes',
msg_prefix='response should include related volumes when present')
self.assertContains(response,
reverse('books:volume', kwargs={'pid': mockobj.book.volume_set[0].pid}),
msg_prefix='response should link to related volumes')
# pdf size
self.assertContains(response, filesizeformat(mockobj.pdf_size),
msg_prefix='PDF size should be displayed in human-readable format')
# no pages loaded, should not include volume search or read online
self.assertNotContains(response, 'Read online',
msg_prefix='volume without pages loaded should not display read online option')
# NOTE: href needed to differentiate from cover url, which starts the same
self.assertNotContains(response, 'href="%s"' % reverse('books:pages', kwargs={'pid': mockobj.pid}),
msg_prefix='volume without pages loaded should not have link to read online')
self.assertNotContains(response, '<form id="volume-search" ',
msg_prefix='volume without pages loaded should not have volume search')
# annotation total passed to context
self.assert_('annotated_volumes' not in response.context,
'annotation count should not be set for volumes without pages')
# simulate volume with pages loaded
mockobj.has_pages = True
# to test annotation count
mockobj.get_absolute_url.return_value = '/books/vol:1/'
mockobj.annotation_count.return_value = 5
response = self.client.get(vol_url)
# *should* include volume search and read online
self.assertContains(response, 'Read online',
msg_prefix='volume with pages loaded should display read online option')
self.assertContains(response, reverse('books:pages', kwargs={'pid': mockobj.pid}),
msg_prefix='volume with pages loaded should have link to read online')
self.assertContains(response, '<form id="volume-search" ',
msg_prefix='volume without pages loaded should have volume search')
# annotation total passed to context
self.assertEqual({mockobj.get_absolute_url(): 5},
response.context['annotated_volumes'],
'annotation count should be set for volumes with pages')
mockobj.annotation_count.return_value = 0
response = self.client.get(vol_url)
self.assert_('annotated_volumes' not in response.context,
'annotation count should not be set in context when it is zero')
# non-existent should 404
mockobj.exists = False
response = self.client.get(vol_url)
expected, got = 404, response.status_code
self.assertEqual(expected, got,
'expected %s for %s when object does not exist, got %s' % \
(expected, vol_url, got))
# exists but isn't a volume - should also 404
mockobj.exists = True
mockobj.is_a_volume = False
response = self.client.get(vol_url)
expected, got = 404, response.status_code
self.assertEqual(expected, got,
'expected %s for %s when object is not a volume, got %s' % \
(expected, vol_url, got))
@patch('readux.books.views.Repository')
@patch('readux.books.views.Paginator', spec=Paginator)
@patch('readux.books.views.solr_interface')
def test_volume_page_search(self, mocksolr_interface, mockpaginator, mockrepo):
mockobj = NonCallableMock()
mockobj.pid = 'vol:1'
mockobj.title = 'Lecoq, the detective'
mockobj.date = ['1801']
mockrepo.return_value.get_object.return_value = mockobj
mocksolr = mocksolr_interface.return_value
# simulate sunburnt's fluid interface
mocksolr.query.return_value = mocksolr.query
for method in ['query', 'facet_by', 'sort_by', 'field_limit', 'highlight',
'exclude', 'filter', 'join', 'paginate', 'results_as']:
getattr(mocksolr.query, method).return_value = mocksolr.query
# set up mock results for collection query and facet counts
solr_result = NonCallableMagicMock(spec_set=['__iter__', 'facet_counts'])
# *only* mock iter, to avoid weirdness with django templates & callables
solr_results = [
SolrPage(**{'pid': 'page:1', 'page_order': '1', 'score': 0.5,
'solr_highlights': {'page_text': ['snippet with search term']},
'identifier': ['http://testpid.co/ark:/1234/11/']}),
SolrPage(**{'pid': 'page:233', 'page_order': '123', 'score': 0.02,
'solr_highlights': {'page_text': ['sample text result from content']},
'identifier': ['http://testpid.co/ark:/1234/22/']}),
]
solr_result.__iter__.return_value = solr_results
mocksolr.query.__iter__.return_value = iter(solr_result)
mocksolr.count.return_value = 2
mockpage = NonCallableMock()
mockpaginator.return_value.page.return_value = mockpage
results = NonCallableMagicMock(spec=['__iter__', 'facet_counts', 'highlighting'])
results.__iter__.return_value = iter(solr_result)
mockpage.object_list = results
mockpage.has_other_pages = False
mockpage.paginator.count = 2
mockpage.paginator.page_range = [1]
# patch in highlighting - apparent change in sunburnt behavior
results.highlighting = {
'page:1': {'page_text': ['snippet with search term']},
'page:233': {'page_text': ['sample text result from content']}
}
vol_url = reverse('books:volume', kwargs={'pid': mockobj.pid})
response = self.client.get(vol_url, {'keyword': 'determine'})
self.assertEqual(response.templates[0].name,
views.VolumeDetail.search_template_name,
'volume search template should be used for valid search submission')
for page in iter(solr_result):
self.assertContains(response,
reverse('books:page-image', kwargs={'vol_pid': mockobj.pid,
'pid': page.pid, 'mode': 'mini-thumbnail'}),
msg_prefix='search results should include mini page thumbnail url')
self.assertContains(response, "Page %(page_order)s" % page,
msg_prefix='search results should include page number')
self.assertContains(response, page['score'],
msg_prefix='search results should display page relevance score')
self.assertContains(response, reverse('books:page',
kwargs={'vol_pid': mockobj.pid, 'pid': page['pid']}),
msg_prefix='search results should link to full page view')
self.assertContains(response, '... %s ...' % page['solr_highlights']['page_text'][0],
msg_prefix='solr snippets should display when available')
# ajax request
with patch('readux.books.views.VolumeDetail.get_context_data') as mock_ctx:
results = NonCallableMagicMock(spec=['__iter__', 'facet_counts', 'highlighting'])
results.__iter__.return_value = iter(solr_result)
results.highlighting = {
solr_results[0].pid: {
'page_text': 'sample highlighting snippet'
},
solr_results[1].pid: {
'page_text': 'another highlighting snippet'
}
}
mockpage = NonCallableMagicMock(spec=['__iter__'])
mockpage.object_list = results
mock_ctx.return_value = {
'pages': mockpage,
}
response = self.client.get(vol_url, {'keyword': 'determine'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual('application/json', response['content-type'])
data = json.loads(response.content)
for idx in range(len(data)):
self.assertEqual(solr_results[idx].pid, data[idx]['pid'])
self.assertEqual('p. %s' % solr_results[idx]['page_order'],
data[idx]['label'])
self.assertEqual(reverse('books:page-image', kwargs={'vol_pid': mockobj.pid,
'pid': solr_results[idx].pid, 'mode': 'mini-thumbnail'}),
data[idx]['thumbnail'])
self.assertEqual(results.highlighting[solr_results[idx].pid]['page_text'],
data[idx]['highlights'])
@patch('readux.books.views.Repository')
@patch('readux.books.views.Paginator', spec=Paginator)
# @patch('readux.books.views.solr_interface')
# def test_volume_pages(self, mocksolr_interface, mockpaginator, mockrepo):
def test_volume_pages(self, mockpaginator, mockrepo):
mockvol = NonCallableMock(spec=Volume)
mockvol.pid = 'vol:1'
mockvol.title = 'Lecoq, the detective'
mockvol.date = ['1801']
# second object retrieved from fedora is page, for layout
mockvol.width = 150
mockvol.height = 200
# volume url needed to identify annotations for pages in this volume
mockvol.get_absolute_url.return_value = reverse('books:volume',
kwargs={'pid': mockvol.pid})
mockrepo.return_value.get_object.return_value = mockvol
mockvol.find_solr_pages = MagicMock()
mockvol.find_solr_pages.return_value.count = 3
mockvol.find_solr_pages.__len__.return_value = 3
mockpage = Mock(width=640, height=400)
mockvol.pages = [mockpage]
vol_page_url = reverse('books:pages', kwargs={'pid': mockvol.pid})
response = self.client.get(vol_page_url)
# volume method should be used to find pages
self.assert_(call() in mockvol.find_solr_pages.call_args_list)
# volume should be set in context
self.assert_(mockvol, response.context['vol'])
# annotated pages should be empty for anonymous user
self.assertEqual({}, response.context['annotated_pages'])
# log in as a regular user
self.client.login(**self.user_credentials['user'])
testuser = get_user_model().objects.get(username=self.user_credentials['user']['username'])
page1_url = reverse('books:page', kwargs={'vol_pid': mockvol.pid, 'pid': 'page:1'})
page2_url = reverse('books:page', kwargs={'vol_pid': mockvol.pid, 'pid': 'page:2'})
page3_url = reverse('books:page', kwargs={'vol_pid': mockvol.pid, 'pid': 'page:3'})
mockvol.page_annotation_count.return_value = {
absolutize_url(page1_url): 5,
absolutize_url(page2_url): 2,
page3_url: 13
}
response = self.client.get(vol_page_url)
mockvol.page_annotation_count.assert_called_with(testuser)
annotated_pages = response.context['annotated_pages']
# counts should be preserved; urls should be non-absolute
# whether they started that way or not
self.assertEqual(5, annotated_pages[absolutize_url(page1_url)])
self.assertEqual(2, annotated_pages[absolutize_url(page2_url)])
self.assertEqual(13, annotated_pages[page3_url])
@patch('readux.books.views.TypeInferringRepository')
def test_view_page(self, mockrepo):
mockobj = Mock()
mockobj.pid = 'page:1'
mockobj.volume.pid = 'vol:1'
mockrepo.return_value.get_object.return_value = mockobj
url = reverse('books:page',
kwargs={'vol_pid': mockobj.volume.pid, 'pid': mockobj.pid})
# doesn't exist
mockobj.exists = False
response = self.client.get(url)
self.assertEqual(404, response.status_code,
'page view should 404 when object doesn\'t exist')
# exists but not a page object
mockobj.exists = True
response = self.client.get(url)
self.assertEqual(404, response.status_code,
'page view should 404 when object isn\'t a Page object')
# page object
mockobj = NonCallableMagicMock(spec=Page)
mockobj.pid = 'page:5'
mockobj.page_order = 5
mockobj.display_label = 'Page 5'
mockobj.volume.pid = 'vol:1'
# first test without tei
mockobj.tei = NonCallableMock() # non-magic mock, to simplify template logic
mockobj.tei.exists = False
# uses solr to find adjacent pages
solr_result = NonCallableMagicMock(spec_set=['__iter__'])
# *only* mock iter, to avoid weirdness with django templates & callables
nearby_pages = [
{'pid': 'page:4', 'page_order': '4'},
{'pid': 'page:5', 'page_order': '5'},
{'pid': 'page:5', 'page_order': '6'},
]
solr_result.__iter__.return_value = nearby_pages
mocksolr_query = MagicMock()
mocksolr_query.__iter__.return_value = iter(solr_result)
mocksolr_query.__len__.return_value = 3
# cheating here, since we know what index should be requested...
mocksolr_query.__getitem__.return_value = nearby_pages[2]
mocksolr_query.query.return_value = mocksolr_query
mockobj.volume.find_solr_pages.return_value = mocksolr_query
mockrepo.return_value.get_object.return_value = mockobj
response = self.client.get(url)
# test expected context variables
self.assertEqual(mockobj, response.context['page'],
'page object should be set in context')
self.assertEqual(nearby_pages[0], response.context['prev'],
'previous page should be selected from solr result and set in context')
self.assertEqual(nearby_pages[2], response.context['next'],
'next page should be selected from solr result and set in context')
self.assertEqual(1, response.context['page_chunk'],
'chunk of paginated pages should be calculated and set in context')
self.assertNotContains(response,
reverse('books:page-tei',
kwargs={'vol_pid': mockobj.volume.pid, 'pid': mockobj.pid}),
msg_prefix='page without tei should NOT link to tei in header')
# TODO:
# - test metadata in header (twitter/og fields)
# - test page image, deep zoom content, title display, etc
# test with tei available
mockobj.tei.exists = True
mockobj.tei.content.page.width = 2000
mockobj.tei.content.page.height = 1500
# for now, simulate no ocr content
mockobj.tei.content.lines = []
response = self.client.get(url)
# scale from original page size (long edge) to display size (1000)
self.assertEqual(0.5, response.context['scale'],
'page scale should be calculated and set in context')
# TODO: test tei text content display?
# FIXME: for some reason, the mocks are not being processed
# correctly and even though the view can access the volume pid,
# the template has this:
# TypeInferringRepository().get_object().volume.__getitem__()'%20id='4568359696'%3E/pages/page:5/tei/" />
# Test is disabled until this issue can be fixed.
# self.assertContains(response,
# '<link rel="alternate" type="text/xml" href="%s" />' % \
# reverse('books:page-tei',
# kwargs={'vol_pid': mockobj.volume.pid, 'pid': mockobj.pid}),
# html=True,
# msg_prefix='page with tei should link to tei in header')
@patch('readux.books.views.PageTei.repository_class')
def test_page_tei(self, mockrepo_class):
mockobj = Mock()
mockobj.exists = True
mockobj.pid = 'page:1'
mockobj.volume.pid = 'vol:1'
mockds = mockobj.getDatastreamObject.return_value
mockds.exists = True
mockds.created = datetime.now()
mockds.info.size = 100
mockrepo = mockrepo_class.return_value
mockrepo.get_object.return_value = mockobj
# get datastream called by etag/last-modified methods
mockrepo.api.getDatastream.return_value.content = self.xml_profile
mockrepo.api.getDatastream.return_value.url = 'http://fedora.co/objects/ds'
# required so raw_ds view can update with local headers
mockrepo.api.getDatastreamDissemination.return_value.headers = {}
url = reverse('books:page-tei',
kwargs={'vol_pid': mockobj.volume.pid, 'pid': mockobj.pid})
response = self.client.get(url)
# class-based view, can no longer test parameters to raw_datastream
# only custom logic is the header, and configuration
self.assertEqual('filename="%s_tei.xml"' % mockobj.pid.replace(':', '-'),
response['content-disposition'],
'tei response should have a content-disposition header set')
mockrepo.api.getDatastreamDissemination.assert_called_with(mockobj.pid,
Page.tei.id, asOfDateTime=None, rqst_headers={}, stream=True)
@patch('readux.books.views.TypeInferringRepository')
def test_page_redirect(self, mockrepo):
mockobj = Mock()
mockobj.pid = 'page:1'
mockobj.volume.pid = 'vol:1'
mockrepo.return_value.get_object.return_value = mockobj
url = reverse('books:old-pageurl-redirect',
kwargs={'pid': mockobj.pid, 'path': ''})
# doesn't exist
mockobj.exists = False
response = self.client.get(url)
self.assertEqual(404, response.status_code,
'page redirect view should 404 when object doesn\'t exist')
# exists but not a page object
mockobj.exists = True
response = self.client.get(url)
self.assertEqual(404, response.status_code,
'page redirect view should 404 when object isn\'t a Page object')
# page object
mockobj = Mock(spec=Page)
mockobj.pid = 'page:5'
mockobj.volume.pid = 'vol:1'
mockobj.exists = True
mockrepo.return_value.get_object.return_value = mockobj
response = self.client.get(url, follow=False)
url_args = {
'kwargs': {
'vol_pid': mockobj.volume.pid,
'pid': mockobj.pid
}
}
self.assertEqual(301, response.status_code,
'page redirect view should return a permanent redirect')
self.assertEqual('http://testserver%s' % \
reverse('books:page', **url_args),
response['location'])
# test a couple of sub page urls
url = reverse('books:old-pageurl-redirect',
kwargs={'pid': mockobj.pid, 'path': 'tei/'})
response = self.client.get(url, follow=False)
self.assertEqual(301, response.status_code,
'page redirect view should return a permanent redirect')
self.assertEqual('http://testserver%s' % \
reverse('books:page-tei', **url_args),
response['location'])
url = reverse('books:old-pageurl-redirect',
kwargs={'pid': mockobj.pid, 'path': 'ocr/'})
response = self.client.get(url, follow=False)
self.assertEqual(301, response.status_code,
'page redirect view should return a permanent redirect')
self.assertEqual('http://testserver%s' % \
reverse('books:page-ocr', **url_args),
response['location'])
@patch('readux.books.sitemaps.solr_interface')
def test_sitemaps(self, mocksolr_interface):
# minimal test, just to check that sitemaps render without error
response = self.client.get(reverse('sitemap-index'))
self.assertContains(response, 'sitemapindex')
response = self.client.get(reverse('sitemap', kwargs={'section': 'volumes'}))
self.assertContains(response, '<urlset')
@patch('readux.books.views.Repository')
def test_volume_export(self, mockrepo):
mockobj = NonCallableMock()
mockobj.pid = 'vol:1'
mockobj.title = 'Lecoq, the detective'
mockobj.volume = 'V.1'
mockobj.date = ['1801']
mockrepo.return_value.get_object.return_value = mockobj
# to support for last modified conditional
mockobj.ocr.created = datetime.now()
# anonymous
export_url = reverse('books:webexport', kwargs={'pid': mockobj.pid})
response = self.client.get(export_url)
self.assertContains(response,
'''<div class="alert alert-warning">Export functionality is only available
to logged in users.</div>''',
msg_prefix='Anonymous user should see warning when viewing export page',
html=True)
# log in as a regular user
self.client.login(**self.user_credentials['user'])
response = self.client.get(export_url)
self.assert_('export_form' in response.context,
'export form should be set in response context for logged in user')
self.assertContains(response, 'Export to GitHub requires a GitHub account.',
msg_prefix='user should see a warning about github account')
## tests for view helpers
class ViewHelpersTest(TestCase):
@patch('readux.books.view_helpers.Repository')
@patch('readux.books.view_helpers.solr_interface')
def test_volume_pages_modified(self, mocksolr_interface, mockrepo):
mockvol = Mock(pid='vol:1')
mockrepo.return_value.get_object.return_value = mockvol
mockrequest = Mock()
mockrequest.user.is_authenticated.return_value = False
# no solr results
mockresult = MagicMock()
mocksolr_interface.return_value.query.return_value.sort_by.return_value.field_limit.return_value = mockresult
mockresult.count.return_value = 0
lastmod = view_helpers.volume_pages_modified(mockrequest, 'vol:1')
self.assertEqual(None, lastmod)
# only solr result
mockresult.count.return_value = 1
yesterday = datetime.now() - timedelta(days=1)
mockresult.__getitem__.return_value = {'timestamp': yesterday}
lastmod = view_helpers.volume_pages_modified(mockrequest, 'vol:1')
self.assertEqual(yesterday, lastmod)
# test with both solr and annotations for logged in user
mockvol.get_absolute_url.return_value = reverse('books:volume', kwargs={'pid': mockvol.pid})
mockrequest.user.is_authenticated.return_value = True
mockrequest.user.username = 'tester'
testuser = get_user_model()(username='tester')
testuser.save()
anno = Annotation.objects.create(user=testuser,
uri=reverse('books:page', kwargs={'vol_pid': mockvol.pid,
'pid': 'page:3'}), extra_data=json.dumps({}))
mockvol.annotations.return_value = Annotation.objects.filter(uri__contains=mockvol.get_absolute_url())
lastmod = view_helpers.volume_pages_modified(mockrequest, 'vol:1')
self.assertEqual(anno.created, lastmod)
class SitemapTestCase(TestCase):
@patch('readux.books.sitemaps.solr_interface')
def test_volume_sitemap(self, mocksolr_interface):
vol_sitemap = sitemaps.VolumeSitemap()
mocksolr = mocksolr_interface.return_value
# check for expected solr query
vol_sitemap.items()
mocksolr.query.assert_called_with(content_model=Volume.VOLUME_CMODEL_PATTERN)
mocksolr.query.return_value.field_limit.assert_called_with(['pid', 'last_modified'])
@patch('readux.books.sitemaps.solr_interface')
def test_volume_page_sitemap(self, mocksolr_interface):
volpage_sitemap = sitemaps.VolumePageSitemap()
mocksolr = mocksolr_interface.return_value
# check for expected solr query
volpage_sitemap.items()
mocksolr.query.assert_called_with(content_model=Page.PAGE_CMODEL_PATTERN)
mocksolr.query.return_value.field_limit.assert_called_with(['pid', 'last_modified',
'isConstituentOf'])
class BookSearchTest(TestCase):
def test_search_terms(self):
form = forms.BookSearch({'keyword': 'term "exact phrase" term2'})
self.assertTrue(form.is_valid())
terms = form.search_terms()
self.assert_('term' in terms)
self.assert_('term2' in terms)
self.assert_('exact phrase' in terms)
# test searching on page ark
ark = 'http://testpid.library.emory.edu/ark:/25593/pwtbb'
form = forms.BookSearch({'keyword': ark})
self.assertTrue(form.is_valid())
terms = form.search_terms()
self.assertEqual([ark], terms)
```
#### File: readux/books/view_helpers.py
```python
import datetime
from django.conf import settings
from django.utils import timezone
import os
from eulfedora.views import datastream_etag
from eulfedora.server import Repository
from eulfedora.util import RequestFailed
from readux.annotations.models import Annotation
from readux.books.models import Volume, VolumeV1_0, Page, PageV1_0
from readux.utils import solr_interface, md5sum
'''
Conditional methods for calculating last modified time and ETags
for view methods in :mod:`readux.books.views`.
.. Note::
In many cases, the Solr indexing timestamp is used rather than the object
modification time, as this may account for changes to the site or indexing
(including adding pages to a volume that is otherwise unchanged).
'''
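# These helpers are intended to be wired into views via Django's HTTP
# conditional-processing decorators, e.g. (hypothetical wiring, shown only
# as a comment):
#
#   from django.views.decorators.http import condition
#
#   @condition(last_modified_func=volume_pages_modified)
#   def volume_pages_view(request, pid):
#       ...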
def volumes_modified(request, *args, **kwargs):
'last modification time for all volumes'
solr = solr_interface()
results = solr.query(content_model=VolumeV1_0.VOLUME_CONTENT_MODEL) \
.sort_by('-timestamp').field_limit('timestamp')
# NOTE: using solr indexing timestamp instead of object last modified, since
# if an object's index has changed it may have been modified
# if user is logged in, changes in annotation totals result
# in volume page display modifications
latest_note = None
if request.user.is_authenticated():
latest_note = Annotation.objects.visible_to(request.user) \
.last_created_time()
solrtime = results[0]['timestamp'] if results.count() else None
return solrtimestamp_or_datetime(solrtime, latest_note)
def volume_modified(request, pid):
'last modification time for a single volume'
solr = solr_interface()
results = solr.query(content_model=VolumeV1_0.VOLUME_CONTENT_MODEL,
pid=pid) \
.sort_by('-timestamp').field_limit('timestamp')
# NOTE: using solr indexing timestamp instead of object last modified, since
# if an object's index has changed it may have been modified,
# and index timestamp for a volume will be updated when pages are added
# if a user is logged in, page should show as modified
# when annotation count changes
latest_note = None
if request.user.is_authenticated():
# NOTE: shouldn't be very expensive to init volume here; not actually
# making any api calls, just using volume to get volume
# uri and associated annotations
repo = Repository()
vol = repo.get_object(pid, type=Volume)
# newest annotation creation for pages in this volume
latest_note = vol.annotations().visible_to(request.user) \
.last_created_time()
solrtime = results[0]['timestamp'] if results.count() else None
return solrtimestamp_or_datetime(solrtime, latest_note)
def volume_pages_modified(request, pid):
'''Last modification time for a single volume or its pages, or for
any annotations of those pages.'''
solr = solr_interface()
repo = Repository()
vol = repo.get_object(pid, type=Volume)
# NOTE: some overlap with Volume find_solr_pages method...
results = solr.query((solr.Q(content_model=Volume.VOLUME_CMODEL_PATTERN) & solr.Q(pid=pid)) | \
(solr.Q(content_model=Page.PAGE_CMODEL_PATTERN) & solr.Q(isConstituentOf=vol.uri))) \
.sort_by('-timestamp').field_limit('timestamp')
# NOTE: using solr indexing timestamp instead of object last modified, since
# if an object's index has changed it may have been modified,
# and index timestamp for a volume will be updated when pages are added
# Page could also be modified based on annotations of the pages.
# We only show total counts per page, so might not be modified if the
# total number has not changed, but simplest just to get last modification
# date in case of changes.
# Note that this does NOT account for annotation deletions.
# if a user is logged in, page should show as modified
# based on annotations
# Only displaying annotation *count* so creation time should
# be sufficient. (Does not take into account deletions...)
latest_note = None
if request.user.is_authenticated():
# get annotations for pages in this volume
try:
latest_note = vol.annotations().visible_to(request.user) \
.last_created_time()
except Annotation.DoesNotExist:
# no notes for this volume
pass
solrtime = results[0]['timestamp'] if results.count() else None
return solrtimestamp_or_datetime(solrtime, latest_note)
def page_modified(request, vol_pid, pid):
'last modification time for a single page'
solr = solr_interface()
# TODO: use volume pid in query
results = solr.query(content_model=PageV1_0.PAGE_CONTENT_MODEL,
pid=pid) \
.sort_by('-timestamp').field_limit('timestamp')
# if user is logged in, page should show as modified
# when annotations have changed
latest_note = None
if request.user.is_authenticated():
# last update for annotations on this volume, if any
repo = Repository()
page = repo.get_object(pid, type=Page)
latest_note = page.annotations().visible_to(request.user) \
.last_updated_time()
solrtime = results[0]['timestamp'] if results.count() else None
return solrtimestamp_or_datetime(solrtime, latest_note)
def solrtimestamp_or_datetime(solrtime, othertime):
# Compare and return the more recent of a solr timestamp or an
# annotation datetime.
# convert solr timestamp to timezone-aware for comparison;
# return the most recent of the two
# FIXME: assuming solr stores as UTC, confirm this
if solrtime is not None and othertime is not None:
solrtime = timezone.make_aware(solrtime, timezone.utc)
return max(solrtime, othertime)
# if both are not set, return solr time if present
if solrtime is not None:
return solrtime
# if nothing has been returned, return other time (could be None)
return othertime
books_models_filename = os.path.join(settings.BASE_DIR, 'readux', 'books', 'models.py')
books_models_modified = datetime.datetime.fromtimestamp(os.path.getmtime(books_models_filename))
books_models_md5sum = md5sum(books_models_filename)
def unapi_modified(request):
'last-modification time for unapi; format list or metadata for a single item'
item_id = request.GET.get('id', None)
# if no id, just lists available formats
if item_id is None:
# configuration is based on Volume class definition, so should only
# change if the file has changed
return books_models_modified
# metadata for a specific record
else:
return volume_modified(request, item_id)
def unapi_etag(request):
'etag for unapi'
item_id = request.GET.get('id', None)
# if no id, just lists available formats
if item_id is None:
# configuration is based on Volume class definition, so should only
# change if the file has changed
return books_models_md5sum
# metadata for a specific record
else:
fmt = request.GET.get('format', None)
if fmt == 'rdf_dc':
return datastream_etag(request, item_id, Volume.dc.id, type=Volume)
def datastream_lastmodified(request, pid, dsid, type):
repo = Repository()
try:
obj = repo.get_object(pid, type=type)
ds = obj.getDatastreamObject(dsid)
if ds and ds.exists:
return ds.created
except RequestFailed:
pass
def pdf_etag(request, pid):
'etag for Volume PDF datastream'
return datastream_etag(request, pid, Volume.pdf.id)
def pdf_lastmodified(request, pid):
'last modified for Volume PDF datastream'
return datastream_lastmodified(request, pid, Volume.pdf.id, Volume)
def ocr_etag(request, pid):
'etag for Volume OCR datastream'
return datastream_etag(request, pid, VolumeV1_0.ocr.id)
def ocr_lastmodified(request, pid):
'last modified for Volume OCR datastream'
return datastream_lastmodified(request, pid, VolumeV1_0.ocr.id, Volume)
# TODO: consider full text etag/lastmodified methods that would work
# for both volume v1.0 and v1.1; if v1.0, simple returns ocr methods
# above; otherwise, no etag is available but last-modified could be pulled
# from most recent solr indexed page.
# (If this requires additional fedora api calls to determine type,
# may be too costly.)
def page_image_etag(request, pid, **kwargs):
'etag for Page image datastream'
return datastream_etag(request, pid, Page.image.id, type=Page)
def page_image_lastmodified(request, pid, **kwargs):
'last modified for Page image datastream'
return datastream_lastmodified(request, pid, Page.image.id, type=Page)
```
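The helpers above are designed to be handed to Django's HTTP-caching decorators so that unchanged volumes and datastreams can be answered with `304 Not Modified`. Below is a minimal, hypothetical wiring sketch; the URL pattern, path and name are illustrative and not taken from readux's actual `urls.py`.
```python
# Hypothetical urls.py fragment wiring the conditional helpers above into
# Django's caching decorators (pattern, path and name are illustrative).
from django.conf.urls import url
from django.views.decorators.http import condition

from readux.books import views, view_helpers

urlpatterns = [
    # Volume PDF: ETag and Last-Modified are derived from the PDF datastream,
    # so repeat requests can be answered with 304 Not Modified.
    url(r'^books/(?P<pid>[^/]+)/pdf/$',
        condition(etag_func=view_helpers.pdf_etag,
                  last_modified_func=view_helpers.pdf_lastmodified)(
            views.VolumePdf.as_view()),
        name='pdf'),
]
```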
#### File: readux/books/views.py
```python
from channels import Channel
from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.core.urlresolvers import reverse
from wsgiref.util import FileWrapper
from django.contrib.sites.shortcuts import get_current_site
from django.http import Http404, HttpResponse, HttpResponseNotFound, \
HttpResponsePermanentRedirect, StreamingHttpResponse, HttpResponseBadRequest, \
JsonResponse
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.utils.text import slugify
from django.views.decorators.http import condition, require_http_methods, \
last_modified
from django.views.decorators.vary import vary_on_cookie, vary_on_headers
from django.views.generic import ListView, DetailView, View
from django.views.generic.edit import FormMixin, ProcessFormView
from django.views.generic.base import RedirectView
from eulcommon.djangoextras.auth import login_required_with_ajax
import json
from urllib import urlencode
import os
import re
import requests
import logging
from eulfedora.server import Repository, TypeInferringRepository
from eulfedora.util import RequestFailed
from eulfedora.views import raw_datastream, RawDatastreamView
from readux.annotations.models import AnnotationGroup
from readux.books.models import Volume, SolrVolume, Page, VolumeV1_0, \
PageV1_1, SolrPage
from readux.books.forms import BookSearch, VolumeExport
from readux.books import view_helpers, annotate, export, github
from readux.utils import solr_interface, absolutize_url
from readux.views import VaryOnCookieMixin
logger = logging.getLogger(__name__)
class VolumeSearch(ListView):
'''Search across all volumes.'''
model = Volume
template_name = 'books/volume_search.html'
paginate_by = 10
context_object_name = 'items'
display_mode = 'list'
display_filters = []
sort_options = ['relevance', 'title', 'date added']
@method_decorator(last_modified(view_helpers.volumes_modified))
def dispatch(self, *args, **kwargs):
return super(VolumeSearch, self).dispatch(*args, **kwargs)
def get_queryset(self):
self.form = BookSearch(self.request.GET)
# sort: currently supports relevance, title, or date added
self.sort = self.request.GET.get('sort', None)
if self.form.is_valid():
# get list of keywords and phrases
terms = self.form.search_terms()
solr = solr_interface()
# generate queries text and boost-field queries
text_query = solr.Q()
author_query = solr.Q()
title_query = solr.Q()
for t in terms:
text_query |= solr.Q(t)
author_query |= solr.Q(creator=t)
title_query |= solr.Q(title=t)
q = solr.query().filter(content_model=Volume.VOLUME_CMODEL_PATTERN) \
.query(text_query | author_query**3 | title_query**3) \
.field_limit(SolrVolume.necessary_fields, score=True) \
.results_as(SolrVolume)
if self.sort not in self.sort_options:
# by default, sort by relevance score
self.sort = 'relevance'
if self.sort == 'relevance':
q = q.sort_by('-score')
elif self.sort == 'title':
# sort by title and then by label so multi-volume works should group
# together in the correct order
q = q.sort_by('title_exact').sort_by('label')
elif self.sort == 'date added':
q = q.sort_by('-created')
url_params = self.request.GET.copy()
# don't need to facet on collection if we are already filtered on collection
if 'collection' not in self.request.GET:
q = q.facet_by('collection_label_facet', sort='index', mincount=1)
self.display_filters = []
if 'collection' in self.request.GET:
filter_val = self.request.GET['collection']
# filter the solr query based on the requested collection
q = q.query(collection_label='"%s"' % filter_val)
# generate link to remove the facet
unfacet_urlopts = url_params.copy()
del unfacet_urlopts['collection']
self.display_filters.append(('collection', filter_val,
unfacet_urlopts.urlencode()))
# active filter - only show volumes with pages loaded
if 'read_online' in self.request.GET and self.request.GET['read_online']:
q = q.query(page_count__gte=2)
unfacet_urlopts = url_params.copy()
del unfacet_urlopts['read_online']
self.display_filters.append(('Read online', '',
unfacet_urlopts.urlencode()))
else:
# generate a facet count for books with pages loaded
q = q.facet_query(page_count__gte=2)
return q
else:
# empty 'queryset' result required by view methods
return []
def get_context_data(self):
context_data = super(VolumeSearch, self).get_context_data()
url_params = self.request.GET.copy()
sort_url_params = self.request.GET.copy()
if 'sort' in sort_url_params:
del sort_url_params['sort']
context_data.update({
'form': self.form,
'url_params': urlencode(url_params),
'mode': self.display_mode, # list / cover view
'current_url_params': urlencode(self.request.GET.copy()),
'sort': self.sort,
'sort_options': self.sort_options,
'sort_url_params': urlencode(sort_url_params),
})
        # get facets and annotations IF there are any search results
if context_data['object_list']:
# adjust facets as returned from solr for display
facet_counts = context_data['object_list'].facet_counts
facets = {}
collections = facet_counts.facet_fields.get('collection_label_facet', [])
# only include collections in facet if there are any
if collections:
facets['collection'] = collections
if facet_counts.facet_queries:
# number of volumes with pages loaded;
# facet query is a list of tuple; second value is the count
pages_loaded = facet_counts.facet_queries[0][1]
# only display if it is a facet, i.e. not all volumes
# in the result set have pages loaded
if pages_loaded < context_data['paginator'].count:
facets['pages_loaded'] = facet_counts.facet_queries[0][1]
# generate list for display and removal of active filters
q = self.get_queryset()
annotated_volumes = {}
if context_data['paginator'].count and self.request.user.is_authenticated():
notes = Volume.volume_annotation_count(self.request.user)
domain = get_current_site(self.request).domain.rstrip('/')
if not domain.startswith('https'):
domain = 'https://' + domain
annotated_volumes = dict([(k.replace(domain, ''), v)
for k, v in notes.iteritems()])
context_data.update({
'facets': facets, # available facets
'filters': self.display_filters, # active filters
'annotated_volumes': annotated_volumes
})
return context_data
class VolumeCoverSearch(VolumeSearch):
display_mode = 'covers'
class VolumeDetail(VaryOnCookieMixin, DetailView):
''' Landing page for a single :class:`~readux.books.models.Volume`.
If keyword search terms are specified, searches within the book and
finds matching pages.
'''
model = Volume
template_name = 'books/volume_detail.html'
search_template_name = 'books/volume_pages_search.html'
context_object_name = 'vol'
@method_decorator(last_modified(view_helpers.volume_modified))
@method_decorator(vary_on_headers('X-Requested-With')) # vary on ajax request
def dispatch(self, *args, **kwargs):
return super(VolumeDetail, self).dispatch(*args, **kwargs)
def get_object(self, queryset=None):
# kwargs are set based on configured url pattern
pid = self.kwargs['pid']
repo = Repository(request=self.request)
vol = repo.get_object(pid, type=Volume)
if not vol.exists or not vol.is_a_volume:
raise Http404
return vol
def get_template_names(self):
# search results require a different template
if self.form.is_valid():
return self.search_template_name
return self.template_name
def get_context_data(self, **kwargs):
context_data = super(VolumeDetail, self).get_context_data()
        # form for searching within this volume
self.form = BookSearch(self.request.GET)
context_data['form'] = self.form
# if form is valid, then search within the book and display matching pages
# instead of volume info
if self.form.is_valid():
terms = self.form.search_terms()
solr = solr_interface()
query = solr.Q()
for t in terms:
# NOTE: should this be OR or AND?
query |= solr.Q(page_text=t)
if t.isnumeric():
query |= solr.Q(page_order=t)**2
query |= solr.Q(identifier=t)**3
# search for pages that belong to this book
q = solr.query().filter(content_model=Page.PAGE_CMODEL_PATTERN,
isConstituentOf=self.object.uri) \
.query(query) \
.field_limit(['page_order', 'pid', 'identifier'], score=True) \
.highlight('page_text', snippets=3) \
.sort_by('-score').sort_by('page_order') \
.results_as(SolrPage)
# return highlighted snippets from page text
# sort by relevance and then by page order
# paginate the solr result set
paginator = Paginator(q, 30)
try:
page = int(self.request.GET.get('page', '1'))
except ValueError:
page = 1
try:
results = paginator.page(page)
except (EmptyPage, InvalidPage):
results = paginator.page(paginator.num_pages)
# NOTE: highlight snippets are available at
# results.object_list.highlighting but are *NOT* currently
# getting propagated to solrpage objects
# url parameters for pagination
url_params = self.request.GET.copy()
if 'page' in url_params:
del url_params['page']
context_data.update({
'pages': results,
'url_params': urlencode(url_params),
# provided for consistency with class-based view pagination
'paginator': paginator,
'page_obj': results
})
else:
# if not searching the volume, get annotation count for display
            # - annotation is only possible on books with pages loaded
if self.object.has_pages:
# uses same dictionary lookup form as for browse/search volume
annotation_count = self.object.annotation_count(self.request.user)
if annotation_count != 0:
context_data['annotated_volumes'] = {
self.object.get_absolute_url(): annotation_count
}
# enable annotation search if any annotations are present
context_data['annotation_search_enabled'] = bool(annotation_count)
return context_data
def render_to_response(self, context, **response_kwargs):
# return json to ajax request or when requested;
# currently used for annotation related pages autocomplete
if self.request.is_ajax() or self.request.GET.get('format', '') == 'json':
solr_result = context['pages']
highlighting = {}
if solr_result.object_list.highlighting:
highlighting = solr_result.object_list.highlighting
data = [{
'pid': result.pid,
# extra logic to handle records where ARK is not
# present (should only happen in dev)
'uri': next(iter([uri for uri in result['identifier']
if 'ark:' in uri]), ''),
'label': 'p. %s' % result['page_order'],
'thumbnail': reverse('books:page-image',
kwargs={'mode': 'mini-thumbnail', 'pid': result.pid,
'vol_pid': self.object.pid}),
'highlights': highlighting.get(result.pid, {}).get('page_text', '')
} for result in solr_result.object_list]
return JsonResponse(data, safe=False)
else:
return super(VolumeDetail, self).render_to_response(context, **response_kwargs)
class VolumePageList(VaryOnCookieMixin, ListView):
'''Display a paginated list of :class:`~readux.books.models.Page`
objects associated with a single :class:`~readux.books.models.Volume`.
Pages are displayed by thumbnail; thumbnails include an annotation count
indicator for logged in users with annotations.
'''
template_name = 'books/volume_pages_list.html'
paginate_by = 30
context_object_name = 'pages'
@method_decorator(last_modified(view_helpers.volume_pages_modified))
def dispatch(self, *args, **kwargs):
return super(VolumePageList, self).dispatch(*args, **kwargs)
def get_queryset(self):
self.repo = Repository(request=self.request)
# store the volume for use in get_context_data
self.vol = self.repo.get_object(self.kwargs['pid'], type=Volume)
if not self.vol.exists or not self.vol.is_a_volume:
raise Http404
return self.vol.find_solr_pages()
def get_context_data(self, **kwargs):
context_data = super(VolumePageList, self).get_context_data()
context_data.update({
'vol': self.vol,
'form': BookSearch(), # form for searching in this book
})
# if user is authenticated, check for annotations on this volume
if self.request.user.is_authenticated():
notes = self.vol.page_annotation_count(self.request.user)
# method returns a dict for easy lookup;
# strip out base site url for easy lookup in the template
# (need leading / left to match item urls)
domain = get_current_site(self.request).domain.rstrip('/')
if not domain.startswith('https'):
domain = 'https://' + domain
annotated_pages = dict([(k.replace(domain, ''), v)
for k, v in notes.iteritems()])
else:
annotated_pages = {}
context_data.update({
'annotated_pages': annotated_pages,
'annotation_search_enabled': bool(annotated_pages)
})
# Check if the first page of the volume is wider than it is tall
# to set the layout of the pages
first_page = self.vol.pages[0]
if first_page.width > first_page.height:
layout = 'landscape'
else:
layout = 'default'
context_data['layout'] = layout
return context_data
#: size used for scaling single page image
SINGLE_PAGE_SIZE = 1000
class PageDetail(VaryOnCookieMixin, DetailView):
'''View a single page in a book.'''
model = Page
template_name = 'books/page_detail.html'
context_object_name = 'page'
@method_decorator(last_modified(view_helpers.page_modified))
def dispatch(self, *args, **kwargs):
return super(PageDetail, self).dispatch(*args, **kwargs)
def get_object(self, queryset=None):
# NOTE: type inferring repository needed to load pages as correct type
# of Page (v1.0 or v1.1)
repo = TypeInferringRepository(request=self.request)
page = repo.get_object(self.kwargs['pid'])
if not page.exists or not isinstance(page, Page):
raise Http404
return page
def get_context_data(self, **kwargs):
context_data = super(PageDetail, self).get_context_data()
# use solr to find adjacent pages to this one
pagequery = self.object.volume.find_solr_pages()
# search range around current page order
# (+/-1 should probably work, but using 2 to allow some margin for error)
pagequery = pagequery.query(page_order__range=(self.object.page_order - 2,
self.object.page_order + 2))
# find the index of the current page in the sorted solr result
index = 0
prev = nxt = None
for p in pagequery:
if p['pid'] == self.object.pid:
break
index += 1
prev = p
if len(pagequery) > index + 1:
nxt = pagequery[index + 1]
# calculates which paginated page the page is part of based on 30 items per page
page_chunk = ((self.object.page_order - 1) // 30) + 1
# form for searching in this book
form = BookSearch()
# currently only pagev1_1 has tei
if hasattr(self.object, 'tei') and self.object.tei.exists:
# determine scale for positioning OCR text in TEI facsimile
# based on original image size in the OCR and image as displayed
# - find maximum of width/height
long_edge = max(self.object.tei.content.page.width,
self.object.tei.content.page.height)
# NOTE: using the size from image the OCR was run on, since that
# may or may not match the size of the master image loaded in
            # fedora, but the aspect ratio should be kept the same from
# original -> repository copy -> scaled copy used for display
# - determine scale to convert original size to display size
scale = float(SINGLE_PAGE_SIZE) / float(long_edge)
logger.debug('page size is %s, long edge is %s, scale is %f' % \
(SINGLE_PAGE_SIZE, long_edge, scale))
else:
scale = None
context_data.update({'next': nxt, 'prev': prev,
'page_chunk': page_chunk, 'form': form, 'scale': scale})
# if user is logged in, check for zotero account and pass
# token and user id through for annotation citation
if not self.request.user.is_anonymous():
zotero_account = self.request.user.social_auth.filter(provider='zotero').first()
if zotero_account:
context_data.update({
'zotero_userid': zotero_account.extra_data['access_token']['userID'],
'zotero_token': zotero_account.extra_data['access_token']['oauth_token']
})
# if user is logged in, check if annotations exist and
# search should be enabled
context_data['annotation_search_enabled'] = \
self.object.volume.annotations() \
.visible_to(user=self.request.user).exists()
return context_data
class PageDatastreamView(RawDatastreamView):
'''Base view for :class:`~readux.books.models.Page` datastreams.'''
object_type = Page
datastream_id = None
accept_range_request = False
pid_url_kwarg = 'pid'
repository_class = Repository
def get_headers(self):
return {
'Access-Control-Allow-Origin': '*'
}
class PageOcr(PageDatastreamView):
'''Display the page-level OCR content, if available (for
:class:`~readux.books.models.PageV1_1` objects this is xml,
for :class:`~readux.books.models.PageV1_0` this is text). Returns a
404 if this page object does not have page-level OCR.'''
object_type = PageV1_1
datastream_id = PageV1_1.ocr.id
class PageTei(PageDatastreamView):
'''Display the page-level TEI facsimile, if available. 404 if this page
object does not have TEI facsimile.'''
datastream_id = Page.tei.id
def get_headers(self):
headers = super(PageTei, self).get_headers()
headers.update({
# generate a default filename based on the object pid
'Content-Disposition': 'filename="%s_tei.xml"' % \
self.kwargs['pid'].replace(':', '-'),
})
return headers
class VolumeDatastreamView(RawDatastreamView):
'''Base view for :class:`~readux.books.models.Volume` datastreams.'''
object_type = Volume
datastream_id = None
accept_range_request = False
pid_url_kwarg = 'pid'
repository_class = Repository
# use streaming http response, to handle large files better
streaming = True
class VolumePdf(VolumeDatastreamView):
'''View to allow access to the PDF datastream of a
:class:`~readux.books.models.Volume` object. Sets a
content-disposition header that will prompt the file to be saved
with a default title based on the object label. If **download** is specified
in the query string (i.e., url/to/pdf/?download), then content-disposition
will be set to attachment, prompting for download.'''
datastream_id = Volume.pdf.id
def get_headers(self):
download = 'download' in self.request.GET
# if download is requested, set content-disposition to prompt download
attachment = 'attachment; ' if download else ''
# retrieve the object so we can use it to set the download filename
obj = self.get_repository().get_object(self.kwargs['pid'], type=self.object_type)
if not obj.exists:
raise Http404
if obj.exists: # assuming if exists passes we can get a label
return {
# generate a default filename based on the object label
'Content-Disposition': '%sfilename="%s.pdf"' % \
(attachment, obj.label.replace(' ', '-'))
}
class VolumeOcr(VolumeDatastreamView):
    '''View to allow access to the raw OCR xml datastream of a
:class:`~readux.books.models.Volume` object.
'''
datastream_id = VolumeV1_0.ocr.id
class VolumeText(VolumeOcr):
    '''View to allow access to the plain text content of a
:class:`~readux.books.models.Volume` object.
'''
# inherit to get etag/last-modified for ocr datastream
# NOTE: type-inferring is required here because volume could be either
# v1.0 or v.1.1 and method to pull the text content is different
repository_class = TypeInferringRepository
def get(self, request, *args, **kwargs):
repo = self.get_repository()
obj = repo.get_object(self.kwargs['pid'])
# if object doesn't exist, isn't a volume, or doesn't have ocr text - 404
if not obj.exists or not obj.has_requisite_content_models or not obj.fulltext_available:
raise Http404
response = HttpResponse(obj.get_fulltext(), 'text/plain')
# generate a default filename based on the object label
response['Content-Disposition'] = 'filename="%s.txt"' % \
obj.label.replace(' ', '-')
# NOTE: currently etag/last-modified will only work
# for volume v1.0 objects with an ocr datastream
return response
class VolumeTei(View):
def get(self, request, *args, **kwargs):
repo = TypeInferringRepository()
vol = repo.get_object(self.kwargs['pid'])
# if object doesn't exist, isn't a volume, or doesn't have tei text - 404
if not vol.exists or not vol.has_requisite_content_models or not vol.has_tei:
raise Http404
tei = vol.generate_volume_tei()
base_filename = '%s-tei' % vol.noid
if kwargs.get('mode', None) == 'annotated':
tei = annotate.annotated_tei(tei, vol.annotations() \
.filter(user=request.user))
base_filename += '-annotated'
logger.info('Exporting %s as annotated TEI for user %s',
vol.pid, request.user.username)
response = HttpResponse(tei.serialize(pretty=True),
content_type='application/xml')
# generate a default filename based on the object label
response['Content-Disposition'] = 'attachment;filename="%s.xml"' % \
base_filename
response.set_cookie('%s-tei-export' % vol.noid, 'complete', max_age=10)
return response
class AnnotatedVolumeExport(VaryOnCookieMixin, DetailView, FormMixin):
model = Volume
template_name = 'books/volume_export.html'
context_object_name = 'vol'
form_class = VolumeExport
user_has_github = False
github_account_msg = 'Export to GitHub requires a GitHub account.' + \
' Please authorize access to your GitHub account to use this feature.'
github_scope_msg = 'GitHub account has insufficient access. ' + \
'Please re-authorize your GitHub account to enable ' + \
' the permissions needed for export.'
def dispatch(self, *args, **kwargs):
return super(AnnotatedVolumeExport, self).dispatch(*args, **kwargs)
def get_object(self, queryset=None):
# kwargs are set based on configured url pattern
pid = self.kwargs['pid']
repo = Repository(request=self.request)
vol = repo.get_object(pid, type=Volume)
# 404 if object doesn't exist, isn't a volume, or doesn't have tei
if not vol.exists or not vol.is_a_volume or not vol.has_tei:
raise Http404
# NOTE: not currently an error if volume doesn't have any
# annotations, but export is probably not meaningful
return vol
def get_form_kwargs(self):
# keyword arguments needed to initialize the form
kwargs = super(AnnotatedVolumeExport, self).get_form_kwargs()
# add user, which is used to determine available groups
kwargs['user'] = self.request.user
# add flag to indicate if user has a github account
kwargs['user_has_github'] = self.user_has_github
return kwargs
def get_initial(self):
# initial data for the form
# construct a preliminary semi-reasonable github repo name
# based on the volume title
repo_name = slugify(self.object.title)
# remove first-word articles
repo_name = re.sub('^(a|the|de)-', '', repo_name)
pieces = repo_name.split('-')
# truncate down to first 5 words
if len(pieces) > 5:
repo_name = '-'.join(pieces[:5])
return {'github_repo': repo_name}
def get_context_data(self, **kwargs):
context_data = super(AnnotatedVolumeExport, self).get_context_data()
if not self.request.user.is_anonymous():
# check that user has a github account linked
try:
github.GithubApi.github_account(self.request.user)
self.user_has_github = True
except github.GithubAccountNotFound:
context_data['warning'] = self.github_account_msg
context_data['export_form'] = self.get_form()
return context_data
def render(self, request, **kwargs):
context_data = self.get_context_data()
context_data.update(kwargs)
return render(request, self.template_name, context_data)
# NOTE: processing the submitted form is now handled by
# readux.books.consumers.volume_export
# (form data is submitted via websocket)
class Unapi(View):
'''unAPI service point for :class:`~readux.books.models.Volume` objects,
to make content available for harvest via Zotero.'''
# NOTE: this could probably be generalized into a re-usable view
@method_decorator(condition(etag_func=view_helpers.unapi_etag,
last_modified_func=view_helpers.unapi_modified))
def dispatch(self, *args, **kwargs):
return super(Unapi, self).dispatch(*args, **kwargs)
def get(self, request):
context = {}
item_id = request.GET.get('id', None)
fmt = request.GET.get('format', None)
if item_id is not None:
context['id'] = item_id
repo = Repository(request=self.request)
            # a generalized class-based view would probably need a get-item method
# for repo objects, could use type-inferring repo variant
obj = repo.get_object(item_id, type=Volume)
formats = obj.unapi_formats
if fmt is None:
# display formats for this item
context['formats'] = formats
else:
current_format = formats[fmt]
# return requested format for this item
meth = getattr(obj, current_format['method'])
return HttpResponse(meth(), content_type=current_format['type'])
else:
# display formats for all items
# NOTE: if multiple classes, should be able to combine the formats
context['formats'] = Volume.unapi_formats
# NOTE: doesn't really even need to be a template, could be generated
# with eulxml just as easily if that simplifies reuse
return render(request, 'books/unapi_format.xml', context,
content_type='application/xml')
def _error_image_response(mode):
# error image http response for 401/404/500 errors when serving out
# images from fedora
error_images = {
'thumbnail': 'notfound_thumbnail.png',
'single-page': 'notfound_page.png',
'mini-thumbnail': 'notfound_mini_thumbnail_page.png',
}
# need a different way to catch it
if mode in error_images:
img = error_images[mode]
if settings.DEBUG:
base_path = settings.STATICFILES_DIRS[0]
else:
base_path = settings.STATIC_ROOT
with open(os.path.join(base_path, 'img', img)) as content:
return HttpResponseNotFound(content.read(), content_type='image/png')
class PageRedirect(RedirectView):
# redirect view for old page urls without volume pids
pattern_name = 'books:page'
permanent = True
def get_redirect_url(self, *args, **kwargs):
# NOTE: type inferring repository needed to load pages as correct type
# of Page (v1.0 or v1.1)
repo = TypeInferringRepository()
page = repo.get_object(kwargs['pid'])
if not page.exists or not isinstance(page, Page):
raise Http404
page_url = reverse(self.pattern_name,
kwargs={'vol_pid': page.volume.pid, 'pid': page.pid})
return ''.join([page_url, kwargs['path']])
class ProxyView(View):
# quick and dirty proxyview modeled on RedirectView
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(*args, **kwargs)
# use headers to allow browsers to cache downloaded copies
headers = {}
for header in ['HTTP_IF_MODIFIED_SINCE', 'HTTP_IF_UNMODIFIED_SINCE',
'HTTP_IF_MATCH', 'HTTP_IF_NONE_MATCH']:
if header in request.META:
headers[header.replace('HTTP_', '')] = request.META.get(header)
remote_response = requests.get(url, headers=headers)
local_response = HttpResponse()
local_response.status_code = remote_response.status_code
# include response headers, except for server-specific items
for header, value in remote_response.headers.iteritems():
if header not in ['Connection', 'Server', 'Keep-Alive', 'Link']:
# 'Access-Control-Allow-Origin', 'Link']:
# FIXME: link header is valuable, but would
# need to be made relative to current url
local_response[header] = value
# special case, for deep zoom (hack)
if kwargs['mode'] == 'info':
data = remote_response.json()
# need to adjust the id to be relative to current url
# this is a hack, patching in a proxy iiif interface at this url
data['@id'] = absolutize_url(request.path.replace('/info/', '/iiif'))
local_response.content = json.dumps(data)
            # update content-length for change in data
local_response['content-length'] = len(local_response.content)
# needed to allow external site (i.e. jekyll export)
# to use deepzoom
local_response['Access-Control-Allow-Origin'] = '*'
else:
# include response content if any
local_response.content = remote_response.content
return local_response
def head(self, request, *args, **kwargs):
url = self.get_redirect_url(*args, **kwargs)
remote_response = requests.head(url)
response = HttpResponse()
for header, value in remote_response.headers.iteritems():
if header not in ['Connection', 'Server', 'Keep-Alive',
'Access-Control-Allow-Origin', 'Link']:
response[header] = value
return response
# class PageImage(RedirectView):
# NOTE: previously, was redirecting to loris, but currently the loris
# image server is not externally accessible
class PageImage(ProxyView):
    '''Local view for page images. These proxy requests to the
    configured IIIF image server, but allow for a local, semantic
    image url independent of image handling implementations
    to be referenced in annotations and exports.'''
def get_redirect_url(self, *args, **kwargs):
repo = TypeInferringRepository()
page = repo.get_object(kwargs['pid'], type=Page)
if kwargs['mode'] == 'thumbnail':
return page.iiif.thumbnail()
elif kwargs['mode'] == 'mini-thumbnail':
return page.iiif.mini_thumbnail()
elif kwargs['mode'] == 'single-page':
return page.iiif.page_size()
elif kwargs['mode'] == 'fs': # full size
return page.iiif
elif kwargs['mode'] == 'info':
# TODO: needs an 'Access-Control-Allow-Origin' header
# to allow jekyll sites to use for deep zoom
return page.iiif.info()
elif kwargs['mode'] == 'iiif':
return page.iiif.info().replace('info.json', kwargs['url'].strip('/'))
```
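The `Unapi` view above exposes a small unAPI endpoint driven entirely by the `id` and `format` query parameters. A hedged client-side sketch of the three request shapes it handles; the base URL and pid below are placeholders, while `rdf_dc` is the format name referenced in `unapi_etag`.
```python
# Hypothetical client sketch for the Unapi view above (host and pid are placeholders).
import requests

UNAPI_URL = 'https://readux.example.edu/unapi/'   # assumed deployment URL

# no id: XML listing of formats available for all items
all_formats = requests.get(UNAPI_URL)

# id only: formats available for one volume
item_formats = requests.get(UNAPI_URL, params={'id': 'vol:example'})

# id + format: the metadata record itself (e.g. Dublin Core RDF)
record = requests.get(UNAPI_URL, params={'id': 'vol:example', 'format': 'rdf_dc'})
print(record.status_code, record.headers.get('Content-Type'))
```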
#### File: management/commands/collection_descriptions.py
```python
from django.core.management.base import BaseCommand, CommandError
from readux.collection.models import Collection
from readux.collection.fixtures.collection_descriptions import descriptions
from readux.fedora import ManagementRepository
class Command(BaseCommand):
    '''Update collection descriptions from the local fixture data.
    Takes an optional list of pids; otherwise, looks for all Collection objects
    in the configured fedora instance.'''
help = __doc__
def handle(self, *pids, **options):
repo = ManagementRepository()
# if pids are specified on command line, only process those objects
if pids:
objs = [repo.get_object(pid, type=Collection) for pid in pids]
# otherwise, look for all volume objects in fedora
else:
objs = repo.get_objects_with_cmodel(Collection.COLLECTION_CONTENT_MODEL,
type=Collection)
for coll in objs:
if coll.pid in descriptions:
coll.dc.content.description = descriptions[coll.pid]
if coll.dc.isModified():
coll.save('updating description')
```
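For reference, this command would normally be run through `manage.py`; the sketch below invokes it programmatically. The collection pid is a placeholder, and the call assumes a Django version that still accepts positional arguments on `handle()`.
```python
# Hypothetical sketch: running the management command via call_command,
# equivalent to `python manage.py collection_descriptions [pid ...]`.
from django.core.management import call_command

# update descriptions for every Collection object found in Fedora
call_command('collection_descriptions')

# or limit the update to specific collection pids (placeholder value)
call_command('collection_descriptions', 'emory-control:example-collection')
```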
#### File: readux/readux/fedora.py
```python
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
import httplib
import urllib
from eulfedora import models, server
from eulxml.xmlmap.dc import DublinCore
from pidservices.clients import parse_ark
from pidservices.djangowrapper.shortcuts import DjangoPidmanRestClient
from readux.utils import absolutize_url
logger = logging.getLogger(__name__)
# try to configure a pidman client to get pids.
try:
pidman = DjangoPidmanRestClient()
except:
# if we're in dev mode then we can fall back on the fedora default
# pid allocator. in non-dev, though, we really need pidman
if getattr(settings, 'DEV_ENV', False):
logger.warn('Failed to configure PID manager client; default pid logic will be used')
pidman = None
else:
raise
class ManagementRepository(server.Repository):
'''Convenience class to initialize an instance of :class:`eulfedora.server.Repository`
with Fedora management/maintenance account credentials defined in Django settings.
.. Note::
This :class:`~eulfedora.server.Repository` variant should *only*
be used for maintainance tasks (e.g., scripts that ingest,
modify, or otherwise manage content). It should **not** be
used for general website views or access; those views should
use the standard :class:`~eulfedora.server.Repository` which
will pick up the default, non-privileged credentials intended
for read and display access but not for modifying content in
the repository.
'''
default_pidspace = getattr(settings, 'FEDORA_PIDSPACE', None)
# default pidspace is not automatically pulled from django conf
# when user/password are specified, so explicitly set it here
def __init__(self):
# explicitly disabling other init args, so that anyone who tries to use
# this as a regular repo will get errors rather than confusing behavior
super(ManagementRepository, self).__init__(username=settings.FEDORA_MANAGEMENT_USER,
password=settings.FEDORA_MANAGEMENT_PASSWORD)
class DigitalObject(models.DigitalObject):
"""Readux base :class:`~eulfedora.models.DigitalObject` class with logic
for setting and accessing pids based on PID manager ids."""
#: :class:`~eulfedora.models.XmlDatastream` for the required Fedora
#: **DC** datastream; datastream content loaded as an instance
#: of :class:`eulxml.xmlmap.dc.DublinCore`; overriding default
#: declaration in eulfedora to configure as Managed instead of Inline XML
dc = models.XmlDatastream("DC", "Dublin Core", DublinCore, defaults={
'control_group': 'M',
'format': 'http://www.openarchives.org/OAI/2.0/oai_dc/',
'versionable': True
})
# NOTE: we don't really need DC versioned, but there is a Fedora bug
# that requires Managed DC be versioned
#: :class:`~eulfedora.models.RdfDatastream` for the standard Fedora
#: **RELS-EXT** datastream; overriding to configure as Managed instead
#: of Inline XML
rels_ext = models.RdfDatastream("RELS-EXT", "External Relations", defaults={
'control_group': 'M',
'format': 'info:fedora/fedora-system:FedoraRELSExt-1.0',
})
def __init__(self, *args, **kwargs):
default_pidspace = getattr(settings, 'FEDORA_PIDSPACE', None)
kwargs['default_pidspace'] = default_pidspace
super(DigitalObject, self).__init__(*args, **kwargs)
self._default_target_data = None
@property
def noid(self):
pidspace, noid = self.pid.split(':')
return noid
@property
def ark_uri(self):
for dcid in self.dc.content.identifier_list:
if 'ark:/' in dcid:
return dcid
#: special pid token that tells pid manager to put the newly minted
# pid into the url
PID_TOKEN = '{%PID%}'
def get_default_pid(self):
'''Default pid logic for DigitalObjects in :mod:`readux`. Mint a
new ARK via the PID manager, store the ARK in the MODS
metadata (if available) or Dublin Core, and use the noid
portion of the ARK for a Fedora pid in the site-configured
Fedora pidspace.'''
global pidman
if pidman is not None:
# pidman wants a target for the new pid
# generate a pidman-ready target for a named view
# Use the object absolute url method
# NOTE: this requires that all values used in a url be set
# (i.e., page objects must have volume pid configured)
self.pid = '%s:%s' % (self.default_pidspace, self.PID_TOKEN)
target = self.get_absolute_url()
# reverse() encodes the PID_TOKEN and the :, so just unquote the url
# (shouldn't contain anything else that needs escaping)
target = urllib.unquote(target)
# reverse() returns a full path - absolutize so we get scheme & server also
target = absolutize_url(target)
# pid name is not required, but helpful for managing pids
pid_name = self.label
# ask pidman for a new ark in the configured pidman domain
try:
ark = pidman.create_ark(settings.PIDMAN_DOMAIN, target, name=pid_name)
except httplib.BadStatusLine:
logger.warn('Error creating ARK; re-initializing pidman client and trying again')
pidman = DjangoPidmanRestClient()
ark = pidman.create_ark(settings.PIDMAN_DOMAIN, target, name=pid_name)
# pidman returns the full, resolvable ark
# parse into dictionary with nma, naan, and noid
parsed_ark = parse_ark(ark)
noid = parsed_ark['noid'] # nice opaque identifier
# Add full uri ARK to dc:identifier
self.dc.content.identifier_list.append(ark)
# use the noid to construct a pid in the configured pidspace
return '%s:%s' % (self.default_pidspace, noid)
else:
# if pidmanager is not available, fall back to default pid behavior
return super(DigitalObject, self).get_default_pid()
def index_data_descriptive(self):
'''Extend the default :meth:`eulfedora.models.DigitalObject.index_data`
to do common clean up for all Readux indexing:
- If there are multiple titles, choose the longest one.
'''
data = super(DigitalObject, self).index_data_descriptive()
# a few books have multiple titles;
# if title is a list, choose the longest one
if 'title' in data and isinstance(data['title'], list):
title = ''
for d in data['title']:
if len(d) > len(title):
title = d
data['title'] = title
return data
```
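`get_default_pid()` above mints an ARK whose target is the object's own absolute URL, so a content subclass mainly needs to provide a resolvable `get_absolute_url()`. A minimal, hypothetical subclass sketch follows; the URL name `example:item` is an assumption.
```python
# Hypothetical content subclass of the readux DigitalObject base class above.
from django.core.urlresolvers import reverse

from readux.fedora import DigitalObject

class ExampleItem(DigitalObject):
    def get_absolute_url(self):
        # used by get_default_pid() as the target registered with the
        # PID manager when a new ARK is minted ('example:item' is assumed)
        return reverse('example:item', kwargs={'pid': self.pid})
```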
#### File: readux/pages/context_processors.py
```python
from feincms.module.page.models import Page
def default_page(request):
# always include a default feincms page, so we can retrieve
# top-level navigation
return {
'feincms_page': Page.objects.in_navigation().first()
}
```
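The processor only takes effect once it is listed in the template settings. A minimal, hypothetical `settings.py` fragment is shown below; the other entries are the usual Django defaults and may differ per project.
```python
# Hypothetical settings.py fragment registering the context processor so that
# `feincms_page` is available to every template.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                'readux.pages.context_processors.default_page',
            ],
        },
    },
]
```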
#### File: readux/readux/views.py
```python
from django.views.decorators.vary import vary_on_cookie
class VaryOnCookieMixin(object):
@classmethod
def as_view(cls, **initkwargs):
view = super(VaryOnCookieMixin, cls).as_view(**initkwargs)
return vary_on_cookie(view)
``` |
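Because the mixin overrides `as_view()`, it has to appear before the generic view class in the base list so that its override is reached in the MRO. A hedged usage sketch, with placeholder template name and queryset:
```python
# Hypothetical usage of VaryOnCookieMixin: rendered responses will carry
# a `Vary: Cookie` header. Template name and queryset are placeholders.
from django.views.generic import ListView

from readux.views import VaryOnCookieMixin

class ExampleList(VaryOnCookieMixin, ListView):
    template_name = 'example_list.html'
    queryset = []   # placeholder; normally a model queryset
```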
{
"source": "jpkeeton/python_practice",
"score": 3
} |
#### File: jpkeeton/python_practice/fib1.py
```python
def fib1(n: int) -> int:
    if n < 2: return n  # base cases: fib(0) = 0, fib(1) = 1
    return fib1(n - 1) + fib1(n - 2)
if __name__ == "__main__":
print(fib1(5))
```
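Naive recursive Fibonacci recomputes the same subproblems exponentially many times. One common remedy is memoization; the sketch below uses `functools.lru_cache`, and the `fib_memo` name is mine rather than part of the repository.
```python
# Sketch: memoized variant, caching one result per n so the call tree
# collapses from exponential to linear size.
from functools import lru_cache

@lru_cache(maxsize=None)
def fib_memo(n: int) -> int:
    if n < 2:
        return n
    return fib_memo(n - 1) + fib_memo(n - 2)

if __name__ == "__main__":
    print(fib_memo(50))
```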
#### File: jpkeeton/python_practice/fib5.py
```python
def fib5(n: int) -> int:
if n == 0: return n # special case
last: int = 0 # initially set to fib(0)
next: int = 1 # initially set to fib(1)
for _ in range(1,n):
last, next = next, last + next
return next
if __name__ == "__main__":
print(fib5(5))
print(fib5(50))
``` |
{
"source": "jpkell05/hockey-vizualization-webapp",
"score": 3
} |
#### File: jpkell05/hockey-vizualization-webapp/first_flask.py
```python
from flask import Flask, request, render_template
import numpy as np
import pandas as pd
from bokeh.plotting import figure, output_file, show
from bokeh.resources import INLINE
from bokeh.embed import components
from bokeh.util.string import encode_utf8
from bs4 import BeautifulSoup
import requests
app = Flask(__name__)
def make_fancy_plot(Team, Stat):
# Get Fancy Stats
source = requests.get('http://hkref.com/tiny/LMtAx').text
soup = BeautifulSoup(source, 'lxml')
data = []
for i in soup.find_all('tr'):
temp = []
for j in i.find_all('td'):
temp.append(j.text)
data.append(temp)
data = [i for i in data if len(i) > 1]
# Make dataframe
columns=['Team', 'Season', 'Corsi For', 'Corsi Against', 'Corsi For %', 'Fenwick For', 'Fenwick Against', 'Fenwick For %', 'Shooting %', 'Save %', 'PDO (S% + SV%)', 'Offensive Zone Start', 'Defensive Zone Start', 'FOW', 'FOL', 'FO%', 'Hits', 'Blocks']
df = pd.DataFrame(data, columns=columns)
df.replace('MDA', 'ANA', inplace=True)
df.replace('PHX', 'ARI', inplace=True)
df.replace('ATL', 'WPG', inplace=True)
df.Season = df.Season.str.slice(0,2) + df.Season.str.slice(5,7)
df = df.apply(pd.to_numeric, errors='ignore')
df.sort_values(by=['Team', 'Season'], inplace=True)
team_data = df[df.Team == Team]
X = team_data.Season
Y = team_data[Stat]
# output to static HTML file
#output_file("lines.html")
# create a new plot with a title and axis labels
p = figure(title="{} {}".format(Team, Stat), x_axis_label='Season', y_axis_label=Stat)
# add a line renderer with legend and line thickness
p.line(X, Y, line_width=2)
# show the results
#show(p)
script, div = components(p)
return script, div
@app.route("/", methods=['POST', 'GET'])
def index():
if request.method =='POST':
Team = request.form['Team']
Stat = request.form['Stat']
script, div = make_fancy_plot(Team, Stat)
js_resources = INLINE.render_js()
css_resources = INLINE.render_css()
html = render_template('index.html', plot_script=script,plot_div=div,js_resources=js_resources,css_resources=css_resources)
return encode_utf8(html)
else:
return render_template('index.html', plot_script='',plot_div='',js_resources='',css_resources='')
``` |
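The view hands `plot_script`/`plot_div` plus the inline Bokeh resources to an `index.html` template, which is expected to interpolate them into the page. The snippet below exercises the plotting function directly, outside the request cycle; `'TOR'` and `'Corsi For %'` are simply one valid team/column pair from the scraped table.
```python
# Hypothetical standalone use of make_fancy_plot from the module above.
from first_flask import make_fancy_plot

script, div = make_fancy_plot('TOR', 'Corsi For %')
# `script` is a <script> tag and `div` the matching placeholder <div>;
# a page template combines them with INLINE.render_js()/render_css().
print(div)
```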
{
"source": "jpkeskinen/P4UL",
"score": 2
} |
#### File: P4UL/pyNetCDF/compareNetCdf2D.py
```python
import netCDF4 as nc
import sys
import numpy as np
import matplotlib.pyplot as plt
import argparse
import scipy.ndimage as sn # contains the filters
from plotTools import addImagePlot
from netcdfTools import read3dDataFromNetCDF
from utilities import selectFromList
#==========================================================#
def readVar( fn, vstr, cl=1 ):
xDict = read3dDataFromNetCDF( fn , vstr , cl )
v = xDict['v']; x = xDict['x']; y = xDict['y']; z = xDict['z']
xDict = None
return v, x, y, z
#==========================================================#
def U_hd( fn, cl=1, direction=False ):
ut, xu, yu, zu = readVar( fn, 'u_xy', cl )
vt, xv, yv, zv = readVar( fn, 'v_xy', cl )
x = xv[:-1]; y = yu[:-1]; z = 0.5*(zu+zv)
  # average u in x and v in y onto common cell centers
  uc = 0.5*( ut[:,:,:-1,1:] + ut[:,:,:-1,:-1] )
  vc = 0.5*( vt[:,:,1:,:-1] + vt[:,:,:-1,:-1] )
if( direction ):
v = np.arctan( vc/(uc+1.E-5) ) * (180./np.pi)
else:
a = np.arctan( vc/(uc+1.E-5) )
v = uc * np.cos(a) + vc * np.sin(a)
return v, x, y, z
#==========================================================#
helpStr = '''
Diff mode:
'd': root mean square diff (RMSD),
'r': RMSD (relative delta),
's': RMSD (scaled delta),
'n': root normalized mean square diff.,
'f': fractional bias
'v': geometric variance
'''
methodList = ['d','r', 's','n','f','v','R']
parser = argparse.ArgumentParser(prog='compareNetCdf2D.py')
parser.add_argument("-f1", "--filename1",type=str, help="Name of the first (ref) input NETCDF file.")
parser.add_argument("-f2", "--filename2",type=str, help="Name of the second input NETCDF file.")
parser.add_argument("-v", "--varname", type=str, default='u',\
help="Name of the variable in NETCDF file. Default='u' ")
parser.add_argument("-v0", "--vref", type=float, nargs=2, default=[0.,0.],\
help="Reference values 'v0' in v+ = (v - v0)/v* for -f1 and -f2. Default = [0,0]")
parser.add_argument("-vs", "--vstar", type=float, nargs=2, default=[1.,1.],\
help="Characteristic value 'v*' in v+ = (v - v0)/v* for -f1 and -f2. Default = [1.,1.]")
parser.add_argument("-c", "--coarsen", type=int, nargs=2, default=[1,1],\
help="Factor for coarsening the -f1 and -f2 data when read from file. Default = [1,1]")
parser.add_argument("-m","--mode", type=str, default='d', choices=methodList,\
help=helpStr)
parser.add_argument("-w", "--writeFile", action="store_true", default=False,\
help="Write the root-mean-square of the differences to a file.")
parser.add_argument("-nxx1", "--nexclx1", type=int, nargs=2, default=[None,None],\
help="For -f1, exclude the [first,last] number of nodes from analysis in x-direction.")
parser.add_argument("-nxy1", "--nexcly1", type=int, nargs=2, default=[None,None],\
help="For -f1, exclude the [first,last] number of nodes from analysis in y-direction.")
parser.add_argument("-nxx2", "--nexclx2", type=int, nargs=2, default=[None,None],\
help="For -f2, exclude the [first,last] number of nodes from analysis in x-direction.")
parser.add_argument("-nxy2", "--nexcly2", type=int, nargs=2, default=[None,None],\
help="For -f2, exclude the [first,last] number of nodes from analysis in y-direction.")
parser.add_argument("-xs", "--exclsmall", help="Exclude values below |0.01| from analysis.",\
action="store_true", default=False)
parser.add_argument("-p", "--printOn", help="Print the numpy array data.",\
action="store_true", default=False)
parser.add_argument("-s", "--save", action="store_true", default=False,\
help="Save figures. Default=False")
parser.add_argument("--lims", help="User specified limits.", action="store_true", default=False)
parser.add_argument("--grid", help="Turn on grid.", action="store_true", default=False)
args = parser.parse_args()
#==========================================================#
# Rename ... that's all.
f1 = args.filename1 # './DATA_2D_XY_AV_NETCDF_N02-1.nc'
f2 = args.filename2 # './DATA_2D_XY_AV_NETCDF_N02-2.nc'
varname = args.varname
v0 = np.array(args.vref )
vs = np.array(args.vstar)
cl = np.array(args.coarsen)
mode = args.mode
nxx1 = args.nexclx1
nxy1 = args.nexcly1
nxx2 = args.nexclx2
nxy2 = args.nexcly2
exclSmall= args.exclsmall
writeFile= args.writeFile
printOn = args.printOn
saveOn = args.save
limsOn = args.lims
gridOn = args.grid
#----------------------------------------------------------#
Sdict = {'d':'RMSD','s':'RMSD (scaled)','r':'RMSD (rel)','n':'RNMSD','f':'FB',\
'v':'VG','R':'R'}
# Shorter name
vn = varname.split('_')[0]
dirOn = 'UD' in varname.upper()
horizOn = 'UH' in varname.upper()
# Default for excluded indices is [None,None]. If numerical values are given,
# the latter needs to be made negative.
if( nxx1.count(None) == 0 ): nxx1[1]*=-1
if( nxy1.count(None) == 0 ): nxy1[1]*=-1
if( nxx2.count(None) == 0 ): nxx2[1]*=-1
if( nxy2.count(None) == 0 ): nxy2[1]*=-1
if( (not horizOn) and (not dirOn) ):
#print('{}'.format(varname))
v1, x1, y1, z1 = readVar( f1, varname, cl[0] )
v2, x2, y2, z2 = readVar( f2, varname, cl[1] )
else:
v1, x1, y1, z1 = U_hd( f1, cl[0], dirOn )
v2, x2, y2, z2 = U_hd( f2, cl[1], dirOn )
if( not dirOn ):
v1 -= v0[0]; v1 /= vs[0]
v2 -= v0[1]; v2 /= vs[1]
idk = selectFromList( z1 )
if( writeFile ):
fout = open('{}_d{}.dat'.format(Sdict[mode],vn), 'w')
fout.write('# file1 = {}, file2 = {}\n'.format(f1, f2))
fout.write('# z_coord \t {}(d{})\n'.format(Sdict[mode],vn))
#fout.write('{:.2f}\t{:.2e}'.format( z1[k1], dv ))
for k1 in idk:
#k2 = np.where(z2==z1[k1])[0] # This outputs a list
k2 = np.where(np.abs(z2-z1[k1])==np.min(np.abs(z2-z1[k1])))[0]
if( len(k2) == 0 ):
print(' Coordinate {} not in file {}. Skipping.'.format(z1[k1],f2))
continue
else:
k2 = k2[0] # Take always the first term
if( len(v1.shape) == 4): v1x = np.mean(v1[:,k1,nxy1[0]:nxy1[1],nxx1[0]:nxx1[1]], axis=0)
else: v1x = v1[ k1,nxy1[0]:nxy1[1],nxx1[0]:nxx1[1]]
if( len(v2.shape) == 4): v2x = np.mean(v2[:,k2,nxy2[0]:nxy2[1],nxx2[0]:nxx2[1]], axis=0)
else: v2x = v2[ k2,nxy2[0]:nxy2[1],nxx2[0]:nxx2[1]]
# - - - - - - - - - - - - - - - - - - - - - - - - - - - #
dims1 = np.array( v1x.shape )
dims2 = np.array( v2x.shape )
if( all( dims1 == dims2 ) ):
print(' Dimensions of the two datasets match!: dims = {}'.format(dims1))
else:
print(' Caution! Dataset dimensions do not match. dims1 = {} vs. dims2 = {}'.format(dims1, dims2))
dx1 = (x1[2]-x1[1]); dy1 = (y1[2]-y1[1])
dx2 = (x2[2]-x2[1]); dy2 = (y2[2]-y2[1])
rr = int(np.round(dx2/dx1, decimals=0)); rry = int(np.round(dy2/dy1, decimals=0))
if( rr != rry ): sys.exit(' Resolution ratios are dissimilar. Exiting ...')
v2f = np.zeros( dims1 ) # Fine resolution
nc,ec = np.ogrid[ 0:dims1[0] , 0:dims1[1] ] # northing, easting ... fine resolution
nf,ef = np.ogrid[ 0:dims1[0] , 0:dims1[1] ] # northing, easting ... fine resolution
nc=nc//rr; ec=ec//rr # coarse indices
#nc = nc.astype(int); ec = ec.astype(int)
#nf = nf.astype(int); ef = ef.astype(int)
#np.savetxt('n.dat',np.c_[nf,nc], fmt='%.1f')
#np.savetxt('e.dat',np.c_[ef.T,ec.T], fmt='%.1f')
# Check bounds
nf = np.minimum( nf , dims1[0]-1)
ef = np.minimum( ef , dims1[1]-1)
nc = np.minimum( nc , dims2[0]-1)
ec = np.minimum( ec , dims2[1]-1)
# Perform value placement
v2f[nf, ef] += v2x[nc,ec]; v2x = None
v2x = v2f
# - - - - - - - - - - - - - - - - - - - - - - - - - - - #
if( not np.ma.isMaskedArray(v1x) and not np.ma.isMaskedArray(v2x) ):
idm = (v1x == v2x)
v1x = np.ma.masked_array( v1x, mask=idm )
v2x = np.ma.masked_array( v2x, mask=idm )
idm = None
idm = np.ma.getmask(v1x); print(' Nm = {}'.format(np.count_nonzero(idm)))
idz = (v2x == 0.0)
idm += idz
#idm = sn.binary_dilation(idm); print(' Nm = {}'.format(np.count_nonzero(idm)))
v1x = np.ma.masked_array( v1x, mask=idm)
v2x = np.ma.masked_array( v2x, mask=idm)
#v2x = np.ma.round( v2x, decimals=2 )
#v1x = np.ma.round( v1x, decimals=2 )
if( exclSmall ):
# Take values that are above 0.01 or below -0.01
idx = np.array( (v1x < 5.E-2) )
#idx = np.array( (v1x > -0.1) )
m1x = np.ma.getmask(v1x)
m1x += idx
m2 = np.ma.getmask(v2x)
m2 += idx
'''
id2x = np.array( np.abs(v2x) > 1E-2 )
vm2 = np.ma.mean( v2x[id2x] )
id1x = np.array( np.abs(v1x) > 1E-2 )
vm1 = np.ma.mean( v1x[id1x] )
dv = (v2x[id1x] - v1x[id1x] )
'''
vm1 = np.mean( v1x )
vm2 = np.mean( v2x )
print('k={}: vm1 = {}, vm2 = {} '.format(k1,vm1,vm2))
dv = (v2x - v1x)
# NOTE: We are using the desired indices obtained only from the reference (f1) data.
idnn = ~(dv == np.nan )
N = np.ma.count( np.ravel( dv[idnn] ) )
print('k={}: Number of good points, N = {}'.format(k1,N))
if( mode in ['r','s','d'] ):
if( mode == 'r' ): dv /= np.abs( v1x + 1E-5 )
if( mode == 's' ): dv /= ( vm1 + 1E-5 )
#if( mode == 'd' ): Keep as is: dv = (v2x - v1x)
RES = np.sqrt(np.sum(dv**2)/N)
SkewDiff = (1./N)*np.sum(dv**3) * ( 1./(N-1.)*np.sum(dv**2) )**(-1.5)
print('{} (d{}) = {}, Sk(d{}) = {} '.format(Sdict[mode], vn , RES, vn, SkewDiff ))
if( mode == 'n'):
v_min_th = np.sqrt(1.e-5)
vm1 = np.sign(vm1)*np.maximum( np.abs(vm1), v_min_th )
vm2 = np.sign(vm2)*np.maximum( np.abs(vm2), v_min_th )
denom = vm1*vm2
enum = np.sum(dv**2)/N
RES = np.sqrt( enum/np.abs(denom) )
#print(' enum = {}, denom = {} '.format(enum,denom))
print('{} (d{}) = {}'.format(Sdict[mode], vn , RES))
if( mode == 'f'):
denom_min_th = 1.e-2
dv = (vm2 - vm1) # Note, mean values
enum = np.maximum( dv, 1.e-3 )
denom = 0.5*(np.abs(vm2)+np.abs(vm1))
denom = np.sign(denom)*np.maximum( np.abs(denom), denom_min_th )
RES = dv/denom
#print(' enum = {}, denom = {} '.format(dv,denom))
print('{} (d{}) = {}'.format(Sdict[mode], vn , RES))
if( mode == 'v'):
v_min_th = 1.e-1
dv = np.log( np.maximum( np.abs(v2x), v_min_th)/(np.maximum( np.abs(v1x), v_min_th )) )
RES = np.exp( np.sum(dv**2)/N )
print('{} (d{}) = {}'.format(Sdict[mode], vn , RES))
if( mode == 'R'):
dv = (v1x-vm1)*(v2x-vm2)
RES = (np.sum(dv)/N)/(np.std(v1x)*np.std(v2x))
print('{} (d{}) = {}'.format(Sdict[mode], vn , RES))
if( writeFile ):
fout.write('{:.2f}\t{:.2e}\n'.format( z1[k1], RES ))
if( printOn ):
dimsf = np.array( np.shape( dv ) )
xydims = dimsf
figDims = 13.*(xydims[::-1].astype(float)/np.max(xydims))
fig = plt.figure(num=1, figsize=figDims)
labelStr = '({0}_2 - {0}_1)(z={1} m)'.format(vn, z1[k1])
fig = addImagePlot( fig, dv[::-1,:], labelStr, gridOn, limsOn )
fig2 = plt.figure(num=2, figsize=figDims)
lbl = '(Ref {0})(z={1} m)'.format(vn, z1[k1])
fig2 = addImagePlot( fig2, v1x[::-1,:], lbl, gridOn, limsOn )
fig3 = plt.figure(num=3, figsize=figDims)
lbl = '(f2 {0})(z={1} m)'.format(vn, z1[k1])
fig3 = addImagePlot( fig3, v2x[::-1,:], lbl, gridOn, limsOn )
#fig3 = plt.figure(num=3)
#plt.hist( np.ravel(dv[idnn]), bins=25, \
# normed=True, log=True, histtype=u'bar', label=labelStr )
if( saveOn ):
figname = '{}_{}_z{}.jpg'.format(Sdict[mode],vn, int(z1[k1]))
print(' Saving = {}'.format(figname))
fig.savefig( figname, format='jpg', dpi=150)
fig2.savefig( 'REF_'+figname, format='jpg', dpi=150)
fig3.savefig( 'F2_'+figname, format='jpg', dpi=150)
#fig3.savefig( figname.replace("RES","Hist"), format='jpg', dpi=150)
plt.show()
if( writeFile ): fout.close()
``` |
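For clarity, the comparison metrics selected with `-m` reduce to a handful of formulas. The sketch below restates them on plain 1-D arrays, omitting the masking, coarsening and small-value thresholds the script applies.
```python
# Sketch of the diff-mode formulas used above, on unmasked 1-D arrays.
import numpy as np

def rmsd(v1, v2):                       # mode 'd'
    return np.sqrt(np.mean((v2 - v1)**2))

def fractional_bias(v1, v2):            # mode 'f'
    m1, m2 = np.mean(v1), np.mean(v2)
    return (m2 - m1) / (0.5 * (np.abs(m1) + np.abs(m2)))

def geometric_variance(v1, v2):         # mode 'v'
    return np.exp(np.mean(np.log(np.abs(v2) / np.abs(v1))**2))

def correlation(v1, v2):                # mode 'R'
    d = (v1 - v1.mean()) * (v2 - v2.mean())
    return np.mean(d) / (v1.std() * v2.std())

if __name__ == '__main__':
    np.random.seed(0)
    a = np.random.normal(1.0, 0.2, 1000)
    b = a + np.random.normal(0.0, 0.05, 1000)
    print(rmsd(a, b), fractional_bias(a, b), correlation(a, b))
```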
{
"source": "JPKFin/AIrsenal",
"score": 3
} |
#### File: airsenal/scripts/make_transfers.py
```python
from prettytable import PrettyTable
import requests
import json
import argparse
import getpass
from airsenal.framework.optimization_utils import get_starting_squad
from airsenal.framework.utils import (
session as dbsession,
get_player_name,
get_bank,
get_player,
CURRENT_SEASON,
get_player_from_api_id,
)
from airsenal.scripts.get_transfer_suggestions import get_transfer_suggestions
from airsenal.framework.data_fetcher import FPLDataFetcher
"""
TODO:
- confirm points loss
- write a test.
"""
def check_proceed():
proceed = input("Apply Transfers? There is no turning back! (yes/no)")
if proceed != "yes":
return False
print("Applying Transfers...")
return True
def deduct_transfer_price(pre_bank, priced_transfers):
gain = [transfer[0][1] - transfer[1][1] for transfer in priced_transfers]
return pre_bank + sum(gain)
def print_output(
team_id, current_gw, priced_transfers, pre_bank, post_bank, points_cost="TODO"
):
print("\n")
header = f"Transfers to apply for fpl_team_id: {team_id} for gameweek: {current_gw}"
line = "=" * len(header)
print(f"{header} \n {line} \n")
print(f"Bank Balance Before transfers is: £{pre_bank/10}")
t = PrettyTable(["Status", "Name", "Price"])
for transfer in priced_transfers:
t.add_row(["OUT", get_player_name(transfer[0][0]), f"£{transfer[0][1]/10}"])
t.add_row(["IN", get_player_name(transfer[1][0]), f"£{transfer[1][1]/10}"])
print(t)
print(f"Bank Balance After transfers is: £{post_bank/10}")
# print(f"Points Cost of Transfers: {points_cost}")
print("\n")
def get_sell_price(team_id, player_id):
squad = get_starting_squad(team_id)
for p in squad.players:
if p.player_id == player_id:
return squad.get_sell_price_for_player(p)
def get_gw_transfer_suggestions(fpl_team_id=None):
# gets the transfer suggestions for the latest optimization run,
# regardless of fpl_team_id
rows = get_transfer_suggestions(dbsession)
if fpl_team_id and int(fpl_team_id) != rows[0].fpl_team_id:
raise Exception(
f"Team ID passed is {fpl_team_id}, but transfer suggestions are for "
f"team ID {rows[0].fpl_team_id}. We recommend re-running optimization."
)
else:
fpl_team_id = rows[0].fpl_team_id
current_gw, chip = rows[0].gameweek, rows[0].chip_played
players_out, players_in = [], []
for row in rows:
if row.gameweek == current_gw:
if row.in_or_out < 0:
players_out.append(row.player_id)
else:
players_in.append(row.player_id)
return ([players_out, players_in], fpl_team_id, current_gw, chip)
def price_transfers(transfer_player_ids, fetcher, current_gw):
"""
For most gameweeks, we get transfer suggestions from the db, including
both players to be removed and added.
"""
transfers = list(zip(*transfer_player_ids)) # [(out,in),(out,in)]
priced_transfers = [
[
[t[0], get_sell_price(fetcher.FPL_TEAM_ID, t[0])],
[
t[1],
fetcher.get_player_summary_data()[get_player(t[1]).fpl_api_id][
"now_cost"
],
],
]
for t in transfers
]
def to_dict(t):
return {
"element_out": get_player(t[0][0]).fpl_api_id,
"selling_price": t[0][1],
"element_in": get_player(t[1][0]).fpl_api_id,
"purchase_price": t[1][1],
}
transfer_list = [to_dict(transfer) for transfer in priced_transfers]
return transfer_list
def sort_by_position(transfer_list):
"""
Takes a list of transfers e.g. [{"element_in": <FPL_API_ID>, "purchase_price": x}]
and returns the same list ordered by DEF, FWD, GK, MID (i.e. alphabetical)
to ensure that when we send a big list to the transfer API,
we always replace like-with-like.
Note that it is the FPL API ID used here, NOT the player_id.
"""
def _get_position(api_id):
return get_player_from_api_id(api_id).position(CURRENT_SEASON)
# key to the dict could be either 'element_in' or 'element_out'.
id_key = None
for k, v in transfer_list[0].items():
if "element" in k:
id_key = k
break
if not id_key:
raise RuntimeError(
"""
sort_by_position expected a list of dicts,
containing key 'element_in' or 'element_out'
"""
)
# now sort by position of the element_in/out player
transfer_list = sorted(transfer_list, key=lambda k: _get_position(k[id_key]))
return transfer_list
def remove_duplicates(transfers_in, transfers_out):
"""
If we are replacing lots of players (e.g. new team), need to make sure there
are no duplicates - can't add a player if we already have them.
"""
t_in = [t["element_in"] for t in transfers_in]
t_out = [t["element_out"] for t in transfers_out]
dupes = list(set(t_in) & set(t_out))
transfers_in = [t for t in transfers_in if not t["element_in"] in dupes]
transfers_out = [t for t in transfers_out if not t["element_out"] in dupes]
return transfers_in, transfers_out
def build_init_priced_transfers(fetcher, fpl_team_id=None):
"""
Before gameweek 1, there won't be any 'sell' transfer suggestions in the db.
We can instead query the API for our current 'picks' (requires login).
"""
if not fpl_team_id:
if (not fetcher.FPL_TEAM_ID) or fetcher.FPL_TEAM_ID == "MISSING_ID":
fpl_team_id = int(input("Please enter FPL team ID: "))
else:
fpl_team_id = fetcher.FPL_TEAM_ID
current_squad = fetcher.get_current_squad_data(fpl_team_id)
transfers_out = [
{"element_out": el["element"], "selling_price": el["selling_price"]}
for el in current_squad
]
transfer_in_suggestions = get_transfer_suggestions(dbsession)
if len(transfers_out) != len(transfer_in_suggestions):
raise RuntimeError(
"Number of transfers in and out don't match: {} {}".format(
len(transfer_in_suggestions), len(transfers_out)
)
)
transfers_in = []
for t in transfer_in_suggestions:
api_id = get_player(t.player_id).fpl_api_id
price = fetcher.get_player_summary_data()[api_id]["now_cost"]
transfers_in.append({"element_in": api_id, "purchase_price": price})
# remove duplicates - can't add a player we already have
transfers_in, transfers_out = remove_duplicates(transfers_in, transfers_out)
# re-order both lists so they go DEF, FWD, GK, MID
transfers_in = sort_by_position(transfers_in)
transfers_out = sort_by_position(transfers_out)
transfer_list = [
{**transfers_in[i], **transfers_out[i]} for i in range(len(transfers_in))
]
return transfer_list
def build_transfer_payload(priced_transfers, current_gw, fetcher, chip_played):
transfer_payload = {
"confirmed": False,
"entry": fetcher.FPL_TEAM_ID,
"event": current_gw,
"transfers": priced_transfers,
"wildcard": False,
"freehit": False,
}
if chip_played:
transfer_payload[chip_played.replace("_", "")] = True
print(transfer_payload)
return transfer_payload
def login(session, fetcher):
if (
(not fetcher.FPL_LOGIN)
or (not fetcher.FPL_PASSWORD)
or (fetcher.FPL_LOGIN == "MISSING_ID")
or (fetcher.FPL_PASSWORD == "MISSING_ID")
):
fetcher.FPL_LOGIN = input("Please enter FPL login: ")
fetcher.FPL_PASSWORD = getpass.getpass("Please enter FPL password: ")
# print("FPL credentials {} {}".format(fetcher.FPL_LOGIN, fetcher.FPL_PASSWORD))
login_url = "https://users.premierleague.com/accounts/login/"
headers = {
"login": fetcher.FPL_LOGIN,
"password": fetcher.FPL_PASSWORD,
"app": "plfpl-web",
"redirect_uri": "https://fantasy.premierleague.com/a/login",
}
session.post(login_url, data=headers)
return session
def post_transfers(transfer_payload, fetcher):
req_session = requests.session()
req_session = login(req_session, fetcher)
# adapted from https://github.com/amosbastian/fpl/blob/master/fpl/utils.py
headers = {
"Content-Type": "application/json; charset=UTF-8",
"X-Requested-With": "XMLHttpRequest",
"Referer": "https://fantasy.premierleague.com/a/squad/transfers",
}
transfer_url = "https://fantasy.premierleague.com/api/transfers/"
resp = req_session.post(
transfer_url, data=json.dumps(transfer_payload), headers=headers
)
if "non_form_errors" in resp.text:  # resp is a requests.Response; inspect the body for validation errors
raise Exception(resp.json()["non_form_errors"])
elif resp.status_code == 200:
print("SUCCESS....transfers made!")
else:
print("Transfers unsuccessful due to unknown error")
print(f"Response status code: {resp.status_code}")
print(f"Response text: {resp.text}")
def make_transfers(fpl_team_id=None, skip_check=False):
transfer_player_ids, team_id, current_gw, chip_played = get_gw_transfer_suggestions(
fpl_team_id
)
fetcher = FPLDataFetcher(team_id)
if len(transfer_player_ids[0]) == 0:
# no players to remove in DB - initial team?
print("Making transfer list for starting team")
priced_transfers = build_init_priced_transfers(fetcher, team_id)
else:
pre_transfer_bank = get_bank(fpl_team_id=team_id)
priced_transfers = price_transfers(transfer_player_ids, fetcher, current_gw)
post_transfer_bank = deduct_transfer_price(pre_transfer_bank, priced_transfers)
print_output(
team_id, current_gw, priced_transfers, pre_transfer_bank, post_transfer_bank
)
if skip_check or check_proceed():
transfer_req = build_transfer_payload(
priced_transfers, current_gw, fetcher, chip_played
)
post_transfers(transfer_req, fetcher)
return True
def main():
parser = argparse.ArgumentParser("Make transfers via the FPL API")
parser.add_argument("--fpl_team_id", help="FPL team ID", type=int)
parser.add_argument("--confirm", help="skip confirmation step", action="store_true")
args = parser.parse_args()
confirm = args.confirm if args.confirm else False
make_transfers(args.fpl_team_id, confirm)
if __name__ == "__main__":
main()
```
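For reference, the payload assembled by `price_transfers` and `build_transfer_payload` above is simply a list of priced in/out pairs wrapped in a dict. A hedged sketch with made-up player IDs, prices, team ID and gameweek (none of these values come from the script):

```python
# Minimal sketch of the payload structure used above; all numbers are invented.
priced_transfers = [
    {"element_out": 123, "selling_price": 55, "element_in": 456, "purchase_price": 60},
]
transfer_payload = {
    "confirmed": False,
    "entry": 1234567,   # hypothetical FPL team ID
    "event": 10,        # hypothetical gameweek
    "transfers": priced_transfers,
    "wildcard": False,
    "freehit": False,
}
print(transfer_payload)
```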
#### File: airsenal/scripts/squad_builder.py
```python
import argparse
from airsenal.framework.utils import (
NEXT_GAMEWEEK,
get_latest_prediction_tag,
fetcher,
)
from airsenal.framework.season import get_current_season
from airsenal.framework.optimization_squad import make_new_squad
from airsenal.framework.optimization_utils import fill_initial_suggestion_table
positions = ["FWD", "MID", "DEF", "GK"] # front-to-back
def main():
parser = argparse.ArgumentParser(description="Make a squad from scratch")
# General parameters
parser.add_argument(
"--budget", help="budget, in 0.1 millions", type=int, default=1000
)
parser.add_argument("--season", help="season, in format e.g. 1819")
parser.add_argument("--gw_start", help="gameweek to start from", type=int)
parser.add_argument(
"--num_gw", help="how many gameweeks to consider", type=int, default=3
)
parser.add_argument(
"--algorithm",
help="Which optimization algorithm to use - 'normal' or 'genetic'",
type=str,
default="genetic",
)
# parameters for "normal" optimization
parser.add_argument(
"--num_iterations",
help="number of iterations (normal algorithm only)",
type=int,
default=10,
)
# parameters for "pygmo" optimization
parser.add_argument(
"--num_generations",
help="number of generations (genetic only)",
type=int,
default=100,
)
parser.add_argument(
"--population_size",
help="number of candidate solutions per generation (genetic only)",
type=int,
default=100,
)
parser.add_argument(
"--no_subs",
help="Don't include points contribution from substitutes (genetic only)",
action="store_true",
)
parser.add_argument(
"--include_zero",
help="Include players with zero predicted points (genetic only)",
action="store_true",
)
parser.add_argument(
"--verbose",
help="Print details on optimisation progress",
action="store_true",
)
parser.add_argument(
"--fpl_team_id",
help="ID for your FPL team",
type=int,
)
args = parser.parse_args()
season = args.season or get_current_season()
budget = args.budget
gw_start = args.gw_start or NEXT_GAMEWEEK
gw_range = list(range(gw_start, min(38, gw_start + args.num_gw)))
tag = get_latest_prediction_tag(season)
algorithm = args.algorithm
num_iterations = args.num_iterations
num_generations = args.num_generations
population_size = args.population_size
remove_zero = not args.include_zero
verbose = args.verbose
if args.no_subs:
sub_weights = {"GK": 0, "Outfield": (0, 0, 0)}
else:
sub_weights = {"GK": 0.01, "Outfield": (0.4, 0.1, 0.02)}
if algorithm == "genetic":
try:
import pygmo as pg
uda = pg.sga(gen=num_generations)
except ModuleNotFoundError as e:
print(e)
print("Defaulting to algorithm=normal instead")
algorithm = "normal"
uda = None
else:
uda = None
best_squad = make_new_squad(
gw_range,
tag,
budget=budget,
season=season,
algorithm=algorithm,
remove_zero=remove_zero,
sub_weights=sub_weights,
uda=uda,
population_size=population_size,
num_iterations=num_iterations,
verbose=verbose,
)
points = best_squad.get_expected_points(gw_start, tag)
print("---------------------")
print("Best expected points for gameweek {}: {}".format(gw_start, points))
print("---------------------")
print(best_squad)
fpl_team_id = args.fpl_team_id or fetcher.FPL_TEAM_ID
fill_initial_suggestion_table(
best_squad,
fpl_team_id,
tag,
season=season,
gameweek=gw_start,
)
``` |
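The squad builder selects its optimization backend at runtime and falls back to the plain iterative algorithm when pygmo is not installed. The fallback pattern in isolation (a sketch, independent of the rest of AIrsenal):

```python
# Optional-dependency fallback, as used in squad_builder main() above.
algorithm = "genetic"
try:
    import pygmo as pg
    uda = pg.sga(gen=100)  # user-defined algorithm: simple genetic algorithm, 100 generations
except ModuleNotFoundError as err:
    print(err)
    print("Defaulting to algorithm=normal instead")
    algorithm = "normal"
    uda = None
print("Using algorithm:", algorithm)
```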
{
"source": "jpkulasingham/Eelbrain",
"score": 3
} |
#### File: Eelbrain/eelbrain/_config.py
```python
from multiprocessing import cpu_count
import os
from matplotlib.colors import to_rgb
SUPPRESS_WARNINGS = True
CONFIG = {
'n_workers': cpu_count(),
'eelbrain': True,
'autorun': None,
'show': True,
'format': 'svg',
'figure_background': 'white',
'prompt_toolkit': True,
'animate': True,
'nice': 0,
'tqdm': False, # disable=CONFIG['tqdm']
}
def configure(
n_workers=None,
frame=None,
autorun=None,
show=None,
format=None,
figure_background=None,
prompt_toolkit=None,
animate=None,
nice=None,
tqdm=None,
):
"""Set basic configuration parameters for the current session
Parameters
----------
n_workers : bool | int
Number of worker processes to use in multiprocessing enabled
computations. ``False`` to disable multiprocessing. ``True`` (default)
to use as many processes as cores are available. Negative numbers to use
all but n available CPUs.
frame : bool
Open figures in the Eelbrain application. This provides additional
functionality such as copying a figure to the clipboard. If False, open
figures as normal matplotlib figures.
autorun : bool
When a figure is created, automatically enter the GUI mainloop. By
default, this is True when the figure is created in interactive mode
but False when the figure is created in a script (in order to run the
GUI at a specific point in a script, call :func:`eelbrain.gui.run`).
show : bool
Show plots on the screen when they're created (disable this to create
plots and save them without showing them on the screen).
format : str
Default format for plots (for example "png", "svg", ...).
figure_background : bool | matplotlib color
While :mod:`matplotlib` uses a gray figure background by default,
Eelbrain uses white. Set this parameter to ``False`` to use the default
from :attr:`matplotlib.rcParams`, or set it to a valid matplotblib
color value to use an arbitrary color. ``True`` to revert to the default
white.
prompt_toolkit : bool
In IPython 5, prompt_toolkit allows running the GUI main loop in
parallel to the Terminal, meaning that the IPython terminal and GUI
windows can be used without explicitly switching between Terminal and
GUI. This feature is enabled by default, but can be disabled by setting
``prompt_toolkit=False``.
animate : bool
Animate plot navigation (default True).
nice : int [-20, 19]
Scheduling priority for multiprocessing (larger number yields more to
other processes; negative numbers require root privileges).
tqdm : bool
Enable or disable :mod:`tqdm` progress bars.
"""
# don't change values before raising an error
new = {}
if n_workers is not None:
if n_workers is True:
new['n_workers'] = cpu_count()
elif n_workers is False:
new['n_workers'] = 0
elif isinstance(n_workers, int):
if n_workers < 0:
if cpu_count() + n_workers < 1:
raise ValueError("n_workers=%i, but only %i CPUs are "
"available" % (n_workers, cpu_count()))
new['n_workers'] = cpu_count() + n_workers  # negative n_workers: use all but |n_workers| CPUs
else:
new['n_workers'] = n_workers
else:
raise TypeError("n_workers=%r" % (n_workers,))
if frame is not None:
new['eelbrain'] = bool(frame)
if autorun is not None:
new['autorun'] = bool(autorun)
if show is not None:
new['show'] = bool(show)
if format is not None:
new['format'] = format.lower()
if figure_background is not None:
if figure_background is True:
figure_background = 'white'
elif figure_background is not False:
to_rgb(figure_background)
new['figure_background'] = figure_background
if prompt_toolkit is not None:
new['prompt_toolkit'] = bool(prompt_toolkit)
if animate is not None:
new['animate'] = bool(animate)
if nice is not None:
nice = int(nice)
if not -20 <= nice < 20:
raise ValueError("nice=%i; needs to be in range [-20, 19]" % (nice,))
elif nice < 0 and not os.getuid() == 0:
raise ValueError("nice=%i; values < 0 require root privileges" % (nice,))
new['nice'] = nice
if tqdm is not None:
new['tqdm'] = not tqdm  # CONFIG['tqdm'] stores the disable flag passed to tqdm (see CONFIG above)
CONFIG.update(new)
```
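`configure` validates its arguments and then updates the module-level `CONFIG` dict, so it can be called repeatedly during a session. A short usage sketch based on the documented parameters (assuming `configure` is re-exported at the package level as `eelbrain.configure`):

```python
# Usage sketch; assumes `configure` is re-exported as `eelbrain.configure`.
from eelbrain import configure

# Use 4 worker processes, save figures as PNG, keep the white figure background.
configure(n_workers=4, format='png', figure_background=True)

# For batch scripts: disable multiprocessing and on-screen display.
configure(n_workers=False, show=False)
```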
#### File: Eelbrain/eelbrain/_exceptions.py
```python
class DefinitionError(Exception):
"MneExperiment definition error"
class DimensionMismatchError(Exception):
"Trying to align NDVars with mismatching dimensions"
@classmethod
def from_dims_list(cls, message, dims_list):
unique_dims = []
for dims in dims_list:
if any(dims == dims_ for dims_ in unique_dims):
continue
else:
unique_dims.append(dims)
desc = '\n'.join(map(str, unique_dims))
return cls(f'{message}\n{desc}')
class WrongDimension(Exception):
"Dimension that is not supported"
class IncompleteModel(Exception):
"Function requires a fully specified model"
class OldVersionError(Exception):
"Trying to load a file from a version that is no longer supported"
class ZeroVariance(Exception):
"Trying to do test on data with zero variance"
```
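`DimensionMismatchError.from_dims_list` deduplicates the offending dimension sets before building the message. A small illustration (the tuples below are stand-ins for real NDVar dimension objects, and the import path is assumed from the file header):

```python
# Sketch only: plain tuples stand in for NDVar dimension objects.
from eelbrain._exceptions import DimensionMismatchError

dims_list = [('time', 'sensor'), ('time', 'sensor'), ('time',)]
err = DimensionMismatchError.from_dims_list(
    "Trying to align NDVars with mismatching dimensions:", dims_list)
print(err)  # lists each unique dimension set once
```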
#### File: eelbrain/_experiment/experiment.py
```python
from collections import defaultdict
import difflib
from functools import reduce
from glob import glob
from itertools import chain, product
import operator
import os
import re
import shutil
import subprocess
from time import localtime, strftime
import traceback
import numpy as np
from tqdm import tqdm
from .. import fmtxt
from .._config import CONFIG
from .._text import enumeration, n_of, plural
from .._utils import as_sequence, LazyProperty, ask
from .._utils.com import Notifier, NotNotifier
from .definitions import check_names, compound
def _etree_expand(node, state):
for tk, tv in node.items():
if tk == '.':
continue
for k, v in state.items():
name = '{%s}' % tk
if str(v).startswith(name):
tv[k] = {'.': v.replace(name, '')}
if len(tv) > 1:
_etree_expand(tv, state)
def _etree_node_repr(node, name, indent=0):
head = ' ' * indent
out = [(name, head + node['.'])]
for k, v in node.items():
if k == '.':
continue
out.extend(_etree_node_repr(v, k, indent=indent + 3))
return out
class LayeredDict(dict):
"""Dictionary which can store and restore states"""
def __init__(self):
self._states = []
dict.__init__(self)
def __repr__(self):
return ("<LayeredDict with %i stored states:\n"
"%r>" % (len(self._states), dict.__repr__(self)))
def get_stored(self, key, level, default=None):
"""Retrieve a field value from any level
Parameters
----------
key : str
the field name (dictionary key).
level : int
The level from which to retrieve the value. -1 = the current level.
"""
return self._states[level].get(key, default)
def restore_state(self, state=-1, discard_tip=True):
"""Restore a previously stored state
Parameters
----------
state : int | dict
Index of the state which to restore (specified as index into a
list of stored states, i.e., negative values access recently
stored states).
discard_tip : bool
Discard the relevant state after restoring it. All states stored
later are discarded either way.
See Also
--------
.get_stored(): Retrieve a stored value without losing stored states
"""
if isinstance(state, int):
index = state
state = self._states[index]
if discard_tip:
del self._states[index:]
elif index != -1: # -1 + 1 = 0
del self._states[index + 1:]
elif not isinstance(state, dict):
raise TypeError("state needs to be either int or dict, got %r" %
(state,))
self.clear()
self.update(state)
def store_state(self):
"Store the current state"
self._states.append(self.copy())
class _TempStateController:
def __init__(self, experiment):
self.experiment = experiment
def __enter__(self):
self.experiment._store_state()
def __exit__(self, exc_type, exc_value, traceback):
self.experiment._restore_state()
class TreeModel:
"""
A hierarchical collection of format strings and field values
Notes
-----
Any subclass should make sure to call the ``._store_state()`` method at the
end of initialization.
"""
owner = None # email address as string (for notification)
_auto_debug = False # in notification block
_fmt_pattern = re.compile(r'\{([\w-]+)\}')
# a dictionary of static templates (i.e., templates that do not have any hooks)
_templates = {}
defaults = {}
_repr_args = ()
def __init__(self, **state):
# scaffold for state
self._fields = LayeredDict()
self._field_values = LayeredDict()
self._terminal_fields = []
self._secondary_cache = defaultdict(tuple) # secondary cache-files
self._repr_kwargs = []
self._repr_kwargs_optional = []
# scaffold for hooks
self._compound_members = {}
self._compounds = defaultdict(list)
self._eval_handlers = defaultdict(list)
self._post_set_handlers = defaultdict(list)
self._set_handlers = {}
self._slave_fields = defaultdict(list)
self._slave_handlers = {}
# construct initial state: make all defaults available, then set as
# many values as we can
self._defaults = dict(self.defaults)
self._defaults.update(state)
for k, v in self._templates.items():
if v is None or isinstance(v, str):
self._register_constant(k, v)
elif isinstance(v, tuple):
self._register_field(k, v, v[0], allow_empty=True)
else:
raise TypeError(f"Invalid templates field value: {v!r}. Need None, tuple or string")
if self.owner:
task = self.__class__.__name__
self.notification = Notifier(self.owner, task, self._crash_report,
self._auto_debug)
else:
self.notification = NotNotifier()
def __repr__(self):
args = [f'{self._fields[arg]!r}' for arg in self._repr_args]
kwargs = [(arg, self._fields[arg]) for arg in self._repr_kwargs]
no_initial_state = len(self._fields._states) == 0
for k in self._repr_kwargs_optional:
v = self._fields[k]
if no_initial_state or v != self._fields.get_stored(k, level=0):
kwargs.append((k, v))
args.extend(f'{k}={v!r}' for k, v in kwargs)
return f"{self.__class__.__name__}({', '.join(args)})"
def _bind_eval(self, key, handler):
self._eval_handlers[key].append(handler)
def _bind_post_set(self, key, handler):
handlers = self._post_set_handlers[key]
if handler not in handlers:
handlers.append(handler)
def _bind_set(self, key, handler):
if key in self._set_handlers:
raise KeyError("set-handler for %r already set" % key)
self._set_handlers[key] = handler
def _crash_report(self):
out = []
# try:
# source = inspect.getsource(self.__class__)
# except Exception as e:
# source = "Failed to retrieve source:\n" + traceback.format_exc(e)
# out.append(source)
try:
tree = str(self.show_state())
except Exception as e:
tree = "Failed to retrieve state:\n" + traceback.format_exc(e)
out.append(tree)
# package versions
from .. import __version__
import mne
import scipy
out.append('\n'.join(("Eelbrain %s" % __version__,
"mne-python %s" % mne.__version__,
"SciPy %s" % scipy.__version__,
"NumPy %s" % np.__version__)))
return out
def _find_missing_fields(self):
"""Check that all field names occurring in templates are valid entries
Raises
------
KeyError
If any field names occurring in templates are not registered fields.
"""
# find field names occurring in field values but not as fields
missing = set()
for temp in self._fields.values():
for field in self._fmt_pattern.findall(temp):
if field not in self._fields:
missing.add(field)
if missing:
raise KeyError("The following fields occur in templates but "
"are undefined: %s" % ', '.join(sorted(missing)))
def _register_compound(self, key, elements):
"""Register a field that is composed out of other fields
The compound always reflects ``' '.join(elements)`` including only
elements that are not empty.
Parameters
----------
key : str
The name of the compound field.
elements : tuple of str
The field names of the elements.
"""
self._compound_members[key] = elements
for e in elements:
self._compounds[e].append(key)
self._bind_post_set(e, self._update_compounds)
self._fields[key] = None
self._update_compound(key)
def _register_constant(self, key, value):
value = self._defaults.get(key, value)
if value is None:
raise ValueError("The %r field needs to be set as default" % key)
self._fields[key] = value
def _register_field(self, key, values=None, default=None, set_handler=None,
eval_handler=None, post_set_handler=None,
depends_on=None, slave_handler=None,
allow_empty=False, repr=None):
"""Register an iterable field
Parameters
----------
key : str
Name of the field.
values : None | sequence of str
Possible values for this field, if known.
default : None | str
Set the default value (if None, the first element in values).
set_handler : None | callable
Function to call instead of updating the state value. The return
value of the set_handler is sent to the post_set_handler.
eval_handler : None | callable
Function to use for evaluating a value before setting. Can be
called without actually setting the value; any parameter changes
need to be evaluated in post_set_handlers.
post_set_handler : None | callable
Function to call after the value is changed. Needs to be able to
handle non-existing values for ``e.set(..., vmatch=False)`` calls.
depends_on : str | sequence of str
Slave fields: Fields in depends_on trigger change in ``key``.
slave_handler : func
Slave fields: Function that determines the new value of ``key``.
allow_empty : bool
Allow empty string in ``values``.
repr : bool
By default, fields are shown in ``repr`` if they are different from
the value at initialization. Set to ``True`` to always show them
(as long as there are at least 2 ``values``).
"""
if key in self._fields:
raise KeyError("Field already exists: %r" % key)
if depends_on is not None:
if (set_handler is not None or eval_handler is not None or
post_set_handler is not None):
raise RuntimeError("Slave values can't have other handlers")
elif slave_handler is None:
raise RuntimeError("Slave value requires slave_handler")
self._register_slave_field(key, depends_on, slave_handler)
if default is None:
default = slave_handler(self._fields)
if set_handler is not None:
self._bind_set(key, set_handler)
if eval_handler is not None:
self._bind_eval(key, eval_handler)
if post_set_handler is not None:
self._bind_post_set(key, post_set_handler)
default = self._defaults.get(key, default)
if values:
values = tuple(values)
check_names(values, key, allow_empty)
if default is None:
default = values[0]
elif default not in values:
raise ValueError(f"Default {default!r} for {key!r} not in values {values}")
self._field_values[key] = values
# repr
if key in self._repr_args:
pass
elif repr is True:
if values and len(values) > 1:
self._repr_kwargs.append(key)
elif repr is None:
if values and len(values) > 1:
self._repr_kwargs_optional.append(key)
elif repr is not False:
raise TypeError(f"repr={repr!r}")
self._terminal_fields.append(key)
self._fields[key] = ''
if default is not None:
self.set(**{key: default})
def _register_slave_field(self, key, depends_on, handler):
"""Register a field that strictly depends on one or more other fields
Parameters
----------
key : str
Field name.
depends_on : str | sequence of str
Fields that trigger change.
handler : func
Function that determines the new value.
Notes
-----
Restrictions:
- Slave fields can not have any other handlers
- Slave fields can not depend on other slave fields
"""
if isinstance(depends_on, str):
depends_on = (depends_on,)
for dep in depends_on:
self._slave_fields[dep].append(key)
self._slave_handlers[key] = handler
self._fields[key] = handler(self._fields)
def expand_template(self, temp, keep=()):
"""Expand all constant variables in a template
Parameters
----------
temp : str
Template or name of the template which should be expanded.
keep : container (implements __contains__)
Names of the variables which should not be expanded.
Returns
-------
formatted_temp : str
Template with all variables replaced by their values, except
variables which have entries in field_values or in ``keep``.
"""
temp = self._fields.get(temp, temp)
while True:
stop = True
for name in self._fmt_pattern.findall(temp):
if (name in keep) or (self._field_values.get(name, False)):
pass
else:
temp = temp.replace('{%s}' % name, self._fields[name])
stop = False
if stop:
break
return temp
def find_keys(self, temp, root=True):
"""Find all terminal field names that are relevant for a template.
Parameters
----------
temp : str
Template (or field name) for which to find terminal field names.
root : bool
Include "root" if present (default True).
Returns
-------
keys : list
All terminal field names that are relevant for formatting ``temp``.
"""
if temp in self._terminal_fields:
return [temp]
if temp in self._compound_members:
temporary_keys = list(self._compound_members[temp])
else:
temp = self._fields.get(temp, temp)
temporary_keys = self._fmt_pattern.findall(temp)
keys = []
while temporary_keys:
key = temporary_keys.pop(0)
if key == 'root':
if root:
keys.append('root')
elif key in self._terminal_fields:
keys.append(key)
else:
keys.extend(self.find_keys(key, root))
# remove duplicates
return list(dict.fromkeys(keys))
def format(self, string, vmatch=True, **kwargs):
"""Format a string (i.e., replace any '{xxx}' fields with their values)
Parameters
----------
string : str
Template string.
vmatch : bool
For fields with known names, only allow existing field names.
others :
State parameters.
Returns
-------
formatted_string : str
The template temp formatted with current state values.
"""
self.set(match=vmatch, **kwargs)
while self._fmt_pattern.search(string):
string = string.format(**self._fields)
return string
def get(self, temp, **state):
return self.format('{%s}' % temp, **state)
def _get_rel(self, temp, start):
"Get the path of ``temp`` relative to ``start`` (both field names)"
abs_ = self.get(temp)
start_ = self.get(start)
return os.path.relpath(abs_, start_)
def get_field_values(self, field, exclude=()):
"""Find values for a field taking into account exclusion
Parameters
----------
field : str
Field for which to find values.
exclude : list of str
Exclude these values.
"""
values = self._field_values[field]
if isinstance(exclude, str):
exclude = (exclude,)
if exclude:
values = [v for v in values if v not in exclude]
else:
values = list(values)
return values
def iter(self, fields, exclude=None, values=None, progress_bar=None, **constants):
"""
Cycle the experiment's state through all values on the given fields
Parameters
----------
fields : sequence | str
Field(s) over which should be iterated.
exclude : dict {str: iterator over str}
Exclude values from iteration (``{field: values_to_exclude}``).
values : dict {str: iterator over str}
Fields with custom values to iterate over (instead of the
corresponding field values) with {name: (sequence of values)}
entries.
progress_bar : str
Message to show in the progress bar.
...
Fields with constant values throughout the iteration.
"""
if isinstance(fields, str):
fields = (fields,)
yield_str = True
else:
yield_str = False
# find actual fields to iterate over:
iter_fields = []
for field in fields:
if field in constants:
continue
iter_fields.extend(f for f in self.find_keys(field) if f not in constants)
# check values and exclude
if values:
bad = set(values).difference(iter_fields)
if bad:
raise ValueError(f"values={values!r}: keys that are not iterated over ({', '.join(bad)})")
else:
values = {}
if exclude:
bad = set(exclude).difference(iter_fields)
if bad:
raise ValueError(f"exclude={exclude!r}: keys that are not iterated over ({', '.join(bad)})")
else:
exclude = {}
# set constants (before .get_field_values() call)
self.set(**constants)
# gather values to iterate over
v_lists = []
for field in iter_fields:
if field in values:
v_lists.append(as_sequence(values[field]))
else:
exclude_ = exclude.get(field, None)
v_lists.append(self.get_field_values(field, exclude_))
if len(v_lists):
n = reduce(operator.mul, map(len, v_lists))
with self._temporary_state:
disable = progress_bar is None or CONFIG['tqdm']  # CONFIG['tqdm'] is the tqdm disable flag
for v_list in tqdm(product(*v_lists), progress_bar, n, disable=disable):
self._restore_state(discard_tip=False)
self.set(**dict(zip(iter_fields, v_list)))
if yield_str:
yield self.get(fields[0])
else:
yield tuple(self.get(f) for f in fields)
else:
yield ()
def iter_temp(self, temp, exclude=None, values={}, **constants):
"""
Iterate through all paths conforming to a template given in ``temp``.
Parameters
----------
temp : str
Name of a template in the MneExperiment.templates dictionary, or
a path template with variables indicated as in ``'{var_name}'``
"""
# if the name is an existing template, retrieve it
temp = self.expand_template(temp, values.keys())
# find variables for iteration
variables = set(self._fmt_pattern.findall(temp))
variables.difference_update(constants)
for _ in self.iter(variables, exclude, values, **constants):
path = temp.format(**self._fields)
yield path
def _partial(self, temp, skip=()):
"Format a template while leaving some slots unfilled"
skip = set(skip)
fields = self._fields.copy()
fields.update({k: '{%s}' % k for k in skip})
string = '{%s}' % temp
while set(self._fmt_pattern.findall(string)).difference(skip):
string = string.format(**fields)
return string
def _copy_state(self):
"""Copy of the state that can be used with ``._restore_state()``"""
return self._fields.copy(), self._field_values.copy()
def _restore_state(self, state=-1, discard_tip=True):
"""Restore a previously stored state
Parameters
----------
state : int
Index of the state which to restore (specified as index into a
list of stored states, i.e., negative values access recently
stored states).
discard_tip : bool
Discard the relevant state after restoring it. All states stored
later are discarded either way.
"""
if isinstance(state, int):
s1 = s2 = state
else:
s1, s2 = state
self._fields.restore_state(s1, discard_tip)
self._field_values.restore_state(s2, discard_tip)
def reset(self):
"""Reset all field values to the state at initialization
This function can be used in cases where the same MneExperiment instance
is used to perform multiple independent operations, where parameters set
during one operation should not affect the next operation.
"""
self._restore_state(0, False)
def set(self, match=True, allow_asterisk=False, **state):
"""Set the value of one or more fields.
Parameters
----------
match : bool
For fields with pre-defined values, only allow valid values (default
``True``).
allow_asterisk : bool
If a value contains ``'*'``, set the value without the normal value
evaluation and checking mechanisms (default ``False``).
... :
Fields and values to set. Invalid fields raise a KeyError. Unless
match == False, Invalid values raise a ValueError.
"""
if not state:
return
# expand compounds
if state.pop('expand_compounds', True):
for k in list(state):
if k in self._compound_members:
fields = self._compound_members[k]
v = state.pop(k)
values = v.split(' ')
for i, field in enumerate(fields):
field_values = self._field_values[field]
vi = values[i] if len(values) > i else None
if vi in field_values:
continue
elif '' in field_values:
values.insert(i, '')
else:
raise ValueError(f"{k}={v!r}")
if len(values) != len(fields):
raise ValueError(f"{k}={v!r}")
state.update(zip(fields, values))
handled_state = {} # fields with special set handlers
for k in list(state):
v = state[k]
if k not in self._fields:
raise TypeError(f"{k}={v!r}: No template named {k!r}")
elif v is None:
state.pop(k)
continue
elif k in self._set_handlers:
handled_state[k] = self._set_handlers[k](state.pop(k))
continue
elif not isinstance(v, str):
raise TypeError(f"{k}={v!r}: Values have to be strings")
elif '*' in v and allow_asterisk:
continue
# eval values
eval_handlers = self._eval_handlers[k]
if eval_handlers:
for handler in eval_handlers:
try:
v = handler(v)
except ValueError:
if match:
raise
if not isinstance(v, str):
raise RuntimeError(f"Invalid conversion from handler {handler}: {k}={v!r}")
state[k] = v
elif match and k in self._field_values and v not in self._field_values[k]:
matches = difflib.get_close_matches(v, self._field_values[k], 1)
if matches:
alt = f"Did you mean {matches[0]!r}? "
else:
alt = ''
raise ValueError(f"{k}={v!r}. {alt}To see all valid values use e.show_fields(); To set a non-existent value, use e.set({k}={v!r}, match=False).")
self._fields.update(state)
# fields depending on changes in other fields
slave_state = {}
for state_key in set(state).union(handled_state).intersection(self._slave_fields):
for slave_key in self._slave_fields[state_key]:
if slave_key not in slave_state:
v = self._slave_handlers[slave_key](self._fields)
if v is not None:
slave_state[slave_key] = v
self._fields.update(slave_state)
# call post_set handlers
for k, v in chain(state.items(), handled_state.items(), slave_state.items()):
for handler in self._post_set_handlers[k]:
handler(k, v)
def show_fields(self, str_out=False):
"""
Generate a table of all iterable fields and their values.
Parameters
----------
str_out : bool
Return the table as a string (instead of printing it).
"""
lines = []
for key in self._field_values:
values = list(self._field_values[key])
line = f'{key}:'
head_len = len(line) + 1
while values:
v = repr(values.pop(0))
if values:
v += ','
if len(v) < 80 - head_len:
line += ' ' + v
else:
lines.append(line)
line = ' ' * head_len + v
if not values:
lines.append(line)
table = '\n'.join(lines)
if str_out:
return table
else:
print(table)
def show_state(self, temp=None, empty=False, hide=()):
"""List all top-level fields and their values
(Top-level fields are fields whose values do not contain templates)
Parameters
----------
temp : None | str
Only show variables relevant to this template.
empty : bool
Show empty variables (items whose value is the empty string '').
hide : collection of str
State variables to hide.
Returns
-------
state : Table
Table of (relevant) variables and their values.
"""
table = fmtxt.Table('lll')
table.cells('Key', '*', 'Value')
table.caption('*: Value is modified from initialization state.')
table.midrule()
if temp is None:
keys = chain(self._repr_kwargs, self._repr_kwargs_optional)
else:
keys = self.find_keys(temp)
for k in sorted(keys):
if k in hide:
continue
v = self._fields[k]
if v != self._fields.get_stored(k, level=0):
mod = '*'
else:
mod = ''
if empty or mod or v:
table.cells(k, mod, repr(v))
return table
def show_tree(self, root='root', fields=None):
"""
Print a tree of the file hierarchy implicit in the templates
Parameters
----------
root : str
Name of the root template (e.g., 'besa-root').
fields : list of str
Which fields to include in the tree (default is all).
"""
if fields is None:
fields = self._fields
else:
# find all implied fields
new_fields = set(fields)
fields = {}
while new_fields:
k = new_fields.pop()
fields[k] = v = self._fields[k]
new_fields.update([f for f in self._fmt_pattern.findall(v) if f not in fields])
tree = {'.': self.get(root)}
root_temp = '{%s}' % root
for k, v in fields.items():
if str(v).startswith(root_temp):
tree[k] = {'.': v.replace(root_temp, '')}
_etree_expand(tree, fields)
nodes = _etree_node_repr(tree, root)
name_len = max(len(n) for n, _ in nodes)
path_len = max(len(p) for _, p in nodes)
pad = ' ' * (80 - name_len - path_len)
print('\n'.join(n.ljust(name_len) + pad + p.ljust(path_len) for n, p in nodes))
def _store_state(self):
"""Store the current state
See also
--------
._restore_state() : restore a previously stored state
"""
self._fields.store_state()
self._field_values.store_state()
@LazyProperty
def _temporary_state(self):
return _TempStateController(self)
def _update_compound(self, key):
items = [self.get(k) for k in self._compound_members[key]]
self.set(**{key: compound(items)}, expand_compounds=False)
def _update_compounds(self, key, _):
for compound in self._compounds[key]:
self._update_compound(compound)
class FileTree(TreeModel):
""":class:`TreeModel` subclass for a file system hierarchy"""
_repr_args = ('root',)
_safe_delete = 'root' # directory from which to rm without warning
def __init__(self, **state):
TreeModel.__init__(self, **state)
self._make_handlers = {}
self._cache_handlers = {}
self._register_field('root', eval_handler=self._eval_root)
def _bind_cache(self, key, handler):
"""Bind a cache function to a ``*-file`` key
The cache function is called every time the file name is retrieved and
should recreate the file if it is outdated.
The cache function can return the filename of the created file since
it is called every time the specific file is requested. Note that this
causes problems for ``glob()``.
"""
if key in self._cache_handlers:
raise RuntimeError(f"Cache handler for {key!r} already defined")
elif key in self._make_handlers:
raise RuntimeError(f"Already defined make handler for {key!r}")
self._cache_handlers[key] = handler
def _bind_make(self, key, handler):
"""Bind a make function to a ``*-file`` key
The make function is called only when the file name is retrieved and
the file does not exist.
"""
if key in self._cache_handlers:
raise RuntimeError(f"Already defined cache handler for {key!r}")
elif key in self._make_handlers:
raise RuntimeError(f"Make handler for {key!r} already defined")
self._make_handlers[key] = handler
@staticmethod
def _eval_root(root):
root = os.path.abspath(os.path.expanduser(root))
if root != '':
root = os.path.normpath(root)
return root
def get(self, temp, fmatch=False, vmatch=True, match=True, mkdir=False,
make=False, **kwargs):
"""
Retrieve a formatted template
With match=True, '*' are expanded to match a file,
and if there is not a unique match, an error is raised. With
mkdir=True, the directory containing the file is created if it does not
exist.
Parameters
----------
temp : str
Name of the requested template.
fmatch : bool
"File-match": If the template contains asterisk ('*'), use glob to
fill it in. An IOError is raised if the pattern does not match
exactly one file.
vmatch : bool
"Value match": Require existence of the assigned value (only
applies for fields with stored values).
match : bool
Do any matching (i.e., match=False sets fmatch as well as vmatch
to False).
mkdir : bool
If the directory containing the file does not exist, create it.
make : bool
If a requested file does not exists, make it if possible.
kwargs :
Set any state values.
"""
if not match:
fmatch = vmatch = False
path = TreeModel.get(self, temp, vmatch=vmatch, **kwargs)
path = os.path.expanduser(path)
# assert the presence of the file
if fmatch and ('*' in path):
paths = glob(path)
if len(paths) == 0 and make and temp in self._make_handlers:
self._make_handlers[temp]()
paths = glob(path)
if len(paths) == 1:
path = paths[0]
elif len(paths) > 1:
raise IOError(f"More than one file matches {path!r}: {paths}")
else:
raise IOError(f"No file found for {path!r}")
# create the directory
if mkdir:
if temp.endswith('dir'):
dirname = path
else:
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
root = self.get('root')
if root == '':
raise IOError("Prevented from creating directories because root is not set")
elif os.path.exists(root):
os.makedirs(dirname)
else:
raise IOError(f"Prevented from creating directories because root does not exist: {root!r}")
# make the file
if make:
if temp in self._cache_handlers:
path = self._cache_handlers[temp]() or path
elif not os.path.exists(path):
if temp in self._make_handlers:
with self._temporary_state:
self._make_handlers[temp]()
elif temp.endswith('-dir'):
os.makedirs(path)
else:
raise RuntimeError(f"No make handler for {temp!r}")
return path
def glob(self, temp, inclusive=False, **state):
"""Find all files matching a certain pattern
Parameters
----------
temp : str
Name of the path template for which to find files.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
See Also
--------
copy : Copy files.
move : Move files.
rm : Delete files.
Notes
-----
State parameters can include an asterisk ('*') to match multiple files.
Uses :func:`glob.glob`.
"""
pattern = self._glob_pattern(temp, inclusive, **state)
return glob(pattern)
def _glob_pattern(self, temp, inclusive=False, **state):
if inclusive:
for key in self._terminal_fields:
if key in state or key == 'root':
continue
elif key in self._field_values and len(self._field_values[key]) == 1:
continue
state[key] = '*'
with self._temporary_state:
pattern = self.get(temp, allow_asterisk=True, **state)
return pattern
def _find_files_with_target(self, action, temp, dst_root, inclusive, overwrite, confirm, state):
if dst_root is None:
if 'root' not in state:
raise TypeError("Need to specify at least one of root and dst_root")
dst_root = self.get('root')
src_filenames = self.glob(temp, inclusive, **state)
n = len(src_filenames)
if n == 0:
print("No files matching pattern.")
return None, None
root = self.get('root')
errors = [filename for filename in src_filenames if not filename.startswith(root)]
if errors:
raise ValueError(f"{len(errors)} files are not located in the root directory ({errors[0]}, ...)")
rel_filenames = {src: os.path.relpath(src, root) for src in src_filenames}
dst_filenames = {src: os.path.join(dst_root, filename) for src, filename in rel_filenames.items()}
if overwrite is not True:
exist = [src for src, dst in dst_filenames.items() if os.path.exists(dst)]
if exist:
if overwrite is None:
raise ValueError(f"{len(exist)} of {n} files already exist")
elif overwrite is False:
if len(exist) == n:
print(f"All {n} files already exist.")
return None, None
n -= len(exist)
for src in exist:
src_filenames.remove(src)
else:
raise TypeError(f"overwrite={overwrite!r}")
if not confirm:
print(f"{action} {self.get('root')} -> {dst_root}:")
for src in src_filenames:
print(" " + rel_filenames[src])
if input(f"{action} {n} files? (confirm with 'yes'): ") != 'yes':
return None, None
return src_filenames, [dst_filenames[src] for src in src_filenames]
def copy(self, temp, dst_root=None, inclusive=False, confirm=False, overwrite=None, **state):
"""Copy files to a different root folder
Parameters
----------
temp : str
Name of the path template for which to find files.
dst_root : str
Path to the root to which the files should be moved. If the target
is the experiment's root directory, specify ``root`` as the source
root and leave ``dst_root`` unspecified.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
confirm : bool
Skip asking for confirmation before copying the files.
overwrite : bool
``True`` to overwrite target files if they already exist. ``False``
to quietly keep existing files.
See Also
--------
glob : Find all files matching a template.
move : Move files.
rm : Delete files.
make_copy : Copy a file by substituting a field
Notes
-----
State parameters can include an asterisk ('*') to match multiple files.
"""
src_filenames, dst_filenames = self._find_files_with_target('Copy', temp, dst_root, inclusive, overwrite, confirm, state)
if not src_filenames:
return
for src, dst in tqdm(zip(src_filenames, dst_filenames), "Copying", len(src_filenames)):
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
if os.path.isdir(src):
shutil.copytree(src, dst)
else:
shutil.copy(src, dst)
def move(self, temp, dst_root=None, inclusive=False, confirm=False, overwrite=None, **state):
"""Move files to a different root folder
Parameters
----------
temp : str
Name of the path template for which to find files.
dst_root : str
Path to the root to which the files should be moved. If the target
is the experiment's root directory, specify ``root`` as the source
root and leave ``dst_root`` unspecified.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
confirm : bool
Skip asking for confirmation before moving the files.
overwrite : bool
Overwrite target files if they already exist.
See Also
--------
copy : Copy files.
glob : Find all files matching a template.
rm : Delete files.
Notes
-----
State parameters can include an asterisk ('*') to match multiple files.
"""
if overwrite is False:
raise ValueError(f"overwrite={overwrite!r}")
src_filenames, dst_filenames = self._find_files_with_target('Move', temp, dst_root, inclusive, overwrite, confirm, state)
if not src_filenames:
return
for src, dst in tqdm(zip(src_filenames, dst_filenames), "Moving", len(src_filenames)):
dirname = os.path.dirname(dst)
if not os.path.exists(dirname):
os.makedirs(dirname)
os.rename(src, dst)
def show_file_status(self, temp, row, col=None, count=True, present='time',
absent='-', **kwargs):
"""Compile a table about the existence of files
Parameters
----------
temp : str
The name of the path template for the files to examine.
row : str
Field over which to alternate rows.
col : None | str
Field over which to alternate columns (default is a single column).
count : bool
Add a column with a number for each line (default True).
present : 'time' | 'date' | str
String to display when a given file is present. 'time' to use last
modification date and time (default); 'date' for date only.
absent : str
String to display when a given file is absent (default '-').
others :
``self.iter()`` kwargs.
"""
if col is None:
col_v = (None,)
ncol = 1
else:
col_v = self.get_field_values(col)
ncol = len(col_v)
# table header
table = fmtxt.Table('r' * bool(count) + 'l' * (ncol + 1))
if count:
table.cell()
table.cell(row)
if col is None:
table.cell(temp)
else:
for name in col_v:
table.cell(name)
table.midrule()
# body
for i, row_v in enumerate(self.iter(row, **kwargs)):
if count:
table.cell(i)
table.cell(row_v)
for v in col_v:
if v is None:
path = self.get(temp)
else:
path = self.get(temp, **{col: v})
if os.path.exists(path):
if present == 'time':
r = strftime('%x %X', localtime(os.path.getmtime(path)))
elif present == 'date':
r = strftime('%x', localtime(os.path.getmtime(path)))
else:
r = present
else:
r = absent
table.cell(r)
return table
def show_file_status_mult(self, files, fields, count=True, present='X',
absent='-', **kwargs):
"""
Compile a table about the existence of multiple files
Parameters
----------
files : str | list of str
The names of the path templates whose existence to list.
fields : str | list of str
The names of the variables for which to list files (i.e., for each
unique combination of ``fields``, list ``files``).
count : bool
Add a column with a number for each subject.
present : str
String to display when a given file is present.
absent : str
String to display when a given file is absent.
Examples
--------
>>> e.show_file_status_mult(['raw-file', 'trans-file', 'fwd-file'],
... 'subject')
Subject Raw-file Trans-file Fwd-file
-----------------------------------------------
0 AD001 X X X
1 AD002 X X X
2 AD003 X X X
...
"""
if not isinstance(files, (list, tuple)):
files = [files]
if not isinstance(fields, (list, tuple)):
fields = [fields]
ncol = (len(fields) + len(files))
table = fmtxt.Table('r' * bool(count) + 'l' * ncol)
if count:
table.cell()
for name in fields + files:
table.cell(name.capitalize())
table.midrule()
for i, _ in enumerate(self.iter(fields, **kwargs)):
if count:
table.cell(i)
for field in fields:
table.cell(self.get(field))
for temp in files:
path = self.get(temp)
if os.path.exists(path):
table.cell(present)
else:
table.cell(absent)
return table
def show_in_finder(self, temp, **kwargs):
"Reveal the file corresponding to the ``temp`` template in the Finder."
fname = self.get(temp, **kwargs)
subprocess.call(["open", "-R", fname])
def rename(self, old, new, exclude=False):
"""Rename all files corresponding to a pattern (or template)
Parameters
----------
old : str
Template for the files to be renamed. Can interpret '*', but will
raise an error in cases where more than one file fits the pattern.
new : str
Template for the new names.
Examples
--------
The following command will collect a specific file for each subject and
place it in a common folder:
>>> e.rename('info-file', '/some_other_place/{subject}_info.txt')
"""
new = self.expand_template(new)
files = []
for old_name in self.iter_temp(old, exclude):
if '*' in old_name:
matches = glob(old_name)
if len(matches) == 1:
old_name = matches[0]
elif len(matches) > 1:
err = ("Several files fit the pattern %r" % old_name)
raise ValueError(err)
if os.path.exists(old_name):
new_name = self.format(new)
files.append((old_name, new_name))
if not files:
print("No files found for %r" % old)
return
old_pf = os.path.commonprefix([pair[0] for pair in files])
new_pf = os.path.commonprefix([pair[1] for pair in files])
n_pf_old = len(old_pf)
n_pf_new = len(new_pf)
table = fmtxt.Table('lll')
table.cells('Old', '', 'New')
table.midrule()
table.caption("%s -> %s" % (old_pf, new_pf))
for old, new in files:
table.cells(old[n_pf_old:], '->', new[n_pf_new:])
print(table)
msg = "Rename %s files (confirm with 'yes')? " % len(files)
if input(msg) == 'yes':
for old, new in files:
dirname = os.path.dirname(new)
if not os.path.exists(dirname):
os.makedirs(dirname)
os.rename(old, new)
def rename_field(self, temp, field, old, new, exclude=False, **kwargs):
"""Change the value of one field in paths corresponding to a template
Parameters
----------
temp : str
Template name.
field : str
Field to change.
old : str
Old value.
new : str
New value.
kwargs :
``self.iter_temp`` arguments.
"""
items = [] # (tag, src, dst)
kwargs[field] = old
dst_kwa = {field: new}
for src in self.iter_temp(temp, exclude, ** kwargs):
dst = self.get(temp, **dst_kwa)
if os.path.exists(src):
if os.path.exists(dst):
tag = 'o'
else:
tag = ' '
else:
tag = 'm'
items.append((tag, src, dst))
src_prefix = os.path.commonprefix(tuple(item[1] for item in items))
dst_prefix = os.path.commonprefix(tuple(item[2] for item in items))
src_crop = len(src_prefix)
dst_crop = len(dst_prefix)
# print info
if src_prefix == dst_prefix:
lines = ['in ' + src_prefix, '']
else:
lines = [src_prefix, '->' + dst_prefix, '']
for tag, src, dst in items:
lines.append('%s %s -> %s' % (tag, src[src_crop:], dst[dst_crop:]))
lines.append('')
msg = 'Legend m: source is missing; o: will overwrite a file'
lines.append(msg)
print('\n'.join(lines))
rename = tuple(item for item in items if item[0] == ' ')
if not rename:
return
msg = "Rename %i files (confirm with 'yes')? " % len(rename)
if input(msg) != 'yes':
return
for _, src, dst in rename:
os.rename(src, dst)
print("Done")
def rm(self, temp, inclusive=False, confirm=False, **constants):
"""Remove all files corresponding to a template
Asks for confirmation before deleting anything. Uses glob, so
individual templates can be set to '*'.
Parameters
----------
temp : str
Name of the path template for which to find and delete files.
inclusive : bool
Treat all unspecified fields as ``*`` (default False).
confirm : bool
Confirm removal of the selected files. If False (default) the user
is prompted for confirmation with a list of files; if True, the
files are removed immediately.
**others** :
Set field values (values can be '*' to match all).
See Also
--------
glob : Find all files matching a template.
copy : Copy files
move : Move files.
"""
files = self.glob(temp, inclusive, **constants)
secondary_files = []
for stemp in self._secondary_cache[temp]:
secondary_files.extend(self.glob(stemp, inclusive, **constants))
options = {'yes': 'delete files', 'no': "don't delete files (default)"}
if files or secondary_files:
print("root: %s\n" % self.get('root'))
print('\n'.join(self._remove_root(files)))
is_dir = [os.path.isdir(path) for path in files]
# Confirm deletion
if not confirm:
n_dirs = sum(is_dir)
n_files = len(files) - n_dirs
desc = []
if n_dirs:
desc.append(n_of(n_dirs, 'directory'))
if n_files:
desc.append(n_of(n_files, 'file'))
if secondary_files:
desc.append(n_of(len(secondary_files), 'secondary file'))
info = f"Delete {enumeration(desc)}?"
# Confirm if deleting files not in managed space
safe_root = self.get(self._safe_delete)
n_unsafe = len(files) - sum(path.startswith(safe_root) for path in files)
if n_unsafe:
info += f"\n!\n! {plural('item', n_unsafe)} outside of {self._safe_delete}\n!"
if ask(info, options, allow_empty=True) != 'yes':
print('aborting...')
return
print('deleting...')
dirs = (p for p, isdir in zip(files, is_dir) if isdir)
files = (p for p, isdir in zip(files, is_dir) if not isdir)
for path in dirs:
shutil.rmtree(path)
for path in chain(files, secondary_files):
os.remove(path)
else:
print("No files found for %r" % temp)
def _remove_root(self, paths):
root = self.get('root')
root_len = len(root)
return (path[root_len:] if path.startswith(root) else path
for path in paths)
```
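`LayeredDict` is the piece that gives `TreeModel` its temporary-state behavior: an ordinary dict plus a stack of stored snapshots. A quick sketch (import path assumed from the file header above):

```python
# Sketch of the snapshot stack; import path assumed from the module shown above.
from eelbrain._experiment.experiment import LayeredDict

d = LayeredDict()
d['subject'] = 'S01'
d.store_state()                           # push a snapshot of the current values

d['subject'] = 'S02'
print(d['subject'])                       # 'S02'
print(d.get_stored('subject', level=0))   # 'S01', read from the stored snapshot

d.restore_state()                         # pop back to the snapshot
print(d['subject'])                       # 'S01'
```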
#### File: _experiment/tests/test_definitions.py
```python
from eelbrain._experiment.definitions import find_dependent_epochs, find_epoch_vars, find_epochs_vars
from eelbrain._experiment.test_def import find_test_vars
from eelbrain._experiment.variable_def import Variables
def test_find_epoch_vars():
assert find_epoch_vars({'sel': "myvar == 'x'"}) == {'myvar'}
assert find_epoch_vars({'post_baseline_trigger_shift': "myvar"}) == {'myvar'}
epochs = {'a': {'sel': "vara == 'a'"},
'b': {'sel': "logical_and(varb == 'b', varc == 'c')"},
'sec': {'sel_epoch': 'a', 'sel': "svar == 's'"},
'super': {'sub_epochs': ('a', 'b')}}
assert find_epochs_vars(epochs) == {'a': {'vara'},
'b': {'logical_and', 'varb', 'varc'},
'sec': {'vara', 'svar'},
'super': {'vara', 'logical_and', 'varb', 'varc'}}
assert set(find_dependent_epochs('a', epochs)) == {'sec', 'super'}
assert find_dependent_epochs('b', epochs) == ['super']
assert find_dependent_epochs('sec', epochs) == []
assert find_dependent_epochs('super', epochs) == []
def test_find_test_vars():
assert find_test_vars({'kind': 'anova', 'model': "a % b % c", 'vars': Variables(None)}) == {'a', 'b', 'c'}
assert find_test_vars({'kind': 'two-stage', 'stage_1': "a + b + a*b", 'vars': Variables(None)}) == {'a', 'b'}
test_def = {'kind': 'two-stage',
'stage_1': "a + b + a*b",
'vars': Variables(("a = c * d", "b = c * e"))}
assert find_test_vars(test_def) == {'c', 'd', 'e'}
test_def = {'kind': 'two-stage',
'stage_1': "a + b + a*b",
'vars': Variables({'a': 'c * d',
'b': 'c * e',
'x': 'something * nonexistent'})}
assert find_test_vars(test_def) == {'c', 'd', 'e'}
test_def = {'kind': 'two-stage',
'stage_1': "a + b + a*b",
'vars': Variables({'a': ('c%d', {}),
'b': ('c%e', {})})}
assert find_test_vars(test_def) == {'c', 'd', 'e'}
```
#### File: _experiment/tests/test_experiment.py
```python
from itertools import product
import os
from eelbrain.testing import TempDir
from eelbrain._experiment import TreeModel, FileTree
class Tree(TreeModel):
_templates = dict(apath="/{afield}/",
afield=('a1', 'a2', 'a3'),
field2=('', 'value'))
def __init__(self, **kwargs):
TreeModel.__init__(self, **kwargs)
self._register_compound('cmp', ('afield', 'field2'))
def test_tree():
"Test simple formatting in the tree"
tree = Tree()
assert tree.get('apath') == '/a1/'
vs = []
for v in tree.iter('afield'):
vs.append(v)
assert tree.get('apath') == '/%s/' % v
tree.set(afield='a3')
assert tree.get('afield') == 'a3'
assert tree.get('apath') == '/a3/'
assert vs == ['a1', 'a2', 'a3']
assert tree.get('afield') == 'a1'
# test compound
assert tree.get('cmp') == 'a1'
tree.set(field2='value')
assert tree.get('cmp') == 'a1 value'
tree.set(field2='')
assert tree.get('cmp') == 'a1'
# test temporary state
with tree._temporary_state:
tree.set(afield='a2')
assert tree.get('afield') == 'a2'
assert tree.get('afield') == 'a1'
class SlaveTree(TreeModel):
_templates = {'path': '{a}_{b}_{sb_comp}_{slave}'}
def __init__(self, a_seq, b_seq, c_seq):
TreeModel.__init__(self)
self._register_field('a', a_seq)
self._register_field('b', b_seq, allow_empty=True)
self._register_field('c', c_seq)
self._register_compound('ab', ('a', 'b'))
self._register_slave_field('s', 'a', lambda f: f['a'].upper())
self._register_compound('sb', ('s', 'b'))
self._register_slave_field('comp_slave', 'sb', lambda f: f['sb'].upper())
# compound involving slave field
self._register_field('s_a', a_seq, depends_on='c', slave_handler=self._update_sa)
self._register_field('s_b', b_seq, depends_on='c', slave_handler=self._update_sb, allow_empty=True)
self._register_compound('s_ab', ('s_a', 's_b'))
self._store_state()
@staticmethod
def _update_sa(fields):
if fields['c'] == 'c1':
return 'a1'
else:
return 'a2'
@staticmethod
def _update_sb(fields):
if fields['c'] == 'c1':
return 'b1'
else:
return 'b2'
def test_slave_tree():
a_seq = ['a1', 'a2', 'a3']
b_seq = ['b1', 'b2', '']
c_seq = ['c1', 'c2']
ab_seq = [f'{a} {b}' if b else a for a, b in product(a_seq, b_seq)]
tree = SlaveTree(a_seq, b_seq, c_seq)
# set
assert tree.get('a') == 'a1'
tree.set(a='a2')
assert tree.get('a') == 'a2'
tree.set(ab='a1 b2')
assert tree.get('a') == 'a1'
assert tree.get('b') == 'b2'
tree.set(ab='a3')
assert tree.get('a') == 'a3'
assert tree.get('b') == ''
tree.reset()
assert tree.get('ab') == 'a1 b1'
assert tree.get('sb') == 'A1 b1'
assert tree.get('comp_slave') == 'A1 B1'
tree.set(a='a2')
assert tree.get('ab') == 'a2 b1'
assert tree.get('sb') == 'A2 b1'
assert tree.get('comp_slave') == 'A2 B1'
# compound involving slave field
tree.set(c='c2')
assert tree.get('s_ab') == 'a2 b2'
tree.set(c='c1')
assert tree.get('s_ab') == 'a1 b1'
    # find terminal keys
assert tree.find_keys('c') == ['c']
assert tree.find_keys('ab') == ['a', 'b']
# .iter()
assert list(tree.iter('a')) == a_seq
assert list(tree.iter(('a', 'b'))) == list(product(a_seq, b_seq))
assert list(tree.iter(('b', 'a'))) == list(product(b_seq, a_seq))
# iter compound
assert list(tree.iter('ab')) == ab_seq
assert list(tree.iter(('c', 'ab'))) == list(product(c_seq, ab_seq))
assert list(tree.iter('ab', values={'b': ''})) == a_seq
assert list(tree.iter('ab', b='')) == a_seq
def test_file_tree():
"Test file management tree"
class Tree(FileTree):
_templates = {'a-folder': '{root}/{folder}',
'a-file': '{a-folder}/{name}.txt',
'folder': ('f1', 'f2'),
'name': ('a1', 'a2', 'a3')}
def __init__(self, *args, **kwargs):
FileTree.__init__(self, *args, **kwargs)
self._bind_make('a-file', self._make_a)
def load_a(self):
with open(self.get('a-file', make=True)) as fid:
return fid.read()
def _make_a(self):
with open(self.get('a-file', mkdir=True), 'w') as fid:
fid.write(self.format("{folder} {name}"))
root = TempDir()
tree = Tree(root=root)
for _ in tree.iter_temp('a-file'):
assert tree.load_a() == tree.format("{folder} {name}")
for i, fname in enumerate(tree.iter_temp('a-file')):
assert os.path.exists(fname)
assert i == 5
assert tree._glob_pattern('a-file', True, folder='f1') == f'{root}/f1/*.txt'
tree.rm('a-file', name='*', folder='f1', confirm=True)
for fname in tree.iter_temp('a-file', folder='f1'):
assert fname[-6:-4] == tree.get('name')
assert not os.path.exists(fname)
for fname in tree.iter_temp('a-file', folder='f2'):
assert fname[-6:-4] == tree.get('name')
assert os.path.exists(fname)
```
#### File: eelbrain/_io/stc_dataset.py
```python
import os
import re
import glob
import itertools
from mne import read_source_estimate
from .._data_obj import Dataset, Factor
from .fiff import stc_ndvar
class DatasetSTCLoader:
"""
Load source estimates on disk into Dataset for use in statistical tests
Parameters
----------
data_dir : str
Path to directory containing stc files
Attributes
----------
data_dir : str
Path to data directory
subjects : tuple of str
Subject IDs extracted from stc filenames
factors : tuple of str
Names of experimental factors
levels : tuple of tuple of str
Names of levels of each factor in ``factors``
Notes
-----
When instantiated, the loader will automatically do level detection
based on .stc filenames. The user must explicitly set the factor
names with :meth:`DatasetSTCLoader.set_factor_names`. The dataset
may then be loaded via :meth:`DatasetSTCLoader.make_dataset`.
Examples
--------
>>> loader = DatasetSTCLoader("path/to/exported/stcs")
>>> loader.set_factor_names(["factor1", "factor2"])
>>> ds = loader.make_dataset(subjects_dir="mri/")
See Also
--------
eelbrain.gui.load_stcs : a GUI to load source estimates into a Dataset
"""
def __init__(self, data_dir):
if not os.path.exists(data_dir):
raise ValueError("Directory '%s' not found." % data_dir)
self.data_dir = data_dir
self.subjects = None
self.levels = None
self.factors = None
self._n_factors = None
self._level_lens = None
self._find_subjects()
self._find_level_names()
def __repr__(self):
tmp = "<DatasetSTCLoader: {} subjects | {} design>"
return tmp.format(len(self.subjects), self.design_shape)
def _all_stc_filenames(self):
return glob.glob(os.path.join(self.data_dir, "*", "*.stc"))
def _find_subjects(self):
pattern = re.compile(r"[AR]\d{4}")
stcs = self._all_stc_filenames()
subjects = set(pattern.search(s).group() for s in stcs)
self.subjects = tuple(subjects)
def _find_level_names(self):
stcs = self._all_stc_filenames()
if not stcs:
raise ValueError("No .stc files in sub-directories")
# condition names should be lowest level folder
cond_dirs = list(set(s.split(os.sep)[-2] for s in stcs))
# set number of factors based on first full condition name
self._n_factors = len(cond_dirs[0].split("_"))
splits = (c.split("_") for c in cond_dirs)
# transpose to group level names by factor; keep unique
cond_sets = list(map(set, zip(*splits)))
self.levels = tuple(tuple(c) for c in cond_sets) # list of tuples, not sets
self._level_lens = [len(lev) for lev in self.levels]
def set_factor_names(self, factors):
"""
Set names of experimental factors
Parameters
----------
factors : list of str | tuple of str
Factor names. Length must match the number of factors detected
from stc filenames.
"""
if not self.levels:
raise RuntimeError("No level names were detected from "
"the files in the data directory.")
if len(factors) != self._n_factors:
msg = ("There were %d factors detected, but %d factor "
"names provided." % (self._n_factors, len(factors)))
raise ValueError(msg)
self.factors = tuple(factors)
@property
def design_shape(self):
"""Shape of experiment design, e.g. '2 x 3'"""
if self.levels is None:
return None
des = " x ".join(map(str, self._level_lens))
        if len(self._level_lens) == 1:
des = "1 x {}".format(des)
return des
def make_dataset(self, load_stcs=True, subject="fsaverage",
src="ico-4", **stc_kwargs):
"""
Load stcs into a Dataset with columns for subject and experimental factors
Dataset contains one case per condition per subject, and source estimates
loaded as an NDVar. Any additional keyword arguments are passed to
:meth:`eelbrain.load.fiff.stc_ndvar`. If ``SUBJECTS_DIR`` is not set in your
environment, it should be provided here.
Parameters
----------
load_stcs : bool
Whether to include stc data in dataset. Only False when testing
on unreadable stc files.
subject : str
Subject ID of brain to which the source estimates belong;
default: 'fsaverage'
src : str
Source space surface decimation; default 'ico-4'
Returns
-------
ds : eelbrain.Dataset
Dataset with columns 'subject' (random factor), 'src' (NDVar of stc data),
and one Factor for each item in ``self.factors``.
"""
rows = itertools.product(self.subjects, *self.levels)
columns = map(Factor, zip(*rows))
col_names = ["subject"] + list(self.factors)
ds = Dataset(zip(col_names, columns))
ds["subject"].random = True
stc_fnames = []
for c in ds.itercases():
folder = "_".join(c[i] for i in self.factors)
exp = "{}/{}/{}*-lh.stc".format(
self.data_dir, folder, c["subject"])
fnames = glob.glob(exp)
assert len(fnames) == 1
stc_fnames.append(fnames[0])
if load_stcs:
stcs = list(map(read_source_estimate, stc_fnames))
ds["src"] = stc_ndvar(stcs, subject=subject, src=src, **stc_kwargs)
return ds
```
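As a reading aid, this is a sketch of the on-disk layout that the level detection in ``_find_subjects`` and ``_find_level_names`` above assumes; the condition folders and subject IDs are invented.
```python
# path/to/exported/stcs/
#     noun_short/                      # lowest-level folder: factor levels joined by '_'
#         R1234_noun_short-lh.stc      # subject IDs must match the regex [AR]\d{4}
#         R1234_noun_short-rh.stc
#         A0001_noun_short-lh.stc
#     noun_long/
#     verb_short/
#     verb_long/
loader = DatasetSTCLoader("path/to/exported/stcs")  # detects 4 conditions -> 2 factors
loader.set_factor_names(["word_type", "duration"])
ds = loader.make_dataset(subjects_dir="mri/")       # one case per subject x condition
```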
#### File: load/tests/test_fiff.py
```python
import os
from warnings import catch_warnings, filterwarnings
import mne
from mne import pick_types
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from eelbrain import load
from eelbrain.testing import assert_dataobj_equal, requires_mne_sample_data, file_path
FILTER_WARNING = 'The measurement information indicates a low-pass frequency of 40 Hz.'
@requires_mne_sample_data
def test_load_fiff_mne():
data_path = mne.datasets.sample.data_path()
fwd_path = os.path.join(data_path, 'MEG', 'sample', 'sample-ico-4-fwd.fif')
evoked_path = os.path.join(data_path, 'MEG', 'sample',
'sample_audvis-no-filter-ave.fif')
cov_path = os.path.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
mri_sdir = os.path.join(data_path, 'subjects')
mne_evoked = mne.read_evokeds(evoked_path, 'Left Auditory')
mne_fwd = mne.read_forward_solution(fwd_path)
mne_fwd = mne.convert_forward_solution(mne_fwd, force_fixed=True, use_cps=True)
cov = mne.read_cov(cov_path)
picks = mne.pick_types(mne_evoked.info, 'mag')
channels = [mne_evoked.ch_names[i] for i in picks]
mne_evoked = mne_evoked.pick_channels(channels)
mne_fwd = mne.pick_channels_forward(mne_fwd, channels)
cov = mne.pick_channels_cov(cov, channels)
mne_inv = mne.minimum_norm.make_inverse_operator(mne_evoked.info, mne_fwd,
cov, 0, None, True)
mne_stc = mne.minimum_norm.apply_inverse(mne_evoked, mne_inv, 1., 'MNE')
meg = load.fiff.evoked_ndvar(mne_evoked)
inv = load.fiff.inverse_operator(mne_inv, 'ico-4', mri_sdir)
stc = inv.dot(meg)
assert_array_almost_equal(stc.get_data(('source', 'time')), mne_stc.data)
fwd = load.fiff.forward_operator(mne_fwd, 'ico-4', mri_sdir)
reconstruct = fwd.dot(stc)
mne_reconstruct = mne.apply_forward(mne_fwd, mne_stc, mne_evoked.info)
assert_array_almost_equal(reconstruct.get_data(('sensor', 'time')),
mne_reconstruct.data)
def test_load_fiff_sensor():
umd_sqd_path = file_path('test_umd-raw.sqd')
raw = mne.io.read_raw_kit(umd_sqd_path)
sensor = load.fiff.sensor_dim(raw)
assert sensor.sysname == 'KIT-UMD-3'
@requires_mne_sample_data
def test_load_fiff_from_raw():
"Test loading data from a fiff raw file"
data_path = mne.datasets.sample.data_path()
meg_path = os.path.join(data_path, 'MEG', 'sample')
raw_path = os.path.join(meg_path, 'sample_audvis_filt-0-40_raw.fif')
evt_path = os.path.join(meg_path, 'sample_audvis_filt-0-40_raw-eve.fif')
# load events
ds = load.fiff.events(raw_path)
assert ds['i_start'].x.dtype.kind == 'i'
# compare with mne
ds_evt = load.fiff.events(events=evt_path)
ds = ds[np.arange(ds.n_cases) != 289] # mne is missing an event
assert_dataobj_equal(ds, ds_evt, name=False)
# add epochs as ndvar
ds = ds.sub('trigger == 32')
with catch_warnings():
filterwarnings('ignore', message=FILTER_WARNING)
ds_ndvar = load.fiff.add_epochs(ds, -0.1, 0.3, decim=10, data='mag',
proj=False, reject=2e-12)
meg = ds_ndvar['meg']
assert meg.ndim == 3
data = meg.get_data(('case', 'sensor', 'time'))
# compare with mne epochs
with catch_warnings():
filterwarnings('ignore', message=FILTER_WARNING)
ds_mne = load.fiff.add_mne_epochs(ds, -0.1, 0.3, decim=10, proj=False,
reject={'mag': 2e-12})
epochs = ds_mne['epochs']
# events
assert_array_equal(epochs.events[:, 1], 0)
assert_array_equal(epochs.events[:, 2], 32)
# data
picks = pick_types(epochs.info, meg='mag')
mne_data = epochs.get_data()[:, picks]
assert_array_equal(meg.sensor.names, [epochs.info['ch_names'][i] for i in picks])
assert_array_equal(data, mne_data)
assert_array_almost_equal(meg.time, epochs.times)
# with proj
with catch_warnings():
filterwarnings('ignore', message=FILTER_WARNING)
meg = load.fiff.epochs(ds, -0.1, 0.3, decim=10, data='mag', proj=True,
reject=2e-12)
epochs = load.fiff.mne_epochs(ds, -0.1, 0.3, decim=10, proj=True,
reject={'mag': 2e-12})
picks = pick_types(epochs.info, meg='mag')
mne_data = epochs.get_data()[:, picks]
assert_array_almost_equal(meg.x, mne_data, 10)
```
#### File: load/tests/test_txt.py
```python
from pathlib import Path
import numpy as np
from numpy.testing import assert_array_equal
from eelbrain import Dataset, datasets, load
from eelbrain.testing import TempDir, assert_dataobj_equal, assert_dataset_equal, file_path
def test_r_tsv_io():
"Test reading output of write.table"
path = file_path('r-write.table.txt')
ds = load.tsv(path, types={'row': 'f'})
assert_array_equal(ds['row'], ['1', '2'])
assert_array_equal(ds['participant'], [1, 1])
assert_array_equal(ds['condition'], ['3B', '3B'])
assert_array_equal(ds['bin'], [0, 0])
def test_tsv_io():
"""Test tsv I/O"""
tempdir = TempDir()
names = ['A', 'B', 'rm', 'intvar', 'fltvar', 'fltvar2', 'index']
ds = datasets.get_uv()
ds['fltvar'][5:10] = np.nan
ds[:4, 'rm'] = ''
# save and load
dst = Path(tempdir) / 'ds.txt'
ds.save_txt(dst)
ds1 = load.tsv(dst, random='rm')
assert_dataset_equal(ds1, ds, decimal=10)
ds1 = load.tsv(dst, skiprows=1, names=names, random='rm')
assert_dataset_equal(ds1, ds, decimal=10)
# delimiter
for delimiter in [' ', ',']:
ds.save_txt(dst, delimiter=delimiter)
ds1 = load.tsv(dst, delimiter=delimiter, random='rm')
assert_dataset_equal(ds1, ds, decimal=10)
# guess data types with missing
intvar2 = ds['intvar'].as_factor()
intvar2[10:] = ''
ds_intvar = Dataset((intvar2,))
ds_intvar.save_txt(dst)
ds_intvar1 = load.tsv(dst, empty='nan')
assert_dataobj_equal(ds_intvar1['intvar', :10], ds['intvar', :10])
assert_array_equal(ds_intvar1['intvar', 10:], np.nan)
# str with space
ds[:5, 'A'] = 'a 1'
ds.save_txt(dst)
ds1 = load.tsv(dst, random='rm')
assert_dataset_equal(ds1, ds, decimal=10)
ds.save_txt(dst, delimiter=' ')
ds1 = load.tsv(dst, delimiter=' ', random='rm')
assert_dataset_equal(ds1, ds, decimal=10)
# Fixed column width
path = file_path('fox-prestige')
ds = load.tsv(path, delimiter=' ', skipinitialspace=True)
assert ds[1] == {'id': 'GENERAL.MANAGERS', 'education': 12.26, 'income': 25879, 'women': 4.02, 'prestige': 69.1, 'census': 1130, 'type': 'prof'}
```
#### File: eelbrain/plot/_colors.py
```python
from collections.abc import Iterator
from itertools import product, chain
from math import ceil
import operator
import numpy as np
import matplotlib as mpl
from matplotlib.colors import LinearSegmentedColormap, Colormap, Normalize, to_rgb, to_rgba
from matplotlib.colorbar import ColorbarBase
from matplotlib.ticker import FixedFormatter, MaxNLocator
from .._colorspaces import LocatedListedColormap, oneway_colors, twoway_colors, symmetric_cmaps
from .._data_obj import Factor, Interaction, cellname
from .._utils import IS_WINDOWS
from ._base import EelFigure, Layout, AxisScale, fix_vlim_for_cmap
from functools import reduce
POINT_SIZE = 0.0138889 # 1 point in inches
LEGEND_SIZE = 1.2 # times font.size
def find_cell_colors(x, colors):
"""Process the colors arg from plotting functions
Parameters
----------
x : categorial
Model for which colors are needed.
colors : str | list | dict
Colors for the plots if multiple categories of data are plotted.
**str**: A colormap name; cells are mapped onto the colormap in
regular intervals.
**list**: A list of colors in the same sequence as cells.
**dict**: A dictionary mapping each cell to a color.
Colors are specified as `matplotlib compatible color arguments
<http://matplotlib.org/api/colors_api.html>`_.
"""
if isinstance(colors, (list, tuple)):
cells = x.cells
if len(colors) < len(cells):
raise ValueError(f"colors={colors!r}: only {len(colors)} colors for {len(cells)} cells.")
return dict(zip(cells, colors))
elif isinstance(colors, dict):
for cell in x.cells:
if cell not in colors:
raise KeyError(f"{cell!r} not in colors")
return colors
elif colors is None:
return colors_for_categorial(x, cmap=colors)
else:
raise TypeError(f"colors={colors!r}")
def colors_for_categorial(x, hue_start=0.2, cmap=None):
"""Automatically select colors for a categorial model
Parameters
----------
x : categorial
Model defining the cells for which to define colors.
hue_start : 0 <= scalar < 1
First hue value (only for two-way or higher level models).
cmap : str (optional)
Name of a matplotlib colormap to use instead of default hue-based
colors (only used for one-way models).
Returns
-------
colors : dict {cell -> color}
Dictionary providing colors for the cells in x.
"""
if isinstance(x, Factor):
return colors_for_oneway(x.cells, hue_start, cmap=cmap)
elif isinstance(x, Interaction):
return colors_for_nway([f.cells for f in x.base], hue_start)
else:
raise TypeError(f"x={x!r}: needs to be Factor or Interaction")
def colors_for_oneway(cells, hue_start=0.2, light_range=0.5, cmap=None,
light_cycle=None, always_cycle_hue=False, locations=None):
"""Define colors for a single factor design
Parameters
----------
cells : sequence of str
Cells for which to assign colors.
hue_start : 0 <= scalar < 1 | sequence of scalar
First hue value (default 0.2) or list of hue values.
light_range : scalar | tuple of 2 scalar
Scalar that specifies the amount of lightness variation (default 0.5).
If positive, the first color is lightest; if negative, the first color
is darkest. A tuple can be used to specify exact end-points (e.g.,
``(1.0, 0.4)``). ``0.2`` is equivalent to ``(0.4, 0.6)``.
The ``light_cycle`` parameter can be used to cycle between light and
dark more than once.
cmap : str
Use a matplotlib colormap instead of the default color generation
algorithm. Name of a matplotlib colormap to use (e.g., 'jet'). If
specified, ``hue_start`` and ``light_range`` are ignored.
light_cycle : int
Cycle from light to dark in ``light_cycle`` cells to make nearby colors
more distinct (default cycles once).
always_cycle_hue : bool
Cycle hue even when cycling lightness. With ``False`` (default), hue
is constant within a lightness cycle.
locations : sequence of float
Locations of the cells on the color-map (all in range [0, 1]; default is
evenly spaced; example: ``numpy.linspace(0, 1, len(cells)) ** 0.5``).
Returns
-------
dict : {str: tuple}
Mapping from cells to colors.
"""
if isinstance(cells, Iterator):
cells = tuple(cells)
n = len(cells)
if cmap is None:
colors = oneway_colors(n, hue_start, light_range, light_cycle, always_cycle_hue, locations)
else:
cm = mpl.cm.get_cmap(cmap)
if locations is None:
imax = n - 1
locations = (i / imax for i in range(n))
colors = (cm(x) for x in locations)
return dict(zip(cells, colors))
def colors_for_twoway(x1_cells, x2_cells, hue_start=0.2, hue_shift=0., hues=None, lightness=None):
"""Define cell colors for a two-way design
Parameters
----------
x1_cells : tuple of str
Cells of the major factor.
x2_cells : tuple of str
Cells of the minor factor.
hue_start : 0 <= scalar < 1
First hue value.
hue_shift : 0 <= scalar < 1
Use that part of the hue continuum between categories to shift hue
within categories.
hues : list of scalar
List of hue values corresponding to the levels of the first factor
(overrides regular hue distribution).
lightness : scalar | list of scalar
If specified as scalar, colors will occupy the range
``[lightness, 100-lightness]``. Can also be given as list with one
value corresponding to each element in the second factor.
Returns
-------
dict : {tuple: tuple}
Mapping from cells to colors.
"""
n1 = len(x1_cells)
n2 = len(x2_cells)
if n1 < 2 or n2 < 2:
raise ValueError("Need at least 2 cells on each factor")
clist = twoway_colors(n1, n2, hue_start, hue_shift, hues, lightness)
return dict(zip(product(x1_cells, x2_cells), clist))
def colors_for_nway(cell_lists, hue_start=0.2):
"""Define cell colors for a two-way design
Parameters
----------
    cell_lists : sequence of tuple of str
List of the cells for each factor. E.g. for ``A % B``:
``[('a1', 'a2'), ('b1', 'b2', 'b3')]``.
hue_start : 0 <= scalar < 1
First hue value.
Returns
-------
dict : {tuple: tuple}
Mapping from cells to colors.
"""
if len(cell_lists) == 1:
return colors_for_oneway(cell_lists[0])
elif len(cell_lists) == 2:
return colors_for_twoway(cell_lists[0], cell_lists[1], hue_start)
elif len(cell_lists) > 2:
ns = tuple(map(len, cell_lists))
n_outer = reduce(operator.mul, ns[:-1])
n_inner = ns[-1]
# outer circle
hues = np.linspace(hue_start, 1 + hue_start, ns[0], False)
# subdivide for each level
distance = 1. / ns[0]
for n_current in ns[1:-1]:
new = []
d = distance / 3
for hue in hues:
new.extend(np.linspace(hue - d, hue + d, n_current))
hues = new
distance = 2 * d / (n_current - 1)
hues = np.asarray(hues)
hues %= 1
colors = twoway_colors(n_outer, n_inner, hues=hues)
return dict(zip(product(*cell_lists), colors))
else:
return {}
def single_hue_colormap(hue):
"""Colormap based on single hue
Parameters
----------
hue : matplotlib color
Base RGB color.
Returns
-------
colormap : matplotlib Colormap
Colormap from transparent to ``hue``.
"""
name = str(hue)
color = to_rgb(hue)
start = color + (0.,)
stop = color + (1.,)
return LinearSegmentedColormap.from_list(name, (start, stop))
def soft_threshold_colormap(cmap, threshold, vmax, subthreshold=None, symmetric=None):
"""Soft-threshold a colormap to make small values transparent
Parameters
----------
cmap : str
Base colormap.
threshold : scalar
Value at which to threshold the colormap (i.e., the value at which to
start the colormap).
vmax : scalar
Intended largest value of the colormap (used to infer the location of
the ``threshold``).
subthreshold : matplotlib color
Color of sub-threshold values (the default is the end or middle of
the colormap, depending on whether it is symmetric).
symmetric : bool
Whether the ``cmap`` is symmetric (ranging from ``-vmax`` to ``vmax``)
or not (ranging from ``0`` to ``vmax``). The default is ``True`` for
known symmetric colormaps and ``False`` otherwise.
Returns
-------
thresholded_cmap : matplotlib ListedColormap
Soft-thresholded colormap.
"""
assert vmax > threshold >= 0
cmap = mpl.cm.get_cmap(cmap)
if symmetric is None:
symmetric = cmap.name in symmetric_cmaps
colors = cmap(np.linspace(0., 1., cmap.N))
if subthreshold is None:
subthreshold_color = cmap(0.5) if symmetric else cmap(0)
else:
subthreshold_color = to_rgba(subthreshold)
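    # number of output colors: enlarge cmap.N so that the original colors cover
    # only the supra-threshold part of the value range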
n = int(round(vmax / ((vmax - threshold) / cmap.N)))
out_colors = np.empty((n, 4))
if symmetric:
i_threshold = int(ceil(cmap.N / 2))
out_colors[:i_threshold] = colors[:i_threshold]
out_colors[i_threshold:-i_threshold] = subthreshold_color
out_colors[-i_threshold:] = colors[-i_threshold:]
else:
out_colors[:-cmap.N] = subthreshold_color
out_colors[-cmap.N:] = colors
out = LocatedListedColormap(out_colors, cmap.name)
out.vmax = vmax
out.vmin = -vmax if symmetric else 0
out.symmetric = symmetric
return out
class ColorGrid(EelFigure):
"""Plot colors for a two-way design in a grid
Parameters
----------
row_cells : tuple of str
Cells contained in the rows.
column_cells : tuple of str
Cells contained in the columns.
colors : dict
Colors for cells.
size : scalar
Size (width and height) of the color squares (the default is to
scale them to fit the font size).
column_label_position : 'top' | 'bottom'
Where to place the column labels (default is 'top').
row_first : bool
Whether the row cell precedes the column cell in color keys. By
default this is inferred from the existing keys.
labels : dict (optional)
Condition labels that are used instead of the keys in ``row_cells`` and
``column_cells``.
shape : 'box' | 'line'
Shape for color samples (default 'box').
...
Also accepts :ref:`general-layout-parameters`.
Attributes
----------
column_labels : list of :class:`matplotlib.text.Text`
Column labels.
row_labels : list of :class:`matplotlib.text.Text`
Row labels.
"""
def __init__(self, row_cells, column_cells, colors, size=None,
column_label_position='top', row_first=None, labels=None,
shape='box', *args, **kwargs):
if row_first is None:
row_cell_0 = row_cells[0]
col_cell_0 = column_cells[0]
if (row_cell_0, col_cell_0) in colors:
row_first = True
elif (col_cell_0, row_cell_0) in colors:
row_first = False
else:
msg = ("Neither %s nor %s exist as a key in colors" %
((row_cell_0, col_cell_0), (col_cell_0, row_cell_0)))
raise KeyError(msg)
if size is None:
size = mpl.rcParams['font.size'] * LEGEND_SIZE * POINT_SIZE
layout = Layout(0, 1, 3, False, *args, **kwargs)
EelFigure.__init__(self, None, layout)
ax = self.figure.add_axes((0, 0, 1, 1), frameon=False)
ax.set_axis_off()
self._ax = ax
# reverse rows so we can plot upwards
row_cells = tuple(reversed(row_cells))
n_rows = len(row_cells)
n_cols = len(column_cells)
# color patches
for col in range(n_cols):
for row in range(n_rows):
if row_first:
cell = (row_cells[row], column_cells[col])
else:
cell = (column_cells[col], row_cells[row])
if shape == 'box':
patch = mpl.patches.Rectangle((col, row), 1, 1, fc=colors[cell],
ec='none')
ax.add_patch(patch)
elif shape == 'line':
y = row + 0.5
ax.plot([col, col + 1], [y, y], color=colors[cell])
else:
raise ValueError("shape=%r" % (shape,))
# prepare labels
if labels:
column_labels = [labels.get(c, c) for c in column_cells]
row_labels = [labels.get(c, c) for c in row_cells]
else:
column_labels = column_cells
row_labels = row_cells
# column labels
tilt_labels = any(len(label) > 1 for label in column_labels)
self.column_labels = []
if column_label_position == 'top':
y = n_rows + 0.1
va = 'bottom'
rotation = 40 if tilt_labels else 0
ymin = 0
ymax = self._layout.h / size
elif column_label_position == 'bottom':
y = -0.1
va = 'top'
rotation = -40 if tilt_labels else 0
ymax = n_rows
ymin = n_rows - self._layout.h / size
else:
raise ValueError(f"column_label_position={column_label_position!r}")
for col, label in enumerate(column_labels):
h = ax.text(col + 0.5, y, label, va=va, ha='left' if tilt_labels else 'center', rotation=rotation)
self.column_labels.append(h)
# row labels
x = n_cols + 0.1
self.row_labels = []
for row, label in enumerate(row_labels):
h = ax.text(x, row + 0.5, label, va='center', ha='left')
self.row_labels.append(h)
if size is not None:
self._ax.set_xlim(0, self._layout.w / size)
self._ax.set_ylim(ymin, ymax)
self._show()
def _tight(self):
# arbitrary default with equal aspect
self._ax.set_ylim(0, 1)
self._ax.set_xlim(0, 1 * self._layout.w / self._layout.h)
# draw to compute text coordinates
self.draw()
# find label bounding box
xmax = 0
ymax = 0
for h in chain(self.column_labels, self.row_labels):
bbox = h.get_window_extent()
if bbox.xmax > xmax:
xmax = bbox.xmax
xpos = h.get_position()[0]
if bbox.ymax > ymax:
ymax = bbox.ymax
ypos = h.get_position()[1]
xmax += 2
ymax += 2
# transform from display coordinates -> data coordinates
trans = self._ax.transData.inverted()
xmax, ymax = trans.transform((xmax, ymax))
# calculate required movement
_, ax_xmax = self._ax.get_xlim()
_, ax_ymax = self._ax.get_ylim()
xtrans = ax_xmax - xmax
ytrans = ax_ymax - ymax
# calculate the scale factor:
# new_coord = x * coord
# new_coord = coord + trans
# x = (coord + trans) / coord
scale = (xpos + xtrans) / xpos
scale_y = (ypos + ytrans) / ypos
if scale_y <= scale:
scale = scale_y
self._ax.set_xlim(0, ax_xmax / scale)
self._ax.set_ylim(0, ax_ymax / scale)
class ColorList(EelFigure):
"""Plot colors with labels
Parameters
----------
colors : dict
Colors for cells.
cells : tuple
Cells for which to plot colors (default is ``colors.keys()``).
labels : dict (optional)
Condition labels that are used instead of the keys in ``colors``. This
is useful if ``colors`` uses abbreviated labels, but the color legend
should contain more intelligible labels.
size : scalar
Size (width and height) of the color squares (the default is to
scale them to fit the font size).
h : 'auto' | scalar
Height of the figure in inches. If 'auto' (default), the height is
chosen to fit all labels.
...
Also accepts :ref:`general-layout-parameters`.
Attributes
----------
labels : list of :class:`matplotlib.text.Text`
Color labels.
"""
def __init__(self, colors, cells=None, labels=None, size=None, h='auto', *args, **kwargs):
if cells is None:
cells = tuple(colors.keys())
elif isinstance(cells, Iterator):
cells = tuple(cells)
if h == 'auto':
if size is None:
size = mpl.rcParams['font.size'] * LEGEND_SIZE * POINT_SIZE
h = len(cells) * size
elif size is None: # size = h / len(cells)
pass
else:
raise NotImplementedError("specifying size and h parameters together")
if labels is None:
labels = {cell: cellname(cell) for cell in cells}
elif not isinstance(labels, dict):
raise TypeError(f"labels={labels!r}")
layout = Layout(0, 1.5, 2, False, False, h, *args, **kwargs)
EelFigure.__init__(self, None, layout)
ax = self.figure.add_axes((0, 0, 1, 1), frameon=False)
ax.set_axis_off()
n = len(cells)
self.labels = []
for i, cell in enumerate(cells):
bottom = n - i - 1
y = bottom + 0.5
patch = mpl.patches.Rectangle((0, bottom), 1, 1, fc=colors[cell], ec='none', zorder=1)
ax.add_patch(patch)
h = ax.text(1.1, y, labels.get(cell, cell), va='center', ha='left', zorder=2)
self.labels.append(h)
ax.set_ylim(0, n)
ax.set_xlim(0, n * self._layout.w / self._layout.h)
self._draw_hooks.append(self.__update_frame)
self._ax = ax
self._show()
if IS_WINDOWS and self._has_frame:
self._frame.Fit()
def __update_frame(self):
if self._layout.w_fixed or not self._has_frame:
return
# resize figure to match legend
# (all calculation in pixels)
fig_bb = self.figure.get_window_extent()
x_max = max(h.get_window_extent().x1 for h in self.labels)
w0, h0 = self._frame.GetSize()
new_w = w0 + (x_max - fig_bb.x1) + 5
self._frame.SetSize((new_w, h0))
# adjust x-limits
n = len(self.labels)
ax_bb = self._ax.get_window_extent()
self._ax.set_xlim(0, n * ax_bb.width / ax_bb.height)
class ColorBar(EelFigure):
"""A color-bar for a matplotlib color-map
Parameters
----------
cmap : str | Colormap | array
Name of the color-map, or a matplotlib Colormap, or LUT.
vmin : scalar
Lower end of the scale mapped onto cmap.
vmax : scalar
Upper end of the scale mapped onto cmap.
label : bool | str
Label for the x-axis (default is the unit, or if no unit is provided
the name of the colormap).
label_position : 'left' | 'right' | 'top' | 'bottom'
Position of the axis label. Valid values depend on orientation.
label_rotation : scalar
Angle of the label in degrees (For horizontal colorbars, the default is
0; for vertical colorbars, the default is 0 for labels of 3 characters
and shorter, and 90 for longer labels).
clipmin : scalar
Clip the color-bar below this value.
clipmax : scalar
Clip the color-bar above this value.
orientation : 'horizontal' | 'vertical'
Orientation of the bar (default is horizontal).
unit : str
Unit for the axis to determine tick labels (for example, ``u'µV'`` to
label 0.000001 as '1').
contours : iterator of scalar (optional)
Plot contour lines at these values.
width : scalar
Width of the color-bar in inches.
ticks : {float: str} dict | sequence of float
Customize tick-labels on the colormap; either a dictionary with
tick-locations and labels, or a sequence of tick locations. To draw no
ticks, set to ``()``.
threshold : scalar
Set the alpha of values below ``threshold`` to 0 (as well as for
negative values above ``abs(threshold)``).
ticklocation : 'auto', 'top', 'bottom', 'left', 'right'
Where to place ticks and label.
background : matplotlib color
Background color (for colormaps including transparency).
...
Also accepts :ref:`general-layout-parameters`.
"""
def __init__(self, cmap, vmin=None, vmax=None, label=True, label_position=None,
label_rotation=None,
clipmin=None, clipmax=None, orientation='horizontal',
unit=None, contours=(), width=None, ticks=None, threshold=None,
ticklocation='auto', background='white', tight=True,
h=None, w=None, *args, **kwargs):
# get Colormap
if isinstance(cmap, np.ndarray):
if threshold is not None:
raise NotImplementedError("threshold parameter with cmap=<array>")
if cmap.max() > 1:
cmap = cmap / 255.
cm = mpl.colors.ListedColormap(cmap, 'LUT')
elif isinstance(cmap, Colormap):
cm = cmap
else:
cm = mpl.cm.get_cmap(cmap)
# prepare layout
if orientation == 'horizontal':
if h is None and w is None:
h = 1
ax_aspect = 4
elif orientation == 'vertical':
if h is None and w is None:
h = 4
ax_aspect = 0.3
else:
raise ValueError("orientation=%s" % repr(orientation))
layout = Layout(1, ax_aspect, 2, tight, False, h, w, *args, **kwargs)
EelFigure.__init__(self, cm.name, layout)
ax = self._axes[0]
# translate between axes and data coordinates
if isinstance(vmin, Normalize):
norm = vmin
else:
vmin, vmax = fix_vlim_for_cmap(vmin, vmax, cm)
norm = Normalize(vmin, vmax)
if isinstance(unit, AxisScale):
scale = unit
else:
scale = AxisScale(unit or 1, label)
# value ticks
if ticks is False:
tick_locs = ()
formatter = scale.formatter
elif isinstance(ticks, dict):
tick_locs = sorted(ticks)
formatter = FixedFormatter([ticks[t] for t in tick_locs])
else:
if ticks is None:
tick_locs = MaxNLocator(4)
else:
tick_locs = ticks
formatter = scale.formatter
if orientation == 'horizontal':
axis = ax.xaxis
contour_func = ax.axhline
else:
axis = ax.yaxis
contour_func = ax.axvline
if label is True:
label = scale.label or cm.name
if not label:
label = ''
# show only part of the colorbar
if clipmin is not None or clipmax is not None:
boundaries = norm.inverse(np.linspace(0, 1, cm.N + 1))
if clipmin is None:
start = None
else:
start = np.digitize(clipmin, boundaries, True)
# boundaries[start] = clipmin
if clipmax is None:
stop = None
else:
stop = np.digitize(clipmax, boundaries) + 1
# boundaries[stop-1] = clipmax
boundaries = boundaries[start:stop]
else:
boundaries = None
colorbar = ColorbarBase(ax, cm, norm, boundaries=boundaries, orientation=orientation, ticklocation=ticklocation, ticks=tick_locs, label=label, format=formatter)
# label position/rotation
if label_position is not None:
axis.set_label_position(label_position)
if label_rotation is not None:
axis.label.set_rotation(label_rotation)
if orientation == 'vertical':
if (label_rotation + 10) % 360 < 20:
axis.label.set_va('center')
elif orientation == 'vertical' and len(label) <= 3:
axis.label.set_rotation(0)
axis.label.set_va('center')
self._contours = [contour_func(c, c='k') for c in contours]
self._draw_hooks.append(self.__fix_alpha)
self._draw_hooks.append(self.__update_bar_tickness)
self._background = background
self._colorbar = colorbar
self._orientation = orientation
self._width = width
self._show()
def __fix_alpha(self):
# fix cmaps with alpha https://stackoverflow.com/q/15003353/166700
if self._background is not False:
lut = self._colorbar.solids.get_facecolor()
bg_color = to_rgb(self._background)
lut[:, :3] *= lut[:, 3:]
lut[:, :3] += (1 - lut[:, 3:]) * bg_color
lut[:, 3] = 1.
self._colorbar.solids.set_facecolor(lut)
return True
def _tight(self):
# make sure ticklabels have space
ax = self._axes[0]
if self._orientation == 'vertical' and not self._layout.w_fixed:
self.draw()
labels = ax.get_yticklabels()
# wmax = max(l.get_window_extent().width for l in labels)
x0 = min(l.get_window_extent().x0 for l in labels)
if x0 < 0:
w, h = self.figure.get_size_inches()
w -= (x0 / self._layout.dpi)
self.figure.set_size_inches(w, h, forward=True)
super(ColorBar, self)._tight()
def __update_bar_tickness(self):
# Override to keep bar thickness
if not self._width:
return
ax = self._axes[0]
x = (self._width, self._width)
x = self.figure.dpi_scale_trans.transform(x)
x = self.figure.transFigure.inverted().transform(x)
pos = ax.get_position()
xmin, ymin, width, height = pos.xmin, pos.ymin, pos.width, pos.height
if self._orientation == 'vertical':
if self._layout._margins_arg and 'right' in self._layout._margins_arg:
xmin += width - x[0]
width = x[0]
else:
if self._layout._margins_arg and 'top' in self._layout._margins_arg:
ymin += height - x[1]
height = x[1]
ax.set_position((xmin, ymin, width, height))
return True
```
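A brief usage sketch for the color helpers above; the cell names are invented and the import path simply mirrors this module's file path.
```python
from eelbrain.plot._colors import colors_for_oneway, colors_for_twoway, soft_threshold_colormap

one = colors_for_oneway(['low', 'mid', 'high'])             # {cell: color tuple}
two = colors_for_twoway(['a1', 'a2'], ['b1', 'b2', 'b3'])   # {('a1', 'b1'): color, ...}
# fade |values| below 1 to the subthreshold color; map is intended for vmax=3
cmap = soft_threshold_colormap('RdBu_r', threshold=1, vmax=3, symmetric=True)
```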
#### File: eelbrain/plot/_nuts.py
```python
class _plt_bin_nuts:
def __init__(self, ax, epoch, color='r', fill=False, hatch='//', **kwargs):
"""Plot a simple on/off nonuniform time series
Parameters
----------
ax : matplotlib axes
Target axes.
epoch : array
Array with fields 'start' and 'stop'.
kwargs :
axvspan keyword arguments.
"""
self._handles = []
for line in epoch:
start = line['start']
stop = line['stop']
h = ax.axvspan(start, stop, color=color, fill=fill, hatch=hatch, **kwargs)
self._handles.append(h)
```
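A minimal sketch of how ``_plt_bin_nuts`` might be called; the structured array content and the target axes are assumptions.
```python
import numpy as np
import matplotlib.pyplot as plt
from eelbrain.plot._nuts import _plt_bin_nuts

# structured array with 'start' and 'stop' fields, as the docstring above describes
epoch = np.array([(0.1, 0.3), (0.6, 0.9)], dtype=[('start', float), ('stop', float)])
fig, ax = plt.subplots()
_plt_bin_nuts(ax, epoch)  # hatched spans marking the 'on' intervals
```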
#### File: plot/tests/test_topo.py
```python
from matplotlib.backend_bases import KeyEvent
import numpy as np
import pytest
import wx
from eelbrain import datasets, plot, testnd
from eelbrain._utils import IS_WINDOWS
from eelbrain.testing import requires_mne_sample_data
from eelbrain._wxgui.testing import hide_plots
def test_plot_topomap():
"Test plot.Topomap"
ds = datasets.get_uts(utsnd=True)
topo = ds.eval('utsnd.summary(time=(0.075, 0.125))')
p = plot.Topomap(topo, ds=ds)
p.add_contour('V', 1, '#00FF00')
p.close()
p = plot.Topomap(topo, ds=ds, vmax=0.2, w=2)
p.close()
p = plot.Topomap(topo, 'A%B', ds=ds, axw=2)
p.close()
p = plot.Topomap(topo, ds=ds, sensorlabels=None)
p.close()
index = np.array([1, 3, 2])
p = plot.Topomap(topo[index], '.case', nrow=1, axh=2, h=2.4, axtitle=index)
p.close()
@requires_mne_sample_data
@hide_plots
def test_plot_topomap_mne():
"Test plot.Topomap with MNE data"
ds = datasets.get_mne_sample(sub=[0, 1], sns=True)
p = plot.Topomap(ds['meg'].summary(time=(.1, .12)), proj='left')
p.close()
# grad
ds = datasets.get_mne_sample(sub=[0], sns='grad')
with pytest.raises(NotImplementedError), pytest.warns(RuntimeWarning):
plot.Topomap('meg.sub(time=.1)', ds=ds)
@hide_plots
def test_plot_topo_butterfly():
"Test plot.TopoButterfly"
ds = datasets.get_uts(utsnd=True)
# single row
p = plot.TopoButterfly('utsnd', ds=ds)
p.set_time(0.2)
# t keypress on topomap
x, y = p.topo_axes[0].transAxes.transform((.5, .5))
event = KeyEvent('test', p.canvas, 't', x, y, wx.KeyEvent())
p._on_key_press(event)
p.close()
p = plot.TopoButterfly('utsnd', ds=ds, vmax=0.2, w=6)
p.close()
# multiple rows
p = plot.TopoButterfly('utsnd', 'A%B', ds=ds, w=6)
if not IS_WINDOWS:
assert (*p.figure.get_size_inches(),) == (6, 12)
# t keypress on topomaps
for ax in p.topo_axes:
x, y = ax.transAxes.transform((.5, .5))
event = KeyEvent('test', p.canvas, 't', x, y, wx.KeyEvent())
p._on_key_press(event)
p.close()
p = plot.TopoButterfly('utsnd', mark=[1, 2], ds=ds)
p.close()
p = plot.TopoButterfly('utsnd', mark=['1', '2'], ds=ds)
p.set_vlim(2)
assert p.get_vlim() == (-2.0, 2.0)
p.set_ylim(-1, 1)
assert p.get_ylim() == (-1.0, 1.0)
p.close()
@hide_plots
def test_plot_array():
"Test plot.TopoArray"
ds = datasets.get_uts(utsnd=True)
p = plot.TopoArray('utsnd', ds=ds)
assert repr(p) == "<TopoArray: utsnd>"
p.set_topo_t(0, 0.2)
p.close()
p = plot.TopoArray('utsnd', ds=ds, vmax=0.2, w=2)
p.close()
p = plot.TopoArray('utsnd', 'A%B', ds=ds, axw=4)
assert repr(p) == "<TopoArray: utsnd ~ A x B>"
p.close()
# results
res = testnd.ttest_ind('utsnd', 'A', ds=ds, pmin=0.05, tstart=0.1, tstop=0.3, samples=2)
p = plot.TopoArray(res)
assert repr(p) == "<TopoArray: a0, a1, a0 - a1>"
p.set_topo_t(0, 0.)
p.close()
```
#### File: eelbrain/plot/_utils.py
```python
from colormath.color_objects import sRGBColor, HSVColor
from colormath.color_conversions import convert_color
from matplotlib.colors import to_rgb
def adjust_hsv(color, h=0., s=0., v=0.):
hsv = convert_color(sRGBColor(*to_rgb(color)), HSVColor)
hsv.hsv_h += h
hsv.hsv_s += s
hsv.hsv_v += v
return convert_color(hsv, sRGBColor).get_value_tuple()
def set_hsv(color, h=None, s=None, v=None):
hsv = convert_color(sRGBColor(*to_rgb(color)), HSVColor)
if h is not None:
hsv.hsv_h = h
if s is not None:
hsv.hsv_s = s
if v is not None:
hsv.hsv_v = v
return convert_color(hsv, sRGBColor).get_value_tuple()
```
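A minimal sketch of the two helpers above, assuming colormath's ``HSVColor`` conventions for the hue, saturation, and value ranges.
```python
from eelbrain.plot._utils import adjust_hsv, set_hsv

darker_red = set_hsv('red', v=0.5)        # fix value -> roughly (0.5, 0.0, 0.0)
muted_blue = adjust_hsv('blue', s=-0.3)   # lower saturation relative to the input color
```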
#### File: Eelbrain/eelbrain/_result_plots.py
```python
from math import floor, log10
from os import makedirs
from os.path import basename, dirname, exists, expanduser, isdir, join
import matplotlib as mpl
import numpy as np
from . import fmtxt, plot, testnd
from .plot._base import POINT
from ._data_obj import combine
# usage: with mpl.rc_context(RC):
FONT = 'Helvetica'
RC = {
'figure.dpi': 300,
'savefig.dpi': 300,
'savefig.transparent': True,
# Font
'font.family': 'sans-serif',
'font.sans-serif': FONT,
'font.size': 9,
# make sure equations use same font
'mathtext.fontset': 'custom',
'font.cursive': FONT,
'font.serif': FONT,
# subplot
'figure.subplot.top': 0.95,
# legend
'legend.fontsize': 6,
'legend.frameon': False,
}
for key in mpl.rcParams:
if 'width' in key:
RC[key] = mpl.rcParams[key] * 0.5
class PlotDestDir:
"""Generate paths for saving plots in figure-specific subdirectories
Parameters
----------
root : str
Directory in which to save files.
pix_fmt : str
Pixel graphics format (default ``png``).
vec_fmt : str
Vector graphics format (default ``pdf``).
name : str
Name for the info report (default is ``basename(root)``).
"""
def __init__(self, root, pix_fmt='png', vec_fmt='pdf', name=None):
root = expanduser(root)
if not exists(root):
makedirs(root)
else:
assert isdir(root)
assert pix_fmt.isalnum()
assert vec_fmt.isalnum()
if name is None:
name = basename(root)
if not name:
name = basename(dirname(root))
self.root = root
self._pix_fmt = pix_fmt
self._vec_fmt = vec_fmt
self.pix = join(root, '%s.' + pix_fmt)
self.vec = join(root, '%s.' + vec_fmt)
self.mov = join(root, '%s.mov')
self.txt = join(root, '%s.txt')
self.name = name
self.report = fmtxt.Report(name)
self._active_section = [self.report]
def with_ext(self, ext):
"""Generate path template ``%s.{ext}``"""
assert ext.isalnum()
return join(self.root, '%s.' + ext)
def subdir(self, dirname, name=None):
"""PlotDestDir object for a sub-directory"""
return PlotDestDir(join(self.root, dirname), self._pix_fmt, self._vec_fmt, name)
# MARK: report
def section(self, heading, level=1):
if level <= 0:
raise ValueError("level=%r; must be >= 1, section 0 is the document")
elif level > len(self._active_section):
raise RuntimeError("Can't add section with level %i before adding "
"section with level %i" % (level, level - 1))
while len(self._active_section) > level:
self._active_section.pop(-1)
section = self._active_section[-1].add_section(heading)
self._active_section.append(section)
def info(self, content):
"""Add ``info_string`` to the info list"""
section = self._active_section[-1]
section.append(content)
def save_info(self, format='html'):
"""Save info to ``info.txt``"""
dst = join(self.root, self.name)
try:
getattr(self.report, 'save_' + format)(dst)
except AttributeError:
raise ValueError("format=%r; Invalid format" % (format,))
def cname(cid):
if isinstance(cid, tuple):
return '-'.join(map(str, cid))
else:
return str(cid)
class ClusterPlotter:
"""Make plots for spatio-temporal clusters
returned by :meth:`MneExperiment.load_result_plotter`
Parameters
----------
ds : Dataset
Dataset with the data on which the test is based.
res : NDTest
Test result object with spatio-temporal cluster test result.
colors : dict
Colors for plotting data in a ``{cell: color}`` dictionary.
dst : str
Directory in which to place results.
vec_fmt : str
Format for vector graphics (default 'pdf').
pix_fmt : str
Format for pixel graphics (default 'png').
labels : dict
Labels for data in a ``{cell: label}`` dictionary (the default is to
use cell names).
h : scalar
Plot height in inches (default 1.2).
rc : dict
Matplotlib rc-parameters dictionary (the default is optimized for the
default plot size ``h=1.2``).
Notes
-----
After loading a :class:`ClusterPlotter`, its ``rc``, ``colors``, ``labels``
and ``h`` attributes can be updated to create different plot layouts without
reloading the data.
"""
def __init__(self, ds, res, colors, dst, vec_fmt='pdf', pix_fmt='png',
labels=None, h=1.2, rc=None):
self.rc = RC.copy()
if rc is not None:
self.rc.update(rc)
self.ds = ds
self.res = res
self.colors = colors
self.labels = labels
self.h = h
self._dst = PlotDestDir(dst, pix_fmt, vec_fmt)
self._is_anova = isinstance(self.res, testnd.anova)
def _ids(self, ids):
if isinstance(ids, (float, int)):
return self._ids_for_p(ids)
elif isinstance(ids, dict):
if not self._is_anova:
raise TypeError("ids can not be dict for results other than ANOVA")
out = []
for effect, cids in ids.items():
if isinstance(cids, float):
out.extend(self._ids_for_p(cids, effect))
else:
out.extend((effect, cid) for cid in cids)
return out
else:
return ids
def _ids_for_p(self, p, effect=None):
"Find cluster IDs for clusters with p-value <= p"
if effect is None:
clusters = self.res.find_clusters(p)
else:
clusters = self.res.find_clusters(p, effect=effect)
clusters[:, 'effect'] = effect
if self._is_anova:
return list(zip(clusters['effect'], clusters['id']))
else:
return clusters['id']
def _get_clusters(self, ids):
return [self._get_cluster(cid) for cid in ids]
def _get_cluster(self, cid):
if self._is_anova:
effect, cid = cid
return self.res.cluster(cid, effect)
else:
return self.res.cluster(cid)
def plot_color_list(self, name, cells, w=None, colors=None):
if colors is None:
colors = self.colors
with mpl.rc_context(self.rc):
p = plot.ColorList(colors, cells, self.labels, w=w, show=False)
p.save(self._dst.vec % "colorlist %s" % name, transparent=True)
p.close()
def plot_color_grid(self, name, row_cells, column_cells):
with mpl.rc_context(self.rc):
p = plot.ColorGrid(row_cells, column_cells, self.colors, labels=self.labels)
p.save(self._dst.vec % "colorgrid %s" % name, transparent=True)
p.close()
def plot_clusters_spatial(self, ids, views, w=600, h=480, prefix=''):
"""Plot spatial extent of the clusters
Parameters
----------
ids : sequence | dict | scalar <= 1
IDs of the clusters that should be plotted. For ANOVA results, this
should be an ``{effect_name: id_list}`` dict. Instead of a list of
IDs a scalar can be provided to plot all clusters with p-values
smaller than this.
views : str | list of str | dict
            Can be a str or a list of str to use the same views for all clusters.
            A dict can have cluster IDs or labels as keys.
w, h : int
Size in pixels. The default (600 x 480) corresponds to 2 x 1.6 in
at 300 dpi.
prefix : str
Prefix to use for the image files (optional, can be used to
distinguish different groups of images sharing the same color-bars).
Notes
-----
The horizontal colorbar is 1.5 in wide, the vertical colorbar is 1.6 in
high.
"""
ids = self._ids(ids)
clusters = self._get_clusters(ids)
clusters_spatial = [c.sum('time') for c in clusters]
if isinstance(views, str):
views = (views,)
# vmax
vmin = min(c.min() for c in clusters_spatial)
vmax = max(c.max() for c in clusters_spatial)
abs_vmax = max(vmax, abs(vmin))
# anatomical extent
brain_colorbar_done = False
for cid, cluster in zip(ids, clusters_spatial):
name = cname(cid)
if prefix:
name = prefix + ' ' + name
for hemi in ('lh', 'rh'):
if not cluster.sub(source=hemi).any():
continue
brain = plot.brain.cluster(cluster, abs_vmax, views='lat',
background=(1, 1, 1), colorbar=False,
parallel=True, hemi=hemi, w=w, h=h)
for view in views:
brain.show_view(view)
                    brain.save_image(self._dst.pix % ' '.join((name, hemi, view)),
'rgba', True)
if not brain_colorbar_done:
with mpl.rc_context(self.rc):
label = "Sum of %s-values" % cluster.info['meas']
clipmin = 0 if vmin == 0 else None
clipmax = 0 if vmax == 0 else None
if prefix:
cbar_name = '%s cbar %%s' % prefix
else:
cbar_name = 'cbar %s'
h_cmap = 0.7 + POINT * mpl.rcParams['font.size']
p = brain.plot_colorbar(label, clipmin=clipmin, clipmax=clipmax,
width=0.1, h=h_cmap, w=1.5, show=False)
p.save(self._dst.vec % cbar_name % 'h', transparent=True)
p.close()
w_cmap = 0.8 + 0.1 * abs(floor(log10(vmax)))
p = brain.plot_colorbar(label, clipmin=clipmin, clipmax=clipmax,
width=0.1, h=1.6, w=w_cmap,
orientation='vertical', show=False)
p.save(self._dst.vec % cbar_name % 'v', transparent=True)
p.close()
brain_colorbar_done = True
brain.close()
def _get_data(self, model, sub, subagg):
"""Plot values in cluster
Parameters
----------
subagg : str
Index in ds: within index, collapse across other predictors.
"""
ds = self.ds
modelname = model
if sub:
ds = ds.sub(sub)
modelname += '[%s]' % sub
if subagg:
idx_subagg = ds.eval(subagg)
ds_full = ds.sub(np.invert(idx_subagg))
ds_agg = ds.sub(idx_subagg).aggregate("subject", drop_bad=True)
ds = combine((ds_full, ds_agg), incomplete='fill in')
ds['condition'] = ds.eval(model).as_factor()
model = 'condition'
modelname += '(agg %s)' % subagg
return ds, model, modelname
def plot_values(self, ids, model, ymax, ymin, dpi=300, sub=None,
subagg=None, cells=None, pairwise=False, colors=None,
prefix=None, w=None, filter=None, legend=False):
"""Plot values in cluster
Parameters
----------
ids : sequence | dict | scalar <= 1
IDs of the clusters that should be plotted. For ANOVA results, this
should be an ``{effect_name: id_list}`` dict. Instead of a list of
IDs a scalar can be provided to plot all clusters with p-values
smaller than this.
model : str
Model defining cells which to plot separately.
ymax : scalar
Top of the y-axis.
ymin : scalar
Bottom of the y axis.
dpi : int
Figure DPI.
sub : str
Only use a subset of the data.
subagg : str
Index in ds: within index, collapse across other predictors.
cells : sequence of cells in model
Modify visible cells and their order. Only applies to the barplot.
Does not affect filename.
pairwise : bool
Add pairwise tests to barplots.
colors : dict
Substitute colors (default are the colors provided at
initialization).
prefix : str
Prefix to use for the image files (optional, can be used to
distinguish different groups of images sharing the same color-bars).
w : scalar
UTS-stat plot width (default is ``2 * h``).
filter : Filter
Filter signal for display purposes (optional).
legend : bool
Plot a color legend.
"""
if w is None:
w = self.h * 2
ds, model, modelname = self._get_data(model, sub, subagg)
ids = self._ids(ids)
if colors is None:
colors = self.colors
src = ds['srcm']
n_cells = len(ds.eval(model).cells)
w_bar = (n_cells * 2 + 4) * (self.h / 12)
with mpl.rc_context(self.rc):
for cid in ids:
name = cname(cid)
if prefix:
name = prefix + ' ' + name
cluster = self._get_cluster(cid)
y_mean = src.mean(cluster != 0)
y_tc = src.mean(cluster.any('time'))
# barplot
p = plot.Barplot(
y_mean, model, 'subject', None, cells, pairwise, ds=ds,
trend=False, corr=None, title=None, frame=False,
yaxis=False, ylabel=False, colors=colors, bottom=ymin,
top=ymax, w=w_bar, h=self.h, xlabel=None, xticks=None,
tight=False, test_markers=False, show=False)
p.save(self._dst.vec % ' '.join((name, modelname, 'barplot')),
dpi=dpi, transparent=True)
p.close()
# time-course
if filter is not None:
y_tc = filter.filtfilt(y_tc)
p = plot.UTSStat(
y_tc, model, match='subject', ds=ds, error='sem',
colors=colors, title=None, axtitle=None, frame=False,
bottom=ymin, top=ymax, legend=None, ylabel=None,
xlabel=None, w=w, h=self.h, tight=False, show=False)
dt = y_tc.time.tstep / 2.
mark_start = cluster.info['tstart'] - dt
mark_stop = cluster.info['tstop'] - dt
p.add_vspan(mark_start, mark_stop, color='k', alpha=0.1, zorder=-2)
p.save(self._dst.vec % ' '.join((name, modelname, 'timecourse')),
dpi=dpi, transparent=True)
p.close()
# legend (only once)
if legend:
p.save_legend(self._dst.vec % (modelname + ' legend'),
transparent=True)
legend = False
```
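A minimal sketch of the ``PlotDestDir`` path templates above; the directory name and report text are assumptions.
```python
from eelbrain._result_plots import PlotDestDir

dst = PlotDestDir('~/results/cluster-plots')   # created if it does not exist
png_path = dst.pix % 'condition-A topomap'     # .../condition-A topomap.png
pdf_path = dst.vec % 'colorbar'                # .../colorbar.pdf
dst.section('Cluster 1')
dst.info('tstart=0.120 s, tstop=0.280 s')
dst.save_info()                                # writes the HTML report into the directory
```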
#### File: eelbrain/_stats/permutation.py
```python
from itertools import chain, repeat
from math import ceil, pi, sin
import random
import numpy as np
from .._data_obj import NDVar, Var, NestedEffect
from .._utils import intervals
from . import vector
# Keep local RNG independent of public RNG
RNG = random.Random()
_YIELD_ORIGINAL = 0
# for testing purposes, yield original order instead of permutations
def _resample_params(N, samples):
"""Decide whether to do permutations or random resampling
Parameters
----------
N : int
Number of observations.
samples : int
``samples`` parameter (number of resampling iterations, or < 0 to
sample all permutations).
Returns
-------
actual_n_samples : int
Adapted number of resamplings that will be done.
samples_param : int
Samples parameter for the resample function (-1 to do all permutations,
otherwise same as n_samples).
"""
n_perm = 2 ** N
if n_perm - 1 <= samples:
samples = -1
if samples < 0:
n_samples = n_perm - 1
else:
n_samples = samples
return n_samples, samples
def permute_order(n, samples=10000, replacement=False, unit=None, rng=None):
"""Generator function to create indices to shuffle n items
Parameters
----------
n : int
Number of cases.
samples : int
Number of samples to yield. If < 0, all possible permutations are
performed.
replacement : bool
        Whether random samples should be drawn with replacement or without.
unit : categorial
Factor specifying unit of measurement (e.g. subject). If unit is
specified, resampling proceeds by first resampling the categories of
unit (with or without replacement) and then shuffling the values
within units (no replacement).
rng : numpy.random.RandomState
Random number generator. By default, a random state with seed 0 is used.
Returns
-------
Iterator over index.
"""
n = int(n)
samples = int(samples)
if samples < 0:
raise NotImplementedError("Complete permutation for resampling through reordering")
if _YIELD_ORIGINAL:
original = np.arange(n)
for _ in range(samples):
yield original
return
if rng is None:
rng = np.random.RandomState(0)
if unit is None or unit is False:
if replacement:
for _ in range(samples):
                yield rng.randint(n, size=n)
else:
index = np.arange(n)
for _ in range(samples):
rng.shuffle(index)
yield index
else:
if replacement:
raise NotImplementedError("Replacement and units")
idx_orig = np.arange(n)
idx_perm = np.empty_like(idx_orig)
unit_idxs = [np.flatnonzero(unit == cell) for cell in unit.cells]
if isinstance(unit, NestedEffect):
dst_idxs_iter = ((unit_idxs[i] for i in order)
for order in permute_order(len(unit_idxs), samples, rng=rng))
else:
dst_idxs_iter = repeat(unit_idxs, samples)
for dst_idxs in dst_idxs_iter:
for src, dst in zip(unit_idxs, dst_idxs):
v = idx_orig[src]
rng.shuffle(v)
idx_perm[dst] = v
yield idx_perm
def permute_sign_flip(n, samples=10000, rng=None, out=None):
"""Iterate over indices for ``samples`` permutations of the data
Parameters
----------
n : int
Number of cases.
samples : int
Number of samples to yield. If < 0, all possible permutations are
performed.
rng : random.Random
Random number generator.
out : array of int8 (n,)
Buffer for the ``sign`` variable that is yielded in each iteration.
Yields
------
sign : array of int8 (n,)
Sign for each case (``1`` or ``-1``; ``sign`` is the same array object
but its content modified in every iteration).
"""
n = int(n)
if rng is None and samples >= 0:
rng = random.Random(0)
if out is None:
out = np.empty(n, np.int8)
else:
assert out.shape == (n,)
if n > 62: # Python 2 limit for xrange
if samples < 0:
raise NotImplementedError("All possibilities for more than 62 cases")
n_groups = ceil(n / 62.)
group_size = int(ceil(n / n_groups))
out_parts = chain(range(0, n, group_size), [n])
for _ in zip(*(permute_sign_flip(stop - start, samples, rng, out[start: stop])
for start, stop in intervals(out_parts))):
yield out
return
# determine possible number of permutations
n_perm_possible = 2 ** n
if samples < 0:
# do all permutations
sample_sequences = range(1, n_perm_possible)
else:
# random resampling
sample_sequences = rng.sample(range(1, n_perm_possible), samples)
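    # each integer ``seq`` encodes one distinct sign-flip pattern: bit i of its
    # binary representation decides whether case i is flipped to -1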
for seq in sample_sequences:
out.fill(1)
for i in (i for i, s in enumerate(bin(seq)[-1:1:-1]) if s == '1'):
out[i] = -1
yield out
def resample(y, samples=10000, replacement=False, unit=None):
"""
Generator function to resample a dependent variable (y) multiple times
Parameters
----------
y : Var | NDVar
Variable which is to be resampled.
samples : int
Number of samples to yield. If < 0, all possible permutations are
performed.
replacement : bool
        Whether random samples should be drawn with replacement or without.
unit : categorial
Factor specifying unit of measurement (e.g. subject). If unit is
specified, resampling proceeds by first resampling the categories of
unit (with or without replacement) and then shuffling the values
within units (no replacement).
Returns
-------
Iterator over Y_resampled. One copy of ``y`` is made, and this copy is
yielded in each iteration with shuffled data.
"""
if isinstance(y, Var):
pass
elif isinstance(y, NDVar):
if not y.has_case:
raise ValueError("Need NDVar with cases")
else:
raise TypeError("Need Var or NDVar")
out = y.copy(f'{y.name}_resampled')
for index in permute_order(len(out), samples, replacement, unit):
out.x[index] = y.x
yield out
def random_seeds(samples):
"""Sequence of seeds for permutation based on random numbers
Parameters
----------
samples : int
Number of samples to yield.
Returns
-------
    seeds : array of uint32 (samples,)
        Array of random seeds, one per permutation.
"""
rng = np.random.RandomState(0)
return rng.randint(2**32, size=samples, dtype=np.uint32)
def _sample_xi_by_rejection(n, seed):
"""Return a sample (or samples) from the distribution p(x) = 2 * np.sin(x/2) ** 2 / pi
    See [1]_ for why samples from this distribution are required to sample
    random rotation matrices.
    .. [1] <NAME>. (1965). On random rotations in R^3. Biometrika, 52(3/4), 636-639.
Parameters
----------
n : int
Number of the samples.
seed : int
Seed for the random state.
Returns
-------
ndarray
samples drawn from the distribution
"""
RNG.seed(seed) # could lead to conflict with threading
samples = np.empty(n)
i = 0
while i < n:
z = RNG.random() * pi
u = RNG.random() * 2 / pi
if u <= 2 * sin(z / 2) ** 2 / pi:
samples[i] = z
i += 1
return samples
def rand_rotation_matrices(n: int, seed: int):
"""Function to create random rotation matrices in 3D
Parameters
----------
n : int
Number of rotation matrices to return.
seed : int
Seed the random state.
Returns
-------
rotation : array (n, 3, 3)
Sampled rotation matrices.
"""
rng = np.random.RandomState(seed)
phi = np.arccos(rng.uniform(-1, 1, n))
theta = rng.uniform(0, 2 * pi, n)
xi = _sample_xi_by_rejection(n, seed)
return vector.rotation_matrices(phi, theta, xi, np.empty((n, 3, 3)))
```
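A minimal, self-contained sketch of the bit-decoding idea used in ``permute_sign_flip`` above: each integer in ``[0, 2**n)`` encodes one pattern of sign flips, with set bits marking the cases whose sign is inverted. The function name below is illustrative and not part of the eelbrain API.
```python
import numpy as np

def sign_pattern(seq: int, n: int) -> np.ndarray:
    """Decode integer ``seq`` into a +1/-1 sign vector of length ``n``."""
    out = np.ones(n, np.int8)
    # bin(seq)[-1:1:-1] walks the bits from least to most significant,
    # skipping the leading '0b' prefix
    for i, bit in enumerate(bin(seq)[-1:1:-1]):
        if bit == '1':
            out[i] = -1
    return out

print(sign_pattern(5, 4))  # 5 = 0b101 -> [-1  1 -1  1]
```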
#### File: eelbrain/tests/test_data_opt.py
```python
from math import log, sqrt
import numpy as np
from numpy.testing import assert_allclose
from scipy.signal import gaussian
from eelbrain._data_opt import gaussian_smoother
def test_gaussian_smoother():
"Test gaussian_kernel function"
x, y = np.mgrid[:99, :99]
d = np.abs(x - y, dtype=np.float64)
d[9, 0] = d[0, 9] = -1
std = 40. / (2 * (sqrt(2 * log(2))))
g = gaussian_smoother(d, std)
# basic properties
assert g.shape == (99, 99)
# FWHM
assert g[0, 0] / 2 == g[0, 20]
assert g[9, 0] == 0
assert g[0, 9] == 0
# compare with scipy.signal gaussian
ref = gaussian(99, std)
ref /= ref.sum()
assert_allclose(g[49], ref)
```
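The ``std`` used in the test above comes from the standard FWHM-to-sigma conversion for a Gaussian, FWHM = 2 * sqrt(2 * ln 2) * sigma, so a kernel with an FWHM of 40 samples drops to half its peak value at a distance of 20. A quick stand-alone check (illustration only):
```python
from math import log, sqrt

import numpy as np

fwhm = 40.0
std = fwhm / (2 * sqrt(2 * log(2)))       # ~16.99 samples
d = np.arange(100, dtype=float)
g = np.exp(-d ** 2 / (2 * std ** 2))      # unnormalized Gaussian
print(g[20] / g[0])                       # 0.5: half maximum at fwhm / 2
```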
#### File: eelbrain/tests/test_examples.py
```python
import importlib.util
import logging
import os
from pathlib import Path
import re
from tempfile import TemporaryDirectory
import mne
import pytest
from eelbrain import configure
from eelbrain.testing import working_directory
DATASETS = {
'mne_sample': bool(mne.datasets.sample.data_path(download=False))
}
# find examples
examples_dir = Path(__file__).parents[2] / 'examples'
examples = list(examples_dir.glob('*/*.py'))
@pytest.mark.parametrize("path", examples)
def test_example(tmp_path, path: Path):
"Run the example script at ``filename``"
# check for flags
text = path.read_text()
if re.findall("^# skip test:", text, re.MULTILINE):
return
# check for required modules
required_modules = re.findall(r"^# requires: (\w+)$", text, re.MULTILINE)
for module in required_modules:
try:
importlib.import_module(module)
except ImportError:
pytest.skip(f"required module {module} not available")
# check for required datasets
required_datasets = re.findall(r"^# dataset: (\w+)$", text, re.MULTILINE)
for dataset in required_datasets:
if not DATASETS[dataset]:
            pytest.skip(f"required dataset {dataset} not available")
# set up context
configure(show=False)
with working_directory(tmp_path):
temp_dir = Path(tmp_path)
# link files
for file in path.parent.glob('*.*'):
if file.name.startswith(('.', '_')):
continue
elif file.name in text:
os.link(file, temp_dir / file.name)
# reduce computational load
text = text.replace("n_samples = 1000", "n_samples = 2")
# prepare example script
exa_path = temp_dir / path.name
exa_path.write_text(text)
logging.info(" Executing %s/%s", path.parent.name, path.name)
# execute example
spec = importlib.util.spec_from_file_location(exa_path.stem, exa_path)
example_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(example_module)
```
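For reference, an example script picked up by the test above declares its requirements through comment flags that the regular expressions look for; a ``# skip test:`` line would exclude the script entirely. The module and dataset names below are hypothetical:
```python
# requires: mne
# dataset: mne_sample
"""Hypothetical example script: the test harness links sibling data files into a
temporary directory, rewrites ``n_samples = 1000`` to 2, and executes the module."""
n_samples = 1000
```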
#### File: eelbrain/tests/test_ndvar.py
```python
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import pytest
from scipy import signal
from eelbrain import (
NDVar, Case, Scalar, UTS, datasets,
concatenate, convolve, correlation_coefficient, cross_correlation,
cwt_morlet, find_intervals, find_peaks, frequency_response, psd_welch,
resample, set_time,
)
from eelbrain.testing import assert_dataobj_equal, get_ndvar
def test_concatenate():
"""Test concatenate()
Concatenation of SourceSpace is tested in .test_mne.test_source_estimate
"""
ds = datasets.get_uts(True)
v0 = ds[0, 'utsnd']
v1 = ds[1, 'utsnd']
vc = concatenate((v1, v0))
assert_array_equal(vc.sub(time=(0, 1)).x, v1.x)
assert_array_equal(vc.sub(time=(1, 2)).x, v0.x)
assert_array_equal(vc.info, ds['utsnd'].info)
# scalar
psd = psd_welch(ds['utsnd'], n_fft=100)
v0 = psd.sub(frequency=(None, 5))
v1 = psd.sub(frequency=(45, None))
conc = concatenate((v0, v1), 'frequency')
assert_array_equal(conc.frequency.values[:5], psd.frequency.values[:5])
assert_array_equal(conc.frequency.values[5:], psd.frequency.values[45:])
conc_data = conc.get_data(v1.dimnames)
assert_array_equal(conc_data[:, :, 5:], v1.x)
# cat
x = get_ndvar(2, frequency=0, cat=4)
x_re = concatenate([x.sub(cat=(None, 'c')), x.sub(cat=('c', None))], 'cat')
assert_dataobj_equal(x_re, x)
def test_convolve():
# convolve is also tested in test_boosting.py
ds = datasets._get_continuous()
h1 = ds['h1']
h2 = ds['h2']
x1 = ds['x1']
xc = convolve(h1, x1)
xc_np = np.convolve(h1.x, x1.x)
assert_array_equal(xc.x, xc_np[:100])
# add dimension through kernel
xc = convolve(h2, x1)
xc_np = np.vstack((np.convolve(h2.x[0], x1.x)[:100],
np.convolve(h2.x[1], x1.x)[:100]))
assert_array_equal(xc.x, xc_np)
def test_correlation_coefficient():
ds = datasets.get_uts()
uts = ds['uts']
uts2 = uts.copy()
uts2.x += np.random.normal(0, 1, uts2.shape)
assert correlation_coefficient(uts, uts2) == pytest.approx(
np.corrcoef(uts.x.ravel(), uts2.x.ravel())[0, 1])
assert_allclose(
correlation_coefficient(uts[:10], uts2[:10], 'time').x,
[np.corrcoef(uts.x[i], uts2.x[i])[0, 1] for i in range(10)])
assert_allclose(
correlation_coefficient(uts[:, :-.1], uts2[:, :-.1], 'case').x,
[np.corrcoef(uts.x[:, i], uts2.x[:, i])[0, 1] for i in range(10)])
def test_cross_correlation():
ds = datasets._get_continuous()
x = ds['x1']
assert cross_correlation(x, x).argmax() == 0
assert cross_correlation(x[2:], x).argmax() == 0
assert cross_correlation(x[:9], x).argmax() == 0
assert cross_correlation(x, x[1:]).argmax() == 0
assert cross_correlation(x, x[:8]).argmax() == 0
assert cross_correlation(x[2:], x[:8]).argmax() == 0
def test_cwt():
ds = datasets._get_continuous()
# 1d
y = cwt_morlet(ds['x1'], [2, 3, 4])
assert y.ndim == 2
# 2d
y = cwt_morlet(ds['x2'], [2, 3, 4])
assert y.ndim == 3
def test_dot():
ds = datasets.get_uts(True)
# x subset of y
index = ['3', '2']
utsnd = ds['utsnd']
topo = utsnd.mean(('case', 'time'))
y1 = topo.sub(sensor=index).dot(utsnd.sub(sensor=index))
assert_dataobj_equal(topo[index].dot(utsnd), y1)
assert_dataobj_equal(topo.dot(utsnd.sub(sensor=index)), y1)
def test_find_intervals():
time = UTS(-5, 1, 10)
x = NDVar([0, 1, 0, 1, 1, 0, 1, 1, 1, 0], (time,))
assert find_intervals(x) == ((-4, -3), (-2, 0), (1, 4))
x = NDVar([0, 1, 0, 1, 1, 0, 1, 1, 1, 1], (time,))
assert find_intervals(x) == ((-4, -3), (-2, 0), (1, 5))
x = NDVar([1, 1, 0, 1, 1, 0, 1, 1, 1, 1], (time,))
assert find_intervals(x) == ((-5, -3), (-2, 0), (1, 5))
def test_find_peaks():
scalar = Scalar('scalar', range(9))
time = UTS(0, .1, 12)
v = NDVar(np.zeros((9, 12)), (scalar, time))
wsize = [0, 0, 1, 2, 3, 2, 1, 0, 0]
for i, s in enumerate(wsize):
if s:
v.x[i, 5 - s: 5 + s] += np.hamming(2 * s)
peaks = find_peaks(v)
x, y = np.where(peaks.x)
assert_array_equal(x, [4])
assert_array_equal(y, [5])
def test_frequency_response():
b_array = signal.firwin(80, 0.5, window=('kaiser', 8))
freqs_array, fresp_array = signal.freqz(b_array)
hz_to_rad = 2 * np.pi * 0.01
b = NDVar(b_array, (UTS(0, 0.01, 80),))
fresp = frequency_response(b)
assert_array_equal(fresp.x, fresp_array)
assert_array_equal(fresp.frequency.values * hz_to_rad, freqs_array)
b2d = concatenate((b, b), Case)
fresp = frequency_response(b2d)
assert_array_equal(fresp.x[0], fresp_array)
assert_array_equal(fresp.x[1], fresp_array)
assert_array_equal(fresp.frequency.values * hz_to_rad, freqs_array)
def test_mask():
ds = datasets.get_uts(True)
x = NDVar([1, 2, 3], Case)
assert x.mean() == 2.0
y = x.mask([True, False, False])
assert y.mean() == 2.5
# multi-dimensional
y = ds[:2, 'utsnd'].copy()
mask_x = y.time.times >= 0.500
mask_ndvar = NDVar(mask_x, y.time)
y_masked = y.mask(mask_ndvar)
assert_array_equal(y_masked.x.mask[:, :, 70:], True)
assert_array_equal(y_masked.x.mask[:, :, :70], False)
# mask that is smaller than array
mask = mask_ndvar.sub(time=(0.100, None))
with pytest.raises(TypeError):
y.mask(mask)
y_masked = y.mask(mask, missing=True)
assert_array_equal(y_masked.x.mask[:, :, 70:], True)
assert_array_equal(y_masked.x.mask[:, :, 30:70], False)
assert_array_equal(y_masked.x.mask[:, :, :30], True)
def test_resample():
x = NDVar([0.0, 1.0, 1.4, 1.0, 0.0], UTS(0, 0.1, 5)).mask([True, False, False, False, True])
y = resample(x, 20)
assert_array_equal(y.x.mask, [True, False, False, False, False, False, False, False, True, True])
y = resample(x, 20, npad=0)
assert_array_equal(y.x.mask, [True, False, False, False, False, False, False, False, True, True])
def test_set_time():
for x in [get_ndvar(2, 100, 0), get_ndvar(2, 100, 8)]:
x_sub = x.sub(time=(0.000, None))
assert x_sub.time.tmin == 0.000
x_pad = set_time(x_sub, x)
assert x_pad.time.tmin == -0.100
assert x_pad.x.ravel()[0] == 0
x_pad = set_time(x_sub, x, mode='edge')
assert x_pad.time.tmin == -0.100
assert x_pad.x.ravel()[0] == x_sub.x.ravel()[0]
def test_smoothing():
x = get_ndvar(2)
xt = NDVar(x.x.swapaxes(1, 2), [x.dims[i] for i in [0, 2, 1]], x.name, x.info)
# smoothing across time
ma = x.smooth('time', 0.2, 'blackman')
assert_dataobj_equal(x.smooth('time', window='blackman', window_samples=20), ma)
with pytest.raises(TypeError):
x.smooth('time')
with pytest.raises(TypeError):
x.smooth('time', 0.2, 'blackman', window_samples=20)
mas = xt.smooth('time', 0.2, 'blackman')
assert_allclose(ma.x, mas.x.swapaxes(1, 2), 1e-10)
ma_mean = x.mean('case').smooth('time', 0.2, 'blackman')
assert_allclose(ma.mean('case').x, ma_mean.x)
# against raw scipy.signal
window = signal.get_window('blackman', 20, False)
window /= window.sum()
window.shape = (1, 20, 1)
assert_array_equal(ma.x[:, 10:-10], signal.convolve(x.x, window, 'same')[:, 10:-10])
# mode parameter
full = signal.convolve(x.x, window, 'full')
ma = x.smooth('time', 0.2, 'blackman', mode='left')
assert_array_equal(ma.x[:], full[:, :100])
ma = x.smooth('time', 0.2, 'blackman', mode='right')
assert_array_equal(ma.x[:], full[:, 19:])
# fix_edges: smooth with constant sum
xs = x.smooth('frequency', window_samples=1, fix_edges=True)
assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'))
xs = x.smooth('frequency', window_samples=2, fix_edges=True)
assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)
xs = x.smooth('frequency', window_samples=3, fix_edges=True)
assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)
xs = x.smooth('frequency', window_samples=5, fix_edges=True)
assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)
xs = x.smooth('frequency', window_samples=4, fix_edges=True)
assert_dataobj_equal(xs.sum('frequency'), x.sum('frequency'), 14)
# gaussian
x = get_ndvar(2, frequency=0, sensor=5)
x.smooth('sensor', 0.1, 'gaussian')
x = get_ndvar(2, sensor=5)
x.smooth('sensor', 0.1, 'gaussian')
```
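The interval tuples asserted in ``test_find_intervals`` above can be reproduced with plain NumPy; the sketch below shows the underlying run-detection logic under the assumption of a uniform time axis (it is not the eelbrain implementation):
```python
import numpy as np

def nonzero_intervals(x, tmin, tstep):
    """Return (start, stop) times of contiguous nonzero runs in ``x``."""
    x = np.asarray(x, dtype=bool).astype(int)
    edges = np.diff(np.concatenate(([0], x, [0])))
    starts = np.flatnonzero(edges == 1)
    stops = np.flatnonzero(edges == -1)
    return tuple((tmin + tstep * a, tmin + tstep * b) for a, b in zip(starts, stops))

print(nonzero_intervals([0, 1, 0, 1, 1, 0, 1, 1, 1, 0], tmin=-5, tstep=1))
# ((-4, -3), (-2, 0), (1, 4))
```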
#### File: _trf/tests/test_boosting.py
```python
from itertools import product
from math import floor
import os
from warnings import catch_warnings, filterwarnings
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import pickle
import pytest
from pytest import approx
import scipy.io
from eelbrain import (
datasets, configure,
boosting, convolve, correlation_coefficient, epoch_impulse_predictor,
)
from eelbrain.testing import assert_dataobj_equal
from eelbrain._trf._boosting import boost, evaluate_kernel
from eelbrain._trf._boosting import convolve as boosting_convolve
def assert_res_equal(res1, res):
assert_array_equal(res1.h, res.h)
assert res1.r == res.r
assert res1.spearmanr == res.spearmanr
@pytest.mark.parametrize('n_workers', [0, True])
def test_boosting(n_workers):
"Test boosting NDVars"
ds = datasets._get_continuous(ynd=True)
configure(n_workers=n_workers)
y = ds['y']
ynd = ds['ynd']
x1 = ds['x1']
x2 = ds['x2']
y_mean = y.mean()
x2_mean = x2.mean('time')
# test values from running function, not verified independently
res = boosting(y, x1 * 2000, 0, 1, scale_data=False, mindelta=0.0025)
assert repr(res) == '<boosting y ~ x1, 0 - 1, scale_data=False, mindelta=0.0025>'
assert res.r == approx(0.75, abs=0.001)
assert res.y_mean is None
assert res.h.info['unit'] == 'V'
assert res.h_scaled.info['unit'] == 'V'
with pytest.raises(NotImplementedError):
res.proportion_explained
res = boosting(y, x1, 0, 1)
assert repr(res) == '<boosting y ~ x1, 0 - 1>'
assert res.r == approx(0.83, abs=0.001)
assert res.y_mean == y_mean
assert res.y_scale == y.std()
assert res.x_mean == x1.mean()
assert res.x_scale == x1.std()
assert res.h.name == 'x1'
assert res.h.info['unit'] == 'normalized'
assert res.h_scaled.name == 'x1'
assert res.h_scaled.info['unit'] == 'V'
assert res.proportion_explained == approx(0.506, abs=0.001)
# inplace
res_ip = boosting(y.copy(), x1.copy(), 0, 1, 'inplace')
assert_res_equal(res_ip, res)
# persistence
res_p = pickle.loads(pickle.dumps(res, pickle.HIGHEST_PROTOCOL))
assert_res_equal(res_p, res)
res = boosting(y, x2, 0, 1)
assert res.r == approx(0.601, abs=0.001)
assert res.proportion_explained == approx(0.273, abs=0.001)
res = boosting(y, x2, 0, 1, error='l1')
assert res.r == approx(0.553, abs=0.001)
assert res.y_mean == y.mean()
assert res.y_scale == (y - y_mean).abs().mean()
assert_array_equal(res.x_mean.x, x2_mean)
assert_array_equal(res.x_scale, (x2 - x2_mean).abs().mean('time'))
assert res.proportion_explained == approx(0.123, abs=0.001)
# 2 predictors
res = boosting(y, [x1, x2], 0, 1)
assert res.r == approx(0.947, abs=0.001)
# selective stopping
res = boosting(y, [x1, x2], 0, 1, selective_stopping=1)
assert res.r == approx(0.967, abs=0.001)
res = boosting(y, [x1, x2], 0, 1, selective_stopping=2)
assert res.r == approx(0.992, abs=0.001)
# prefit
res_full = boosting(y, [x1, x2], 0, 1)
prefit = boosting(y, x1, 0, 1)
res = boosting(y, [x1, x2], 0, 1, prefit=prefit)
assert correlation_coefficient(res.h, res_full.h[1]) == approx(0.984, 1e-3)
prefit = boosting(y, x2, 0, 1)
res = boosting(y, [x1, x2], 0, 1, prefit=prefit)
assert correlation_coefficient(res.h, res_full.h[0]) == approx(0.995, 1e-3)
# ynd
res_full = boosting(ynd, [x1, x2], 0, 1)
prefit = boosting(ynd, x1, 0, 1)
res = boosting(ynd, [x1, x2], 0, 1, prefit=prefit)
assert correlation_coefficient(res.h, res_full.h[1]) == approx(0.978, 1e-3)
prefit = boosting(ynd, x2, 0, 1)
res = boosting(ynd, [x1, x2], 0, 1, prefit=prefit)
assert correlation_coefficient(res.h, res_full.h[0]) == approx(0.997, 1e-3)
def test_boosting_epochs():
"""Test boosting with epoched data"""
ds = datasets.get_uts(True, vector3d=True)
p1 = epoch_impulse_predictor('uts', 'A=="a1"', name='a1', ds=ds)
p0 = epoch_impulse_predictor('uts', 'A=="a0"', name='a0', ds=ds)
p1 = p1.smooth('time', .05, 'hamming')
p0 = p0.smooth('time', .05, 'hamming')
# 1d
for tstart, basis in product((-0.1, 0.1, 0), (0, 0.05)):
print(f"tstart={tstart}, basis={basis}")
res = boosting('uts', [p0, p1], tstart, 0.6, model='A', ds=ds, basis=basis, partitions=10, debug=True)
y = convolve(res.h_scaled, [p0, p1])
assert correlation_coefficient(y, res.y_pred) > .999
r = correlation_coefficient(y, ds['uts'])
assert res.r == approx(r, abs=1e-3)
assert res.partitions == 10
# prefit
res1 = boosting('uts', p1, 0, 0.6, model='A', ds=ds, partitions=10)
res0 = boosting('uts', p0, 0, 0.6, model='A', ds=ds, partitions=10)
res01 = boosting('uts', [p0, p1], 0, 0.6, model='A', ds=ds, partitions=10, prefit=res1)
# 2d
res = boosting('utsnd', [p0, p1], 0, 0.6, model='A', ds=ds, partitions=10)
assert len(res.h) == 2
assert res.h[0].shape == (5, 60)
assert res.h[1].shape == (5, 60)
y = convolve(res.h_scaled, [p0, p1])
r = correlation_coefficient(y, ds['utsnd'], ('case', 'time'))
assert_dataobj_equal(res.r, r, decimal=3, name=False)
# vector
res = boosting('v3d', [p0, p1], 0, 0.6, error='l1', model='A', ds=ds, partitions=10)
assert res.residual.ndim == 0
def test_result():
"Test boosting results"
ds = datasets._get_continuous()
x1 = ds['x1']
# convolve function
y = convolve([ds['h1'], ds['h2']], [ds['x1'], ds['x2']])
assert_dataobj_equal(y, ds['y'], name=False)
# test prediction with res.h and res.h_scaled
res = boosting(ds['y'], ds['x1'], 0, 1)
y1 = convolve(res.h_scaled, ds['x1'])
x_scaled = ds['x1'] / res.x_scale
y2 = convolve(res.h, x_scaled)
y2 *= res.y_scale
y2 += y1.mean() - y2.mean() # mean can't be reconstructed
assert_dataobj_equal(y1, y2, decimal=12)
# reconstruction
res = boosting(x1, y, -1, 0, debug=True)
x1r = convolve(res.h_scaled, y)
assert correlation_coefficient(res.y_pred, x1r) > .999
assert correlation_coefficient(x1r[0.9:], x1[0.9:]) == approx(res.r, abs=1e-3)
# test NaN checks (modifies data)
ds['x2'].x[1, 50] = np.nan
with pytest.raises(ValueError):
boosting(ds['y'], ds['x2'], 0, .5)
with pytest.raises(ValueError):
boosting(ds['y'], ds['x2'], 0, .5, False)
ds['x2'].x[1, :] = 1
with catch_warnings():
filterwarnings('ignore', category=RuntimeWarning)
with pytest.raises(ValueError):
boosting(ds['y'], ds['x2'], 0, .5)
ds['y'].x[50] = np.nan
with pytest.raises(ValueError):
boosting(ds['y'], ds['x1'], 0, .5)
with pytest.raises(ValueError):
boosting(ds['y'], ds['x1'], 0, .5, False)
def test_boosting_func():
"Test boosting() against svdboostV4pred.m"
# 1d-TRF
path = os.path.join(os.path.dirname(__file__), 'test_boosting.mat')
mat = scipy.io.loadmat(path)
y = mat['signal'][0]
x = mat['stim']
x_pads = np.zeros(len(x))
y_len = len(y)
seg_len = int(y_len / 40)
all_segments = np.array([[0, seg_len], [seg_len, y_len]], np.int64)
train_segments = all_segments[1:]
test_segments = all_segments[:1]
h, test_sse_history = boost(y, x, x_pads, all_segments, train_segments, test_segments,
0, 10, 0.005, 0.005, 'l2', return_history=True)
test_seg_len = int(floor(x.shape[1] / 40))
y_pred = boosting_convolve(h, x[:, :test_seg_len], x_pads, 0)
r, rr, _ = evaluate_kernel(y[:test_seg_len], y_pred, 'l2', h.shape[1] - 1)
assert_array_equal(h, mat['h'])
assert r == approx(mat['crlt'][0, 0])
assert rr == approx(mat['crlt'][1, 0])
assert_allclose(test_sse_history, mat['Str_testE'][0])
# 2d-TRF
path = os.path.join(os.path.dirname(__file__), 'test_boosting_2d.mat')
mat = scipy.io.loadmat(path)
y = mat['signal'][0]
x = mat['stim']
x_pads = np.zeros(len(x))
h, test_sse_history = boost(y, x, x_pads, all_segments, train_segments, test_segments,
0, 10, 0.005, 0.005, 'l2', return_history=True)
test_seg_len = int(floor(x.shape[1] / 40))
y_pred = boosting_convolve(h, x[:, :test_seg_len], x_pads, 0)
r, rr, _ = evaluate_kernel(y[:test_seg_len], y_pred, 'l2', h.shape[1] - 1)
assert_array_equal(h, mat['h'])
assert r == approx(mat['crlt'][0, 0])
assert rr == approx(mat['crlt'][1, 0])
# svdboostV4pred multiplies error by number of predictors
assert_allclose(test_sse_history, mat['Str_testE'][0] / 3)
```
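A small NumPy sketch of the scaling identity exercised in ``test_result`` above: convolving the rescaled kernel is, by linearity, the same as rescaling the predictor and the prediction separately (the mean offset aside). Illustration only, not eelbrain code:
```python
import numpy as np

rng = np.random.RandomState(0)
h = rng.randn(10)           # normalized kernel
x = rng.randn(100)          # predictor
x_scale, y_scale = 2.0, 3.0

h_scaled = h * y_scale / x_scale
y1 = np.convolve(h_scaled, x)
y2 = np.convolve(h, x / x_scale) * y_scale
np.testing.assert_allclose(y1, y2, atol=1e-12)
```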
#### File: eelbrain/_utils/r_bridge.py
```python
import warnings
from rpy2.robjects import r
try:
from rpy2.rinterface import RRuntimeWarning
except ImportError: # rpy2 < 2.8
RRuntimeWarning = UserWarning
def r_require(package):
with r_warning_filter:
success = r('require(%s)' % package)[0]
if not success:
print(r("install.packages('%s', repos='http://cran.us.r-project.org')"
% package))
success = r('require(%s)' % package)[0]
if not success:
raise RuntimeError("Could not install R package %r" % package)
class RWarningFilter:
def __enter__(self):
self.context = warnings.catch_warnings()
self.context.__enter__()
warnings.filterwarnings('ignore', category=RRuntimeWarning)
def __exit__(self, exc_type, exc_val, exc_tb):
self.context.__exit__(exc_type, exc_val, exc_tb)
r_warning_filter = RWarningFilter()
```
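A typical use of the helpers above, assuming rpy2 and R are installed; the package name is just an example:
```python
from eelbrain._utils.r_bridge import r, r_require, r_warning_filter

# load (and, if needed, install) an R package with R warnings silenced
r_require('lme4')

# run arbitrary R code with the same warning filter
with r_warning_filter:
    values = r('rnorm(3)')
print(list(values))
```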
#### File: eelbrain/_utils/tex.py
```python
import os
import subprocess
import tempfile
def convert(tex_source, input_format, output_format, max_runs=5):
'''Convert LaTeX or TeX source to PDF or DVI.'''
# check arguments
assert isinstance(tex_source, str)
try:
(tex_cmd, output_suffix) = {
('tex', 'dvi'): ('tex', '.dvi'),
('latex', 'dvi'): ('latex', '.dvi'),
('tex', 'pdf'): ('pdftex', '.pdf'),
('latex', 'pdf'): ('pdflatex', '.pdf'),
}[(input_format, output_format)]
except KeyError:
raise ValueError('Unable to handle conversion: %s -> %s'
% (input_format, output_format))
if max_runs < 2:
raise ValueError('max_runs must be at least 2.')
# create temporary directory
with tempfile.TemporaryDirectory(prefix='tex-temp-') as tex_dir:
# create LaTeX source file
tex_filename = os.path.join(tex_dir, 'texput.tex')
with open(tex_filename, 'w') as fid:
fid.write(tex_source)
# run LaTeX processor as often as necessary
aux_old = None
for i in range(max_runs):
tex_process = subprocess.Popen(
[tex_cmd,
'-interaction=batchmode',
'-halt-on-error',
'-no-shell-escape',
tex_filename,
],
stdin=open(os.devnull, 'r'),
stdout=open(os.devnull, 'w'),
stderr=subprocess.STDOUT,
close_fds=True,
shell=False,
cwd=tex_dir,
env={'PATH': os.getenv('PATH')},
)
tex_process.wait()
if tex_process.returncode != 0:
with open(os.path.join(tex_dir, 'texput.log'), 'rb') as fid:
log = fid.read()
raise ValueError(log.decode())
with open(os.path.join(tex_dir, 'texput.aux'), 'rb') as fid:
aux = fid.read()
if aux == aux_old: # aux file stabilized
output_filename = os.path.join(tex_dir, 'texput' + output_suffix)
if not os.path.exists(output_filename):
raise RuntimeError('No output file was produced.')
else:
with open(output_filename, 'rb') as fid:
return fid.read()
aux_old = aux
# TODO:
# Also handle makeindex and bibtex,
# possibly in a similar manner as described in:
# http://vim-latex.sourceforge.net/documentation/latex-suite/compiling-multiple.html
raise RuntimeError("%s didn't stabilize after %i runs" % ('texput.aux', max_runs))
def tex2dvi(tex_source, **kwargs):
'''Convert TeX source to DVI.'''
return convert(tex_source, 'tex', 'dvi', **kwargs)
def latex2dvi(tex_source, **kwargs):
'''Convert LaTeX source to DVI.'''
return convert(tex_source, 'latex', 'dvi', **kwargs)
def tex2pdf(tex_source, **kwargs):
'''Convert TeX source to PDF.'''
return convert(tex_source, 'tex', 'pdf', **kwargs)
def latex2pdf(tex_source, **kwargs):
'''Convert LaTeX source to PDF.'''
return convert(tex_source, 'latex', 'pdf', **kwargs)
```
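A minimal use of the converter above; it assumes a working TeX installation (``pdflatex``) is available on ``PATH``:
```python
tex_source = r"""
\documentclass{article}
\begin{document}
Hello, \LaTeX!
\end{document}
"""

pdf_bytes = latex2pdf(tex_source)
with open('hello.pdf', 'wb') as fid:
    fid.write(pdf_bytes)
```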
#### File: _utils/ui/__init__.py
```python
import os
USE_WX = True
_progress_monitors = []
def get_ui():
global USE_WX
if USE_WX:
try:
from . import wx_ui
from ..._wxgui import get_app
get_app()
return wx_ui
except ImportError:
USE_WX = False
from . import tk_ui
return tk_ui
def ask_saveas(title="Save File", message="Please Pick a File Name",
filetypes=(("Pickled Python Objects (*.pickled)", '*.pickled'),),
defaultDir=None, defaultFile=False):
"""Display a save-as dialog
Parameters
----------
title : str
Title of the dialog.
message : str
Message in the dialog.
filetypes : sequence of tuples
Sequence of (label, pattern) tuples.
defaultDir : None | str
Default directory to save at.
defaultFile : None | str
Default file name.
Returns
-------
result : False | str
        The path as str if the user selects a file, otherwise ``False``.
"""
result = get_ui().ask_saveas(title, message, filetypes, defaultDir,
defaultFile)
return result
def ask_dir(title="Select Folder",
message="Please Pick a Folder",
must_exist=True):
return get_ui().ask_dir(title, message, must_exist)
def ask_file(title="Pick File",
message="Please Pick a File",
filetypes=[("All files", '*')],
directory='',
mult=False):
"""
Ask for an existing file.
Parameters
----------
title, message : str
Title and message for the dialog.
filetypes : sequence of tuples
Sequence of (label, pattern) tuples.
directory : None | str
Path to initial directory.
mult : bool
Allow selecting multiple files.
Returns
-------
paths : False | str | list
If the user cancels: False. Otherwise, if mult=False a single path, and
if mult=True a list of paths.
"""
return get_ui().ask_file(title, message, filetypes, directory, mult)
def ask(title="Overwrite File?",
message="Duplicate filename. Do you want to overwrite?",
cancel=False,
default=True, # True=YES, False=NO, None=Nothing
):
return get_ui().ask(title, message, cancel, default)
def ask_color(default=(0, 0, 0)):
return get_ui().ask_color(default)
def ask_str(msg, title, default=''):
return get_ui().ask_str(msg, title, default)
def kill_progress_monitors():
while len(_progress_monitors) > 0:
p = _progress_monitors.pop()
p.terminate()
def message(title, message="", icon='i'):
return get_ui().message(title, message, icon)
def copy_file(path):
return get_ui().copy_file(path)
def copy_text(text):
return get_ui().copy_text(text)
def test_targetpath(path, cancel=True):
"""Test whether ``path`` is okay to write to
If the directory does not exist, the user is asked whether it should be
created.
Parameters
----------
path : str
Path to test.
cancel : bool
Add a cancel button. If clicked, a KeyboardInterrupt Exception is
raised.
Returns
-------
success : bool
True if path is a valid path to write to, False otherwise.
"""
if not path:
return False
dirname = os.path.abspath(os.path.dirname(path))
if not os.path.exists(dirname):
msg = ("The directory %r does not exist. Should it be created?" % dirname)
answer = ask("Create Directory?", msg, cancel=cancel)
if answer:
os.makedirs(dirname)
elif answer is None: # cancel
err = ("User canceled because the directory %r does not exist"
% dirname)
raise KeyboardInterrupt(err)
return os.path.exists(dirname)
```
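Typical use of the functions above (a sketch; which dialog toolkit appears depends on whether wxPython is importable, with Tk as the fallback):
```python
# ask for a target path, then make sure its directory exists before writing
path = ask_saveas(title="Save result", message="Please pick a file name")
if path and test_targetpath(path):
    with open(path, 'wb') as fid:
        fid.write(b'...')
```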
#### File: _utils/ui/wx_ui.py
```python
from ..._wxgui import wx, get_app
def ask_saveas(title, message, filetypes, defaultDir, defaultFile):
"""See eelbrain.ui documentation"""
app = get_app()
return app.ask_saveas(title, message, filetypes, defaultDir, defaultFile)
def ask_dir(title="Select Folder", message="Please Pick a Folder", must_exist=True):
app = get_app()
return app.ask_for_dir(title, message, must_exist)
def ask_file(title, message, filetypes, directory, mult):
app = get_app()
return app.ask_for_file(title, message, filetypes, directory, mult)
def ask(title="Overwrite File?",
message="Duplicate filename. Do you want to overwrite?",
cancel=False,
default=True, # True=YES, False=NO, None=Nothing
):
style = wx.YES_NO | wx.ICON_QUESTION
if cancel:
style = style | wx.CANCEL
if default:
style = style | wx.YES_DEFAULT
elif default == False:
style = style | wx.NO_DEFAULT
dialog = wx.MessageDialog(None, message, title, style)
answer = dialog.ShowModal()
if answer == wx.ID_NO:
return False
elif answer == wx.ID_YES:
return True
elif answer == wx.ID_CANCEL:
return None
def ask_color(default=(0, 0, 0)):
dlg = wx.ColourDialog(None)
dlg.GetColourData().SetChooseFull(True)
if dlg.ShowModal() == wx.ID_OK:
data = dlg.GetColourData()
out = data.GetColour().Get()
out = tuple([o / 255. for o in out])
else:
out = False
dlg.Destroy()
return out
def ask_str(message, title, default=''):
app = get_app()
return app.ask_for_string(title, message, default)
def message(title, message="", icon='i'):
style = wx.OK
if icon == 'i':
style = style | wx.ICON_INFORMATION
elif icon == '?':
style = style | wx.ICON_QUESTION
elif icon == '!':
style = style | wx.ICON_EXCLAMATION
elif icon == 'error':
style = style | wx.ICON_ERROR
elif icon is None:
pass
else:
raise ValueError("Invalid icon argument: %r" % icon)
dlg = wx.MessageDialog(None, message, title, style)
dlg.ShowModal()
def copy_file(path):
if wx.TheClipboard.Open():
try:
data_object = wx.FileDataObject()
data_object.AddFile(path)
wx.TheClipboard.SetData(data_object)
except:
wx.TheClipboard.Close()
raise
else:
wx.TheClipboard.Close()
def copy_text(text):
if wx.TheClipboard.Open():
try:
data_object = wx.TextDataObject(text)
wx.TheClipboard.SetData(data_object)
except:
wx.TheClipboard.Close()
raise
else:
wx.TheClipboard.Close()
```
#### File: examples/meg/mne_sample_loader.py
```python
import eelbrain as eel
def load_evts(path):
"""Load events from the mne sample data as dataset
Parameters
----------
path : str
Path to the raw file.
Returns
-------
ds : dataset
Events from the raw file as dataset.
"""
# load the events in the raw file as a dataset
ds = eel.load.fiff.events(path, stim_channel='STI 014')
    # get the trigger variable from the dataset for easier access
trigger = ds['trigger']
# use trigger to add various labels to the dataset
ds['condition'] = eel.Factor(trigger, labels={1:'LA', 2:'RA', 3:'LV', 4:'RV',
5:'smiley', 32:'button'})
ds['side'] = eel.Factor(trigger, labels={1: 'L', 2:'R', 3:'L', 4:'R',
5:'None', 32:'None'})
ds['modality'] = eel.Factor(trigger, labels={1: 'A', 2:'A', 3:'V', 4:'V',
5:'None', 32:'None'})
return ds
if __name__ == '__main__':
# Use the function to load the events and plot data from a specific condition
import os
import mne
datapath = mne.datasets.sample.data_path()
raw_path = os.path.join(datapath, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
ds = load_evts(raw_path)
print(eel.table.frequencies('condition', ds=ds))
ds = ds.sub('modality == "A"')
ds = eel.load.fiff.add_epochs(ds, tmin=-0.1, tmax=0.3, baseline=(None, 0),
proj=False, data='mag', reject=2e-12,
name='meg', sysname='neuromag306mag')
    p = eel.plot.TopoButterfly('meg', 'side', ds=ds)
p.set_vlim(1e-12)
``` |
{
"source": "jpl126/ai-gym-playground",
"score": 3
} |
#### File: ai-gym-playground/frozen_lake/custom_frozen_lake.py
```python
import numpy as np
from typing import Set, Tuple
class InvalidMoveException(Exception):
pass
MOVES = {
0: 'LEFT',
1: 'DOWN',
2: 'RIGHT',
3: 'UP'
}
class FrozenLakeEnv:
_INT_TO_CHAR = {
0: 'F',
1: 'S',
2: 'H',
3: 'G'
}
def __init__(self, grid_size: Tuple[int, int] = (4, 4),
holes_positions: Tuple[Tuple[int, int]] = (
(1, 1), (1, 3), (2, 3), (3, 0)),
start_position: Tuple[int, int] = (0, 0),
goal_position: Tuple[int, int] = (3, 3)):
self._agent_position = list(start_position)
self._goal_position = list(goal_position)
# grid: 0 - frozen, 1 - start, 2 - hole, 3 - goal
        self._grid = np.zeros(grid_size, dtype=int)
self._grid[start_position] = 1
self._grid[goal_position] = 3
for hole in holes_positions:
self._grid[hole] = 2
def render(self):
"""
Prints frozen lake world. With highlighted agent's location.
:return: None
"""
text_grid = ''
        # + 1 accounts for the '\n' at the end of each rendered row
        state_no = (self._agent_position[0] * (self._grid.shape[1] + 1)
                    + self._agent_position[1])
for row in self._grid:
char_row = [self._INT_TO_CHAR[item] for item in row]
text_grid += ''.join(char_row) + '\n'
text_grid = (text_grid[:state_no] + '\033[41m' + text_grid[state_no]
+ '\033[0m' + text_grid[state_no + 1:-1])
print(text_grid)
def step(self, action: int) -> Tuple[int, float, bool, dict]:
"""
Gives environment response to agent's action.
:param action: move direction according to MOVES variable
:return: (
number of new agent's state,
reward signal,
True if episode is finished False otherwise,
info: your info for debugging
"""
done = False
info = {}
reward = 0
nearby_walls = self._get_walls_next_to_agent()
if action not in nearby_walls:
self._set_agent_position(action)
observation = self._get_observation()
if observation == self._get_goal_observation():
done = True
reward = 1
elif self._is_agent_in_hole():
done = True
return observation, reward, done, info
def _get_walls_next_to_agent(self) -> Set[int]:
"""
Gets walls next to the agent at his current position.
Walls description:
0: left wall,
1: down wall,
2: right wall,
3: up wall
:return: set with all walls next to the agent
"""
walls = []
if self._agent_position[0] == 0:
walls.append(3)
if self._agent_position[0] == self._grid.shape[0] - 1:
walls.append(1)
if self._agent_position[1] == 0:
walls.append(0)
if self._agent_position[1] == self._grid.shape[1] - 1:
walls.append(2)
return set(walls)
def _get_goal_observation(self) -> int:
"""
Returns current state (grid cell) as an integer.
:return: int describing current state
"""
observation = (self._goal_position[0] * self._grid.shape[1]
+ self._goal_position[1])
return observation
def _get_observation(self) -> int:
"""
Returns current state (grid cell) as an integer.
:return: int describing current state
"""
        observation = (self._agent_position[0] * self._grid.shape[1]
                       + self._agent_position[1])
return observation
def _set_agent_position(self, direction: int):
"""
Move agent to new position by one field. Rise an exception when agent
leaves grid world.
:param direction: direction according to MOVES variable
:return: None
"""
if direction in self._get_walls_next_to_agent():
raise InvalidMoveException
if direction == 0:
self._agent_position[1] -= 1
elif direction == 2:
self._agent_position[1] += 1
elif direction == 1:
self._agent_position[0] += 1
elif direction == 3:
self._agent_position[0] -= 1
else:
raise InvalidMoveException
def _is_agent_in_hole(self):
"""
Check if agent felt into a hole
:return: True if agent is in the hole, False otherwise
"""
row_no = self._agent_position[0]
col_no = self._agent_position[1]
if self._grid[row_no, col_no] == 2:
return True
return False
``` |
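A short interaction loop with the environment above, driving it with uniformly random actions (sketch; action indices follow ``MOVES``):
```python
import numpy as np

env = FrozenLakeEnv()
env.render()

rng = np.random.RandomState(0)
done = False
while not done:
    action = rng.randint(4)                     # 0=LEFT, 1=DOWN, 2=RIGHT, 3=UP
    obs, reward, done, info = env.step(action)
print('final state:', obs, 'reward:', reward)
```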
{
"source": "jpl126/dynamic-programing",
"score": 3
} |
#### File: dynamic_programming/utils/policies.py
```python
from typing import Callable
import numpy as np
def get_random_policy(actions_no: int, *args, **kwargs) -> Callable:
del args, kwargs # unused
def random_policy(state: int) -> int:
del state # unused in random agent
return np.random.randint(actions_no)
return random_policy
``` |
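Usage sketch for the factory above, e.g. paired with a four-action grid world such as the FrozenLake environment in this repository:
```python
policy = get_random_policy(actions_no=4)
action = policy(state=0)   # the state argument is ignored by the random policy
print(action)              # integer in [0, 4)
```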
{
"source": "jpl2020/cloud-foundation-fabric",
"score": 2
} |
#### File: modules/net_vpc/test_plan_psa.py
```python
def test_single_range(plan_runner):
"Test single PSA range."
psa_config = '''{
ranges = {
bar = "172.16.100.0/24"
foo = "172.16.101.0/24"
},
routes = null
}'''
_, resources = plan_runner(psa_config=psa_config)
assert len(resources) == 5
for r in resources:
if r['type'] == 'google_compute_network_peering_routes_config':
assert not r['values']['export_custom_routes']
assert not r['values']['import_custom_routes']
def test_routes_export(plan_runner):
"Test routes export."
psa_config = '''{
ranges = {
bar = "172.16.100.0/24"
},
routes = {
export = true
import = false
}
}'''
_, resources = plan_runner(psa_config=psa_config)
assert len(resources) == 4
for r in resources:
if r['type'] == 'google_compute_network_peering_routes_config':
assert r['values']['export_custom_routes']
assert not r['values']['import_custom_routes']
def test_routes_import(plan_runner):
"Test routes import."
psa_config = '''{
ranges = {
bar = "172.16.100.0/24"
},
routes = {
export = false
import = true
}
}'''
_, resources = plan_runner(psa_config=psa_config)
for r in resources:
if r['type'] == 'google_compute_network_peering_routes_config':
assert not r['values']['export_custom_routes']
assert r['values']['import_custom_routes']
def test_routes_export_import(plan_runner):
"Test routes export and import."
psa_config = '''{
ranges = {
bar = "172.16.100.0/24"
},
routes = {
export = true
import = true
}
}'''
_, resources = plan_runner(psa_config=psa_config)
for r in resources:
if r['type'] == 'google_compute_network_peering_routes_config':
assert r['values']['export_custom_routes']
assert r['values']['import_custom_routes']
``` |
{
"source": "jpl98/digital-analog-classifier",
"score": 3
} |
#### File: jpl98/digital-analog-classifier/train.py
```python
from collector import *
def combine_csv():
film_face = pd.read_csv(film_rgb_data_dir)
film_face['Dig'] = 0
dig_face = pd.read_csv(digital_rgb_data_dir)
dig_face['Dig'] = 1
    comb = pd.concat([film_face, dig_face]).astype(int).reset_index(drop=True)
comb.to_csv(combined_rgb_data_dir, index=False)
def convolve(need_to_scrape, is_dig, grid_size):
if need_to_scrape:
scrape(0, is_dig, grid_size, 0.05)
combine_csv()
return 1
size = 6
data = pd.read_csv(combined_rgb_data_dir)
print(data)
data_columns = data.columns
predictors = data[data_columns[data_columns != 'Dig']]
target = data['Dig']
n_cols = (size ** 2) * 3
def build_model(cols):
new_model = Sequential()
new_model.add(Dense(15, activation='relu', input_shape=(cols,)))
for i in range(19):
new_model.add(Dense(15, activation='relu'))
new_model.add(Dense(2, activation='softmax'))
new_model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss='mse',
metrics=['accuracy'])
return new_model
record = 0.0545
def train(lowest_mse):
x_train, x_test, y_train, y_test = train_test_split(predictors, target, test_size=0.3, random_state=30,
shuffle=True)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
mse_prev = lowest_mse
count = 0
for i in range(20):
fit = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10, verbose=1)
print(fit)
print("\n Testing:")
mse = model.evaluate(x_test, y_test, verbose=1)[0]
if mse < mse_prev:
count += 1
print("\nRecord low MSE! decreased by", mse_prev - mse, "-", count, "so far.\n")
model.save(model_dir)
mse_prev = mse
train(record)
``` |
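The network above expects a flat vector of ``size ** 2 * 3`` RGB values per image; a quick sketch of how a ``size x size`` RGB grid maps onto that input shape (illustrative only, not the collector code):
```python
import numpy as np

size = 6
grid = np.random.randint(0, 256, (size, size, 3))   # one downsampled RGB patch
features = grid.reshape(-1)                          # shape (size ** 2 * 3,) == (108,)
assert features.shape[0] == (size ** 2) * 3
```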
{
"source": "jplalor/acl-anthology",
"score": 2
} |
#### File: bin/anthology/utils.py
```python
import itertools as it
import logging
import os
import re
import requests
from lxml import etree
from urllib.parse import urlparse
from xml.sax.saxutils import escape as xml_escape
from typing import Tuple, Optional
from .people import PersonName
from . import data
xml_escape_or_none = lambda t: None if t is None else xml_escape(t)
def is_journal(anthology_id):
return anthology_id[0] in ("J", "Q")
def is_volume_id(anthology_id):
collection_id, volume_id, paper_id = deconstruct_anthology_id(anthology_id)
return paper_id == '0'
def build_anthology_id(collection_id: str, volume_id: str, paper_id: Optional[str] = None) -> str:
"""
Transforms collection id, volume id, and paper id to a width-padded
Anthology ID. e.g., ('P18', '1', '1') -> P18-1001.
"""
if collection_id.startswith('W') or collection_id == 'C69' or (collection_id == 'D19' and int(volume_id) >= 5):
anthology_id = f'{collection_id}-{int(volume_id):02d}'
if paper_id is not None:
anthology_id += f'{int(paper_id):02d}'
else:
anthology_id = f'{collection_id}-{int(volume_id):01d}'
if paper_id is not None:
anthology_id += f'{int(paper_id):03d}'
return anthology_id
def test_url_code(url):
"""
Test a URL, returning the result.
"""
r = requests.head(url, allow_redirects=True)
return r
def test_url(url):
"""
Tests a URL, returning True if the URL exists, and False otherwise.
"""
return test_url_code(url).status_code == requests.codes.ok
def deconstruct_anthology_id(anthology_id: str) -> Tuple[str, str, str]:
"""
Transforms an Anthology ID into its constituent collection id, volume id, and paper id
parts. e.g,
P18-1007 -> ('P18', '1', '7')
W18-6310 -> ('W18', '63', '10')
D19-1001 -> ('D19', '1', '1')
D19-5702 -> ('D19', '57', '2')
Also can deconstruct Anthology volumes:
P18-1 -> ('P18', '1', None)
W18-63 -> ('W18', '63', None)
For Anthology IDs prior to 2020, the volume ID is the first digit after the hyphen, except
for the following situations, where it is the first two digits:
- All collections starting with 'W'
- The collection "C69"
- All collections in "D19" where the first digit is >= 5
"""
collection_id, rest = anthology_id.split('-')
if collection_id.startswith('W') or collection_id == 'C69' or (collection_id == 'D19' and int(rest[0]) >= 5):
if len(rest) == 4:
return (collection_id, str(int(rest[0:2])), str(int(rest[2:])))
else: # Possible Volume only identifier
return (collection_id, str(int(rest)), None)
else:
if len(rest) == 4:
return (collection_id, str(int(rest[0:1])), str(int(rest[1:])))
else: # Possible Volume only identifier
return (collection_id, str(int(rest)), None)
def stringify_children(node):
"""Returns the full content of a node, including tags.
Used for nodes that can have mixed text and HTML elements (like <b> and <i>)."""
return "".join(
chunk
for chunk in it.chain(
(xml_escape_or_none(node.text),),
it.chain(
*(
(
etree.tostring(child, with_tail=False, encoding=str),
xml_escape_or_none(child.tail),
)
for child in node.getchildren()
)
),
(xml_escape_or_none(node.tail),),
)
if chunk
).strip()
def remove_extra_whitespace(text):
return re.sub(" +", " ", text.replace("\n", "").strip())
def infer_url(filename, prefix=data.ANTHOLOGY_PREFIX):
"""If URL is relative, return the full Anthology URL.
"""
if urlparse(filename).netloc:
return filename
return f"{prefix}/{filename}"
def infer_attachment_url(filename, parent_id=None):
if urlparse(filename).netloc:
return filename
# Otherwise, treat it as an internal filename
if parent_id is not None and not filename.startswith(parent_id):
logging.error(
"attachment must begin with paper ID '{}', but is '{}'".format(
parent_id, filename
)
)
return infer_url(filename, data.ATTACHMENT_PREFIX)
def infer_year(collection_id):
"""Infer the year from the collection ID.
Many paper entries do not explicitly contain their year. This function assumes
that the paper's collection identifier follows the format 'xyy', where x is
some letter and yy are the last two digits of the year of publication.
"""
assert (
len(collection_id) == 3
), f"Couldn't infer year: unknown volume ID format '{collection_id}' ({type(collection_id)})"
digits = collection_id[1:]
if int(digits) >= 60:
year = "19{}".format(digits)
else:
year = "20{}".format(digits)
return year
_MONTH_TO_NUM = {
"january": 1,
"february": 2,
"march": 3,
"april": 4,
"may": 5,
"june": 6,
"july": 7,
"august": 8,
"september": 9,
"october": 10,
"november": 11,
"december": 12,
}
def month_str2num(text):
"""Convert a month string to a number, e.g. February -> 2
Returns None if the string doesn't correspond to a month.
Not using Python's datetime here since its behaviour depends on the system
locale."""
return _MONTH_TO_NUM.get(text.lower(), None)
class SeverityTracker(logging.Handler):
def __init__(self, level=logging.NOTSET):
super().__init__(level=level)
self.highest = logging.NOTSET
def emit(self, record):
if record.levelno > self.highest:
self.highest = record.levelno
def clean_whitespace(text, strip='left'):
old_text = text
if text is not None:
text = re.sub(r' +', ' ', text)
if strip == 'left' or strip == 'both':
text = text.lstrip()
if strip == 'right' or strip == 'both':
text = text.rstrip()
return text
def indent(elem, level=0, internal=False):
"""
Enforces canonical indentation: two spaces,
with each tag on a new line, except that 'author', 'editor',
'title', and 'booktitle' tags are placed on a single line.
Adapted from https://stackoverflow.com/a/33956544 .
"""
# tags that have no internal linebreaks (including children)
oneline = elem.tag in ('author', 'editor', 'title', 'booktitle')
elem.text = clean_whitespace(elem.text)
if len(elem): # children
# Set indent of first child for tags with no text
if not oneline and (not elem.text or not elem.text.strip()):
elem.text = '\n' + (level + 1) * ' '
if not elem.tail or not elem.tail.strip():
if level:
elem.tail = '\n' + level * ' '
else:
elem.tail = '\n'
# recurse
for child in elem:
indent(child, level + 1, internal=oneline)
# Clean up the last child
if oneline:
child.tail = clean_whitespace(child.tail, strip='right')
elif (not child.tail or not child.tail.strip()):
child.tail = '\n' + level * ' '
else:
elem.text = clean_whitespace(elem.text, strip='both')
if internal:
elem.tail = clean_whitespace(elem.tail, strip='none')
elif not elem.tail or not elem.tail.strip():
elem.tail = '\n' + level * ' '
def parse_element(xml_element):
attrib = {}
if xml_element is None:
return attrib
for element in xml_element:
# parse value
tag = element.tag.lower()
if tag in ("abstract", "title", "booktitle"):
tag = "xml_{}".format(tag)
value = element
elif tag == "attachment":
value = {
"filename": element.text,
"type": element.get("type", "attachment"),
"url": element.text,
}
elif tag in ("author", "editor"):
id_ = element.attrib.get("id", None)
value = (PersonName.from_element(element), id_)
elif tag == "erratum":
value = {
"value": element.text,
"id": element.get("id"),
"url": element.text
}
elif tag == "revision":
value = {
"value": element.get("href"),
"id": element.get("id"),
"url": element.get("href"),
"explanation": element.text
}
elif tag == "mrf":
value = {"filename": element.text, "src": element.get("src")}
elif tag == "video":
# Treat videos the same way as other attachments
tag = "attachment"
value = {
"filename": element.get("href"),
"type": element.get("tag", "video"),
"url": element.get("href"),
}
elif tag in ("dataset", "software"):
value = {
"filename": element.text,
"type": tag,
"url": element.text,
}
tag = "attachment"
else:
value = element.text
if tag == "url":
# Set the URL (canonical / landing page for Anthology)
value = infer_url(element.text)
# Add a PDF link with, converting relative URLs to canonical ones
attrib['pdf'] = element.text if urlparse(element.text).netloc else data.ANTHOLOGY_PDF.format(element.text)
if tag in data.LIST_ELEMENTS:
try:
attrib[tag].append(value)
except KeyError:
attrib[tag] = [value]
else:
attrib[tag] = value
return attrib
def make_simple_element(tag,
text=None,
attrib=None,
parent=None,
namespaces=None):
"""Convenience function to create an LXML node"""
el = etree.Element(tag, nsmap=namespaces) if parent is None else etree.SubElement(parent, tag)
if text:
el.text = text
if attrib:
for key, value in attrib.items():
el.attrib[key] = value
return el
def make_nested(root, pdf_path: str):
"""
Converts an XML tree root to the nested format (if not already converted).
The original format was:
<volume id="P19">
<paper id="1000"> <!-- new volume -->
The nested format is:
<collection id="P19">
<volume id="1">
<frontmatter>
...
</frontmatter>
<paper id="1">
"""
collection_id = root.attrib['id']
if root.tag == 'collection':
return root
new_root = make_simple_element('collection')
new_root.attrib['id'] = collection_id
new_root.tail = '\n'
volume = None
meta = None
prev_volume_id = None
for paper in root.findall("paper"):
paper_id = paper.attrib['id']
if len(paper_id) != 4:
logging.warning(f'skipping invalid paper ID {paper_id}')
continue
first_volume_digit = int(paper_id[0])
if collection_id.startswith('W') or collection_id == 'C69' or (collection_id == 'D19' and first_volume_digit >= 5):
volume_width = 2
paper_width = 2
else:
volume_width = 1
paper_width = 3
volume_id, paper_id = int(paper_id[0:volume_width]), int(paper_id[volume_width:])
full_volume_id = f'{collection_id}-{volume_id:0{volume_width}d}'
full_paper_id = f'{full_volume_id}{paper_id:0{paper_width}d}'
paper.attrib['id'] = '{}'.format(paper_id)
# new volume
if prev_volume_id is None or prev_volume_id != volume_id:
meta = make_simple_element('meta')
if collection_id == 'C69':
meta.append(make_simple_element('month', text='September'))
meta.append(make_simple_element('year', text='1969'))
meta.append(make_simple_element('address', text='Sånga Säby, Sweden'))
volume = make_simple_element('volume')
volume.append(meta)
volume.attrib['id'] = str(volume_id)
prev_volume_id = volume_id
new_root.append(volume)
# Add volume-level <url> tag if PDF is present
volume_path = os.path.join(pdf_path, f'{full_volume_id}.pdf')
if os.path.exists(volume_path):
url = make_simple_element('url', text=full_volume_id)
print(f"{collection_id}: inserting volume URL: {full_volume_id}")
meta.append(url)
# Transform paper 0 to explicit frontmatter
if paper_id == 0:
paper.tag = 'frontmatter'
del paper.attrib['id']
title = paper.find('title')
if title is not None:
title.tag = 'booktitle'
meta.insert(0, title)
frontmatter_path = os.path.join(pdf_path, f'{full_paper_id}.pdf')
if os.path.exists(frontmatter_path):
url = paper.find('url')
if url is not None:
url.text = f'{full_paper_id}'
else:
url = make_simple_element('url', text=full_paper_id)
paper.append(url)
print(f"{collection_id}: inserting frontmatter URL: {full_paper_id}")
else:
if paper.find('url') is not None:
paper.remove(paper.find('url'))
print(f"{collection_id}: removing missing frontmatter PDF: {full_paper_id}")
# Change authors of frontmatter to editors
authors = paper.findall('author')
if authors is not None:
for author in authors:
author.tag = 'editor'
# Remove empty abstracts (corner case)
abstract = paper.find('abstract')
if abstract is not None:
if abstract.text != None:
print('* WARNING: non-empty abstract for', paper.full_id)
else:
paper.remove(abstract)
# Transfer editor keys (once)
if volume.find('editor') is None:
editors = paper.findall('editor')
if editors is not None:
for editor in editors:
meta.append(editor)
# Transfer the DOI key if it's a volume entry
doi = paper.find('doi')
if doi is not None:
if doi.text.endswith(full_volume_id):
print(f'* Moving DOI entry {doi.text} from frontmatter to metadata')
meta.append(doi)
# Canonicalize URL
url = paper.find('url')
if url is not None:
url.text = re.sub(r'https?://(www.)?aclweb.org/anthology/', '', url.text)
# Remove bibtype and bibkey
for key_name in 'bibtype bibkey'.split():
node = paper.find(key_name)
if node is not None:
paper.remove(node)
# Move to metadata
for key_name in 'booktitle publisher volume address month year ISBN isbn'.split():
# Move the key to the volume if not present in the volume
node_paper = paper.find(key_name)
if node_paper is not None:
node_meta = meta.find(key_name)
# If not found in the volume, move it
if node_meta is None:
node_meta = node_paper
if key_name == 'booktitle':
meta.insert(0, node_paper)
else:
meta.append(node_paper)
# If found in the volume, move only if it's redundant
elif node_paper.tag == node_meta.tag:
paper.remove(node_paper)
# Take volume booktitle from first paper title if it wasn't found in the
# frontmatter paper (some volumes have no front matter)
if collection_id == 'C69' and meta.find('booktitle') is None and paper.find('title') is not None:
meta.insert(0, make_simple_element('booktitle', text=paper.find('title').text))
volume.append(paper)
indent(new_root)
return new_root
``` |
{
"source": "jplalor/flowpm",
"score": 2
} |
#### File: flowpm/flowpm/tfpower.py
```python
import tensorflow as tf
import numpy as np
import flowpm.tfbackground as bkgrd
import flowpm.constants as const
from flowpm.scipy.integrate import simps
def primordial_matter_power(cosmo, k):
r"""Primordial power spectrum
Pk = k^n
Parameters
----------
cosmo: dictionary
Input cosmology dictionary.
k: array_like
Input scale at which to evaluate the PPS
Returns:
pk: array_like
Primordial power spectrum evaluated at requested scales
"""
k = tf.convert_to_tensor(k, dtype=tf.float32)
return k ** cosmo['n_s']
def Eisenstein_Hu(cosmo, k, type="eisenhu_osc"):
"""Computes the Eisenstein & Hu matter transfer function.
Parameters
----------
cosmo: dictionary
Background cosmology
k: array_like
Wave number in h Mpc^{-1}
type: str, optional
Type of transfer function. Either 'eisenhu' or 'eisenhu_osc'
(def: 'eisenhu_osc')
Returns
-------
T: array_like
Value of the transfer function at the requested wave number
Notes
-----
The Eisenstein & Hu transfer functions are computed using the fitting
formulae of :cite:`1998:EisensteinHu`
"""
k = tf.convert_to_tensor(k, dtype=tf.float32)
#############################################
# Quantities computed from 1998:EisensteinHu
# Provides : - k_eq : scale of the particle horizon at equality epoch
# - z_eq : redshift of equality epoch
# - R_eq : ratio of the baryon to photon momentum density
# at z_eq
# - z_d : redshift of drag epoch
# - R_d : ratio of the baryon to photon momentum density
# at z_d
# - sh_d : sound horizon at drag epoch
# - k_silk : Silk damping scale
T_2_7_sqr = (const.tcmb / 2.7) ** 2
h2 = cosmo["h"] ** 2
w_m = cosmo["Omega0_m"] * h2
w_b = cosmo["Omega0_b"] * h2
fb = cosmo["Omega0_b"] / cosmo["Omega0_m"]
fc = (cosmo["Omega0_m"] - cosmo["Omega0_b"]) / cosmo["Omega0_m"]
k_eq = 7.46e-2 * w_m / T_2_7_sqr / cosmo["h"] # Eq. (3) [h/Mpc]
z_eq = 2.50e4 * w_m / (T_2_7_sqr) ** 2 # Eq. (2)
# z drag from Eq. (4)
b1 = 0.313 * tf.math.pow(w_m, -0.419) * (1.0 + 0.607 * tf.math.pow(w_m, 0.674))
b2 = 0.238 * tf.math.pow(w_m, 0.223)
z_d = (
1291.0
* tf.math.pow(w_m, 0.251)
/ (1.0 + 0.659 * tf.math.pow(w_m, 0.828))
* (1.0 + b1 * tf.math.pow(w_b, b2))
)
# Ratio of the baryon to photon momentum density at z_d Eq. (5)
R_d = 31.5 * w_b / (T_2_7_sqr) ** 2 * (1.0e3 / z_d)
# Ratio of the baryon to photon momentum density at z_eq Eq. (5)
R_eq = 31.5 * w_b / (T_2_7_sqr) ** 2 * (1.0e3 / z_eq)
# Sound horizon at drag epoch in h^-1 Mpc Eq. (6)
sh_d = (
2.0
/ (3.0 * k_eq)
* tf.math.sqrt(6.0 / R_eq)
* tf.math.log((np.sqrt(1.0 + R_d) + tf.math.sqrt(R_eq + R_d)) / (1.0 + tf.math.sqrt(R_eq)))
)
# Eq. (7) but in [hMpc^{-1}]
k_silk = (
1.6
* tf.math.pow(w_b, 0.52)
* tf.math.pow(w_m, 0.73)
* (1.0 + tf.math.pow(10.4 * w_m, -0.95))
/ cosmo["h"]
)
#############################################
alpha_gamma = (
1.0
- 0.328 * tf.math.log(431.0 * w_m) * w_b / w_m
+ 0.38 * tf.math.log(22.3 * w_m) * (cosmo["Omega0_b"] / cosmo["Omega0_m"]) ** 2
)
gamma_eff = (
cosmo["Omega0_m"]
* cosmo["h"]
* (alpha_gamma + (1.0 - alpha_gamma) / (1.0 + (0.43 * k * sh_d) ** 4))
)
if type == "eisenhu":
q = k * tf.math.pow(const.tcmb / 2.7, 2) / gamma_eff
# EH98 (29) #
L = tf.math.log(2.0 * tf.math.exp(1.0) + 1.8 * q)
C = 14.2 + 731.0 / (1.0 + 62.5 * q)
res = L / (L + C * q * q)
elif type == "eisenhu_osc":
# Cold dark matter transfer function
# EH98 (11, 12)
a1 = tf.math.pow(46.9 * w_m, 0.670) * (1.0 + tf.math.pow(32.1 * w_m, -0.532))
a2 = tf.math.pow(12.0 * w_m, 0.424) * (1.0 + tf.math.pow(45.0 * w_m, -0.582))
alpha_c = tf.math.pow(a1, -fb) * tf.math.pow(a2, -(fb ** 3))
b1 = 0.944 / (1.0 + tf.math.pow(458.0 * w_m, -0.708))
b2 = tf.math.pow(0.395 * w_m, -0.0266)
beta_c = 1.0 + b1 * (tf.math.pow(fc, b2) - 1.0)
beta_c = 1.0 / beta_c
# EH98 (19). [k] = h/Mpc
def T_tilde(k1, alpha, beta):
# EH98 (10); [q] = 1 BUT [k] = h/Mpc
q = k1 / (13.41 * k_eq)
L = tf.math.log(tf.math.exp(1.0) + 1.8 * beta * q)
C = 14.2 / alpha + 386.0 / (1.0 + 69.9 * tf.math.pow(q, 1.08))
T0 = L / (L + C * q * q)
return T0
# EH98 (17, 18)
f = 1.0 / (1.0 + (k * sh_d / 5.4) ** 4)
Tc = f * T_tilde(k, 1.0, beta_c) + (1.0 - f) * T_tilde(k, alpha_c, beta_c)
# Baryon transfer function
# EH98 (19, 14, 21)
y = (1.0 + z_eq) / (1.0 + z_d)
x = tf.math.sqrt(1.0 + y)
G_EH98 = y * (-6.0 * x + (2.0 + 3.0 * y) * tf.math.log((x + 1.0) / (x - 1.0)))
alpha_b = 2.07 * k_eq * sh_d * tf.math.pow(1.0 + R_d, -0.75) * G_EH98
beta_node = 8.41 * tf.math.pow(w_m, 0.435)
tilde_s = sh_d / tf.math.pow(1.0 + (beta_node / (k * sh_d)) ** 3, 1.0 / 3.0)
beta_b = 0.5 + fb + (3.0 - 2.0 * fb) * tf.math.sqrt((17.2 * w_m) ** 2 + 1.0)
# [tilde_s] = Mpc/h
Tb = (
T_tilde(k, 1.0, 1.0) / (1.0 + (k * sh_d / 5.2) ** 2)
+ alpha_b
/ (1.0 + (beta_b / (k * sh_d)) ** 3)
* tf.math.exp(- tf.math.pow(k / k_silk, 1.4))
) * (tf.math.sin(k * tilde_s )/(k * tilde_s + 1e-9)) # TODO: Replace by sinc when possible
# Total transfer function
res = fb * Tb + fc * Tc
else:
raise NotImplementedError
return res
def linear_matter_power(cosmo, k, a=1.0, transfer_fn=Eisenstein_Hu, **kwargs):
r"""Computes the linear matter power spectrum.
Parameters
----------
k: array_like
Wave number in h Mpc^{-1}
a: array_like, optional
Scale factor (def: 1.0)
transfer_fn: transfer_fn(cosmo, k, **kwargs)
Transfer function
Returns
-------
pk: array_like
Linear matter power spectrum at the specified scale
and scale factor.
"""
k=tf.convert_to_tensor(k,dtype=tf.float32)
a=tf.convert_to_tensor(a,dtype=tf.float32)
g = bkgrd.D1(cosmo, a)
t = transfer_fn(cosmo, k, **kwargs)
pknorm = cosmo["sigma8"] ** 2 / sigmasqr(cosmo, 8.0, transfer_fn, **kwargs)
pk = primordial_matter_power(cosmo, k) * t ** 2 * g ** 2
# Apply normalisation
pk = pk * pknorm
return tf.squeeze(pk)
def sigmasqr(cosmo, R, transfer_fn, kmin=0.0001, kmax=1000.0, ksteps=5, **kwargs):
"""Computes the energy of the fluctuations within a sphere of R h^{-1} Mpc
.. math::
\\sigma^2(R)= \\frac{1}{2 \\pi^2} \\int_0^\\infty \\frac{dk}{k} k^3 P(k,z) W^2(kR)
where
.. math::
W(kR) = \\frac{3j_1(kR)}{kR}
"""
def int_sigma(logk):
k = np.exp(logk)
x = k * R
w = 3.0 * (np.sin(x) - x * np.cos(x)) / (x * x * x)
pk = transfer_fn(cosmo, k, **kwargs) ** 2 * primordial_matter_power(cosmo, k)
return k * (k * w) ** 2 * pk
y = simps(int_sigma, np.log10(kmin), np.log10(kmax), 256)
return 1.0 / (2.0 * np.pi ** 2.0) * y
```
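A hedged usage sketch for the transfer function and power spectrum above (not part of the repository): it assumes the functions and their module-level dependencies (`bkgrd`, `simps`, `primordial_matter_power`, `const`) are importable from the same module, and it uses the plain-dict cosmology format that the test file below also uses.
```python
import numpy as np

# Cosmology as a plain dict, mirroring the keys accessed above (Omega0_m, h, sigma8, ...)
cosmo = {"w0": -1.0, "wa": 0.0, "H0": 100, "h": 0.6774,
         "Omega0_b": 0.04860, "Omega0_c": 0.2589, "Omega0_m": 0.3075,
         "Omega0_k": 0.0, "Omega0_de": 0.6925,
         "n_s": 0.9667, "sigma8": 0.8159}

k = np.logspace(-3, 1, 256)                 # wave numbers in h/Mpc
pk = linear_matter_power(cosmo, k, a=1.0)   # sigma8-normalised linear P(k) at z=0
print(pk.shape)                             # -> TensorShape([256])
```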
#### File: flowpm/tests/tf_background_test.py
```python
import tensorflow as tf
import numpy as np
from flowpm.tfbackground import dEa,Omega_m_a, E, Gf,Gf2, gf,gf2,D1,D2,D1f,D2f, f1,f2
from numpy.testing import assert_allclose
from flowpm.legacy_background import MatterDominated
cosmo={"w0":-1.0,
"wa":0.0,
"H0":100,
"h":0.6774,
"Omega0_b":0.04860,
"Omega0_c":0.2589,
"Omega0_m":0.3075,
"Omega0_k":0.0,
"Omega0_de":0.6925,
"n_s":0.9667,
"sigma8":0.8159}
def test_E():
""" This function tests the scale factor dependence of the
Hubble parameter.
"""
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-3, 0)
# Computing reference E value with old code
E_ref = M_d.E(a)
# Computing new E function with tensorflow
E_back = E(cosmo, a)
assert_allclose(E_ref, E_back, rtol=1e-4)
def test_Eprime():
""" Testing Derivative of the scale factor dependent factor E(a)
"""
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-3, 0)
# Computing reference E' value with old code
E_prim_back=M_d.efunc_prime(a)
# Computing new E' function with tensorflow
E_n = dEa(cosmo, a)
assert_allclose(E_prim_back, E_n, rtol=1e-4)
def test_Omega_m():
""" Testing Matter density at scale factor `a`
"""
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-3, 0)
# Computing reference Omega_m value with old code
Omega_back=M_d.Om(a)
# Computing new Omega_m' function with tensorflow
Omega_m_n = Omega_m_a(cosmo, a)
assert_allclose(Omega_back, Omega_m_n, rtol=1e-4)
def test_growth_1order():
""" Testing linear growth factor D_1(a)
"""
M_d=MatterDominated(Omega0_m=0.3075)
a =np.logspace(-2, 0.0, 128)
gback = M_d.D1(a)
gtfback =D1(cosmo, a)
assert_allclose(gback, gtfback, rtol=1e-2)
def test_growth_2order():
""" Testing linear growth factor D_2(a)
"""
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-2, 0.0,128)
g2back = M_d.D2(a)
g2tfback =D2(cosmo, a)
assert_allclose(g2back, g2tfback, rtol=1e-2)
def test_D1_fnorm():
""" Testing D'_1(a)
"""
M_d=MatterDominated(Omega0_m=0.3075)
a =np.logspace(-2, 0.0, 128)
gback = M_d.gp(a)
gtfback =D1f(cosmo,a)
assert_allclose(gback, gtfback, rtol=1e-2)
def test_D2_fnorm():
""" Testing D'_2(a)
"""
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-2, 0.0,128)
g2back = M_d.gp2(a)
g2tfback =D2f(cosmo,a)
assert_allclose(g2back, g2tfback, rtol=1e-2)
# =============================================================================
def testf1():
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-2, 0,128)
f1_back=M_d.f1(a)
f1_tf=f1(cosmo, a)
assert_allclose(f1_back, f1_tf, rtol=1e-2)
def testf2():
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-2, 0,128)
f2_back=M_d.f2(a)
f2_tf=f2(cosmo, a)
assert_allclose(f2_back, f2_tf, rtol=1e-2)
def testGf():
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-2, 0,128)
Gf_back=M_d.Gf(a)
Gf_tf=Gf(cosmo, a)
assert_allclose(Gf_back, Gf_tf, rtol=1e-2)
def testGf2():
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-2, 0,128)
Gf2_back=M_d.Gf2(a)
Gf2_tf=Gf2(cosmo, a)
assert_allclose(Gf2_back, Gf2_tf, rtol=1e-2)
def testgf():
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-2, 0,128)
gf_back=M_d.gf(a)
gf_tf=gf(cosmo, a)
assert_allclose(gf_back, gf_tf, rtol=1e-2)
def testgf2():
M_d=MatterDominated(Omega0_m=0.3075)
a = np.logspace(-2, 0,128)
gf2_back=M_d.gf2(a)
gf2_tf=gf2(cosmo, a)
assert_allclose(gf2_back, gf2_tf, rtol=1e-2)
``` |
{
"source": "jplalor/transformers-android-demo",
"score": 2
} |
#### File: transformers-android-demo/model_converter/model.py
```python
import torch.nn as nn
from torch.nn import CrossEntropyLoss, MSELoss
import tensorflow as tf
from transformers.modeling_electra import (
ElectraModel,
ElectraPreTrainedModel,
)
from transformers.modeling_tf_electra import (
TFElectraMainLayer,
TFElectraPreTrainedModel
)
class ElectraForSequenceClassification(ElectraPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.electra = ElectraModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.softmax = nn.Softmax(dim=-1)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
discriminator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds
)
pooled_output = discriminator_hidden_states[0][:, 0]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,)
softmax_logits = self.softmax(logits)
outputs = (softmax_logits,) + outputs
return outputs # (softmax_logits, logits)
class TFElectraForSequenceClassification(TFElectraPreTrainedModel):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
self.electra = TFElectraMainLayer(config, name="electra")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(config.num_labels, name="classifier")
self.softmax = tf.keras.layers.Softmax(axis=-1)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
training=False,
):
discriminator_hidden_states = self.electra(
input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, training=training
)
pooled_output = discriminator_hidden_states[0][:, 0]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,)
softmax_logits = self.softmax(logits)
outputs = (softmax_logits,) + outputs
return outputs # (softmax_logits, logits)
``` |
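A hedged sketch (not from the repository) of how such a PyTorch/TF class pair is typically used to turn a fine-tuned PyTorch checkpoint into TensorFlow weights; the checkpoint paths are placeholders, the base model name is only illustrative, and the classes are assumed importable from the `model` module above.
```python
from transformers import ElectraConfig
from model import TFElectraForSequenceClassification

# Placeholder base config; num_labels must match the fine-tuned classifier head.
config = ElectraConfig.from_pretrained("google/electra-small-discriminator", num_labels=2)

# Load the fine-tuned PyTorch weights into the TF twin via transformers' from_pt bridge,
# then export them in TensorFlow format for downstream conversion.
tf_model = TFElectraForSequenceClassification.from_pretrained(
    "path/to/pytorch_checkpoint", config=config, from_pt=True)
tf_model.save_pretrained("path/to/tf_model")
```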
{
"source": "jplam123/ForRency",
"score": 3
} |
#### File: ForRency/Forecasting/scrape.py
```python
import datetime
import os
import tweepy
from dotenv import load_dotenv
import pandas as pd
import senti_anal
def output_csv(data, hashtag, page_num):
if page_num % 100 == 0:
date_label = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
df = pd.DataFrame(data)
senti_anal.textblob_anal(df)
print("....... outputting to csv", page_num, len(data))
df.to_csv(f"{hashtag}_{page_num}_{date_label}.csv", index=False)
print(" ..... resetting df")
data = []
return data
def mine_data(data, page):
for tweet in page:
mined = {
"tweet_id": tweet.id,
"name": tweet.user.name,
"retweet_count": tweet.retweet_count,
"text": tweet.full_text,
"created_at": tweet.created_at,
"hashtags": tweet.entities["hashtags"],
"status_count": tweet.user.statuses_count,
"followers_count": tweet.user.followers_count,
"location": tweet.place,
}
try:
mined["retweet_text"] = tweet.retweeted_status.full_text
        except AttributeError:
mined["retweet_text"] = "No RT Text"
data.append(mined)
class TweetScraper:
# Constants
# API RELATED CONSTANTS
load_dotenv()
API_KEY = os.environ['API_KEY']
API_SECRET_KEY = os.environ.get('API_SECRET_KEY')
# API authentication for tweepy
auth = tweepy.OAuthHandler(API_KEY, API_SECRET_KEY)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# Defining the search terms as variables
BTC_search_words = ["BTC", "bitcoin", "Bitcoin", "btc"]
def hashtag_looper(self):
for btc_word in self.BTC_search_words:
            self.hashtag_scraper(btc_word)
def hashtag_scraper(self, hashtag: str):
last_tweet_id = False
page_num = 1
data = []
query = f"#{hashtag}"
print(" ===== ", query)
for page in tweepy.Cursor(
self.api.search,
q=query,
lang="en",
tweet_mode="extended",
count=200,
).pages(400):
print(f"page: {page_num}")
page_num += 1
mine_data(data, page)
data = output_csv(data, hashtag, page_num)
date_label = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
df = pd.DataFrame(data)
senti_anal.textblob_anal(df)
df.to_csv(f"{hashtag}_{page_num/100}_{date_label}.csv", index=False)
if __name__ == '__main__':
    t = TweetScraper()
    t.hashtag_looper()
```
#### File: ForRency/Forecasting/senti_anal.py
```python
from textblob import TextBlob
from pandas import DataFrame
def textblob_anal(df):
print("Hello there")
blobs = [TextBlob(content) for content in df['text']]
df['tb_Pol'] = [b.sentiment.polarity for b in blobs]
df['tb_Subj'] = [b.sentiment.subjectivity for b in blobs]
``` |
{
"source": "jplamowska/python-tdd",
"score": 4
} |
#### File: jplamowska/python-tdd/TDD.py
```python
def f1(a,b=0):
return a*a + b
def f2(a):
if len(a) >0:
return a[0]
else:
return "BUUUUM"
def f3(a):
if a==1:
return "jeden"
elif a==2:
return "dwa"
elif a==3:
return "trzy"
else:
return "other"
def f4(a,b=""):
if len(b)>0:
return a + " ma kota " + "i " + b
else:
return a+ " ma kota"
def f5(a, step=1):
    # list(...) keeps the list-based assertions in TDDtest.py valid on Python 3 as well
    return list(range(a))[::step]
def f6(a, b):
return b * a
def f7(a):
if (isinstance(a,str) and a[-1] =="."):
return "zdanie"
elif(isinstance(a,str) and a[0] == "<" and a[-1] == ">"):
return "tag poczatkowy"
elif(isinstance(a, str)):
return "slowo"
elif(isinstance(a, int) and a < 0):
return "liczba_ze_znakiem"
elif(isinstance(a, int) and a < 10):
return "cyfra"
elif(isinstance(a, int)):
return "liczba"
```
#### File: jplamowska/python-tdd/TDDtest.py
```python
import unittest, TDD
class TESTStringMethods(unittest.TestCase):
def test_f1(self):
w = TDD.f1(0)
self.assertEqual(w,0)
def test_f1_refactor(self):
w= TDD.f1(1)
self.assertEqual(w,1)
def test_f1_rf2(self):
w=TDD.f1(2)
self.assertEqual(w,4)
def test_f1_rf3(self):
w=TDD.f1(2,1)
self.assertEqual(w,5)
def test_f1_rf4(self):
w=TDD.f1(2,3)
self.assertEqual(w,7)
def test_f2(self):
w=TDD.f2("ala")
self.assertEqual(w,"a")
def test_f2_innyprzyklad(self):
w=TDD.f2([1,2,3])
self.assertEqual(w,1)
def test_f2_innyprzyklad2(self):
w=TDD.f2([])
self.assertEqual(w,"BUUUUM")
def test_f3_jaka_to_liczba(self):
w=TDD.f3(1)
self.assertEqual(w,"jeden")
def test_f3_jaka_to_liczba2(self):
w=TDD.f3(2)
self.assertEqual(w,"dwa")
def test_f3_jaka_to_liczba3(self):
w=TDD.f3(3)
self.assertEqual(w,"trzy")
def test_f3_jaka_to_liczba_other(self):
w=TDD.f3(999)
self.assertEqual(w, "other")
def test_f4_kto_co_ma(self):
w=TDD.f4("kot", "mysz")
self.assertEqual(w, "kot ma kota i mysz")
def test_f4_kto_co_ma1(self):
w=TDD.f4("kot", "")
self.assertEqual(w, "kot ma kota")
def test_f5(self):
w=TDD.f5(0)
self.assertEqual(w, [])
def test_f5_jeden(self):
w=TDD.f5(1)
self.assertEqual(w, [0])
def test_f5_dwa(self):
w=TDD.f5(2)
self.assertEqual(w, [0, 1])
def test_f5_siedem(self):
w=TDD.f5(7)
self.assertEqual(w, [0, 1, 2, 3, 4, 5, 6])
def test_f5_siedem_dwa(self):
w=TDD.f5(7, 2)
self.assertEqual(w, [0, 2, 4, 6])
def test_f5_siedemnascie_dwa(self):
w=TDD.f5(17, 2)
self.assertEqual(w, [0, 2, 4, 6, 8, 10, 12, 14, 16])
def test_f5_siedemnascie_piec(self):
w=TDD.f5(17, 5)
self.assertEqual(w, [0, 5, 10, 15])
def test_f6_jeden(self):
w = TDD.f6(1, '*')
self.assertEqual(w, '*')
def test_f6_siedem(self):
w = TDD.f6(7, '*')
self.assertEqual(w, '*******')
def test_f7_slowo(self):
w = TDD.f7('ala')
self.assertEqual(w, 'slowo')
def test_f7_cyfra(self):
w = TDD.f7(1)
self.assertEqual(w, 'cyfra')
def test_f7_liczba(self):
w = TDD.f7(11111)
self.assertEqual(w, 'liczba')
def test_f7_liczba_ze_znakiem(self):
w = TDD.f7(-11111)
self.assertEqual(w, 'liczba_ze_znakiem')
def test_f7_zdanie(self):
w=TDD.f7("Ala ma kota.")
self.assertEqual(w, 'zdanie')
def test_f7_tag(self):
w=TDD.f7("<taaag>")
self.assertEqual(w, 'tag poczatkowy')
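# Added for convenience (not in the original file): a standard entry point so the
# suite can be run directly with `python TDDtest.py` (or `python -m unittest TDDtest`).
if __name__ == '__main__':
    unittest.main()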
``` |
{
"source": "jplapp/associative_clustering",
"score": 3
} |
#### File: semisup/tools/coil20.py
```python
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
from tools import data_dirs
DATADIR = data_dirs.coil20
NUM_LABELS = 20
IMAGE_SHAPE = [32, 32, 1]
def get_data(name):
"""Utility for convenient data loading."""
images, labels = extract_images()
images = np.reshape(images, list(images.shape) + [1, ])
rng = np.random.RandomState(seed=47)
# inds = rng.choice(len(images), int(len(images) / 5 * 4))
# print(inds)
if name == 'train' or name == 'unlabeled':
return images, labels
elif name == 'test':
return images, labels
def extract_images():
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
from os import listdir
from os.path import isfile, join
files = [f for f in listdir(DATADIR) if isfile(join(DATADIR, f))]
print(DATADIR)
images = []
labels = []
for file in files:
img = cv2.imread(join(DATADIR, file), 0)
img = cv2.resize(img, (32, 32))
images.append(img)
img_id = file[3:5]
if img_id[-1] == '_':
img_id = img_id[0]
labels.append(int(img_id) - 1)
return np.array(images), np.array(labels)
# Dataset specific augmentation parameters.
augmentation_params = dict()
augmentation_params['max_crop_percentage'] = 0.2
augmentation_params['brightness_max_delta'] = 0.5
augmentation_params['noise_std'] = 0.05
augmentation_params['flip'] = True
augmentation_params['max_rotate_angle'] = 10
``` |
{
"source": "jp-larose/aiogremlin",
"score": 2
} |
#### File: aiogremlin/driver/resultset.py
```python
import asyncio
import functools
from autologging import logged, traced
import logging
from aiogremlin import exception
def error_handler(fn):
@functools.wraps(fn)
async def wrapper(self):
msg = await fn(self)
if msg:
if msg.status_code not in [200, 206]:
self.close()
raise exception.GremlinServerError(
msg.status_code,
"{0}: {1}".format(msg.status_code, msg.message))
if isinstance(msg.data, list) and msg.data:
msg = msg.data[0]
else:
msg = msg.data
return msg
return wrapper
@traced
@logged
class ResultSet:
"""Gremlin Server response implementated as an async iterator."""
def __init__(self, request_id, timeout, loop=None):
self._response_queue = asyncio.Queue(loop=loop)
self._request_id = request_id
self._loop = loop
self._timeout = timeout
self._done = asyncio.Event(loop=self._loop)
self._aggregate_to = None
@property
def request_id(self):
return self._request_id
@property
def stream(self):
return self._response_queue
def queue_result(self, result):
if result is None:
self.close()
self._response_queue.put_nowait(result)
@property
def done(self):
"""
Readonly property.
:returns: `asyncio.Event` object
"""
return self._done
@property
def aggregate_to(self):
return self._aggregate_to
@aggregate_to.setter
def aggregate_to(self, val):
self._aggregate_to = val
def __aiter__(self):
return self
async def __anext__(self):
msg = await self.one()
if not msg:
raise StopAsyncIteration
return msg
def close(self):
self.done.set()
self._loop = None
@error_handler
async def one(self):
"""Get a single message from the response stream"""
if not self._response_queue.empty():
self.__log.debug("Response queue not empty")
msg = self._response_queue.get_nowait()
elif self.done.is_set():
self.__log.debug("'done' condition is set")
msg = None
else:
self.__log.debug(f"Trying to get from response queue. {self._response_queue=} {self._timeout=} {self._loop=}")
try:
msg = await asyncio.wait_for(self._response_queue.get(),
timeout=self._timeout,
loop=self._loop)
self._response_queue.task_done()
except asyncio.TimeoutError:
self.close()
raise exception.ResponseTimeoutError('Response timed out')
self.__log.debug(f"{msg=} {type(msg)=}")
return msg
async def all(self):
results = []
async for result in self:
results.append(result)
return results
```
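A minimal consumption sketch for the class above (not part of the library): it fakes the response messages that the connection layer would normally enqueue, using a hypothetical `Message` namedtuple, and assumes a Python/asyncio version that still accepts the `loop=` keyword used above (3.9 or older).
```python
import asyncio
from collections import namedtuple

# Hypothetical stand-in for a Gremlin Server response; the real driver enqueues
# objects exposing status_code, data and message attributes.
Message = namedtuple('Message', ['status_code', 'data', 'message'])

async def main(loop):
    rs = ResultSet('some-request-id', timeout=1, loop=loop)
    rs.queue_result(Message(200, [42], ''))
    rs.queue_result(None)            # None marks end-of-stream and closes the set
    print(await rs.all())            # -> [42]

loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
```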
#### File: aiogremlin/remote/driver_remote_connection.py
```python
import asyncio
from urllib.parse import urlparse
from aiogremlin.driver.cluster import Cluster
from gremlin_python.driver import serializer
from aiogremlin.remote.driver_remote_side_effects import (
AsyncRemoteTraversalSideEffects)
from gremlin_python.driver.remote_connection import RemoteTraversal
from autologging import logged, traced
__author__ = '<NAME> (<EMAIL>)'
@logged
@traced
class DriverRemoteConnection:
"""
Remote connection to a Gremlin Server. Do not instantiate directly,
instead use :py:meth:`DriverRemoteConnection.open` or
:py:meth:`DriverRemoteConnection.using`
:param aiogremlin.driver.client.Client client:
:param asyncio.BaseEventLoop loop:
:param aiogremlin.driver.cluster.Cluster cluster:
"""
def __init__(self, *, client, loop=None, cluster=None):
self._client = client
self._loop = loop
self._cluster = cluster
@property
def client(self):
return self._client
@property
def config(self):
return self._cluster.config
@classmethod
async def using(cls, cluster, aliases=None):
"""
Create a :py:class:`DriverRemoteConnection` using a specific
:py:class:`Cluster<aiogremlin.driver.cluster.Cluster>`
:param aiogremlin.driver.cluster.Cluster cluster:
:param dict aliases: Optional mapping for aliases. Default is `None`.
Also accepts `str` argument which will be assigned to `g`
"""
client = await cluster.connect(aliases=aliases)
loop = cluster._loop
return cls(client=client, loop=loop)
@classmethod
async def open(cls, *, url=None, aliases=None, loop=None,
graphson_reader=None, graphson_writer=None, **config):
"""
:param str url: Optional url for host Gremlin Server
:param dict aliases: Optional mapping for aliases. Default is `None`.
Also accepts `str` argument which will be assigned to `g`
:param asyncio.BaseEventLoop loop:
:param graphson_reader: Custom graphson_reader
:param graphson_writer: Custom graphson_writer
:param config: Optional cluster configuration passed as kwargs or `dict`
"""
if url:
parsed_url = urlparse(url)
config.update({
'scheme': parsed_url.scheme,
'hosts': [parsed_url.hostname],
'port': parsed_url.port})
if isinstance(aliases, str):
aliases = {'g': aliases}
# if not loop:
# loop = asyncio.get_event_loop()
message_serializer = serializer.GraphSONMessageSerializer(
reader=graphson_reader,
writer=graphson_writer)
config.update({'message_serializer': message_serializer})
cluster = await Cluster.open(loop=loop, aliases=aliases, **config)
client = await cluster.connect()
return cls(client=client, loop=loop, cluster=cluster)
async def close(self):
"""
Close underlying cluster if applicable. If created with
:py:meth:`DriverRemoteConnection.using`, cluster is NOT closed.
"""
if self._cluster:
await self._cluster.close()
async def submit(self, bytecode):
"""Submit bytecode to the Gremlin Server"""
result_set = await self._client.submit(bytecode)
side_effects = AsyncRemoteTraversalSideEffects(result_set.request_id,
self._client)
return RemoteTraversal(result_set, side_effects)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.close()
self._client = None
self._cluster = None
```
#### File: aiogremlin/structure/graph.py
```python
from aiogremlin.process import graph_traversal
from aiogremlin.process.traversal import AsyncTraversalStrategies
from gremlin_python.structure import graph
from autologging import logged, traced
@logged
@traced
class Graph(graph.Graph):
def __init__(self):
if self.__class__ not in AsyncTraversalStrategies.global_cache:
AsyncTraversalStrategies.global_cache[
self.__class__] = AsyncTraversalStrategies()
def traversal(self, traversal_source_class=None):
if not traversal_source_class:
traversal_source_class = graph_traversal.AsyncGraphTraversalSource
return traversal_source_class(
self, AsyncTraversalStrategies.global_cache[self.__class__])
```
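A hedged end-to-end sketch tying the pieces above together: it assumes a Gremlin Server reachable at localhost:8182 and that `Graph` and `DriverRemoteConnection` are exported at the package level as in the project examples; terminal steps such as `toList()` are awaitable in aiogremlin.
```python
import asyncio
from aiogremlin import DriverRemoteConnection, Graph

async def go():
    # open() takes keyword-only arguments, matching the signature above
    remote = await DriverRemoteConnection.open(
        url='ws://localhost:8182/gremlin', aliases='g')
    g = Graph().traversal().withRemote(remote)
    names = await g.V().hasLabel('person').values('name').toList()
    await remote.close()
    return names

print(asyncio.get_event_loop().run_until_complete(go()))
```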
#### File: aiogremlin/tests/test_aliases.py
```python
import json
import uuid
import pytest
from aiogremlin import driver
from gremlin_python.driver import request, serializer
@pytest.mark.asyncio
async def test_gremlin_query(cluster):
alias = { 'g': 'g' }
cluster = await driver.Cluster.open(aliases=alias, hosts=['gremlin-server'])
#client = await cluster.connect()
#assert client.aliases == alias
# resp = await client.submit("1 + 1")
#async for msg in resp:
# print(msg)
await cluster.close()
@pytest.mark.asyncio
async def test_alias_serialization():
alias = { 'g': 'g' }
message = '1 + 1'
cluster = await driver.Cluster.open(aliases=alias, hosts=['gremlin-server'])
client = await cluster.connect()
# This is the code client/conn uses on submit
message = request.RequestMessage(
processor='', op='eval',
args={'gremlin': message,
'aliases': client._aliases})
request_id = str(uuid.uuid4())
message = serializer.GraphSONMessageSerializer().serialize_message(
request_id, message)
message = message.decode('utf-8')[34:]
aliases = json.loads(message)['args']['aliases']
assert aliases == alias
await cluster.close()
``` |
{
"source": "jp-larose/hobgoblin",
"score": 2
} |
#### File: hobgoblin/fileio/graphson.py
```python
import collections
try:
import ujson as json
except ImportError:
import json
from gremlin_python.structure.io import graphsonV2d0 as graphson
from hobgoblin.element import Vertex, Edge, VertexProperty
from hobgoblin.manager import ListVertexPropertyManager
writer = graphson.GraphSONWriter()
AdjList = collections.namedtuple("AdjList", "vertex inE outE")
vp_id = 10
def dump(fpath, *adj_lists, mode="w"):
"""Convert Hobgoblin elements to GraphSON"""
with open(fpath, mode) as f:
for adj_list in adj_lists:
dumped = dumps(adj_list)
f.write(dumped + '\n')
def dumps(adj_list):
"""Convert Hobgoblin elements to GraphSON"""
vertex = _prep_vertex(adj_list.vertex)
for inE in adj_list.inE:
prepped = _prep_edge(inE, "inV")
label = inE.__label__
vertex["inE"].setdefault(label, [])
vertex["inE"][label].append(prepped)
for outE in adj_list.outE:
prepped = _prep_edge(outE, "outV")
label = outE.__label__
vertex["outE"].setdefault(label, [])
vertex["outE"][label].append(prepped)
return json.dumps(vertex)
def _prep_edge(e, t):
if t == 'inV':
other = "outV"
other_id = e.source.id
elif t == 'outV':
other = "inV"
other_id = e.target.id
else:
raise RuntimeError('Invalid edge type')
edge = {
"id": {
"@type": "g:Int32",
"@value": e.id,
},
other: {
"@type": "g:Int32",
"@value": other_id,
},
"properties": {}
}
for db_name, (ogm_name, _) in e.__mapping__.db_properties.items():
edge["properties"][db_name] = writer.toDict(getattr(e, ogm_name))
return edge
def _prep_vertex(v):
global vp_id
mapping = v.__mapping__
properties = v.__properties__
vertex = {
"id": {
"@type": "g:Int32",
"@value": v.id
},
"label": v.__label__,
"properties": {},
"outE": {},
"inE": {}
}
for db_name, (ogm_name, _) in mapping.db_properties.items():
prop = properties[ogm_name]
vertex["properties"].setdefault(db_name, [])
if isinstance(prop, VertexProperty):
prop = getattr(v, ogm_name)
if isinstance(prop, ListVertexPropertyManager):
for p in prop:
value = p.value
vp = _prep_vp(p, value, v, db_name)
vp_id += 1
vertex["properties"][db_name].append(vp)
continue
else:
value = prop.value
else:
value = getattr(v, ogm_name)
vp = _prep_vp(prop, value, v, db_name)
vp_id += 1
vertex["properties"][db_name].append(vp)
return vertex
def _prep_vp(prop, value, v, db_name):
vp = {
"id": {
"@type": "g:Int64",
"@value": vp_id
},
"value": writer.toDict(value),
"properties": {}
}
if isinstance(prop, VertexProperty):
for db_name, (ogm_name, _) in prop.__mapping__.db_properties.items():
vp["properties"][db_name] = writer.toDict(getattr(prop, ogm_name))
return vp
def _dump_edge(e):
pass
```
#### File: hobgoblin/tests/test_pool.py
```python
import asyncio
import pytest
@pytest.mark.asyncio
async def test_pool_init(connection_pool):
await connection_pool.init_pool()
assert len(connection_pool._available) == 1
await connection_pool.close()
@pytest.mark.asyncio
async def test_acquire_release(connection_pool):
conn = await connection_pool.acquire()
assert not len(connection_pool._available)
assert len(connection_pool._acquired) == 1
assert conn.times_acquired == 1
connection_pool.release(conn)
assert len(connection_pool._available) == 1
assert not len(connection_pool._acquired)
assert not conn.times_acquired
await connection_pool.close()
@pytest.mark.asyncio
async def test_acquire_multiple(connection_pool):
conn1 = await connection_pool.acquire()
conn2 = await connection_pool.acquire()
assert conn1 is not conn2
assert len(connection_pool._acquired) == 2
await connection_pool.close()
@pytest.mark.asyncio
async def test_share(connection_pool):
connection_pool._max_conns = 1
conn1 = await connection_pool.acquire()
conn2 = await connection_pool.acquire()
assert conn1 is conn2
assert conn1.times_acquired == 2
await connection_pool.close()
@pytest.mark.asyncio
async def test_acquire_multiple_and_share(connection_pool):
connection_pool._max_conns = 2
connection_pool._max_times_acquired = 2
conn1 = await connection_pool.acquire()
conn2 = await connection_pool.acquire()
assert conn1 is not conn2
conn3 = await connection_pool.acquire()
conn4 = await connection_pool.acquire()
assert conn3 is not conn4
assert conn3 is conn1
assert conn4 is conn2
await connection_pool.close()
@pytest.mark.asyncio
async def test_max_acquired(connection_pool):
connection_pool._max_conns = 2
connection_pool._max_times_acquired = 2
conn1 = await connection_pool.acquire()
conn2 = await connection_pool.acquire()
conn3 = await connection_pool.acquire()
conn4 = await connection_pool.acquire()
with pytest.raises(asyncio.TimeoutError):
await asyncio.wait_for(connection_pool.acquire(), timeout=0.1)
await connection_pool.close()
@pytest.mark.asyncio
async def test_release_notify(connection_pool):
connection_pool._max_conns = 2
connection_pool._max_times_acquired = 2
conn1 = await connection_pool.acquire()
conn2 = await connection_pool.acquire()
conn3 = await connection_pool.acquire()
conn4 = await connection_pool.acquire()
async def release(conn):
conn.release()
results = await asyncio.gather(
*[connection_pool.acquire(), release(conn4)])
conn4 = results[0]
assert conn4 is conn2
await connection_pool.close()
```
#### File: hobgoblin/tests/test_properties.py
```python
import pytest
from gremlin_python.statics import long
from hobgoblin import element, exception, manager, properties
def test_set_change_property(person, lives_in):
# vertex
assert not person.name
person.name = 'leif'
assert person.name == 'leif'
person.name = 'leifur'
assert person.name == 'leifur'
# edge
assert not lives_in.notes
lives_in.notes = 'notable'
assert lives_in.notes == 'notable'
lives_in.notes = 'more notable'
assert lives_in.notes == 'more notable'
def test_property_default(knows):
assert knows.notes == 'N/A'
knows.notes = 'notable'
assert knows.notes == 'notable'
def test_false_bool_default(place):
assert place.incorporated.value is False
def test_validation(person):
person.age = 10
with pytest.raises(Exception):
person.age = 'hello'
def test_setattr_validation(person):
setattr(person, 'age', 10)
assert person.age == 10
with pytest.raises(Exception):
setattr(person, 'age', 'hello')
def test_set_id_long(person):
person.id = 1
assert isinstance(person.id, long)
def test_id_class_attr_throws(person_class):
with pytest.raises(exception.ElementError):
person_class.id
# Vertex properties
def test_set_change_vertex_property(person):
assert not person.birthplace
person.birthplace = 'Iowa City'
assert isinstance(person.birthplace, element.VertexProperty)
assert person.birthplace.value == 'Iowa City'
person.birthplace = 'U of I Hospital'
assert person.birthplace.value == 'U of I Hospital'
def test_vertex_property_default():
"""Makes sure that a brand new VertexProperty (i.e., with no value set) is
still representable. Addresses issue #52.
"""
vp = element.VertexProperty(int)
assert repr(vp) == "<VertexProperty(type=0, value=None)"
def test_validate_vertex_prop(person):
assert not person.birthplace
person.birthplace = 1
assert person.birthplace.value == '1'
def test_set_change_list_card_vertex_property(person):
assert not person.nicknames
person.nicknames = 'sly'
assert isinstance(person.nicknames, list)
assert isinstance(person.nicknames, manager.ListVertexPropertyManager)
assert isinstance(person.nicknames[0], element.VertexProperty)
assert person.nicknames[0].value == 'sly'
assert person.nicknames('sly') == person.nicknames[0]
person.nicknames = set(['sly', 'guy'])
assert isinstance(person.nicknames, list)
assert person.nicknames('sly').value == 'sly'
assert person.nicknames('guy').value == 'guy'
person.nicknames = ('sly', 'big', 'guy')
assert isinstance(person.nicknames, list)
assert [v.value for v in person.nicknames] == ['sly', 'big', 'guy']
person.nicknames = ['sly', 'big', 'guy', 'guy']
assert isinstance(person.nicknames, list)
assert len(person.nicknames('guy')) == 2
assert [v.value for v in person.nicknames] == ['sly', 'big', 'guy', 'guy']
person.nicknames.append(1)
assert person.nicknames('1').value == '1'
def test_list_card_vertex_property_validation(person):
person.nicknames = [1, 1.5, 2]
assert [v.value for v in person.nicknames] == ['1', '1.5', '2']
def test_set_change_set_card_vertex_property(place):
assert not place.important_numbers
place.important_numbers = 1
assert isinstance(place.important_numbers, set)
assert isinstance(place.important_numbers,
manager.SetVertexPropertyManager)
number_one, = place.important_numbers
assert isinstance(number_one, element.VertexProperty)
assert number_one.value == 1
assert place.important_numbers(1) == number_one
place.important_numbers = [1, 2]
assert isinstance(place.important_numbers, set)
assert {v.value for v in place.important_numbers} == set([1, 2])
place.important_numbers.add(3)
assert {v.value for v in place.important_numbers} == set([1, 2, 3])
place.important_numbers = (1, 2, 3, 4)
assert isinstance(place.important_numbers, set)
assert {v.value for v in place.important_numbers} == set([1, 2, 3, 4])
place.important_numbers = set([1, 2, 3])
assert isinstance(place.important_numbers, set)
assert {v.value for v in place.important_numbers} == set([1, 2, 3])
with pytest.raises(exception.ValidationError):
place.important_numbers.add('dude')
def test_set_card_union(place):
place.important_numbers = set([1, 2, 3])
place.important_numbers = place.important_numbers.union({3, 4, 5})
def test_set_card_64bit_integer(place):
place.important_numbers = set([long(1), long(2), long(3)])
assert all(isinstance(i.value, long) for i in place.important_numbers)
def test_set_card_validation_vertex_property(place):
with pytest.raises(exception.ValidationError):
place.important_numbers = set(['hello', 2, 3])
def test_cant_set_vertex_prop_on_edge():
with pytest.raises(exception.MappingError):
class MyEdge(element.Edge):
vert_prop = element.VertexProperty(properties.String)
def test_meta_property_set_update(place):
assert not place.historical_name
place.historical_name = ['hispania', 'al-andalus']
place.historical_name('hispania').notes = 'roman rule'
assert place.historical_name('hispania').notes == 'roman rule'
place.historical_name('hispania').year = 300
assert place.historical_name('hispania').year == 300
place.historical_name('al-andalus').notes = 'muslim rule'
assert place.historical_name('al-andalus').notes == 'muslim rule'
place.historical_name('al-andalus').year = 700
assert place.historical_name('al-andalus').year == 700
def test_meta_property_validation(place):
assert not place.historical_name
place.historical_name = ['spain']
with pytest.raises(exception.ValidationError):
place.historical_name('spain').year = 'hello'
class TestString:
def test_validation(self, string):
assert string.validate(1) == '1'
def test_to_db(self, string):
assert string.to_db('hello') == 'hello'
def test_to_ogm(self, string):
assert string.to_ogm('hello') == 'hello'
def test_initval_to_db(self, string_class):
string = string_class('hello')
assert string.to_db() == 'hello'
class TestInteger:
def test_validation(self, integer):
assert integer.validate('1') == 1
with pytest.raises(Exception):
integer.validate('hello')
def test_to_db(self, integer):
assert integer.to_db(1) == 1
def test_to_ogm(self, integer):
assert integer.to_db(1) == 1
def test_initval_to_db(self, integer_class):
integer = integer_class(1)
assert integer.to_db() == 1
class TestFloat:
def test_validation(self, flt):
assert flt.validate(1.2) == 1.2
with pytest.raises(Exception):
flt.validate('hello')
def test_to_db(self, flt):
assert flt.to_db(1.2) == 1.2
def test_to_ogm(self, flt):
assert flt.to_db(1.2) == 1.2
def test_initval_to_db(self, flt_class):
flt = flt_class(1.2)
assert flt.to_db() == 1.2
class TestBoolean:
def test_validation_true(self, boolean):
assert boolean.validate(True)
def test_validation_false(self, boolean):
assert not boolean.validate(False)
def test_to_db_true(self, boolean):
assert boolean.to_db(True)
def test_to_db_false(self, boolean):
assert not boolean.to_db(False)
def test_to_ogm_true(self, boolean):
assert boolean.to_ogm(True)
def test_to_ogm_false(self, boolean):
assert not boolean.to_ogm(False)
def test_initval_to_db_true(self, boolean_class):
boolean = boolean_class(True)
assert boolean.to_db()
    def test_initval_to_db_false(self, boolean_class):
boolean = boolean_class(False)
assert not boolean.to_db()
``` |
{
"source": "jplasser/cloob",
"score": 2
} |
#### File: src/training/methods.py
```python
import torch
import torch.nn as nn
def infoLOOB_loss(x, y, i, inv_tau):
tau = 1 / inv_tau
k = x @ y.T / tau
positives = -torch.mean(torch.sum(k * i, dim=1))
# For logsumexp the zero entries must be equal to a very large negative number
large_neg = -1000.0
arg_lse = k * torch.logical_not(i) + i * large_neg
negatives = torch.mean(torch.logsumexp(arg_lse, dim=1))
return tau * (positives + negatives)
def cloob(image_features, text_features, inv_tau, hopfield_layer):
p_xx, p_yy, p_xy, p_yx = hopfield_retrieval(image_features, text_features, hopfield_layer)
identity = torch.eye(p_xx.shape[0]) > 0.5
i = identity.to(p_xx.device)
loss_img = infoLOOB_loss(p_xx, p_xy, i, inv_tau=inv_tau)
loss_txt = infoLOOB_loss(p_yy, p_yx, i, inv_tau=inv_tau)
return loss_img + loss_txt
def clip(image_features, text_features, inv_tau, loss_fct_img, loss_fct_txt, args):
logits_per_image = inv_tau * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
ground_truth = torch.arange(len(logits_per_image)).long()
if args.gpu is not None:
ground_truth = ground_truth.cuda(args.gpu, non_blocking=True)
loss_img = loss_fct_img(logits_per_image, ground_truth) / 2
loss_txt = loss_fct_txt(logits_per_text, ground_truth) / 2
return loss_img + loss_txt
def hopfield_retrieval(image_features, text_features, hopfield_layer):
patterns_xx = hopfield(state_patterns=image_features, stored_patterns=image_features, hopfield_layer=hopfield_layer)
patterns_yy = hopfield(state_patterns=text_features, stored_patterns=text_features, hopfield_layer=hopfield_layer)
patterns_xy = hopfield(state_patterns=text_features, stored_patterns=image_features, hopfield_layer=hopfield_layer)
patterns_yx = hopfield(state_patterns=image_features, stored_patterns=text_features, hopfield_layer=hopfield_layer)
return patterns_xx, patterns_yy, patterns_xy, patterns_yx
def hopfield(state_patterns, stored_patterns, hopfield_layer):
retrieved_patterns = hopfield_layer.forward((stored_patterns.unsqueeze(0), state_patterns.unsqueeze(0), stored_patterns.unsqueeze(0))).squeeze()
# Row vectors -> dim=1 to normalize the row vectors
retrieved_patterns = retrieved_patterns / retrieved_patterns.norm(dim=1, keepdim=True)
return retrieved_patterns
```
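A small self-contained sanity check of the InfoLOOB objective above (not from the repository): it feeds L2-normalised random image/text features together with the identity mask, i.e. the same shapes `cloob` passes in after Hopfield retrieval, which is skipped here.
```python
import torch

batch, dim = 8, 512
x = torch.nn.functional.normalize(torch.randn(batch, dim), dim=1)  # "image" features
y = torch.nn.functional.normalize(torch.randn(batch, dim), dim=1)  # "text" features
i = torch.eye(batch) > 0.5                                         # positive-pair mask
inv_tau = torch.tensor(14.3)                                       # 1 / temperature

loss = infoLOOB_loss(x, y, i, inv_tau)
print(loss.item())
```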
#### File: src/training/scheduler.py
```python
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
import math
def get_cosine_schedule_with_warmup(
optimizer: Optimizer, warmup: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
warmup (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`float`, `optional`, defaults to 0.5):
            The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
following a half-cosine).
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < warmup:
return float(current_step) / float(max(1, warmup))
progress = float(current_step - warmup) / float(max(1, num_training_steps - warmup))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
optimizer: Optimizer, warmup: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
"""
Create a schedule with a learning rate that decreases following the values of the cosine function between the
initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
linearly between 0 and the initial lr set in the optimizer.
Args:
optimizer (:class:`~torch.optim.Optimizer`):
The optimizer for which to schedule the learning rate.
warmup (:obj:`int`):
The number of steps for the warmup phase.
num_training_steps (:obj:`int`):
The total number of training steps.
num_cycles (:obj:`int`, `optional`, defaults to 1):
The number of hard restarts to use.
last_epoch (:obj:`int`, `optional`, defaults to -1):
The index of the last epoch when resuming training.
Return:
:obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
"""
def lr_lambda(current_step):
if current_step < warmup:
return float(current_step) / float(max(1, warmup))
progress = float(current_step - warmup) / float(max(1, num_training_steps - warmup))
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
return LambdaLR(optimizer, lr_lambda, last_epoch)
``` |
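A minimal sketch of driving the warmup/cosine schedule defined above inside a training loop; `model` and the step counts are placeholders.
```python
import torch

model = torch.nn.Linear(10, 2)                       # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
num_training_steps = 1000
scheduler = get_cosine_schedule_with_warmup(optimizer, warmup=100,
                                            num_training_steps=num_training_steps)

for step in range(num_training_steps):
    # ... forward / backward would go here ...
    optimizer.step()
    scheduler.step()                                 # advance the LR schedule once per step
```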
{
"source": "jplasser/CNEP",
"score": 3
} |
#### File: src/models/events_data_encoder.py
```python
import torch
from torch import nn, Tensor
from torch.nn.utils.rnn import PackedSequence
from sklearn import metrics
import numpy as np
from tqdm import tqdm
from typing import Optional
from collections import OrderedDict
class LSTM_CNN2(nn.Module):
def __init__(self, input_dim=390, hidden_dim=8, lstm_layers=1):
# dim, batch_norm, dropout, rec_dropout, task,
# target_repl = False, deep_supervision = False, num_classes = 1,
# depth = 1, input_dim = 390, ** kwargs
super(LSTM_CNN2, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.layers = lstm_layers
self.bidirectional = True
# self.dense = dense
# some more parameters
# self.output_dim = dim
# self.batch_norm = batch_norm
self.dropout = 0.3
self.rec_dropout = 0.3
self.depth = lstm_layers
self.drop_conv = 0.5
self.num_classes = 1
# define the LSTM layer
# in keras we have inputs: A 3D tensor with shape [batch, timesteps, feature]
# units: Positive integer, dimensionality of the output space. = dim=num_units=hidden_size
if self.layers >= 2:
self.lstm1 = nn.LSTM(input_size=self.input_dim,
hidden_size=self.hidden_dim,
num_layers=self.layers - 1,
dropout=self.rec_dropout,
bidirectional=self.bidirectional,
batch_first=True)
self.do0 = nn.Dropout(self.dropout)
# this is not in the original model
# self.act1 = nn.ReLU()
if self.layers >= 2:
self.lstm2 = nn.LSTM(input_size=self.hidden_dim * 2,
hidden_size=self.hidden_dim * 2,
num_layers=1,
dropout=self.rec_dropout,
bidirectional=False,
batch_first=True)
else:
self.lstm2 = nn.LSTM(input_size=self.input_dim,
hidden_size=self.hidden_dim * 2,
num_layers=1,
dropout=self.rec_dropout,
bidirectional=False,
batch_first=True)
self.do1 = nn.Dropout(self.dropout)
# self.bn0 = nn.BatchNorm1d(48 * self.hidden_dim*2)
# three Convolutional Neural Networks with different kernel sizes
nfilters = [2, 3, 4]
nb_filters = 100
pooling_reps = []
self.cnn1 = nn.Sequential(
nn.Conv1d(in_channels=self.hidden_dim * 2, out_channels=nb_filters, kernel_size=2,
stride=1, padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros'),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2),
nn.Flatten()
)
self.cnn2 = nn.Sequential(
nn.Conv1d(in_channels=self.hidden_dim * 2, out_channels=nb_filters, kernel_size=3,
stride=1, padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros'),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2),
nn.Flatten()
)
self.cnn3 = nn.Sequential(
nn.Conv1d(in_channels=self.hidden_dim * 2, out_channels=nb_filters, kernel_size=4,
stride=1, padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros'),
nn.ReLU(),
nn.MaxPool1d(kernel_size=2),
nn.Flatten()
)
self.do2 = nn.Dropout(self.drop_conv)
self.final = nn.Linear(6800, self.num_classes)
def forward(self, inputs, labels=None):
out = inputs
if self.layers >= 2:
out, h = self.lstm1(out)
out = self.do0(out)
out, h = self.lstm2(out)
out = self.do1(out)
pooling_reps = []
pool_vecs = self.cnn1(out.permute((0, 2, 1)))
pooling_reps.append(pool_vecs)
pool_vecs = self.cnn2(out.permute((0, 2, 1)))
pooling_reps.append(pool_vecs)
pool_vecs = self.cnn3(out.permute((0, 2, 1)))
pooling_reps.append(pool_vecs)
# concatenate all vectors
representation = torch.cat(pooling_reps, dim=1).contiguous()
out = self.do2(representation)
out = self.final(out)
return out
class VariationalDropout(nn.Module):
"""
Applies the same dropout mask across the temporal dimension
See https://arxiv.org/abs/1512.05287 for more details.
Note that this is not applied to the recurrent activations in the LSTM like the above paper.
Instead, it is applied to the inputs and outputs of the recurrent layer.
"""
def __init__(self, dropout: float, batch_first: Optional[bool] = False):
super().__init__()
self.dropout = dropout
self.batch_first = batch_first
def forward(self, x: torch.Tensor) -> torch.Tensor:
if not self.training or self.dropout <= 0.:
return x
is_packed = isinstance(x, PackedSequence)
if is_packed:
x, batch_sizes = x
max_batch_size = int(batch_sizes[0])
else:
batch_sizes = None
max_batch_size = x.size(0)
# Drop same mask across entire sequence
if self.batch_first:
m = x.new_empty(max_batch_size, 1, x.size(2), requires_grad=False).bernoulli_(1 - self.dropout)
else:
m = x.new_empty(1, max_batch_size, x.size(2), requires_grad=False).bernoulli_(1 - self.dropout)
x = x.masked_fill(m == 0, 0) / (1 - self.dropout)
if is_packed:
return PackedSequence(x, batch_sizes)
else:
return x
class LSTMNew(nn.LSTM):
def __init__(self, *args, dropouti: float = 0.,
dropoutw: float = 0., dropouto: float = 0.,
batch_first=True, unit_forget_bias=True, **kwargs):
super().__init__(*args, **kwargs, batch_first=batch_first)
self.unit_forget_bias = unit_forget_bias
self.dropoutw = dropoutw
self.input_drop = VariationalDropout(dropouti,
batch_first=batch_first)
self.output_drop = VariationalDropout(dropouto,
batch_first=batch_first)
self._init_weights()
def _init_weights(self):
"""
Use orthogonal init for recurrent layers, xavier uniform for input layers
Bias is 0 except for forget gate
"""
for name, param in self.named_parameters():
if "weight_hh" in name:
nn.init.orthogonal_(param.data)
elif "weight_ih" in name:
nn.init.xavier_uniform_(param.data)
elif "bias" in name and self.unit_forget_bias:
nn.init.zeros_(param.data)
param.data[self.hidden_size:2 * self.hidden_size] = 1
def _drop_weights(self):
for name, param in self.named_parameters():
if "weight_hh" in name:
getattr(self, name).data = \
torch.nn.functional.dropout(param.data, p=self.dropoutw,
training=self.training).contiguous()
def forward(self, input, hx=None):
self._drop_weights()
self.flatten_parameters()
input = self.input_drop(input)
seq, state = super().forward(input, hx=hx)
return self.output_drop(seq), state
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class EventsDataEncoder(nn.Module):
def __init__(self, input_dim=390, hidden_dim=512, lstm_layers=3,
filter_kernels=[2, 3, 4], filters=100, output_dim=1024,
add_embeds=True, embed_dim=700,
dropout=0.3, dropout_w=0.2, dropout_conv=0.2):
# dim, batch_norm, dropout, rec_dropout, task,
# target_repl = False, deep_supervision = False, num_classes = 1,
# depth = 1, input_dim = 390, ** kwargs
super(EventsDataEncoder, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.layers = lstm_layers
self.bidirectional = True
# some more parameters
self.dropout = dropout
self.rec_dropout = dropout_w
self.depth = lstm_layers
self.drop_conv = dropout_conv
self.num_classes = 1
self.output_dim = output_dim
self.add_embeds = add_embeds
self.embed_dim = embed_dim if add_embeds else 0
# define the LSTM layer
# in keras we have inputs: A 3D tensor with shape [batch, timesteps, feature]
# units: Positive integer, dimensionality of the output space. = dim=num_units=hidden_size
if self.layers >= 2:
self.lstm1 = LSTMNew(input_size=self.input_dim,
hidden_size=self.hidden_dim,
num_layers=self.layers - 1,
dropoutw=self.rec_dropout,
dropout=self.rec_dropout,
bidirectional=self.bidirectional,
batch_first=True)
self.do0 = nn.Dropout(self.dropout)
# this is not in the original model
if self.layers >= 2:
self.lstm2 = LSTMNew(input_size=self.hidden_dim * 2,
hidden_size=self.hidden_dim * 2,
num_layers=1,
dropoutw=self.rec_dropout,
dropout=self.rec_dropout,
bidirectional=False,
batch_first=True)
else:
self.lstm2 = LSTMNew(input_size=self.input_dim,
hidden_size=self.hidden_dim * 2,
num_layers=1,
dropoutw=self.rec_dropout,
dropout=self.rec_dropout,
bidirectional=False,
batch_first=True)
# three Convolutional Neural Networks with different kernel sizes
nfilters = filter_kernels
nb_filters = filters
# 48 hrs of events data
L_out = [(48 - k) + 1 for k in nfilters]
maxpool_padding, maxpool_dilation, maxpool_kernel_size, maxpool_stride = (0, 1, 2, 2)
dim_ = int(np.sum([100 * np.floor(
(l + 2 * maxpool_padding - maxpool_dilation * (maxpool_kernel_size - 1) - 1) / maxpool_stride + 1)
for l in
L_out]))
if self.add_embeds:
dim_ += self.embed_dim
self.cnn1 = nn.Sequential(OrderedDict([
("cnn1_conv1d", nn.Conv1d(in_channels=self.hidden_dim * 2, out_channels=nb_filters, kernel_size=nfilters[0],
stride=1, padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros')),
("cnn1_relu", nn.ReLU()),
("cnn1_maxpool1d", nn.MaxPool1d(kernel_size=2)),
("cnn1_flatten", nn.Flatten())
]))
self.cnn2 = nn.Sequential(OrderedDict([
("cnn2_conv1d", nn.Conv1d(in_channels=self.hidden_dim * 2, out_channels=nb_filters, kernel_size=nfilters[1],
stride=1, padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros')),
("cnn2_relu", nn.ReLU()),
("cnn2_maxpool1d", nn.MaxPool1d(kernel_size=2)),
("cnn2_flatten", nn.Flatten())
]))
self.cnn3 = nn.Sequential(OrderedDict([
("cnn3_conv1d", nn.Conv1d(in_channels=self.hidden_dim * 2, out_channels=nb_filters, kernel_size=nfilters[2],
stride=1, padding=0, dilation=1, groups=1, bias=True,
padding_mode='zeros')),
("cnn3_relu", nn.ReLU()),
("cnn3_maxpool1d", nn.MaxPool1d(kernel_size=2)),
("cnn3_flatten", nn.Flatten())
]))
# dim_latent = int(np.sum([100 * np.floor(
# (l + 2 * maxpool_padding - maxpool_dilation * (maxpool_kernel_size - 1) - 1) / maxpool_stride + 1)
# for l in
# L_out]))
# dim_encoder = embed_dim * 2
# self.latent = nn.Sequential(OrderedDict([
# ("enc_fc1", nn.Linear(dim_latent, embed_dim)),
# # ("enc_fc1", nn.Linear(dim_, 1024)),
# # ("enc_fc1", nn.Linear(dim_, self.output_dim)),
# # ("enc_bn1", nn.BatchNorm1d(dim_ * 2)), # new BN
# ("enc_relu", nn.ReLU())])
# )
self.encoder = nn.Sequential(OrderedDict([
# ("enc_layernorm1", nn.LayerNorm(dim_)),
("enc_fc1", nn.Linear(dim_, dim_ * 2)),
("enc_relu", nn.ReLU()),
("enc_layernorm2", nn.LayerNorm(dim_ * 2)),
("enc_fc2", nn.Linear(dim_ * 2, self.output_dim)),
("enc_relu2", nn.ReLU()),
("enc_layernorm3", nn.LayerNorm(self.output_dim))
]))
self.do2 = nn.Dropout(self.drop_conv)
# self.final = nn.Linear(dim_, self.num_classes)
def forward(self, inputs, embeds=None):
out = inputs
if self.layers >= 2:
out, h = self.lstm1(out)
out = self.do0(out)
out, h = self.lstm2(out)
pooling_reps = []
pool_vecs = self.cnn1(out.permute((0, 2, 1)))
pooling_reps.append(pool_vecs)
pool_vecs = self.cnn2(out.permute((0, 2, 1)))
pooling_reps.append(pool_vecs)
pool_vecs = self.cnn3(out.permute((0, 2, 1)))
pooling_reps.append(pool_vecs)
# concatenate all vectors
representation = torch.cat(pooling_reps, dim=1).contiguous()
# new model architecture
# out = self.latent(representation)
out = self.do2(representation)
if self.add_embeds:
out = torch.cat([out, embeds], dim=1)
encoding = self.encoder(out)
# out = self.final(out)
# return encoding in the shape of (output_dim)
return encoding
``` |
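A shape-check sketch for the encoder above (not from the repository): 48 hourly time steps of 390 event features per stay, plus a 700-dimensional note embedding when `add_embeds=True`; batch size and tensors are arbitrary.
```python
import torch

encoder = EventsDataEncoder(input_dim=390, hidden_dim=512, lstm_layers=3,
                            output_dim=1024, add_embeds=True, embed_dim=700)
encoder.eval()

events = torch.randn(4, 48, 390)      # (batch, time steps, event features)
embeds = torch.randn(4, 700)          # pre-computed note embeddings

with torch.no_grad():
    z = encoder(events, embeds)
print(z.shape)                        # -> torch.Size([4, 1024])
```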
{
"source": "jplasser/embeddings",
"score": 3
} |
#### File: embeddings/eval/codes_analysis.py
```python
from __future__ import division
import argparse
import numpy as np
import scipy as sp
from scipy.spatial.distance import cdist
from icd9 import ICD9
tree = ICD9('codes.json')
def get_icd9_pairs(icd9_set):
icd9_pairs = {}
with open('icd9_grp_file.txt', 'r') as infile:
data = infile.readlines()
for row in data:
codes, name = row.strip().split('#')
name = name.strip()
codes = codes.strip().split(' ')
new_codes = set([])
for code in codes:
if code in icd9_set:
new_codes.add(code)
elif len(code) > 5 and code[:5] in icd9_set:
new_codes.add(code[:5])
elif len(code) > 4 and code[:3] in icd9_set:
new_codes.add(code[:3])
codes = list(new_codes)
if len(codes) > 1:
for idx, code in enumerate(codes):
if code not in icd9_pairs:
icd9_pairs[code] = set([])
icd9_pairs[code].update(set(codes[:idx]))
icd9_pairs[code].update(set(codes[idx+1:]))
return icd9_pairs
def get_coarse_icd9_pairs(icd9_set):
icd9_pairs = {}
ccs_to_icd9 = {}
with open('ccs_coarsest.txt', 'r') as infile:
data = infile.readlines()
currect_ccs = ''
for row in data:
if row[:10].strip() != '':
current_ccs = row[:10].strip()
ccs_to_icd9[current_ccs] = set([])
elif row.strip() != '':
ccs_to_icd9[current_ccs].update(set(row.strip().split(' ')))
ccs_coarse = {}
for ccs in ccs_to_icd9.keys():
ccs_eles = ccs.split('.')
if len(ccs_eles) >= 2:
code = ccs_eles[0] + '.' + ccs_eles[1]
if code not in ccs_coarse:
ccs_coarse[code] = set([])
ccs_coarse[code].update(ccs_to_icd9[ccs])
for ccs in ccs_coarse.keys():
new_codes = set([])
for code in ccs_coarse[ccs]:
if len(code) > 3:
new_code = code[:3] + '.' + code[3:]
code = new_code
if code in icd9_set:
new_codes.add(code)
elif len(code) > 5 and code[:5] in icd9_set:
new_codes.add(code[:5])
elif len(code) > 4 and code[:3] in icd9_set:
new_codes.add(code[:3])
codes = list(new_codes)
if len(codes) > 1:
for idx, code in enumerate(codes):
if code not in icd9_pairs:
icd9_pairs[code] = set([])
icd9_pairs[code].update(set(codes[:idx]))
icd9_pairs[code].update(set(codes[idx+1:]))
return icd9_pairs
def get_cui_concept_mappings():
concept_to_cui_hdr = '2b_concept_ID_to_CUI.txt'
concept_to_cui = {}
cui_to_concept = {}
with open(concept_to_cui_hdr, 'r') as infile:
lines = infile.readlines()
for line in lines:
concept = line.split('\t')[0]
cui = line.split('\t')[1].split('\r')[0]
concept_to_cui[concept] = cui
cui_to_concept[cui] = concept
return concept_to_cui, cui_to_concept
def get_icd9_cui_mappings():
cui_to_icd9 = {}
icd9_to_cui = {}
with open('cui_icd9.txt', 'r') as infile:
data = infile.readlines()
for row in data:
ele = row.strip().split('|')
if ele[11] == 'ICD9CM':
cui = ele[0]
icd9 = ele[10]
if cui not in cui_to_icd9 and icd9 != '' and '-' not in icd9:
cui_to_icd9[cui] = icd9
icd9_to_cui[icd9] = cui
return cui_to_icd9, icd9_to_cui
def read_embedding_cui(filename):
concept_to_cui, cui_to_concept = get_cui_concept_mappings() # comment out this after fix input
cui_to_icd9, icd9_to_cui = get_icd9_cui_mappings()
with open(filename, 'r') as infile:
embedding_num, dimension = map(int, infile.readline().strip().split(' '))
# -1 for remove </s>
embedding_matrix = np.zeros((embedding_num-1, dimension))
data = infile.readlines()
idx_to_name = {}
name_to_idx = {}
embedding_type_to_indices = {}
embedding_type_to_indices['IDX'] = []
embedding_type_to_indices['O'] = []
for idx in xrange(embedding_num-1):
datum = data[idx+1].strip().split(' ')
cui = datum[0]
if cui[0] != 'C':
if cui in concept_to_cui:
cui = concept_to_cui[cui]
embedding_matrix[idx,:] = np.array(map(float, datum[1:]))
# potential bug here
if cui in cui_to_icd9:
idx_to_name[idx] = cui_to_icd9[cui]
name_to_idx[cui_to_icd9[cui]] = idx
embedding_type_to_indices['IDX'].append(idx)
else:
idx_to_name[idx] = cui
name_to_idx[cui] = idx
embedding_type_to_indices['O'].append(idx)
return embedding_matrix, embedding_type_to_indices, name_to_idx, idx_to_name
def read_embedding_codes(filename):
with open(filename, 'r') as infile:
embedding_num, dimension = map(int, infile.readline().strip().split(' '))
# -1 for remove </s>
embedding_matrix = np.zeros((embedding_num-1, dimension))
data = infile.readlines()
embedding_type_to_indices = {}
name_to_idx = {}
idx_to_name = {}
for idx in xrange(embedding_num-1):
datum = data[idx+1].strip().split(' ')
embedding_name = datum[0]
embedding_type, embedding_value = embedding_name.split('_')
name_to_idx[embedding_value] = idx
idx_to_name[idx] = embedding_value
if embedding_type not in embedding_type_to_indices:
embedding_type_to_indices[embedding_type] = []
embedding_type_to_indices[embedding_type].append(idx)
embedding_matrix[idx,:] = np.array(map(float, datum[1:]))
return embedding_matrix, embedding_type_to_indices, name_to_idx, idx_to_name
def generate_overlapping_sets(filenames_type):
embedding_idx_icd9 = {} # a dictionary of (embedding_matrix, idx_to_icd9, icd9_to_idx)
overlapping_icd9s = set([])
start = 1
for filename, embedding_type in filenames_type:
#print filename
#print embedding_type
if embedding_type == 'codes':
embedding_matrix, embedding_type_to_indices, icd9_to_idx, idx_to_icd9 = read_embedding_codes(filename)
embedding_idx_icd9[filename] = (embedding_matrix, idx_to_icd9, icd9_to_idx)
if start == 1:
start = 0
overlapping_icd9s.update(set(icd9_to_idx.keys()))
else:
overlapping_icd9s.intersection_update(set(icd9_to_idx.keys()))
#print len(overlapping_icd9s)
elif embedding_type == 'cui':
embedding_matrix, embedding_type_to_indices, icd9_to_idx, idx_to_icd9 = read_embedding_cui(filename)
embedding_idx_icd9[filename] = (embedding_matrix, idx_to_icd9, icd9_to_idx)
if start == 1:
start = 0
overlapping_icd9s.update(set(icd9_to_idx.keys()))
else:
overlapping_icd9s.intersection_update(set(icd9_to_idx.keys()))
#print len(overlapping_icd9s)
overlapping_icd9s = list(overlapping_icd9s)
idx_of_overlapping_icd9s = {}
for filename, embedding_type in filenames_type:
idx_of_overlapping_icd9s[filename] = []
idx_to_icd9 = {}
icd9_to_idx = {}
for idx, icd9 in enumerate(overlapping_icd9s):
idx_to_icd9[idx] = icd9
icd9_to_idx[icd9] = idx
for filename, embedding_type in filenames_type:
idx_of_overlapping_icd9s[filename].append(embedding_idx_icd9[filename][2][icd9])
filename_to_embedding_matrix = {}
for filename, embedding_type in filenames_type:
idx_of_overlapping_icd9s[filename] = np.array(idx_of_overlapping_icd9s[filename])
filename_to_embedding_matrix[filename] = embedding_idx_icd9[filename][0][idx_of_overlapping_icd9s[filename]]
return filename_to_embedding_matrix, idx_to_icd9, icd9_to_idx
def get_icd9_to_description():
icd9_to_description = {}
with open('CMS32_DESC_LONG_DX.txt', 'r') as infile:
data = infile.readlines()
for row in data:
icd9 = row.strip()[:6].strip()
if len(icd9) > 3:
icd9 = icd9[:3] + '.' + icd9[3:]
description = row.strip()[6:].strip()
icd9_to_description[icd9] = description
return icd9_to_description
# type == f: fine grain, c: coarse grain
def get_css_analysis(filenames_type, num_of_neighbor, type='f'):
filename_to_embedding_matrix, idx_to_icd9, icd9_to_idx = generate_overlapping_sets(filenames_type)
print len(icd9_to_idx.keys())
if type == 'c':
icd9_pairs = get_coarse_icd9_pairs(set(icd9_to_idx.keys()))
else:
icd9_pairs = get_icd9_pairs(set(icd9_to_idx.keys()))
print len(icd9_pairs)
icd9_to_check = set(icd9_pairs.keys())
icd9_to_check.intersection_update(set(icd9_to_idx.keys()))
print len(icd9_to_check)
icd9_to_description = get_icd9_to_description()
for icd9 in icd9_to_idx.keys():
if icd9 not in icd9_to_description:
if tree.find(icd9):
icd9_to_description[icd9] = tree.find(icd9).description.encode('utf-8')
else:
icd9_to_description[icd9] = ''
filename_all = []
value_all = []
for filename, embedding_type in filenames_type:
#print filename
icd9_embeddings = filename_to_embedding_matrix[filename]
Y = cdist(icd9_embeddings, icd9_embeddings, 'cosine')
ranks = np.argsort(Y)
cumulative_ndcgs = []
for icd9 in icd9_to_check:
target = ranks[icd9_to_idx[icd9], 1:num_of_neighbor+1]
num_of_possible_hits = 0
icd9_to_remove = set()
for val in icd9_pairs[icd9]:
if val not in icd9_to_idx:
icd9_to_remove.add(val)
            icd9_pairs[icd9].difference_update(icd9_to_remove)
num_of_possible_hits = min(len(icd9_pairs[icd9]), num_of_neighbor)
#print icd9 + '(' + str(num_of_possible_hits) + ')',
#if icd9 in icd9_to_description:
# print '(' + icd9_to_description[icd9] + ')',
#print ''
#print '-------------------------------------------'
dcg = 0
best_dcg = np.sum(np.reciprocal(np.log2(range(2, num_of_possible_hits+2))))
for i in xrange(num_of_neighbor):
if idx_to_icd9[target[i]] in icd9_pairs[icd9]:
dcg += np.reciprocal(np.log2(i+2))
#print 'hit: ',
#else:
#print ' ',
#print idx_to_icd9[target[i]],
#if idx_to_icd9[target[i]] in icd9_to_description:
#print icd9_to_description[idx_to_icd9[target[i]]],
#print ''
#print dcg/best_dcg
#print ''
cumulative_ndcgs.append(dcg/best_dcg)
filename_all.append((filename))
value_all.append(np.mean(np.array(cumulative_ndcgs)))
return filename_all, value_all
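# Scoring sketch: for each ICD-9 code in the evaluation set, the top
# num_of_neighbor cosine neighbours are checked against its known pairs and scored
# with DCG = sum(1/log2(rank+1)) over hits, normalised by the ideal DCG for
# min(#pairs, num_of_neighbor) hits; the mean NDCG per embedding file is returned.
# filenames_type is a list of (path, embedding_type) pairs, with embedding_type
# either 'codes' or 'cui', as parsed from the input CSV in __main__ below.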
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--filenames", default='orig_files_all.txt')
args = parser.parse_args()
filenames_file = args.filenames
filenames = []
with open(filenames_file, 'r') as infile:
data = infile.readlines()
for row in data:
filenames.append(row.strip().split(','))
get_css_analysis(filenames, 40, 'f')
```
#### File: embeddings/eval/ndfrt_analysis.py
```python
from __future__ import division
import argparse
import numpy as np
import scipy as sp
from scipy.spatial.distance import cosine
from scipy.spatial.distance import cdist
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from multiprocessing import Process, Queue
def get_cui_concept_mappings():
concept_to_cui_hdr = '2b_concept_ID_to_CUI.txt'
concept_to_cui = {}
cui_to_concept = {}
with open(concept_to_cui_hdr, 'r') as infile:
lines = infile.readlines()
for line in lines:
concept = line.split('\t')[0]
cui = line.split('\t')[1].split('\r')[0]
concept_to_cui[concept] = cui
cui_to_concept[cui] = concept
return concept_to_cui, cui_to_concept
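# 2b_concept_ID_to_CUI.txt is expected to hold one tab-separated
# "concept<TAB>CUI" pair per line; the extra '\r' split above tolerates
# Windows-style line endings.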
# MRCONSO.RRF is a file that needs to be downloaded from the UMLS Metathesaurus
def get_CUI_to_description():
cui_to_description = {}
with open('MRCONSO.RRF', 'r') as infile:
lines = infile.readlines()
for row in lines:
datum = row.strip().split('|')
if datum[0] not in cui_to_description:
cui_to_description[datum[0]] = datum[14]
return cui_to_description
def get_CUI_to_type():
CUI_to_type_hdr = 'MRSTY.RRF'
CUI_to_type_map = {}
with open(CUI_to_type_hdr, 'r') as infile:
lines = infile.readlines()
for line in lines:
CUI = line.split('|')[0]
type = line.split('|')[3]
if CUI not in CUI_to_type_map:
CUI_to_type_map[CUI] = [type]
else:
CUI_to_type_map[CUI].append(type)
return CUI_to_type_map
def read_embedding_matrix(filename):
concept_to_cui, cui_to_concept = get_cui_concept_mappings() # comment out this after fix input
with open(filename, 'r') as infile:
embedding_num, dimension = map(int, infile.readline().strip().split(' '))
# -1 for remove </s>
embedding_matrix = np.zeros((embedding_num-1, dimension))
data = infile.readlines()
idx_to_cui = {}
cui_to_idx = {}
for idx in xrange(embedding_num-1):
datum = data[idx+1].strip().split(' ')
cui = datum[0]
if cui[0] != 'C':
if cui in concept_to_cui:
cui = concept_to_cui[cui]
embedding_matrix[idx,:] = np.array(map(float, datum[1:]))
idx_to_cui[idx] = cui
cui_to_idx[cui] = idx
return embedding_matrix, idx_to_cui, cui_to_idx
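# The embedding file is read in word2vec text format: a "count dimension" header
# followed by one "token v1 ... vd" row per concept; the first data row (the </s>
# token) is skipped, which is why the matrix holds embedding_num-1 rows.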
def generate_overlapping_sets(filenames):
embedding_idx_cui = {} # a dictionary of (embedding_matrix, idx_to_cui, cui_to_idx)
overlapping_cuis = set([])
if len(filenames) == 1:
embedding_matrix, idx_to_cui, cui_to_idx = read_embedding_matrix(filenames[0])
filename_to_embedding_matrix = {}
filename_to_embedding_matrix[filenames[0]] = embedding_matrix
return filename_to_embedding_matrix, idx_to_cui, cui_to_idx
for fileidx, filename in enumerate(filenames):
embedding_matrix, idx_to_cui, cui_to_idx = read_embedding_matrix(filename)
embedding_idx_cui[filename] = (embedding_matrix, idx_to_cui, cui_to_idx)
if fileidx == 0:
overlapping_cuis.update(set(cui_to_idx.keys()))
else:
overlapping_cuis.intersection_update(set(cui_to_idx.keys()))
overlapping_cuis = list(overlapping_cuis)
idx_of_overlapping_cuis = {}
for filename in filenames:
idx_of_overlapping_cuis[filename] = []
idx_to_cui = {}
cui_to_idx = {}
for idx, cui in enumerate(overlapping_cuis):
idx_to_cui[idx] = cui
cui_to_idx[cui] = idx
for filename in filenames:
idx_of_overlapping_cuis[filename].append(embedding_idx_cui[filename][2][cui])
filename_to_embedding_matrix = {}
for filename in filenames:
idx_of_overlapping_cuis[filename] = np.array(idx_of_overlapping_cuis[filename])
filename_to_embedding_matrix[filename] = embedding_idx_cui[filename][0][idx_of_overlapping_cuis[filename]]
return filename_to_embedding_matrix, idx_to_cui, cui_to_idx
'''
def generate_overlapping_sets(filename1, filename2):
embedding_matrix_1, idx_to_cui_1, cui_to_idx_1 = read_embedding_matrix(filename1)
embedding_matrix_2, idx_to_cui_2, cui_to_idx_2 = read_embedding_matrix(filename2)
overlapping_cuis = list(set(cui_to_idx_1.keys()).intersection(set(cui_to_idx_2.keys())))
print 'set %s has %d concepts' %(filename1, embedding_matrix_1.shape[0])
print 'set %s has %d concepts' %(filename2, embedding_matrix_2.shape[0])
print 'the size of overlapping concept is %d\n' %(len(overlapping_cuis))
idx_of_overlapping_cuis_1 = []
idx_of_overlapping_cuis_2 = []
idx_to_cui = {}
cui_to_idx = {}
for idx, cui in enumerate(overlapping_cuis):
idx_of_overlapping_cuis_1.append(cui_to_idx_1[cui])
idx_of_overlapping_cuis_2.append(cui_to_idx_2[cui])
idx_to_cui[idx] = cui
cui_to_idx[cui] = idx
embedding_matrix_1_new = embedding_matrix_1[np.array(idx_of_overlapping_cuis_1)]
embedding_matrix_2_new = embedding_matrix_2[np.array(idx_of_overlapping_cuis_2)]
return embedding_matrix_1_new, embedding_matrix_2_new, idx_to_cui, cui_to_idx
'''
def get_neighbors(query_idx, target_indices, embedding_matrix, num_of_neighbor):
vector = np.reshape(embedding_matrix[query_idx], (1, -1))
Y = cdist(vector, embedding_matrix, 'cosine')
ranks = np.argsort(Y)
return ranks[0, :num_of_neighbor+1]
def get_analogy(ref_idx, seed_idx, query_idx, target_indices, embedding_matrix, num_of_neighbor):
vector = embedding_matrix[seed_idx, :] - embedding_matrix[ref_idx, :] + embedding_matrix[query_idx, :]
vector = np.reshape(vector, (1, -1))
Y = cdist(vector, embedding_matrix, 'cosine')
ranks = np.argsort(Y)
return ranks[0, :num_of_neighbor+1]
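# Analogy sketch: for "ref : seed = query : ?", the candidate answers are the
# nearest cosine neighbours of embedding[seed] - embedding[ref] + embedding[query],
# e.g. (hypothetical indices):
#   candidate_rows = get_analogy(ref_idx, seed_idx, query_idx, None, embedding_matrix, 5)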
def organize_cui_by_type(cui_to_idx):
cui_to_type = get_CUI_to_type()
type_to_idx = {}
idx_to_type = {}
for cui in cui_to_idx.keys():
if cui in cui_to_type:
types = cui_to_type[cui]
idx_to_type[cui_to_idx[cui]] = types
for type in types:
if type in type_to_idx:
type_to_idx[type].add((cui_to_idx[cui]))
else:
type_to_idx[type] = set([cui_to_idx[cui]])
return type_to_idx, idx_to_type
def get_nn_analysis(cui_to_idx, embedding_matrix, num_of_neighbor, f):
type_to_idx, idx_to_type = organize_cui_by_type(cui_to_idx)
Y = cdist(embedding_matrix, embedding_matrix, 'cosine')
ranks = np.argsort(Y)
type_idx_dcg_err = {}
type_dcg = {}
type_err = {}
    print 'done calculating distance'
for type in type_to_idx.keys():
type_dcg[type] = []
type_err[type] = []
type_idx_dcg_err[type] = []
for idx in idx_to_type.keys():
target = ranks[idx, 1:num_of_neighbor+1]
for type in idx_to_type[idx]:
dcg = 0
err = 0
for i in xrange(num_of_neighbor):
if target[i] in type_to_idx[type]:
dcg += np.reciprocal(np.log2(i+2))
if err == 0:
err = 1/(1+i)
type_idx_dcg_err[type].append((idx, dcg, err))
type_dcg[type].append(dcg)
type_err[type].append(err)
for type in type_to_idx.keys():
print '%50s (DCG) %2.5f %2.5f' %(type, np.mean(np.array(type_dcg[type])), np.std(np.array(type_dcg[type])))
print '%50s (ERR) %2.5f %2.5f' %(type, np.mean(np.array(type_err[type])), np.std(np.array(type_err[type])))
f.write('%50s (DCG) %2.5f %2.5f\n' %(type, np.mean(np.array(type_dcg[type])), np.std(np.array(type_dcg[type]))))
f.write('%50s (ERR) %2.5f %2.5f\n' %(type, np.mean(np.array(type_err[type])), np.std(np.array(type_err[type]))))
return type_idx_dcg_err
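# Note on the metrics above: DCG credits a same-type neighbour at 0-based position
# i with 1/log2(i+2), while "err" only records the reciprocal rank 1/(1+i) of the
# first same-type hit (an MRR-style score rather than a full ERR cascade).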
def get_all_target_neighbors(query_to_targets, embedding_matrix, num_of_neighbor):
vectors = embedding_matrix[np.array(query_to_targets.keys()), :]
Y = cdist(vectors, embedding_matrix, 'cosine')
ranks = np.argsort(Y)
query_target_rank = {}
for idx, query in enumerate(query_to_targets.keys()):
targets_list = list(query_to_targets[query])
target_rank = []
for target in targets_list:
target_rank.append(np.where(ranks[idx, :] == target)[0][0])
query_target_rank[query] = (zip(targets_list, target_rank), ranks[idx, :num_of_neighbor+1])
return query_target_rank
def get_all_target_analogies(ref_idx, seed_idx, query_to_targets, embedding_matrix, num_of_neighbor):
ref_vecs = np.tile(embedding_matrix[seed_idx, :] - embedding_matrix[ref_idx], (len(query_to_targets), 1))
vectors = ref_vecs + embedding_matrix[np.array(query_to_targets.keys()), :]
Y = cdist(vectors, embedding_matrix, 'cosine')
ranks = np.argsort(Y)
query_target_rank = {}
for idx, query in enumerate(query_to_targets.keys()):
targets_list = list(query_to_targets[query])
target_rank = []
for target in targets_list:
target_rank.append(np.where(ranks[idx, :] == target)[0][0])
query_target_rank[query] = (zip(targets_list, target_rank), ranks[idx, :num_of_neighbor+1])
return query_target_rank
def get_drug_diseases_to_check(concept_filename, cui_to_idx):
query_to_targets = {}
outfile = open('drug_disease_' + concept_filename.split('/')[-1] , 'w')
cui_to_description = get_CUI_to_description()
with open(concept_filename, 'r') as infile:
data = infile.readlines()
for row in data:
drug, diseases = row.strip().split(':')
diseases = diseases.split(',')[:-1]
if drug in cui_to_idx and cui_to_idx[drug] not in query_to_targets:
disease_set = set([])
disease_cui_set = set([])
for disease in diseases:
if disease in cui_to_idx:
disease_set.add(cui_to_idx[disease])
disease_cui_set.add(disease)
if len(disease_set) > 0:
outfile.write('%s(%s):' %(drug, cui_to_description[drug]))
for cui in disease_cui_set:
outfile.write('%s(%s),' %(cui, cui_to_description[cui]))
outfile.write('\n')
query_to_targets[cui_to_idx[drug]] = disease_set
outfile.close()
print '%d/%d concepts are found in embeddings' %(len(query_to_targets), len(data))
return query_to_targets
def get_drug_pairs_to_check(concept_filename, cui_to_idx):
drug_pairs = {}
query_to_targets_cui = {}
with open(concept_filename, 'r') as infile:
data = infile.readlines()
disease_to_drugs = {}
for row in data:
drug, diseases = row.strip().split(':')
diseases = diseases.split(',')[:-1]
query_to_targets_cui[drug] = set(diseases)
for idx, disease in enumerate(diseases):
if disease not in disease_to_drugs:
disease_to_drugs[disease] = []
if drug in cui_to_idx:
disease_to_drugs[disease].append(cui_to_idx[drug])
for disease in disease_to_drugs.keys():
if len(disease_to_drugs[disease]) > 1:
for idx, drug in enumerate(disease_to_drugs[disease]):
if drug not in drug_pairs:
drug_pairs[drug] = set([])
drug_pairs[drug].update(set(disease_to_drugs[disease][:idx]))
drug_pairs[drug].update(set(disease_to_drugs[disease][idx+1:]))
return drug_pairs, query_to_targets_cui
def display_query_target_rank(query_target_rank, idx_to_cui, seed_pair=None):
cui_to_description = get_CUI_to_description()
CUI_to_type_map = get_CUI_to_type()
for query in query_target_rank.keys():
query_cui = idx_to_cui[query]
query_name = cui_to_description[query_cui]
if not seed_pair:
print 'Neighbors for %9s %s' %(query_cui, query_name)
else:
ref_idx, seed_idx = seed_pair
ref_cui = idx_to_cui[ref_idx]
ref_name = cui_to_description[ref_cui]
seed_cui = idx_to_cui[seed_idx]
seed_name = cui_to_description[seed_cui]
print 'Analogy for %s %s : %s %s = %s %s : ?' %(ref_cui, ref_name, seed_cui, seed_name, query_cui, query_name)
print '------------------------------------------------------------'
target_rank_pairs, top_neighbors = query_target_rank[query]
for target_idx, rank in target_rank_pairs:
target_cui = idx_to_cui[target_idx]
target_name = cui_to_description[target_cui]
print '%5d %9s %s' %(rank, target_cui, target_name),
if target_cui in CUI_to_type_map:
print CUI_to_type_map[target_cui]
else:
print ""
for index, target_idx in enumerate(list(top_neighbors)):
target_cui = idx_to_cui[target_idx]
if target_cui not in cui_to_description:
cui_to_description[target_cui] = target_cui
target_name = cui_to_description[target_cui]
print '%5d %9s %s' %(index, target_cui, target_name),
if target_cui in CUI_to_type_map:
print CUI_to_type_map[target_cui]
else:
print ""
print ""
def evaluate_result(query_target_rank, num_of_nn):
num_of_queries = len(query_target_rank)
num_of_hits = 0
for query in query_target_rank.keys():
target_rank_pairs, top_neighbors = query_target_rank[query]
for target_idx, rank in target_rank_pairs:
if rank <= num_of_nn:
num_of_hits += 1
break
#print '%5d out of %5d queries (%2.4f)' %(num_of_hits, num_of_queries, (num_of_hits*100)/num_of_queries)
#f.write('%5d out of %5d queries (%2.4f)\n' %(num_of_hits, num_of_queries, (num_of_hits*100)/num_of_queries))
return num_of_hits
def analyze_semantic_files_child(result_q, pidx, n1, n2, ref_seed_list, query_to_targets, embedding_matrix, num_of_nn):
counter = 0
ref_seed_hit_list = []
hit_sum = 0
hit_max = (-1, -1, 0)
for idx in xrange(n1, n2):
counter += 1
#if (idx-n1) % 10 == 0:
# print pidx, idx-n1
ref_idx, seed_idx = ref_seed_list[idx]
query_target_rank = get_all_target_analogies(ref_idx,
seed_idx,
query_to_targets,
embedding_matrix,
num_of_nn)
num_of_hits = evaluate_result(query_target_rank, num_of_nn)
hit_sum += num_of_hits
if num_of_hits > hit_max[2]:
hit_max = (ref_idx, seed_idx, num_of_hits)
ref_seed_hit_list.append((ref_idx, seed_idx, num_of_hits))
result_q.put((counter, hit_sum, hit_max))
def analyze_semantic_files(filenames, num_of_nn, concept_file, num_of_cores):
filename_to_embedding_matrices, idx_to_cui, cui_to_idx = generate_overlapping_sets(filenames)
print len(idx_to_cui)
query_to_targets = get_drug_diseases_to_check(concept_file, cui_to_idx)
all_queries = query_to_targets.keys()
fname = 'analysis_semantic_' + concept_file.split('/')[-1].split('.')[0] + '.txt'
f = open(fname, 'w')
#print f
num_of_queries = len(all_queries)
f.write('number of queries: %d\n' %(num_of_queries))
cui_to_description = get_CUI_to_description()
for filename in filenames:
query_target_rank = get_all_target_neighbors(query_to_targets, filename_to_embedding_matrices[filename], num_of_nn)
num_of_hits = evaluate_result(query_target_rank, num_of_nn)
print '%s & %.2f,' %(filename.split('/')[-1],
num_of_hits*100/num_of_queries),
f.write('%s,%.4f,%d,' %(filename.split('/')[-1], num_of_hits*100/num_of_queries, num_of_hits))
ref_seed_list = []
for ref_idx in all_queries:
for seed_idx in query_to_targets[ref_idx]:
ref_seed_list.append((ref_idx, seed_idx))
result_q = Queue()
process_list = []
N = len(ref_seed_list)
#print N
chunk_size = np.ceil(N/num_of_cores)
for i in xrange(num_of_cores):
n1 = min(int(i*chunk_size), N)
n2 = min(int((i+1)*chunk_size), N)
p = Process(target=analyze_semantic_files_child,
args=(result_q, i, n1, n2,
ref_seed_list,
query_to_targets,
filename_to_embedding_matrices[filename],
num_of_nn))
process_list.append(p)
for p in process_list:
p.start()
for p in process_list:
p.join()
counter = 0
hit_sum = 0
hit_max = (-1, -1, 0)
for p in process_list:
counter_part, hit_sum_part, hit_max_part = result_q.get()
counter += counter_part
hit_sum += hit_sum_part
if hit_max_part[2] > hit_max[2]:
hit_max = hit_max_part
ref_cui = idx_to_cui[hit_max[0]]
ref_name = cui_to_description[ref_cui]
seed_cui = idx_to_cui[hit_max[1]]
seed_name = cui_to_description[seed_cui]
print '& %.2f & %.2f \\\\' %(hit_sum/counter*100/num_of_queries,
hit_max[2]*100/num_of_queries)
f.write('%.4f,%.4f,%s,%s,%.4f,%d\n' %(hit_sum/counter*100/num_of_queries,
hit_sum/counter,
ref_name, seed_name,
hit_max[2]*100/num_of_queries,
hit_max[2]))
f.close()
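# NOTE: analyze_semantic and analyze_concept_similarity below still target an
# older two-file API (generate_overlapping_sets(filename1, filename2) and
# evaluate_result(..., f)); they are kept for reference and are not reached from
# __main__, which only calls analyze_semantic_files.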
def analyze_semantic(filename1, filename2, num_of_nn, concept_file):
embedding_matrix_1, embedding_matrix_2, idx_to_cui, cui_to_idx = generate_overlapping_sets(filename1, filename2)
query_to_targets = get_drug_diseases_to_check(concept_file, cui_to_idx)
all_queries = query_to_targets.keys()
fname = 'new_result/' + filename2.split('/')[-1].split('.')[0] + "_" + concept_file.split('/')[-1].split('.')[0]
f = open(fname, 'w')
f.write('%s\n%s\n%s\n' % (filename1, filename2, concept_file))
#print "\nNeighbor result of de Vine et al."
f.write("\nNeighbor result of de Vine et al.\n")
query_target_rank_1_n = get_all_target_neighbors(query_to_targets, embedding_matrix_1, num_of_nn)
evaluate_result(query_target_rank_1_n, num_of_nn, f)
#display_query_target_rank(query_target_rank_1_n, idx_to_cui)
#print "\nNeighbor result of Stanford"
f.write("\nNeighbor result of Stanford\n")
query_target_rank_2_n = get_all_target_neighbors(query_to_targets, embedding_matrix_2, num_of_nn)
evaluate_result(query_target_rank_2_n, num_of_nn, f)
#display_query_target_rank(query_target_rank_2_n, idx_to_cui)
cui_to_description = get_CUI_to_description()
for ref_idx in all_queries:
for seed_idx in list(query_to_targets[ref_idx]):
ref_cui = idx_to_cui[ref_idx]
ref_name = cui_to_description[ref_cui]
seed_cui = idx_to_cui[seed_idx]
seed_name = cui_to_description[seed_cui]
#print '\nAnalogy using seed %s %s : %s %s' %(ref_cui, ref_name, seed_cui, seed_name)
f.write('\nAnalogy using seed %s %s : %s %s\n' %(ref_cui, ref_name, seed_cui, seed_name))
#print 'de Vine'
f.write('de Vine\n')
query_target_rank_1_a = get_all_target_analogies(ref_idx, seed_idx, query_to_targets, embedding_matrix_1, num_of_nn)
evaluate_result(query_target_rank_1_a, num_of_nn, f)
#display_query_target_rank(query_target_rank_1_a, idx_to_cui, (ref_idx, seed_idx))
#print 'Stanford'
f.write('Stanford\n')
query_target_rank_2_a = get_all_target_analogies(ref_idx, seed_idx, query_to_targets, embedding_matrix_2, num_of_nn)
evaluate_result(query_target_rank_2_a, num_of_nn, f)
#display_query_target_rank(query_target_rank_2_a, idx_to_cui, (ref_idx, seed_idx))
f.close()
def get_fine_grain_drug(idx_to_cui, embedding_matrix, drug_pairs, search_indices, query_to_targets_cui, num_of_neighbor, display=False):
cui_to_description = get_CUI_to_description()
query_indices = np.array(drug_pairs.keys())
Y = cdist(embedding_matrix[query_indices, :], embedding_matrix[search_indices, :], 'cosine')
ranks = np.argsort(Y)
cumulative_ndcgs = []
for counter, query_idx in enumerate(list(query_indices)):
target = ranks[counter, 1:num_of_neighbor+1]
num_of_possible_hits = min(len(drug_pairs[query_idx]), num_of_neighbor)
if display:
print ""
print cui_to_description[idx_to_cui[query_idx]] + ' (' + str(num_of_possible_hits) + '): ',
for disease_cui in query_to_targets_cui[idx_to_cui[query_idx]]:
print disease_cui,
if disease_cui in cui_to_description:
print '(' + cui_to_description[disease_cui] + '),',
print ''
print '-------------------------------------------'
dcg = 0
best_dcg = np.sum(np.reciprocal(np.log2(range(2, num_of_possible_hits+2))))
for i in xrange(num_of_neighbor):
if search_indices[target[i]] in drug_pairs[query_idx]:
dcg += np.reciprocal(np.log2(i+2))
if display:
print 'hit: ',
else:
if display:
print ' ',
if display:
print cui_to_description[idx_to_cui[search_indices[target[i]]]] + ': ',
if idx_to_cui[search_indices[target[i]]] in query_to_targets_cui:
for disease_cui in query_to_targets_cui[idx_to_cui[search_indices[target[i]]]]:
print disease_cui,
if disease_cui in cui_to_description:
print '(' + cui_to_description[disease_cui] + '),',
print ''
cumulative_ndcgs.append(dcg/best_dcg)
if display:
print dcg/best_dcg
#print ''
#print np.mean(np.array(cumulative_ndcgs))
#print np.median(np.array(cumulative_ndcgs))
#print ''
return cumulative_ndcgs, np.mean(np.array(cumulative_ndcgs))
def analyze_fine_grain_concept_similarity_files(filenames, concept_filename, num_of_nn):
filename_to_embedding_matrices, idx_to_cui, cui_to_idx = generate_overlapping_sets(filenames)
drug_pairs, query_to_targets_cui = get_drug_pairs_to_check(concept_filename, cui_to_idx)
drug_to_check = set([])
for drug in drug_pairs.keys():
drug_to_check.add(idx_to_cui[drug])
print drug_to_check.difference(set(query_to_targets_cui))
print len(query_to_targets_cui)
print len(drug_pairs)
type_to_idx, idx_to_type = organize_cui_by_type(cui_to_idx)
cui_to_description = get_CUI_to_description()
'''
search_indices = set([])
search_indices.update(set(drug_pairs.keys()))
search_indices.update(type_to_idx['Pharmacologic Substance'])
search_indices.update(type_to_idx['Antibiotic'])
search_indices = np.array(list(search_indices))
'''
search_indices = np.array(drug_pairs.keys())
for filename in filenames:
cumulative_ndcgs, mean_ndcgs = get_fine_grain_drug(idx_to_cui, filename_to_embedding_matrices[filename], drug_pairs, search_indices, query_to_targets_cui, num_of_nn)
print filename + ' & ' + str(mean_ndcgs) + ' \\\\ '
def analyze_concept_similarity_files(filenames, num_of_nn):
filename_to_embedding_matrices, idx_to_cui, cui_to_idx = generate_overlapping_sets(filenames)
fname = 'analysis_concept_similarity'
f = open(fname, 'w')
print f
for filename in filenames:
f.write('%s\n' %(fname))
type_idx_dcg_err = get_nn_analysis(cui_to_idx, filename_to_embedding_matrices[filename], num_of_nn, f)
f.write('\n')
f.close()
# compute neighbor score for UMLS
def analyze_concept_similarity(filename1, filename2, num_of_nn):
embedding_matrix_1, embedding_matrix_2, idx_to_cui, cui_to_idx = generate_overlapping_sets(filename1, filename2)
print 'result for de Vine'
type_idx_dcg_err_1 = get_nn_analysis(cui_to_idx, embedding_matrix_1, num_of_nn)
print '\nresult for Stanford'
type_idx_dcg_err_2 = get_nn_analysis(cui_to_idx, embedding_matrix_2, num_of_nn)
def analyze_concept_relatedness(filenames):
pairs_to_evaluate = []
cui_to_description = {}
with open('caviedes.tsv', 'r') as infile:
data = infile.readlines()
for row in data:
elements = row.strip().split('\t')
pairs_to_evaluate.append((elements[1], elements[3], float(elements[4])))
cui_to_description[elements[1]] = elements[0]
cui_to_description[elements[3]] = elements[2]
filename_to_embedding_matrices, idx_to_cui, cui_to_idx = generate_overlapping_sets(filenames)
caviedes = []
caviedes_to_print = []
for cui1, cui2, similarity_score in pairs_to_evaluate:
if cui1 in cui_to_idx and cui2 in cui_to_idx:
caviedes.append(similarity_score)
caviedes_to_print.append((cui_to_description[cui1],
cui_to_description[cui2],
similarity_score))
for val in sorted(caviedes_to_print, key=lambda ele: ele[2]):
print val
for filename in filenames:
embedding_matrix = filename_to_embedding_matrices[filename]
Y = cdist(embedding_matrix, embedding_matrix, 'cosine')
print filename
Y_scaled = Y/Y.max()
current = []
current_to_print = []
for cui1, cui2, similarity_score in pairs_to_evaluate:
if cui1 in cui_to_idx and cui2 in cui_to_idx:
print cui1, cui2
current.append(Y_scaled[cui_to_idx[cui1], cui_to_idx[cui2]])
current_to_print.append((cui_to_description[cui1],
cui_to_description[cui2],
Y_scaled[cui_to_idx[cui1], cui_to_idx[cui2]]))
else:
print 'not a hit: ', cui1, cui2
caviedes = np.array(caviedes)
current = np.array(current)
print pearsonr(caviedes, current)
print spearmanr(caviedes, current)
for val in sorted(current_to_print, key=lambda ele: ele[2]):
print val
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--analysis_type", type=int, default=1, help='1:ndt-rf, 2:')
parser.add_argument("--filenames", default='files_to_process.txt')
parser.add_argument("--number_of_nn", type=int, default=40, help='the number of neighbors to show')
parser.add_argument("--number_of_cores", type=int, default=60, help='the number of cores to use')
parser.add_argument("--concept_file", default='ndf-rt/may_treat_cui.txt', help='input the concept file for query')
args = parser.parse_args()
analysis_type = args.analysis_type
filenames_file = args.filenames
num_of_nn = args.number_of_nn
num_of_cores = args.number_of_cores
concept_file = args.concept_file
filenames = []
with open(filenames_file, 'r') as infile:
data = infile.readlines()
for row in data:
filenames.append(row.strip())
analyze_semantic_files(filenames, num_of_nn, concept_file, num_of_cores)
#analyze_concept_similarity_files(filenames, num_of_nn)
# experiment for table 3
#analyze_fine_grain_concept_similarity_files(filenames, concept_file, num_of_nn)
#analyze_concept_relatedness(filenames)
#if analysis_type == 1:
# analyze_semantic(filename1, filename2, num_of_nn, concept_file)
#elif analysis_type == 2:
# analyze_concept_similarity(filename1, filename2, num_of_nn)
``` |
{
"source": "jplattel/smp-scraper",
"score": 3
} |
#### File: jplattel/smp-scraper/scraper.py
```python
import logging
import sys
import os
import mechanize
import xlrd
import csv
import datetime
import peewee
# Database
db = peewee.MySQLDatabase("database_name", host="localhost", user="root", password="password")
class BaseModel(peewee.Model):
class Meta:
database = db
class Usage(BaseModel):
date = peewee.DateTimeField(unique=True)
value = peewee.FloatField()
def setup():
db.connect()
db.create_table(Usage)
# Logging
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
def scrape(email, password, date=1416265200):
"""
Scrapes data from slimmemeterportal.nl
"""
# Open browser
br = mechanize.Browser()
br.open('https://slimmemeterportal.nl/login')
# Fill out login form
br.select_form(nr=0)
br["user_session[email]"] = email
br["user_session[password]"] = password
br.submit()
# Download the XLS file with
    r = br.retrieve('https://slimmemeterportal.nl/cust/consumption/chart.xls?commodity=power&datatype=consumption&range=86400&timeslot_start=' + str(date), str(date) + '.xls')
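# Example (hypothetical credentials; the timestamp is the Unix time at midnight of
# the day to fetch):
#   scrape('user@example.com', 'secret', date=1416265200)
#   save_to_db(1416265200)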
# Save from excel to database
def save_to_db(date):
wb = xlrd.open_workbook(str(date) + '.xls')
sh = wb.sheet_by_name('Sheet 1')
r = range(sh.nrows)
r.pop(0) # remove header
for rownum in r:
# convert datatime from excel
d = xlrd.xldate_as_tuple(sh.row_values(rownum)[0], wb.datemode) # Convert Excel data to tuple
d = datetime.datetime(*d) # tuple to datetime
# Ugly way to check if it entry exists in the database, quick 'n dirty!
e = False
try:
e = Usage.get(Usage.date == d)
except:
pass
if not e:
u = Usage(date=d, value=sh.row_values(rownum)[1])
u.save()
# Delete xls file for cleanup
os.remove(str(date) + '.xls')
# Start scraping
scrape(email, password, date=unix_timestamp_at_midnight_of_a_day)
save_to_db(unix_timestamp_at_midnight_of_a_day)
``` |
{
"source": "JPLAY0/mmsegmentation",
"score": 2
} |
#### File: mmsegmentation/demo/debug_net.py
```python
import os
import time
import cv2 as cv
import mmcv
import numpy as np
import torch
from PIL import Image
from mmcv.runner.checkpoint import save_checkpoint
from torch import nn
from tqdm import tqdm
from mmseg.models import build_segmentor
from mmseg.models.backbones import BaseNet
from mmseg.models.backbones.resnet18 import ResNet18
from mmseg.models.decode_heads.swift_head import SwiftHead
warm_step = 20
test_step = 20
img_size_list = [(1, 3, 1024, 2048)]
def print_params(model):
total_params = sum(p.numel() for p in model.parameters())
print(f'{total_params:,} total parameters.')
total_trainable_params = sum(
p.numel() for p in model.parameters() if p.requires_grad)
print(f'{total_trainable_params:,} training parameters.')
def benchmark(net, img_size):
device = torch.device('cuda')
model = net.to(device)
model.eval()
img = torch.empty(size=img_size, device=device)
with torch.no_grad():
for _ in range(warm_step):
_ = model(img)
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(test_step):
_ = model(img)
torch.cuda.synchronize()
end = time.perf_counter()
return test_step / (end - start)
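# Example (sketch): measure forward-pass throughput of the plain ResNet-18
# backbone at full Cityscapes resolution.
#   fps = benchmark(ResNet18(), (1, 3, 1024, 2048))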
def model_test():
name = 'gluon_resnet18_v1b'
model = BaseNet('gluon_resnet18_v1b')
img = torch.randn((1, 3, 1024, 2048))
ret = model(img)
for x in ret:
print(x.shape)
for img_size in img_size_list:
fps = benchmark(model, img_size)
print('model: ', name, 'img_size: ', img_size, 'fps:', round(fps, 1))
def find_shape():
img_path = '/home/jpl/data/pycode/mmsegmentation/data/Vistas/training/images'
h, w = 0, 0
for path in tqdm(os.listdir(img_path)):
whole_path = os.path.join(img_path, path)
img: Image.Image = Image.open(whole_path)
h += img.size[0]
w += img.size[1]
print(h / 18000) # 3418.662888888889
print(w / 18000) # 2481.146277777778
def find_sto():
img_path = '/home/jpl/data/pycode/mmsegmentation/data/Vistas/training/images'
num_pixels = 3418.662888888889 * 2481.146277777778 * 18000
# color = np.zeros(3)
# for path in tqdm(os.listdir(img_path)):
# whole_path = os.path.join(img_path, path)
# img = cv.imread(whole_path)
# for i in range(3):
# color[2 - i] += np.sum(img[:, :, i])
# mean = color / num_pixels
mean = np.array([80.5423, 91.3162, 81.4312])
print(mean)
color = np.zeros(3)
for path in tqdm(os.listdir(img_path)):
whole_path = os.path.join(img_path, path)
img = cv.imread(whole_path)
for i in range(3):
color[2 - i] += np.sum((img[:, :, i] - mean[i]) ** 2)
var = color / num_pixels
print(np.sqrt(var))
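# find_sto computes the per-channel mean and standard deviation of the training
# images in two passes (mean first, then the square root of the average squared
# deviation); the hard-coded mean above is the result of the commented-out first pass.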
def img_test():
img_path = '/home/jpl/data/pycode/mmsegmentation/data/Vistas/validation/labels'
lbl_min, lbl_max = 1000, -1
for path in tqdm(os.listdir(img_path)):
whole_path = os.path.join(img_path, path)
img = np.asarray(Image.open(whole_path))
lbl_min = min(lbl_min, img.min())
lbl_max = max(lbl_max, img.max())
print(lbl_max)
print(lbl_min)
def save_backbone_checkpoint():
cfg = mmcv.Config.fromfile('work_dirs/sg_resnet18_vitas/sg_resnet18_cityscapes.py')
model: torch.nn.Module = build_segmentor(cfg.model).backbone
save_checkpoint(model, 'checkpoints/resnet18_vistas.pth')
class FPFModel(nn.Module):
def __init__(self):
super().__init__()
self.backbone = ResNet18()
self.head = SwiftHead(in_channels=[64, 128, 256, 512], channels=96, num_classes=19, in_index=(0, 1, 2, 3),
input_transform='multiple_select', norm_cfg=dict(type='BN', requires_grad=True))
def forward(self, x):
x = self.backbone(x)
return self.head(x)
def ultra_speed_test():
model = FPFModel().cuda()
model.eval()
cases = 10000
with torch.no_grad():
tot = 0
for _ in range(cases):
img = torch.randn((1, 3, 1024, 2048), device='cuda')
start = time.perf_counter()
model(img)
torch.cuda.synchronize()
tot += time.perf_counter() - start
print(cases / tot)
def weight_extract():
weight_path = 'work_dirs/lppm_ct/latest.pth'
weight: dict = torch.load(weight_path)
state: dict = weight['state_dict']
del state['decode_head.conv_seg.weight']
del state['decode_head.conv_seg.bias']
del state['auxiliary_head.0.conv_seg.weight']
del state['auxiliary_head.0.conv_seg.bias']
# del state['auxiliary_head.1.conv_seg.weight']
# del state['auxiliary_head.1.conv_seg.bias']
# del state['auxiliary_head.2.conv_seg.weight']
# del state['auxiliary_head.2.conv_seg.bias']
torch.save(weight, 'checkpoints/pretrained_ct.pth')
if __name__ == '__main__':
weight_extract()
```
#### File: models/decode_heads/fpf_head.py
```python
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from .decode_head import BaseDecodeHead
from .psp_head import PPM
from ..builder import HEADS
@HEADS.register_module()
class FPFHead(BaseDecodeHead):
def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
super(FPFHead, self).__init__(**kwargs)
self.pool_scales = pool_scales
self.psp_modules = PPM(
self.pool_scales,
self.in_channels[-1],
self.channels,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg,
align_corners=self.align_corners)
self.bottleneck = ConvModule(
self.in_channels[-1] + len(pool_scales) * self.channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
self.convs = nn.ModuleList()
self.num_convs = len(self.in_channels) - 1
for i in range(self.num_convs):
self.convs.append(ConvModule(self.in_channels[i], self.channels, 1, norm_cfg=self.norm_cfg))
self.convs.append(ConvModule(self.channels * 2, self.channels, 3, padding=1, norm_cfg=self.norm_cfg))
self.up = nn.Upsample(mode='bilinear', align_corners=False)
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
psp_outs = [x[-1]]
psp_outs.extend(self.psp_modules(x[-1]))
psp_outs = torch.cat(psp_outs, dim=1)
x[-1] = self.bottleneck(psp_outs)
for i in reversed(range(self.num_convs)):
x[i] = self.convs[i * 2](x[i])
self.up.size = x[i].shape[2:]
x[i] = torch.cat([self.up(x[i + 1]), x[i]], dim=1)
x[i] = self.convs[i * 2 + 1](x[i])
output = self.cls_seg(x[0])
return output
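# Decoder sketch: the deepest feature map is enriched with pyramid pooling, then
# features are fused top-down FPN-style -- each lower-level map is projected by a
# 1x1 conv, concatenated with the upsampled higher-level map and refined by a
# 3x3 conv before the final classifier.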
```
#### File: models/decode_heads/lppm_head.py
```python
from torch import nn
from mmcv.cnn import ConvModule
from .decode_head import BaseDecodeHead
from ..builder import HEADS
@HEADS.register_module()
class LPPMHead(BaseDecodeHead):
def __init__(self, pool_scales=(1, 2, 3, 6), **kwargs):
super(LPPMHead, self).__init__(**kwargs)
self.convs = nn.ModuleList()
self.num_convs = len(self.in_channels) - 1
self.lppm = LPPM(pool_scales, self.in_channels, self.channels, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg, align_corners=self.align_corners)
for i in range(self.num_convs):
self.convs.append(ConvModule(self.in_channels[i], self.channels, 1, norm_cfg=self.norm_cfg, inplace=False))
self.convs.append(ConvModule(self.channels, self.channels, 3, padding=1, norm_cfg=self.norm_cfg))
self.up = nn.Upsample(mode='bilinear', align_corners=False)
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
x[-1] = self.lppm(x[-1])
for i in reversed(range(self.num_convs)):
x[i] = self.convs[i * 2](x[i])
self.up.size = x[i].shape[2:]
x[i] = x[i] + self.up(x[i + 1])
x[i] = self.convs[i * 2 + 1](x[i])
return self.cls_seg(x[0])
class LPPM(nn.Module):
def __init__(self, pool_scales, in_channels, channels, conv_cfg, norm_cfg,
act_cfg, align_corners):
super(LPPM, self).__init__()
self.pools = nn.ModuleList()
self.convs = nn.ModuleList()
self.convs.append(ConvModule(in_channels[-1], channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
act_cfg=act_cfg))
for pool_scale in pool_scales:
self.pools.append(nn.AdaptiveAvgPool2d(pool_scale))
self.convs.append(ConvModule(in_channels[-1], channels, 3, padding=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.up = nn.Upsample(mode='bilinear', align_corners=align_corners)
def forward(self, x):
"""Forward function."""
self.up.size = x.size()[2:]
copy = x.clone()
x = self.convs[-1](x)
for i, pool in enumerate(self.pools):
x = x + self.up(self.convs[i](pool(copy)))
return x
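# LPPM is a lightweight pyramid-pooling block: the input is average-pooled at each
# scale, projected by a 3x3 conv, upsampled back to the input resolution and summed
# with the 3x3-convolved input feature map.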
``` |
{
"source": "jpleasu/openbestand",
"score": 2
} |
#### File: openbestand/openbestand/cli.py
```python
import re
import asyncio
from bleak import BleakClient, BleakScanner
from .core import Record, BATTERY_LEVEL, find_device, parse_args
from aioconsole import ainput, aprint
class ConsoleClient(BleakClient):
"""
console client.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.last_buff = None
self.output = False
async def connect(self, **kwargs):
await super().connect(**kwargs)
await asyncio.sleep(1)
await self.start_notify(BATTERY_LEVEL, self.notify_cb)
await aprint(f'connected to device with address {self.address}')
async def notify_cb(self, sender, buff):
if buff != self.last_buff:
if self.output:
r = Record.from_buffer(buff)
await aprint(f"\n-- {r} --", end='')
self.last_buff = buff
async def console_prompt(self) -> bool: # noqa
"""
present a debug prompt.. return False on disconnect.
"""
ret = True
x = await ainput('> ')
r = Record.from_buffer(self.last_buff[:])
if x.strip() == '':
await aprint(f"-- {r} --")
return ret
if x == 'h' or x == 'help' or x == '?':
await aprint('''\
h : help
<enter> : print state
q : quit
x : power off
s : start/stop notifications
c : calibrate
t <ang> : set the target to angle ang
d <secs> : set delay to secs seconds
b : toggle buzzer
b+ : raise the buzz strength
b- : lower the buzz strength
b[0-2] : set the buzz strength
p[0-f] : set the buzz pattern
''')
return ret
        elif x == 'q':
            await aprint('Quitting.')
            return False
elif x == 'x':
r.power_off()
ret = False
await aprint('Turning off.')
elif x == 's':
self.output = not self.output
self.last_buff = None
if self.output:
await aprint('Starting notifications.')
else:
await aprint('Stopping notifications.')
return True
elif x == 'c':
r.calibrating = True
# must be sent twice *shrug*
await aprint('Calibrating')
await self.write_gatt_char(BATTERY_LEVEL, bytes(r), response=True)
await asyncio.sleep(.1)
await self.write_gatt_char(BATTERY_LEVEL, bytes(r), response=True)
# 3 seconds to calibrate should be plenty
for count in range(6):
r = Record.from_buffer(self.last_buff)
if not r.calibrating:
break
await aprint('Calibrating ..')
await asyncio.sleep(.5)
r.calibrating = False
await aprint('Done calibrating')
elif x.startswith('t '):
r.target = int(x.split()[1])
await aprint(f'Setting target to {r.target}.')
elif x.startswith('d '):
r.delay = int(x.split()[1], 0)
await aprint(f'Setting delay to {r.delay}.')
elif x == 'b':
r.toggle_buzz()
await aprint(f'Setting buzzer to {"on" if r.buzz else "off"}.')
elif x == 'b+':
r.buzz_strength = min(2, r.buzz_strength + 1)
await aprint(f'Setting buzz strength to {r.buzz_strength}.')
elif x == 'b-':
r.buzz_strength = max(0, r.buzz_strength - 1)
await aprint(f'Setting buzz strength to {r.buzz_strength}.')
elif x in ['b0', 'b1', 'b2']:
r.buzz_strength = int(x[1])
await aprint(f'Setting buzz strength to {r.buzz_strength}.')
elif re.match('p[0-f]', x):
r.buzz_pattern = x[1]
await aprint(f'Setting buzz pattern to {r.buzz_pattern}.')
await self.write_gatt_char(BATTERY_LEVEL, bytes(r), response=True)
return ret
async def amain(**kwargs):
d = await find_device(**kwargs)
if d is None:
await aprint("""Bestand not found. Verify that your device is in pairing mode with a blinking blue light.""")
return
async with ConsoleClient(d) as client:
while client.is_connected:
if not await client.console_prompt():
break
def main():
"""entry point."""
try:
asyncio.run(amain(**parse_args()))
except KeyboardInterrupt:
pass
print()
async def amain_rssi(**kwargs):
# use timeout as an interval
timeout = kwargs.pop('timeout', 1.0)
loop = asyncio.get_event_loop()
while True:
t0 = loop.time()
device = await find_device(**kwargs)
if device is None:
await aprint('---')
else:
print(device.rssi)
t1 = loop.time()
d = t1 - t0
if d < timeout:
await asyncio.sleep(timeout - d)
def main_rssi():
"""entry point for RSSI monitoring."""
try:
asyncio.run(amain_rssi(**parse_args()))
except KeyboardInterrupt:
pass
print()
``` |
{
"source": "jpleger/django-analystnotes",
"score": 2
} |
#### File: django-analystnotes/analystnotes/filters.py
```python
from rest_framework import filters
from django.db.models import Q
class ObjectOwnerFieldPermissionsFilter(filters.BaseFilterBackend):
"""
A filter backend that limits results to those where the requesting user
has read object level permissions.
"""
def filter_queryset(self, request, queryset, view):
return queryset.filter(owner=request.user)
class ProjectOwnerFieldPermissionsFilter(filters.BaseFilterBackend):
"""
A filter backend that limits results to those where the requesting user
has read object level permissions.
"""
def filter_queryset(self, request, queryset, view):
return queryset.filter(project__owner=request.user)
class OrderByNameFilter(filters.BaseFilterBackend):
"""
Basic Filter to order records by name
"""
def filter_queryset(self, request, queryset, view):
return queryset.order_by('name')
``` |
{
"source": "jplehmann/django-address",
"score": 2
} |
#### File: example_site/person/views.py
```python
from django.conf import settings
from django.shortcuts import render
from address.models import Address
from .forms import PersonForm
def home(request):
success = False
addresses = Address.objects.all()
if settings.GOOGLE_API_KEY:
google_api_key_set = True
else:
google_api_key_set = False
if request.method == 'POST':
form = PersonForm(request.POST)
if form.is_valid():
success = True
else:
form = PersonForm(initial={'address': Address.objects.last()})
context = {'form': form,
'google_api_key_set': google_api_key_set,
'success': success,
'addresses': addresses}
return render(request, 'example/home.html', context)
``` |
{
"source": "JPLeoRX/parallelizer",
"score": 3
} |
#### File: parallelizer/parallelizer/process_parallelizer.py
```python
import multiprocessing
from typing import List
from parallelizer.executor import Executor
from parallelizer.executor_event import ExecutorEvent
from parallelizer.parallelizer import Parallelizer
class ProcessParallelizer(Parallelizer):
def __init__(self, number_of_threads: int, timeout_in_seconds: float = 60):
super().__init__(number_of_threads, timeout_in_seconds)
def start_executors(self, executors: List[Executor]) -> List[ExecutorEvent]:
# Keep return values in a memory-shared dict
return_dict = multiprocessing.Manager().dict()
# Start processes
executor_events = []
processes = []
for executor in executors:
executor_event = ExecutorEvent(executor.worker_index, None, return_dict)
executor_events.append(executor_event)
process = multiprocessing.Process(target=executor.execute_all, args=[executor_event])
processes.append(process)
process.start()
# Wait for all processes to finish
for process in processes:
process.join(self.timeout_in_seconds)
# Map results back from shared dict into each event
for executor_event in executor_events:
executor_event.results = return_dict[executor_event.worker_index]
# Return events
return executor_events
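# Note: results travel back through the Manager().dict() created in
# start_executors because plain dicts are not shared across process boundaries;
# each Executor writes its output under its worker_index and the parent copies the
# values into the ExecutorEvent objects only after the joins complete.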
``` |
{
"source": "jplevyak/pyc",
"score": 3
} |
#### File: pyc/lib/datetime.py
```python
from time import struct_time
import string
MINYEAR = 1
MAXYEAR = 9999
class date:
def __init__(self, year, month, day):
self.year = year
self.month = month
self.day = day
def today():
return date(0, 0, 0)
def fromtimestamp(timestamp):
return date(0, 0, 0)
def fromordinal(ordinal):
return date(0, 0, 0)
today = staticmethod(today)
fromtimestamp = staticmethod(fromtimestamp)
fromordinal = staticmethod(fromordinal)
def __add__(self, other):
return self
def __sub__(self, other):
return other.subfromdate()
def subfromdate(self):
return timedelta()
def replace(self, year=0, month=0, day=0):
return self
def timetuple(self):
return struct_time(9*(1,))
def toordinal(self):
return 1
def weekday(self):
return 1
def isoweekday(self):
return 1
def isocalendar(self):
return (1, 1, 1)
def isoformat(self):
return ''
def __str__(self):
return ''
def ctime(self):
return ''
def strftime(self, format):
return ''
class datetime(date):
def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
date.__init__(self, year, month, day)
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
self.tzinfo = tzinfo
tzinfo.utcoffset(self)
tzinfo.dst(self)
tzinfo.tzname(self)
def today():
return datetime(0, 0, 0)
def now(tz=None):
tz.utcoffset(self)
return datetime(0, 0, 0)
def utcnow():
return datetime(0, 0, 0)
def fromtimestamp(timestamp, tz=None):
tz.fromutc(self)
return datetime(0, 0, 0)
def utcfromtimestamp(timestamp):
return datetime(0, 0, 0)
def fromordinal(ordinal):
return datetime(0, 0, 0)
def combine(date, time):
return datetime(0, 0, 0)
def strptime(date_string, format):
return datetime(0, 0, 0)
today = staticmethod(today)
now = staticmethod(now)
utcnow = staticmethod(utcnow)
fromtimestamp = staticmethod(fromtimestamp)
utcfromtimestamp = staticmethod(utcfromtimestamp)
fromordinal = staticmethod(fromordinal)
combine = staticmethod(combine)
strptime = staticmethod(strptime)
def __add__(self, delta):
return self
def __sub__(self, other):
return other.subfromdatetime()
def subfromdatetime(self):
return timedelta()
def date(self):
return date(self.year, self.month, self.day)
def time(self):
return time(self.hour, self.minute, self.second, self.microsecond, 0)
def timetz(self):
return time(self.hour, self.minute, self.second, self.microsecond, self.tzinfo)
def replace(self, year=0, month=0, day=0, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
return self
def astimezone(self, tz):
tz.fromutc(self)
return self
def utcoffset(self):
return timedelta()
def dst(self):
return timedelta()
def tzname(self):
return ''
def timetuple(self):
return struct_time(9*(1,))
def utctimetuple(self):
return struct_time(9*(1,))
def toordinal(self):
return 1
def weekday(self):
return 1
def isoweekday(self):
return 1
def isocalendar(self):
return (1, 1, 1)
def isoformat(self, sep='T'):
return ''
def __str__(self):
return ''
def ctime(self):
return ''
def strftime(self, format):
return ''
class time:
def __init__(self, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
self.tzinfo = tzinfo
dt = datetime(0,0,0)
tzinfo.utcoffset(dt)
tzinfo.dst(dt)
tzinfo.tzname(dt)
def replace(self, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
return self
def isoformat(self):
return ''
def __str__(self):
return ''
def strftime(self, format):
return ''
def utcoffset(self):
return timedelta()
def dst(self):
return timedelta()
def tzname(self):
return ''
class timedelta:
def __init__(self, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0):
self.days = 1
self.seconds = 1
self.microseconds = 1
def __str__(self):
return ''
def __add__(self, other):
return self
def __sub__(self, other):
return self
def __mul__(self, n):
return self
def __div__(self, n):
return self
def __neg__(self):
return self
def __floordiv__(self, n):
return self
def __abs__(self):
return self
def subfromdate(self):
return date(1, 1, 1)
def subfromdatetime(self):
return datetime(1, 1, 1)
class tzinfo:
def __init__(self):
pass
def utcoffset(self, dt):
return timedelta()
def dst(self, dt):
return timedelta()
def tzname(self, dt):
return ''
def fromutc(self, dt):
self.utcoffset(dt)
self.dst(dt)
return datetime(0,0,0)
date.min = date (MINYEAR, 1, 1)
date.max = date (MAXYEAR, 12, 31)
date.resolution = timedelta(days=1)
datetime.min = datetime(MINYEAR, 1, 1, tzinfo=None)
datetime.max = datetime(MAXYEAR, 12, 31, 23, 59, 59, 999999, tzinfo=None)
datetime.resolution = timedelta(microseconds=1)
time.min = time(0, 0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
timedelta.min = timedelta(-999999999)
timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59, microseconds=999999)
timedelta.resolution = timedelta(microseconds=1)
```
#### File: pyc/lib/getopt.py
```python
import os
class GetoptError(Exception):
def __init__(self, msg, opt=''):
self.msg = msg
self.opt = opt
class error(GetoptError):
pass
def getopt(args, shortopts, longopts = []):
return ([('',)], [''])
def gnu_getopt(args, shortopts, longopts = []):
return ([('',)], [''])
def do_longs(opts, opt, longopts, args):
return ([('',)], [''])
def long_has_args(opt, longopts):
return True, ''
def do_shorts(opts, optstring, shortopts, args):
return ([('',)], [''])
def short_has_arg(opt, shortopts):
return True
```
#### File: pyc/lib/socket.py
```python
SHUT_RD=0
SHUT_WR=1
SHUT_RDWR=2
SOL_IP=0
SOL_SOCKET=1
SO_REUSEADDR=2
AF_UNIX=1
AF_INET=2
IP_TOS=1
SOCK_STREAM=1
SOCK_DGRAM=2
SOMAXCONN=128
INADDR_ANY=0
INADDR_BROADCAST=0xffffffff
INADDR_NONE=0xffffffff
INADDR_LOOPBACK=0x7f000001
class error(Exception): pass
class herror(Exception): pass
class gaierror(Exception): pass
class timeout(Exception): pass
class socket(object):
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0):
pass
def accept(self):
return (socket(), ('', 1) )
def fileno(self):
return 0
def listen(self, backlog):
return self
def shutdown(self, how):
return self
def close(self):
return self
# setblocking(0) == settimeout(0.0)
# setblocking(1) == settimeout(None)
def setblocking(self, flag):
return self
def settimeout(self, value):
return self
def gettimeout(self):
return 0.0
def setsockopt(self, level, optname, value):
return self
def getsockopt(self, level, optname, value=0):
return ''
def bind(self, address):
return self
def connect(self, address):
return self
def recv(self, bufsize, flags=0):
return ''
def send(self, string, flags=0):
return 0
def sendall(self, string, flags=0):
return None
def getsockname(self):
return ('', 0)
def getpeername(self):
return ('', 0)
def recvfrom(self, bufsize, flags=0):
return ('', ('', 0))
def sendto(self, bufsize, flags=0, address=0):
return 0
def getfqdn(host):
return ''
def gethostname():
return ''
def gethostbyname(hostname):
return ''
def ntohs(x):
return 0
def htons(x):
return 0
def ntohl(x):
return 0
def htonl(x):
return 0
def inet_aton(x):
return ''
def inet_ntoa(x):
return ''
def has_ipv6():
return False
def getdefaulttimeout():
return 0.0
def setdefaulttimeout(x):
return None
```
#### File: pyc/tests/scoping2.py
```python
x = 2
def f():
x = 3
def g():
global x
print x
x = 4
print x
g()
f()
print x
```
#### File: pyc/tests/t25.py
```python
def f():
return 1
f = lambda: 2
print f()
```
#### File: pyc/tests/t33.py
```python
def xbin(x): # return integer as string in binary
if x <= 0:
return "0"
else:
if (x&1 == 0):
s = "0"
else:
s = "1"
x = x >> 1
while (x > 0):
if (x&1 == 0):
s = "0" + s
else:
s = "1" + s
x = x >> 1
return s
print xbin(23)
```
#### File: pyc/tests/t4.py
```python
def hi(a, b, c):
if a < b < c:
print '< true'
else:
print '< false'
if a > b > c:
print '> true'
else:
print '> false'
hi(1, 2, 3)
hi(3, 2, 1)
hi(1, 2, 1)
hi(2, 1, 2)
```
#### File: pyc/tests/t6.py
```python
def hi():
print 'hi'
return 'ho'
print hi()
``` |
{
"source": "jplewa/qdk-python",
"score": 2
} |
#### File: aio/job/base_job.py
```python
import logging
from urllib.parse import urlparse
from typing import Any, Dict, TYPE_CHECKING
from azure.storage.blob import BlobClient
from azure.quantum.aio.storage import upload_blob, download_blob, ContainerClient
from azure.quantum._client.models import JobDetails
from azure.quantum.job.job import BaseJob as SyncBaseJob
if TYPE_CHECKING:
from azure.quantum.aio.workspace import Workspace
logger = logging.getLogger(__name__)
DEFAULT_TIMEOUT = 300 # Default timeout for waiting for job to complete
class BaseJob(SyncBaseJob):
# Optionally override these to create a Provider-specific Job subclass
"""
Base job class with methods to create a job from raw blob data,
upload blob data and download results.
"""
@classmethod
async def from_input_data(
cls,
workspace: "Workspace",
name: str,
target: str,
input_data: bytes,
content_type: str,
blob_name: str = "inputData",
encoding: str = "",
job_id: str = None,
container_name: str = None,
provider_id: str = None,
input_data_format: str = None,
output_data_format: str = None,
input_params: Dict[str, Any] = None,
**kwargs
) -> "BaseJob":
"""Create a new Azure Quantum job based on a raw input_data payload.
:param workspace: Azure Quantum workspace to submit the input_data to
:type workspace: "Workspace"
:param name: Name of the job
:type name: str
:param target: Azure Quantum target
:type target: str
:param input_data: Raw input data to submit
:type input_data: bytes
:param blob_name: Input data blob name, defaults to "inputData"
:type blob_name: str
:param content_type: Content type, e.g. "application/json"
:type content_type: str
:param encoding: input_data encoding, e.g. "gzip", defaults to empty string
:type encoding: str
:param job_id: Job ID, defaults to None
:type job_id: str, optional
:param container_name: Container name, defaults to None
:type container_name: str
:param provider_id: Provider ID, defaults to None
:type provider_id: str, optional
:param input_data_format: Input data format, defaults to None
:type input_data_format: str, optional
:param output_data_format: Output data format, defaults to None
:type output_data_format: str, optional
        :param input_params: Input parameters, defaults to None
        :type input_params: Dict[str, Any], optional
:return: Azure Quantum Job
:rtype: Job
"""
# Generate job ID if not specified
if job_id is None:
job_id = cls.create_job_id()
# Create container if it does not yet exist
container_uri = await workspace.get_container_uri(
job_id=job_id,
container_name=container_name
)
logger.debug(f"Container URI: {container_uri}")
# Upload data to container
input_data_uri = await cls.upload_input_data(
container_uri=container_uri,
input_data=input_data,
content_type=content_type,
blob_name=blob_name,
encoding=encoding
)
# Create and submit job
return await cls.from_storage_uri(
workspace=workspace,
job_id=job_id,
target=target,
input_data_uri=input_data_uri,
container_uri=container_uri,
name=name,
input_data_format=input_data_format,
output_data_format=output_data_format,
provider_id=provider_id,
input_params=input_params,
**kwargs
)
@classmethod
async def from_storage_uri(
cls,
workspace: "Workspace",
name: str,
target: str,
input_data_uri: str,
provider_id: str,
input_data_format: str,
output_data_format: str,
container_uri: str = None,
job_id: str = None,
input_params: Dict[str, Any] = None,
submit_job: bool = True,
**kwargs
) -> "BaseJob":
"""Create new Job from URI if input data is already uploaded
to blob storage
:param workspace: Azure Quantum workspace to submit the blob to
:type workspace: "Workspace"
:param name: Job name
:type name: str
:param target: Azure Quantum target
:type target: str
:param input_data_uri: Input data URI
:type input_data_uri: str
:param provider_id: Provider ID
:type provider_id: str, optional
:param input_data_format: Input data format
:type input_data_format: str, optional
:param output_data_format: Output data format
:type output_data_format: str, optional
:param container_uri: Container URI, defaults to None
:type container_uri: str
:param job_id: Pre-generated job ID, defaults to None
:type job_id: str
:param input_params: Input parameters, defaults to None
:type input_params: Dict[str, Any], optional
:param submit_job: If job should be submitted to the service, defaults to True
:type submit_job: bool
:return: Job instance
:rtype: Job
"""
        # Generate job_id and input_params if not specified
if job_id is None:
job_id = cls.create_job_id()
if input_params is None:
input_params = {}
# Create container for output data if not specified
if container_uri is None:
container_uri = await workspace.get_container_uri(job_id=job_id)
# Create job details and return Job
details = JobDetails(
id=job_id,
name=name,
container_uri=container_uri,
input_data_format=input_data_format,
output_data_format=output_data_format,
input_data_uri=input_data_uri,
provider_id=provider_id,
target=target,
input_params=input_params,
**kwargs
)
job = cls(workspace, details, **kwargs)
logger.info(
f"Submitting problem '{name}'. \
Using payload from: '{job.details.input_data_uri}'"
)
if submit_job:
logger.debug(f"==> submitting: {job.details}")
await job.submit()
return job
@staticmethod
async def upload_input_data(
container_uri: str,
input_data: bytes,
content_type: str,
blob_name: str = "inputData",
encoding: str = "",
return_sas_token: bool = False
) -> str:
"""Upload input data file
:param container_uri: Container URI
:type container_uri: str
:param input_data: Input data in binary format
:type input_data: bytes
:param content_type: Content type, e.g. "application/json"
:type content_type: str
:param blob_name: Blob name, defaults to "inputData"
:type blob_name: str, optional
:param encoding: Encoding, e.g. "gzip", defaults to ""
:type encoding: str, optional
:param return_sas_token: Flag to return SAS token as part of URI, defaults to False
:type return_sas_token: bool, optional
:return: Uploaded data URI
:rtype: str
"""
container_client = ContainerClient.from_container_url(
container_uri
)
uploaded_blob_uri = await upload_blob(
container_client,
blob_name,
content_type,
encoding,
input_data,
return_sas_token=return_sas_token
)
await container_client.close()
return uploaded_blob_uri
async def download_data(self, blob_uri: str) -> dict:
"""Download file from blob uri
:param blob_uri: Blob URI
:type blob_uri: str
:return: Payload from blob
:rtype: dict
"""
url = urlparse(blob_uri)
if url.query.find("se=") == -1:
            # blob_uri does not contain a SAS token,
# get sas url from service
blob_client = BlobClient.from_blob_url(
blob_uri
)
blob_uri = self.workspace._get_linked_storage_sas_uri(
blob_client.container_name, blob_client.blob_name
)
payload = await download_blob(blob_uri)
await blob_client.close()
else:
# blob_uri contains SAS token, use it
payload = await download_blob(blob_uri)
return payload
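# Note: "se" is the signed-expiry field of an Azure SAS token, so its absence from
# the query string is used above as a heuristic that the blob URI carries no SAS
# token and a fresh SAS URI must be requested from the workspace.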
```
#### File: quantum/cirq/job.py
```python
from typing import TYPE_CHECKING, Dict, Sequence
if TYPE_CHECKING:
import cirq
from azure.quantum import Job as AzureJob
class Job:
"""
Thin wrapper around an Azure Quantum Job that supports
returning results in Cirq format.
"""
def __init__(
self,
azure_job: "AzureJob",
program: "cirq.Circuit",
measurement_dict: dict = None
):
"""Construct a Job."""
self._azure_job = azure_job
self._program = program
self._measurement_dict = measurement_dict
def job_id(self) -> str:
"""Returns the job id (UID) for the job."""
return self._azure_job.id
def status(self) -> str:
"""Gets the current status of the job."""
self._azure_job.refresh()
return self._azure_job.details.status
def target(self) -> str:
"""Returns the target where the job was run."""
return self._azure_job.details.target
def name(self) -> str:
"""Returns the name of the job which was supplied during job creation."""
return self._azure_job.details.name
def num_qubits(self) -> int:
"""Returns the number of qubits for the job."""
return self._azure_job.details.metadata["qubits"]
def repetitions(self) -> int:
"""Returns the number of repetitions for the job."""
return self._azure_job.details.metadata["repetitions"]
def measurement_dict(self) -> Dict[str, Sequence[int]]:
"""Returns a dictionary of measurement keys to target qubit index."""
if self._measurement_dict is None:
from cirq import MeasurementGate
            measurements = [
                op for op in self._program.all_operations()
                if isinstance(op.gate, MeasurementGate)
            ]
self._measurement_dict = {
meas.gate.key: [q.x for q in meas.qubits] for meas in measurements
}
return self._measurement_dict
def results(self, timeout_seconds: int = 7200) -> "cirq.Result":
"""Poll the Azure Quantum API for results."""
return self._azure_job.get_results(timeout_secs=timeout_seconds)
def cancel(self):
"""Cancel the given job."""
self._azure_job.workspace.cancel_job(self._azure_job)
def delete(self):
"""Delete the given job."""
self._azure_job.workspace.cancel_job(self._azure_job)
def __str__(self) -> str:
return f'azure.quantum.cirq.Job(job_id={self.job_id()})'
```
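A short usage sketch for this wrapper, assuming an `azure_job` already submitted through an Azure Quantum workspace and the `cirq.Circuit` it was built from (both placeholders here, reusing the `Job` class defined above):
```python
def summarize_and_wait(azure_job, circuit):
    job = Job(azure_job=azure_job, program=circuit)
    print(job.job_id(), job.status(), job.target())
    # measurement_dict() is derived lazily from the circuit's MeasurementGate ops
    print(job.measurement_dict())
    return job.results(timeout_seconds=600)
```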
#### File: tests/unit/test_streaming_problem.py
```python
import unittest
import json
from typing import List
import pytest
from azure.quantum.optimization import (
StreamingProblem,
Problem,
ProblemType,
Term,
)
from azure.quantum.storage import download_blob
from common import QuantumTestBase
class TestStreamingProblem(QuantumTestBase):
def __test_upload_problem(
self,
count: int,
terms_thresh: int,
size_thresh: int,
compress: bool,
problem_type: ProblemType = ProblemType.ising,
initial_terms: List[Term] = [],
**kwargs
):
if not (self.in_recording or self.is_live):
# Temporarily disabling this test in playback mode
# due to multiple calls to the storage API
# that need to have a request id to distinguish
# them while playing back
print("Skipping this test in playback mode")
return
ws = self.create_workspace()
sProblem = StreamingProblem(
ws, name="test", problem_type=problem_type, terms=initial_terms
)
rProblem = Problem(
"test", problem_type=problem_type, terms=initial_terms
)
sProblem.upload_terms_threshold = terms_thresh
sProblem.upload_size_threshold = size_thresh
sProblem.compress = compress
for i in range(count):
sProblem.add_term(c=i, indices=[i, i + 1])
rProblem.add_term(c=i, indices=[i, i + 1])
self.assertEqual(problem_type, sProblem.problem_type)
self.assertEqual(problem_type.name, sProblem.stats["type"])
self.assertEqual(
count + len(initial_terms), sProblem.stats["num_terms"]
)
self.assertEqual(
self.__kwarg_or_value(kwargs, "avg_coupling", 2),
sProblem.stats["avg_coupling"],
)
self.assertEqual(
self.__kwarg_or_value(kwargs, "max_coupling", 2),
sProblem.stats["max_coupling"],
)
self.assertEqual(
self.__kwarg_or_value(kwargs, "min_coupling", 2),
sProblem.stats["min_coupling"],
)
uri = sProblem.upload(ws)
uploaded = json.loads(sProblem.download().serialize())
local = json.loads(rProblem.serialize())
self.assertEqual(uploaded, local)
def __kwarg_or_value(self, kwarg, name, default):
if name in kwarg:
return kwarg[name]
return default
@pytest.mark.live_test
def test_streaming_problem_small_chunks(self):
self.__test_upload_problem(4, 1, 1, False)
@pytest.mark.live_test
def test_streaming_problem_large_chunks(self):
self.__test_upload_problem(4, 1000, 10e6, False)
@pytest.mark.live_test
def test_streaming_problem_small_chunks_compressed(self):
self.__test_upload_problem(4, 1, 1, True)
@pytest.mark.live_test
def test_streaming_problem_large_chunks_compressed(self):
self.__test_upload_problem(4, 1000, 10e6, True)
@pytest.mark.live_test
def test_streaming_problem_pubo(self):
self.__test_upload_problem(4, 1, 1, False, ProblemType.pubo)
@pytest.mark.live_test
def test_streaming_problem_initial_terms(self):
self.__test_upload_problem(
4,
1,
1,
False,
initial_terms=[
Term(w=10, indices=[0, 1, 2]),
Term(w=20, indices=[1, 2, 3]),
],
avg_coupling=(4 * 2 + 6) / 6,
max_coupling=3,
)
def check_all(self):
self.test_streaming_problem_small_chunks()
self.test_streaming_problem_large_chunks()
self.test_streaming_problem_small_chunks_compressed()
self.test_streaming_problem_large_chunks_compressed()
self.test_streaming_problem_pubo()
self.test_streaming_problem_initial_terms()
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jplewa/workflow-scheduling-problem-qiskit",
"score": 2
} |
#### File: jplewa/workflow-scheduling-problem-qiskit/plot_utils.py
```python
import json
import matplotlib.pyplot as plt
import numpy as np
from qiskit.quantum_info import Pauli
import matplotlib.patches as mpatches
GREEN = '#4a804f'
RED = '#db4059'
optimal = "0000010101"
correct_solutions = ['0110100011', '0000010101', '0101101010', '0001110001', '0011111000', '0100001110', '0010011100']
font = {'size': 50}
SIZE = 100
def array_contains(h, v):
for i in range(len(h)):
if h[i] == True and v[i] == 0:
return False
return True
def get_energy(key, d):
if key in d:
return d[key]
else:
return 0
def is_correct(key):
if key in correct_solutions:
return True
return False
def is_ideal(key):
if key == optimal:
return True
return False
def compute_time(k):
return int(k[0]) + int(k[1]) * 2 + int(k[2]) * 4 + int(k[3]) * 8 + int(k[4])* 4 + int(k[5]) + int(k[6]) * 2 + int(k[7]) * 12 + int(k[8]) * 3 + int(k[9]) * 6
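# Worked example (illustrative, not from the original notebook): each bit of the key
# contributes a fixed weight [1, 2, 4, 8, 4, 1, 2, 12, 3, 6]; for the optimal key
# "0000010101" only bits 5, 7 and 9 are set, so compute_time("0000010101")
# returns 1 + 12 + 6 = 19.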
def draw_probability_diagram(filename, filenameEnergy, title):
x = []
y = []
index = 0
corrects_index = []
correct_res_number = 0
incorrect_res_number = 0
ideal = 0
with open(filenameEnergy) as handle:
ennergies_dict = json.loads(handle.read())
min_val = min(ennergies_dict.values())
max_val = max(ennergies_dict.values())
all_res_number = 0
with open(filename) as handle:
results = json.loads(handle.read())
for key, val in results.items():
all_res_number += int(val)
print('all results: ', all_res_number)
for key, val in results.items():
energy = get_energy(key, ennergies_dict)
x.append(energy.real)
if is_correct(key):
corrects_index.append(index)
y.append(val/all_res_number)
correct_res_number += val
else:
y.append(val/all_res_number)
incorrect_res_number += val
index += 1
if optimal in results:
ideal = results[optimal]
print("optimal: ", ideal)
print('correct res:', correct_res_number)
print('incorrect res:', incorrect_res_number)
print('correct configurations:', len(corrects_index))
print('incorrect configurations:', index - len(corrects_index))
x_label = 'energy'
y_label = 'Probability'
width = (abs(min_val) + abs(max_val)) * 0.01
fig, ax = plt.subplots(figsize=(40,20))
barlist = plt.bar(x, y, color=RED, width=width)
for i in corrects_index:
barlist[i].set_color(GREEN)
barlist[i].set_width(width)
ax.set_xlabel(x_label, fontdict=font)
ax.set_ylabel(y_label, fontdict=font)
ax.tick_params(axis='y', labelsize='50')
ax.tick_params(axis='x', labelsize='40', labelrotation=90)
red_patch = mpatches.Patch(color='r', label='incorrect solutions')
g_patch = mpatches.Patch(color='g', label='correct solutions')
    plt.xlim(min_val * 1.1, max_val * 1.1)
plt.legend(handles=[red_patch, g_patch], fontsize=50)
ax.set_title(title, fontdict={'fontsize': SIZE, 'fontweight': 'medium'})
fig.tight_layout()
def plot_histogram(filename):
"""Exact results in histogram form. """
with open(filename) as handle:
dict_res = json.loads(handle.read())
keys = []
values = []
index = 0
corrects_index = []
for key, val in dict_res.items():
keys.append(key[::-1])
values.append(int(val))
if is_correct(key):
corrects_index.append(index)
index += 1
x_label = 'Result'
y_label = 'Number of shots'
fig, ax = plt.subplots(figsize=(40,20))
ax.set_xlabel(x_label, fontdict=font)
plt.bar(keys, values)
barlist = plt.bar(keys, values, color=RED)
for i in corrects_index:
barlist[i].set_color(GREEN)
ax.set_ylabel(y_label, fontdict=font)
ax.tick_params(axis='y', labelsize='50')
ax.tick_params(axis='x', labelsize='40', labelrotation=90)
fig.tight_layout()
# When results differ it is not possible to mark them all; only results with a number of shots greater than b are presented.
def plot_histogram_with_different_res(filename, b):
with open(filename) as handle:
dict_res = json.loads(handle.read())
keys = []
values = []
index = 0
corrects_index = []
for key, val in dict_res.items():
if int(val) > b:
keys.append(key[::-1])
values.append(int(val))
if is_correct(key):
corrects_index.append(index)
index += 1
x_label = 'result'
y_label = 'Number of shots'
fig, ax1 = plt.subplots(figsize=(40,20))
ax1.set_xlabel(x_label, fontdict=font)
plt.bar(keys, values)
barlist = plt.bar(keys, values, color=RED)
for i in corrects_index:
barlist[i].set_color(GREEN)
ax1.set_ylabel(y_label, fontdict=font)
ax1.tick_params(axis='y', labelsize='50')
ax1.tick_params(axis='x', labelsize='40', labelrotation=90)
fig.tight_layout() # otherwise the right y-label is slightly clipped
# draw all energy values for all eigenvalues for the hamiltonian
def draw_energy_diagram(filename):
x = []
y = []
index = 0
corrects_index = []
x1 = []
x2 = []
y1 = []
y2 = []
with open(filename) as handle:
ennergies_dict = json.loads(handle.read())
min_val = min(ennergies_dict.values())
max_val = max(ennergies_dict.values())
for key, val in ennergies_dict.items():
energy = get_energy(key, ennergies_dict)
x.append(energy.real)
if is_correct(key):
x1.append(energy.real)
y1.append(1)
corrects_index.append(index)
y.append(1)
else:
x2.append(energy.real)
y2.append(1)
y.append(0.5)
index += 1
print('correct:', len(corrects_index))
print('incorrect:', index - len(corrects_index))
width = (abs(min_val) + abs(max_val)) * 0.001
x_label = 'energy'
y_label = ''
fig, ax = plt.subplots(figsize=(40,20))
ax.set_xlabel(x_label, fontdict=font)
    barlist = plt.bar(x1, y1, color=GREEN, width=width)
    barlist2 = plt.bar(x2, y2, color=RED, width=width)
ax.tick_params(axis='x', labelsize='40', labelrotation=90)
ax.axes.get_yaxis().set_visible(False)
red_patch = mpatches.Patch(color='r', label='incorrect solutions')
g_patch = mpatches.Patch(color='g', label='correct solutions')
plt.legend(handles=[red_patch, g_patch], fontsize=50)
fig.tight_layout()
``` |
{
"source": "jplind79/rcat",
"score": 3
} |
#### File: rcat/plot/colors.py
```python
import sys
from matplotlib import colors
import matplotlib.pyplot as plt
import palettable
# Self-produced colormaps
#
prct_diff = ["#006400", "#3CB371", "#8220F0", "#000096", "#0000CD", "#4169E1",
"#1E90FF", "#00BFFF", "#A0D2FF", "#D2F5FF", "#FFFFC8", "#FFE132",
"#FFAA00", "#FF6E00", "#FF0000", "#C80000", "#A02323", "#FF69B4",
"#E4BD9A", "#BD885A"]
prct_diff = colors.ListedColormap(prct_diff)
prct_diff.set_over("#a14f07")
prct_diff.set_under("#42f942")
prct_diff_r = colors.ListedColormap(prct_diff.colors[::-1])
myWhYlOr = ["#DFEDED", "#E9F0D2", "#F0F2B7", "#F6F59A", "#FAF87C", "#FDFB5A",
"#FEFE29", "#FFF500", "#FFE700", "#FFD900", "#FFCB00", "#FFBD00",
"#FFAE00", "#FF9F00", "#FF8F00", "#FF7D00", "#FF6B00",
"#FF5500", "#FF3A00", "#FF0000"]
prec_diff = ["#8B2500", "#983900", "#A64D00", "#B46100", "#C27500", "#CF8B0D",
"#DAA543", "#E4BE78", "#EFD8AE", "#F9F2E4", "#E4EEE4", "#AECEAE",
"#78AD78", "#438C43", "#0D6C0D", "#285428", "#5D3F5D", "#932A93",
"#C915C9", "#FF00FF"]
prec_diff = colors.ListedColormap(prec_diff)
prec_diff.set_over("#fe8181")
prec_diff.set_under("#e0301e")
prec_diff_wzero = ["#8B2500", "#983900", "#A64D00", "#B46100", "#C27500",
"#CF8B0D", "#DAA543", "#E4BE78", "#EFD8AE", "#F9F2E4",
"#FFFFFF", "#FFFFFF", "#E4EEE4", "#AECEAE",
"#78AD78", "#438C43", "#0D6C0D", "#285428", "#5D3F5D",
"#5B7AD7", "#2D50B9", "#233E90", "#142352"]
prec_diff_wzero = colors.ListedColormap(prec_diff_wzero)
prec_diff_wzero.set_over("#142352")
prec_diff_wzero.set_under("#e0301e")
myGrBuPu_long = ["White", "#edfac2", "#cdffcd", "#99f0b2", "#53bd9f",
"#32a696", "#3296b4", "#0570b0", "#05508c", "#0a1f96",
"#2c0246", "#6a2c5a"]
myGrBuPu_long = colors.ListedColormap(myGrBuPu_long)
myGrBuPu_long.set_over("#ff00ff")
myGrBuPu_long.set_under("White")
myGrBuPu = ["#edfac2", "#cdffcd", "#99f0b2", "#53bd9f", "#32a696", "#3296b4",
"#0570b0", "#05508c", "#0a1f96", "#2c0246", "#6a2c5a", "#ff99ac",
"#ff4d6e", "#ffcccc", "#ffffcc"]
myGrBuPu = colors.ListedColormap(myGrBuPu)
myGrBuPu.set_over("#f7f056")
myGrBuPu.set_under("White")
topography = ["#79B2DE", "#ACD0A5", "#94BF8B", "#A8C68F", "#BDCC96",
"#D1D7AB", "#E1E4B5", "#EFEBC0", "#E8E1B6", "#DED6A3",
"#D3CA9D", "#CAB982", "#C3A76B", "#B9985A", "#AA8753",
"#AC9A7C", "#BAAE9A", "#CAC3B8", "#E0DED8"]
# Palettable colors (more info: https://jiffyclub.github.io/palettable/)
#
# Greens
greens_seq = palettable.colorbrewer.get_map('Greens',
'sequential', 8).mpl_colormap
# Greys
greys_seq = palettable.colorbrewer.get_map('Greys',
'sequential', 8).mpl_colormap
# Diverging brown-blue-green
BrBg_div = palettable.colorbrewer.get_map('BrBg', 'diverging', 9).mpl_colormap
# ??
unknown = ["#DFEDED", "#E9F0D2", "#F0F2B7", "#F6F59A", "#FAF87C", "#FDFB5A",
"#FEFE29", "#FFF500", "#FFE700", "#FFD900", "#FFCB00", "#FFBD00",
"#FFAE00", "#FF9F00", "#FF8F00", "#FF7D00", "#FF6B00", "#FF5500",
"#FF3A00", "#FF0000"]
unknown = colors.ListedColormap(unknown)
# Good for line plots
set3 = palettable.colorbrewer.get_map('Set3', 'qualitative', 11).mpl_colors
set2 = palettable.colorbrewer.get_map('Set2', 'qualitative', 8).mpl_colors
set1 = palettable.colorbrewer.get_map('Set1', 'qualitative', 9).mpl_colors
# Misc
almost_black = '#262626'
magenta = "#FF00FF"
green = "#00EE00"
color_dict =\
{
'topography': topography,
'prec_diff': prec_diff,
'prec_diff_wzero': prec_diff_wzero,
'myGrBuPu': myGrBuPu,
'myGrBuPu_long': myGrBuPu_long,
'prct_diff': prct_diff,
'prct_diff_r': prct_diff_r,
'myWhYlOr': myWhYlOr,
'set1': set1,
'set2': set2,
'set3': set3,
'unknown': unknown
}
single_color_dict = \
{
'green': green,
'magenta': magenta,
'almost_black': almost_black
}
def getcolormap(cmap_name, custom=False):
'''
Function to retrieve colormap, either customized (custom=True) or available
through Matplotlib predefined colormaps.
Parameters
----------
cmap_name: string
String, giving name of colormap to be retrieved.
custom: Boolean
Logical indicating self-produced (custom=True) or Matplotlib
colormap.
Returns
-------
cmap: Matplotlib colormap object
'''
if custom:
cmap = color_dict[cmap_name]
else:
msg = ("Error retrieving colormap: {}.\nMake sure it exists in "
"Matplotlib's predefined colormaps, or change "
"accordingly.").format(cmap_name)
try:
cmap = plt.cm.get_cmap(cmap_name)
except ValueError:
print(msg)
            sys.exit(1)
return cmap
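# Usage sketch (hypothetical calls): getcolormap('prct_diff', custom=True) returns the
# custom ListedColormap registered in color_dict, while getcolormap('viridis') falls
# back to Matplotlib's predefined colormap of that name.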
def getsinglecolor(color_name):
'''
Function to retrieve single custom color.
Parameters
----------
color_name: string
String, giving name of color to be retrieved.
Returns
-------
color: Matplotlib color object
'''
try:
color = single_color_dict[color_name]
    except KeyError:
        print("Color {} does not exist in single color dictionary.".
              format(color_name))
        sys.exit(1)
return color
def norm_colors(bounds, ncolors, clip=False):
"""
In addition to min and max of levels, this function takes as arguments
boundaries between which data is to be mapped. The colors are then
linearly distributed between these 'bounds'.
"""
    return colors.BoundaryNorm(boundaries=bounds, ncolors=ncolors,
                               clip=clip)
``` |
{
"source": "jplindquist/Projects",
"score": 5
} |
#### File: Projects/Numbers/fibonacci.py
```python
def fibonnaciSequence(n):
assert n > 0
sequence = [1] # Initialize sequence to 1
while len(sequence) < n:
if len(sequence) == 1:
# If the length of sequence is 1, append 1 to the end so the series is 1, 1 so far
sequence.append(1)
else:
# Add the previous 2 numbers in the series, and append it to the list
sequence.append(sequence[-1] + sequence[-2])
for i in range(len(sequence)):
# Convert the numbers to strings
sequence[i] = str(sequence[i])
# Return the sequence, separated by commas
return (', '.join(sequence))
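# Example (illustrative): fibonnaciSequence(5) returns the string '1, 1, 2, 3, 5'.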
def main():
n = raw_input('How many numbers do you need? ')
try:
n = int(n)
except ValueError:
print "Invalid input. Exiting..."
exit(0)
if n > 0:
print fibonnaciSequence(n)
else:
print "You must enter a number larger than 0. Try again..."
main()
if __name__ == "__main__":
main()
``` |
{
"source": "JPlin/pyhowfar",
"score": 2
} |
#### File: pyhowfar/datasets/WFLW.py
```python
from __future__ import print_function
import os
import numpy as np
import random
import math
from skimage import io
import torch
import torch.utils.data as data
# from utils.utils import *
from utils.imutils import *
from utils.transforms import *
from utils.osutils import *
class WFLW(data.Dataset):
def __init__(self,
args,
split,
img_folder='/mnt/d1p8/ming/FaceData/WFLW/WFLW_align',
resize_size=256):
self.nParts = 98
self.pointType = args.pointType
# self.anno = anno
self.img_folder = img_folder
self.split = split
self.is_train = True if self.split == 'train' else False
self.anno = self._getDataFaces(self.is_train)
self.total = len(self.anno)
self.scale_factor = args.scale_factor
self.rot_factor = args.rot_factor
self.resize_size = resize_size
self.mean = [0.5, 0.5, 0.5]
self.std = [0.5, 0.5, 0.5]
def _getDataFaces(self, is_train):
files = os.listdir(self.img_folder)
lines = []
for d in files:
if d.endswith('.jpg') or d.endswith('.png'):
lines.append(os.path.join(self.img_folder, d))
split_point = int(len(lines) * 0.95)
if is_train:
print('=> loaded wflwtrain set, {} images were found'.format(
split_point))
return lines[:split_point]
else:
print('=> loaded wflw validation set, {} images were found'.format(
len(lines) - split_point))
return lines[split_point:]
def __len__(self):
return self.total
def __getitem__(self, index):
inp, out, pts, c, s = self.generateSampleFace(index)
self.pts, self.c, self.s = pts, c, s
if self.is_train:
return inp, out
else:
meta = {
'index': index,
'center': c,
'scale': s,
'pts': pts,
}
return inp, out, meta
def generateSampleFace(self, idx):
sf = self.scale_factor
rf = self.rot_factor
pts = read_npy(
os.path.join(
self.img_folder, self.anno[idx].replace('.jpg',
'.npy').replace(
'.png', '.npy')))
c = torch.Tensor((256 / 2, 256 / 2))
s = 1.0
img = load_image(os.path.join(self.img_folder, self.anno[idx]))
# rescale image
img = resize(img, self.resize_size, self.resize_size)
pts = torch.Tensor(pts)
r = 0
if self.is_train:
# s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
r = torch.randn(1).mul_(rf).clamp(
-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0
if random.random() <= 0.5:
img = torch.from_numpy(fliplr(img.numpy())).float()
pts = shufflelr(pts, width=img.size(2), dataset='WFLW')
c[0] = img.size(2) - c[0]
# add random color disturb
img[0, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
img[1, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
img[2, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
inp = crop(img, c, s, [256, 256], rot=r)
# inp = img
inp = color_normalize(inp, self.mean, self.std)
tpts = pts.clone()
out = torch.zeros(self.nParts, 64, 64)
for i in range(self.nParts):
if tpts[i, 0] > 0:
if r != 0:
tpts[i, 0:2] = to_torch(
sk_transform(
tpts[i, 0:2] + 1,
c,
s, [256, 256],
invert=0,
rot=-r))
else:
tpts[i, 0:2] = tpts[i, 0:2] + 1
tpts[i] = tpts[i] / 4.0
out[i] = draw_labelmap(out[i], tpts[i] - 1, sigma=1)
return inp, out, tpts * 4.0, c, s
if __name__ == "__main__":
import opts, demo
args = opts.argparser()
dataset = WFLW(args, 'test')
crop_win = None
for i in range(dataset.__len__()):
input, target, meta = dataset.__getitem__(i)
input = input.numpy().transpose(1, 2, 0) * 255.
target = target.numpy()
if crop_win is None:
crop_win = plt.imshow(input)
else:
crop_win.set_data(input)
plt.pause(0.5)
        plt.draw()
```
#### File: pyhowfar/utils/transforms.py
```python
from __future__ import absolute_import
import os
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc
import torch
from skimage import transform as T
from .imutils import *
from .misc import *
def color_normalize(x, mean, std):
if x.size(0) == 1:
x = x.repeat(3, x.size(1), x.size(2))
for t, m, s in zip(x, mean, std):
t.sub_(m).div_(s)
return x
def color_denormalize(x, mean, std):
if x.size(0) == 1:
x = x.repeat(3, x.size(1), x.size(2))
for t, m, s in zip(x, mean, std):
t.mul_(s).add_(m)
return x
def flip_back(flip_output, dataset='mpii'):
"""
flip output map
"""
if dataset == 'mpii':
matchedParts = ([0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13])
elif dataset == 'cari_align':
matchedParts = ([27 , 29],[0 , 5] , [1 , 4] , [2 , 3] , [7 , 12], [6 , 13], [10 ,15] , [16 , 17] , [9 , 14] , [8 ,11] , [18 , 19] , [20 , 22] , [23 , 25] , [62 , 30] , [61 , 31] , [60 , 32], [59 , 33], [58 , 34], [57 , 35], [56 , 36], [55 , 37] , [54 , 38], [53, 39] , [52 , 40] , [51 , 41] , [50 , 42], [49 , 43] , [48 , 44] , [47 , 45])
elif dataset == 'WFLW':
matchedParts = ([0 , 32], [1 , 31] , [2 , 30] , [3 , 29] , [4, 28] , [5 , 27] , [6 , 26] , [7 , 25] , [8 , 24] , [9 , 23] , [10 , 22] , [11 , 21], [12 , 20] , [13 , 19] , [14 , 18] , [15 , 17] , [33 , 46] , [34 , 45] , [35 , 44], [36 , 43],[37 , 42], [38 , 50] , [39 , 49] , [40 , 48] , [41 , 47] , [60 , 72] , [61 , 71] , [62 , 70] , [63 , 69] , [64 , 68] , [65 , 75] , [66 , 74] , [67 , 73] , [56 , 58] , [55 , 59] , [76 , 82] , [88 , 92] , [77 , 81] , [78 , 80] , [87 , 83] , [86 , 84], [89 , 91], [95 , 93] , [96 , 97])
else:
print('Not supported dataset: ' + dataset)
# flip output horizontally
flip_output = fliplr(flip_output.numpy())
# Change left-right parts
for pair in matchedParts:
tmp = np.copy(flip_output[:, pair[0], :, :])
flip_output[:, pair[0], :, :] = flip_output[:, pair[1], :, :]
flip_output[:, pair[1], :, :] = tmp
return torch.from_numpy(flip_output).float()
def shufflelr(x, width, dataset='mpii'):
"""
flip coords
"""
if dataset == 'mpii':
matchedParts = ([0, 5], [1, 4], [2, 3], [10, 15], [11, 14], [12, 13])
elif dataset in ['w300lp', 'vw300', 'w300', 'menpo']:
matchedParts = ([0, 16], [1, 15], [2, 14], [3, 13], [4, 12], [5, 11], [6, 10], [7, 9],
[17, 26], [18, 25], [19, 26], [20, 23], [21, 22], [36, 45], [37, 44],
[38, 43], [39, 42], [41, 46], [40, 47], [31, 35], [32, 34], [50, 52],
[49, 53], [48, 54], [61, 63], [62, 64], [67, 65], [59, 55], [58, 56])
elif dataset == 'WFLW':
matchedParts = ([0, 32], [1, 31], [2, 30], [3, 29], [4, 28], [5, 27], [
6, 26
], [7, 25], [8, 24], [9, 23], [10, 22], [11, 21], [12, 20], [13, 19], [
14, 18
], [15, 17], [33, 46], [34, 45], [35, 44], [36, 43], [37, 42], [
38, 50
], [39, 49], [40, 48], [41, 47], [60, 72], [61, 71], [62, 70], [
63, 69
], [64, 68], [65, 75], [66, 74], [67, 73], [56, 58], [55, 59],
[76, 82], [88, 92], [77, 81], [78, 80], [87, 83],
[86, 84], [89, 91], [95, 93], [96, 97])
elif dataset == 'cari_align':
matchedParts = ([27 , 29],[0 , 5] , [1 , 4] , [2 , 3] , [7 , 12], [6 , 13], [10 ,15] , [16 , 17] , [9 , 14] , [8 ,11] , [18 , 19] , [20 , 22] , [23 , 25] , [62 , 30] , [61 , 31] , [60 , 32], [59 , 33], [58 , 34], [57 , 35], [56 , 36], [55 , 37] , [54 , 38], [53, 39] , [52 , 40] , [51 , 41] , [50 , 42], [49 , 43] , [48 , 44] , [47 , 45])
else:
print('Not supported dataset: ' + dataset)
# Flip horizontal
x[:, 0] = width - x[:, 0]
# Change left-right parts
for pair in matchedParts:
tmp = x[pair[0], :].clone()
x[pair[0], :] = x[pair[1], :]
x[pair[1], :] = tmp
return x
def fliplr(x):
if x.ndim == 3:
x = np.transpose(np.fliplr(np.transpose(x, (0, 2, 1))), (0, 2, 1))
elif x.ndim == 4:
for i in range(x.shape[0]):
x[i] = np.transpose(np.fliplr(np.transpose(x[i], (0, 2, 1))), (0, 2, 1))
return x.astype(float)
def get_transform(center, scale, res, rot=0):
"""
General image processing functions
"""
# Generate transformation matrix
h = 200 * scale
t = np.zeros((3, 3))
t[0, 0] = float(res[1]) / h
t[1, 1] = float(res[0]) / h
t[0, 2] = res[1] * (-center[0] / h + .5)
t[1, 2] = res[0] * (-center[1] / h + .5)
# t[0, 2] = res[1] * (-float(center[0]) / h + .5)
# t[1, 2] = res[0] * (-float(center[1]) / h + .5)
t[2, 2] = 1
if not rot == 0:
rot = -rot # To match direction of rotation from cropping
rot_mat = np.zeros((3, 3))
rot_rad = rot * np.pi / 180
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
rot_mat[0, :2] = [cs, -sn]
rot_mat[1, :2] = [sn, cs]
rot_mat[2, 2] = 1
# Need to rotate around center
t_mat = np.eye(3)
t_mat[0, 2] = -res[1] / 2
t_mat[1, 2] = -res[0] / 2
t_inv = t_mat.copy()
t_inv[:2, 2] *= -1
t = np.dot(t_inv, np.dot(rot_mat, np.dot(t_mat, t)))
return t
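# Worked example (illustrative): with center=(128, 128), scale=1.28 and res=[256, 256],
# h = 200 * 1.28 = 256, so t[0, 0] = t[1, 1] = 1 and t[0, 2] = t[1, 2] = 0 -- the
# transform reduces to the identity matrix and transform() maps each point to itself.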
def transform(pt, center, scale, res, invert=0, rot=0):
# Transform pixel location to different reference
t = get_transform(center, scale, res, rot=rot)
if invert:
t = np.linalg.inv(t)
new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2].astype(int) + 1
def sk_transform(pt, center, scale, res, invert=0, rot=0):
rot = rot * np.pi / 180
x0, y0 = res[0] / 2, res[0] / 2
ret_pt = np.zeros_like(pt)
ret_pt[0] = ((pt[0] - x0) * np.cos(rot)) - ((pt[1] - y0) * np.sin(rot)) + x0
ret_pt[1] = ((pt[0] - x0) * np.sin(rot)) + ((pt[1] - y0) * np.cos(rot)) + y0
return ret_pt
def transform_preds(coords, center, scale, res):
# size = coords.size()
# coords = coords.view(-1, coords.size(-1))
# print(coords.size())
for p in range(coords.size(0)):
coords[p, 0:2] = to_torch(transform(coords[p, 0:2], center, scale, res, 1, 0))
return coords
def crop(img, center, scale, res, rot=0):
img = im_to_numpy(img)
# Preprocessing for efficient cropping
ht, wd = img.shape[0], img.shape[1]
sf = scale * 200.0 / res[0]
if sf < 2:
sf = 1
else:
new_size = int(np.math.floor(max(ht, wd) / sf))
new_ht = int(np.math.floor(ht / sf))
new_wd = int(np.math.floor(wd / sf))
if new_size < 2:
return torch.zeros(res[0], res[1], img.shape[2]) \
if len(img.shape) > 2 else torch.zeros(res[0], res[1])
else:
img = scipy.misc.imresize(img, [new_ht, new_wd])
center = center * 1. / sf
scale = scale / sf
# Upper left point
# ul = np.array(transform([0, 0], center, scale, res, invert=1))
# Bottom right point
# br = np.array(transform(res, center, scale, res, invert=1))
ul = np.array([0, 0])
br = np.array([255, 255])
# Padding so that when rotated proper amount of context is included
pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
if not rot == 0:
ul -= pad
br += pad
new_shape = [br[1] - ul[1], br[0] - ul[0]]
if len(img.shape) > 2:
new_shape += [img.shape[2]]
new_img = np.zeros(new_shape)
# Range to fill new array
new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
# Range to sample from original image
old_x = max(0, ul[0]), min(len(img[0]), br[0])
old_y = max(0, ul[1]), min(len(img), br[1])
new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]
if not rot == 0:
# Remove padding
new_img = scipy.misc.imrotate(new_img, rot)
new_img = new_img[pad:-pad, pad:-pad]
new_img = im_to_torch(scipy.misc.imresize(new_img, res))
return new_img
``` |
{
"source": "jplippi/neo3-boa",
"score": 3
} |
#### File: neo3-boa/boa3/cli.py
```python
import argparse
import os
import sys
from boa3.boa3 import Boa3
def main():
parser = argparse.ArgumentParser()
parser.add_argument("input", help=".py smart contract to compile")
args = parser.parse_args()
if not args.input.endswith(".py") or not os.path.isfile(args.input):
print("Input file is not .py")
sys.exit(1)
fullpath = os.path.realpath(args.input)
path, filename = os.path.split(fullpath)
try:
Boa3.compile_and_save(args.input)
print(f"Wrote {filename.replace('.py', '.nef')} to {path}")
except Exception as e:
print(e)
if __name__ == "__main__":
main()
```
#### File: boa3/compiler/codegeneratorvisitor.py
```python
import ast
from typing import Dict, Tuple
from boa3.compiler.codegenerator import CodeGenerator
from boa3.model.method import Method
from boa3.model.operation.binary.binaryoperation import BinaryOperation
from boa3.model.operation.binaryop import BinaryOp
from boa3.model.operation.unary.unaryoperation import UnaryOperation
from boa3.model.symbol import ISymbol
from boa3.model.type.itype import IType
from boa3.model.type.type import Type
from boa3.model.variable import Variable
class VisitorCodeGenerator(ast.NodeVisitor):
"""
This class is responsible for walk through the ast.
The methods with the name starting with 'visit_' are implementations of methods from the :class:`NodeVisitor` class.
These methods are used to walk through the Python abstract syntax tree.
:ivar generator:
"""
def __init__(self, generator: CodeGenerator):
self.generator = generator
@property
def symbols(self) -> Dict[str, ISymbol]:
return self.generator.symbol_table
def visit_to_generate(self, node: ast.AST):
"""
Visitor to generate the nodes that the primary visitor is used to retrieve value
:param node: an ast node
"""
result = self.visit(node)
# the default return of the name visitor is the name string
if isinstance(node, ast.Name):
# TODO: validate function calls
self.generator.convert_load_symbol(result)
def visit_FunctionDef(self, function: ast.FunctionDef):
"""
Visitor of the function definition node
Generates the Neo VM code for the function
:param function: the python ast function definition node
"""
method = self.symbols[function.name]
if function.returns is not None:
fun_rtype_id: str = self.visit(function.returns)
else:
fun_rtype_id: str = Type.none.identifier
symbol: ISymbol = self.generator.get_symbol(fun_rtype_id)
if isinstance(method, Method) and isinstance(symbol, IType):
fun_return: IType = symbol
method.return_type = fun_return
self.generator.convert_begin_method(method)
for stmt in function.body:
self.visit(stmt)
self.generator.convert_end_method()
def visit_arguments(self, arguments: ast.arguments) -> Dict[str, Variable]:
"""
Visitor of the function arguments node
:param arguments: the python ast function arguments node
:return: a dictionary that maps each argument to its identifier
"""
args: Dict[str, Variable] = {}
for arg in arguments.args:
var_id, var = self.visit_arg(arg) # Tuple[str, Variable]
args[var_id] = var
return args
def visit_arg(self, arg: ast.arg) -> Tuple[str, Variable]:
"""
Visitor of a function argument node
:param arg: the python ast arg node
:return: a tuple with the identifier and the argument
"""
var_id = arg.arg
var_type = self.visit(arg.annotation)
return var_id, Variable(var_type)
def visit_Return(self, ret: ast.Return):
"""
Visitor of a function return node
:param ret: the python ast return node
"""
if ret.value is not None:
self.visit_to_generate(ret.value)
def store_variable(self, var_id: str, value: ast.AST, index: ast.AST = None):
# if the value is None, it is a variable declaration
if value is not None:
if index is None:
# if index is None, then it is a variable assignment
self.visit_to_generate(value)
self.generator.convert_store_variable(var_id)
else:
# if not, it is an array assignment
self.generator.convert_load_symbol(var_id)
self.visit_to_generate(index)
self.visit_to_generate(value)
self.generator.convert_set_array_item()
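    # Example of how the branches above are reached (illustrative):
    #   "x: int = 1" -> visit_AnnAssign -> store_variable('x', value) with index=None
    #   "a[0] = 1"   -> visit_Assign, whose Subscript target returns ('a', index),
    #                   so store_variable takes the array-assignment branch.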
def visit_AnnAssign(self, ann_assign: ast.AnnAssign):
"""
Visitor of an annotated assignment node
:param ann_assign: the python ast variable assignment node
"""
var_id = self.visit(ann_assign.target)
self.store_variable(var_id, ann_assign.value)
def visit_Assign(self, assign: ast.Assign):
"""
Visitor of an assignment node
:param assign: the python ast variable assignment node
"""
var_index = None
var_id = self.visit(assign.targets[0])
# if it is a tuple, then it is an array assignment
if isinstance(var_id, tuple):
var_index = var_id[1]
var_id: str = var_id[0]
self.store_variable(var_id, assign.value, var_index)
def visit_Subscript(self, subscript: ast.Subscript):
"""
Visitor of a subscript node
:param subscript: the python ast subscript node
"""
if isinstance(subscript.ctx, ast.Load):
# get item
self.visit_to_generate(subscript.value)
self.visit_to_generate(subscript.slice.value)
self.generator.convert_get_array_item()
else:
# set item
var_id = self.visit(subscript.value)
return var_id, subscript.slice.value
def visit_BinOp(self, bin_op: ast.BinOp):
"""
Visitor of a binary operation node
:param bin_op: the python ast binary operation node
"""
if isinstance(bin_op.op, BinaryOperation):
self.visit_to_generate(bin_op.left)
self.visit_to_generate(bin_op.right)
self.generator.convert_operation(bin_op.op)
def visit_UnaryOp(self, un_op: ast.UnaryOp):
"""
Visitor of a binary operation node
:param un_op: the python ast binary operation node
"""
if isinstance(un_op.op, UnaryOperation):
self.visit_to_generate(un_op.operand)
self.generator.convert_operation(un_op.op)
def visit_Compare(self, compare: ast.Compare):
"""
Visitor of a compare operation node
:param compare: the python ast compare operation node
"""
converted: bool = False
left = compare.left
for index, op in enumerate(compare.ops):
right = compare.comparators[index]
if isinstance(op, BinaryOperation):
self.visit_to_generate(left)
self.visit_to_generate(right)
self.generator.convert_operation(op)
# if it's more than two comparators, must include AND between the operations
if not converted:
converted = True
else:
self.generator.convert_operation(BinaryOp.And)
left = right
def visit_BoolOp(self, bool_op: ast.BoolOp):
"""
Visitor of a compare operation node
:param bool_op: the python ast boolean operation node
"""
if isinstance(bool_op.op, BinaryOperation):
left = bool_op.values[0]
self.visit_to_generate(left)
for index, right in enumerate(bool_op.values[1:]):
self.visit_to_generate(right)
self.generator.convert_operation(bool_op.op)
def visit_While(self, while_node: ast.While):
"""
Verifies if the type of while test is valid
:param while_node: the python ast while statement node
"""
start_addr: int = self.generator.convert_begin_while()
for stmt in while_node.body:
self.visit_to_generate(stmt)
test_address: int = self.generator.address
self.visit_to_generate(while_node.test)
self.generator.convert_end_while(start_addr, test_address)
for stmt in while_node.orelse:
self.visit_to_generate(stmt)
def visit_If(self, if_node: ast.If):
"""
Verifies if the type of if test is valid
:param if_node: the python ast if statement node
"""
self.visit_to_generate(if_node.test)
start_addr: int = self.generator.convert_begin_if()
for stmt in if_node.body:
self.visit_to_generate(stmt)
if len(if_node.orelse) > 0:
start_addr = self.generator.convert_begin_else(start_addr)
for stmt in if_node.orelse:
self.visit_to_generate(stmt)
self.generator.convert_end_if(start_addr)
def visit_IfExp(self, if_node: ast.IfExp):
"""
Verifies if the type of if test is valid
:param if_node: the python ast if statement node
"""
self.visit_to_generate(if_node.test)
start_addr: int = self.generator.convert_begin_if()
self.visit_to_generate(if_node.body)
start_addr = self.generator.convert_begin_else(start_addr)
self.visit_to_generate(if_node.orelse)
self.generator.convert_end_if(start_addr)
def visit_Name(self, name: ast.Name) -> str:
"""
Visitor of a name node
:param name: the python ast name identifier node
:return: the identifier of the name
"""
return name.id
def visit_NameConstant(self, constant: ast.NameConstant):
"""
Visitor of constant names node
:param constant: the python ast name constant node
:return: the value of the constant
"""
self.generator.convert_literal(constant.value)
def visit_Num(self, num: ast.Num):
"""
Visitor of literal number node
:param num: the python ast number node
"""
self.generator.convert_literal(num.n)
def visit_Str(self, str: ast.Str):
"""
Visitor of literal string node
:param str: the python ast string node
"""
self.generator.convert_literal(str.s)
def visit_Tuple(self, tup_node: ast.Tuple):
"""
Visitor of literal tuple node
:param tup_node: the python ast string node
:return: the value of the tuple
"""
tup = tuple([value for value in tup_node.elts])
length = len(tup_node.elts)
self.generator.convert_new_array(length)
for index, value in enumerate(tup_node.elts):
self.generator.convert_set_new_array_item_at(index)
self.visit_to_generate(value)
self.generator.convert_set_array_item()
```
#### File: boa3/compiler/compiler.py
```python
from boa3.analyser.analyser import Analyser
from boa3.compiler.codegenerator import CodeGenerator
from boa3.compiler.filegenerator import FileGenerator
from boa3.exception.NotLoadedException import NotLoadedException
class Compiler:
"""
The main compiler class.
:ivar bytecode: the compiled file as a byte array. Empty by default.
"""
def __init__(self):
self.bytecode: bytearray = bytearray()
self.__analyser: Analyser = None
def compile(self, path: str) -> bytes:
"""
Load a Python file and tries to compile it
:param path: the path of the Python file to compile
:return: the bytecode of the compiled .nef file
"""
self.__analyse(path)
return self.__compile()
def compile_and_save(self, path: str, output_path: str):
"""
Save the compiled file and the metadata files
:param path: the path of the Python file to compile
:param output_path: the path to save the generated files
"""
self.__analyse(path)
self.bytecode = self.__compile()
self.__save(output_path)
def __analyse(self, path: str):
"""
Load a Python file and analyses its syntax
:param path: the path of the Python file to compile
"""
self.__analyser = Analyser.analyse(path)
def __compile(self) -> bytes:
"""
Compile the analysed Python file.
:return: the compiled file as a bytecode.
:raise NotLoadedException: raised if none file were analysed
"""
if not self.__analyser.is_analysed:
raise NotLoadedException
return CodeGenerator.generate_code(self.__analyser)
def __save(self, output_path: str):
"""
Save the compiled file and the metadata files
:param output_path: the path to save the generated files
:raise NotLoadedException: raised if no file were compiled
"""
if (self.__analyser is None
or not self.__analyser.is_analysed
or len(self.bytecode) == 0):
raise NotLoadedException
generator = FileGenerator(self.bytecode, self.__analyser.symbol_table)
nef_bytes = generator.generate_nef_file()
with open(output_path, 'wb+') as nef_file:
nef_file.write(nef_bytes)
nef_file.close()
manifest_path = output_path.replace('.nef', '.manifest.json')
manifest_bytes = generator.generate_manifest_file()
with open(manifest_path, 'wb+') as manifest_file:
manifest_file.write(manifest_bytes)
manifest_file.close()
```
#### File: boa3/exception/CompilerError.py
```python
from abc import ABC
class CompilerError(ABC, Exception):
"""
An interface for compilation errors
"""
def __init__(self, line: int, col: int, message: str = None):
self.line: int = line
self.col: int = col
self.message = "%s:%s" % (line, col)
if message is not None:
self.message += " - %s" % message
def __str__(self) -> str:
return self.message
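# Example (illustrative): str(TypeHintMissing(4, 8, 'a')) evaluates to
# "4:8 - Type hint is missing for the symbol 'a'".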
class TypeHintMissing(CompilerError):
"""
An error raised when type hint cannot be found
"""
def __init__(self, line: int, col: int, symbol_id: str = None):
message = None
if symbol_id is not None:
message = "Type hint is missing for the symbol '%s'" % symbol_id
super().__init__(line, col, message)
class InvalidType(CompilerError):
"""
An error raised when a type that is not supported by Neo VM is used
"""
def __init__(self, line: int, col: int, symbol_id: str = None):
message = "Invalid type"
if symbol_id is not None:
message += ": '%s'" % symbol_id
super().__init__(line, col, message)
class NotSupportedOperation(CompilerError):
"""
An error raised when an operation that is not supported by Neo VM is used
"""
def __init__(self, line: int, col: int, symbol_id: str):
message = "The following operation is not supported: '%s'" % symbol_id
super().__init__(line, col, message)
class UnresolvedReference(CompilerError):
"""
An error raised when an undefined symbol is used
"""
def __init__(self, line: int, col: int, symbol_id: str):
message = "Unresolved reference '%s'" % symbol_id
super().__init__(line, col, message)
class UnresolvedOperation(CompilerError):
"""
An error raised when an undefined symbol is used
"""
def __init__(self, line: int, col: int, type_id: str, operation_id: str):
message = "Unresolved reference '%s' does not have a definition of '%s' operator" % (type_id, operation_id)
super().__init__(line, col, message)
class MismatchedTypes(CompilerError):
"""
An error raised when the evaluated and expected types are not the same
"""
def __init__(self, line: int, col: int, expected_type_id: str, actual_type_id: str):
message = "Expected type '%s', got '%s' instead" % (expected_type_id, actual_type_id)
super().__init__(line, col, message)
class TooManyReturns(CompilerError):
"""
An error raised when a function returns a tuple
"""
def __init__(self, line: int, col: int):
message = "Too many returns"
super().__init__(line, col, message)
class IncorrectNumberOfOperands(CompilerError):
"""
An error raised when an operation is used with the wrong number of operands
"""
def __init__(self, line: int, col: int, expected_count: int, actual_count: int):
message = "Incorrect number of operands: expected '%s', got '%s' instead" % (expected_count, actual_count)
super().__init__(line, col, message)
```
#### File: boa3/exception/CompilerWarning.py
```python
from abc import ABC
class CompilerWarning(ABC):
def __init__(self, line: int, col: int):
self.line: int = line
self.col: int = col
```
#### File: boa3/model/expression.py
```python
from abc import abstractmethod
from boa3.model.symbol import ISymbol
from boa3.model.type.itype import IType
class IExpression(ISymbol):
"""
An interface used to represent expressions
"""
@property
@abstractmethod
def type(self) -> IType:
"""
Gets the type of the evaluated expression
:return: the resulting type when the expression is evaluated
"""
pass
```
#### File: operation/binary/binaryoperation.py
```python
from abc import ABC, abstractmethod
from typing import List
from boa3.model.operation.operation import IOperation
from boa3.model.type.type import IType
class BinaryOperation(IOperation, ABC):
"""
An interface used to represent binary operations
:ivar operator: the operator of the operation. Inherited from :class:`IOperation`
:ivar left: the left operand type. Inherited from :class:`BinaryOperation`
:ivar right: the left operand type. Inherited from :class:`BinaryOperation`
:ivar result: the result type of the operation. Inherited from :class:`IOperation`
"""
_valid_types: List[IType] = []
def __init__(self, left: IType, right: IType):
self.left_type: IType = left
self.right_type: IType = right
result = self._get_result(left, right)
super().__init__(self.operator, result)
@property
def _get_number_of_operands(self) -> int:
return 2
@abstractmethod
def _get_result(self, left: IType, right: IType) -> IType:
"""
Gets the result type of the operation given the operands types.
:param left: left operand type
:param right: right operand type
:return: the result type of the operation. Type.none if the operands are not valid.
"""
pass
@classmethod
def build(cls, left: IType, right: IType):
"""
Creates a binary operation with the given operands types
:param left: left operand type
:param right: right operand type
:return: The built operation if the operands are valid. None otherwise
:rtype: BinaryOperation or None
"""
return cls(left, right)
```
#### File: model/operation/operation.py
```python
from abc import ABC, abstractmethod
from typing import Optional
from boa3.model.operation.operator import Operator
from boa3.model.type.type import IType
from boa3.neo.vm.opcode.Opcode import Opcode
class IOperation(ABC):
"""
An interface used to represent operations
:ivar operator: the operator of the operation
:ivar result: the result type of the operation
"""
def __init__(self, operator: Operator, result_type: IType):
self.operator: Operator = operator
self.result: IType = result_type
@property
def opcode(self) -> Optional[Opcode]:
"""
Gets the operation opcode in Neo Vm
:return: the opcode if exists. None otherwise.
"""
return None
@property
@abstractmethod
def _get_number_of_operands(self) -> int:
"""
Gets the number of operands required for this operations
:return: Number of operands
"""
pass
@abstractmethod
def validate_type(self, *types: IType) -> bool:
"""
Verifies if the given operands are valid to the operation
:param types: types of the operand
:return: True if all arguments are valid. False otherwise.
"""
pass
def is_valid(self, operator: Operator, *types: IType) -> bool:
"""
Verifies if the given operator and operands are valid to the operation
:param operator:
:param types: types of the operand
:return: True if all arguments are valid. False otherwise.
"""
if len(types) != self._get_number_of_operands:
return False
return operator is self.operator and self.validate_type(*types)
@property
def is_supported(self) -> bool:
"""
Verifies if the operation is supported by the compiler
:return: True if it is supported. False otherwise.
"""
return True
```
#### File: model/type/inttype.py
```python
from typing import Any
from boa3.model.type.itype import IType
from boa3.neo.vm.type.AbiType import AbiType
class IntType(IType):
"""
A class used to represent Python int type
"""
def __init__(self):
identifier = 'int'
super().__init__(identifier)
@property
def abi_type(self) -> AbiType:
return AbiType.Integer
@classmethod
def build(cls, value: Any):
if cls.is_type_of(value):
from boa3.model.type.type import Type
return Type.int
@classmethod
def is_type_of(cls, value: Any):
return type(value) == int
```
#### File: model/type/tupletype.py
```python
from typing import Any, List
from boa3.model.type.itype import IType
from boa3.model.type.sequencetype import SequenceType
from boa3.neo.vm.type.AbiType import AbiType
class TupleType(SequenceType):
"""
A class used to represent Python tuple type
"""
def __init__(self, values_type: List[IType] = None):
identifier = 'tuple'
values_type = self.filter_types(values_type)
super().__init__(identifier, values_type)
@property
def abi_type(self) -> AbiType:
return AbiType.Array # TODO: change when 'bytes' is implemented
def is_valid_key(self, value_type: IType) -> bool:
return value_type == self.valid_key
@property
def valid_key(self) -> IType:
from boa3.model.type.type import Type
return Type.int
@classmethod
def build(cls, value: Any):
if cls.is_type_of(value):
values_types: List[IType] = cls.get_types(value)
return cls(values_types)
@classmethod
def is_type_of(cls, value: Any):
return type(value) == tuple
def __eq__(self, other) -> bool:
if type(self) != type(other):
return False
return self.value_type == other.value_type
def __hash__(self):
return hash(self.identifier + self.value_type.identifier)
```
#### File: boa3/model/variable.py
```python
from boa3.model.expression import IExpression
from boa3.model.type.itype import IType
class Variable(IExpression):
"""
A class used to represent a variable
:ivar var_type: the type of the variable.
"""
def __init__(self, var_type: IType):
self.__var_type: IType = var_type
@property
def type(self) -> IType:
return self.__var_type
```
#### File: neo/cryptography/__init__.py
```python
import hashlib
def hash160(byte_array) -> bytes:
"""
Get a hash of the provided message using the ripemd160 algorithm.
:param byte_array: data to hash.
:type byte_array: bytearray or bytes
:return: hashed data
:rtype: bytes
"""
intermed = sha256(byte_array)
return hashlib.new('ripemd160', intermed).digest()
def sha256(byte_array) -> bytes:
"""
Perform a SHA256 operation on the input.
:param byte_array: data to hash.
:type byte_array: bytearray or bytes
:return: hashed data
:rtype: bytes
"""
return hashlib.sha256(byte_array).digest()
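# Usage example (well-known SHA-256 test vector, shown for illustration):
# sha256(b'abc').hex() == 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'
# hash160(data) then applies RIPEMD-160 to that SHA-256 digest.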
```
#### File: example/arithmetic_test/Concatenation.py
```python
def Main(a: str, b: str) -> str:
return a + b
```
#### File: example/arithmetic_test/MismatchedOperandUnary.py
```python
def Main(a: str) -> str:
return -a
```
#### File: example/variable_test/AssignmentWithTuples.py
```python
def Main():
a, b = 2, '3'
```
#### File: example/variable_test/UsingUndeclaredVariable.py
```python
def Main(a: int) -> int:
test1 = a
test2 = 3
test3 = b
return test2
```
#### File: example/while_test/WhileBoa2Test1.py
```python
def Main() -> int:
j = 3
while j < 6:
j = j + 1
return j
```
#### File: boa3_test/tests/boa_test.py
```python
import os
from unittest import TestCase
from boa3.analyser.analyser import Analyser
from boa3.compiler.compiler import Compiler
class BoaTest(TestCase):
dirname: str = None
@classmethod
def setUpClass(cls):
path = os.path.abspath(__file__).replace('\\', '/') # for windows compatibility
cls.dirname = '/'.join(path.split('/')[:-3])
super(BoaTest, cls).setUpClass()
def get_compiler_analyser(self, compiler: Compiler) -> Analyser:
return compiler._Compiler__analyser
``` |
{
"source": "jpliquid/testActions2",
"score": 2
} |
#### File: uuv_control_utils/scripts/set_gm_current_perturbation.py
```python
import rclpy
import sys
from numpy import pi
from uuv_world_ros_plugins_msgs.srv import *
from plankton_utils.time import is_sim_time
def main():
rclpy.init()
sim_time_param = is_sim_time()
node = rclpy.create_node(
'set_gm_current_perturbation',
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True,
parameter_overrides=[sim_time_param]
)
node.get_logger().info('Starting current perturbation node')
node.get_logger().info('Programming the generation of a current perturbation')
params = ['component', 'mean', 'min', 'max', 'noise', 'mu']
values = dict()
for p in params:
assert node.has_parameter(p)
values[p] = node.get_parameter(p).value
assert values['component'] in ['velocity', 'horz_angle', 'vert_angle']
if values['component'] == 'velocity':
assert values['mean'] > 0
else:
values['min'] *= pi / 180.0
values['max'] *= pi / 180.0
values['mean'] *= pi / 180.0
assert values['min'] < values['max']
assert values['noise'] >= 0
assert values['mu'] >= 0
set_model = node.create_client(
SetCurrentModel,
'/hydrodynamics/set_current_%s_model' % values['component'])
if not set_model.wait_for_service(timeout_sec=30):
raise RuntimeError("Service %s not running" % (set_model.srv_name))
req = SetCurrentModel.Request()
req.mean = values['mean']
req.min = values['min']
req.max = values['max']
    req.noise = values['noise']
req.mu = values['mu']
future = set_model.call_async(req)
    rclpy.spin_until_future_complete(node, future)
    if future.result() is not None:
        prop = future.result()
        if prop.success:
            node.get_logger().info('Model for <{}> set successfully!'.format(values['component']))
        else:
            node.get_logger().info('Error setting model!')
#==============================================================================
if __name__ == '__main__':
try:
main()
except Exception as e:
print('Caught exception: ' + str(e))
finally:
if rclpy.ok():
rclpy.shutdown()
```
#### File: uuv_control_utils/scripts/start_circular_trajectory.py
```python
import rclpy
import sys
from numpy import pi
from uuv_control_msgs.srv import InitCircularTrajectory
from geometry_msgs.msg import Point
from std_msgs.msg import Time
from plankton_utils.time import time_in_float_sec
from plankton_utils import float_sec_to_int_sec_nano
from plankton_utils.time import is_sim_time
def main():
rclpy.init()
sim_time_param = is_sim_time()
node = rclpy.create_node(
'start_circular_trajectory',
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True,
parameter_overrides=[sim_time_param])
# sim_time = rclpy.parameter.Parameter('use_sim_time', rclpy.Parameter.Type.BOOL, True)
# node.set_parameters([sim_time])
node.get_logger().info('Starting the circular trajectory creator')
#Important...ensure the clock has been updated when using sim time
while node.get_clock().now() == rclpy.time.Time():
rclpy.spin_once(node)
# If no start time is provided: start *now*.
start_time = time_in_float_sec(node.get_clock().now())
start_now = False
if node.has_parameter('start_time'):
start_time = node.get_parameter('start_time').value
if start_time < 0.0:
node.get_logger().warn('Negative start time, setting it to 0.0')
start_time = 0.0
start_now = True
else:
start_now = True
param_labels = ['radius', 'center', 'n_points', 'heading_offset',
'duration', 'max_forward_speed']
params = dict()
for label in param_labels:
if not node.has_parameter(label):
node.get_logger().error('{} must be provided for the trajectory generation!'.format(label))
sys.exit(-1)
params[label] = node.get_parameter(label).value
if len(params['center']) != 3:
node.get_logger().error('Center of circle must have 3 components (x, y, z)')
sys.exit(-1)
if params['n_points'] <= 2:
node.get_logger().error('Number of points must be at least 2')
sys.exit(-1)
if params['max_forward_speed'] <= 0:
node.get_logger().error('Velocity limit must be positive')
sys.exit(-1)
srv_name = 'start_circular_trajectory'
traj_gen = node.create_client(InitCircularTrajectory, srv_name)
if not traj_gen.wait_for_service(timeout_sec=20):
node.get_logger().error('Service %s not available! Closing node...' %(traj_gen.srv_name))
sys.exit(-1)
node.get_logger().info('Generating trajectory that starts at t={} s'.format(start_time))
#Convert the time value
(sec, nsec) = float_sec_to_int_sec_nano(start_time)
req = InitCircularTrajectory.Request()
req.start_time = rclpy.time.Time(seconds=sec, nanoseconds=nsec).to_msg()
req.start_now = start_now
    req.radius = params['radius']
    req.center = Point(x=params['center'][0], y=params['center'][1], z=params['center'][2])
req.is_clockwise = False
req.angle_offset = 0.0
req.n_points = params['n_points']
req.heading_offset = params['heading_offset'] * pi / 180
req.max_forward_speed = params['max_forward_speed']
req.duration = params['duration']
future = traj_gen.call_async(req)
    rclpy.spin_until_future_complete(node, future)
try:
response = future.result()
except Exception as e:
        node.get_logger().error('Service call ' + srv_name + ' failed, error=' + str(e))
else:
node.get_logger().info('Trajectory successfully generated!')
#success = traj_gen.call(req)
# if success:
# print('Trajectory successfully generated!')
# else:
# print('Failed')
#==============================================================================
if __name__ == '__main__':
try:
main()
except Exception as e:
print('Something went wrong: ' + str(e))
finally:
if rclpy.ok():
rclpy.shutdown()
```
#### File: uuv_thruster_manager/test/test_thruster_allocator_y_axis.test.py
```python
import numpy as np
import unittest
import time
import rclpy
from geometry_msgs.msg import WrenchStamped
from uuv_thruster_manager.srv import GetThrusterManagerConfig
from uuv_thruster_manager.srv import ThrusterManagerInfo
from uuv_thruster_manager.srv import GetThrusterCurve
from uuv_thruster_manager.srv import SetThrusterManagerConfig
from uuv_gazebo_ros_plugins_msgs.msg import FloatStamped
import launch
from launch.actions import DeclareLaunchArgument, GroupAction, IncludeLaunchDescription
from launch.launch_description_sources import AnyLaunchDescriptionSource
from launch import LaunchDescription
import launch_testing.actions
import launch_ros
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
import os
import pathlib
import xacro
import pytest
# Other imports
NS = 'test_vehicle'
AXIS = 'y'
AXIS_X_TAM = np.array([
[1, 0, 0, 0, 0, 0],
[0.87758256, 0, -0.47942554, 0.47942554, 0.47942554, 0.87758256],
[0.87758256, 0.47942554, 0, -0.47942554, 0.87758256, -0.87758256]
]).T
AXIS_Y_TAM = np.array([
[0, 0.87758256, 0.47942554, 0, 0.47942554, -0.87758256],
[0, 1, 0, 0, 0, 1],
[-0.47942554, 0.87758256, 0, -0.87758256, -0.47942554, 0.47942554]
]).T
AXIS_Z_TAM = np.array([
[0, -0.47942554, 0.87758256, 0, 0.87758256, 0.47942554],
[0.47942554, 0, 0.87758256, -0.87758256, -0.87758256, 0.47942554],
[0., 0., 1., 1., 0., 0.]
]).T
def ensure_path_exists(full_path):
dir_name = os.path.dirname(full_path)
if dir_name:
try:
os.makedirs(dir_name)
except OSError:
print ("Creation of the directory %s failed" % dir_name)
class TestThrusterAllocator(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Initialize the ROS context for the test node
rclpy.init()
# =========================================================================
@classmethod
def tearDownClass(cls):
# Shutdown the ROS context
rclpy.shutdown()
# =========================================================================
def setUp(self):
# Create a ROS node for tests (node name in isolated env does not matter)
self.node = rclpy.create_node('test_thrusters_allocator',
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True)
# =========================================================================
def tearDown(self):
self.node.destroy_node()
# =========================================================================
def test_services_exist(self):
services = list()
services.append(self.node.create_client(
ThrusterManagerInfo, '/{}/thruster_manager/get_thrusters_info'.format(NS)
))
services.append(self.node.create_client(
GetThrusterCurve, '/{}/thruster_manager/get_thruster_curve'.format(NS)
))
services.append(self.node.create_client(
SetThrusterManagerConfig, '/{}/thruster_manager/set_config'.format(NS)
))
services.append(self.node.create_client(
GetThrusterManagerConfig, '/{}/thruster_manager/get_config'.format(NS)
))
for srv in services:
self.assertTrue(srv.wait_for_service(timeout_sec=20), 'service %s not ready' % srv.srv_name)
# =========================================================================
def test_config(self):
#axis = self.node.get_parameter('axis').value
# ref_config = rospy.get_param('/{}/thruster_manager'.format(NS))
srv_name = '/{}/thruster_manager/get_config'.format(NS)
cli = self.node.create_client(GetThrusterManagerConfig, srv_name)
if not cli.wait_for_service(timeout_sec=30):
self.fail('service %s not available...' % srv_name)
# srv = rospy.ServiceProxy('/{}/thruster_manager/get_config'.format(NS), GetThrusterManagerConfig)
# tm_config = srv()
req = GetThrusterManagerConfig.Request()
future = cli.call_async(req)
rclpy.spin_until_future_complete(self.node, future)
tm_config = future.result()
self.assertEqual(tm_config.tf_prefix, '/test_vehicle/')
self.assertEqual(tm_config.base_link, 'base_link')
self.assertEqual(tm_config.thruster_frame_base, 'thruster_')
self.assertEqual(tm_config.thruster_topic_suffix, '/input')
self.assertEqual(tm_config.timeout, -1)
self.assertEqual(tm_config.max_thrust, 1000.0)
self.assertEqual(tm_config.n_thrusters, 3)
if AXIS == 'x':
tam_flat = AXIS_X_TAM.flatten()
elif AXIS == 'y':
tam_flat = AXIS_Y_TAM.flatten()
elif AXIS == 'z':
tam_flat = AXIS_Z_TAM.flatten()
self.assertEqual(len(tm_config.allocation_matrix), tam_flat.size, tam_flat)
for x, y in zip(tam_flat, tm_config.allocation_matrix):
self.assertAlmostEqual(x, y)
# =============================================================================
@pytest.mark.rostest
def generate_test_description():
#path_to_test = os.path.dirname(__file__)
file_path = pathlib.Path(__file__)
    # .parent strips the file name, leaving the directory that contains this test
parent_file_path = pathlib.Path(__file__).parent
thruster_manager_launch = os.path.join(
get_package_share_directory('uuv_thruster_manager'),
'launch',
'thruster_manager.launch'
)
thruster_manager_yaml = os.path.join(
str(parent_file_path),
'test_vehicle_thruster_manager_proportional.yaml'
)
xacro_file = os.path.join(
str(parent_file_path),
'test_vehicle_y_axis.urdf.xacro'
)
output = os.path.join(
str(parent_file_path),
'robot_description_y_axis.urdf'
)
doc = xacro.process(xacro_file)
ensure_path_exists(output)
try:
with open(output, 'w') as file_out:
file_out.write(doc)
except IOError as e:
        print('Failed to open output %s: %s' % (output, e))
args = output.split()
# ('axis', 'x')
launch_args = [('model_name', 'test_vehicle'),
('output_dir', '/tmp'), ('config_file', thruster_manager_yaml), ('reset_tam', 'true'), ('urdf_file', output)]
thruster_manager_launch_desc = IncludeLaunchDescription(
AnyLaunchDescriptionSource(thruster_manager_launch), launch_arguments=launch_args)
joint_state_publisher = launch_ros.actions.Node(
node_namespace = 'test_vehicle',
package="joint_state_publisher",
node_executable="joint_state_publisher",
node_name="joint_state_publisher",
#parameters=[{'source_list':'test_vehicle/joint_states'}],
#remappings=[('joint_states', '/test_vehicle/joint_states')],
arguments=args,
output='screen',
parameters=[{'use_sim_time':False}, {'rate': 100}],
)
robot_state_description = launch_ros.actions.Node(
node_namespace = 'test_vehicle',
package='robot_state_publisher',
node_executable='robot_state_publisher',
#parameters=[{'robot_description', doc}]
# TODO To replace in foxy with parameters=[{'robot_description', Command('ros2 run xacro...')}]
arguments=args,
output='screen',
parameters=[{'use_sim_time':False}], # Use subst here
)
return (
launch.LaunchDescription([
joint_state_publisher,
robot_state_description,
thruster_manager_launch_desc,
# Start tests right away - no need to wait for anything
launch_testing.actions.ReadyToTest(),
])
)
```
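The test above exercises the thruster manager services through `launch_testing`. As a standalone illustration (not part of the test suite), the sketch below mirrors the async service-call pattern used in `test_config` to query the manager configuration from a plain script; the `test_vehicle` namespace and the service path are the ones assumed by the test.
```python
# Minimal sketch, assuming a thruster manager is already running under the
# 'test_vehicle' namespace (as in the launch description above).
import rclpy
from uuv_thruster_manager.srv import GetThrusterManagerConfig


def query_tm_config(namespace='test_vehicle'):
    rclpy.init()
    node = rclpy.create_node('tm_config_client')
    cli = node.create_client(
        GetThrusterManagerConfig,
        '/{}/thruster_manager/get_config'.format(namespace))
    try:
        if not cli.wait_for_service(timeout_sec=10.0):
            node.get_logger().error('get_config service not available')
            return None
        future = cli.call_async(GetThrusterManagerConfig.Request())
        rclpy.spin_until_future_complete(node, future)
        config = future.result()
        # Same fields as checked in test_config above
        node.get_logger().info('n_thrusters = %d' % config.n_thrusters)
        return config
    finally:
        node.destroy_node()
        rclpy.shutdown()


if __name__ == '__main__':
    query_tm_config()
```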
#### File: uuv_thruster_manager/test/test_thrusters.py
```python
import unittest
import numpy as np
import random
import rclpy
from tf_quaternion import transformations
from uuv_thrusters import ThrusterManager
from uuv_thrusters.models import Thruster
IDX = 0
TOPIC = '/thruster'
AXES = [
np.array([1, 0, 0, 0]),
np.array([0, 1, 0, 0]),
np.array([0, 0, 1, 0])
]
def get_force_vector(pos, orientation, axis):
    # Rotate the thruster axis into the body frame to obtain the unit thrust force
    thrust_body = transformations.quaternion_matrix(orientation).dot(
        axis.transpose())[0:3]
    # Torque generated by that force applied at the thruster position
    torque_body = np.cross(pos, thrust_body)
    # Stacked [force; torque] is the thruster's column in the allocation matrix
    return np.hstack((thrust_body, torque_body)).transpose()
# =============================================================================
class TestThrusters(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Initialize the ROS context for the test node
rclpy.init()
# =========================================================================
@classmethod
def tearDownClass(cls):
# Shutdown the ROS context
rclpy.shutdown()
# =========================================================================
def setUp(self):
# Create a ROS node for tests
self.node = rclpy.create_node('test_thrusters')
# =========================================================================
def tearDown(self):
self.node.destroy_node()
# =========================================================================
def test_thruster(self):
# Use random positions and quaternions
for axis in AXES:
pos = np.random.rand(3)
q = transformations.random_quaternion()
thruster = Thruster(
self.node,
index=IDX,
topic=TOPIC,
pos=pos,
orientation=q,
axis=axis)
self.assertEqual(thruster.index, IDX)
self.assertEqual(thruster.topic, TOPIC)
self.assertTrue((thruster.tam_column == get_force_vector(pos, q, axis)).all())
# =========================================================================
def test_thruster_proportional(self):
# Use random positions and quaternions
for axis in AXES:
pos = np.random.rand(3)
q = transformations.random_quaternion()
gain = random.random()
thruster = Thruster.create_thruster(
self.node,
'proportional',
IDX,
TOPIC,
pos,
q,
axis,
gain=gain)
self.assertEqual(thruster.index, IDX)
self.assertEqual(thruster.topic, TOPIC)
self.assertTrue((thruster.tam_column == get_force_vector(pos, q, axis)).all())
self.assertEqual(thruster.get_thrust_value(0), 0)
self.assertEqual(thruster.get_command_value(0), 0)
command = np.linspace(-100, 100, 10)
for x in command:
y = thruster.get_thrust_value(x)
self.assertEqual(y, gain * np.abs(x) * x)
thrust = np.linspace(-50000, 50000, 10)
for x in thrust:
y = thruster.get_command_value(x)
self.assertEqual(y, np.sign(x) * np.sqrt(np.abs(x) / gain))
# =========================================================================
def test_thruster_custom(self):
input_values = np.linspace(-50, 50, 10)
output_values = np.linspace(-10000, 10000, 10)
gain = 20000.0 / 100.0
# Use random positions and quaternions
for axis in AXES:
pos = np.random.rand(3)
q = transformations.random_quaternion()
thruster = Thruster.create_thruster(
self.node,
'custom',
IDX,
TOPIC,
pos,
q,
axis,
input=input_values,
output=output_values)
self.assertEqual(thruster.index, IDX)
self.assertEqual(thruster.topic, TOPIC)
self.assertTrue((thruster.tam_column == get_force_vector(pos, q, axis)).all())
self.assertTrue(np.isclose(thruster.get_thrust_value(0), 0))
self.assertTrue(np.isclose(thruster.get_command_value(0), 0))
x = random.random() * 10
self.assertTrue(np.isclose(thruster.get_thrust_value(x), gain * x))
y = random.random() * 10000
self.assertTrue(np.isclose(thruster.get_command_value(y), y / gain))
# if __name__ == '__main__':
# import rosunit
# rosunit.unitrun(PKG, 'test_thrusters', TestThrusters)
```
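The assertions in `test_thruster_proportional` pin down the conversion used by the proportional thruster model: thrust is `gain * |command| * command`, and the command is recovered with the inverse relation. The sketch below restates that relation as plain functions for clarity; it is not the `Thruster` implementation itself, only the mapping the test verifies.
```python
# Standalone sketch of the conversion relation asserted in
# test_thruster_proportional; the gain value here is arbitrary.
import numpy as np


def proportional_thrust(command, gain):
    """Thrust produced for a given command value (quadratic, sign-preserving)."""
    return gain * np.abs(command) * command


def proportional_command(thrust, gain):
    """Command required to produce a given thrust (inverse of the above)."""
    return np.sign(thrust) * np.sqrt(np.abs(thrust) / gain)


if __name__ == '__main__':
    gain = 0.0005
    for x in np.linspace(-100, 100, 11):
        # The round trip should recover the original command value
        assert np.isclose(proportional_command(proportional_thrust(x, gain), gain), x)
```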
#### File: src/uuv_control_interfaces/dp_controller_local_planner.py
```python
import rclpy
import logging
import sys
import time
import numpy as np
from copy import deepcopy
from os.path import isfile
from threading import Lock, Event
from std_msgs.msg import Bool, Float64
from geometry_msgs.msg import Twist
from uuv_control_msgs.srv import *
from uuv_control_msgs.msg import Trajectory, TrajectoryPoint, WaypointSet
from visualization_msgs.msg import MarkerArray
from geometry_msgs.msg import Point
import uuv_trajectory_generator
import uuv_waypoints
from tf_quaternion.transformations import quaternion_about_axis, quaternion_multiply, \
quaternion_inverse, quaternion_matrix, euler_from_quaternion, quaternion_from_euler
from ._log import get_logger
from rclpy.node import Node
from plankton_utils.param_helper import get_parameter_or_helper
from plankton_utils.time import time_in_float_sec as to_fsec
from plankton_utils.time import float_sec_to_int_sec_nano
# TODO Rewrite for TF2
class DPControllerLocalPlanner(object):
"""Local planner for the dynamic positioning controllers
to interpolate trajectories and generate trajectories from
interpolated waypoint paths.
> *Input parameters*
    * `full_dof` (*type:* `bool`, *default:* `False`): If `True`,
      the trajectory reference will be computed for 6 DoF,
      otherwise for 4 DoF `(x, y, z, yaw)`.
* `stamped_pose_only` (*type:* `bool`, *default:* `False`): If
`True`, only stamped poses will be generated as a reference, with
velocity and acceleration reference being set to zero.
* `thrusters_only` (*type:* `bool`, *default:* `True`): If `False`,
the idle mode will be used to keep the vehicle moving.
> *ROS parameters*
* `max_forward_speed` (*type:* `float`, *default:* `1.0`): Maximum
allowed forward speed.
* `idle_radius` (*type:* `float`, *default:* `10.0`): Radius of the circle
path generated when an AUV is in idle mode.
* `inertial_frame_id` (*type:* `str`): Name of the inertial frame used,
options are `world` or `world_ned`.
* `timeout_idle_mode` (*type:* `float`): Timeout at the start or after
a trajectory is finished where the AUV is set to start idle mode path.
    * `look_ahead_delay` (*type:* `float`): Look-ahead delay in seconds. This
      parameter offsets the interpolation of the trajectory by the given
      number of seconds to compute the look-ahead target for AUVs.
!!! warning
The parameters for the path interpolators must also be provided when
starting a node that includes the local planner, since the interpolators
are initialized by the local planner.
> *ROS publishers*
* `trajectory` (*type:* `uuv_control_msgs.Trajectory`): Generated trajectory or
stamped pose path.
* `waypoints` (*type:* `uuv_control_msgs.WaypointSet`): Set of waypoints provided
as input for the interpolator
* `station_keeping_on` (*type:* `std_msgs.Bool`): Status of the station keeping mode
    * `automatic_on` (*type:* `std_msgs.Bool`): Status of automatic mode. If `False`,
the vehicle can receive control inputs from a teleop node.
* `trajectory_tracking_on` (*type:* `std_msgs.Bool`): Sets the output flag to `True`
when trajectory tracking is ongoing
* `interpolator_visual_markers` (*type:* `visualization_msgs.MarkerArray`): Helper
visual markers from the interpolator class.
* `time_to_target` (*type:* `std_msgs.Float64`): Estimated time to target in seconds.
> *ROS services*
* `hold_vehicle` (*type:* `uuv_control_msgs.Hold`)
* `start_waypoint_list` (*type:* `uuv_control_msgs.InitWaypointSet`)
* `start_circular_trajectory` (*type:* `uuv_control_msgs.InitCircularTrajectory`)
* `start_helical_trajectory` (*type:* `uuv_control_msgs.InitHelicalTrajectory`)
* `init_waypoints_from_file` (*type:* `uuv_control_msgs.InitWaypointsFromFile`)
* `go_to` (*type:* `uuv_control_msgs.GoTo`)
* `go_to_incremental` (*type:* `uuv_control_msgs.GoToIncremental`)
"""
def __init__(self, node: Node, full_dof=False, stamped_pose_only=False, thrusters_only=True):
self.node = node
self._logger = get_logger()
self._lock = Lock()
self._traj_interpolator = uuv_trajectory_generator.TrajectoryGenerator(
self.node, full_dof=full_dof, stamped_pose_only=stamped_pose_only)
# Max. allowed forward speed
self._max_forward_speed = get_parameter_or_helper(node, 'max_forward_speed', 1.0).value
self._idle_circle_center = None
self._idle_z = None
self._logger.info('Max. forward speed [m/s]=%.2f' % self._max_forward_speed)
self._idle_radius = get_parameter_or_helper(node, 'idle_radius', 10.0).value
assert self._idle_radius > 0
self._logger.info('Idle circle radius [m] = %.2f' % self._idle_radius)
# Is underactuated?
self._is_underactuated = get_parameter_or_helper(node, 'is_underactuated', False).get_parameter_value().bool_value
self.inertial_frame_id = 'world'
self.transform_ned_to_enu = None
self.q_ned_to_enu = None
if node.has_parameter('inertial_frame_id'):
self.inertial_frame_id = node.get_parameter('inertial_frame_id').get_parameter_value().string_value
assert len(self.inertial_frame_id) > 0
assert self.inertial_frame_id in ['world', 'world_ned']
self._logger.info('Inertial frame ID=' + self.inertial_frame_id)
#node.set_parameter('inertial_frame_id', self.inertial_frame_id)
try:
import tf2_ros
tf_buffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tf_buffer, node)
tf_trans_ned_to_enu = tf_buffer.lookup_transform(
'world', 'world_ned', rclpy.time.Time(),
rclpy.time.Duration(seconds=10))
self.q_ned_to_enu = np.array(
[tf_trans_ned_to_enu.transform.rotation.x,
tf_trans_ned_to_enu.transform.rotation.y,
tf_trans_ned_to_enu.transform.rotation.z,
tf_trans_ned_to_enu.transform.rotation.w])
except Exception as ex:
self._logger.warning(
'Error while requesting ENU to NED transform'
', message={}'.format(ex))
self.q_ned_to_enu = quaternion_from_euler(2 * np.pi, 0, np.pi)
self.transform_ned_to_enu = quaternion_matrix(
self.q_ned_to_enu)[0:3, 0:3]
if self.transform_ned_to_enu is not None:
self._logger.info('Transform world_ned (NED) to world (ENU)=\n' +
str(self.transform_ned_to_enu))
self._logger.info('Inertial frame ID=' + self.inertial_frame_id)
self._logger.info('Max. forward speed = ' +
str(self._max_forward_speed))
for method in self._traj_interpolator.get_interpolator_tags():
if node.has_parameter(method):
self._logger.info('Parameters for interpolation method <%s> found' % method)
params = node.get_parameter(method)
self._logger.info('\t' + str(params))
self._traj_interpolator.set_interpolator_parameters(method, params)
else:
self._logger.info('No parameters for interpolation method <%s> found' % method)
# dt used to compute the pose reference from the joystick input
self._dt = 0.0
# Time stamp for the last velocity reference received
self._last_teleop_update = None
# Flag to indicate if the teleoperation node is active
self._is_teleop_active = False
# Teleop node twist message
self._teleop_vel_ref = None
self.init_odom_event = Event()
self.init_odom_event.clear()
self._timeout_idle_mode = get_parameter_or_helper(node, 'timeout_idle_mode', 5.0).value
self._start_count_idle = node.get_clock().now()
self._thrusters_only = thrusters_only
if not self._thrusters_only:
self._look_ahead_delay = get_parameter_or_helper(node, 'look_ahead_delay', 3.0).value
else:
self._look_ahead_delay = 0.0
self._station_keeping_center = None
# Publishing topic for the trajectory given to the controller
self._trajectory_pub = node.create_publisher(Trajectory, 'trajectory', 1)
# Publishing waypoints
self._waypoints_pub = node.create_publisher(WaypointSet, 'waypoints', 1)
self._station_keeping_pub = node.create_publisher(Bool, 'station_keeping_on', 1)
self._automatic_control_pub = node.create_publisher(Bool, 'automatic_on', 1)
self._traj_tracking_pub = node.create_publisher(Bool,'trajectory_tracking_on', 1)
self._interp_visual_markers = node.create_publisher(MarkerArray, 'interpolator_visual_markers', 1)
self._teleop_sub = node.create_subscription(Twist, 'cmd_vel', self._update_teleop, 10)
self._waypoints_msg = None
self._trajectory_msg = None
# Subscribing topic for the trajectory given to the controller
self._input_trajectory_sub = node.create_subscription(
Trajectory, 'input_trajectory', self._update_trajectory_from_msg, 10)
self._max_time_pub = node.create_publisher(Float64, 'time_to_target', 1)
self._traj_info_update_timer = node.create_timer(0.2, self._publish_trajectory_info)
# Flag to activate station keeping
self._station_keeping_on = True
# Flag to set vehicle control to automatic
self._is_automatic = True
# Flag true if a trajectory is being tracked
self._traj_running = False
# Current vehicle pose
self._vehicle_pose = None
# Current reference point
self._this_ref_pnt = None
# Flag that indicates that a waypoint set has been initialized
self._smooth_approach_on = False
# Time stamp for received trajectory
self._stamp_trajectory_received = 0.0
# Dictionary of services
self._services = dict()
srv_name = 'hold_vehicle'
self._services[srv_name] = node.create_service(Hold, srv_name, self.hold_vehicle)
srv_name = 'start_waypoint_list'
self._services[srv_name] = node.create_service(
InitWaypointSet, srv_name, self.start_waypoint_list)
srv_name = 'start_circular_trajectory'
self._services[srv_name] = node.create_service(
InitCircularTrajectory, srv_name, self.start_circle)
srv_name = 'start_helical_trajectory'
self._services[srv_name] = node.create_service(
InitHelicalTrajectory, srv_name, self.start_helix)
srv_name = 'init_waypoints_from_file'
self._services[srv_name] = node.create_service(
InitWaypointsFromFile, srv_name, self.init_waypoints_from_file)
srv_name = 'go_to'
self._services[srv_name] = node.create_service(GoTo, srv_name, self.go_to)
srv_name = 'go_to_incremental'
self._services[srv_name] = node.create_service(
GoToIncremental, srv_name, self.go_to_incremental)
# =========================================================================
def __del__(self):
"""Remove logging message handlers"""
while self._logger.handlers:
self._logger.handlers.pop()
# =========================================================================
def _transform_position(self, vec, target, source):
"""Transform the position vector between `world` and `world_ned`.
> *Input arguments*
* `vec` (*type:* `numpy.array`): Position vector
* `target` (*type:* `str`): Target frame
* `source` (*type:* `str`): Source frame
> *Returns*
`numpy.array`: Transformed vector
"""
if target == source:
return vec
if target == 'world':
return np.dot(self.transform_ned_to_enu, vec)
if target == 'world_ned':
return np.dot(self.transform_ned_to_enu.T, vec)
# =========================================================================
def _transform_waypoint(self, waypoint):
"""Transform position vector of a waypoint between
`world` and `world_ned` frames.
> *Input arguments*
* `waypoint` (*type:* `uuv_waypoints.Waypoint`): Input waypoint
> *Returns*
`uuv_waypoints.Waypoint`: Transformed waypoint
"""
output = deepcopy(waypoint)
output.pos = self._transform_position(output.pos,
self.inertial_frame_id,
output.inertial_frame_id)
output.inertial_frame_id = self.inertial_frame_id
output.max_forward_speed = min(waypoint.max_forward_speed, self._max_forward_speed)
return output
# =========================================================================
    def _transform_waypoint_set(self, waypoint_set):
        """Apply transformation between `world` and `world_ned` frames
        to waypoints in a waypoint set.
        > *Input arguments*
        * `waypoint_set` (*type:* `uuv_waypoints.WaypointSet`): Set of waypoints
        > *Returns*
        `uuv_waypoints.WaypointSet`: Set of transformed waypoints
"""
output = uuv_waypoints.WaypointSet(
inertial_frame_id=self.inertial_frame_id)
for i in range(waypoint_set.num_waypoints):
wp = self._transform_waypoint(waypoint_set.get_waypoint(i))
output.add_waypoint(wp)
return output
# =========================================================================
def _apply_workspace_constraints(self, waypoint_set):
"""Filter out waypoints that are positioned above
sea surface, namely `z > 0` if the inertial frame is
`world`, or `z < 0` if the inertial frame is `world_ned`.
> *Input arguments*
        * `waypoint_set` (*type:* `uuv_waypoints.WaypointSet`): Set of waypoints
        > *Returns*
        `uuv_waypoints.WaypointSet`: Filtered set of waypoints
"""
wp_set = uuv_waypoints.WaypointSet(
inertial_frame_id=self.inertial_frame_id)
for i in range(waypoint_set.num_waypoints):
wp = waypoint_set.get_waypoint(i)
if wp.z > 0 and self.inertial_frame_id == 'world':
continue
if wp.z < 0 and self.inertial_frame_id == 'world_ned':
continue
wp_set.add_waypoint(wp)
return wp_set
# =========================================================================
    def _publish_trajectory_info(self, event=None):
"""Publish messages for the waypoints, trajectory and
debug flags.
"""
if self._waypoints_msg is not None:
self._waypoints_pub.publish(self._waypoints_msg)
if self._trajectory_msg is not None:
self._trajectory_pub.publish(self._trajectory_msg)
markers = self._traj_interpolator.get_visual_markers()
if markers is not None:
self._interp_visual_markers.publish(markers)
else:
self._interp_visual_markers.publish(MarkerArray())
self._station_keeping_pub.publish(Bool(data = self._station_keeping_on))
self._automatic_control_pub.publish(Bool(data = self._is_automatic))
self._traj_tracking_pub.publish(Bool(data = self._traj_running))
return True
# =========================================================================
def _update_trajectory_info(self):
"""Update the trajectory message."""
self._waypoints_msg = WaypointSet()
if self._traj_interpolator.is_using_waypoints():
wps = self._traj_interpolator.get_waypoints()
if wps is not None:
wps.inertial_frame_id = self.inertial_frame_id
self._waypoints_msg = wps.to_message(self.node)
self._waypoints_msg.header.frame_id = self.inertial_frame_id
msg = self._traj_interpolator.get_trajectory_as_message()
if msg is not None:
msg.header.frame_id = self.inertial_frame_id
self._trajectory_msg = msg
self._logger.info('Updating the trajectory information')
else:
self._trajectory_msg = None
self._logger.error('Error generating trajectory message')
# =========================================================================
def _update_teleop(self, msg):
"""Callback to the twist teleop subscriber."""
# Test whether the vehicle is in automatic mode (following a given
# trajectory)
if self._is_automatic:
self._teleop_vel_ref = None
return
# If this is the first twist message since the last time automatic mode
# was turned off, then just update the teleop timestamp and wait for
# the next message to allow computing pose and velocity reference.
if self._last_teleop_update is None:
self._teleop_vel_ref = None
self._last_teleop_update = to_fsec(self.node.get_clock().now())
return
# Store twist reference message
self._teleop_vel_ref = msg
        # Set the teleop mode as active only if any of the linear velocity
        # components or the yaw rate is non-zero
vel = np.array([self._teleop_vel_ref.linear.x, self._teleop_vel_ref.linear.y, self._teleop_vel_ref.linear.z, self._teleop_vel_ref.angular.z])
self._is_teleop_active = np.abs(vel).sum() > 0
# Store time stamp
self._last_teleop_update = to_fsec(self.node.get_clock().now())
# =========================================================================
def _calc_teleop_reference(self):
"""Compute pose and velocity reference using the
joystick linear and angular velocity input.
"""
# Check if there is already a timestamp for the last received reference
# message from the teleop node
        if self._last_teleop_update is None:
            self._is_teleop_active = False
            self._dt = 0.0
        else:
            # Compute time step
            self._dt = to_fsec(self.node.get_clock().now()) - self._last_teleop_update
# Compute the pose and velocity reference if the computed time step is
# positive and the twist teleop message is valid
if self._dt > 0 and self._teleop_vel_ref is not None and self._dt < 0.1:
speed = np.sqrt(self._teleop_vel_ref.linear.x**2 + self._teleop_vel_ref.linear.y**2)
vel = np.array([self._teleop_vel_ref.linear.x, self._teleop_vel_ref.linear.y, self._teleop_vel_ref.linear.z])
# Cap the forward speed if needed
if speed > self._max_forward_speed:
vel[0] *= self._max_forward_speed / speed
vel[1] *= self._max_forward_speed / speed
vel = np.dot(self._vehicle_pose.rot_matrix, vel)
# Compute pose step
step = uuv_trajectory_generator.TrajectoryPoint()
step.pos = np.dot(self._vehicle_pose.rot_matrix, vel * self._dt)
step.rotq = quaternion_about_axis(self._teleop_vel_ref.angular.z * self._dt, [0, 0, 1])
# Compute new reference
ref_pnt = uuv_trajectory_generator.TrajectoryPoint()
ref_pnt.pos = self._vehicle_pose.pos + step.pos
ref_pnt.rotq = quaternion_multiply(self.get_vehicle_rot(), step.rotq)
# Cap the pose reference in Z to stay underwater
if ref_pnt.z > 0:
ref_pnt.z = 0.0
ref_pnt.vel = [vel[0], vel[1], 0, 0, 0, self._teleop_vel_ref.angular.z]
else:
ref_pnt.vel = [vel[0], vel[1], vel[2], 0, 0, self._teleop_vel_ref.angular.z]
ref_pnt.acc = np.zeros(6)
else:
self._is_teleop_active = False
ref_pnt = deepcopy(self._vehicle_pose)
return ref_pnt
# =========================================================================
def _calc_smooth_approach(self):
"""Add the current vehicle position as waypoint
to allow a smooth approach to the given trajectory.
"""
if self._vehicle_pose is None:
self._logger.error('Simulation not properly initialized yet, ignoring approach...')
return
if not self._traj_interpolator.is_using_waypoints():
self._logger.error('Not using the waypoint interpolation method')
return
heading = euler_from_quaternion(self.get_vehicle_rot())[2]
if self._thrusters_only:
init_wp = uuv_waypoints.Waypoint(
x=self._vehicle_pose.pos[0],
y=self._vehicle_pose.pos[1],
z=self._vehicle_pose.pos[2],
max_forward_speed=self._traj_interpolator.get_waypoints().get_waypoint(0).max_forward_speed,
heading_offset=self._traj_interpolator.get_waypoints().get_waypoint(0).heading_offset)
else:
max_speed = self._traj_interpolator.get_waypoints().get_waypoint(0).max_forward_speed
init_wp = uuv_waypoints.Waypoint(
x=self._vehicle_pose.pos[0],# + max_speed / self._look_ahead_delay * np.cos(heading),
y=self._vehicle_pose.pos[1],# + max_speed / self._look_ahead_delay * np.sin(heading),
z=self._vehicle_pose.pos[2],
max_forward_speed=max_speed,
heading_offset=self._traj_interpolator.get_waypoints().get_waypoint(0).heading_offset)
first_wp = self._traj_interpolator.get_waypoints().get_waypoint(0)
dx = first_wp.x - init_wp.x
dy = first_wp.y - init_wp.y
dz = first_wp.z - init_wp.z
        # One new waypoint every 10 meters along the approach path
self._logger.info('Adding waypoints to approach the first position in the given waypoint set')
steps = int(np.floor(first_wp.dist(init_wp.pos)) / 10)
if steps > 0 and self._traj_interpolator.get_interp_method() != 'dubins':
for i in range(1, steps):
wp = uuv_waypoints.Waypoint(
x=first_wp.x - i * dx / steps,
y=first_wp.y - i * dy / steps,
z=first_wp.z - i * dz / steps,
max_forward_speed=self._traj_interpolator.get_waypoints().get_waypoint(0).max_forward_speed)
self._traj_interpolator.add_waypoint(wp, add_to_beginning=True)
self._traj_interpolator.add_waypoint(init_wp, add_to_beginning=True)
self._update_trajectory_info()
# =========================================================================
def is_station_keeping_on(self):
"""Return `True`, if vehicle is holding its position."""
return self._station_keeping_on
# =========================================================================
    def is_automatic_on(self):
        """Return `True` if the vehicle is following a trajectory in
        automatic mode.
"""
return self._is_automatic
# =========================================================================
def set_station_keeping(self, is_on=True):
"""Set station keeping mode flag.
> *Input arguments*
* `is_on` (*type:* `bool`, *default:* `True`): Station keeping flag
"""
self._station_keeping_on = is_on
self._logger.info('STATION KEEPING MODE = ' + ('ON' if is_on else 'OFF'))
# =========================================================================
def set_automatic_mode(self, is_on=True):
"""Set automatic mode flag."""
self._is_automatic = is_on
self._logger.info('AUTOMATIC MODE = ' + ('ON' if is_on else 'OFF'))
# =========================================================================
def set_trajectory_running(self, is_on=True):
"""Set trajectory tracking flag."""
self._traj_running = is_on
self._logger.info('TRAJECTORY TRACKING = ' + ('ON' if is_on else 'OFF'))
# =========================================================================
    def has_started(self):
        """Return `True` if the trajectory interpolator has started generating
        reference points.
"""
return self._traj_interpolator.has_started()
# =========================================================================
def has_finished(self):
"""Return `True` if the trajectory has finished."""
return self._traj_interpolator.has_finished()
# =========================================================================
def update_vehicle_pose(self, pos, quat):
"""Update the vehicle's pose information.
> *Input arguments*
* `pos` (*type:* `numpy.array`): Position vector
* `quat` (*type:* `numpy.array`): Quaternion as `(qx, qy, qz, qw)`
"""
if self._vehicle_pose is None:
self._vehicle_pose = uuv_trajectory_generator.TrajectoryPoint()
self._vehicle_pose.pos = pos
self._vehicle_pose.rotq = quat
self._vehicle_pose.t = to_fsec(self.node.get_clock().now())
self.init_odom_event.set()
# =========================================================================
def get_vehicle_rot(self):
"""Return the vehicle's rotation quaternion."""
self.init_odom_event.wait()
return self._vehicle_pose.rotq
# =========================================================================
def _update_trajectory_from_msg(self, msg):
self._stamp_trajectory_received = to_fsec(self.node.get_clock().now())
self._traj_interpolator.init_from_trajectory_message(msg)
self._logger.info('New trajectory received at ' + str(self._stamp_trajectory_received) + 's')
self._update_trajectory_info()
# =========================================================================
def start_station_keeping(self):
"""Start station keeping mode by setting the pose
set-point of the vehicle as the last pose before the
vehicle finished automatic mode.
"""
if self._vehicle_pose is not None:
self._this_ref_pnt = deepcopy(self._vehicle_pose)
self._this_ref_pnt.vel = np.zeros(6)
self._this_ref_pnt.acc = np.zeros(6)
self.set_station_keeping(True)
self.set_automatic_mode(False)
self._smooth_approach_on = False
# =========================================================================
def hold_vehicle(self, request, response):
"""Service callback function to hold the vehicle's
current position.
"""
if self._vehicle_pose is None:
self._logger.error('Current pose of the vehicle is invalid')
response.success = False
#return HoldResponse(False)
else:
self.start_station_keeping()
response.success = True
return response
# return HoldResponse(True)
# =========================================================================
def start_waypoint_list(self, request, response):
"""Service callback function to follow a set of waypoints
> *Input arguments*
* `request` (*type:* `uuv_control_msgs.InitWaypointSet`)
"""
if len(request.waypoints) == 0:
self._logger.error('Waypoint list is empty')
response.success = False
return response
#return InitWaypointSetResponse(False)
        t = rclpy.time.Time(seconds=request.start_time.data.secs, nanoseconds=request.start_time.data.nsecs)
if to_fsec(t) < to_fsec(self.node.get_clock().now()) and not request.start_now:
self._logger.error('The trajectory starts in the past, correct the starting time!')
response.success = False
return response
#return InitWaypointSetResponse(False)
else:
self._logger.info('Start waypoint trajectory now!')
self._lock.acquire()
# Create a waypoint set
wp_set = uuv_waypoints.WaypointSet(
inertial_frame_id=self.inertial_frame_id)
# Create a waypoint set message, to fill wp_set
waypointset_msg = WaypointSet()
waypointset_msg.header.stamp = self.node.get_clock().now().to_msg()
waypointset_msg.header.frame_id = self.inertial_frame_id
if request.start_now:
waypointset_msg.start_time = self.node.get_clock().now().to_msg()
else:
waypointset_msg.start_time = t.to_msg()
waypointset_msg.waypoints = request.waypoints
wp_set.from_message(waypointset_msg)
wp_set = self._transform_waypoint_set(wp_set)
wp_set = self._apply_workspace_constraints(wp_set)
if self._traj_interpolator.set_waypoints(wp_set, self.get_vehicle_rot()):
self._station_keeping_center = None
self._traj_interpolator.set_start_time((to_fsec(t) if not request.start_now else to_fsec(self.node.get_clock().now())))
self._update_trajectory_info()
self.set_station_keeping(False)
self.set_automatic_mode(True)
self.set_trajectory_running(True)
self._idle_circle_center = None
self._smooth_approach_on = True
self._logger.info('============================')
self._logger.info(' WAYPOINT SET ')
self._logger.info('============================')
self._logger.info('Interpolator = ' + request.interpolator.data)
self._logger.info('# waypoints = %d' % self._traj_interpolator.get_waypoints().num_waypoints)
self._logger.info('Starting time = %.2f' % (to_fsec(t) if not request.start_now else to_fsec(self.node.get_clock().now())))
self._logger.info('Inertial frame ID = ' + self.inertial_frame_id)
self._logger.info('============================')
self._lock.release()
response.success = True
return response
#return InitWaypointSetResponse(True)
else:
self._logger.error('Error occurred while parsing waypoints')
self._lock.release()
response.success = False
return response
#return InitWaypointSetResponse(False)
# =========================================================================
def start_circle(self, request, response):
"""Service callback function to initialize a parametrized
circular trajectory.
> *Input arguments*
* `request` (*type:* `uuv_control_msgs.InitCircularTrajectory`)
"""
if request.max_forward_speed <= 0 or request.radius <= 0 or \
request.n_points <= 0:
self._logger.error('Invalid parameters to generate a circular trajectory')
response.success = False
return response
#return InitCircularTrajectoryResponse(False)
        t = rclpy.time.Time(seconds=request.start_time.data.secs, nanoseconds=request.start_time.data.nsecs)
if to_fsec(t) < to_fsec(self.node.get_clock().now()) and not request.start_now:
self._logger.error('The trajectory starts in the past, correct the starting time!')
response.success = False
return response
#return InitCircularTrajectoryResponse(False)
try:
wp_set = uuv_waypoints.WaypointSet(
inertial_frame_id=self.inertial_frame_id)
success = wp_set.generate_circle(radius=request.radius,
center=request.center,
num_points=request.n_points,
max_forward_speed=request.max_forward_speed,
theta_offset=request.angle_offset,
heading_offset=request.heading_offset)
if not success:
self._logger.error('Error generating circular trajectory from waypoint set')
response.success = False
return response
#return InitCircularTrajectoryResponse(False)
wp_set = self._apply_workspace_constraints(wp_set)
if wp_set.is_empty:
self._logger.error('Waypoints violate workspace constraints, are you using world or world_ned as reference?')
response.success = False
return response
#return InitCircularTrajectoryResponse(False)
self._lock.acquire()
# Activates station keeping
self.set_station_keeping(True)
self._traj_interpolator.set_interp_method('cubic')
self._traj_interpolator.set_waypoints(wp_set, self.get_vehicle_rot())
self._station_keeping_center = None
self._traj_interpolator.set_start_time((to_fsec(t) if not request.start_now else to_fsec(self.node.get_clock().now())))
if request.duration > 0:
if self._traj_interpolator.set_duration(request.duration):
self._logger.info('Setting a maximum duration, duration=%.2f s' % request.duration)
else:
self._logger.error('Setting maximum duration failed')
self._update_trajectory_info()
# Disables station keeping to start trajectory
self.set_station_keeping(False)
self.set_automatic_mode(True)
self.set_trajectory_running(True)
self._idle_circle_center = None
self._smooth_approach_on = True
self._logger.info('============================')
self._logger.info('CIRCULAR TRAJECTORY GENERATED FROM WAYPOINT INTERPOLATION')
self._logger.info('============================')
self._logger.info('Radius [m] = %.2f' % request.radius)
self._logger.info('Center [m] = (%.2f, %.2f, %.2f)' % (request.center.x, request.center.y, request.center.z))
self._logger.info('# of points = %d' % request.n_points)
self._logger.info('Max. forward speed = %.2f' % request.max_forward_speed)
self._logger.info('Circle angle offset = %.2f' % request.angle_offset)
self._logger.info('Heading offset = %.2f' % request.heading_offset)
self._logger.info('# waypoints = %d' % self._traj_interpolator.get_waypoints().num_waypoints)
self._logger.info('Starting from = ' + str(self._traj_interpolator.get_waypoints().get_waypoint(0).pos))
self._logger.info('Starting time [s] = %.2f' % (to_fsec(t) if not request.start_now else to_fsec(self.node.get_clock().now())))
self._logger.info('============================')
self._lock.release()
response.success = True
return response
#return InitCircularTrajectoryResponse(True)
except Exception as e:
self._logger.error('Error while setting circular trajectory, msg={}'.format(e))
self.set_station_keeping(True)
self.set_automatic_mode(False)
self.set_trajectory_running(False)
self._lock.release()
response.success = False
return response
#return InitCircularTrajectoryResponse(False)
# =========================================================================
def start_helix(self, request, response):
"""Service callback function to initialize a parametrized helical
trajectory.
> *Input arguments*
* `request` (*type:* `uuv_control_msgs.InitHelicalTrajectory`)
"""
if request.radius <= 0 or request.n_points <= 0 or \
request.n_turns <= 0:
self._logger.error('Invalid parameters to generate a helical trajectory')
response.success = False
return response
#return InitHelicalTrajectoryResponse(False)
        t = rclpy.time.Time(seconds=request.start_time.data.secs, nanoseconds=request.start_time.data.nsecs)
if to_fsec(t) < to_fsec(self.node.get_clock().now()) and not request.start_now:
self._logger.error('The trajectory starts in the past, correct the starting time!')
response.success = False
return response
#return InitHelicalTrajectoryResponse(False)
else:
self._logger.info('Start helical trajectory now!')
try:
wp_set = uuv_waypoints.WaypointSet(inertial_frame_id=self.inertial_frame_id)
success = wp_set.generate_helix(radius=request.radius,
center=request.center,
num_points=request.n_points,
max_forward_speed=request.max_forward_speed,
delta_z=request.delta_z,
num_turns=request.n_turns,
theta_offset=request.angle_offset,
heading_offset=request.heading_offset)
if not success:
self._logger.error('Error generating circular trajectory from waypoint set')
response.success = False
return response
#return InitHelicalTrajectoryResponse(False)
wp_set = self._apply_workspace_constraints(wp_set)
if wp_set.is_empty:
self._logger.error('Waypoints violate workspace constraints, are you using world or world_ned as reference?')
response.success = False
return response
#return InitHelicalTrajectoryResponse(False)
self._lock.acquire()
self.set_station_keeping(True)
self._traj_interpolator.set_interp_method('cubic')
            if not self._traj_interpolator.set_waypoints(wp_set, self.get_vehicle_rot()):
                self._logger.error('Error setting the waypoints')
                self._lock.release()
                response.success = False
                return response
#return InitHelicalTrajectoryResponse(False)
self._station_keeping_center = None
self._traj_interpolator.set_start_time((to_fsec(t) if not request.start_now else to_fsec(self.node.get_clock().now())))
if request.duration > 0:
if self._traj_interpolator.set_duration(request.duration):
self._logger.info('Setting a maximum duration, duration=%.2f s' % request.duration)
else:
self._logger.error('Setting maximum duration failed')
self._update_trajectory_info()
self.set_station_keeping(False)
self.set_automatic_mode(True)
self.set_trajectory_running(True)
self._idle_circle_center = None
self._smooth_approach_on = True
self._logger.info('============================')
self._logger.info('HELICAL TRAJECTORY GENERATED FROM WAYPOINT INTERPOLATION')
self._logger.info('============================')
self._logger.info('Radius [m] = %.2f' % request.radius)
self._logger.info('Center [m] = (%.2f, %.2f, %.2f)' % (request.center.x, request.center.y, request.center.z))
self._logger.info('# of points = %d' % request.n_points)
self._logger.info('Max. forward speed = %.2f' % request.max_forward_speed)
self._logger.info('Delta Z = %.2f' % request.delta_z)
self._logger.info('# of turns = %d' % request.n_turns)
self._logger.info('Helix angle offset = %.2f' % request.angle_offset)
self._logger.info('Heading offset = %.2f' % request.heading_offset)
self._logger.info('# waypoints = %d' % self._traj_interpolator.get_waypoints().num_waypoints)
self._logger.info('Starting from = ' + str(self._traj_interpolator.get_waypoints().get_waypoint(0).pos))
self._logger.info('Starting time [s] = %.2f' % (to_fsec(t) if not request.start_now else to_fsec(self.node.get_clock().now())))
self._logger.info('============================')
self._lock.release()
response.success = True
return response
#return InitHelicalTrajectoryResponse(True)
except Exception as e:
self._logger.error('Error while setting helical trajectory, msg={}'.format(e))
self.set_station_keeping(True)
self.set_automatic_mode(False)
self.set_trajectory_running(False)
self._lock.release()
response.success = False
return response
#return InitHelicalTrajectoryResponse(False)
# =========================================================================
def init_waypoints_from_file(self, request, response):
"""Service callback function to initialize the path interpolator
with a set of waypoints loaded from a YAML file.
> *Input arguments*
* `request` (*type:* `uuv_control_msgs.InitWaypointsFromFile`)
"""
if (len(request.filename.data) == 0 or
not isfile(request.filename.data)):
self._logger.error('Invalid waypoint file')
response.success = False
return response
#return InitWaypointsFromFileResponse(False)
        t = rclpy.time.Time(seconds=request.start_time.data.secs, nanoseconds=request.start_time.data.nsecs)
if to_fsec(t) < to_fsec(self.node.get_clock().now()) and not request.start_now:
self._logger.error('The trajectory starts in the past, correct the starting time!')
response.success = False
return response
#return InitWaypointsFromFileResponse(False)
else:
self._logger.info('Start waypoint trajectory now!')
self._lock.acquire()
self.set_station_keeping(True)
self._traj_interpolator.set_interp_method(request.interpolator.data)
wp_set = uuv_waypoints.WaypointSet()
        if not wp_set.read_from_file(request.filename.data):
            self._logger.error('Error occurred while parsing waypoint file')
            self._lock.release()
            response.success = False
            return response
#return InitWaypointsFromFileResponse(False)
wp_set = self._transform_waypoint_set(wp_set)
wp_set = self._apply_workspace_constraints(wp_set)
if self._traj_interpolator.set_waypoints(wp_set, self.get_vehicle_rot()):
self._station_keeping_center = None
self._traj_interpolator.set_start_time((to_fsec(t) if not request.start_now else to_fsec(self.node.get_clock().now())))
self._update_trajectory_info()
self.set_station_keeping(False)
self.set_automatic_mode(True)
self.set_trajectory_running(True)
self._idle_circle_center = None
self._smooth_approach_on = True
self._logger.info('============================')
self._logger.info('IMPORT WAYPOINTS FROM FILE')
self._logger.info('============================')
self._logger.info('Filename = ' + request.filename.data)
self._logger.info('Interpolator = ' + request.interpolator.data)
self._logger.info('# waypoints = %d' % self._traj_interpolator.get_waypoints().num_waypoints)
self._logger.info('Starting time = %.2f' % (to_fsec(t) if not request.start_now else to_fsec(self.node.get_clock().now())))
self._logger.info('Inertial frame ID = ' + self.inertial_frame_id)
self._logger.info('============================')
self._lock.release()
response.success = True
return response
#return InitWaypointsFromFileResponse(True)
else:
self._logger.error('Error occurred while parsing waypoint file')
self._lock.release()
response.success = False
return response
#return InitWaypointsFromFileResponse(False)
# =========================================================================
    def go_to(self, request, response):
        """Service callback function to set a single target waypoint.
> *Input arguments*
* `request` (*type:* `uuv_control_msgs.GoTo`)
"""
if self._vehicle_pose is None:
self._logger.error('Current pose has not been initialized yet')
response.success = False
return response
#return GoToResponse(False)
if request.waypoint.max_forward_speed <= 0.0:
self._logger.error('Max. forward speed must be greater than zero')
response.success = False
return response
#return GoToResponse(False)
self.set_station_keeping(True)
self._lock.acquire()
wp_set = uuv_waypoints.WaypointSet(
inertial_frame_id=self.inertial_frame_id)
init_wp = uuv_waypoints.Waypoint(
x=self._vehicle_pose.pos[0],
y=self._vehicle_pose.pos[1],
z=self._vehicle_pose.pos[2],
max_forward_speed=request.waypoint.max_forward_speed,
heading_offset=euler_from_quaternion(self.get_vehicle_rot())[2],
use_fixed_heading=request.waypoint.use_fixed_heading,
inertial_frame_id=self.inertial_frame_id)
wp_set.add_waypoint(init_wp)
wp_set.add_waypoint_from_msg(request.waypoint)
wp_set = self._transform_waypoint_set(wp_set)
self._traj_interpolator.set_interp_method(request.interpolator)
if not self._traj_interpolator.set_waypoints(wp_set, self.get_vehicle_rot()):
self._logger.error('Error while setting waypoints')
self._lock.release()
response.success = False
return response
#return GoToResponse(False)
self._station_keeping_center = None
t = to_fsec(self.node.get_clock().now())
self._traj_interpolator.set_start_time(t)
self._update_trajectory_info()
self.set_station_keeping(False)
self.set_automatic_mode(True)
self.set_trajectory_running(True)
self._idle_circle_center = None
self._smooth_approach_on = False
self._logger.info('============================')
self._logger.info('GO TO')
self._logger.info('============================')
self._logger.info('Heading offset [rad] = %.2f' % request.waypoint.heading_offset)
self._logger.info('# waypoints = %d' % self._traj_interpolator.get_waypoints().num_waypoints)
self._logger.info('Starting from = ' + str(self._traj_interpolator.get_waypoints().get_waypoint(0).pos))
self._logger.info('Start time [s] = %.2f ' % t)
self._logger.info('Inertial frame ID = ' + self.inertial_frame_id)
self._logger.info('============================')
self._lock.release()
response.success = True
return response
#return GoToResponse(True)
    # =========================================================================
    def go_to_incremental(self, request, response):
"""Service callback to set the command to the vehicle to move to a
relative position in the world.
> *Input arguments*
* `request` (*type:* `uuv_control_msgs.GoToIncremental`)
"""
if self._vehicle_pose is None:
self._logger.error('Current pose has not been initialized yet')
response.success = False
return response
#return GoToIncrementalResponse(False)
if request.max_forward_speed <= 0:
self._logger.error('Max. forward speed must be positive')
response.success = False
return response
#return GoToIncrementalResponse(False)
self._lock.acquire()
self.set_station_keeping(True)
wp_set = uuv_waypoints.WaypointSet(
inertial_frame_id=self.inertial_frame_id)
init_wp = uuv_waypoints.Waypoint(
x=self._vehicle_pose.pos[0],
y=self._vehicle_pose.pos[1],
z=self._vehicle_pose.pos[2],
max_forward_speed=request.max_forward_speed,
heading_offset=euler_from_quaternion(self.get_vehicle_rot())[2],
inertial_frame_id=self.inertial_frame_id)
wp_set.add_waypoint(init_wp)
wp = uuv_waypoints.Waypoint(
x=self._vehicle_pose.pos[0] + request.step.x,
y=self._vehicle_pose.pos[1] + request.step.y,
z=self._vehicle_pose.pos[2] + request.step.z,
max_forward_speed=request.max_forward_speed,
inertial_frame_id=self.inertial_frame_id)
wp_set.add_waypoint(wp)
self._traj_interpolator.set_interp_method(request.interpolator)
if not self._traj_interpolator.set_waypoints(wp_set, self.get_vehicle_rot()):
self._logger.error('Error while setting waypoints')
self._lock.release()
response.success = False
return response
#return GoToIncrementalResponse(False)
self._station_keeping_center = None
self._traj_interpolator.set_start_time(to_fsec(self.node.get_clock().now()))
self._update_trajectory_info()
self.set_station_keeping(False)
self.set_automatic_mode(True)
self.set_trajectory_running(True)
self._idle_circle_center = None
self._smooth_approach_on = False
self._logger.info('============================')
self._logger.info('GO TO INCREMENTAL')
self._logger.info('============================')
self._logger.info(str(wp_set))
self._logger.info('# waypoints = %d' % wp_set.num_waypoints)
self._logger.info('Inertial frame ID = ' + self.inertial_frame_id)
self._logger.info('============================')
self._lock.release()
response.success = True
return response
#return GoToIncrementalResponse(True)
# =========================================================================
def generate_reference(self, t):
"""Return a trajectory point computed by the interpolator for the
timestamp `t`, in case the vehicle is on `automatic` mode. In case
it is in station keeping, the pose is kept constant.
> *Input arguments*
* `t` (*type:* `float`): Timestamp
> *Returns*
`uuv_trajectory_generator.TrajectoryPoint`: Trajectory point
"""
pnt = self._traj_interpolator.generate_reference(t, self._vehicle_pose.pos, self.get_vehicle_rot())
if pnt is None:
return self._vehicle_pose
else:
return pnt
# =========================================================================
def get_idle_circle_path(self, n_points, radius=30):
"""Generate a waypoint set starting from the current
position of the vehicle in the shape of a circle to
initialize an AUVs idle mode.
> *Input arguments*
* `n_points` (*type:* `int`): Number of waypoints
* `radius` (*type:* `float`): Circle radius in meters
> *Returns*
`uuv_waypoints.WaypointSet`: Set of waypoints for idle mode
"""
pose = deepcopy(self._vehicle_pose)
if self._idle_circle_center is None:
frame = np.array([
[np.cos(pose.rot[2]), -np.sin(pose.rot[2]), 0],
[np.sin(pose.rot[2]), np.cos(pose.rot[2]), 0],
[0, 0, 1]])
self._idle_circle_center = (pose.pos + 0.8 * self._max_forward_speed * frame[:, 0].flatten()) + radius * frame[:, 1].flatten()
self._idle_z = pose.pos[2]
phi = lambda u: 2 * np.pi * u + pose.rot[2] - np.pi / 2
u = lambda angle: (angle - pose.rot[2] + np.pi / 2) / (2 * np.pi)
vec = pose.pos - self._idle_circle_center
vec /= np.linalg.norm(vec)
u_init = u(np.arctan2(vec[1], vec[0]))
wp_set = uuv_waypoints.WaypointSet(
inertial_frame_id=self.inertial_frame_id)
for i in np.linspace(u_init, u_init + 1, n_points):
wp = uuv_waypoints.Waypoint(
x=self._idle_circle_center[0] + radius * np.cos(phi(i)),
y=self._idle_circle_center[1] + radius * np.sin(phi(i)),
z=self._idle_z,
max_forward_speed=0.8 * self._max_forward_speed,
inertial_frame_id=self.inertial_frame_id)
wp_set.add_waypoint(wp)
return wp_set
# =========================================================================
def interpolate(self, t):
"""Function interface to the controller. Calls the interpolator to
calculate the current trajectory sample or returns a fixed position
based on the past odometry measurements for station keeping.
> *Input arguments*
* `t` (*type:* `float`): Timestamp
> *Returns*
`uuv_trajectory_generator.TrajectoryPoint`: Trajectory point
"""
self._lock.acquire()
if not self._station_keeping_on and self._traj_running:
if self._smooth_approach_on:
# Generate extra waypoint before the initial waypoint
self._calc_smooth_approach()
self._smooth_approach_on = False
self._update_trajectory_info()
time.sleep(0.5)
self._logger.info('Adding waypoints to approach the given waypoint trajectory')
# Get interpolated reference from the reference trajectory
self._this_ref_pnt = self._traj_interpolator.interpolate(t, self._vehicle_pose.pos, self.get_vehicle_rot())
if self._look_ahead_delay > 0:
self._this_ref_pnt = self.generate_reference(t + self._look_ahead_delay)
            self._max_time_pub.publish(Float64(data=self._traj_interpolator.get_max_time() - to_fsec(self.node.get_clock().now())))
if not self._traj_running:
self._traj_running = True
self._logger.info(self.node.get_namespace() + ' - Trajectory running')
if self._traj_running and (self._traj_interpolator.has_finished() or self._station_keeping_on):
# Trajectory ended, start station keeping mode
self._logger.info(self.node.get_namespace() + ' - Trajectory completed!')
if self._this_ref_pnt is None:
# TODO Fix None value coming from the odometry
if self._is_teleop_active:
self._this_ref_pnt = self._calc_teleop_reference()
else:
self._this_ref_pnt = deepcopy(self._vehicle_pose)
self._this_ref_pnt.vel = np.zeros(6)
self._this_ref_pnt.acc = np.zeros(6)
self._start_count_idle = to_fsec(self.node.get_clock().now())
self.set_station_keeping(True)
self.set_automatic_mode(False)
self.set_trajectory_running(False)
elif self._this_ref_pnt is None:
self._traj_interpolator.set_interp_method('lipb')
# Use the latest position and heading of the vehicle from the odometry to enter station keeping mode
if self._is_teleop_active:
self._this_ref_pnt = self._calc_teleop_reference()
else:
self._this_ref_pnt = deepcopy(self._vehicle_pose)
# Set roll and pitch reference to zero
yaw = self._this_ref_pnt.rot[2]
self._this_ref_pnt.rot = [0, 0, yaw]
self.set_automatic_mode(False)
elif self._station_keeping_on:
if self._is_teleop_active:
self._this_ref_pnt = self._calc_teleop_reference()
            self._max_time_pub.publish(Float64(data=0.0))
#######################################################################
if not self._thrusters_only and not self._is_teleop_active and to_fsec(self.node.get_clock().now()) - self._start_count_idle > self._timeout_idle_mode:
self._logger.info('AUV STATION KEEPING')
if self._station_keeping_center is None:
self._station_keeping_center = self._this_ref_pnt
wp_set = self.get_idle_circle_path(20, self._idle_radius)
wp_set = self._apply_workspace_constraints(wp_set)
if wp_set.is_empty:
raise RuntimeError('Waypoints violate workspace constraints, are you using world or world_ned as reference?')
# Activates station keeping
self.set_station_keeping(True)
self._traj_interpolator.set_interp_method('cubic')
self._traj_interpolator.set_waypoints(wp_set, self.get_vehicle_rot())
self._traj_interpolator.set_start_time(to_fsec(self.node.get_clock().now()))
self._update_trajectory_info()
# Disables station keeping to start trajectory
self.set_station_keeping(False)
self.set_automatic_mode(True)
self.set_trajectory_running(True)
self._smooth_approach_on = False
#######################################################################
self._lock.release()
return self._this_ref_pnt
```
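The local planner above registers its services with relative names, so their absolute paths depend on the namespace the controller node is started in. As a hedged illustration (the `rexrov` namespace and the empty `Hold` request are assumptions for this example, not taken from this file), the sketch below calls `hold_vehicle` with the same async client pattern used elsewhere in this repository.
```python
# Minimal sketch: request station keeping via the 'hold_vehicle' service.
# Assumptions: the planner runs under the 'rexrov' namespace and Hold.Request()
# needs no fields, as suggested by the hold_vehicle callback above.
import rclpy
from uuv_control_msgs.srv import Hold


def hold_vehicle(namespace='rexrov'):
    rclpy.init()
    node = rclpy.create_node('hold_vehicle_client')
    cli = node.create_client(Hold, '/{}/hold_vehicle'.format(namespace))
    try:
        if not cli.wait_for_service(timeout_sec=10.0):
            node.get_logger().error('hold_vehicle service not available')
            return False
        future = cli.call_async(Hold.Request())
        rclpy.spin_until_future_complete(node, future)
        node.get_logger().info('hold_vehicle success=%s' % future.result().success)
        return future.result().success
    finally:
        node.destroy_node()
        rclpy.shutdown()


if __name__ == '__main__':
    hold_vehicle()
```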
#### File: uuv_descriptions/test/test_urdf_files.py
```python
import unittest
import subprocess
import os
def call_xacro(xml_file):
assert os.path.isfile(xml_file), 'Invalid XML xacro file'
return subprocess.check_output(['xacro', '--inorder', xml_file])
# =============================================================================
class TestRexROVURDFFiles(unittest.TestCase):
def test_xacro(self):
# Retrieve the root folder for the tests
test_dir = os.path.abspath(os.path.dirname(__file__))
robots_dir = os.path.join(test_dir, '..', 'robots')
for item in os.listdir(robots_dir):
if 'oberon' in item:
continue
if not os.path.isfile(os.path.join(robots_dir, item)):
continue
output = call_xacro(os.path.join(robots_dir, item))
self.assertNotIn(
'XML parsing error',
output.decode('utf-8'),
                'Parsing error found for file {}'.format(item))
self.assertNotIn(
'No such file or directory',
output.decode('utf-8'),
                'Some file not found in {}'.format(item))
# if __name__ == '__main__':
# import rosunit
# rosunit.unitrun(PKG, NAME, TestRexROVURDFFiles)
```
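For quick manual checks outside the test runner, the same `xacro` invocation used by `call_xacro` can be applied to a single robot description. The sketch below is a standalone helper for that purpose; the file path passed in is a placeholder supplied by the caller.
```python
# Standalone sketch reusing the subprocess-based xacro call from the test above.
import subprocess


def check_single_xacro(xml_file):
    # Raises CalledProcessError if xacro exits with an error; otherwise the
    # output is checked for the same marker string as in the test.
    output = subprocess.check_output(['xacro', '--inorder', xml_file]).decode('utf-8')
    assert 'XML parsing error' not in output, 'Parsing error in %s' % xml_file
    return output
```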
#### File: uuv_gazebo_ros_plugins/test/test_thrusters.test.py
```python
import array
import numpy as np
import os
import pathlib
import pytest
import rclpy
import unittest
import xacro
import sys
from time import sleep
from uuv_gazebo_ros_plugins_msgs.msg import FloatStamped
from uuv_gazebo_ros_plugins_msgs.srv import GetThrusterConversionFcn, \
SetThrusterState, GetThrusterState, SetThrusterEfficiency, \
GetThrusterEfficiency
import launch
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import AnyLaunchDescriptionSource
import launch_testing.actions
from ament_index_python.packages import get_package_share_directory
class TestThrusters(unittest.TestCase):
# def __init__(self, *args):
# super(TestThrusters, self).__init__(*args)
# rospy.init_node('test_thrusters', anonymous=True)
# self.thruster_input_pub = dict()
# for i in range(3):
# self.thruster_input_pub[i] = rospy.Publisher(
# '/vehicle/thrusters/%d/input' % i, FloatStamped, queue_size=1)
@classmethod
def setUpClass(cls):
# Initialize the ROS context for the test node
rclpy.init()
# =========================================================================
@classmethod
def tearDownClass(cls):
# Shutdown the ROS context
rclpy.shutdown()
os.system('killall -9 gzserver')
# =========================================================================
def setUp(self):
# Create a ROS node for tests
self.node = rclpy.create_node(
'test_thrusters',
allow_undeclared_parameters=True,
automatically_declare_parameters_from_overrides=True
)
self.thruster_input_pub = dict()
for i in range(3):
self.thruster_input_pub[i] = self.node.create_publisher(
FloatStamped, self.build_topic_name('/vehicle/thrusters', i, 'input'), 1)
# =========================================================================
def tearDown(self):
self.node.destroy_node()
# =========================================================================
def build_topic_name(self, prefix, id, suffix):
return prefix + '/id_' + str(id) + '/' + suffix
# =========================================================================
def create_service(self, srv_type, srv_name):
s = self.node.create_client(srv_type, srv_name)
if not s.wait_for_service(timeout_sec=10):
self.fail('service %s not available...' % srv_name)
return s
# =========================================================================
def service_request(self, service, **kwargs):
req = service.srv_type.Request()
for key, value in kwargs.items():
try:
getattr(req, key)
setattr(req, key, value)
            except AttributeError:
                print('Nonexistent attribute %s' % key)
future = service.call_async(req)
rclpy.spin_until_future_complete(self.node, future)
return future.result()
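    # =========================================================================
    # Hedged usage sketch for the helpers above (an addition, not an original
    # test; the thruster id and topic below are illustrative assumptions):
    #   client = self.create_service(
    #       GetThrusterState,
    #       self.build_topic_name('/vehicle/thrusters', 0, 'get_thruster_state'))
    #   state = self.service_request(client)
    #   # 'state.is_on' then reports whether thruster 0 is currently enabled.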
# TODO Migrate the test
# =========================================================================
# def test_input_output_topics_exist(self):
# pub = self.node.create_publisher(
# FloatStamped,
# self.build_topic_name('/vehicle/thrusters', 0, 'input'),
# 1
# )
# # pub = rospy.Publisher(FloatStamped'/vehicle/thrusters/0/input', ,
# # queue_size=1)
# for k in self.thruster_input_pub:
# # Publishing set point to rotor velocity
# input_message = FloatStamped()
# input_message.header.stamp = self.node.get_clock().now().to_msg() # rospy.Time.now()
# input_message.data = 0.2
# self.thruster_input_pub[k].publish(input_message)
# sleep(1)
# output = rospy.wait_for_message('/vehicle/thrusters/%d/thrust' % k,
# FloatStamped, timeout=30)
# self.assertIsNot(output.data, 0.0)
# # Turning thruster off
# input_message.data = 0.0
# pub.publish(input_message)
# =========================================================================
def test_conversion_fcn_parameters(self):
# Testing thruster #0 - basic/proportional model
# rospy.wait_for_service(
# '/vehicle/thrusters/0/get_thruster_conversion_fcn')
# get_thruster_convertion_fcn = rospy.ServiceProxy(
# '/vehicle/thrusters/0/get_thruster_conversion_fcn',
# GetThrusterConversionFcn)
s_get = self.create_service(
GetThrusterConversionFcn,
self.build_topic_name(
'/vehicle/thrusters',
0,
'get_thruster_conversion_fcn'
)
)
fcn = self.service_request(s_get)
# fcn = get_thruster_convertion_fcn()
self.assertEqual(fcn.fcn.function_name, 'Basic')
self.assertEqual(len(fcn.fcn.tags), len(fcn.fcn.data))
self.assertEqual(len(fcn.fcn.tags), 1)
self.assertIn('rotor_constant', fcn.fcn.tags)
self.assertEqual(fcn.fcn.data[0], 0.001)
# Testing thruster #1 - Bessa/nonlinear model
# rospy.wait_for_service(
# '/vehicle/thrusters/1/get_thruster_conversion_fcn')
# get_thruster_convertion_fcn = rospy.ServiceProxy(
# '/vehicle/thrusters/1/get_thruster_conversion_fcn',
# GetThrusterConversionFcn)
s_get = self.create_service(
GetThrusterConversionFcn,
self.build_topic_name(
'/vehicle/thrusters', 1,
'get_thruster_conversion_fcn')
)
fcn = self.service_request(s_get)
# fcn = get_thruster_convertion_fcn()
bessa_tags = ['rotor_constant_l', 'rotor_constant_r', 'delta_l',
'delta_r']
bessa_params = [0.001, 0.001, -0.01, 0.01]
self.assertEqual(fcn.fcn.function_name, 'Bessa')
self.assertEqual(len(fcn.fcn.tags), len(fcn.fcn.data))
self.assertEqual(len(fcn.fcn.tags), 4)
for t, p in zip(fcn.fcn.tags, fcn.fcn.data):
self.assertIn(t, bessa_tags)
self.assertEqual(p, bessa_params[bessa_tags.index(t)])
# Testing thruster #2 - Linear interpolation
# rospy.wait_for_service(
# '/vehicle/thrusters/2/get_thruster_conversion_fcn')
# get_thruster_convertion_fcn = rospy.ServiceProxy(
# '/vehicle/thrusters/2/get_thruster_conversion_fcn',
# GetThrusterConversionFcn)
s_get = self.create_service(
GetThrusterConversionFcn,
self.build_topic_name(
'/vehicle/thrusters', 2,
'get_thruster_conversion_fcn')
)
fcn = self.service_request(s_get)
# fcn = get_thruster_convertion_fcn()
self.assertEqual(fcn.fcn.function_name, 'LinearInterp')
self.assertEqual(len(fcn.fcn.tags), len(fcn.fcn.data))
self.assertEqual(len(fcn.fcn.tags), 0)
self.assertEqual(len(fcn.fcn.lookup_table_input),
len(fcn.fcn.lookup_table_output))
self.assertListEqual([-0.1, 0.0, 0.1],
list(fcn.fcn.lookup_table_input))
self.assertListEqual([-0.01, 0.0, 0.01],
list(fcn.fcn.lookup_table_output))
# =========================================================================
def test_change_thruster_state(self):
for i in range(3):
# rospy.wait_for_service(
# '/vehicle/thrusters/%d/set_thruster_state' % i)
# set_state = rospy.ServiceProxy(
# '/vehicle/thrusters/%d/set_thruster_state' % i,
# SetThrusterState)
s_set = self.create_service(
SetThrusterState,
self.build_topic_name(
'/vehicle/thrusters', i, 'set_thruster_state')
)
set_state = self.service_request(s_set, on=False)
self.assertTrue(set_state.success)
# Test that thruster is off
# rospy.wait_for_service(
# '/vehicle/thrusters/%d/get_thruster_state' % i)
# get_state = rospy.ServiceProxy(
# '/vehicle/thrusters/%d/get_thruster_state' % i,
# GetThrusterState)
s_get = self.create_service(
GetThrusterState,
self.build_topic_name(
'/vehicle/thrusters', i, 'get_thruster_state')
)
get_state = self.service_request(s_get)
self.assertFalse(get_state.is_on)
# Turn thruster on again
set_state = self.service_request(s_set, on=True)
self.assertTrue(set_state.success)
get_state = self.service_request(s_get)
self.assertTrue(get_state.is_on)
# =========================================================================
def test_change_thrust_efficiency(self):
for i in range(3):
# rospy.wait_for_service(
# '/vehicle/thrusters/%d/set_thrust_force_efficiency' % i)
# set_efficiency = rospy.ServiceProxy(
# '/vehicle/thrusters/%d/set_thrust_force_efficiency' % i,
# SetThrusterEfficiency)
s_set = self.create_service(
SetThrusterEfficiency,
self.build_topic_name(
'/vehicle/thrusters', i, 'set_thrust_force_efficiency')
)
set_efficiency = self.service_request(s_set, efficiency=0.5)
self.assertTrue(set_efficiency.success)
            # Check that the thrust efficiency was updated
# rospy.wait_for_service(
# '/vehicle/thrusters/%d/get_thrust_force_efficiency' % i)
# get_efficiency = rospy.ServiceProxy(
# '/vehicle/thrusters/%d/get_thrust_force_efficiency' % i,
# GetThrusterEfficiency)
s_get = self.create_service(
GetThrusterEfficiency,
self.build_topic_name(
'/vehicle/thrusters', i, 'get_thrust_force_efficiency')
)
get_efficiency = self.service_request(s_get)
self.assertEqual(get_efficiency.efficiency, 0.5)
            # Restore the thrust efficiency to 1.0
set_efficiency = self.service_request(s_set, efficiency=1.0)
self.assertTrue(set_efficiency.success)
get_efficiency = self.service_request(s_get)
self.assertEqual(get_efficiency.efficiency, 1.0)
# =========================================================================
def test_change_dyn_state_efficiency(self):
for i in range(3):
# rospy.wait_for_service(
# '/vehicle/thrusters/%d/set_dynamic_state_efficiency' % i)
# set_efficiency = rospy.ServiceProxy(
# '/vehicle/thrusters/%d/set_dynamic_state_efficiency' % i,
# SetThrusterEfficiency)
s_set = self.create_service(
SetThrusterEfficiency,
self.build_topic_name(
'/vehicle/thrusters', i, 'set_dynamic_state_efficiency')
)
set_efficiency = self.service_request(s_set, efficiency=0.5)
self.assertTrue(set_efficiency.success)
            # Check that the dynamic state efficiency was updated
# rospy.wait_for_service(
# '/vehicle/thrusters/%d/get_dynamic_state_efficiency' % i)
# get_efficiency = rospy.ServiceProxy(
# '/vehicle/thrusters/%d/get_dynamic_state_efficiency' % i,
# GetThrusterEfficiency)
s_get = self.create_service(
GetThrusterEfficiency,
self.build_topic_name(
'/vehicle/thrusters', i, 'get_dynamic_state_efficiency')
)
get_efficiency = self.service_request(s_get)
self.assertEqual(get_efficiency.efficiency, 0.5)
            # Restore the dynamic state efficiency to 1.0
set_efficiency = self.service_request(s_set, efficiency=1.0)
self.assertTrue(set_efficiency.success)
get_efficiency = self.service_request(s_get)
self.assertEqual(get_efficiency.efficiency, 1.0)
# =============================================================================
@pytest.mark.rostest
def generate_test_description():
# Set env
#os.environ['GAZEBO_MASTER_URI'] = 'http://localhost:3002'
file_path = pathlib.Path(__file__)
# Here, parent first removes the file name
parent_file_path = pathlib.Path(__file__).parent
# Gazebo
gazebo_launch = os.path.join(
get_package_share_directory('gazebo_ros'),
'launch',
'gazebo.launch.py'
)
gazebo_world = os.path.join(
str(parent_file_path),
'worlds',
'test_empty.world',
)
if not pathlib.Path(gazebo_launch).exists() or not pathlib.Path(gazebo_world).exists():
exc = 'Launch file ' + gazebo_launch + ' or ' + gazebo_world + ' does not exist'
raise Exception(exc)
launch_args = [('world', gazebo_world), ('paused', 'false'), ('gui', 'false'), ('verbose', 'true'), ]
gazebo_launch_desc = IncludeLaunchDescription(
AnyLaunchDescriptionSource(gazebo_launch), launch_arguments=launch_args)
# Upload vehicle
upload_launch = os.path.join(
str(parent_file_path),
'models',
'thrusters',
'test_upload_thrusters.launch.py'
)
if not pathlib.Path(upload_launch).exists():
exc = 'Launch file ' + upload_launch + ' does not exist'
raise Exception(exc)
upload_launch_desc = IncludeLaunchDescription(
AnyLaunchDescriptionSource(upload_launch))
return (
launch.LaunchDescription([
gazebo_launch_desc,
upload_launch_desc,
launch_testing.actions.ReadyToTest(),
])
)
``` |
{
"source": "JPLMLIA/libeos",
"score": 3
} |
#### File: libeos/mise/mise_data.py
```python
import os
import numpy as np
import h5py
class MISEData(object):
def __init__(self, filename):
self.data_file = filename
self._load()
def _load(self):
# Matlab files can be read if saved with '-v7.3' option from Matlab
# Must use HDF5 reader:
misefile = h5py.File(self.data_file, 'r')
cube = misefile['cube']
cube = np.array(cube)
# APL file is wavelengths x height x width
# Our generated data is height x width x wavelengths
# Assume all images have height = width.
# To handle either case, check size of first two dimensions.
# If not equal, assume transpose is needed.
if cube.shape[0] != cube.shape[1]:
print('Warning: cube.shape[0] != cube.shape[1]; assuming transpose needed.')
self.data = cube.T
else:
self.data = cube
# If wavelengths are specified, use them
if 'wavelengths' in misefile.keys():
self.wavelengths = np.array(misefile['wavelengths']).T
else:
            self.wavelengths = np.arange(800.0, 5001.0, 10.0)  # Per Diana
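# Minimal usage sketch (an addition, not from the original module): writes a tiny
# synthetic cube to an HDF5 file and loads it back through MISEData. The file
# name and array shapes here are illustrative assumptions only.
def _example_mise_data(tmp_path='example_mise.h5'):
    # APL-style layout is wavelengths x height x width, so the loader transposes it.
    cube = np.random.rand(5, 4, 4)
    with h5py.File(tmp_path, 'w') as f:
        f.create_dataset('cube', data=cube)
    mise = MISEData(tmp_path)
    # After loading, data is height x width x wavelengths, i.e. shape (4, 4, 5).
    return mise.data.shape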
```
#### File: libeos/pims/data_utils.py
```python
from __future__ import division
import os
from datetime import datetime
import warnings
import math
import dateutil.parser
import numpy as np
import matplotlib.dates as mdates
import scipy
import scipy.interpolate  # explicit import; scipy.interpolate.interp1d is used below
from scipy.ndimage import minimum_filter
from scipy.linalg import hankel
from scipy.ndimage.filters import gaussian_filter, uniform_filter, median_filter
from scipy.spatial.distance import pdist
from simanneal import Annealer
from joblib import Memory
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from mpl_toolkits.axes_grid1 import make_axes_locatable
from els_data import ELS
from plot_els import parse_quantity
from transform import Filter
# Required for pandas to be compatible with matplotlib when formatting dates.
register_matplotlib_converters()
# Caches the return values of a function in a specific directory.
def cache(directory):
def cache_in_dir(func):
memory = Memory(directory, verbose=0)
return memory.cache(func)
return cache_in_dir
# Returns the total time contained within the list of intervals.
def get_total_time(intervals):
"""
>>> get_total_time([])
0.0
>>> get_total_time([(1, 3)])
2.0
>>> get_total_time([(1, 3), (4, 5)])
3.0
"""
return math.fsum([(interval_end - interval_start) for (interval_start, interval_end) in intervals])
# Returns the list of overlap regions between two sorted (according to start time) lists of intervals (as 2-tuples).
# Note that the lists should not contain any overlaps within themselves.
def get_overlaps(first_intervals, second_intervals):
"""
>>> get_overlaps([(1, 2), (3, 4), (8, 9)], [(1, 4), (7, 8.5)])
[(1, 2), (3, 4), (8, 8.5)]
>>> get_overlaps([(1, 4), (7, 8.5)], [(1, 2), (3, 4), (8, 9)])
[(1, 2), (3, 4), (8, 8.5)]
>>> get_overlaps([(1, 8), (9, 10)], [(2, 3), (5, 6), (7, 9.5)])
[(2, 3), (5, 6), (7, 8), (9, 9.5)]
>>> get_overlaps([(2, 3), (5, 6), (7, 9.5)], [(1, 8), (9, 10)])
[(2, 3), (5, 6), (7, 8), (9, 9.5)]
>>> get_overlaps([(1, 10)], [(0, 5)])
[(1, 5)]
>>> get_overlaps([(0, 5)], [(1, 10)])
[(1, 5)]
>>> get_overlaps([(1, 6), (7, 9)], [(5.5, 7.5)])
[(5.5, 6), (7, 7.5)]
>>> get_overlaps([(5.5, 7.5)], [(1, 6), (7, 9)])
[(5.5, 6), (7, 7.5)]
"""
overlaps = []
for first_interval in first_intervals:
# Find the index of the first interval in the second list starting after this interval ends.
# We do not need to search beyond this interval.
end_index = None
for index, second_interval in enumerate(second_intervals):
if second_interval[0] >= first_interval[1]:
end_index = index
break
# Go through all of these intervals and compute the overlaps.
for second_interval in second_intervals[:end_index]:
if second_interval[1] > first_interval[0]:
uncovered_region = (max(first_interval[0], second_interval[0]), min(first_interval[1], second_interval[1]))
overlaps.append(uncovered_region)
return overlaps
# Returns the intervals within the interval [start_time, end_time] which are not covered by the list of intervals given.
# The list of intervals should be sorted according to their start times.
def get_uncovered(intervals, start_time, end_time):
"""
>>> get_uncovered([(1, 3)], 0, 10)
[(0, 1), (3, 10)]
>>> get_uncovered([(1, 8), (9, 10)], 0, 20)
[(0, 1), (8, 9), (10, 20)]
>>> get_uncovered([], 0, 20)
[(0, 20)]
>>> get_uncovered([(1, 3), (3, 6)], 0, 10)
[(0, 1), (6, 10)]
"""
uncovered_intervals = []
curr_start = start_time
# Go through the list.
for interval in intervals:
curr_end = interval[0]
# We don't add degenerate intervals.
if curr_start < curr_end:
uncovered_intervals.append((curr_start, curr_end))
curr_start = interval[1]
# If there's still time left!
if curr_start < end_time:
uncovered_intervals.append((curr_start, end_time))
return uncovered_intervals
# Merges adjacent intervals passed as a list of 2-tuples.
# The list of intervals must be sorted according to start-time and disjoint.
def merge_adjacent_intervals(intervals):
"""
>>> merge_adjacent_intervals([(1, 3), (4, 5)])
[(1, 3), (4, 5)]
>>> merge_adjacent_intervals([(1, 4), (4, 5)])
[(1, 5)]
>>> merge_adjacent_intervals([(1, 2), (2, 5), (5, 7)])
[(1, 7)]
>>> merge_adjacent_intervals([(1, 2), (2, 5), (5, 7), (8, 9)])
[(1, 7), (8, 9)]
>>> merge_adjacent_intervals([(1, 2), (2, 5), (6, 7), (7, 9), (10, 11), (13, 14)])
[(1, 5), (6, 9), (10, 11), (13, 14)]
>>> merge_adjacent_intervals([])
[]
>>> merge_adjacent_intervals([(0, 1)])
[(0, 1)]
>>> merge_adjacent_intervals([(0, 1), (1, 8)])
[(0, 8)]
"""
merged_intervals = []
# Iterate once through list, and merge greedily.
index = 0
while index < len(intervals):
curr_interval = intervals[index]
curr_start = curr_interval[0]
curr_end = curr_interval[1]
# See how far we can go on merging intervals.
next_index = index + 1
while next_index < len(intervals):
next_interval = intervals[next_index]
if next_interval[0] == curr_end:
curr_end = next_interval[1]
else:
break
next_index += 1
merged_intervals.append((curr_start, curr_end))
index = next_index
return merged_intervals
# Returns non-overlapping intervals as a list of 2-tuples indicating start and end times for each interval.
# The list of interval centers must be sorted.
def mark_intervals(interval_centers, window, start_time, end_time):
"""
>>> mark_intervals([1, 4, 6], 2, 0, 10)
[(0.0, 2.0), (3.0, 7.0)]
>>> mark_intervals([1, 4, 6], 1, 0, 10)
[(0.5, 1.5), (3.5, 4.5), (5.5, 6.5)]
>>> mark_intervals([1, 4, 6], 3, 0, 10)
[(0, 7.5)]
>>> mark_intervals([], 3, 0, 10)
[]
>>> mark_intervals([2], 5, 0, 2)
[(0, 2)]
>>> mark_intervals([4, 5], 3, 0, 10)
[(2.5, 6.5)]
>>> mark_intervals([4, 5, 8], 3, 0, 9)
[(2.5, 9)]
"""
# Variables indicating the start and end times for the next interval to be added.
interval_start = -np.inf
interval_end = -np.inf
interval_time = 0
intervals = []
for center in interval_centers:
interval_start = max(center - window / 2, interval_end, start_time)
interval_end = min(center + window / 2, end_time)
interval_time += interval_end - interval_start
intervals.append((interval_start, interval_end))
# Merge adjacent intervals.
return merge_adjacent_intervals(intervals)
# Converts an array to a dictionary of intervals, with key as the value of the array.
def array_to_intervals(array):
"""
>>> array_to_intervals([2, 1, 1, 1, 3, 3, 4, 4, 1, 1, 2, 2])[1]
[(1, 4), (8, 10)]
>>> array_to_intervals([2, 1, 1, 1, 3, 3, 4, 4, 1, 1, 2, 2])[2]
[(0, 1), (10, 11)]
>>> array_to_intervals([2, 1, 1, 1, 3, 3, 4, 4, 1, 1, 2, 2])[3]
[(4, 6)]
>>> array_to_intervals([2, 1, 1, 1, 3, 3, 4, 4, 1, 1, 2, 2])[4]
[(6, 8)]
>>> array_to_intervals([])
{}
"""
if len(array) == 0:
return {}
interval_dict = {}
last_val = array[0]
interval_start = 0
for index, val in enumerate(array):
# Assign a list of intervals to this value.
if val not in interval_dict:
interval_dict[val] = []
# Check if we have finished an interval.
if val != last_val:
interval_dict[last_val].append((interval_start, index))
interval_start = index
last_val = val
# Assign the last interval.
if interval_start != len(array) - 1:
interval_dict[last_val].append((interval_start, len(array) - 1))
return interval_dict
# Returns the leftmost index in a sorted 1D array of the closest value to the given value.
def closest_index(val, array):
# Restrict array via binary search.
low = 0
high = array.size
while high - low > 2:
mid = (high + low) // 2
if array[mid] < val:
low = mid
else:
high = mid
# Now, search within the restricted array.
return low + np.argmin(np.abs(array[low: high] - val))
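# Hedged example (an addition, not original code): demonstrates closest_index on
# a small sorted array; the input values are arbitrary illustration data.
def _example_closest_index():
    sorted_array = np.array([0.0, 1.5, 3.0, 4.5])
    # 3.2 is nearest to 3.0, which sits at index 2.
    return closest_index(3.2, sorted_array)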
# Returns the indices of the peaks (local maxima) in the array, in a neighbourhood.
def peaks(array, neighbourhood=1):
"""
>>> peaks([1, 3, 2])
[1]
>>> peaks([1])
[0]
>>> peaks([3, 2, 1])
[0]
>>> peaks([])
[]
>>> peaks([1, 0, 2, 1])
[0, 2]
"""
# Trivial cases.
if len(array) == 0:
return []
if len(array) == 1:
return [0]
peak_indices = []
# Check each.
for index, val in enumerate(array):
# Check if values masked.
if np.ma.is_masked(val):
continue
if index == 0:
prev_vals = -np.inf
else:
prev_vals = np.max(array[max(0, index - neighbourhood): index])
if index == len(array) - 1:
next_vals = -np.inf
else:
next_vals = np.max(array[index + 1: index + neighbourhood + 1])
if prev_vals <= val and next_vals < val:
peak_indices.append(index)
return peak_indices
# Returns an array, indicating for each element in the first array, the L1-distance to the closest element in the second array.
# If 'return_closest' is true, also returns a boolean array indicating, for each element in the second array, whether it is the closest element to at least one element in the first array.
def closest_distance(array1, array2, return_closest=False):
"""
>>> '%s' % closest_distance([1, 3, 4], [1, 3, 4])
'[0. 0. 0.]'
>>> '%s, %s' % closest_distance([1, 3, 4], [1, 3, 4], return_closest=True)
'[0. 0. 0.], [ True True True]'
>>> '%s' % closest_distance([1, 3, 4], [1, 3, 5])
'[0. 0. 1.]'
>>> '%s, %s' % closest_distance([1, 3, 4], [1, 3, 5], return_closest=True)
'[0. 0. 1.], [ True True True]'
>>> '%s' % closest_distance([5, 3, 4], [1, 3, 5])
'[0. 0. 1.]'
>>> '%s, %s' % closest_distance([5, 3, 4], [1, 3, 5], return_closest=True)
'[0. 0. 1.], [False True True]'
>>> '%s' % closest_distance([10, 3, 4], [1, 3, 5])
'[5. 0. 1.]'
>>> '%s, %s' % closest_distance([10, 3, 4], [1, 3, 5], return_closest=True)
'[5. 0. 1.], [False True True]'
>>> '%s' % closest_distance([10, -2, 4], [1, 3, 5])
'[5. 3. 1.]'
>>> '%s, %s' % closest_distance([10, -2, 4], [1, 3, 5], return_closest=True)
'[5. 3. 1.], [ True True True]'
>>> '%s' % closest_distance([10, -2, 4], [-100, 3])
'[7. 5. 1.]'
>>> '%s, %s' % closest_distance([10, -2, 4], [-100, 3], return_closest=True)
'[7. 5. 1.], [False True]'
"""
distances = np.full(len(array1), np.inf)
for index1, element1 in enumerate(array1):
for element2 in array2:
distances[index1] = np.minimum(distances[index1], np.sum(np.abs(element1 - element2)))
is_closest = np.full(len(array2), False)
if return_closest:
for index2, element2 in enumerate(array2):
for index1, element1 in enumerate(array1):
if distances[index1] == np.sum(np.abs(element1 - element2)):
is_closest[index2] = True
return distances, is_closest
else:
return distances
# Returns the index and the value of the first value in the sorted array which is greater than or equal to val.
def binary_search(val, array):
"""
>>> binary_search(1, [1, 2, 3])
(0, 1)
>>> binary_search(1.5, [1, 2, 3])
(1, 2)
>>> binary_search(8, [1, 2, 3])
(None, None)
>>> binary_search(3, [1, 2, 4])
(2, 4)
"""
# Trivial cases - out of array range.
if len(array) == 0:
return None, None
if val > array[-1]:
return None, None
if val <= array[0]:
return 0, array[0]
low = 0
high = len(array) - 1
while high - low >= 2:
mid = (high + low) // 2
if array[mid] >= val:
high = mid
else:
low = mid + 1
if low == high:
return low, array[low]
if val > array[low]:
return high, array[high]
else:
return low, array[low]
# Artificial one-dimensional time-series with 2 discords.
def generate_1D_timeseries(size):
return np.hstack((np.sin(np.arange(0, (size + 1)/3)), 3 + np.sin(np.arange(0, (size + 1)/3)), np.sin(np.arange(0, (size + 1)/3))))[:size]
# Artificial n-dimensional time-series with 2 discords.
def generate_nD_timeseries(shape, discord_dimensions=None):
series = np.zeros(shape)
sublength = shape[0]//3
width = shape[1]
series[:sublength] = np.ones((sublength, width)) + np.random.rand(sublength, width)/10
series[sublength: 2*sublength, :discord_dimensions] = 2 * np.ones((sublength, discord_dimensions)) + np.random.rand(sublength, discord_dimensions)/10
series[sublength: 2*sublength, discord_dimensions:] = np.ones((sublength, width - discord_dimensions)) + np.random.rand(sublength, width - discord_dimensions)/10
series[2*sublength:] = np.ones((sublength, width)) + np.random.rand(sublength, width)/10
return series
# Artificial 2-dimensional time-series to be segmented.
def generate_2D_timeseries(size):
series1 = np.random.rand(size)
series2 = np.hstack((np.random.rand((size + 1)//3), 2 + np.random.rand((size + 1)//3), 4 + np.random.rand((size + 1)//3)))[:size]
return np.vstack((series1, series2)).T
# Makes a stack of subsequences (windows of length k) from the original sequence.
def pack(sequence, k=10):
"""
>>> pack(np.array([1, 2, 3]), k=2)
array([[1, 2],
[2, 3]])
>>> pack(np.array([1, 2, 3, 4]), k=2)
array([[1, 2],
[2, 3],
[3, 4]])
>>> a = np.array([[1,2,3], [4, 5, 6], [7, 8, 9], [11, 12, 13], [14, 15, 16]])
>>> a
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[11, 12, 13],
[14, 15, 16]])
>>> pack(a, k=2)
array([[ 1, 2, 3, 4, 5, 6],
[ 4, 5, 6, 7, 8, 9],
[ 7, 8, 9, 11, 12, 13],
[11, 12, 13, 14, 15, 16]])
"""
if len(sequence.shape) == 1:
# Reference: https://stackoverflow.com/questions/22970716/get-all-subsequences-of-a-numpy-array
# Pass first column, and last row of Hankel matrix to be constructed.
return hankel(sequence[:k], sequence[k - 1:]).T[:sequence.shape[0] - k + 1]
else:
# Not as efficient as above, but works correctly for multi-dimensional sequences.
subsequences = np.array([sequence[i: i + k].flatten() for i in range(0, sequence.shape[0] - k + 1)])
return np.stack(arrays=subsequences)
# Returns the unique values with the highest frequencies, i.e., ranks 1 to num_rank by frequency.
def get_top_counts(sequence, num_rank=2):
"""
>>> '%s' % get_top_counts([1, 2, 1, 1, 1, 3, 3, 4, 4, 4], num_rank=2)
'[1 4]'
>>> '%s' % get_top_counts([1, 2, 1, 1, 1, 3, 3, 4, 4, 4], num_rank=3)
'[1 4 3]'
"""
unique, counts = np.unique(sequence, return_counts=True)
array_with_counts = np.asarray((unique, counts)).T
return np.array(sorted(array_with_counts, key=lambda x: -x[1]))[:num_rank, 0]
# Returns a numpy array of all pair-wise Euclidean squared distances between points in this finite sequence, after flattening if required.
def squared_distances(sequence):
"""
>>> squared_distances(np.array([[1, 2], [2, 3], [3, 4]]))
array([2., 8., 2.])
"""
return pdist(sequence.reshape(sequence.shape[0], -1), metric='sqeuclidean')
# Returns the median of all pair-wise Euclidean squared distances between points in this finite sequence.
def get_median_pairwise_distance(sequence):
"""
>>> '%.2f' % get_median_pairwise_distance(np.array([[1, 2], [2, 3], [3, 4]]))
'1.41'
"""
return np.sqrt(np.ma.median(squared_distances(sequence)))
# Extracts the required quantity from the Intel dataset. Results are cached.
@cache('temp/cachedir/')
def get_Intel_data(intel_data_file, quantity, start_time, end_time, drop_sensors=None, downsample_rate='20min'):
"""
    :param intel_data_file: The path of the Intel Lab sensor data file.
:param quantity: The quantity to be extracted.
:param start_time: Start time (as a datetime.datetime object) for readings.
:param end_time: End time (as a datetime.datetime object) for readings.
:param drop_sensors: List of sensors to ignore.
:param downsample_rate: The size of the time bins to downsample into, taking averages.
:return: 3-tuple of (quantities, sensor_ids, times)
"""
# Check file paths.
if not os.path.exists(intel_data_file):
raise OSError('Could not find %s.' % intel_data_file)
# Column names!
names = ['date', 'time', 'epoch', 'sensor_id', 'temperature', 'humidity', 'light', 'voltage']
if quantity not in names:
raise ValueError('Invalid quantity passed. Choose from \'temperature\', \'humidity\', \'light\', \'voltage\'.')
# To parse the date and time columns into a single datetime object.
def dateparse(date, time):
try:
return datetime.strptime(date + " " + time, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
return datetime.strptime(date + " " + time, '%Y-%m-%d %H:%M:%S')
# Construct a dataframe from the input CSV file.
df = pd.read_csv(intel_data_file, sep=' ', parse_dates={'datetime': ['date', 'time']}, date_parser=dateparse, names=names)
# Restrict to the column we want, restricting to valid sensors.
df = df[df['sensor_id'] <= 54][['sensor_id', 'datetime', quantity]]
# Filter values to those between start and end times.
df = df[(start_time <= df['datetime']) & (df['datetime'] <= end_time)]
# Remove sensors if required.
if drop_sensors is not None:
df = df[~df['sensor_id'].isin(drop_sensors)]
# Create an index on datetime to downsample.
df.set_index(['datetime'], inplace=True)
# Downsample!
df = df.groupby('sensor_id').resample(downsample_rate)[quantity].mean().unstack('sensor_id', fill_value=0)
# Interpolate linearly across sensors independently.
df.interpolate(method='linear', inplace=True)
# Extract the relevant quantities.
quantities = df.to_numpy()
sensor_ids = df.columns.to_numpy()
times = df.index.to_series().apply(mdates.date2num).to_numpy()
return quantities, sensor_ids, times
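# Hedged usage sketch for get_Intel_data (an addition; the file path and dates
# below are placeholders, not paths from this repository):
#   quantities, sensor_ids, times = get_Intel_data(
#       'path/to/intel/data.txt', 'temperature',
#       datetime(2004, 2, 28), datetime(2004, 3, 10),
#       drop_sensors=[5, 15], downsample_rate='20min')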
# Plots Intel data.
def plot_Intel_data(fig, ax, intel_data, colorbar_orientation='vertical', tick_spacing=5):
# Unpack data.
quantities, sensor_ids, times = intel_data
# Set labels.
ax.set_ylabel('Sensor ID')
ax.set_xlabel('Time')
# Plot as a colourmap.
im = ax.imshow(quantities.T, aspect='auto', origin='lower',
interpolation='none', extent=[times[0], times[-1], 0, len(sensor_ids)])
# Set ticks on the y-axis.
ax.set_yticks(np.arange(len(sensor_ids), step=tick_spacing))
ax.set_yticklabels(sensor_ids[::tick_spacing])
# Time on the x-axis has to be formatted correctly.
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y/%H:%M'))
fig.autofmt_xdate()
    # Keep the tick-label font size reasonably small.
ax.tick_params(axis='both', which='major', labelsize=8)
# Add a colorbar for the temperature.
if colorbar_orientation == 'horizontal':
divider = make_axes_locatable(ax)
cax = divider.append_axes('top', size='5%', pad=0.15)
cbar = fig.colorbar(im, cax=cax, orientation='horizontal')
cbar.ax.xaxis.set_label_position('top')
cbar.ax.xaxis.set_ticks_position('top')
else:
cbar = fig.colorbar(im, ax=ax, orientation=colorbar_orientation)
cbar.set_label('Temperature')
# Returns the anomalies (labelled as 1) in the Intel dataset.
def get_Intel_anomalies(quantities, times):
sensor_means = np.mean(quantities, axis=0)
sensor_devs = np.std(quantities, axis=0)
lower_threshold = np.tile(sensor_means - 3 * sensor_devs, (len(quantities), 1))
upper_threshold = np.tile(sensor_means + 3 * sensor_devs, (len(quantities), 1))
labels = np.ones(len(quantities))
labels[np.all(np.logical_and(lower_threshold <= quantities, quantities <= upper_threshold), axis=1)] = 0
return times[labels == 1]
# Extracts the data from a single file (i.e., one sensor) of the Yahoo dataset.
def get_Yahoo_record(yahoo_data_file, quantities):
# Check file paths.
if not os.path.exists(yahoo_data_file):
raise OSError('Could not find %s.' % yahoo_data_file)
# Construct a DataFrame from the input CSV file.
df = pd.read_csv(yahoo_data_file, sep=',')
# Convert UNIX timestamps to actual dates.
df['datetime'] = pd.to_datetime(df.pop('timestamps'), unit='s')
# Return only the required columns with datetimes.
return df[['datetime'] + quantities]
# Loads the Yahoo dataset from combining all files in a directory into a single DataFrame.
def load_Yahoo_as_df(directory):
# Check file paths.
if not os.path.exists(directory):
raise OSError('Could not find %s.' % directory)
# Combine all sensor readings into one big Dataframe.
df = None
for index, filename in enumerate(os.listdir(directory)):
# Actual path of the file.
filepath = directory + filename
# Check whether this file has the required columns.
columns = pd.read_csv(filepath, sep=',').columns
if 'value' in columns and 'anomaly' in columns:
sensor_id = filename[14:-4]
# Get one sensor's reading.
df_sensor = get_Yahoo_record(filepath, ['value', 'anomaly'])
# Create first Dataframe, if nothing so far.
if df is None:
df = df_sensor.rename(columns={'value': sensor_id})
else:
if np.any(df['datetime'] != df_sensor['datetime']):
raise ValueError('Join between non-matching dates.')
df[sensor_id] = df_sensor['value']
df['anomaly'] = df['anomaly'] | df_sensor['anomaly']
return df
# Extracts the data present in this directory as the Yahoo dataset. Results are cached.
@cache('temp/cachedir/')
def get_Yahoo_data(directory):
df = load_Yahoo_as_df(directory)
times = df['datetime'].apply(mdates.date2num).to_numpy()
df.drop(['datetime', 'anomaly'], axis=1, inplace=True)
values = df.to_numpy()
sensor_ids = df.columns.to_numpy()
return values, sensor_ids, times
# Returns the times marked as anomalies (label 1), by taking the logical OR over individual sensor anomaly flags.
def get_Yahoo_anomalies(directory):
df = load_Yahoo_as_df(directory)
labels = df['anomaly'].to_numpy()
times = df['datetime'].apply(mdates.date2num).to_numpy()
return times[labels == 1]
# Cannot handle missing data! Use get_ELS_data() below instead.
# Extracts the quantity with no interpolation across any dimensions from the given ELS .DAT file. Results are cached.
@cache('temp/cachedir/')
def get_ELS_data_no_interpolation(els_data_file, quantity, start_time, end_time):
"""
:param els_data_file: The path of the ELS data file. Note that headers must be present in the same directory as well.
:param quantity: The quantity to be extracted.
:param start_time: Start time (as a datetime.datetime object) for readings.
:param end_time: End time (as a datetime.datetime object) for readings.
    :return: 3-tuple of (counts, energy_ranges, times)
"""
# Check input arguments - data file should exist.
if not os.path.exists(els_data_file):
raise OSError('Could not find %s.' % els_data_file)
if start_time > end_time:
raise ValueError('Start time larger than end time.')
# If we have to get all anodes, we will have to jump in and out to get all anodes.
if quantity == 'anodes_all':
return get_all_anode_ELS_data(els_data_file, start_time, end_time)
data = ELS(els_data_file)
# Convert dates to floats for matplotlib
mds = mdates.date2num(data.start_date)
# D for data.
D, scalelabel = parse_quantity(data, quantity)
# If a datetime object, convert to a matplotlib float date.
try:
xmin = mdates.date2num(start_time)
xmax = mdates.date2num(end_time)
except AttributeError:
xmin = start_time
xmax = end_time
    # Check if our time range has at least some overlap with the data.
if xmin > np.max(mds):
raise ValueError('Start time after any data.')
if xmax < np.min(mds):
raise ValueError('End time before any data.')
# Prune to desired date range.
keep = np.where((mds >= xmin) & (mds <= xmax))[0]
mds = mds[keep]
D = D[keep[:, None], :]
data.dim1_e = data.dim1_e[keep[:, None], :]
data.dim1_e_upper = data.dim1_e_upper[keep[:, None], :]
data.dim1_e_lower = data.dim1_e_lower[keep[:, None], :]
print 'Data start time:', datetime.strftime(mdates.num2date(np.min(mds)), '%d-%m-%Y/%H:%M')
print 'Data end time:', datetime.strftime(mdates.num2date(np.max(mds)), '%d-%m-%Y/%H:%M')
if not (len(mds) == len(data.dim1_e) == len(D)):
raise ValueError('Invalid number of columns.')
counts = D
energy_ranges = data.dim1_e
times = mds
# Squeeze out superfluous dimensions.
counts, energy_ranges, times = np.squeeze(counts), np.squeeze(energy_ranges), np.squeeze(times)
return counts, energy_ranges, times
# Extracts ELS data from all anodes.
def get_all_anode_ELS_data(els_data_file, start_time, end_time):
samples, energy_ranges, times = zip(*[get_ELS_data(els_data_file, 'anode' + str(anode_number), start_time, end_time) for anode_number in range(1, 9)])
# Validate times.
times = np.array(times)
if not np.allclose(np.tile(times[0], (8, 1)), times, 0.00000001):
raise ValueError('Invalid times.')
# Validate energy ranges.
energy_ranges = np.array(energy_ranges)
if not np.allclose(np.tile(energy_ranges[0], (8, 1)), energy_ranges):
raise ValueError('Invalid energy ranges.')
# Stack up counts.
samples = np.hstack(samples)
# Return just one of the energy ranges and times, since they're all the same.
return samples, energy_ranges[0], times[0]
# Extracts the quantity from the given ELS .DAT file. Results are cached.
@cache('temp/cachedir/')
def get_ELS_data(els_data_file, quantity, start_time, end_time, blur_sigma=0, bin_selection='all', filter='no_filter', filter_size=1):
"""
:param els_data_file: The path of the ELS data file. Note that the .LBL file must be present in the same directory.
:param quantity: The quantity to be extracted.
:param start_time: Start time (as a datetime.datetime object/matplotlib float date) for readings.
:param end_time: End time (as a datetime.datetime object/matplotlib float date) for readings.
:param blur_sigma: Parameter sigma (in timesteps) for the Gaussian kernel.
:param bin_selection: Selection of ELS bins.
:param filter: Filter to be applied bin-wise after the Gaussian blur.
:param filter_size: Size of the filter to be applied after the Gaussian blur.
:return: 3-tuple of (counts, energy_ranges, times)
"""
# Obtain the raw counts.
counts, energy_ranges, times = get_ELS_data_no_interpolation(els_data_file, quantity, start_time, end_time)
# The common energy range, fixed across all files. These are the bin centres of the 32-bin timesteps in the original CAPS ELS data.
common_energy_range = np.array([
2.39710098e+04, 1.75067754e+04, 1.27858037e+04, 9.34583984e+03,
6.82949463e+03, 4.98947949e+03, 3.64505884e+03, 2.66262939e+03,
1.94540930e+03, 1.42190784e+03, 1.03906091e+03, 7.59045593e+02,
5.54588379e+02, 4.04940857e+02, 2.96158539e+02, 2.16495728e+02,
1.58241898e+02, 1.15493149e+02, 8.43917389e+01, 6.18861465e+01,
4.50986481e+01, 3.29373093e+01, 2.40994759e+01, 1.76704102e+01,
1.27102909e+01, 9.25298405e+00, 6.92527056e+00, 4.90834713e+00,
3.74522614e+00, 2.58445168e+00, 1.41556251e+00, 5.79999983e-01,
])
# Rebin counts at each time-step.
new_counts = rebin_to_common_range(counts, energy_ranges, common_energy_range)
# Interpolate (and resample) counts across each bin independently as a function of time.
new_counts, new_times = interpolate_timesteps(new_counts, times, time_resolution_s=2)
# We might have negative values after interpolation. Clip these to 0, so that they make physical sense.
new_counts[new_counts < 0] = 0
# Smooth along time dimension.
new_counts = gaussian_blur(new_counts, blur_sigma)
# If we have to ignore the unpaired bin, remove it now.
if bin_selection == 'ignore_unpaired':
new_counts = new_counts[:, :-1]
common_energy_range = common_energy_range[:-1]
elif bin_selection == 'center':
new_counts = new_counts[:, 15:-7]
common_energy_range = common_energy_range[15:-7]
# Apply the filter.
new_counts = Filter(filter, filter_size).filter(new_counts)
# Since we have the same energy ranges at each timestep, we repeat the common energy range.
new_energy_ranges = np.repeat(common_energy_range[:, np.newaxis], new_counts.shape[0], axis=1).T
# Print bin-wise statistics.
for bin_dimension in range(new_counts.shape[1]):
bin_counts = new_counts[:, bin_dimension]
valid_indices = ~np.isnan(bin_counts)
bin_mean = np.mean(bin_counts[valid_indices])
bin_std = np.std(bin_counts[valid_indices])
print 'Bin %d: Mean = %0.2f, Standard Dev. = %0.2f' % (bin_dimension, bin_mean, bin_std)
# The new time-series has a common energy range of 32 bins across all timesteps, and is regularly sampled.
return new_counts, new_energy_ranges, new_times
# Rebin the counts at each time-step onto the common energy range (averaging adjacent bins when 63 bins are present).
def rebin_to_common_range(counts, energy_ranges, common_energy_range):
# Rebin at each time-step using valid (not 'NaN') entries.
new_counts = np.full((counts.shape[0], common_energy_range.shape[0]), np.nan)
for index, (timestep_counts, timestep_energy_range) in enumerate(zip(counts, energy_ranges)):
valid_bins = ~np.isnan(timestep_energy_range)
# How many valid counts do we have?
num_valid_bins = np.sum(valid_bins)
corresponding_counts = timestep_counts[valid_bins]
# If we have 32 bins, keep them as is.
if num_valid_bins == 32:
new_counts[index] = corresponding_counts
# If we have 63 bins, combine all the adjacent bins, except the last, which is as is.
# Note that the last bin is the one with the lowest mean energy.
elif num_valid_bins == 63:
new_counts[index, :-1] = (corresponding_counts[0:-1:2] + corresponding_counts[1:-1:2])/2
new_counts[index, -1] = corresponding_counts[-1]
# Otherwise, we'll fill this timestep in later.
else:
pass
return new_counts
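# Hedged example (an addition): rebins a single 32-bin timestep onto the common
# range; the counts and energy values are arbitrary illustration data.
def _example_rebin_to_common_range():
    common = np.arange(32, dtype=float)
    counts = np.arange(32, dtype=float).reshape(1, 32)
    energy_ranges = np.ones((1, 32))
    # With exactly 32 valid bins, the counts are passed through unchanged.
    return rebin_to_common_range(counts, energy_ranges, common)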
# Interpolate (and resample) counts across each bin independently as a function of time, with linear spline interpolation.
def interpolate_timesteps(counts, times, time_resolution_s=2):
time_resolution_days = time_resolution_s / (60 * 60 * 24)
resampled_times = np.arange(times[0], times[-1], time_resolution_days)
resampled_counts = np.zeros((resampled_times.shape[0], counts.shape[1]))
# Rebin along each dimension using valid (not 'NaN') entries.
for bin_dimension in range(counts.shape[1]):
bin_counts = counts[:, bin_dimension]
valid_indices = ~np.isnan(bin_counts)
valid_counts = bin_counts[valid_indices]
valid_times = times[valid_indices]
interpolated_counts_function = scipy.interpolate.interp1d(valid_times, valid_counts, kind='slinear', fill_value='extrapolate', assume_sorted=True)
resampled_counts[:, bin_dimension] = interpolated_counts_function(resampled_times)
return resampled_counts, resampled_times
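# Illustrative sketch (an addition, not in the original module): resamples a toy
# two-bin series onto a regular 2-second grid; the values are assumed demo data.
def _example_interpolate_timesteps():
    times_days = np.array([0.0, 4.0, 10.0]) / (60 * 60 * 24)  # irregular sample times, in days
    counts = np.array([[0.0, 1.0], [4.0, 5.0], [10.0, 11.0]])
    # Returns counts resampled roughly every 2 seconds between the first and last sample.
    return interpolate_timesteps(counts, times_days, time_resolution_s=2)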
# Outdated. No longer used in get_ELS_data().
# Fill in missing timesteps with extra entries, so that the time-series appears regularly sampled.
def interpolate_timesteps_duplication(counts, energy_ranges, times):
# Obtain the time-resolution of sampling.
time_differences = np.diff(times)
time_resolution = np.min(time_differences)
new_times = times
new_counts = counts
new_energy_ranges = energy_ranges
inserted = 0
# Add in extra entries wherever we have undersampled - that is, whenever we have a timestamp difference >= 2 * minimum timestamp difference (time resolution).
for index in np.where(time_differences >= 2 * time_resolution)[0]:
# Fill in samples between this timestamp and the next timestamp at steps with size equal to the time resolution.
for new_index, timestep in enumerate(np.arange(times[index] + time_resolution, times[index + 1] - time_resolution, time_resolution), start=index + inserted + 1):
new_times = np.insert(new_times, new_index, timestep)
new_counts = np.insert(new_counts, new_index, counts[index], axis=0)
new_energy_ranges = np.insert(new_energy_ranges, new_index, energy_ranges[index], axis=0)
inserted += 1
return new_counts, new_energy_ranges, new_times
# Takes the average of the energies with the num_rank highest counts at each timestep.
def ranked_average_energy(counts, energies, num_rank=5):
"""
:param counts: counts corresponding to energies
:param energies: energy values
:return: numpy array of average of energies chosen
>>> counts = np.array([[1, 4, 1, 5, 2], [5, 4, 1, 2, 1]])
>>> energies = np.array([1, 2, 3, 4, 5])
>>> ranked_average_energy(counts, energies, 2)
array([3. , 1.5])
>>> ranked_average_energy(counts, energies, 4)
array([3.5, 3. ])
"""
# Select the indexes with the 'num_rank' highest counts.
indices = np.argsort(counts, axis=1)[:, -num_rank:]
# Select the energies corresponding to these indices.
energies_full = np.vstack([energies] * counts.shape[0])
energies_selected = energies_full[np.arange(energies_full.shape[0])[:, None], indices]
# Take the average of these energies.
return np.average(energies_selected, axis=1)
# Takes the average energy weighted according to counts.
def weighted_average_energy(counts, energies):
"""
:param counts: counts corresponding to energies
:param energies: energy values
:return: numpy array of average of energies chosen
>>> counts = np.array([[1, 4, 1, 5, 2], [5, 4, 1, 2, 1]])
>>> energies = np.array([1, 2, 3, 4, 5])
>>> weighted_average_energy(counts, energies)
array([3.23076923, 2.23076923])
"""
# Multiply energies by counts to weight.
energies_counts_product = np.multiply(energies, counts)
# Take the average of these energies.
return np.sum(energies_counts_product, axis=1) / np.sum(counts, axis=1)
# Takes the total energy - energies multiplied by counts and summed.
def total_energy(counts, energies):
"""
:param counts: counts corresponding to energies
:param energies: energy values
    :return: numpy array of the total energy (counts-weighted sum) at each timestep
>>> counts = np.array([[1, 4, 1, 5, 2], [5, 4, 1, 2, 1]])
>>> energies = np.array([1, 2, 3, 4, 5])
>>> total_energy(counts, energies)
array([42, 29])
"""
# Multiply energies by counts to weight.
energies_counts_product = np.multiply(energies, counts)
# Take the average of these energies.
return np.sum(energies_counts_product, axis=1)
# Applies a Gaussian blur to the 2D sequence, along the given dimensions only.
def gaussian_blur(sequence, sigma, dims=[0]):
sigmas = np.zeros(len(sequence.shape))
sigmas[np.asarray(dims)] = sigma
return gaussian_filter(np.array(sequence).astype(float), sigma=sigmas)
# Applies a uniform filter to a sequence - elements are replaced by the average of their neighbours.
def uniform_blur(sequence, filter_size):
return uniform_filter(np.array(sequence).astype(float), filter_size, mode='constant')
# Applies a median filter to the sequence.
def median_blur(sequence, filter_size):
return median_filter(sequence, filter_size)
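# Hedged example (an addition): applies the three smoothing helpers above to a
# small 1D array; the input values are arbitrary illustration data.
def _example_blurs():
    series = np.array([0.0, 0.0, 10.0, 0.0, 0.0])
    return (gaussian_blur(series, sigma=1.0, dims=[0]),
            uniform_blur(series, filter_size=3),
            median_blur(series, filter_size=3))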
# Assuming the objective function is unimodal, we can run ternary search to find the minimum.
def ternary_search(objective, low, high, eps=0.0001):
if high < low:
raise ValueError('Invalid parameters: high must be greater than low.')
if objective(low) == np.inf:
        raise ValueError('Objective function takes value infinity.')
def restrict_domain(objective, low, high):
if objective(high) == np.inf:
for jump in 2 ** np.linspace(0, 10, 11) / 1000:
if objective(low + jump) < np.inf:
high = low + jump
break
return low, high
# Ensure that we do not have any infinity values within this domain by restricting it!
low, high = restrict_domain(objective, low, high)
while high - low > eps:
m1 = low + (high - low)/3
m2 = low + 2*(high - low)/3
if objective(m1) < objective(m2):
high = m2
else:
low = m1
return low
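# Minimal sketch (an addition, not original code): minimizes a simple convex
# function with ternary_search; the quadratic objective is illustrative only.
def _example_ternary_search():
    objective = lambda x: (x - 2.0) ** 2
    # Converges to approximately 2.0 within the default tolerance.
    return ternary_search(objective, low=0.0, high=5.0)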
# Runs one run of simulated annealing to minimize the objective function.
def simulated_annealing(objective, init_state):
class NoiseThresholdOptimization(Annealer):
# Move state randomly, ensuring always positive.
def move(self):
self.state += np.random.randn() * self.state
self.state = max(self.state, 0)
# Energy of a state is just the objective function evaluated in that state.
def energy(self):
return objective(self.state)
best_state, best_ratio = NoiseThresholdOptimization(init_state).anneal()
return best_state
# Reconstructs original data from PCA.
def reconstruct_from_PCA(data, mean, pca_obj):
reconstructed_data = np.dot(data, pca_obj.components_)
reconstructed_data += mean
return reconstructed_data
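# Hedged usage sketch (an addition; assumes scikit-learn is available, which the
# original module does not import): round-trips a small matrix through PCA and
# the reconstruction helper above.
def _example_reconstruct_from_PCA():
    from sklearn.decomposition import PCA
    X = np.random.rand(10, 4)
    pca_obj = PCA(n_components=2)
    transformed = pca_obj.fit_transform(X)
    # Approximate reconstruction of X from its two principal components.
    return reconstruct_from_PCA(transformed, X.mean(axis=0), pca_obj)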
# Gets the first index from the start such that the prefix sum is greater than or equal to the given fraction of the total.
# Similarly, gets the first index from the end. Both of these are returned as a 2-tuple.
def fraction_sum_indices(arr, fraction=0.5):
"""
>>> fraction_sum_indices([1, 2, 3])
(1, 2)
>>> fraction_sum_indices([1, 8, 1])
(1, 1)
>>> fraction_sum_indices([0, 1, 0])
(1, 1)
>>> fraction_sum_indices([3, 2, 1])
(0, 1)
>>> fraction_sum_indices([3, 2, 1, 6, 8])
(3, 3)
>>> fraction_sum_indices([])
(None, None)
"""
# The indices we will return.
left, right = None, None
# Sum of the entire array.
sum = np.sum(arr)
# Prefixes from the left.
currprefix = 0
for index, val in enumerate(arr):
currprefix += val
if currprefix >= fraction * sum:
left = index
break
# Suffixes from the right.
currsuffix = 0
for index, val in reversed(list(enumerate(arr))):
currsuffix += val
if currsuffix >= fraction * sum:
right = index
break
return left, right
# KL-divergence between two multivariate normals.
def kl_divergence_normals(mean_1, covar_1, mean_2, covar_2):
"""
>>> '%0.3f' % kl_divergence_normals([2], np.eye(1), [1], np.eye(1))
'0.500'
>>> '%0.3f' % kl_divergence_normals([1], np.eye(1), [1], 2 * np.eye(1))
'0.097'
>>> '%0.3f' % kl_divergence_normals(np.ones(3), np.eye(3), np.ones(3), np.eye(3))
'0.000'
"""
# Reshaping for numpy to multiply correctly.
mean_1 = np.reshape(mean_1, (len(mean_1), 1))
mean_2 = np.reshape(mean_2, (len(mean_2), 1))
# Plug into the big formula.
kl_divergence = 0.5 * (np.log(np.linalg.det(covar_2)/np.linalg.det(covar_1)) - len(mean_1) + np.trace(np.matmul(np.linalg.inv(covar_2), covar_1)) + np.squeeze(np.matmul((mean_2 - mean_1).T, np.matmul(np.linalg.inv(covar_2), (mean_2 - mean_1)))))
# Due to numerical precision, we sometimes end up negative but very close to 0.
if kl_divergence < 0:
warnings.warn('Computed KL-divergence %0.3f less than 0.' % kl_divergence)
kl_divergence = 0
return kl_divergence
# Wrapper for vectorized functions to handle empty arrays.
def check_if_empty(func):
def wrapped_func(array):
if np.asarray(array).size == 0:
return []
else:
return func(array)
return wrapped_func
# Convert to floats (unit days). We vectorize this to work over arrays nicely.
@check_if_empty
@np.vectorize
def datestring_to_float(timestep, format_string='%d-%m-%Y/%H:%M:%S'):
return mdates.date2num(datetime.strptime(timestep, format_string))
# Convert to strings. We vectorize this to work over arrays nicely.
@check_if_empty
@np.vectorize
def float_to_datestring(timestep, format_string='%d-%m-%Y/%H:%M:%S'):
return datetime.strftime(mdates.num2date(timestep), format_string)
# Convert a string to a datetime object.
def convert_to_dt(timestep):
return dateutil.parser.parse(timestep, dayfirst=True)
# Gets the day of the year from a string/datetime object.
def day_of_year(dt):
try:
return dt.timetuple().tm_yday
except AttributeError:
return day_of_year(convert_to_dt(dt))
# Gets the year from a string/datetime object.
def year(dt):
try:
return dt.year
except AttributeError:
return year(convert_to_dt(dt))
# Gets the hour from a string/datetime object.
def hour(dt):
try:
return dt.hour
except AttributeError:
return hour(convert_to_dt(dt))
# Returns the ELS .DAT file corresponding to a string/datetime object.
def get_ELS_file_name(dt, remove_extension=False):
"""
>>> get_ELS_file_name('28-06-2004/22:00')
'ELS_200418018_V01.DAT'
>>> get_ELS_file_name('28-06-2004/09:00')
'ELS_200418006_V01.DAT'
>>> get_ELS_file_name('29-06-2004/09:00')
'ELS_200418106_V01.DAT'
>>> get_ELS_file_name('29-06-2005/09:00')
'ELS_200518006_V01.DAT'
>>> get_ELS_file_name('30-06-2005/09:00')
'ELS_200518106_V01.DAT'
>>> get_ELS_file_name(datetime(year=2004, month=1, day=1))
'ELS_200400100_V01.DAT'
>>> get_ELS_file_name(datetime(year=2004, month=1, day=2))
'ELS_200400200_V01.DAT'
"""
try:
dt = convert_to_dt(dt)
except TypeError:
pass
def doy_map(doy):
return '0' * (3 - len(str(doy))) + str(doy)
def hour_map(hour):
def expand(num):
if num == 0:
return '00'
elif num == 1:
return '06'
elif num == 2:
return '12'
elif num == 3:
return '18'
return expand(hour // 6)
if remove_extension:
basestring = 'ELS_%d%s%s_V01'
else:
basestring = 'ELS_%d%s%s_V01.DAT'
return basestring % (year(dt), doy_map(day_of_year(dt)), hour_map(hour(dt)))
# Returns a nicer string representation of a dictionary.
def format_dict(d):
return ', '.join(['%s: %s' % (k, v) for k, v in d.iteritems()])
```
#### File: libeos/pims/libeos_convertor_caps.py
```python
from __future__ import division
import numpy as np
import os
from pathlib2 import Path
from datetime import datetime as dt
from matplotlib.dates import date2num
import yaml
# Internal dependencies.
from data_utils import get_ELS_data_no_interpolation
from libeos_convertor import PIMSFile, PIMSObs
# All timestamps are with respect to this date.
TIMESTAMP_START = date2num(dt(year=2000, month=1, day=1))
# Mode 0 is the normal, 63-bin mode.
# Mode 1 is the energy-summed, 32-bin mode.
def compute_modes(energy_ranges):
return (np.sum(~np.isnan(energy_ranges), axis=1) != 63).astype(np.uint32)
# Convert times in days to times in seconds since TIMESTAMP_START.
# CAPS observations start in 2004, well after this epoch, so the offsets stay positive.
def convert_to_seconds(times):
times -= TIMESTAMP_START
times = np.round(times * (60 * 60 * 24)).astype(np.uint32)
return times
# Based on how mode 0 and mode 1 are defined.
def compute_num_bins(modes):
num_bins = np.zeros(modes.shape, dtype=np.uint32)
num_bins[np.where(modes == 0)[0]] = 63
num_bins[np.where(modes == 1)[0]] = 32
return num_bins
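# Illustrative sketch (an addition, not from the original converter): shows how
# the helpers above map a toy energy-range array to modes and bin counts.
def _example_mode_helpers():
    # Two timesteps: one with 63 valid bins (mode 0) and one with 32 (mode 1).
    energy_ranges = np.full((2, 63), np.nan)
    energy_ranges[0, :] = 1.0
    energy_ranges[1, :32] = 1.0
    modes = compute_modes(energy_ranges)  # array([0, 1], dtype=uint32)
    return modes, compute_num_bins(modes)  # bin counts: array([63, 32], dtype=uint32)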
# Converts an ELS data file into a PIMS file plus its metadata.
def convert(els_data_file, quantity='anode5', start_time=dt.min, end_time=dt.max, index=0):
# Read CAPS data.
counts, energy_ranges, times = get_ELS_data_no_interpolation(els_data_file, quantity, start_time, end_time)
# Convert and normalize times to seconds.
times = convert_to_seconds(times)
# Compute modes.
modes = compute_modes(energy_ranges)
# Compute num_bins.
num_bins = compute_num_bins(modes)
# Now, convert!
    observations_converted = [PIMSObs(obs_id=obs_index,
                                      counts=timestep_counts,
                                      mode=mode,
                                      timestamp=timestamp,
                                      num_bins=timestep_bins)
                              for obs_index, (timestep_counts, mode, timestamp, timestep_bins)
                              in enumerate(zip(counts, modes, times, num_bins))]
# These are always fixed!
# Again, mode 0 is 63 bins, mode 1 is 32 bins.
mode_bin_definitions = [
[2.6040e+04, 2.2227e+04, 1.8991e+04, 1.6256e+04,
1.3876e+04, 1.1867e+04, 1.0143e+04, 8.6740e+03,
7.4150e+03, 6.3360e+03, 5.4160e+03, 4.6300e+03,
3.9560e+03, 3.3830e+03, 2.8900e+03, 2.4710e+03,
2.1120e+03, 1.8050e+03, 1.5440e+03, 1.3190e+03,
1.1280e+03, 9.6410e+02, 8.2400e+02, 7.0430e+02,
6.0180e+02, 5.1480e+02, 4.3940e+02, 3.7590e+02,
3.2150e+02, 2.7480e+02, 2.3500e+02, 2.0090e+02,
1.7170e+02, 1.4690e+02, 1.2510e+02, 1.0740e+02,
9.1760e+01, 7.8180e+01, 6.7150e+01, 5.7450e+01,
4.9000e+01, 4.1810e+01, 3.5840e+01, 3.0490e+01,
2.6340e+01, 2.2210e+01, 1.9260e+01, 1.6330e+01,
1.3980e+01, 1.1640e+01, 9.8900e+00, 8.7200e+00,
7.5600e+00, 6.3900e+00, 5.2300e+00, 4.6400e+00,
4.0600e+00, 3.4800e+00, 2.9000e+00, 2.3200e+00,
1.7400e+00, 1.1600e+00, 5.8000e-01],
[2.3971e+04, 1.7506e+04, 1.2785e+04, 9.3458e+03,
6.8294e+03, 4.9894e+03, 3.6450e+03, 2.6626e+03,
1.9454e+03, 1.4219e+03, 1.0390e+03, 7.5904e+02,
5.5458e+02, 4.0494e+02, 2.9615e+02, 2.1649e+02,
1.5824e+02, 1.1549e+02, 8.4391e+01, 6.1886e+01,
4.5098e+01, 3.2937e+01, 2.4099e+01, 1.7670e+01,
1.2710e+01, 9.2529e+00, 6.9252e+00, 4.9083e+00,
3.7452e+00, 2.5844e+00, 1.4155e+00, 5.7999e-01],
]
# Now, file-level properties.
file_id = index
max_bins = 63
pimsfile = PIMSFile(file_id=file_id, max_bins=max_bins,
mode_bin_definitions=mode_bin_definitions,
observations=observations_converted)
# We also record metadata.
# We use raw (not interpolated) CAPS data here, so some properties, like
# 'blur_sigma' and 'bin_selection', are all defaults.
metadata = {
'data_file': os.path.abspath(els_data_file),
'quantity': quantity,
'TIMESTAMP_START': TIMESTAMP_START,
'blur_sigma': 0,
'bin_selection': 'all',
'filter': 'no_filter',
'filter_size': 0,
}
return pimsfile, metadata
# Returns a name for the output file corresponding to an input file.
def libeos_name(file_name):
return os.path.splitext(os.path.basename(file_name))[0] + '.pim'
# Returns a name for the metadata file corresponding to an input file.
def metadata_name(file_name):
return os.path.splitext(os.path.basename(file_name))[0] + '.meta'
def main(input, output):
# Multiplex depending on whether 'input' and 'output' are files or directories.
if os.path.isdir(input):
input_files = [input + '/' + input_f for input_f in os.listdir(input)]
if os.path.exists(output):
if not os.path.isdir(output):
raise ValueError('Output not a directory.')
else:
# If 'output' doesn't exist, we will create a directory with the same name.
Path(output).mkdir(parents=True)
output_files = [output + '/' + libeos_name(input) for input in input_files]
metadata_files = [output + '/' + metadata_name(input) for input in input_files]
else:
input_files = [input]
if os.path.isdir(output):
output_files = [output + '/' + libeos_name(input)]
metadata_files = [output + '/' + metadata_name(input)]
else:
output_files = [output]
            metadata_files = [os.path.splitext(output)[0] + '.meta']
# Loop over each pair.
for index, (input_f, output_f, metadata_f) in enumerate(zip(input_files, output_files, metadata_files)):
if '.DAT' not in input_f:
            print 'Ignoring file %s as it is not an ELS .DAT file.' % input_f
continue
pimsfile, metadata = convert(index=index, els_data_file=input_f)
with open(output_f, 'wb') as f:
f.write(pimsfile.tobytes())
with open(metadata_f, 'w') as f:
yaml.dump(metadata, f)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
parser.add_argument('input', help='Input ELS file (or a directory containing ELS files).')
parser.add_argument('output', help='Output .pim file (or a directory which will be filled with .pim files).')
args = parser.parse_args()
main(**vars(args))
``` |
{
"source": "JPLMLIA/mastcam-noveltydet",
"score": 2
} |
#### File: gan/bigan/test_mcam.py
```python
import time
import numpy as np
import tensorflow as tf
import logging
import importlib
import sys
import bigan.mcam_utilities as network
import data.mcam as data
from utils.evaluations import do_prc, do_roc
from sklearn.metrics import roc_curve, auc
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
RANDOM_SEED = 13
FREQ_PRINT = 20  # frequency for writing image summaries to TensorBoard [default: 20]
CKPT = './bigan_results/train_logs/mcam/fm/0.1/demo/42/model.ckpt'
def get_getter(ema):  # returns a getter that swaps in moving-average variables, useful for semi-supervised learning (cf. Salimans et al.)
def ema_getter(getter, name, *args, **kwargs):
var = getter(name, *args, **kwargs)
ema_var = ema.average(var)
return ema_var if ema_var else var
return ema_getter
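# Typical (assumed) use of get_getter, as in related BiGAN codebases: pass it as
# the custom_getter of a reused variable scope so evaluation reads EMA-smoothed
# weights, e.g.
#   with tf.variable_scope('encoder_model', reuse=True, custom_getter=get_getter(enc_ema)):
#       z_gen_ema = enc(input_pl, is_training=is_training_pl)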
def test(weight, method, degree, random_seed, label):
""" Runs the Bigan on the Mastcam dataset
Note:
Saves summaries on tensorboard. To display them, please use cmd line
tensorboard --logdir=model.training_logdir() --port=number
Args:
nb_epochs (int): number of epochs
weight (float, optional): weight for the anomaly score composition
method (str, optional): 'fm' for ``Feature Matching`` or "cross-e"
for ``cross entropy``, "efm" etc.
anomalous_label (int): int in range 0 to 10, is the class/digit
which is considered outlier
"""
# Placeholders
input_pl = tf.placeholder(tf.float32, shape=data.get_shape_input(), name="input")
is_training_pl = tf.placeholder(tf.bool, [], name='is_training_pl')
learning_rate = tf.placeholder(tf.float32, shape=(), name="lr_pl")
# Test Data
testx, testy, testnames = data.get_test('all')
# Parameters
starting_lr = network.learning_rate
batch_size = network.batch_size
latent_dim = network.latent_dim
ema_decay = 0.999
rng = np.random.RandomState(RANDOM_SEED)
nr_batches_test = int(testx.shape[0] / batch_size)
gen = network.decoder
enc = network.encoder
dis = network.discriminator
with tf.variable_scope('encoder_model'):
z_gen = enc(input_pl, is_training=is_training_pl)
with tf.variable_scope('generator_model'):
z = tf.random_normal([batch_size, latent_dim])
x_gen = gen(z, is_training=is_training_pl)
reconstruct = gen(z_gen, is_training=is_training_pl, reuse=True)
with tf.variable_scope('discriminator_model'):
l_encoder, inter_layer_inp = dis(z_gen, input_pl, is_training=is_training_pl)
l_generator, inter_layer_rct = dis(z, x_gen, is_training=is_training_pl, reuse=True)
#l_generator, inter_layer_rct = dis(z_gen, reconstruct, is_training=is_training_pl, reuse=True)
with tf.name_scope('loss_functions'):
# discriminator
loss_dis_enc = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.random_uniform(shape=tf.shape(l_encoder), minval=0.9, maxval=1.0),logits=l_encoder))
#loss_dis_enc = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.constant(0.9, shape=tf.shape(l_encoder)),logits=l_encoder))
loss_dis_gen = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(l_generator),logits=l_generator))
# loss_dis_enc = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(l_encoder),logits=l_encoder))
# loss_dis_gen = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(l_generator),logits=l_generator))
loss_discriminator = loss_dis_gen + loss_dis_enc
# generator
#loss_reconstruction = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=input_pl, logits=reconstruct))
loss_reconstruction = tf.reduce_mean(tf.losses.mean_squared_error(labels=input_pl, predictions=reconstruct))
#loss_reconstruction = tf.norm(tf.contrib.layers.flatten(input_pl-reconstruct), ord=2)
loss_features = tf.norm(tf.contrib.layers.flatten(inter_layer_inp-inter_layer_rct), ord=2)
# loss_dis_gen_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(l_generator),logits=l_generator))
# loss_dis_gen_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(input_pl),logits=input_pl))
# loss_dis = -tf.log(loss_dis_gen_fake) + tf.log(1-loss_dis_gen_real)
loss_dis = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(l_generator),logits=l_generator))
loss_generator = 1*loss_dis + 0.4*loss_reconstruction + 0*loss_features
# encoder
# test adding loss in encoder instead?
loss_encoder = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(l_encoder),logits=l_encoder)) + 0.4*loss_reconstruction
with tf.name_scope('optimizers'):
# control op dependencies for batch norm and trainable variables
tvars = tf.trainable_variables()
dvars = [var for var in tvars if 'discriminator_model' in var.name]
gvars = [var for var in tvars if 'generator_model' in var.name]
evars = [var for var in tvars if 'encoder_model' in var.name]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
update_ops_gen = [x for x in update_ops if ('generator_model' in x.name)]
update_ops_enc = [x for x in update_ops if ('encoder_model' in x.name)]
update_ops_dis = [x for x in update_ops if ('discriminator_model' in x.name)]
optimizer_dis = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.5, name='dis_optimizer')
#optimizer_dis = tf.train.GradientDescentOptimizer(learning_rate=0.0001, name='dis_optimizer')
optimizer_gen = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.5, name='gen_optimizer')
optimizer_enc = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.5, name='enc_optimizer')
with tf.control_dependencies(update_ops_gen):
gen_op = optimizer_gen.minimize(loss_generator, var_list=gvars)
with tf.control_dependencies(update_ops_enc):
enc_op = optimizer_enc.minimize(loss_encoder, var_list=evars)
with tf.control_dependencies(update_ops_dis):
dis_op = optimizer_dis.minimize(loss_discriminator, var_list=dvars)
# Exponential Moving Average for estimation
dis_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
maintain_averages_op_dis = dis_ema.apply(dvars)
with tf.control_dependencies([dis_op]):
train_dis_op = tf.group(maintain_averages_op_dis)
gen_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
maintain_averages_op_gen = gen_ema.apply(gvars)
with tf.control_dependencies([gen_op]):
train_gen_op = tf.group(maintain_averages_op_gen)
enc_ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
maintain_averages_op_enc = enc_ema.apply(evars)
with tf.control_dependencies([enc_op]):
train_enc_op = tf.group(maintain_averages_op_enc)
with tf.variable_scope('encoder_model'):
z_gen_ema = enc(input_pl, is_training=is_training_pl,
getter=get_getter(enc_ema), reuse=True)
with tf.variable_scope('generator_model'):
reconstruct_ema = gen(z_gen_ema, is_training=is_training_pl,
getter=get_getter(gen_ema), reuse=True)
with tf.variable_scope('discriminator_model'):
l_encoder_ema, inter_layer_inp_ema = dis(z_gen_ema,
input_pl,
is_training=is_training_pl,
getter=get_getter(dis_ema),
reuse=True)
l_generator_ema, inter_layer_rct_ema = dis(z_gen_ema,
reconstruct_ema,
is_training=is_training_pl,
getter=get_getter(dis_ema),
reuse=True)
with tf.name_scope('Testing'):
with tf.variable_scope('Reconstruction_loss'):
delta = input_pl - reconstruct_ema
delta_flat = tf.contrib.layers.flatten(delta)
#gen_score = tf.reduce_mean(tf.losses.mean_squared_error(input_pl, reconstruct_ema))
gen_score = tf.norm(delta_flat, ord=degree, axis=1,
keep_dims=False, name='epsilon')
with tf.variable_scope('Discriminator_loss'):
if method == "cross-e":
dis_score = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(l_generator_ema),logits=l_generator_ema)
elif method == "fm":
fm = inter_layer_inp_ema - inter_layer_rct_ema
fm = tf.contrib.layers.flatten(fm)
dis_score = tf.norm(fm, ord=degree, axis=1,
keep_dims=False, name='d_loss')
dis_score = tf.squeeze(dis_score)
with tf.variable_scope('Score'):
list_scores = (1 - weight) * gen_score + weight * dis_score
saver = tf.train.Saver()
with tf.Session() as sess:
# Restore weights/tensors from disk
saver.restore(sess, CKPT)
print("Model restored.")
inds = rng.permutation(testx.shape[0])
testx = testx[inds] # shuffling dataset
testy = testy[inds] # shuffling dataset
testnames = testnames[inds]
scores = []
inference_time = []
test_encodings = np.ndarray([testx.shape[0], network.latent_dim])
test_reconstructions = np.ndarray(testx.shape)
# Create scores
for t in range(nr_batches_test):
# construct randomly permuted minibatches
ran_from = t * batch_size
ran_to = (t + 1) * batch_size
begin_val_batch = time.time()
feed_dict = {input_pl: testx[ran_from:ran_to],
is_training_pl:False}
scores += sess.run(list_scores,
feed_dict=feed_dict).tolist()
# store z_gen_ema (encoding)
test_encodings[ran_from:ran_to] = sess.run(z_gen_ema,
feed_dict=feed_dict).tolist()
# store reconstruct_ema (reconstruction)
test_reconstructions[ran_from:ran_to] = sess.run(reconstruct_ema,
feed_dict=feed_dict).tolist()
inference_time.append(time.time() - begin_val_batch)
print('Testing : mean inference time is %.4f' % (
np.mean(inference_time)))
ran_from = nr_batches_test * batch_size
ran_to = (nr_batches_test + 1) * batch_size
size = testx[ran_from:ran_to].shape[0]
fill = np.ones([batch_size - size, 64, 64, 6])
batch = np.concatenate([testx[ran_from:ran_to], fill], axis=0)
feed_dict = {input_pl: batch,
is_training_pl: False}
batch_score = sess.run(list_scores,
feed_dict=feed_dict).tolist()
scores += batch_score[:size]
roc_auc = do_roc(scores, testy, testnames,
file_name=r'bigan/mcam/{}/{}/{}'.format(method, weight,
label),
directory=r'results/bigan/mcam/{}/{}/'.format(method,
weight))
os.mkdir('results/bigan/mcam/{}/{}/{}'.format(method,
weight, label))
os.mkdir(os.path.join('results/bigan/mcam/{}/{}/{}'.format(method,
weight, label), 'reconstructions'))
os.mkdir(os.path.join('results/bigan/mcam/{}/{}/{}'.format(method,
weight, label), 'encodings'))
os.mkdir(os.path.join('results/bigan/mcam/{}/{}/{}'.format(method,
weight, label), 'inputs'))
os.mkdir(os.path.join('results/bigan/mcam/{}/{}/{}'.format(method,
weight, label), 'error_maps'))
for i in range(testx.shape[0]):
# Save the reconstructed images
np.save(os.path.join('results/bigan/mcam/{}/{}/{}'.format(method,
weight, label), 'reconstructions', '%s.npy' % testnames[i]), test_reconstructions[i])
# Save the encoded maps
np.save(os.path.join('results/bigan/mcam/{}/{}/{}'.format(method,
weight, label), 'encodings', '%s.npy' % testnames[i]), test_encodings[i])
# Save the input images
np.save(os.path.join('results/bigan/mcam/{}/{}/{}'.format(method,
weight, label), 'inputs', '%s.npy' % testnames[i]), testx[i])
# Save the error maps between input and reconstructed images
np.save(os.path.join('results/bigan/mcam/{}/{}/{}'.format(method,
weight, label), 'error_maps', '%s.npy' % testnames[i]), np.square(np.subtract(testx[i], test_reconstructions[i])))
print("Testing | ROC AUC = {:.4f}".format(roc_auc))
def run(nb_epochs, weight, method, degree, label, random_seed=42):
""" Runs the training process"""
with tf.Graph().as_default():
# Set the graph level seed
tf.set_random_seed(random_seed)
test(weight, method, degree, random_seed, label)
``` |
{
"source": "JPLMLIA/OWLS-Autonomy",
"score": 2
} |
#### File: src/cli/ACME_evaluation_strict.py
```python
import sys, os
import os.path as op
import glob
import argparse
import csv
from pathlib import Path
import logging
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import logger
def calc_tp(o, l, mass_t=30, time_t=30):
otp = 0
ltp = 0
seen_labels = set()
for opeak in o:
# see which label peaks are within threshold of output peaks
thresh_mask = (np.abs(l[:,0] - opeak[0]) <= mass_t) & (np.abs(l[:,1] - opeak[1]) <= time_t)
if np.sum(thresh_mask) >= 1:
otp += 1
# output matches to more than one label
for lpeak_idx in thresh_mask.nonzero()[0]:
if lpeak_idx not in seen_labels:
seen_labels.add(lpeak_idx)
ltp = len(seen_labels)
return otp, ltp
def calc_tp_strict(o, l, mass_t=30, time_t=30):
seen_outputs = set()
for lpeak in l:
# see which output peaks are within threshold of labeled peak
thresh_mask = (np.abs(o[:,0] - lpeak[0]) <= mass_t) & (np.abs(o[:,1] - lpeak[1]) <= time_t)
thresh_peaks = o[thresh_mask]
if len(thresh_peaks) > 0:
thresh_dists = [np.sqrt((lpeak[0]-tpeak[0])**2 + (lpeak[1]-tpeak[1])**2) for tpeak in thresh_peaks]
closest_peak = thresh_peaks[np.argmin(thresh_dists)]
seen_outputs.add(tuple(closest_peak))
return len(seen_outputs)
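# Illustrative example (hypothetical peak coordinates, not from a real run): with predicted peaks
# o = np.array([[100., 200.], [400., 900.]]) and a single labeled peak l = np.array([[110., 215.]]),
# calc_tp_strict(o, l, mass_t=30, time_t=30) returns 1 -- only the closest in-threshold prediction
# is matched to each label, so duplicate detections of the same label are not double counted.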
def main():
parser = argparse.ArgumentParser()
parser.add_argument('acme_outputs', help='Found peaks from analyzer -- Passed as globs')
parser.add_argument('acme_labels', help='Labels to compare found peaks to -- Passed as globs')
parser.add_argument('--hand_labels', action='store_true',
help='Expects hand labels in --path_labels')
parser.add_argument('--mass_threshold', default=30,
help='How far can peaks be apart from each other in mass [mass index] '
'to be considered the same peak (12 mass indices correspond to 1 amu)')
parser.add_argument('--time_threshold', default=30,
help='How far can peaks be apart from each other in time [time index] '
'to be considered the same peak (164 time indices correspond to 1 minute)')
parser.add_argument('--ambiguous', action='store_true',
help='Some peaks are labeled as ambiguous by SMEs. Call this flag to include '
'them as true peak labels.')
parser.add_argument('--log_name', default="ACME_evaluation.log",
help="Filename for the pipeline log. Default is ACME_evaluation.log")
parser.add_argument('--log_folder', default=op.join(op.abspath(op.dirname(__file__)), "logs"),
help="Folder path to store logs. Default is cli/logs")
args = parser.parse_args()
logger.setup_logger(args.log_name, args.log_folder)
# parse args
outputs = sorted(glob.glob(args.acme_outputs))
if not len(outputs):
logging.error(f"No ACME outputs found at {args.acme_outputs}")
sys.exit(1)
labels = sorted(glob.glob(args.acme_labels))
if not len(labels):
logging.error(f"No ACME labels found at {args.acme_labels}")
sys.exit(1)
if len(outputs) != len(labels):
logging.warning(f"{len(outputs)} outputs but {len(labels)} labels")
mass_t = float(args.mass_threshold)
time_t = float(args.time_threshold)
ambiguous = args.ambiguous
hand_labels = args.hand_labels
# pair up outputs and labels
out_label_pairs = []
label_stems = [Path(p).stem for p in labels]
for output in outputs:
# find if output has corresponding label
exp = Path(output).stem.split('_UM_peaks')[0]
if exp+"_label" in label_stems:
label_idx = label_stems.index(exp+"_label")
out_label_pairs.append((output, labels[label_idx], exp))
else:
logging.warning(f"Label not found for output {output}")
# read and store peaks
exp_label_peaks = []
for out_f, label_f, exp in out_label_pairs:
output_peaks = []
with open(out_f, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
output_peaks.append([row['Mass (idx)'], row['Peak Central Time (idx)'], row['Peak Amplitude (ZScore)']])
label_peaks = []
with open(label_f, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
if hand_labels:
if ambiguous or not float(row['ambiguous_flag']):
# include the peak if it is not flagged ambiguous, or if --ambiguous was passed
label_peaks.append([row['mass_idx'], row['time_idx'], row['Peak Amplitude (ZScore)']])
else:
label_peaks.append([row['mass_idx'], row['time_idx'], row['Z-Score']])
output_peaks = np.array(output_peaks).astype(np.float)
label_peaks = np.array(label_peaks).astype(np.float)
exp_label_peaks.append((output_peaks, label_peaks, exp))
## Sweep across z-scores
zscores = list(range(5,16))
output_array = []
output_verbose_array = []
for z in tqdm(zscores, desc='z-scores'):
# Global statistics per Z-Score
z_tp = 0
z_oshape = 0
z_lshape = 0
# Per-experiment
for o, l, exp in exp_label_peaks:
o = o[o[:,2]>=z]
#l = l[l[:,2]>=10]
# calculate true positives
tp = calc_tp_strict(o[:,:2], l[:,:2], mass_t, time_t)
# precision and recall
precision = tp / o.shape[0]
recall = tp / l.shape[0]
# f1
f1 = 2 * ((precision * recall) / (precision + recall))
# save
output_verbose_array.append([z, exp, o.shape[0], l.shape[0], tp, (o.shape[0]-tp), precision, recall, f1])
z_tp += tp
z_oshape += o.shape[0]
z_lshape += l.shape[0]
# Global precision and recall
z_precision = z_tp / z_oshape
z_recall = z_tp / z_lshape
z_f1 = 2 * ((z_precision * z_recall) / (z_precision + z_recall))
output_array.append([z, z_precision, z_recall, z_f1, (z_oshape-z_tp)/len(exp_label_peaks)])
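# Illustrative arithmetic (hypothetical counts): with z_tp=8, z_oshape=10 and z_lshape=16 the
# global metrics would be precision = 0.8, recall = 0.5 and F1 = 2*(0.8*0.5)/(0.8+0.5) ~= 0.615.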
output_array = np.array(output_array)
output_verbose_array = np.array(output_verbose_array)
## Plotting
fig, ax = plt.subplots()
ax.plot(zscores, output_array[:,1], 'r^--', label='Precision')
ax.plot(zscores, output_array[:,2], 'bs-', label='Recall')
ax.plot(zscores, output_array[:,3], 'md-.', label='F1')
ax.set_ylim(0, 1)
ax.set_xlim(min(zscores), max(zscores))
ax.set_xlabel('Minimum Z-score Considered')
ax.set_ylabel('Performance')
ax.set_title(args.acme_outputs, fontsize=8)
plt.grid(axis='both')
ax2 = ax.twinx()
ax2.plot(zscores, output_array[:,4], 'g*--', label='Average FPs')
ax2.set_ylim(bottom=0)
ax2.set_ylabel('Number of FP peaks')
ax2.tick_params(axis='y', labelcolor='g')
ax.legend(loc='lower left')
ax2.legend(loc='lower right')
plt.tight_layout()
logging.info('Saving acme_eval_strict.png')
fig.savefig(op.join(args.log_folder,'acme_eval_strict.png'), dpi=400)
## CSV Output
logging.info('Saving acme_eval_strict.csv')
with open(op.join(args.log_folder,'acme_eval_strict.csv'), 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(['z-score', 'precision', 'recall', 'f1', 'mean FP'])
writer.writerows(output_array)
logging.info('Saving acme_eval_strict_verbose.csv')
with open(op.join(args.log_folder,'acme_eval_strict_verbose.csv'), 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(['z-score', 'experiment', 'pred N', 'label N', 'true positive', 'false positive', 'precision', 'recall', 'f1'])
writer.writerows(output_verbose_array)
if __name__ == "__main__":
main()
```
#### File: src/cli/ACME_simulator.py
```python
import sys
import os
import os.path as op
sys.path.append("../")
import yaml
import pickle
import numpy as np
import matplotlib.pyplot as plt
import argparse
import pandas as pd
import logging
from acme_cems.lib.analyzer import make_crop
from utils import logger
def plot_exp(exp, save = False, save_path = None):
'''make plots of raw data in a similar style to the ACME heat maps
Parameters
----------
exp: ndarray
data matrix
save: bool
should the plot be saved?
save_path: str
path to where the file should be saved
Returns
-------
plot to screen or as .png to disk
'''
plt.figure(figsize=(20, 10))
max = np.std(exp) * 3 + np.mean(exp)
min = 0
plt.imshow(exp, vmin= min, vmax=max, cmap='inferno')
plt.xlabel('Time [idx]')
plt.ylabel('Mass [idx]')
plt.colorbar(label='Ion Counts (clipped at 3 std)')
if save:
plt.savefig(save_path + '.png', dpi = 200)
plt.close()
else:
plt.show()
def add_peak(exp, peak):
'''adds gaussian peaks on data matrix
Parameters
----------
exp: ndarray
data matrix
peak: DataFrame
peak properties (mass,time,width,height,...)
Returns
-------
exp: ndarray
data matrix with added peaks
volume: float
volume of added peak (total number of ion counts for event that caused peak)
'''
size = int(peak.mass_width_idx) * 2 # size of matrix to calculate gaussian peak for
size -= 1 #make size odd
sigma_x = peak.time_width_idx/2 / 3 # convert to 1 sigma
sigma_y = peak.mass_width_idx/2 / 3 # convert to 1 sigma
height = peak.height # peak height
x, y = np.meshgrid(np.linspace(-(size // 2), size // 2, size), np.linspace(-(size // 2), (size // 2), size))
g = np.exp(-(x**2 / (2*sigma_x**2) + y**2 / (2*sigma_y**2)))*height
# calculate volume of peak (ion count of peak integrated over time and mass, approximated as a sum)
volume = np.sum(g)
# add to matrix
x_pos = peak.time_idx
y_pos = peak.mass_idx
exp[y_pos - (size // 2): y_pos + (size // 2) + 1, x_pos - (size // 2): x_pos + (size // 2) + 1] += g
return exp, volume
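# Illustrative sketch (hypothetical peak parameters): for a peak with mass_width_idx=12,
# time_width_idx=24 and height=100, the peak is rendered on a 23x23 grid (size = 2*12 - 1) with
# sigma_y = 12/2/3 = 2 and sigma_x = 24/2/3 = 4, and the returned `volume` is np.sum(g), i.e. the
# total ion count attributed to that simulated compound.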
def add_stripe(exp, stripe, cliffs):
'''adds salt stripes to data matrix
Parameters
----------
exp: ndarray
data matrix
stripe: DataFrame
stripe properties (mass_idx,width,height,...)
cliffs: ndarray
time_idx of cliffs
Returns
-------
exp: ndarray
data matrix with stripes
'''
width = stripe.stripe_width
# make stripe_width odd
if width%2 == 0:
width += 1
# make empty stripe
stripe_mat = np.zeros((width, exp.shape[1]))
# add offset
for i in range(1,len(cliffs)):
stripe_mat[:,cliffs[i-1]:cliffs[i]] += stripe.stripe_offset * np.random.randint(low=0, high=2)
# smooth transitions
smooth = 10
for i in range(len(stripe_mat)):
stripe_mat[i,:] = np.convolve(stripe_mat[i,:], np.ones((smooth,))/smooth, mode='same')
# add noise on stripe
constrained_noise = np.random.randn(stripe_mat.shape[0], stripe_mat.shape[1])
constrained_noise[np.abs(constrained_noise) > 3] = 3
stripe_mat += constrained_noise * stripe.stripe_noise
# add to matrix
y_pos = stripe.stripe_mass_idx
y_len = width
exp[y_pos - (y_len // 2): y_pos + (y_len // 2) + 1, :] += stripe_mat
return exp
def add_background_offset(exp, background_offsets, cliffs):
'''
Parameters
----------
exp: ndarray
data matrix
background_offsets: ndarray
offset to be added to background
cliffs: ndarray
time_idx of cliffs
Returns
-------
exp: ndarray
data matrix with background offsets added
'''
background_mat = np.zeros_like(exp)
for i in range(1, len(cliffs)):
background_mat[:, cliffs[i - 1]:cliffs[i]] += background_offsets[i - 1]
# smooth transitions
smooth = 10
for i in range(len(background_mat)):
background_mat[i, :] = np.convolve(background_mat[i, :], np.ones((smooth,)) / smooth, mode='same')
exp += background_mat
return exp
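# Illustrative sketch (hypothetical values): with cliffs = [0, 3000, 6559] and
# background_offsets = [5.0, 12.0], columns 0..2999 receive +5 counts and columns 3000..6558
# receive +12, with each transition softened by the 10-sample moving-average convolution above.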
def acme_sim(args):
'''Main program to simulate ACME data
Parameters
----------
args
params: Path to config file for Simulator
save_path: Path to save output of Simulator
n_runs: number of simulation runs
Returns
-------
'''
params = args.get('params')
save_path = args.get('out_dir')
n_runs = int(args.get('n_runs'))
DEBUG = False
# make parent outdir if it does not exist
if not os.path.exists(save_path):
os.makedirs(save_path)
# make case name from parameter file
case_name = params.split('.')[-2]
case_name = case_name.split('/')[-1]
params = yaml.safe_load(open(params, 'r'))
# read parameter
n_peaks = params['n_peaks'] # number of peaks
height_min = params['height_min'] # peak height in counts
height_max = params['height_max']
mass_width_min = params['mass_width_min'] # 2 * 3 sigma of peak in mass dimension [idx]
mass_width_max = params['mass_width_max']
time_mass_width_ratio_min = params['time_mass_width_ratio_min'] # ratio of peak width in mass vs time
time_mass_width_ratio_max = params['time_mass_width_ratio_max']
peak_min_dist = params['peak_min_dist'] # minimum distance between peaks [idx]
background_noise = params['backgound_noise'] # 1 sigma of background noise
background_offset_min = params['background_offset_min'] # offset
background_offset_max = params['background_offset_max'] # offset
n_stripes = params['n_stripes'] # number of horizontal stripes
stripes_noise_min = params['stripes_noise_min'] # added Noise on stripes
stripes_noise_max = params['stripes_noise_max']
stripes_offset_min = params['stripes_offset_min'] # offset of stripes
stripes_offset_max = params['stripes_offset_max']
stripes_width_min = params['stripes_width_min'] # width of stripes
stripes_width_max = params['stripes_width_max']
n_cliffs = params['n_cliffs'] # number of vertical cliffs (abrupt changes of stripes)
peaks_on_stripes = params['peaks_on_stripes'] # allow for peaks to fall on stripes
# conversion from pixel to z/amu and min
mass_idx_to_amu = 0.0833 # from 190411010_Mix25_50uM_NaCl_1M.raw.pickle'
time_idx_to_min = 0.0061
# iterate over number of simulations we want to perform
n = 0
while n < n_runs:
n += 1
# make folder for data
case_num = str(n).zfill(2)
outdir = os.path.join(save_path, case_num)
if not os.path.exists(outdir):
os.makedirs(outdir)
# make empty data
mass_axis = np.arange(70.0833,400, mass_idx_to_amu)
time_axis = np.arange(0.001,39.988,time_idx_to_min)
exp = np.zeros((len(time_axis), len(mass_axis)))
# transpose so x is time and y is mass
exp = exp.transpose()
n_peaks_init = 2 * n_peaks # make more peaks initially so that the final number of peaks equals n_peaks
# generate time_idx of peaks, avoid being too close to borders
time_idx = np.random.randint(low=mass_width_max * time_mass_width_ratio_max,
high=len(time_axis) - mass_width_max * time_mass_width_ratio_max,
size=(n_peaks_init))
# generate mass_idx of peaks, avoid being too close to borders
mass_idx = np.random.randint(low=mass_width_max, high=len(mass_axis)- mass_width_max, size=(n_peaks_init))
# generate peak height
height = np.random.uniform(low=height_min, high=height_max, size=(n_peaks_init))
# generate peak width [sigma]
mass_width_idx = np.random.uniform(low=mass_width_min, high=mass_width_max, size=(n_peaks_init))
time_mass_ratio = np.random.uniform(low=time_mass_width_ratio_min, high=time_mass_width_ratio_max,
size=(n_peaks_init))
time_width_idx = mass_width_idx * time_mass_ratio
# convert mass and time to amu and Min
mass_width = mass_width_idx * mass_idx_to_amu
time_width = time_width_idx * time_idx_to_min
# calc peak time and mass
time = time_axis[time_idx]
mass = mass_axis[mass_idx]
# calculate peak start time and end time
start_time_idx = (time_idx - time_width_idx/2).astype(int)
end_time_idx = (time_idx + time_width_idx/2).astype(int)
start_time = time - time_width/2
end_time = time + time_width/2
# put all variables in DataFrame
peaks = {'time_idx':time_idx, 'time':time , 'mass_idx': mass_idx, 'mass':mass, 'mass_width_idx': mass_width_idx,
'mass_width': mass_width,'time_width_idx': time_width_idx, 'time_width': time_width,'height': height,
'start_time_idx':start_time_idx, 'start_time': start_time,
'end_time_idx': end_time_idx, 'end_time': end_time}
peaks = pd.DataFrame(data=peaks)
# remove peaks that are too close to each other
too_close=[]
for peak in peaks.itertuples():
dist = ((peak.mass_idx-peaks.mass_idx)**2 + (peak.time_idx-peaks.time_idx)**2)**0.5 #calc distance of peak to every other peak
dist[dist == 0] = dist.max()
if dist.min() < peak_min_dist:
too_close.append(peak.Index)
peaks.drop(too_close, inplace=True)
# limit number of peaks to n_peaks
peaks = peaks.iloc[0:n_peaks]
## add peaks to empty data field
volumes = []
for peak in peaks.itertuples():
exp, volume = add_peak(exp, peak) # make peak matrix and add to exp
volumes.append(volume)
peaks['volume'] = volumes
if DEBUG:
plot_exp(exp)
# add 'cliffs'
cliffs = []
while len(cliffs) < n_cliffs:
c = np.random.randint(low=10, high=len(time_axis)-10) # generate location of cliffs
# make sure that there are no peaks on cliffs
min_dist = np.min(np.abs(c - peaks.time_idx))
if min_dist > (mass_width_max * time_mass_width_ratio_max) / 2 + 5:
cliffs.append(c)
# make numpy array, sort, add start and end point
cliffs = np.array(cliffs)
cliffs = np.sort(cliffs)
cliffs = np.concatenate(([0], cliffs, [len(time_axis)]))
# add 'stripes'
stripe_mass_idx = []
while len(stripe_mass_idx) < n_stripes:
s = np.random.randint(low=stripes_width_max, high=len(mass_axis) - stripes_width_max)
min_dist = np.min(np.abs(s - peaks.mass_idx))
# make sure that there are no peaks on stripes if peaks_on_stripes == False
if not peaks_on_stripes:
if min_dist > mass_width_max / 2:
stripe_mass_idx.append(s)
else:
stripe_mass_idx.append(s)
stripe_noise = np.random.uniform(low=stripes_noise_min, high=stripes_noise_max, size=(n_stripes))
stripe_offset = np.random.uniform(low=stripes_offset_min, high=stripes_offset_max, size=(n_stripes))
stripe_width = np.random.randint(low=stripes_width_min, high=stripes_width_max, size=(n_stripes))
stripes = {'stripe_noise': stripe_noise, 'stripe_offset': stripe_offset, 'stripe_width': stripe_width,
'stripe_mass_idx': stripe_mass_idx}
stripes = pd.DataFrame(data=stripes)
for stripe in stripes.itertuples():
exp = add_stripe(exp, stripe, cliffs)
if DEBUG:
plot_exp(exp)
# add random noise to background
constrained_noise = np.random.randn(np.shape(exp)[0],np.shape(exp)[1])
# remove everything above and below 3 std to ensure we don't have noise that looks like peaks
constrained_noise[np.abs(constrained_noise) > 3] = 3
exp += constrained_noise * background_noise
# add background offset
background_offsets = np.random.uniform(low=background_offset_min, high=background_offset_max, size=(n_cliffs + 1))
exp = add_background_offset(exp, background_offsets, cliffs)
# remove negative values
exp[exp < 0] = 0
if DEBUG:
plot_exp(exp)
out_path = os.path.join(outdir, case_name + '_' + case_num)
plot_exp(exp, save=True, save_path=out_path)
# make hand label file
# add total peak height with background offsets and noise as "Counts: Raw"
Counts_Raw = exp[peaks.mass_idx, peaks.time_idx]
peaks['Counts: Raw'] = Counts_Raw
# add 'Z-score' = peak height / std(background)
# add "Counts: Baseline" and "Sigma"
Counts_Baseline = []
Zscores = []
Sigma =[]
for peak in peaks.itertuples():
window_x = int(peak.time_width_idx) + 10
if window_x%2==0: # make sure it's odd
window_x +=1
window_y = int(peak.mass_width_idx)
if window_y%2==0:
window_y +=1
center = int(peak.time_width_idx)
if center%2==0:
center +=1
peak_xy = np.array([peak.mass_idx, peak.time_idx])
_, crop_left, crop_right = make_crop(peak_xy,exp,window_x,window_y,center)
background_std = (np.std(crop_right) + np.std(crop_left))/2
# calc and append values to lists
Zscores.append(peak.height / background_std)
Counts_Baseline.append((np.median(crop_right) + np.median(crop_left))/2)
Sigma.append(background_std)
peaks['Z-score'] = Zscores
peaks['Counts: Baseline'] = Counts_Baseline
peaks['Sigma'] = Sigma
# rename peak height to 'Counts: Baseline removed'
peaks.rename(columns={'height': 'Counts: Baseline removed'}, inplace=True)
# make sure that we don't have ambiguous peaks
if np.min(np.array(Zscores)) < 5:
logging.info('repeating run ' + str(n) + ' because z-score of < 5 found')
n -= 1
continue
# write hand labels to disc
save_path_label = out_path + '_label.csv'
peaks.to_csv(save_path_label, sep=',', index=False)
# transpose back to original format
exp = exp.transpose()
# save data to pickle file
save_path_pickle = os.path.join(out_path + '.raw.pickle')
data = {"matrix": exp, "mass_axis": mass_axis, "time_axis": time_axis}
pickle.dump(data, open(save_path_pickle, "wb"))
logging.info('>>> Done')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--params', default=op.join(op.abspath(op.dirname(__file__)), 'configs', 'acme_sim_params.yml'),
help='Path to config file for Simulator. Default is cli/configs/acme_sim_params.yml')
parser.add_argument('--out_dir', default=None,
help='Path to save output of Simulator')
parser.add_argument('--n_runs', default=10,
help='Number of simulation runs to perform')
parser.add_argument('--log_name', default="ACME_simulator.log",
help="Filename for the pipeline log. Default is ACME_simulator.log")
parser.add_argument('--log_folder', default=op.join(op.abspath(op.dirname(__file__)), "logs"),
help="Folder path to store logs. Default is cli/logs")
args = parser.parse_args()
logger.setup_logger(args.log_name, args.log_folder)
acme_sim(vars(args))
logging.info("======= Done =======")
```
#### File: src/cli/HELM_pipeline.py
```python
import sys
import logging
import argparse
import timeit
import os
import os.path as op
import yaml
import glob
from datetime import datetime
import json
import errno
from pathlib import Path
import string
import copy
from utils import logger
from utils.manifest import AsdpManifest, load_manifest_metadata
from utils.memory_tracker.plotter import Plotter, watcher
from helm_dhm.validate import process
from helm_dhm.validate import utils
from helm_dhm.validate import preproc
from utils.dir_helper import get_batch_subdir, get_exp_subdir
from helm_dhm.tracker.LAP_tracker import run_tracker
from helm_dhm.tracker.tracker import run_tracker as run_proj_tracker
from helm_dhm.evaluation.point_metrics import run_point_evaluation
from helm_dhm.evaluation.track_metrics import run_track_evaluation
from helm_dhm.evaluation.reporting import aggregate_statistics
from helm_dhm.features.features import get_features
from helm_dhm.classifier.classifier import train, predict, predict_batch_metrics
from helm_dhm.asdp.asdp import mugshots, generate_SUEs, generate_DDs
from tools.visualizer.render import visualization
PREPROC_STEP = "preproc"
VALIDATE_STEP = "validate"
TRACKER_STEP = "tracker"
PROJ_TRACKER_STEP = "proj_tracker"
POINT_EVAL_STEP = "point_evaluation"
TRACK_EVAL_STEP = "track_evaluation"
FEATURES_STEP = "features"
TRAIN_STEP = "train"
PREDICT_STEP = "predict"
ASDP_STEP = "asdp"
MANIFEST_STEP = "manifest"
PIPELINE_TRAIN = "pipeline_train"
PIPELINE_PREDICT = "pipeline_predict"
PIPELINE_TRACKER_EVAL = "pipeline_tracker_eval"
PIPELINE_PRODUCTS = "pipeline_products"
PIPELINE_FIELD = "pipeline_field"
### Pipeline Steps ###
def preproc_experiment(experiment, config):
'''Preprocess hologram files'''
files = process.get_files(experiment, config)
preproc.resize_holograms(holo_fpaths=files,
outdir=get_exp_subdir('preproc_dir', experiment, config, rm_existing=True),
resize_shape=config['preproc_resolution'],
n_workers=config['_cores'])
def validate_experiment(experiment, config):
'''Create per experiment validate products'''
files = process.get_files(experiment, config)
preproc_files = process.get_preprocs(experiment, config)
process.validate_data(exp_dir=experiment,
holo_fpaths=files,
preproc_fpaths=preproc_files,
n_workers=config['_cores'],
config=config,
instrument="HELM")
def validate_batch(_, experiments, batch_outdir, config):
'''Calculate global statistics'''
process.global_stats(exp_dirs=experiments,
out_dir=batch_outdir,
config=config)
def proj_tracker_experiment(experiment, config):
'''Run the tracker on experiment'''
files = process.get_files(experiment, config)
preproc_files = process.get_preprocs(experiment, config)
run_proj_tracker(exp_dir=experiment,
holograms=preproc_files,
originals=files,
config=config,
n_workers=config['_cores'])
def tracker_experiment(experiment, config):
'''Run the tracker on experiment'''
files = process.get_files(experiment, config)
preproc_files = process.get_preprocs(experiment, config)
run_tracker(exp_dir=experiment,
holograms=preproc_files,
originals=files,
config=config,
n_workers=config['_cores'])
def point_eval_experiment(experiment, config):
'''Create/serialize point evaluation report and return as well'''
experiment_name = Path(experiment).name
eval_dir = get_exp_subdir('evaluation_dir', experiment, config)
pe_score_report_fpath = op.join(eval_dir, experiment_name + '_point_evaluation_report.json')
extended_report_fname = config['evaluation']['points']['by_track_report_file']
if extended_report_fname is not None and len(extended_report_fname) > 0:
extended_report_fpath = op.join(eval_dir, extended_report_fname)
else:
extended_report_fpath = None
# Get true and proposed tracks
label_csv_fpath = op.join(get_exp_subdir('label_dir', experiment, config),
f'{experiment_name}_labels.csv')
if not op.exists(label_csv_fpath):
logging.warning('No labels found for experiment {}. Skipping.'
.format(experiment))
return None
track_fpaths = sorted(glob.glob(op.join(
get_exp_subdir('track_dir', experiment, config), '*.json')))
# Run point evaluation. Results saved to `pe_score_report_fpath`
return (experiment,
run_point_evaluation(label_csv_fpath, track_fpaths, pe_score_report_fpath,
extended_report_fpath, config))
def point_eval_load_cached(experiment, config):
'''Deserialize point evaluation report from previous run'''
experiment_name = Path(experiment).name
pe_score_report_fpath = op.join(get_exp_subdir('evaluation_dir', experiment, config),
experiment_name + '_point_evaluation_report.json')
with open(pe_score_report_fpath) as jsonfile:
return (experiment, json.load(jsonfile))
def point_eval_batch(scores, _, batch_outdir, config):
'''Create point metrics histograms'''
aggregate_statistics(data=scores,
metrics=config['evaluation']['points']['hist_metrics'],
n_bins=config['evaluation']['histogram_bins'],
outdir=get_batch_subdir('point_eval_dir', batch_outdir, config),
macro_metric_path=config['evaluation']['points']['means_score_report_file'],
metrics_raw_path=config['evaluation']['points']['raw_distributions_file'])
def track_eval_experiment(experiment, config):
'''Evaluate tracks on experiment'''
experiment_name = Path(experiment).name
n_frames = len(process.get_preprocs(experiment, config))
# Get true and proposed tracks
label_csv_fpath = op.join(get_exp_subdir('label_dir', experiment, config),
f'{experiment_name}_labels.csv')
if not op.exists(label_csv_fpath):
logging.warning("No labels csv for experiment {}. Skipping...".format(experiment))
return None
track_fpaths = sorted(glob.glob(op.join(get_exp_subdir('track_dir', experiment, config), '*.json')))
# Run track evaluation. Results saved to `score_report_fpath`
return (experiment,
run_track_evaluation(label_csv_fpath, track_fpaths,
get_exp_subdir('evaluation_dir', experiment, config),
n_frames, experiment_name, config))
def track_eval_load_cached(experiment, config):
'''Load existing track evaluations'''
experiment_name = Path(experiment).name
te_score_report_fpath = op.join(get_exp_subdir('evaluation_dir', experiment, config),
experiment_name + '_track_evaluation_report.json')
with open(te_score_report_fpath) as jsonfile:
return (experiment, json.load(jsonfile))
def track_eval_batch(scores, _, batch_outdir, config):
'''Create track metrics histograms'''
aggregate_statistics(data=scores,
metrics=config['evaluation']['tracks']['hist_metrics'],
n_bins=config['evaluation']['histogram_bins'],
outdir=get_batch_subdir('track_eval_dir', batch_outdir, config),
micro_metric_path=config['evaluation']['tracks']['micro_score_report_file'],
macro_metric_path=config['evaluation']['tracks']['macro_score_report_file'])
def features_experiment(experiment, config):
'''Compute features on experiment'''
data_track_features = get_features(experiment=experiment,
config=config,
save=True,
labeled=config['_train_feats'])
if not data_track_features:
logging.error(f'Could not extract features for experiment {experiment}')
return
def train_batch(_, experiments, batch_outdir, config):
'''Run training on batch of experiments'''
train(experiments, batch_outdir, config)
def predict_experiment(experiment, config):
'''Run predict on experiment'''
return predict(experiment, config)
def predict_batch(inputs, experiments, batch_outdir, config):
# Unmarshal experiment results
batch_true_Y = []
batch_pred_Y = []
batch_prob_Y = []
batch_alltracks = 0
for input in inputs:
batch_true_Y.extend(input[0])
batch_pred_Y.extend(input[1])
batch_prob_Y.extend(input[2])
batch_alltracks += input[3]
predict_batch_metrics(batch_true_Y, batch_pred_Y, batch_prob_Y, batch_alltracks,
batch_outdir, config)
def asdp_experiment(experiment, config):
'''Create asdp's for experiment'''
asdp_dir = get_exp_subdir('asdp_dir', experiment, config, rm_existing=True)
predict_dir = get_exp_subdir('predict_dir', experiment, config)
track_fpaths = sorted(glob.glob(op.join(predict_dir, '*.json')))
holograms = process.get_files(experiment, config)
num_files = len(holograms)
mugshots(experiment, holograms, experiment, os.path.join(asdp_dir,"mugshots"), config)
generate_SUEs(experiment, asdp_dir, track_fpaths, config['sue'])
generate_DDs(experiment, asdp_dir, track_fpaths, config['dd'])
if not config['_field_mode']:
visualization(experiment, config, "HELM", config['_cores'], cleanup=True)
return num_files
def asdp_batch(inputs, _, batch_outdir, config):
'''Create batch asdp's from singular experiment products'''
total_files = 0
frame_rate = 15 # TODO - Put in config? Argparse? Read from experiment metadata?
total_files = sum(inputs)
capture_time = total_files / frame_rate
run_time = timeit.default_timer() - start_time
performance_ratio = run_time / capture_time
logging.info("Runtime Performance Ratio: {ratio:.1f}x (Data Processing Time / Raw Data Creation Time)".format(ratio=performance_ratio))
def manifest_experiment(experiment, config):
'''Create manifest for experiment'''
exp_name = Path(experiment).name
validate_dir = get_exp_subdir('validate_dir', experiment, config)
predict_dir = get_exp_subdir('predict_dir', experiment, config)
asdp_dir = get_exp_subdir('asdp_dir', experiment, config)
priority_bin = config.get('_priority_bin', 0)
metadata = config.get('_manifest_metadata', {})
manifest = AsdpManifest('helm', priority_bin)
manifest.add_metadata(**metadata)
# validate products
manifest.add_entry(
'processing_report',
'validate',
op.join(validate_dir, exp_name + '_processing_report.txt'),
)
manifest.add_entry(
'timestats_density',
'validate',
op.join(validate_dir, exp_name + '_timestats_density.csv'),
)
manifest.add_entry(
'timestats_mean_intensity',
'validate',
op.join(validate_dir, exp_name + '_timestats_mean_intensity.csv'),
)
manifest.add_entry(
'timestats_max_intensity',
'validate',
op.join(validate_dir, exp_name + '_timestats_max_intensity.csv'),
)
manifest.add_entry(
'timestats_pixeldiff',
'validate',
op.join(validate_dir, exp_name + '_timestats_pixeldiff.csv'),
)
manifest.add_entry(
'mhi_image_info',
'validate',
op.join(validate_dir, exp_name + '_mhi.png'),
)
# predicted path products
# note that we're listing predict step output, not tracker output.
manifest.add_entry(
'predicted_tracks',
'predict',
op.join(predict_dir),
)
# asdp products
manifest.add_entry(
'track_mugshots',
'asdp',
op.join(asdp_dir, 'mugshots'),
)
manifest.add_entry(
'diversity_descriptor',
'metadata',
op.join(asdp_dir, exp_name + '_dd.csv'),
)
manifest.add_entry(
'science_utility',
'metadata',
op.join(asdp_dir, exp_name + '_sue.csv'),
)
manifest.write(op.join(asdp_dir, exp_name + '_manifest.json'))
### Pipeline Helpers ###
def get_override_config(default_config, new_config):
'''Returns a new config with all key-values in default_config overridden by those matching in new_config'''
config = copy.deepcopy(default_config)
_override_config(config, new_config)
return config
def _override_config(default_config, new_config, prefix=None):
'''Recursively overrides all key-values in default_config matching those in new_config'''
if prefix is None:
prefix = []
for key in new_config.keys():
if isinstance(new_config[key], dict):
p = prefix[:]
p.append(key)
_override_config(default_config, new_config[key], p)
else:
subdict = default_config
has_prefix = True
for k in prefix:
if k not in subdict.keys() or not isinstance(subdict[k], dict):
has_prefix = False
subdict[k] = {}
subdict = subdict[k]
if not has_prefix or key not in subdict:
logging.warning("Creating new config key: {}".format(prefix + [key]))
subdict[key] = new_config[key]
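# Illustrative example (toy dicts, not the real HELM config):
#   get_override_config({'tracker': {'min_len': 5, 'gap': 2}, 'cores': 4}, {'tracker': {'gap': 3}})
#   -> {'tracker': {'min_len': 5, 'gap': 3}, 'cores': 4}
# Keys present only in the override are created in the result (with a "Creating new config key"
# warning) rather than rejected.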
def has_experiment_outputs(step, experiment, config):
'''Returns true if experiment already has all expected step outputs'''
# The expected output directories for each step; a step is rerun if any are empty
experiment_directories = {PREPROC_STEP : ['preproc_dir'],
VALIDATE_STEP : ['validate_dir'],
TRACKER_STEP : ['track_dir', 'evaluation_dir'],
PROJ_TRACKER_STEP : ['track_dir', 'evaluation_dir'],
POINT_EVAL_STEP : ['evaluation_dir'],
TRACK_EVAL_STEP : ['evaluation_dir'],
PREDICT_STEP : ['predict_dir'],
FEATURES_STEP : ['features_dir'],
ASDP_STEP : ['asdp_dir']}
for directory in experiment_directories[step]:
exp_dir = get_exp_subdir(directory, experiment, config)
if not op.isdir(exp_dir) or len(os.listdir(exp_dir)) == 0:
logging.warning("\tStep {} does not have output {} at {}!".format(step, directory, exp_dir))
return False
# Additional per step files here
paths = []
experiment_name = Path(experiment).name
if step == POINT_EVAL_STEP:
paths.append(op.join(get_exp_subdir('evaluation_dir', experiment, config),
experiment_name + '_point_evaluation_report.json'))
elif step == TRACK_EVAL_STEP:
paths.append(op.join(get_exp_subdir('evaluation_dir', experiment, config),
experiment_name + '_track_evaluation_report.json'))
for path in paths:
if not op.exists(path):
return False
return True
def get_timestamp(step, experiment, config):
'''Return the timestamp of last run of step on experiment'''
timestamp_dir = get_exp_subdir('timestamp_dir', experiment, config)
try:
with open(op.join(timestamp_dir, step), 'r') as ts_file:
step_ts = int(ts_file.readline())
return step_ts
except:
logging.warning("No timestamp found in experiment {} for step {}".format(
experiment, step
))
def should_run(step, use_preexisting, experiment, config):
'''Determine if step needs to be rerun on experiment'''
# Run if caching disabled
if not use_preexisting:
return True
# Run if outputs don't exist, or were produced with a different config
if not has_experiment_outputs(step, experiment, config):
return True
# TODO: Run if config doesn't match previous run
# Mapping from each step to the exhaustive list of steps that should trigger a rerun
experiment_dependencies = {PREPROC_STEP : [],
VALIDATE_STEP : [PREPROC_STEP],
TRACKER_STEP : [VALIDATE_STEP, PREPROC_STEP],
POINT_EVAL_STEP : [TRACKER_STEP],
TRACK_EVAL_STEP : [TRACKER_STEP],
FEATURES_STEP : [TRACKER_STEP]}
# Rerun if any of of the steps depended on by this step were run more recently
step_ts = get_timestamp(step, experiment, config)
if not step_ts:
return True
for dependency in experiment_dependencies[step]:
dep_ts = get_timestamp(dependency, experiment, config)
if not dep_ts or dep_ts > step_ts:
return True
return False
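# Illustrative example (hypothetical timestamps): if the 'tracker' timestamp of an experiment is
# newer than its 'features' timestamp, should_run(FEATURES_STEP, True, experiment, config) returns
# True even though the features outputs exist, because TRACKER_STEP is one of its dependencies.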
def parse_steps(cli_args):
'''Parses command line steps/pipeline keywords and returns list of steps to run.
Step tuples include name of step, functions associated with step, and whether step can use existing products'''
step_names = cli_args.steps
use_existing = cli_args.use_existing
predict_model = cli_args.predict_model
train_feats = cli_args.train_feats
field_mode = cli_args.field_mode
cache_allowed_steps = [PREPROC_STEP, VALIDATE_STEP, TRACKER_STEP, POINT_EVAL_STEP, TRACK_EVAL_STEP]
step_mappings = {
PREPROC_STEP : [preproc_experiment, None, None],
VALIDATE_STEP : [validate_experiment, validate_batch, None],
PROJ_TRACKER_STEP : [proj_tracker_experiment, None, None],
TRACKER_STEP : [tracker_experiment, None, None],
POINT_EVAL_STEP : [point_eval_experiment, point_eval_batch, point_eval_load_cached],
TRACK_EVAL_STEP : [track_eval_experiment, track_eval_batch, track_eval_load_cached],
FEATURES_STEP : [features_experiment, None, None],
TRAIN_STEP : [None, train_batch, None],
PREDICT_STEP : [predict_experiment, predict_batch, None],
ASDP_STEP : [asdp_experiment, asdp_batch, None],
MANIFEST_STEP : [manifest_experiment, None, None]
}
pipelines = {
PIPELINE_TRAIN : [PREPROC_STEP, VALIDATE_STEP, TRACKER_STEP, TRACK_EVAL_STEP, FEATURES_STEP, TRAIN_STEP],
PIPELINE_PREDICT : [PREPROC_STEP, VALIDATE_STEP, TRACKER_STEP, FEATURES_STEP, PREDICT_STEP],
PIPELINE_TRACKER_EVAL : [PREPROC_STEP, VALIDATE_STEP, TRACKER_STEP, POINT_EVAL_STEP, TRACK_EVAL_STEP],
PIPELINE_PRODUCTS : [PREPROC_STEP, VALIDATE_STEP, TRACKER_STEP, POINT_EVAL_STEP,
TRACK_EVAL_STEP, FEATURES_STEP, PREDICT_STEP,
ASDP_STEP, MANIFEST_STEP],
PIPELINE_FIELD : [PREPROC_STEP, VALIDATE_STEP, TRACKER_STEP, FEATURES_STEP,
PREDICT_STEP, ASDP_STEP, MANIFEST_STEP]
}
# Pipeline-wise check
if PIPELINE_FIELD in step_names and not field_mode:
logging.error("--steps pipeline_field requires --field_mode")
# Convert pipelines to steps
if len(step_names) == 1 and step_names[0] in pipelines:
step_names = pipelines[step_names[0]]
# Various checks after substituting pipeline keywords
if PREDICT_STEP in step_names and predict_model == "":
logging.error("--steps predict requires --predict_model")
if TRAIN_STEP in step_names and not train_feats:
logging.error("--steps train requires --train_feats")
if PREDICT_STEP in step_names and train_feats:
logging.error("--steps predict shouldn't use --train_feats")
# Create step tuples of the form: (step_name, exp_func, batch_func, get_previous_func, can_reuse)
step_tuples = []
for step_name in step_names:
if step_name not in step_mappings:
raise Exception("Unrecognized step or pipeline keyword: {}".format(step_name))
step = [step_name]
step.extend(step_mappings[step_name])
step.append(step_name in use_existing and step_name in cache_allowed_steps)
step_tuples.append(tuple(step))
can_reuse = []
for st in step_tuples:
if st[-1]:
can_reuse.append(st[0])
if len(can_reuse) > 0:
logging.info("USE EXISTING ENABLED FOR STEPS: {}".format(' '.join(can_reuse)))
return step_tuples
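# Illustrative example (hypothetical CLI values): '--steps pipeline_predict' expands to
# [preproc, validate, tracker, features, predict] and each returned tuple looks like
#   ('tracker', tracker_experiment, None, None, False)
# where the trailing flag is True only if the step is both cache-allowed and listed in --use_existing.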
def pipeline_run_step(step_tuple, experiments, batch_outdir, config):
"""Executes a step in the pipeline, managing generic logging and reuse of intermediate results
Parameters
----------
step_tuple : tuple
step_name : str
The name of the step to execute
experiment_func : function
Function to run on each experiment. Should take the experiment path and config as arguments and return the step's result (or None).
batch_func : function
Function to run on the results of all experiments for this step
get_preexisting_func : function
Function to get a preexisting result (if needed for batch) when experiment_func is skipped on an experiment
use_existing : bool
Whether this step can skipped using preexisting results
experiments : list
The experiments used in this run
batch_outdir : str
The path to the batch output directory
config : dict
The config for this run
"""
# Parse step tuple
step = step_tuple[0]
experiment_func = step_tuple[1]
batch_func = step_tuple[2]
get_preexisting_func = step_tuple[3]
use_preexisting = step_tuple[4]
logging.info("\x1b[1mBeginning {} step...\x1b[0m".format(step)) # Bold font
st = timeit.default_timer()
outputs = []
# Run per experiment steps (if any)
if experiment_func:
for experiment in experiments:
# Skip running on an experiment if we can use a pre-existing result
if not should_run(step, use_preexisting, experiment, config):
logging.info("Using cached {} result for experiment {}".format(step, experiment))
if get_preexisting_func:
outputs.append(get_preexisting_func(experiment, config))
continue
# Else run and timestamp
result = experiment_func(experiment, config)
ct = int(datetime.utcnow().timestamp())
timestamp_dir = get_exp_subdir('timestamp_dir', experiment, config)
with open(op.join(timestamp_dir, step), 'w') as ts_file:
ts_file.write(str(ct))
#TODO: Dump subset of config depended on by this step
if result:
outputs.append(result)
# Run any batch operations on the outputs of all experiments
if batch_func:
batch_func(outputs, experiments, batch_outdir, config)
logging.info("Finished {} step. Elapsed time = {time:.2f} s".format(
step, time=timeit.default_timer() - st))
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--config', default=op.join(op.abspath(op.dirname(__file__)), "configs", "helm_config_latest.yml"),
help="Path to configuration file. Default is cli/configs/helm_config_latest.yml")
parser.add_argument('--toga_config', default="",
help="Override subset of config with path to toga generated config")
parser.add_argument('--experiments', nargs='+',
required=True,
help="Glob-able string patterns indicating sets of data files to be processed.")
all_steps = [PREPROC_STEP, VALIDATE_STEP, TRACKER_STEP, PROJ_TRACKER_STEP, POINT_EVAL_STEP, TRACK_EVAL_STEP, FEATURES_STEP, TRAIN_STEP, PREDICT_STEP, ASDP_STEP, MANIFEST_STEP]
pipeline_keywords = [PIPELINE_TRAIN, PIPELINE_PREDICT, PIPELINE_TRACKER_EVAL, PIPELINE_PRODUCTS, PIPELINE_FIELD]
steps_options = all_steps + pipeline_keywords
cache_allowed_steps = [PREPROC_STEP, VALIDATE_STEP, TRACKER_STEP, POINT_EVAL_STEP, TRACK_EVAL_STEP]
parser.add_argument('--use_existing', default=[], nargs='+',
required=False,
choices=cache_allowed_steps,
help=f"Steps for which to use existing output: [{', '.join(cache_allowed_steps)}]",
metavar='CACHED_STEPS')
parser.add_argument('--steps', nargs='+',
required=True,
choices=steps_options,
help=f"Steps to run in the pipeline: [{', '.join(steps_options)}]",
metavar='STEPS')
parser.add_argument('--cores', type=int,
help="How many processor cores to utilize",
default=7)
parser.add_argument('--batch_outdir', required=True,
help="Directory to write batch results")
parser.add_argument('--note', default="",
help="Note to be appended to batch outdir name")
parser.add_argument('--log_name', default="HELM_pipeline.log",
help="Filename for the pipeline log. Default is HELM_pipeline.log")
parser.add_argument('--log_folder', default=op.join(op.abspath(op.dirname(__file__)), "logs"),
help="Folder path to store logs. Default is cli/logs")
parser.add_argument('--train_feats', action='store_true',
help="Only load tracks matched with hand labels (e.g., for ML training)" )
parser.add_argument('--predict_model', default=op.join(op.abspath(op.dirname(__file__)), "models", "classifier_labelbox_v01.pickle"),
help="Path to the pretrained model for prediction. Default is models/classifier_labelbox_v01.pickle")
parser.add_argument('--field_mode', action='store_true',
help='Only outputs field products')
parser.add_argument('--priority_bin', default=0, type=int,
help='Downlink priority bin in which to place generated products')
parser.add_argument('--manifest_metadata', default=None, type=str,
help='Manifest metadata (YAML string); takes precedence over file entries')
parser.add_argument('--manifest_metadata_file',
default=None, type=str,
help='Manifest metadata file (YAML)')
args = parser.parse_args()
logger.setup_logger(args.log_name, args.log_folder)
steps_to_run = parse_steps(args)
with open(args.config) as f:
config = yaml.safe_load(f)
if args.toga_config:
with open(args.toga_config) as f:
override_config = yaml.safe_load(f)
config = get_override_config(config, override_config)
logging.info("Loaded config.")
manifest_metadata = load_manifest_metadata(
args.manifest_metadata_file, args.manifest_metadata)
# To keep pipeline step calling convention simpler, add one-off args to config here
config['_cores'] = args.cores
config['_model_absolute_path'] = args.predict_model
config['_train_feats'] = args.train_feats
config['_field_mode'] = args.field_mode
config['_priority_bin'] = args.priority_bin
config['_manifest_metadata'] = manifest_metadata
# setup batch outdir parent directory
try:
# try-catching avoids race condition
os.mkdir(args.batch_outdir)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
else:
logging.info("Using existing batch output parent directory.")
pass
# setup batch outdir directory
if config['raw_batch_dir']:
# Absolute path for TOGA
if args.note != "":
logging.warning("Using raw batch dir, ignoring --note")
batch_outdir = args.batch_outdir
else:
# Timestamped path for standard use
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
if args.note != "":
batch_outdir = op.join(args.batch_outdir, timestamp+"_"+args.note)
else:
batch_outdir = op.join(args.batch_outdir, timestamp)
utils._check_create_delete_dir(batch_outdir, overwrite=False)
# setup the plotter
pltt = Plotter(save_to=op.join(args.log_folder, "HELM_pipeline_memory.mp4"))
globalQ = pltt.get_queues('HELM_pipeline.py')
# Set up the watcher arguments
watch = {'HELM_pipeline.py': {'queue': globalQ.graph, 'pid': os.getpid()}}
# Start watcher then the plotter
watcher(watch)
pltt.start()
global start_time
start_time = timeit.default_timer()
experiments = process.get_experiments(args.experiments, config)
logging.info("Retrieved experiment dirs.")
if experiments:
# Run the pipeline
for step_tuple in steps_to_run:
pipeline_run_step(step_tuple, experiments, batch_outdir, config)
else:
logging.error("No experiments found!")
run_time = timeit.default_timer() - start_time
try:
ram_mean, ram_max = pltt.stop()
logging.info(f'Average RAM:{ram_mean:.2f}GB, Max RAM:{ram_max:.2f}GB')
except:
logging.error("Memory tracker failed to shut down correctly.")
logging.info("Full script run time: {time:.1f} seconds".format(time=run_time))
logging.info("======= Done =======")
if __name__ == "__main__":
main()
```
#### File: src/cli/HELM_simulator.py
```python
import sys
import os.path as op
import argparse
import logging
from datetime import datetime
from glob import glob
from pathlib import Path
import yaml
from helm_dhm.validate import utils
from helm_dhm.simulator.sim_tracks import run_track_sim
from helm_dhm.simulator.sim_holograms import run_hologram_sim
from helm_dhm.simulator.utils import config_check
from utils import logger
def make_sim_exp_names(config, n_exp):
"""Helper to create simulator directory names using datetime and config"""
# Pull out some metadata
date = datetime.now().strftime('%Y%m%d_%H%M%S')
n_nonmot = config['exp_params']['n_non_motile']
n_mot = config['exp_params']['n_motile']
has_drift = config['exp_params'].get('drift') is not None
exp_dirnames = []
# Generate experiment names. One per repeat
for ei in range(n_exp):
flow_str = 'flow' if has_drift else 'static'
exp_dirnames.append(f'{date}_dhm_{flow_str}_max{n_mot}_motile_max{n_nonmot}_nonmotile_grayscale_sim_{ei:02}')
return exp_dirnames
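# Illustrative example (hypothetical config values): with n_motile=10, n_non_motile=40 and a
# 'drift' entry present (not None), the first generated name looks like
#   20210615_103000_dhm_flow_max10_motile_max40_nonmotile_grayscale_sim_00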
def main():
###################################
# Argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('--configs', default=[op.join(op.abspath(op.dirname(__file__)), "configs", "helm_simulator_config_v2.yml")],
type=str,
nargs='+',
help="Glob-able path(s) to configuration file(s). Default is configs/helm_simulator_config_v2.yml")
# TODO: help for below spits out all 100 possible values
parser.add_argument('--n_exp', type=int,
default=1,
choices=range(1, 100),
help="Number of experiments to create per config. Defaults to 1.")
parser.add_argument('--sim_outdir', type=str,
required=True,
help="Directory to save simulated experiments to. Will overwrite existing directory.")
parser.add_argument('--log_name', default="HELM_simulator.log",
help="Filename for the pipeline log. Default is HELM_simulator.log")
parser.add_argument('--log_folder', default=op.join(op.abspath(op.dirname(__file__)), "logs"),
help="Folder path to store logs. Default is cli/logs")
args = parser.parse_args()
###################################
# Configure logging
logger.setup_logger(args.log_name, args.log_folder)
###################################
# Load simulation configurations
if not args.configs:
logging.warning('No config files found, exiting.')
sys.exit(0)
# Confirm creation of simulation directory
utils._check_create_delete_dir(args.sim_outdir, overwrite=True)
###################################
# Load/check configurations
#config_paths = glob(args.configs)
config_fpaths = set()
for pattern in args.configs:
curr_dirs = sorted([f for f in glob(pattern) if op.isfile(Path(f))])
config_fpaths.update(curr_dirs)
exp_configs = []
for config_fpath in config_fpaths:
with open(config_fpath, 'r') as yaml_f:
config = yaml.safe_load(yaml_f)
config_check(config.copy())
exp_configs.append(config)
###################################
# Simulate `n_exp` experiments per config
logging.info(f'Starting simulation of {len(exp_configs) * args.n_exp} total experiments.')
for config in exp_configs:
exp_names = make_sim_exp_names(config, args.n_exp)
for exp_name in exp_names:
###########################
# Determine the experiment directory and subdirs
exp_dir = op.join(args.sim_outdir, exp_name)
sim_track_dir = op.join(args.sim_outdir, exp_name, config['sim_track_dir'])
sim_hologram_dir = op.join(args.sim_outdir, exp_name, config['sim_hologram_dir'])
# Create the experiment directory and subdirs
utils._check_create_delete_dir(exp_dir, overwrite=True)
utils._check_create_delete_dir(sim_track_dir, overwrite=True)
utils._check_create_delete_dir(sim_hologram_dir, overwrite=True)
logging.info(f'\nStarting simulation of experiment: {exp_dir}')
###########################
# Create tracks
run_track_sim(config, exp_dir)
###########################
# Create holograms
run_hologram_sim(config, exp_dir)
```
#### File: cli/test/test_FAMEHELM_config.py
```python
import yaml
def _compare_keys(dict1, dict2, path=[]):
missing = set(dict1).symmetric_difference(set(dict2))
if len(missing):
raise Exception("Unsynced configs found in " + str(path) + ": " + str(missing))
else:
# dict keys are the same but are there nested keys?
for k in dict1.keys():
if type(dict1[k]) is dict and type(dict2[k]) is dict:
_compare_keys(dict1[k], dict2[k], path=path+[k])
elif type(dict1[k]) is dict or type(dict2[k]) is dict:
raise Exception("One of these is a dict but one isn't: " + str(path+[k]))
```
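A quick usage sketch of `_compare_keys` above, assuming it is imported from this test module; the config dicts are toy examples, not real pipeline configs.
```python
# Same nested key structure passes silently; a missing key raises.
ok_a = {'tracker': {'eps': 1.0}, 'classifier': {'folds': 5}}
ok_b = {'tracker': {'eps': 2.5}, 'classifier': {'folds': 3}}
_compare_keys(ok_a, ok_b)        # no exception: keys match, values may differ

try:
    _compare_keys(ok_a, {'tracker': {'eps': 1.0}})
except Exception as e:
    print(e)                     # Unsynced configs found in []: {'classifier'}
```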
#### File: src/cli/update_asdp_db.py
```python
import os.path as op
import argparse
import logging
from tqdm import tqdm
from jewel.asdpdb import ASDPDB, compile_asdpdb_entry
from utils.dir_helper import get_unique_file_by_suffix
from utils import logger as OWLSlogger
def update_asdp_db(dbfile, rootdirs):
asdp_db = ASDPDB(dbfile)
logger.info(f'{len(rootdirs)} experiment directories provided as input.')
# Check each directory for a manifest file
experiments = []
for rootdir in tqdm(rootdirs, desc='Checking for manifest files'):
manifest_file = get_unique_file_by_suffix(
rootdir, 'manifest.json', logger=logger
)
if manifest_file is not None:
experiments.append((rootdir, manifest_file))
logger.info(f'Found {len(experiments)} experiment directories')
# Filter new experiment directories
new_experiments = [
(e, m) for e, m in experiments
if not asdp_db.entry_exists(e)
]
logger.info(f'Found {len(new_experiments)} new experiment directories')
if len(new_experiments) > 0:
new_entries = [
compile_asdpdb_entry(e, m)
for e, m in new_experiments
]
new_good_entries = [e for e in new_entries if e is not None]
logger.info(f'Prepared {len(new_good_entries)} entries for ASDPDB')
if len(new_good_entries) > 0:
inserted = asdp_db.add_entries(new_good_entries)
logger.info(f'Updated ASDP DB with {len(inserted)} entries')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('rootdirs', nargs='+',
help='path(s) to experiment root directories')
parser.add_argument('dbfile', help='path to db CSV file (will be created if it does not exist)')
parser.add_argument('--log_name', default="update_asdp_db.log",
help="Filename for the pipeline log. Default is update_asdp_db.log")
parser.add_argument('--log_folder', default=op.join(op.abspath(op.dirname(__file__)), "logs"),
help="Folder path to store logs. Default is cli/logs")
args = parser.parse_args()
OWLSlogger.setup_logger(args.log_name, args.log_folder)
global logger
logger = logging.getLogger()
kwargs = vars(args)
kwargs.pop('log_name', None)
kwargs.pop('log_folder', None)
update_asdp_db(**kwargs)
```
#### File: helm_dhm/classifier/classifier.py
```python
import os, sys
import os.path as op
import glob
import pickle
import logging
import csv
import json
from pathlib import Path
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import GroupKFold
from utils.dir_helper import get_batch_subdir, get_exp_subdir
IGNORE_FEATS = set(['motility', 'track', 'dataset_name'])
def write_metrics(true_Y, pred_Y, prob_Y, batch_outdir, config, prefix=""):
""" Writes out classifier metrics. Restricted to binary classification.
Only the labels "motile" and "non-motile" are expected, with "motile" as the
positive label.
Currently writes:
- Classification Report
- AUC curve plot
- Precision-Recall curve plot
- Confusion matrix
Parameters
----------
true_Y: list
List of ground truth labels. In ["motile", "non-motile"].
pred_Y: list
List of predicted labels. In ["motile", "non-motile"].
prob_Y: list
List of "motile" probabilities.
batch_outdir: string
Batch output directory.
config: dict
Configuration read from YAML.
prefix: str
        Prefix to be prepended to the output filenames. Useful for specifying
train vs test metric output.
Defaults to "".
"""
# Output directory path
output_dir = get_batch_subdir("classifier_dir", batch_outdir, config)
### BASIC CLASSIFICATION REPORT
report = metrics.classification_report(true_Y, pred_Y)
if prefix != "":
report_fp = op.join(output_dir, prefix+"_report.txt")
else:
report_fp = op.join(output_dir, "report.txt")
# write to file
with open(report_fp, 'w') as f:
f.write("Classification Report with threshold {}\n".format(config['classifier']['motility_threshold']))
f.write(report)
logging.info(f'Saved motility classification report: {op.join(*Path(report_fp).parts[-2:])}')
### ROC PLOT
fpr, tpr, _ = metrics.roc_curve(true_Y, prob_Y, pos_label="motile")
# Binarize true labels to 1 for motile, 0 for non-motile
binary_true_Y = [1 if x=='motile' else 0 for x in true_Y]
# Calculate AUC
auc = metrics.roc_auc_score(binary_true_Y, prob_Y)
# Plot ROC curve
fig, ax = plt.subplots(dpi=300)
ax.plot(fpr, tpr, color="blue", label="ROC curve (area = {:.2f})".format(auc))
ax.plot([0,1], [0,1], '--', color="red", label="Chance")
ax.set_title("{} ROC Curve".format(prefix))
ax.set_xlabel("False Positive Rate")
ax.set_ylabel("True Positive Rate")
ax.set_aspect('equal')
ax.legend()
if prefix != "":
fig.savefig(op.join(output_dir, "{}_roc_plot.png".format(prefix)))
else:
fig.savefig(op.join(output_dir, "roc_plot.png"))
logging.info(f'Saved ROC plot: {op.join(*Path(op.join(output_dir, "*_roc_plot.png")).parts[-2:])}')
### PRECISION-RECALL PLOT
precision, recall, _ = metrics.precision_recall_curve(true_Y, prob_Y, pos_label="motile")
# Plot PR curve
fig, ax = plt.subplots(dpi=300)
ax.plot(recall, precision, color="blue")
ax.set_title("{} Precision-Recall Curve".format(prefix))
ax.set_xlabel("Recall")
ax.set_ylabel("Precision")
ax.set_aspect('equal')
if prefix != "":
fig.savefig(op.join(output_dir, "{}_pr_plot.png".format(prefix)))
else:
fig.savefig(op.join(output_dir, "pr_plot.png"))
logging.info(f'Saved prec-rec plot: {op.join(*Path(op.join(output_dir, "*_pr_plot.png")).parts[-2:])}')
### CONFUSION MATRIX
confusion = metrics.confusion_matrix(true_Y, pred_Y, labels=['motile', 'non-motile'])
# Plot confusion matrix
fig, ax = plt.subplots(dpi=300)
ax.imshow(confusion, cmap='Blues')
# x-axis formatting
ax.set_xlabel("Predicted label")
ax.set_xticks([0,1])
ax.set_xticklabels(['motile', 'non-motile'])
# y-axis formatting
ax.set_ylabel("True label")
ax.set_yticks([0,1])
ax.set_yticklabels(['motile', 'non-motile'])
# on-square text
for i in range(2):
for j in range(2):
ax.text(j, i, confusion[i,j], ha='center', va='center', color='black')
if prefix != "":
fig.savefig(op.join(output_dir, "{}_confusion.png".format(prefix)))
else:
fig.savefig(op.join(output_dir, "confusion.png"))
logging.info(f'Saved confusion matrix: {op.join(*Path(op.join(output_dir, "*_confusion.png")).parts[-2:])}')
def cross_validate(clf, X, Y, groups, batch_outdir, config):
""" Performs k-fold cross validation on provided classifier
Parameters
----------
clf: sklearn classifier object
Initialized classifier. Any existing learned parameters will be
overwritten.
X: numpy array
Data and features to be trained on.
Y: numpy array
Labels to be trained on.
    groups: numpy array
Same value for tracks within the same experiment. For GroupKFold.
batch_outdir: string
Directory path to batch output directory
config: dict
Configuration read from YAML.
Returns
-------
None.
"""
### Read from configuration
# number of folds for cross validation
cv_folds = config['classifier']['cv_folds']
# directory for cross validation result output
output_dir = get_batch_subdir('classifier_dir', batch_outdir, config)
# probability threshold for labeling a track as motile
threshold = config['classifier']['motility_threshold']
### Initialize k-fold stratified cross-validation
try:
# Using group k fold to avoid test/train within same exp
skf = GroupKFold(n_splits=cv_folds)
except Exception as e:
logging.error("Failed to initialize cross validation, skipping:")
logging.error(e)
return
### Try splitting
try:
crossval_splits = skf.split(X, Y, groups)
for _, (_, _) in enumerate(crossval_splits):
pass
# Need to re-create generator after spending it
crossval_splits = skf.split(X, Y, groups)
except Exception as e:
logging.error("Failed to split for cross validation, skipping:")
logging.error(e)
return
### Global AUC plot
fig, ax = plt.subplots(dpi=300)
ax.plot([0,1], [0,1], '--', color="red", label="Chance")
ax.set_title("Crossval ROC Curve")
ax.set_xlabel("False Positive Rate")
ax.set_ylabel("True Positive Rate")
ax.set_aspect('equal')
### Global classification report output
report_fp = op.join(output_dir, "crossval_report.txt")
# write to file
with open(report_fp, 'w') as f:
f.write("Classification Report with threshold {}\n".format(threshold))
for curr_fold, (train_index, test_index) in enumerate(crossval_splits):
logging.info(f"Crossval fold {curr_fold}")
# For each split...
train_X = X[train_index]
train_Y = Y[train_index]
test_X = X[test_index]
test_Y = Y[test_index]
# Train model
clf.fit(train_X, train_Y)
# Predict probabilities for AUC curve
pred_Y = clf.predict_proba(test_X)
pred_classes = clf.classes_
# predict_proba() returns probs for both classes, find out which is motile
motile_col = np.where(pred_classes == 'motile')[0][0]
pred_Y = pred_Y[:,motile_col]
# Use configured threshold to assign labels 'motile' and 'non-motile'
num_tracks = len(pred_Y)
pred_Y_labels = np.array(['non-motile'] * num_tracks, dtype=object)
pred_Y_labels[pred_Y > threshold] = 'motile'
binary_test_Y = [1 if x=='motile' else 0 for x in test_Y]
# Write to reports
report = metrics.classification_report(test_Y, pred_Y_labels)
with open(report_fp, 'a') as f:
f.write("\n")
f.write("Fold {}".format(curr_fold))
f.write(report)
# Calculate ROC and AUC and add to plot
fpr, tpr, _ = metrics.roc_curve(test_Y, pred_Y, pos_label='motile')
auc = metrics.roc_auc_score(binary_test_Y, pred_Y)
ax.plot(fpr, tpr, label="Fold {0} (area = {1:.2f})".format(curr_fold, auc))
ax.legend()
fig.savefig(op.join(output_dir, "crossval_roc_plot.png"))
def train(experiments, batch_outdir, config, hyperparams={"max_depth": 10}):
""" Trains an sklearn random forest model on input features and saves it as a pickle
Parameters
----------
experiments: list
List of experiments generated by pipeline-level glob
batch_outdir: string
Output directory for batch-level metrics and trained model
config: dict
Configuration dictionary read in by pipeline from YAML
hyperparams: dict
Hyperparameters for model training. Exposed for DOMINE optimization.
        NOTE: Temporarily defaults to {"max_depth": 10}
NOTE: Do not add hyperparameters to config, as it will be fixed eventually
Returns
-------
None
"""
# Batch-level feature and label storage
batch_X = []
batch_Y = []
groups = []
feat_columns = None
for group_id, exp in enumerate(experiments):
# Get feature CSV filepath
feat_subdir = get_exp_subdir('features_dir', exp, config)
feat_filepath = op.join(feat_subdir, config['features']['output'])
# Read in feature CSV
with open(feat_filepath, 'r') as f:
reader = csv.DictReader(f)
# Detect empty feature CSVs
if reader.fieldnames is None:
logging.warning(f'No track features found in {feat_filepath}. Skipping.')
continue
# Set feature column names if they haven't been determined
if feat_columns is None:
feat_columns = [feat for feat in reader.fieldnames if (feat not in IGNORE_FEATS)]
logging.info(f'Using following features for training: {feat_columns}')
for row in reader:
# Assert that the motility column exists
if 'motility' not in row.keys():
# break to catch the empty dataset
break
# Add label to label set
batch_Y.append(row['motility'].lower())
# Add features to feature set
batch_X.append([row[feat] for feat in feat_columns])
# Record group for cross-validation
groups.append(group_id)
batch_X = np.array(batch_X).astype(np.float32)
batch_Y = np.array(batch_Y, dtype=object)
groups = np.array(groups)
if not batch_X.size:
logging.error("No valid rows found in features file, exiting training without output.")
return
### FILTER LABELS
### TODO: Decide what to do with "Ambiguous" or other labels
### Currently only "Motile" and "Non-Motile" are kept. 07/29/2020 JL
keep_indices = [] # indices to keep
found_nonlabels = set() # record found bad labels
drop_count = 0 # number of tracks filtered out
    # Collect indices of rows whose labels are standard ('motile'/'non-motile')
for i in range(len(batch_X)):
if batch_Y[i].lower() not in ['motile', 'non-motile']:
found_nonlabels.add(batch_Y[i])
drop_count += 1
else:
keep_indices.append(i)
# Don't train on any tracks that aren't Motile or Non-motile
if drop_count:
logging.warning("Non-standard labels encountered: {}".format(found_nonlabels))
logging.warning("{} tracks dropped from training.".format(drop_count))
    # Keep only the rows at the collected indices
batch_X = batch_X[keep_indices]
batch_Y = batch_Y[keep_indices]
groups = groups[keep_indices]
if not batch_X.size:
logging.error("No tracks remain after label filtering, exiting training without output.")
return
### PREPROCESS OR AUGMENT
### TODO: At some point, if we use anything other than decision trees, we'll
### need to standardize features or something. Do that here, and consider
### writing helper functions.
    # replace NaN and infinite feature values with finite numbers
batch_X = np.nan_to_num(batch_X)
### INITIALIZE MODEL
clf = RandomForestClassifier(**hyperparams)
### CROSS VALIDATION
if config['classifier']['do_cross_validation']:
logging.info('Cross validation enabled, running...')
cross_validate(clf, batch_X, batch_Y, groups, batch_outdir, config)
### TRAIN MODEL ON ALL TRAINING DATA
### This occurs regardless of cross validation
clf.fit(batch_X, batch_Y)
### SAVE MODEL TO SPECIFIED PATH
class_dir = get_batch_subdir('classifier_dir', batch_outdir, config)
model_savepath = op.join(class_dir, config['classifier']['model_savepath'])
with open(model_savepath, 'wb') as f:
pickle.dump((clf, feat_columns), f)
logging.info(f'Saved trained model: {op.join(*Path(model_savepath).parts[-2:])}')
### SAVE METRICS
# Predict probabilities for AUC curve and Precision-Recall curve
pred_Y = clf.predict_proba(batch_X)
pred_classes = clf.classes_
# predict_proba() returns probs for both classes, find out which is motile
motile_col = np.where(pred_classes == 'motile')[0][0]
prob_Y = pred_Y[:,motile_col]
# Use configured threshold to assign labels 'motile' and 'non-motile'
threshold = config['classifier']['motility_threshold']
num_tracks = len(prob_Y)
pred_Y_labels = np.array(['non-motile'] * num_tracks, dtype=object)
pred_Y_labels[prob_Y > threshold] = 'motile'
# Write metrics
write_metrics(batch_Y, pred_Y_labels, prob_Y, batch_outdir, config, "train")
def predict(experiment, config):
""" Tests an sklearn model on input features and writes prediction JSONs
Parameters
----------
experiment: str
The experiment to predict on
config: dict
Configuration dictionary read in by pipeline from YAML
Returns
-------
None
TODO: Return metrics for DOMINE optimization?
This would be done by writing to a file via directory helpers.
Toga will be able to override directory logic to obtain metrics.
"""
model_path = config['_model_absolute_path']
### LOAD CLASSIFIER FROM PICKLE
try:
with open(model_path, 'rb') as f:
clf, feat_columns = pickle.load(f)
logging.info(f"Found and loaded {model_path}")
    except Exception:
logging.warning(f"Failed to open classifier {model_path}")
return None
# Storage for batch-level metrics
batch_true_Y = []
batch_pred_Y = []
batch_prob_Y = []
batch_alltracks = 0
# Get feature CSV filepath
feat_subdir = get_exp_subdir('features_dir', experiment, config)
feat_filepath = op.join(feat_subdir, config['features']['output'])
# Get track JSON directory
track_subdir = get_exp_subdir('track_dir', experiment, config)
# Get output predict directory
predict_subdir = get_exp_subdir('predict_dir', experiment, config, rm_existing=True)
if not os.path.exists(feat_filepath):
logging.error(f"Feature file {feat_filepath} missing. Aborting classification.")
return
### READ FEATURES FROM CSV FILE
exp_X = []
exp_Y = [] # labels are for metrics
track_ID = []
with open(feat_filepath, 'r') as f:
reader = csv.DictReader(f)
# Assert features aren't empty or no header
if not reader.fieldnames:
            logging.error(f"Feature file {feat_filepath} is empty or lacks a header row.")
return None
# Assert that its features list is the same as training
this_keys = [feat for feat in reader.fieldnames if (feat not in IGNORE_FEATS)]
if set(this_keys) != set(feat_columns):
logging.error(f"Read features list {this_keys} doesn't match model's {feat_columns}")
return None
for row in reader:
# Save labels if they exist
if 'motility' not in row.keys():
exp_Y.append('')
else:
exp_Y.append(row['motility'].lower())
# Assemble features in the same order as training data
exp_X.append([row[feat] for feat in feat_columns])
track_ID.append(int(row['track']))
exp_X = np.array(exp_X).astype(np.float32)
exp_Y = np.array(exp_Y, dtype=object)
if exp_X.size == 0:
logging.error("No tracks found in directory.")
return None
### PREPROCESS OR AUGMENT
### TODO: At some point, if we use anything other than decision trees, we'll
### need to standardize features or something. Do that here, and consider
### writing helper functions.
    # replace NaN and infinite feature values with finite numbers
exp_X = np.nan_to_num(exp_X)
### PREDICT
pred_Y = clf.predict_proba(exp_X)
# predict_proba() returns probs for both classes, find out which is motile
pred_classes = clf.classes_
motile_col = np.where(pred_classes == 'motile')[0][0]
prob_Y = pred_Y[:,motile_col]
# Use configured threshold to classify into 'motile' and 'other'
# TODO: Using 'other' here for visualizer but 'non-motile' is probably better
# Change 'other' to 'non-motile' in both classifier and visualizer
threshold = config['classifier']['motility_threshold']
num_tracks = len(prob_Y)
pred_Y_labels = np.array(['other'] * num_tracks, dtype=object)
pred_Y_labels[prob_Y > threshold] = 'motile'
# Metrics writer expects 'motile' and 'non-motile'
metrics_compat = np.array(['non-motile'] * num_tracks, dtype=object)
metrics_compat[prob_Y > threshold] = 'motile'
### WRITE TO PREDICT TRACK JSONS
track_fpaths = sorted(glob.glob(op.join(track_subdir, '*.json')))
for i in range(num_tracks):
# new keys and values to be added to the JSON
update_dict = {'classification': pred_Y_labels[i],
'probability_motility': prob_Y[i]}
# we're doing this just in case sequences are nonconsecutive
with open(track_fpaths[track_ID[i]], 'r') as f:
data = json.load(f)
data.update(update_dict)
# write out JSON files
with open(op.join(predict_subdir, op.basename(track_fpaths[track_ID[i]])), 'w') as f:
json.dump(data, f, indent=4)
logging.info(f'Saved predictions: {op.join(*Path(predict_subdir).parts[-2:])}')
### IF TRACKS HAVE LABELS, ADD TO BATCH STORAGE FOR METRICS
for i in range(num_tracks):
if exp_Y[i].lower() in ['motile', 'non-motile']:
# this track has a valid label
batch_true_Y.append(exp_Y[i])
batch_pred_Y.append(metrics_compat[i])
batch_prob_Y.append(prob_Y[i])
batch_alltracks += num_tracks
return (batch_true_Y, batch_pred_Y, batch_prob_Y, num_tracks)
def predict_batch_metrics(batch_true_Y, batch_pred_Y, batch_prob_Y, batch_alltracks, batch_outdir, config):
'''Calculate batch metrics if labels exist'''
if len(batch_true_Y):
logging.info("{} of {} tracks have labels, calculating batch metrics".format(
len(batch_true_Y), batch_alltracks))
write_metrics(batch_true_Y, batch_pred_Y, batch_prob_Y, batch_outdir, config, "predict")
```
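A minimal sketch of the probability-to-label convention shared by `train` and `predict` above: the classifier's 'motile' probability is compared against the configured motility threshold. The numbers below are made up.
```python
import numpy as np

prob_Y = np.array([0.10, 0.55, 0.90])    # hypothetical 'motile' probabilities
threshold = 0.5                          # stands in for config['classifier']['motility_threshold']
labels = np.array(['non-motile'] * len(prob_Y), dtype=object)
labels[prob_Y > threshold] = 'motile'
print(labels.tolist())                   # ['non-motile', 'motile', 'motile']
```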
#### File: helm_dhm/simulator/utils.py
```python
import json
import logging
import os.path as op
import numpy as np
from scipy import stats
VALID_CONFIG_DISTS = ['truncnorm']
VALID_CONFIG_SHAPES = ['gaussian', 'airy_disk']
def create_dist_objs(**kwargs):
"""Create a scipy distribution object that we can sample"""
dists = []
# Truncated normal distribution
if kwargs['distribution_name'] == 'truncnorm':
# Confirm kwargs are in list format
for kw in ['mean', 'std', 'min', 'max']:
if not isinstance(kwargs[kw], list):
kwargs[kw] = [kwargs[kw]]
# Loop through specified velocity means and std devs
for mu, sigma, min_val, max_val in zip(kwargs['mean'], kwargs['std'],
kwargs['min'], kwargs['max']):
dist = stats.truncnorm((min_val - mu) / sigma,
(max_val - mu) / sigma,
loc=mu,
scale=sigma)
dists.append(dist)
else:
raise ValueError(f'Distribution name ({kwargs["distribution_name"]}) not recognized.')
return dists
def config_check(config_dict):
"""Validates a simulation configuration
Parameters
----------
config_dict: dict
Dictionary containing loaded simulator config
"""
###################################
# Image params
logging.info('Checking simulation image parameters')
ip = config_dict['image_params']
n_chamber_dims = 3 if ip['chamber_depth'] else 2
if len(ip['resolution']) != 2:
raise ValueError('Image `resolution` must be 2 dimensional')
if len(ip['buffer']) != 2:
raise ValueError('Image `buffer` must be 2 dimensional')
if ip['chamber_depth']:
if not 0 <= ip['focus_plane'] <= ip['chamber_depth']:
raise ValueError(f'`focus_plane` must be on interval [0, {ip["chamber_depth"]}]')
distribution_check(ip['noise'], 1)
###################################
# Experiment params
logging.info('Checking simulation experiment parameters')
distribution_check(config_dict['exp_params']['drift'], 2)
###################################
# Particles
# Movement, size, brightness distributions
logging.info('Checking simulation particle parameters')
particles = config_dict['non_motile']['particles'].copy()
particles.update(config_dict['motile']['particles'])
for val in particles.values():
model_fpath = val['movement'].get('model_fpath')
if model_fpath is not None:
if not op.exists(model_fpath):
                raise ValueError(f"Can't find model for simulating track paths ({model_fpath}). Confirm it exists and check that the relative file path is visible.")
else:
distribution_check(val['movement'], n_chamber_dims)
distribution_check(config_dict['non_motile']['size'], 1)
distribution_check(config_dict['non_motile']['brightness'], 1)
distribution_check(config_dict['motile']['size'], 1)
distribution_check(config_dict['motile']['brightness'], 1)
# Particle shapes
for particle_shape in config_dict['non_motile']['shapes']:
if particle_shape not in VALID_CONFIG_SHAPES:
raise ValueError(f'Shape `{particle_shape}` not recognized.')
for particle_shape in config_dict['motile']['shapes']:
if particle_shape not in VALID_CONFIG_SHAPES:
raise ValueError(f'Shape `{particle_shape}` not recognized.')
def distribution_check(dist_dict, n_noise_dims):
"""Validate a dictionary with keywords defining a scipy distribution"""
if not dist_dict['distribution_name'] in VALID_CONFIG_DISTS:
raise ValueError(f'Distribution {dist_dict["distribution_name"]} not recognized.')
for key in ['mean', 'std', 'min', 'max']:
if not key in dist_dict.keys():
raise ValueError(f'Missing distribution key "{key}"')
if not(isinstance(dist_dict[key], list)):
dist_dict[key] = [dist_dict[key]]
if len(dist_dict[key]) != n_noise_dims:
raise ValueError('Length of each distribution parameter list incorrect.'
f' Got {len(dist_dict[key])}, expected {n_noise_dims}.')
    if not all(mn <= mu <= mx for mn, mu, mx in
               zip(dist_dict['min'], dist_dict['mean'], dist_dict['max'])):
        raise ValueError('Distribution must have min <= mean <= max')
def get_track_label_rows(track_fpath, rescale_factor=(0.5, 0.5)):
"""Load a track and get rows that can be saved to a CSV label file
Parameters
----------
track_fpath: str
Path to track .json file containing a simulated track
rescale_factor: tuple
Proportion to multiply coordinates by (e.g., to convert labels from
2048x2048 window to 1024x1024, use (0.5, 0.5)). Define as
(row_rescale, col_rescale).
Returns
-------
row_data: list of dict
Dictionaries with each containing 1 row of information to be written to
the labels CSV file.
"""
# Load track json from file
with open(track_fpath, 'r') as json_file:
track_dict = json.load(json_file)
# Determine static track properties
track_num = track_dict['Track_ID']
motility = 'motile' if track_dict['motility'] is True else 'non-motile'
# Load position/time information
frame_nums = track_dict['Times']
row_vals, col_vals = [], []
for pos in track_dict['Particles_Position']:
row_vals.append(int(np.around(pos[0] * rescale_factor[0])))
col_vals.append(int(np.around(pos[1] * rescale_factor[1])))
# Save to a list of dict that can be written to CSV
row_data = []
for frame_num, row, col in zip(frame_nums, row_vals, col_vals):
row_data.append({'track': track_num,
'X': col,
'Y': row,
'frame': frame_num,
'motility': motility})
return row_data
```
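A standalone sketch of the `truncnorm` parameterization used in `create_dist_objs` above: scipy expects the truncation bounds expressed in standard deviations relative to the mean, which is why the code passes `(min - mu) / sigma` and `(max - mu) / sigma`. The numbers below are illustrative.
```python
import numpy as np
from scipy import stats

mu, sigma, lo, hi = 5.0, 2.0, 0.0, 10.0
dist = stats.truncnorm((lo - mu) / sigma, (hi - mu) / sigma, loc=mu, scale=sigma)
samples = dist.rvs(size=1000, random_state=0)
assert lo <= samples.min() and samples.max() <= hi   # all samples respect the bounds
```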
#### File: helm_dhm/tracker/LAP_tracker.py
```python
import sys
import os
import os.path as op
import glob
import logging
import json
import multiprocessing
from functools import partial
from pathlib import Path
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from scipy.stats import rankdata
from scipy.spatial import distance_matrix
from scipy.optimize import linear_sum_assignment
from scipy.interpolate import interp1d
import networkx as nx
from utils.dir_helper import get_batch_subdir, get_exp_subdir
from utils.file_manipulation import tiff_read
def get_diff_static(I, ds_median, config):
"""
Computes a diff between current image I and the dataset median.
Parameters
----------
I: 2d array
the current image frame
ds_median: 2d array
the dataset median
config: dict
configuration
"""
diff = abs(I - ds_median)
abs_threshold = config['absthresh']
pc_threshold = config['pcthresh']
    # Threshold is the larger of abs_threshold and the pc_threshold-th percentile of diff
    threshold = max(abs_threshold, np.percentile(diff, pc_threshold))
    # Suppress values of diff below the threshold
    diff[diff < threshold] = 0
return diff
def get_particles(range_diff, image, clustering_settings):
"""Get the detections using Gary's original method
Returns a list of particles and their properties
Parameters
----------
range_diff:
output from background subtraction
image:
original image frame for intensity calculation
may have a different shape than range_diff
    clustering_settings:
hyperparameters for the clustering algorithm
Returns
-------
list of dicts with keys:
pos: (y, x) coordinates of particle
size: number of pixels in cluster
bbox_tl: bbox (top, left)
bbox_hw: bbox (height, width)
max_intensity: max intensity of pixels (list)
"""
# select points above a threshold, and get their weights
idx = (range_diff > 0)
points = np.column_stack(np.nonzero(idx))
weights = range_diff[idx].ravel().astype(float)
# empty list to store particles
particles = []
if len(points) > 0:
# use DBSCAN to cluster the points
dbscan = DBSCAN(eps=clustering_settings['dbscan']['epsilon_px'],
min_samples=clustering_settings['dbscan']['min_weight'])
labels = dbscan.fit_predict(points, sample_weight=weights)
n_clusters = int(np.max(labels)) + 1
for l in range(n_clusters):
idx = (labels == l)
# must have specified minimum number of points
# keep track of clusters that fall below this thresh
if np.sum(idx) < clustering_settings['filters']['min_px']:
continue
relevant = points[idx]
# Build particle properties
particle = {}
# center of particle
particle['pos'] = [round(i, 1) for i in np.average(relevant, axis=0).tolist()]
# number of pixels in particle
particle['size'] = int(np.sum(idx))
            # bounding box: top-left anchor (y, x) and height/width
bbox_y, bbox_x = int(np.min(relevant[:,0])), int(np.min(relevant[:,1]))
bbox_h, bbox_w = int(np.max(relevant[:,0]) - np.min(relevant[:,0])), \
int(np.max(relevant[:,1]) - np.min(relevant[:,1]))
particle['bbox'] = ((bbox_y, bbox_x), (bbox_h, bbox_w))
# convert bounding box indices to original resolution
yres_ratio = image.shape[0] / range_diff.shape[0]
xres_ratio = image.shape[1] / range_diff.shape[1]
bbox_y_ores = int(bbox_y * yres_ratio)
bbox_h_ores = int(bbox_h * yres_ratio)
bbox_x_ores = int(bbox_x * xres_ratio)
bbox_w_ores = int(bbox_w * xres_ratio)
# max intensity for each channel
if len(image.shape) == 2:
# grayscale original image, single channel
particle['max_intensity'] = [int(np.amax(image[bbox_y_ores:bbox_y_ores+bbox_h_ores+1,
bbox_x_ores:bbox_x_ores+bbox_w_ores+1]))]
else:
# RGB original image, max per channel
particle['max_intensity'] = np.amax(image[bbox_y_ores:bbox_y_ores+bbox_h_ores+1,
bbox_x_ores:bbox_x_ores+bbox_w_ores+1],
axis=(0,1)).tolist()
particles.append(particle)
return particles
def linking_LAP(prev_particles, next_particles, max_link):
""" Calculate LAP cost matrix between particles in consecutive frames
Parameters
----------
prev_particles: list
list of particle dicts detected in frame n-1
next_particles: list
list of particle dicts detected in frame n
max_link: float
maximum linking distance between particles
"""
# Get coordinates from list of particle dicts
prev_coords = [p['pos'] for p in prev_particles]
next_coords = [p['pos'] for p in next_particles]
p = len(prev_coords)
n = len(next_coords)
# Top left is the euclidean cost matrix between the particles
topleft = distance_matrix(prev_coords, next_coords)
# If cost is higher than max, set it to inf
topleft[topleft > max_link] = 1e8
    # Top right and bottom left are diagonal matrices of value 1.05 * max
    # for indicating stopping & starting tracks at this frame
if len(topleft[topleft != 1e8]) != 0:
topright = np.ones((p,p)) * 1e8
np.fill_diagonal(topright, 1.05 * np.max(topleft[topleft != 1e8]))
botleft = np.ones((n,n)) * 1e8
np.fill_diagonal(botleft, 1.05 * np.max(topleft[topleft != 1e8]))
else:
        # topleft is all 1e8, no links possible; fill diagonals with 1s to guarantee a solution.
topright = np.ones((p,p)) * 1e8
np.fill_diagonal(topright, 1)
botleft = np.ones((n,n)) * 1e8
np.fill_diagonal(botleft, 1)
    # Bottom right is a theoretical necessity, described in Supplement 3 of
    # Jaqaman et al. 2008. It's the transpose of top left, with "non-inf" values
    # set to a minimal cost.
botright = topleft.T.copy()
botright[botright != 1e8] = 1e-8
# Build final cost matrix
left = np.concatenate((topleft, botleft), axis=0)
right = np.concatenate((topright, botright), axis=0)
LAP_cost = np.concatenate((left, right), axis=1)
return LAP_cost
def stitch_LAP(track_ends, track_starts, max_link, max_skips):
""" Calculate LAP cost matrix between track ends and starts for stitching
Parameters
----------
track_ends: list
List of particles that are at the end of tracks
track_starts: list
List of particles that are at the start of tracks
max_link: float
Maximum distance between stitched start/end points
max_skips: float
Maximum skipped frames between start/end points
"""
end_coords = [(e[0], e[1]) for e in track_ends]
end_times = [[e[2]] for e in track_ends]
start_coords = [(s[0], s[1]) for s in track_starts]
start_times = [[s[2]] for s in track_starts]
e = len(track_ends)
s = len(track_starts)
topleft = distance_matrix(end_coords, start_coords)
frame_gaps = distance_matrix(end_times, start_times)
topleft[np.where(frame_gaps > max_skips)] = 1e8
topleft[topleft > max_link] = 1e8
if len(topleft[topleft != 1e8]) != 0:
topright = np.ones((e,e)) * 1e8
np.fill_diagonal(topright, 1.05 * np.max(topleft[topleft != 1e8]))
botleft = np.ones((s,s)) * 1e8
np.fill_diagonal(botleft, 1.05 * np.max(topleft[topleft != 1e8]))
else:
        # topleft is all 1e8, no links possible; fill diagonals with 1s to guarantee a solution.
topright = np.ones((e,e)) * 1e8
np.fill_diagonal(topright, 1)
botleft = np.ones((s,s)) * 1e8
np.fill_diagonal(botleft, 1)
botright = topleft.T.copy()
botright[botright != 1e8] = 1e-8
left = np.concatenate((topleft, botleft), axis=0)
right = np.concatenate((topright, botright), axis=0)
LAP_cost = np.concatenate((left, right), axis=1)
return LAP_cost
def plot_tracks(G, exp_name, plot_output_directory,
win_size=(1024, 1024)):
"""Plot traces for all tracks on a dark background
Parameters
----------
    G: networkx.Graph
        Graph of linked detections; each connected component is one track
exp_name: str
Experiment name
plot_output_directory: str
Directory for saving the track plot
win_size: iterable
Number of pixels in row and column dimensions, respectively.
"""
# Create plot and use dark background
plt.style.use('dark_background')
# Debug track plot
fig, ax = plt.subplots(figsize=(8, 8))
# Overlay track plot
px = 1/128
fig2 = plt.figure(frameon=False, dpi=128)
fig2.set_size_inches(2048*px, 2048*px)
ax2 = plt.Axes(fig2, [0., 0., 1., 1.])
ax2.set_axis_off()
fig2.add_axes(ax2)
if len(G) == 0:
logging.warning('No tracks were available to plot')
for cc in list(nx.connected_components(G)):
cc_sorted = sorted(cc, key = lambda x: x[2])
positions = np.array([(x,y) for x,y,z in cc_sorted])
ax.plot(positions[:, 1], positions[:, 0])
ax2.plot(positions[:, 1], positions[:, 0])
# Set up title and axis labels
ax.set_title('Particle tracks identified in experiment\n' + exp_name)
ax.invert_yaxis()
ax.axis('equal') # Force a square axis
ax.set_xlim(0, win_size[1])
ax.set_ylim(win_size[0], 0)
ax2.invert_yaxis()
ax2.axis('equal')
ax2.axis('off')
ax2.set_xlim(0, win_size[1])
ax2.set_ylim(win_size[0], 0)
fig.savefig(op.join(plot_output_directory, exp_name + "_track_plots.png"),
dpi=150)
fig2.savefig(op.join(plot_output_directory, exp_name + "_track_overlay.png"))
plt.close()
def export_JSON(G, particle_dict, track_dir, config):
# list of tracks and their nodes
ccs = list(nx.connected_components(G))
# sort tracks by starting node time
ccs = sorted(ccs, key = lambda x: min([p[2] for p in x]))
# for each connected component
for idx, cc in enumerate(ccs):
json_dict = {
'Times': [],
'Particles_Position': [],
'Particles_Estimated_Position': [],
'Particles_Size': [],
'Particles_Bbox': [],
'Particles_Max_Intensity': [],
'Track_ID': idx,
'classification': None
}
# sort track by timestamp
cc_sorted = sorted(cc, key = lambda x: x[2])
cc_coords = [[c[0], c[1]] for c in cc_sorted]
cc_times = [int(c[2]) for c in cc_sorted]
# function for interpolation
interp_func = interp1d(cc_times, cc_coords, kind='linear', axis=0)
# for each timestep in timerange
for t in range(cc_times[0], cc_times[-1]+1):
json_dict['Times'].append(t)
if t in cc_times:
# particle exists, no interpolation
# get particle object
particle = particle_dict[cc_sorted[cc_times.index(t)]]
json_dict['Particles_Position'].append(particle['pos'])
json_dict['Particles_Estimated_Position'].append(particle['pos'])
json_dict['Particles_Size'].append(particle['size'])
json_dict['Particles_Bbox'].append(particle['bbox'])
json_dict['Particles_Max_Intensity'].append(particle['max_intensity'])
else:
# particle DNE, interpolate
json_dict['Particles_Estimated_Position'].append(interp_func(t).tolist())
json_dict['Particles_Position'].append(None)
json_dict['Particles_Size'].append(None)
json_dict['Particles_Bbox'].append(None)
json_dict['Particles_Max_Intensity'].append(None)
# save dictionary to JSON
json_fpath = op.join(track_dir, f'{idx:05}.json')
with open(json_fpath, 'w') as f:
json.dump(json_dict, f, indent=2)
def _mp_particles(fpath, mf, conf):
""" Multiprocessing function for reading and identifying particles """
frame = tiff_read(fpath)
diff = get_diff_static(frame, mf, conf['diff_comp'])
detections = get_particles(diff, frame, conf['clustering'])
return detections
def run_tracker(exp_dir, holograms, originals, config, n_workers=1):
"""Execute the tracker code for an experiment
Parameters
----------
exp_dir: str
Experiment directory path
holograms: list
Ordered list of filepaths to holograms
config: dict
Loaded HELM configuration dictionary
n_workers: int
Number of workers to use for multiprocessed portions
"""
exp_name = Path(exp_dir).name
tracker_settings = config['tracker_settings']
track_plot = tracker_settings['track_plot']
track_dir = get_exp_subdir('track_dir', exp_dir, config, rm_existing=True)
plot_dir = get_exp_subdir('evaluation_dir', exp_dir, config, rm_existing=True)
tracker_debug_dir = op.join(plot_dir, "tracker_debug")
# Track and plot directories if they don't exist yet
Path(tracker_debug_dir).mkdir(parents=True, exist_ok=True)
logging.info(f'Track files dir: {op.join(*Path(track_dir).parts[-2:])}')
logging.info(f'Track plots dir: {op.join(*Path(plot_dir).parts[-2:])}')
# Read median image
median_frame = tiff_read(op.join(get_exp_subdir('validate_dir', exp_dir, config),
                             f'{exp_name}_median_image.tif')).astype(float)
# Get particles per frame
with multiprocessing.Pool(n_workers) as pool:
particle_stack = list(tqdm(pool.imap_unordered(
partial(_mp_particles, mf=median_frame, conf=tracker_settings),
holograms), total=len(holograms)))
# Link particles into tracks
G = nx.Graph()
particle_dict = {}
for i in tqdm(range(1, len(particle_stack))):
p = len(particle_stack[i-1])
n = len(particle_stack[i])
if p == 0 or n == 0:
# No particles in previous or next frame, no edges
continue
linking_cost = linking_LAP(particle_stack[i-1],
particle_stack[i],
tracker_settings['LAPtracking']['max_assignment_dist'])
rows, cols = linear_sum_assignment(linking_cost)
for row, col in zip(rows, cols):
if row < p and col < n:
prev_coord = np.concatenate((particle_stack[i-1][row]['pos'], [i-1]))
next_coord = np.concatenate((particle_stack[i][col]['pos'], [i]))
# Add edge to graph
G.add_edge(tuple(prev_coord), tuple(next_coord))
# Add nodes to dict
particle_dict[tuple(prev_coord)] = particle_stack[i-1][row]
particle_dict[tuple(next_coord)] = particle_stack[i][col]
# Track stitching
track_starts = []
track_ends = []
for cc in list(nx.connected_components(G)):
cc_sorted = sorted(cc, key = lambda x: x[2])
track_starts.append(cc_sorted[0])
track_ends.append(cc_sorted[-1])
e = len(track_ends)
s = len(track_starts)
if e != 0 and s != 0:
stitching_cost = stitch_LAP(track_ends, track_starts,
tracker_settings['LAPtracking']['max_assignment_dist'],
tracker_settings['LAPtracking']['max_skip'])
rows, cols = linear_sum_assignment(stitching_cost)
for row, col in zip(rows, cols):
if row < e and col < s:
# Add stitched edges
# TODO: when writing to JSON, handle interpolation
G.add_edge(track_ends[row], track_starts[col])
# Drop tracks with len < limit
for component in list(nx.connected_components(G)):
if len(component) < tracker_settings['LAPtracking']['min_track_obs']:
for node in component:
G.remove_node(node)
# Plot tracks
if track_plot:
plot_tracks(G, exp_name, plot_dir)
# export tracks to json
export_JSON(G, particle_dict, track_dir, config)
```
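A small, self-contained sketch of the frame-to-frame linking step above: build the LAP cost matrix for two consecutive frames and solve it with the Hungarian algorithm, keeping only assignments in the top-left block as links (Jaqaman et al. 2008). The coordinates and linking radius below are made up.
```python
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial import distance_matrix

prev_coords = [(0.0, 0.0), (10.0, 10.0)]                 # detections in frame n-1
next_coords = [(1.0, 0.5), (50.0, 50.0), (9.0, 11.0)]    # detections in frame n
p, n, max_link = len(prev_coords), len(next_coords), 5.0

topleft = distance_matrix(prev_coords, next_coords)
topleft[topleft > max_link] = 1e8
alt = 1.05 * np.max(topleft[topleft != 1e8])             # cost of starting/stopping a track
topright = np.full((p, p), 1e8)
np.fill_diagonal(topright, alt)
botleft = np.full((n, n), 1e8)
np.fill_diagonal(botleft, alt)
botright = topleft.T.copy()
botright[botright != 1e8] = 1e-8
cost = np.block([[topleft, topright], [botleft, botright]])

rows, cols = linear_sum_assignment(cost)
links = [(r, c) for r, c in zip(rows, cols) if r < p and c < n]
print(links)   # [(0, 0), (1, 2)]: detection 2 links to particle 1 despite the far-away decoy
```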
#### File: helm_dhm/validate/preproc.py
```python
import os
import logging
import multiprocessing
import os.path as op
import glob
from tqdm import tqdm
from pathlib import Path
import numpy as np
from utils.dir_helper import get_exp_subdir
from utils.file_manipulation import tiff_read
from utils.file_manipulation import tiff_write
def mp_resize(args):
""" Multiprocess function for resizing """
image = tiff_read(args['raw_path'], resize_dims=args['resize_shape'], flatten=args['flatten'])
if image is None:
image = np.zeros((args['resize_shape']))
image = (image*255).astype(np.uint8)
tiff_write(image, args['resize_path'])
def resize_holograms(holo_fpaths, outdir, resize_shape, n_workers=1):
""" Writes resized holograms to output directory
holo_fpaths: list of str
List of filepaths to the hologram files
outdir: str
Path to output directory
resize_shape: tuple
Shape of resized image. Dim 2 for grayscale, 3 for RGB
n_workers: int
Number of cores for multiprocessing
"""
# Setup multiprocessed resizing and saving
mp_args = []
for i in range(len(holo_fpaths)):
arg = {
'raw_path': holo_fpaths[i],
'resize_path': op.join(outdir, Path(holo_fpaths[i]).name),
'resize_shape': resize_shape,
'flatten': len(resize_shape) == 2
}
mp_args.append(arg)
with multiprocessing.Pool(n_workers) as pool:
_ = list(tqdm(pool.imap_unordered(mp_resize, mp_args), total=len(holo_fpaths), desc='Resizing Holograms'))
```
#### File: src/jewel/diversity.py
```python
import numpy as np
from scipy.spatial.distance import pdist, squareform
from utils.logger import get_logger
# Global variable for logging
logger = get_logger()
# Note for developers: follow the example of `gaussian_similarity` to implement
# additional similarity functions. The outer wrapper function should take any
# necessary parameters and return a function that computes the pairwise
# similarities using a single `metadata` argument. If custom code is needed to
# parse parameters from the YAML configuration, define a function that takes a
# YAML dictionary containing configuration and returns a dictionary containing
# the loaded parameters, then assign this to the `load_params` attribute of the
# similarity wrapper function. If the parameters can be passed in as-is to the
# similarity wrapper function, then the `load_params` function does not need to
# be defined. Alternatively, the similarity function could be a class which has a
# `load_params` class method defined (see the sketch after this file).
def gaussian_similarity(scale_factor=None):
"""
Return a function that computes Gaussian similarity using the supplied scale
factor, or the median distance as a heuristic if none is supplied.
Parameters
----------
scale_factor: float or None
        a scale factor (gamma) multiplying the distances in the Gaussian
        exponent; if None, the reciprocal of the median pairwise distance is used
Returns
-------
similarity_func: callable
        a function that takes a dict containing an array-like `dd` entry that
holds diversity descriptor features in its rows, and returns an n-by-n
array of pairwise similarities between the corresponding n ASDPs; values
are guaranteed to be within the range [0, 1]
"""
def similarity_func(metadata):
dd = metadata['dd']
# The `pdist` function computes the upper triangular entries of the
# symmetric pairwise distance matrix. The `squareform` function converts
# these entries to a standard symmetric square matrix with zero entries
# along the diagonal. This requires less than half of the distance
# function calculations as would be needed using `cdist(dd, dd)`.
D = squareform(pdist(dd))
if scale_factor is not None:
gamma = scale_factor
else:
gamma = (1.0 / np.median(D))
return np.exp(-(gamma * D)**2)
return similarity_func
def load_similarity_func(config):
"""
Loads a similarity function from a YAML config dict.
Parameters
----------
config: dict
dictionary containing YAML configuration with a "name" entry for a
similarity function defined within the global namespace of this module,
and (optionally) a "parameters" entry containing a dict of parameters
required by that similarity function
Returns
-------
similarity_func: callable
returns a parameterized similarity function corresponding to the
provided configuration, or None if an error occurred during loading
"""
sim_name = config.get('name', None)
if sim_name is None:
logger.warning(f'No similarity function name specified')
return
# Look for the function by name in the global namespace
sim = globals().get(sim_name, None)
if sim is None:
logger.warning(f'Similarity function "{sim_name}" not found')
return
# Get parameters from config and load them with the custom similarity
# function/class code if defined
param_config = config.get('parameters', {})
if hasattr(sim, 'load_params'):
params = sim.load_params(param_config)
else:
params = param_config
if params is None:
return
return sim(**params)
```
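As referenced in the developer note above, a hypothetical additional similarity function following the same convention: an outer wrapper returns a function of `metadata`, and the pairwise values stay within [0, 1]. `cosine_similarity` is an illustration only; it is not part of the module, and to be discoverable by `load_similarity_func` it would have to live in this module's global namespace.
```python
import numpy as np
from scipy.spatial.distance import pdist, squareform

def cosine_similarity():
    """Map pairwise cosine distances (in [0, 2]) to similarities in [0, 1]."""
    def similarity_func(metadata):
        D = squareform(pdist(metadata['dd'], metric='cosine'))
        return 1.0 - D / 2.0
    return similarity_func

# Usage mirroring the gaussian_similarity wrapper above:
sim = cosine_similarity()
S = sim({'dd': np.random.rand(5, 3)})
assert S.shape == (5, 5) and np.all((0.0 <= S) & (S <= 1.0))
```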
#### File: src/utils/dir_helper.py
```python
import os.path as op
import os
from pathlib import Path
import shutil
from glob import glob
from string import Template
from collections.abc import Iterable
def _get_dir(dir_key, prefix, config, rm_existing):
'''Returns the folder associated with nested key dir_key in config. If relative, prepends prefix'''
if not isinstance(dir_key, Iterable):
dir_key = [dir_key]
c = config
for k in dir_key:
subconfig = c
c = c[k]
dirpath = Template(c).substitute(subconfig)
if not dirpath[0] == '/':
dirpath = op.join(prefix, dirpath)
# Delete existing tracks and plots if required
if rm_existing:
if op.exists(dirpath):
shutil.rmtree(dirpath)
if not op.exists(dirpath):
Path(dirpath).mkdir(parents=True)
return dirpath
def get_exp_subdir(dir_key, exp_dir, config, rm_existing=False):
'''Returns the experiment folder associated with dir_key'''
prefix = exp_dir
# If config contains an experiment dir override, parse here
    # This is to allow simultaneous runs on the same experiment
# (e.g. multiple TOGA workers)
# Passing different prefixes prevents clobbering output dirs,
# but allows sharing of hologram/label dirs
eop = 'experiment_outputs_prefix'
if eop in config and config[eop]:
# Don't override original data dirs
if dir_key != "hologram_dir" and dir_key != "label_dir" and dir_key != "preproc_dir":
prefix = op.join(config[eop], op.basename(exp_dir))
retval = _get_dir(['experiment_dirs', dir_key], prefix, config, rm_existing)
return retval
def get_batch_subdir(dir_key, batch_dir, config):
'''Returns the batch folder associated with dir_key'''
return _get_dir(['batch_dirs', dir_key], batch_dir, config, False)
def get_unique_file_by_suffix(root, suffix, logger=None):
"""
Returns the unique file within the given root directory that has the
specified suffix. None is returned if no such unique file exists.
"""
candidates = glob(op.join(root, '*%s' % suffix))
if len(candidates) == 0:
if logger is not None:
logger.warning(
f'No files with suffix "*{suffix}" under "{root}"'
)
return None
if len(candidates) > 1:
if logger is not None:
logger.warning(
f'Multiple files with suffix "*{suffix}" under "{root}"'
)
return None
return candidates[0]
```
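A standalone sketch of the directory convention `_get_dir` relies on: the configured path may reference sibling keys via `string.Template` syntax, and relative paths get the experiment or batch prefix prepended. The config keys below are illustrative, not the real pipeline configuration.
```python
from string import Template

config = {
    'experiment_dirs': {
        'track_version': 'v2',
        'track_dir': 'tracks_$track_version',
    }
}
subconfig = config['experiment_dirs']
print(Template(subconfig['track_dir']).substitute(subconfig))   # tracks_v2
# _get_dir(['experiment_dirs', 'track_dir'], '/data/exp001', config, False)
# would then resolve to '/data/exp001/tracks_v2' and create it if missing.
```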
#### File: src/utils/file_manipulation.py
```python
import os
import os.path as op
import logging
import PIL
import numpy as np
from skimage.io import imread
from skimage.transform import resize
def tiff_read(tiff_path, resize_dims=None, flatten=False):
""" Read a tiff image with error handling and optional resizing.
Parameters
----------
tiff_path: str
Path to tiff image
resize_dims: tuple
Optional. Specify to force resize after image read.
flatten: bool
Optional. Flatten multichannel to single by averaging the last channel.
Returns
-------
Array of image if successfully read.
None if image is corrupt or does not exist.
"""
if os.path.exists(tiff_path):
try:
image = imread(tiff_path)
if image.size == 0:
logging.error(f"Corrupt image: {tiff_path}")
return None
        except Exception:
logging.error(f"Corrupt image: {tiff_path}")
return None
else:
        logging.error(f"File doesn't exist: {tiff_path}")
return None
if flatten and len(image.shape) == 3:
# We use averaging instead of rgb2gray because it uses
# CRT luminance, which is a weighted mean:
# https://scikit-image.org/docs/dev/api/skimage.color.html#rgb2gray
image = np.squeeze(np.round(np.mean(image, axis=-1)).astype(np.uint8))
if resize_dims:
image = resize(image, resize_dims, anti_aliasing=True)
return image
def tiff_write(image, save_path):
""" Write an image array to a path as a tiff.
Parameters
----------
image: array
Image to be saved to save_path
save_path: str
Filepath for the image to be saved to
"""
save_folder = op.dirname(save_path)
if not os.path.exists(save_folder):
logging.warning(f"{save_folder} folder does not exist, creating.")
os.makedirs(save_folder)
pil_img = PIL.Image.fromarray(image)
pil_img.save(save_path, compression='tiff_lzw')
``` |
{
"source": "JPLMLIA/TOGA",
"score": 3
} |
#### File: genetic_algorithm/mutate/float.py
```python
import numpy as np
import random
from toga.genetic_algorithm.genetype import Mutator
from toga.genetic_algorithm.mutate.genemutate import GeneMutate
class FloatGene(GeneMutate):
def __init__(self, dictionary=None, parents=None, mutator=Mutator.Crossover, mutator_params={}):
self.dictionary = dictionary
self.parents = parents
self.mutator_params = mutator_params if mutator_params is not None else {}
        percentage = self.mutator_params.get('percentage')
        self.percentage = percentage if percentage is not None else 1.0
self.frequency = self.mutator_params.get('frequency')
super().__init__(dictionary, parents, mutator)
def mutate(self):
"""
:return:
"""
return super().mutate()
def crossover(self):
"""
:return:
"""
if self.parents:
value = np.random.choice([x for x in self.parents], size=1, replace=False).tolist()[0]
return float(value)
else:
return self.random()
def random(self):
"""
:return:
"""
values = self.dictionary.get('range')
val = np.random.uniform(min(values), max(values))
return float(val)
def gaussian_step(self):
"""
:return:
"""
if self.parents:
parent = self.parents[0]
original = parent
values = self.dictionary.get('range')
scale = np.abs((max(values) - min(values)))
new_var = np.random.normal(loc=original, scale=scale/4)
new_var = max(min(values), new_var)
new_var = min(max(values), new_var)
return float(new_var)
else:
return self.random()
def gaussian_random(self):
"""
:return:
"""
values = self.dictionary.get('range')
dist = np.arange(min(values), max(values))
return float(min(max(values), max(min(values), random.gauss(np.mean(dist), sigma=3))))
def scaled(self):
values = self.dictionary.get('range')
return float(((max(values) - min(values)) * self.percentage) + min(values))
def minimum(self):
values = self.dictionary.get('range')
return min(values)
def maximum(self):
values = self.dictionary.get('range')
return max(values)
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
```
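A standalone sketch of the `gaussian_step` mutation above: a child value is drawn around the parent with a standard deviation of one quarter of the gene's range, then clamped to that range. The range and parent value below are made up.
```python
import numpy as np

rng_lo, rng_hi, parent = 0.0, 10.0, 9.5
scale = abs(rng_hi - rng_lo)
child = np.random.normal(loc=parent, scale=scale / 4)
child = min(rng_hi, max(rng_lo, child))   # clamp to [min, max], as in gaussian_step
assert rng_lo <= child <= rng_hi
```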
#### File: toga/optimization_state/metrics.py
```python
class Metric(object):
def __init__(self, name, fixed_axis, axis_range, partitions, index):
self.name = name
self.fixed_axis = fixed_axis
self.axis_range = axis_range
self.partitions = partitions
self.index = index
class Metrics(object):
def __init__(self, input_dictionary):
self.input_dictionary = input_dictionary
self.metrics = self.get_metrics()
def get_metrics(self):
metrics = []
for key, value in self.input_dictionary.items():
metrics.append(Metric(name=key,
fixed_axis=value.get('fixed_axis'),
axis_range=value.get('range'),
partitions=value.get('partitions'),
index=value.get('index')))
# sort by index value
metrics = sorted(metrics, key=lambda x: x.index)
return metrics
if __name__ == '__main__':
import doctest
doctest.testmod()
```
#### File: TOGA/toga/toga_settings.py
```python
import logging
import os
import pathlib
import platform
import yaml
from toga.singleton import Singleton
class Settings(metaclass=Singleton):
def __init__(self):
logging.info('Initializing Settings singleton')
ga_settings_path = os.path.join(os.path.dirname(__file__), 'config', 'genetic_algorithm_settings.yml')
with open(os.path.join(ga_settings_path), 'r') as f:
genetic_algorithm_settings = yaml.safe_load(f)
assert(genetic_algorithm_settings is not None)
# settings for how to affect genes and store high performers
self.gene_mutation_scale = genetic_algorithm_settings['mutators']['scale']
self.active_mutators_by_type = genetic_algorithm_settings['mutators']['type']
self.optimization_strategy = genetic_algorithm_settings['optimization_strategy_maximize']
self.individual_per_bin = genetic_algorithm_settings['individuals_per_bin']
with open(os.path.join(os.path.dirname(__file__), 'config', 'gene_performance_metrics.yml'), 'r') as f:
gene_performance_metrics = yaml.safe_load(f)
assert(gene_performance_metrics is not None)
# metrics that are used to optimize parameters
self.optimization_metrics = gene_performance_metrics['fitness']
with open(os.path.join(os.path.dirname(__file__), 'config', 'server_settings.yml'), 'r') as f:
server_settings = yaml.safe_load(f)
assert(server_settings is not None)
# server settings
self.host = server_settings['host']
self.port = server_settings['port']
with open(os.path.join(os.path.dirname(__file__), 'config', 'run_settings.yml'), 'r') as f:
run_settings = yaml.safe_load(f)
assert(run_settings is not None)
# necessary inputs for TOGA to run
self.metrics_out_location = run_settings['metrics_location']
self.gene_template = run_settings['gene_template']
assert(os.path.exists(self.gene_template))
# output directory for TOGA files
self.output_dir = run_settings['work_dir']['base_dir']
# optional setting to add file path for logging best candidates
self.history_log = None
if 'history_log' in run_settings.keys():
self.history_log = os.path.join(self.output_dir, run_settings['history_log'])
# Settings used to make a runnable command for toga gene testing
# source activate /anaconda3/etc/profile.d/conda.sh; conda activate helm36;
# toga_wrapper --tracker_config /path/to/toga_generated/config.yml
self.use_conda_env = run_settings['environment']['conda']['use']
self.environ_name = run_settings['environment']['conda']['environment_name']
self.conda_shell_exec_loc = run_settings['environment']['conda_shell_executable_location']
if platform.system() == 'Darwin':
assert (os.path.exists(self.conda_shell_exec_loc))
self.runnable_cmd = run_settings['command']['cmd']
self.gene_arg = run_settings['command']['gene_mapping']['key']
self.static_args = run_settings['command']['static_args']
# Rules for how to handle running the subprocess from the above command made from the above
self.process_pool_size = run_settings['workers']['max_workers']
self.timeout = run_settings['workers']['max_run_time']
self.overfill_executor_limit = run_settings['workers']['over_fill_executor']
def create_output_directory(self):
if not os.path.exists(self.output_dir):
pathlib.Path(self.output_dir).mkdir(parents=True, exist_ok=True)
sub_directories = ["best", "graph", "random_config", "workdir", 'generation_log']
for sub_dir in sub_directories:
pathlib.Path(os.path.join(self.output_dir, sub_dir)).mkdir(parents=True, exist_ok=True)
if __name__ == '__main__':
a = Settings()
b = Settings()
assert a is b
```
#### File: TOGA/toga/utils.py
```python
import json
import os
from typing import Callable, Optional, Any
import numpy as np
import scipy.stats as stats
import yaml
from sklearn.preprocessing import minmax_scale
def load(file):
return yaml.safe_load(open(file))
def obtain_configs(config_file):
try:
config = yaml.safe_load(open(config_file))
except:
if os.path.isfile(config_file):
raise OSError("Could not open configuration file. Check that file has correct format.")
else:
raise OSError("Configuration file not found.")
return config
def timeit(func: Callable) -> Callable:
    """
    Timing decorator: records the time before the wrapped function is called and the time after it finishes
    :param func: The function being decorated
:return: wrapper
"""
import time
get_time = time.time
outstr = '%s.%s elapsed time: %0.3f seconds'
    def wrapper(*args, **kwargs) -> Any:
start_time = get_time()
res = func(*args, **kwargs)
print(outstr % (func.__module__, func.__name__, get_time() - start_time))
return res
return wrapper
def dict_generator(indict, pre=None) -> Optional[Any]:
"""
Recursive walk through dictionary and returns a tuple of keypath and value
Use with a for loop outside this function to return values from the yield params
:param indict: the dictionary to be traversed in this function
    :param pre: the list of traversals that have already occurred
:return: yield when value is instance of dictionary or yield the pre list
"""
pre = pre[:] if pre else []
if isinstance(indict, dict):
for key, value in indict.items():
if isinstance(value, dict):
for d in dict_generator(value, pre + [key]):
yield d
else:
yield (pre + [key], value)
else:
yield indict
def first(iterable, default=None):
for item in iterable:
return item
return default
class Decoder(json.JSONDecoder):
"""
Handle decoding json and preserving numerics as numerics instead of strings
"""
def decode(self, s) -> Any:
"""
The decode caller method that returns the private _decode(result)
:param s: the string to decode
:return: the value from _decode(result)
"""
result = super().decode(s)
return self._decode(result)
def _decode(self, o) -> Any:
"""
Tries to match object type to valid python types in this case it supports int dict and lists
:param o: The object to decode
:return: The value of the object with the matching type
"""
if isinstance(o, str):
try:
return int(o)
except ValueError:
return o
elif isinstance(o, dict):
return {k: self._decode(v) for k, v in o.items()}
elif isinstance(o, list):
return [self._decode(v) for v in o]
else:
return o
def goodness_of_fit(arr: np.ndarray, mean=0.5, sigma=1, min=0, max=1, dist_type='uniform', alpha=0.05):
    """
    :param arr: input array
        type: ndarray expected
    :param mean, sigma, min, max: currently unused
    :param dist_type: {'uniform', 'normal'}, optional
        Defines the distribution to compare against.
        Default is 'uniform'
    :param alpha: level of significance to reject the null hypothesis (that arr matches dist_type)
        Default is 0.05
    :return: True if the null hypothesis is not rejected, i.e. arr is consistent with dist_type at level alpha
>>> np.random.seed(0)
>>> s = np.random.uniform(-23, 33, 100000)
>>> goodness_of_fit(s)
True
>>> np.random.seed(0)
>>> s = np.random.uniform(-50.2, 17.9, 1000000)
>>> goodness_of_fit(s)
True
>>> np.random.seed(0)
>>> s = np.random.uniform(-23, 33, 100000)
>>> goodness_of_fit(s, alpha=0.95)
False
>>> np.random.seed(0)
>>> s = np.random.normal(loc=0.0, scale=1.0, size=10000)
>>> goodness_of_fit(s, dist_type='uniform', alpha=0.05)
False
>>> np.random.seed(0)
>>> s = np.random.uniform(-50.2, 17.9, 100000)
>>> goodness_of_fit(s, dist_type='normal')
False
>>> np.random.seed(0)
>>> x = np.random.normal(0, 1, 100000)
>>> goodness_of_fit(x, dist_type='normal', alpha=1)
True
>>> np.random.seed(0)
>>> x = np.random.normal(110, 1222, 100000)
>>> goodness_of_fit(x, dist_type='normal')
True
"""
if dist_type == 'uniform':
# normalize uniform distributions
arr = minmax_scale(arr, feature_range=(0, 1), copy=True)
statistic, pvalue = stats.kstest(arr, 'uniform')
        return pvalue > alpha  # True when the null hypothesis (uniformity) is not rejected, i.e. pvalue > alpha
if dist_type == 'normal':
# using Anderson-Darling test
# normal/exponential critical values: [15%, 10%, 5%, 2.5%, 1%]
# sample result: AndersonResult(statistic=1.383562257554786,
# critical_values=array([0.574, 0.654, 0.785, 0.916, 1.089]),
# significance_level=array([15. , 10. , 5. , 2.5, 1. ]))
A2, critical, sig = stats.anderson(arr, dist='norm')
# [[ True True True True True]
# [ True True True True False]]
results = np.vstack((np.where((A2 < critical), True, False),
np.where((alpha < sig), True, False)))
# [[ True True True True True]
# [False True True True True]]
results = np.flip(results, axis=1)
transposed_results = results.T
for column in transposed_results:
if not column[-1]:
return column[0]
# catch the case where alpha is lower than all significance_levels returned from AndersonResult
# return the 0th column and the associated A2 < critical check at that column
# this is sorted in reverse (lowest to highest) order from the original critical, sig arrays
return transposed_results[0][0]
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
```
#### File: TOGA/toga/worker.py
```python
import os
import platform
import subprocess
from shutil import rmtree
from typing import Tuple, Optional, Any
import pandas as pd
import psutil
import yaml
from toga.toga_settings import Settings
def call(*popenargs, timeout=None, **kwargs):
"""
Custom implementation of subprocess.call
As of 12/1/19 subprocess.call was broken in all versions of Python when used with a
timeout, so this version explicitly cleans up the child process tree.
Run command with arguments. Wait for command to complete or
timeout, then return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
"""
with subprocess.Popen(*popenargs, **kwargs) as p:
try:
return p.wait(timeout=timeout)
except: # Including KeyboardInterrupt, wait handled that.
process = psutil.Process(p.pid)
for _ in process.children(recursive=True):
_.kill()
process.kill()
raise
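# Minimal usage sketch for call(); the command below is hypothetical. On timeout the
# child process tree is killed and the TimeoutExpired exception is re-raised to the caller.
def _call_example() -> int:
    """Runs a short shell command with a timeout and returns its exit code."""
    return call("echo toga-worker", shell=True, timeout=5)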
class Worker(object):
"""
Worker object encapsulates the specific gene to be run, along with the attributes needed to run it.
Overall, the procedure a worker follows is:
1.) Store attributes and create a uuid
2.) Find which config file to map the gene to, if the run_config passed to TOGA allows for multiple configs
3.) Create a temporary directory for the system TOGA is optimizing to run in and write temporary files to
4.) Serialize the gene to a yaml file in a temporary location
5.) Create a run command with modified values
6.) Run the system TOGA is optimizing with the command
7.) Retrieve the metrics as a dataframe, the gene used to generate the metrics, and the mutator used to generate the gene
"""
def __init__(self, individual):
"""
:param individual: The individual whose gene is mapped to a runnable config.yml, together with
the mutator that produced the gene and the lineage/uuid information used by TOGA
"""
self.settings = Settings()
self.timeout = self.settings.timeout
self.experiment_dir = self.settings.output_dir
self.individual = individual
self.gene = individual.genetics.gene
self.mutator = individual.lineage.mutator
self.generation_num = individual.lineage.generation_num
self.uuid = individual.uuid
self.serialization_path = os.path.join(self.experiment_dir, 'random_config',
f'{self.uuid}_{os.path.basename(self.settings.gene_template)}')
self.relative_work_dir = os.path.join(os.path.join(self.experiment_dir, "workdir"), self.uuid)
def make_run_command(self) -> str:
command = ''
if self.settings.use_conda_env:
if platform.system() == 'Darwin':
command += f'source activate {self.settings.conda_shell_exec_loc};' \
f'conda activate {self.settings.environ_name};'
else:
command += f'source activate {self.settings.environ_name};'
command += f'{self.settings.runnable_cmd} ' \
f'{self.settings.gene_arg} {self.serialization_path} {self.settings.static_args}'
return command
def run(self):
"""
:return: self
"""
if not os.path.exists(self.relative_work_dir):
os.mkdir(self.relative_work_dir)
self.serialize_chromosome(
active_chromosome=self.gene,
outpath=self.serialization_path)
cmd = self.make_run_command()
try:
call(cmd,
shell=True,
stderr=subprocess.STDOUT,
cwd=self.relative_work_dir,
timeout=self.timeout
)
except subprocess.TimeoutExpired:
return self
return self
@staticmethod
def serialize_chromosome(active_chromosome={}, outpath="") -> None:
with open(outpath, 'w') as outfile:
yaml.dump(active_chromosome, outfile)
def cleanup(self) -> None:
if os.path.isdir(self.relative_work_dir):
rmtree(self.relative_work_dir, ignore_errors=True)
os.remove(self.serialization_path)
def response(self) -> Any:
metrics_path = os.path.join(self.relative_work_dir, self.settings.metrics_out_location)
data_frame = None
if os.path.isfile(metrics_path):
with open(metrics_path) as f:
data_frame = pd.read_csv(f)
else:
print("Metrics File not found: {}".format(metrics_path))
self.individual.metrics.metrics_df = data_frame
return self.individual
``` |
{
"source": "JPLMLIA/UQ4K",
"score": 4
} |
#### File: uq4k/gradient/early_stopper.py
```python
from typing import Tuple
import numpy as np
class EarlyStopper:
"""implements early stopping strategy"""
def __init__(
self,
min_improvement: float,
patience: int,
improvement_direction: str = 'down',
improvement_type: str = 'absolute',
):
"""constructs the early stopper object
Parameters
----------
min_improvement : float
minimum change in metric to qualify as an improvement
patience : int
number of epochs to wait for improvement before stopping
improvement_direction: str
if 'down', then the lower the metric, the better
if 'up', then the higher the metric, the better
improvement_type: str
if 'absolute', then compare the absolute difference between the metric and its current best value
if 'relative', then divide the absolute difference by the best metric value
"""
self.min_improvement = min_improvement
self.patience = patience
self.direction_multiplier = 1 if improvement_direction == "up" else -1
self.improvement_type = improvement_type
self.best_metric = None
self.best_epoch = None
self.waiting = 0
def check(self, metric: float, epoch_number: int) -> Tuple[bool, bool]:
"""checks if we should stop given the given metric
Parameters
----------
metric : float
the metric value
epoch_number: int
the epoch number associated with the metric
Returns
-------
Tuple[bool, bool]
- whether an improvement happened
- whether to stop training
"""
metric = np.asarray(metric)
stop = False
improvement = False
if self.best_metric is None:
self.best_metric = metric
self.best_epoch = epoch_number
else:
difference = self.direction_multiplier * (metric - self.best_metric)
if self.improvement_type == 'relative':
difference /= np.abs(self.best_metric)
if difference >= self.min_improvement:
self.best_metric = metric
self.best_epoch = epoch_number
self.waiting = 1
improvement = True
else:
if self.waiting >= self.patience:
stop = True
self.waiting += 1
return improvement, stop
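# Minimal usage sketch (hypothetical training loop): stop when the loss has not improved
# by at least 1% (relative) for 5 consecutive epochs.
def _early_stopping_example(losses) -> int:
    """Returns the epoch at which training would stop for a given sequence of losses."""
    stopper = EarlyStopper(min_improvement=0.01, patience=5,
                           improvement_direction='down', improvement_type='relative')
    for epoch, loss in enumerate(losses):
        _, should_stop = stopper.check(loss, epoch)
        if should_stop:
            return epoch
    return len(losses) - 1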
```
#### File: uq4k/gradient/gd_optimizer.py
```python
import random
from functools import partial
from typing import List, Tuple, Union
import cvxpy as cp
import jax
import jax.numpy as jnp
import miniball as mb
import numpy as np
import optax
from jax.config import config
from scipy import stats
from uq4k.gradient.early_stopper import EarlyStopper
from uq4k.models.loss import DifferentaibleMeritFunc
config.update("jax_enable_x64", True)
class GdOpt:
@staticmethod
def __multisample_startegy(
dims: int, num_samples: int = 10, bounds: Tuple[int, int] = (-1, 1), seed: int = 0
) -> jax.numpy.DeviceArray:
"""generates multiple random initializations of the theta parameter
Parameters
----------
dims: int
the dimensions of the theta parameter
num_samples : int, optional
the number of samples to draw, by default 10
bounds: Tuple[int, int], optional
the lower and upper bound to sample uniformly from
seed: int, optional
the seed to the random number generator, by default 0
Returns
-------
jax.numpy.DeviceArray
the sample of random initializations (num_samples, parameters_count)
"""
rng = jax.random.PRNGKey(seed)
lower_bound, upper_bound = bounds
candidates = jax.random.uniform(
rng, shape=(num_samples, dims), minval=lower_bound, maxval=upper_bound
)
return candidates
def __init__(self, objective: DifferentaibleMeritFunc) -> None:
"""initializes the gradient descent optimizer with a differentiable merit function
Parameters
----------
objective : DifferentaibleMeritFunc
the merit function to optimize
"""
self.objective = objective
self.init_strategies = {"multisample": GdOpt.__multisample_startegy}
def compute_M_alpha(
self,
sigma_2: np.ndarray,
mle_error: float,
df: int,
conf_level: float = 0.95,
man_delta: Union[None, float] = None,
) -> float:
"""calculates the slack defining the ellipsoid from the likelihood ratio
Supports chi-square method and manual setting
Parameters
----------
sigma_2 : np.ndarray
Data variance
mle_error : float
MLE estimator's error
df : int
Degrees of freedom for the Chi-square distribution
conf_level : float, optional
desired confidence level (beta* in the paper), by default 0.95
man_delta : Union[None, float], optional
manual value of delta bypassing the chi-square method, by default None
Returns
-------
float
the value of M_alpha
"""
if man_delta:
delta = man_delta
else:
gamma = stats.chi2(df=df).ppf(conf_level)
delta = gamma * sigma_2
return mle_error + delta
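# Worked sketch of compute_M_alpha (values are approximate and assume df=1):
# stats.chi2(df=1).ppf(0.95) is roughly 3.841, so with sigma_2 = 0.5 and mle_error = 2.0
# the returned M_alpha would be about 2.0 + 3.841 * 0.5 ≈ 3.92. Passing man_delta instead
# bypasses the chi-square step and returns mle_error + man_delta directly.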
def find_mle(
self,
theta_dims: int,
max_epoch: int = 1000,
lr: float = 0.01,
min_improvement: float = 0.01,
patience: int = 1000,
) -> Tuple[jax.numpy.DeviceArray, jax.numpy.DeviceArray]:
"""retrieves the MLE solution by applying gradient descent
Parameters
----------
theta_dims : int
dimensionality of the theta parameter
max_epoch : int, optional
the number of epochs to run GD for, by default 1000
lr : float, optional
the learning rate, by default 0.01
min_improvement: float, optional
the relative minimum difference between losses to consider an improvement,
defaults to 0.01 (1% lower than the best observed loss)
patience: int, optional
the number of epochs to wait for improvement in the loss before stopping
Returns
-------
Tuple[jax.numpy.DeviceArray, jax.numpy.DeviceArray]
- MLE estimate of theta
- value of MLE error
"""
early_stopper = EarlyStopper(
min_improvement=min_improvement, patience=patience, improvement_type="relative"
)
rng = jax.random.PRNGKey(seed=42)
init_theta = jax.random.uniform(rng, shape=(theta_dims,), minval=-1, maxval=1)
optimizer = optax.adam(learning_rate=lr)
state = optimizer.init(init_theta)
theta = init_theta
error = None
@jax.jit
def update_step(theta, state):
error, grad = jax.value_and_grad(self.objective.sum_sq_norms)(theta)
normed_grad = grad / jnp.linalg.norm(grad)
updates, new_state = optimizer.update(normed_grad, state)
new_theta = optax.apply_updates(theta, updates)
return error, new_theta, new_state
for i in range(max_epoch):
error, theta, state = update_step(theta, state)
_, stop = early_stopper.check(error, i)
if stop:
break
return theta, error
def __get_furthest_point(
self,
initial_theta: jax.numpy.DeviceArray,
M_alpha: float,
center: np.ndarray,
max_epoch: int,
lr: float,
min_improvement: float = 0.01,
patience: int = 1000,
) -> jax.numpy.DeviceArray:
"""performs gradient descent on the whole objective function to get the furthest point from the center
Parameters
----------
initial_theta: jax.numpy.DeviceArray
the initial value of theta to start from
M_alpha : float
the slack defining the ellipsoid from the likelihood ratio
center : np.ndarray
the center to find the furthest point from
max_epoch : int
max number of epoch to run the optimization for
lr : float
the learning rate used
min_improvement: float, optional
the relative minimum difference between losses to consider an improvement,
defaults to 0.01 (1% lower than the best observed loss)
patience: int, optional
the number of epochs to wait for improvement in the loss before stopping
Returns
-------
jax.numpy.DeviceArray
the furthest point found
"""
early_stopper = EarlyStopper(
min_improvement=min_improvement, patience=patience, improvement_type='relative'
)
optimizer = optax.adam(learning_rate=lr)
state = optimizer.init(initial_theta)
theta = initial_theta
bound_objective = partial(self.objective, center=center, M_alpha=M_alpha)
furthest_point = None
furthest_distance = 0
@jax.jit
def update_step(theta, state):
loss, grad = jax.value_and_grad(bound_objective)(theta)
normed_grad = grad / jnp.linalg.norm(grad)
updates, new_state = optimizer.update(normed_grad, state)
new_theta = optax.apply_updates(theta, updates)
return loss, new_theta, new_state
for i in range(max_epoch):
loss, theta, state = update_step(theta, state)
improvement, stop = early_stopper.check(loss, i)
if stop:
break
if improvement:
furthest_distance = jax.lax.stop_gradient(self.objective.center_dist(theta, center))
furthest_point = jax.lax.stop_gradient(self.objective.qoi_func(theta))
return furthest_point, furthest_distance
def optimize_min_e_ball(
self,
sigma_2: np.ndarray,
data: np.ndarray,
initial_theta: Union[jax.numpy.DeviceArray, str],
theta_dims: int,
raduis_eps: float,
conf_level: float,
max_epoch: int = 100000,
lr: float = 0.001,
man_delta: Union[None, float] = None,
bounds=None,
seed: int = 0,
) -> Tuple[np.ndarray, float, List[np.ndarray], np.ndarray, float]:
"""runs the UQ4K optimization problem to find the minimum enclosing ball
Parameters
----------
sigma_2 : np.ndarray
data variance
data : np.ndarray
the data array
initial_theta : Union[jax.numpy.DeviceArray, str]
array: the initial starting point for the GD optimization
str: the initialization strategy used internally
- 'multisample': sample multiple random initializations
and continue with the one yielding the furthest distance
theta_dims: int
the dimensionality of the theta parameters
raduis_eps : float
the stopping criterion for the min enclosing ball optimization
conf_level : float
confidence level used in the chi-squared calculation of M_alpha
max_epoch : int, optional
the maximum number of epochs to run GD for, by default 100000
lr: float, optional
the learning rate to be used
man_delta : Union[None, float], optional
manual value of delta bypassing the chi-squared method of M_alpha, by default None
bounds : [type], optional
the bounds for the theta vector (currently not used in GD), by default None
seed: int, optional
random seed for theta initialization
Returns
-------
Tuple[np.ndarray, float, List[np.ndarray], np.ndarray, float]
- mle_theta: MLE estimator of the theta given data
- M_alpha: the level set constraint
- S: collection of points for the minimum enclosing ball
- center: the center of the minimum enclosing ball
- raduis: the radius of the minimum enclosing ball
"""
dims = theta_dims
qoi_dims = self.objective.qoi_func(np.zeros(shape=(dims,))).size
initial_theta_val = None
init_strategy = None
if isinstance(initial_theta, str):
init_strategy = self.init_strategies.get(initial_theta, None)
if init_strategy is None:
raise KeyError(f"No initialization strategy called {initial_theta}")
else:
initial_theta_val = jnp.reshape(initial_theta, (1, -1))
S = []
center = None
raduis = 0
mle_theta, mle_error = self.find_mle(theta_dims, max_epoch, lr)
M_alpha = self.compute_M_alpha(
sigma_2, mle_error, df=dims, conf_level=conf_level, man_delta=man_delta
)
center = np.asarray(jax.lax.stop_gradient(self.objective.qoi_func(mle_theta)))
S.append(center)
raduis_diff = np.inf
while raduis_diff > raduis_eps:
seed += 2
init_theta_candidates = (
init_strategy(dims=theta_dims, seed=seed) if init_strategy else initial_theta_val
)
furthest_point = None
furthest_distance = 0
for init_theta in init_theta_candidates:
furthest_point_candidate, furthest_distance_candidate = self.__get_furthest_point(
init_theta, M_alpha, center, max_epoch, lr
)
if furthest_distance_candidate >= furthest_distance:
furthest_distance = furthest_distance_candidate
furthest_point = furthest_point_candidate
furthest_point = np.asarray(furthest_point)
S.append(furthest_point)
S_array = np.array(S)
if S_array.ndim == 1:
center, r_squared = mb.get_bounding_ball(S_array[:, np.newaxis])
else:
center, r_squared = mb.get_bounding_ball(S_array)
raduis_diff = np.abs(np.sqrt(r_squared) - raduis)
raduis = np.sqrt(r_squared)
if len(S) > qoi_dims + 1:
distances = [np.linalg.norm(center - Si) for Si in S]
remove_indx = np.argmin(distances)
S.pop(remove_indx)
return mle_theta, M_alpha, np.array(S), center, raduis
def weight_optimization(self, S):
"""
Find dirac weights after min enclosing ball opt
Parameters:
S (np arr) : n x m, n - num diracs | m - dim each dirac
Returns:
optimized weights over diracs (n,) numpy array
"""
# find the optimization objects
ONE_D = len(S.shape) == 1
if ONE_D:
n = S.shape[0]
else:
n, m = S.shape
Q_mat = np.zeros(shape=(n, n))
if ONE_D:
Q_mat = np.outer(S, S)
v = np.square(S)
else:
for t in range(m):
Q_mat += np.outer(S[:, t], S[:, t])
v = np.square(S).sum(axis=1)
# perform the optimization
p_vec = cp.Variable(n)
problem = cp.Problem(
objective=cp.Minimize(cp.quad_form(p_vec, Q_mat) - v.T @ p_vec),
constraints=[p_vec >= np.zeros(n), cp.sum(p_vec) == 1],
)
# solve and check convergence
problem.solve()
assert problem.status == 'optimal'
return p_vec.value
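# Minimal usage sketch for weight_optimization; S below is a hypothetical 3 x 2 array of
# dirac locations such as optimize_min_e_ball might return. A constructed GdOpt instance
# is assumed, since building one requires a DifferentaibleMeritFunc.
def _weight_optimization_example(optimizer: "GdOpt") -> np.ndarray:
    """Returns the optimized dirac weights for a small, hard-coded support set."""
    S = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    weights = optimizer.weight_optimization(S)
    # weights is a length-3 array of non-negative values summing to 1
    return weights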
```
#### File: uq4k/models/loss.py
```python
from abc import ABC, abstractmethod
import jax.numpy as jnp
import numpy as np
class AbstractLoss(ABC): # TODO: should these abstract methods be defined here?
def __init__(self):
super().__init__()
@abstractmethod
def sum_sq_norms(self):
"""
Finds the squared 2-norm of the difference between model and data
"""
pass
@abstractmethod
def center_dist(self):
"""
Finds the squared 2-norm between a new proposed parameter value and
the current center
"""
pass
class MeritFunc(AbstractLoss):
def __init__(self, forward_model, mu, data, qoi_func):
"""
Dimension key:
n : number of data points
d : dimension of each data point
m : dimension of the qoi
Parameters:
-----------
forward_model (BaseModel) : see base_model.py
mu (float) : merit function parameter
data (np arr) : array of observed data - n x d
qoi_func (function) : maps theta |-> qoi, parameter space -> R^m
"""
self.forward_model = forward_model
self.mu = mu
self.data = data
self.qoi_func = qoi_func
def sum_sq_norms(self, params):
"""
Finds the squared 2-norm of the difference between model and data
Dimension key:
p : dimension of model parameters
Parameters:
-----------
params (np arr) : p
Returns:
--------
2-norm of residuals
"""
diffs = self.data - self.forward_model(params)
return np.square(diffs).sum()
def center_dist(self, new_point, center):
"""
Finds the squared 2-norm between a new proposed parameter value and
the current center
Dimension key:
p : dimension of model parameters
Parameters:
-----------
new_point (np arr) : p
center (np arr) : m
Returns:
--------
squared 2-norm of distance between two points
"""
return np.linalg.norm(self.qoi_func(new_point) - center) ** 2
def __call__(self, new_point, center, M_alpha):
"""
Evaluates the objective function at some new point.
Dimension key:
p : dimension of model parameters
m : dimension of the QoI
Parameters:
-----------
new_point (np arr) : p
center (np arr) : m
M_alpha (float) : bound on the error
Returns:
--------
Objective function
"""
# find the distance from center
center_dist_term = self.center_dist(new_point=new_point, center=center)
# compute the penalty term
error = self.sum_sq_norms(params=new_point)
merit_term = self.mu * np.max(np.array([0, error - M_alpha]))
return -center_dist_term + merit_term
class DifferentaibleMeritFunc(AbstractLoss):
def __init__(self, forward_model, mu, data, qoi_func):
"""
Dimension key:
n : number of data points
d : dimension of each data point
m : dimension of the qoi
Parameters:
-----------
forward_model (BaseModel) : see base_model.py
mu (float) : merit function parameter
data (np arr) : array of observed data - n x d
qoi_func (function) : maps theta |-> qoi, parameter space -> R^m
"""
self.forward_model = forward_model
self.mu = mu
self.data = data
self.qoi_func = qoi_func
def sum_sq_norms(self, params):
"""
Finds the squared 2-norm of the difference between model and data
Dimension key:
p : dimension of model parameters
Parameters:
-----------
params (jax DeviceArray) : p
Returns:
--------
2-norm of residuals
"""
diffs_squared = jnp.square(self.data - self.forward_model(params))
return jnp.sum(diffs_squared)
def center_dist(self, new_point, center):
"""
Finds the squared 2-norm between a new proposed parameter value and
the current center
Dimension key:
p : dimension of model parameters
Parameters:
-----------
new_point (jax DeviceArray) : p
center (jax DeviceArray) : m
Returns:
--------
squared 2-norm of distance between two points
"""
diffs_squared = jnp.square(self.qoi_func(new_point) - center)
return jnp.sum(diffs_squared)
def __call__(self, new_point, center, M_alpha):
"""
Evaluates the objective function at some new point.
Dimension key:
p : dimension of model parameters
m : dimension of the QoI
Parameters:
-----------
new_point (jax.numpy.DeviceArray) : p
center (np arr) : m
M_alpha (float) : bound on the error
Returns:
--------
Objective function
"""
center_dist_term = self.center_dist(new_point, center)
error = self.sum_sq_norms(params=new_point)
constraint = self.mu * jnp.max(jnp.array([error - M_alpha, 0]))
return -center_dist_term + constraint
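# Minimal usage sketch with a hypothetical linear forward model and scalar QoI. Because
# every operation above uses jax.numpy, the merit function can be differentiated with
# jax.value_and_grad, which is how GdOpt drives its gradient-descent updates.
def _differentiable_merit_example():
    """Evaluates the merit function and its gradient at a hypothetical theta."""
    import jax  # available here since jax.numpy is already a dependency of this module
    data = jnp.array([1.0, 2.0, 3.0])
    func = DifferentaibleMeritFunc(
        forward_model=lambda theta: jnp.sum(theta) * jnp.ones(3),  # hypothetical model
        mu=10.0,
        data=data,
        qoi_func=lambda theta: theta[0],  # hypothetical scalar quantity of interest
    )
    theta = jnp.array([0.5, 0.5])
    value, grad = jax.value_and_grad(func)(theta, center=jnp.array(0.0), M_alpha=1.0)
    return value, grad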
class MeritFunc_NEW(AbstractLoss):
def __init__(self, forward_model, mu, data_y, data_x):
"""
Dimension key:
n : number of data points
dx : dimension of each input
dy : dimension of each output
Parameters:
-----------
forward_model (BaseModel) : see base_model.py
mu (float) : merit function parameter
data_y (np arr) : array of observed output - n x dy
data_x (np arr) : array of observed input - n x dx
"""
self.forward_model = forward_model
self.mu = mu
self.data_y = data_y
self.data_x = data_x
def sum_sq_norms(self):
"""
Finds the squared 2-norm of the difference between model and data
Uses the stored data_x and data_y attributes; takes no parameters
Returns:
--------
2-norm of residuals
"""
diffs = self.data_y - self.forward_model(self.data_x)
return np.square(diffs).sum()
def center_dist(self, new_point, center):
"""
Finds the squared 2-norm between a new proposed parameter value and
the current center
Dimension key:
p : dimension of model parameters
Parameters:
-----------
new_point (np arr) : p
center (np arr) : p
Returns:
--------
squared 2-norm of distance between two points
"""
return np.linalg.norm(new_point - center) ** 2
def __call__(self, new_point, center, M_alpha):
"""
Evaluates the objective function at some new point.
Dimension key:
p : dimension of model parameters
Parameters:
-----------
new_point (np arr) : p
center (np arr) : p
M_alpha (float) : bound on the error
Returns:
--------
Objective function
"""
# find the distance from center
center_dist_term = self.center_dist(new_point=new_point, center=center)
# compute the penalty term
error = self.sum_sq_norms()
merit_term = self.mu * np.max(np.array([0, error - M_alpha]))
return -center_dist_term + merit_term
``` |
{
"source": "jplonie/content",
"score": 2
} |
#### File: Scripts/DisplayEmailHtml/DisplayEmailHtml.py
```python
import json
import re
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def create_email_html(email_html='', entry_id_list=[]):
for entry_id in entry_id_list:
email_html = re.sub(f'src="[^>]+"(?=[^>]+alt="{entry_id[0]}")', f'src=entry/download/{entry_id[1]} ', email_html)
return email_html
def get_entry_id_list(attachments, files):
"""Get the email attachments and create entry id list.
Args:
attachments (list): The attachments of the email.
files (list): The uploaded files in the context.
Returns:
list. Attachments entries ids list.
"""
if not (attachments and files):
return []
entry_id_list = []
files = [files] if not isinstance(files, list) else files
for attachment in attachments:
attachment_name = attachment.get('name', '')
for file in files:
if attachment_name == file.get('Name'):
entry_id_list.append((attachment_name, file.get('EntryID')))
demisto.info(f'\n\n idlist \n\n{entry_id_list}')
return entry_id_list
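# Worked sketch with hypothetical data: attachments = [{'name': 'logo.png'}] and
# files = [{'Name': 'logo.png', 'EntryID': '4@2'}] would return [('logo.png', '4@2')],
# which create_email_html then uses to rewrite the matching <img> src attributes
# to entry/download/4@2.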
def add_entries(single_reply, incident_id):
"""Add the entries to the related incident
Args:
single_reply: The email reply.
incident_id: The id of the incident to add the entries to.
"""
entries_str = json.dumps(
[{"Type": 1, "ContentsFormat": 'html', "Contents": single_reply, "tags": ['email-thread']}])
res = demisto.executeCommand("addEntries", {"entries": entries_str, 'id': incident_id})
if is_error(res):
demisto.error(f"ERROR: PreprocessEmail - addEntries: {res['Contents']}")
raise DemistoException(f"ERROR: PreprocessEmail - addEntries: {res['Contents']}")
def set_email_reply(email_from, email_to, email_cc, email_subject, html_body, attachments):
"""Set the email reply from the given details.
Args:
email_from: The email author mail.
email_to: The email recipients.
email_cc: The email cc.
html_body: The email HTML body.
Returns:
str. Email reply.
"""
single_reply = f"""
From: {email_from}
To: {email_to}
CC: {email_cc}
Subject: {email_subject}
"""
if attachments:
attachment_names = [attachment.get('name', '') for attachment in attachments]
single_reply += f'Attachments: {attachment_names}\n'
single_reply += f'\n{html_body}\n'
return single_reply
args = demisto.args()
incident = demisto.incidents()[0]
incident_id = incident.get('id')
custom_fields = incident.get('CustomFields', {})
email_body = custom_fields.get('emailbody')
email_from = custom_fields.get('emailfrom')
email_cc = custom_fields.get('emailcc')
email_to = custom_fields.get('emailto')
email_subject = custom_fields.get('emailsubject')
email_html = custom_fields.get('emailhtml')
email_html_image = custom_fields.get('emailhtmlimage')
attachments = incident.get('attachment', {})
files = demisto.context().get('File', [])
if not email_html_image or 'src="cid' in email_html_image:
if 'src="cid' in email_html:
entry_id_list = get_entry_id_list(attachments, files)
html_body = create_email_html(email_html, entry_id_list)
email_reply = set_email_reply(email_from, email_to, email_cc, email_subject, html_body, attachments)
demisto.executeCommand("setIncident", {'customFields': {"emailhtmlimage": email_reply}})
demisto.results({
'ContentsFormat': formats['html'],
'Type': entryTypes['note'],
'Contents': email_reply})
else:
email_reply = set_email_reply(email_from, email_to, email_cc, email_subject, email_html, attachments)
demisto.results({
'ContentsFormat': formats['html'],
'Type': entryTypes['note'],
'Contents': email_reply})
else:
demisto.results({
'ContentsFormat': formats['html'],
'Type': entryTypes['note'],
'Contents': email_html_image})
```
#### File: Tests/tests/configure_and_test_integration_instances_test.py
```python
from Tests.configure_and_test_integration_instances import configure_old_and_new_integrations
def test_configure_old_and_new_integrations(mocker):
"""
Given:
- A list of new integration that should be configured
- A list of old integrations that should be configured
When:
- Running 'configure_old_and_new_integrations' method on those integrations
Then:
- Assert that the configured old integrations have no intersection with the configured new integrations
"""
def configure_integration_instance_mocker(integration,
_,
__):
return integration
mocker.patch('Tests.configure_and_test_integration_instances.configure_integration_instance',
side_effect=configure_integration_instance_mocker)
old_modules_instances, new_modules_instances = configure_old_and_new_integrations(
build=mocker.MagicMock(servers=['server1']),
old_integrations_to_configure=['old_integration1', 'old_integration2'],
new_integrations_to_configure=['new_integration1', 'new_integration2'],
demisto_client=None
)
assert not set(old_modules_instances).intersection(new_modules_instances)
``` |
{
"source": "JPLOpenSource/fprime-sw-Rel1.0",
"score": 3
} |
#### File: server/AdapterLayer/adapter_process.py
```python
import signal
from multiprocessing import Process
class AdapterProcess(Process):
"""
A Process wrapper for ZmqServer protocol adapters.
"""
def __init__(self, Adapter):
Process.__init__(self)
self.__adapter = Adapter
def run(self):
"""
Called by AdapterProcess.start()
Starts the adapter and quits the adapter when a
signal is received.
"""
self.__adapter.Start()
try:
signal.pause() # Sleep until interrupted
except KeyboardInterrupt:
pass
self.__adapter.Quit()
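# Minimal usage sketch; `adapter` stands for any object exposing the Start()/Quit()
# interface that AdapterProcess expects (the kernel builds these via adapter_utility).
def _adapter_process_example(adapter):
    """Wraps an already-constructed adapter in its own process and starts it."""
    process = AdapterProcess(adapter)
    process.start()  # run() calls adapter.Start() in the child process
    # The kernel later calls process.terminate() to signal the child to shut down,
    # as done in ZmqKernel.__TerminateAdapters.
    return process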
```
#### File: server/Kernel/kernel.py
```python
import os
import sys
import time
import zmq
import logging
import datetime
import thread
import struct
import pickle
import signal
import threading
import traceback
import multiprocessing
from logging import DEBUG, INFO, ERROR
from zmq.eventloop.ioloop import IOLoop, PeriodicCallback
from zmq.eventloop.zmqstream import ZMQStream
from utils import logging_util
from utils import throughput_analyzer
from utils.logging_util import GetLogger
from server.AdapterLayer import adapter_utility
from server.AdapterLayer.adapter_process import AdapterProcess
from server.Kernel import interconnect
from server.Kernel.threads import GeneralSubscriberThread, GeneralPublisherThread
# Global server config class
from server.ServerUtils.server_config import ServerConfig
SERVER_CONFIG = ServerConfig.getInstance()
class ZmqKernel(object):
def __init__(self, command_port, console_lvl=INFO, file_lvl=INFO, tp_on=False,\
timeout=None):
"""
@params command_port: tcp port on which to receive registration and commands
@params console_lvl: global logging level for console output
@params file_lvl: global logging level for file output
@params tp_on: True to log throughput at TestPoints. False to disable
@params timeout: Quit server after timeout. For unittesting purposes
"""
self.__main_context = zmq.Context() # Context for the main event loop sockets
# Store references to each client process
self.__routing_table = dict()
self.__routing_table[SERVER_CONFIG.FLIGHT_TYPE] = dict()
self.__routing_table[SERVER_CONFIG.GROUND_TYPE] = dict()
self.__book_keeping = dict() # Use for storing port numbers
# Setup adapter and adapter book keeping
self.__reference_adapter_dict = adapter_utility.LoadAdapters()
self.__adapter_process_dict = dict()
# Setup global logging settings
logging_util.SetGlobalLoggingLevel(consoleLevel=console_lvl, fileLevel=file_lvl,\
globalLevel=True)
# Setup global throughput_analyzer settings
throughput_analyzer.GlobalToggle(tp_on)
throughput_analyzer.InitializeFolders()
# Create logger
log_path = SERVER_CONFIG.get("filepaths", "server_log_filepath")
self.__logger = GetLogger("zmq_kernel",log_path, logLevel=DEBUG,\
fileLevel=DEBUG)
self.__logger.debug("Logger Active")
self.__logger.debug("PID: {}".format(os.getpid()))
# Create flight and ground subscriber threads
self.__flight_side_context = zmq.Context(io_threads=1) # Context for flight oriented connections
self.__server_flight_sub_port = interconnect.GetRandomPort()
self.__flight_subscribe_thread = GeneralSubscriberThread(self.__flight_side_context,\
SERVER_CONFIG.FLIGHT_TYPE,\
self.__server_flight_sub_port,\
SERVER_CONFIG.FLIGHT_PUB_ADDRESS)
self.__ground_side_context = zmq.Context(io_threads=1) # Context for ground oriented connections
self.__server_ground_sub_port = interconnect.GetRandomPort()
self.__ground_subscribe_thread = GeneralSubscriberThread(self.__ground_side_context,\
SERVER_CONFIG.GROUND_TYPE,\
self.__server_ground_sub_port,
SERVER_CONFIG.GROUND_PUB_ADDRESS)
# Setup routing command socket
self.__routing_command_socket = self.__main_context.socket(zmq.PUB)
self.__routing_command_socket.bind(SERVER_CONFIG.ROUTING_TABLE_CMD_ADDRESS)
self.__logger.debug("Command socket: {}".format(SERVER_CONFIG.ROUTING_TABLE_CMD_ADDRESS))
# Set routing command reply socket
self.__routing_command_reply_socket = self.__main_context.socket(zmq.ROUTER)
self.__routing_command_reply_socket.setsockopt(zmq.RCVTIMEO, 500) # Timeout after 500 ms
self.__routing_command_reply_socket.bind(SERVER_CONFIG.ROUTING_TABLE_CMD_REPLY_ADDRESS)
self.__logger.debug("Command reply socket: {}".format(SERVER_CONFIG.ROUTING_TABLE_CMD_REPLY_ADDRESS))
# Setup command/status socket
self.__command_socket = self.__main_context.socket(zmq.ROUTER)
try:
self.__command_socket.bind("tcp://*:{}".format(command_port))
except zmq.ZMQError as e:
if e.errno == zmq.EADDRINUSE:
self.__logger.error("Unable to bind command socket to port {}"\
.format(command_port))
raise e
# Create Reactor
self.__loop = IOLoop.instance()
# Set timeout for unit testing
if(timeout):
self.__loop.call_later(timeout, self.__loop.stop)
# Wrap sockets in ZMQStreams for IOLoop handlers
self.__command_socket = ZMQStream(self.__command_socket)
# Register handlers
self.__command_socket.on_recv(self.__HandleCommand)
def GetContext(self):
"""
Return zmq context.
"""
return self.__main_context
def Start(self):
"""
Start main event loop of the zmq kernel
"""
try:
self.__logger.info("Kernel reactor starting.")
self.__flight_subscribe_thread.start()
self.__ground_subscribe_thread.start()
self.__loop.start()
except KeyboardInterrupt:
pass # Fall through to quit
self.Quit()
def Quit(self):
"""
Shut down server
"""
self.__TerminateAdapters()
self.__logger.info("Initiating server shutdown")
# Terminate both contexts to kill all the threads
self.__flight_side_context.term()
self.__ground_side_context.term()
# Must close all sockets before context will terminate
self.__command_socket.close()
self.__routing_command_socket.close()
self.__routing_command_reply_socket.close()
self.__main_context.term()
# Gather all the testpoint data into one file.
throughput_analyzer.AggregateTestPoints()
def __HandleCommand(self, msg):
"""
Receives a new command message and dispatches the message to the
proper command handler.
@params msg: Received Zmq message.
"""
self.__logger.debug("Command Received: {}".format(msg))
return_id = msg[0]
cmd = msg[1]
# Client Register
if cmd == SERVER_CONFIG.REG_CMD:
status, server_pub_port, server_sub_port = self.__HandleRegistration(msg)
self.__RegistrationResponse(return_id, status, server_pub_port, server_sub_port)
# Client subscribe
elif cmd == SERVER_CONFIG.SUB_CMD:
option = SERVER_CONFIG.SUB_OPTION
status = self.__HandleRoutingCoreConfiguration(msg, option)
# Client unsubscribe
elif cmd == SERVER_CONFIG.USUB_CMD:
option = SERVER_CONFIG.USUB_OPTION
status = self.__HandleRoutingCoreConfiguration(msg, option)
# List subscriptions
elif cmd == SERVER_CONFIG.LIST_CMD:
client_sub_dict = self.__HandleListSubscription()
self.__ListSubscriptionResponse(return_id, client_sub_dict)
def __HandleListSubscription(self):
"""
Gets a dictionary detailing the subscription configuration of
all flight and ground clients
"""
return self.__routing_table
def __ListSubscriptionResponse(self, return_id, client_pub_dict):
"""
Send a serialized subscription dictionary to the client with ID return_id.
@params return_id: Identification of the receiving client
@params client_pub_dict: Publish oriented Pub/Sub dictionary
"""
self.__logger.debug("Sending ListSubscription Response")
self.__command_socket.send_multipart([return_id, pickle.dumps(client_pub_dict)])
def __HandleRoutingCoreConfiguration(self, msg, option):
"""
Handle subscribe or unsubscribe operation.
@params msg: Received zmq message from the command socket
@params option: How to configure the routing table.
Either SERVER_CONFIG.SUB_OPTION or SERVER_CONFIG.USUB_OPTION.
"""
client_name = msg[2]
client_type = msg[3]
subscriptions = msg[4:] # Subscriptions are listed
self.__logger.info("{} {} to {}".format(option, client_name, subscriptions))
# Configure subscriptions
if(client_type == SERVER_CONFIG.FLIGHT_TYPE):
pub_client_type = SERVER_CONFIG.GROUND_TYPE
elif(client_type == SERVER_CONFIG.GROUND_TYPE):
pub_client_type = SERVER_CONFIG.FLIGHT_TYPE
else:
self.__logger.error("Client type: {} not recognized.".format(client_type))
return -1
if(subscriptions == ['']): # Empty message in zmq means subscribe to all
subscriptions = [pub_client for pub_client in self.__routing_table[pub_client_type]]
for pub_client in subscriptions:
if(option == SERVER_CONFIG.USUB_OPTION):
self.__routing_table[pub_client_type][pub_client].remove(client_name)
elif(option == SERVER_CONFIG.SUB_OPTION):
self.__routing_table[pub_client_type][intern(pub_client)].add(client_name)
# Tell the receiving client to subscribe or unsubscribe
self.__routing_command_socket.send_multipart([client_name.encode(), option.encode(), pub_client.encode()])
# Wait for response
try:
self.__routing_command_reply_socket.recv()
except zmq.ZMQError as e:
if e.errno == zmq.EAGAIN:
self.__logger.warning("No response from {}".format(client_name))
else:
raise
return 0
def __HandleRegistration(self, msg):
"""
Receives a client registration message.
Returns a tuple containing the registration status, pub, and sub ports
@params msg: Received zmq message from the command socket.
"""
client_name = msg[0]
client_type = msg[2]
proto = msg[3]
self.__logger.info("Registering {client_name} as {client_type} client "
"using {proto} protocol."\
.format(client_name=client_name, client_type=client_type.lower(),\
proto=proto))
try:
# If the client already registered we don't need to create another thread.
# Just return the port numbers.
if client_name in self.__routing_table[client_type]:
self.__logger.info("{} Already registered. Returning port numbers.".format(client_name))
server_sub_port = self.__book_keeping[client_name]['sub_port']
server_pub_port = self.__book_keeping[client_name]['pub_port']
return (1, server_pub_port, server_sub_port)
else: # The client is not registered. Create a new dictionary entry
self.__routing_table[client_type][client_name] = set() # A set of subscribed clients
self.__book_keeping[client_name] = dict()
# Based on the client_type create a PublisherThread
if(client_type == SERVER_CONFIG.FLIGHT_TYPE):
server_sub_port = self.__server_flight_sub_port
server_pub_port = interconnect.GetRandomPort()
pub_thread = GeneralPublisherThread(self.__ground_side_context, client_name, SERVER_CONFIG.GROUND_PUB_ADDRESS,\
server_pub_port)
pub_thread.start()
status = 1
elif(client_type == SERVER_CONFIG.GROUND_TYPE):
server_sub_port = self.__server_ground_sub_port
server_pub_port = interconnect.GetRandomPort()
pub_thread = GeneralPublisherThread(self.__flight_side_context, client_name, SERVER_CONFIG.FLIGHT_PUB_ADDRESS,\
server_pub_port)
pub_thread.start()
status = 1 # Successful registration
else:
raise TypeError
# Check if a protocol adapter should be created
if(proto.lower() in self.__reference_adapter_dict):
# Create an adapter between the client and server.
# The adapter connects to the ports intended for the client
# And returns new ports for the client to connect to.
server_pub_port, server_sub_port = self.__CreateAdapter(client_type,\
client_name,\
server_pub_port,\
server_sub_port,\
proto.lower())
elif proto.lower() == "zmq":
pass
else:
raise TypeError
except TypeError:
traceback.print_exc()
self.__logger.error("Registration Error. Either:")
self.__logger.error("Client type: {} not recognized.".format(client_type))
self.__logger.error("Protocol type: {} not recognized".format(proto))
status = 0
server_pub_port = 0
server_sub_port = 0
# Store the port numbers for future reference
self.__book_keeping[client_name]['pub_port'] = server_pub_port
self.__book_keeping[client_name]['sub_port'] = server_sub_port
return (status, server_pub_port, server_sub_port)
def __CreateAdapter(self, client_type, client_name, server_pub_port, server_sub_port, proto):
"""
Based on proto, create a new Adapter and AdapterProcess.
Save a reference to the AdapterProcess and start.
@params client_type: Type of the client
@params client_name: Name of the client
@params server_pub_port: What port the server is publishing from.
@params server_sub_port: What port the server is listening on.
@params proto: Name of the protocol to use.
"""
# Get ports for the adapter to connect to
from_server_pub_port = server_pub_port # Server port publishing to adapter
to_server_sub_port = server_sub_port # Server port subscribed to adapter
from_client_sub_port = interconnect.GetRandomPort() # Adapter port subscribed to client
to_client_pub_port = interconnect.GetRandomPort() # Adapter port publishing to client
# Get uninstantiated adapter object
Adapter = self.__reference_adapter_dict[proto]
# Then create an instance
adapter = Adapter(proto, client_name, to_server_sub_port, from_server_pub_port,\
to_client_pub_port, from_client_sub_port)
# Create a process
process = AdapterProcess(adapter)
# Save a reference to the process
self.__adapter_process_dict[client_name] = process
# Now start
process.start()
# Return new port numbers for the client to connect to
return to_client_pub_port, from_client_sub_port
def __TerminateAdapters(self):
"""
Iterate through the adapter dictionary and terminate all the AdapterProcesses.
terminate() sends a signal for the AdapterProcesses to act on.
"""
for client_name in self.__adapter_process_dict:
self.__adapter_process_dict[client_name].terminate()
def __RegistrationResponse(self, return_name, status, server_pub_port,\
server_sub_port):
"""
Send response to the registering client.
Clients expect publish port numbers before subscribe port.
@params return_name: Name of the client the response is for.
@params status: Status of the registration.
@params server_pub_port: What port the server is publishing from.
@params server_sub_port: What port the server is listening on.
"""
# Pack the data as little endian integers to make it convenient
# for the embedded systems to receive
msg = [
bytes(return_name),\
struct.pack("<I", status),\
struct.pack("<I", server_pub_port),\
struct.pack("<I", server_sub_port)
]
self.__logger.debug("Registration Status: {}".format(bytes(status)))
self.__logger.debug("{} registered.".format(return_name))
self.__logger.debug("{} Server Pub Port {}".format(return_name, server_pub_port))
self.__logger.debug("{} Server Sub Port {}".format(return_name, server_sub_port))
self.__command_socket.send_multipart(msg)
```
#### File: server/Kernel/threads.py
```python
import os
import zmq
import time
import signal
import threading
import logging
from logging import DEBUG, INFO
from utils.logging_util import GetLogger
from utils import throughput_analyzer
# Global server config class
from server.ServerUtils.server_config import ServerConfig
SERVER_CONFIG = ServerConfig.getInstance()
class GeneralSubscriberThread(threading.Thread):
"""
A subscriber_thread receives packets from flight or ground clients
on it's ROUTER socket. The packets are published from a PUB socket
to publisher_threads.
"""
def __init__(self, context, client_type, server_sub_port, pub_address):
"""
A thread that receives packets from clients.
@params context: ZMQ context.
@params client_type: Type of client
@params server_sub_port: What port the server is listening on.
@params pub_address: What address to publish packets to.
"""
# Setup Logger
name = "{}_SubscribeThread".format(client_type)
self.__name = name
log_path = SERVER_CONFIG.get("filepaths", "server_log_internal_filepath")
self.__logger = GetLogger(name, log_path, logLevel=DEBUG, fileLevel=DEBUG)
self.__logger.debug("Logger Active")
self.__sub_socket = context.socket(zmq.ROUTER)
self.__sub_socket.setsockopt(zmq.LINGER, 0) # Immediately close socket
self.__sub_socket.setsockopt(zmq.RCVHWM, int(SERVER_CONFIG.get('settings', 'server_socket_hwm'))) # Set zmq msg buffer size.
# This is how many msgs to
# buffer before msgs are dropped.
self.__sub_socket.setsockopt(zmq.ROUTER_HANDOVER, 1) # Needed for client reconnect
self.__sub_socket.bind("tcp://*:{}".format(server_sub_port))
self.__pub_socket = context.socket(zmq.PUB)
self.__pub_socket.setsockopt(zmq.LINGER, 0) # Immediately close socket
self.__pub_socket.bind(pub_address)
self.__logger.debug("Pub socket connected to {}".format(pub_address))
threading.Thread.__init__(self, target=self.__SubscribeRunnable)
def __SubscribeRunnable(self):
"""
The main loop of the thread.
"""
# Setup logger
self.__logger.debug("Starting Runnable")
test_point = throughput_analyzer.GetTestPoint(self.__name + "_test_point")
test_point.StartAverage() # Start timing the total lifetime
# Wait until a message is received before receiving
poller = zmq.Poller()
poller.register(self.__sub_socket, zmq.POLLIN)
try:
while(True):
socks = dict(poller.poll()) # Block until msg is received
if(self.__sub_socket in socks):
test_point.StartInstance() # Start timing message recv and send latency
msg = self.__sub_socket.recv_multipart(copy=False) # Do not copy the message
self.__pub_socket.send_multipart(msg, copy=False) # This tells zmq that the application
# does not need to know the contents of the msg frames.
# (Because we are just passing it to the PUB socket)
test_point.SaveInstance() # Stop timer
test_point.Increment(1) # Increment the total number of msgs sent
except zmq.ZMQError as e:
if(e.errno == zmq.ETERM):
self.__logger.debug("ETERM received")
pass
else:
raise
# Exit
test_point.SetAverageThroughput() # Stop timing the total lifetime
test_point.PrintReports() # Print reports to file
self.__sub_socket.close()
self.__pub_socket.close()
class GeneralPublisherThread(threading.Thread):
"""
A publisher_thread receives packets from a subscriber_thread through its SUB socket.
The packets are sent through a DEALER socket to a corresponding client. The client
only receives packets whose sender_name is prefixed to the packet message and whose
sender the client is subscribed to.
"""
def __init__(self, context, client_name, pub_address, server_pub_port):
"""
GeneralPublisherThread constructor.
@params context: Zmq context
@params client_name: Name of the client that receives messages from this publisher
@params pub_address: Address of the internal packet publisher_thread.
@params server_pub_port: Port number for a client to connect to.
"""
# Setup Logger
name = "{}_PublishThread".format(client_name)
self.__name = name
log_path = SERVER_CONFIG.get("filepaths", "server_log_internal_filepath")
self.__logger = GetLogger(name, log_path, logLevel=DEBUG, fileLevel=DEBUG)
self.__logger.debug("Logger Active")
self.__client_name = client_name
self.__sub_socket = context.socket(zmq.SUB)
self.__sub_socket.setsockopt(zmq.LINGER, 0) # Immediately close socket
self.__sub_socket.setsockopt(zmq.RCVHWM, int(SERVER_CONFIG.get('settings', 'server_socket_hwm'))) # Set zmq msg buffer size
# This is how many msgs to
# Buffer before msgs are dropped
self.__sub_socket.connect(pub_address)
self.__logger.debug("Sub Socket conneced to {}".format(pub_address))
self.__pub_socket = context.socket(zmq.DEALER)
self.__pub_socket.setsockopt(zmq.LINGER, 0) # Immediately close socket
self.__pub_socket.bind("tcp://*:{}".format(server_pub_port))
self.__cmd_socket = context.socket(zmq.SUB) # Socket that listens for subscription commands from the kernel
self.__cmd_socket.setsockopt(zmq.SUBSCRIBE, '') # Receive all commands from the kernel
self.__cmd_socket.connect(SERVER_CONFIG.ROUTING_TABLE_CMD_ADDRESS)
self.__cmd_reply_socket = context.socket(zmq.DEALER) # Socket that replies to the kernel
self.__cmd_reply_socket.connect(SERVER_CONFIG.ROUTING_TABLE_CMD_REPLY_ADDRESS)
threading.Thread.__init__(self, target=self.__PublishRunnable)
def __PublishRunnable(self):
"""
Main loop of the thread.
"""
self.__logger.debug("Starting Runnable")
# Select between the two sockets
poller = zmq.Poller()
poller.register(self.__sub_socket, zmq.POLLIN)
poller.register(self.__cmd_socket, zmq.POLLIN)
# Create a test_point to record latency and throughput
test_point = throughput_analyzer.GetTestPoint(self.__name + "_test_point")
test_point.StartAverage() # Start timing the total lifetime
try:
while(True):
socks = dict(poller.poll()) # Block until a msg is received
if(self.__sub_socket in socks): # Packet received
test_point.StartInstance() # Start timing message recv and send latency
msg = self.__sub_socket.recv_multipart()
self.__pub_socket.send(msg[1], copy=False) # First part of message is the sender_name
# We only need to send the fprime packet
test_point.SaveInstance() # Stop the timer for recv and send latency
test_point.Increment(1) # Increase number of messages processed
if(self.__cmd_socket in socks): # A command from the kernel is received
self.__logger.debug("Received")
cmd_list = self.__cmd_socket.recv_multipart()
recipient = cmd_list[0]
option = cmd_list[1]
pub_client = cmd_list[2] # The publishing client to
# subscribe to or unsubscribe from.
if(cmd_list[0] == self.__client_name): # Check if the message is addressed to us
self.__logger.debug("Command received: {}".format(cmd_list))
if(option == SERVER_CONFIG.SUB_OPTION): # Command is to subscribe
self.__logger.debug("Setting sub")
self.__sub_socket.setsockopt(zmq.SUBSCRIBE, pub_client) # Set the socket option to subscribe to a publisher
elif(option == SERVER_CONFIG.USUB_OPTION):
self.__logger.debug("Setting usub")
self.__sub_socket.setsockopt(zmq.UNSUBSCRIBE, pub_client) # Set the socket option to unsubscribe from a publisher
# Ack routing table
self.__cmd_reply_socket.send(b"{}_pubsub Received".format(self.__client_name))
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
self.__logger.debug("ETERM received")
pass
else:
raise
# Exit
test_point.SetAverageThroughput() # Stop timing the total lifetime
test_point.PrintReports() # Print reports to file
# Close sockets
self.__sub_socket.close()
self.__pub_socket.close()
self.__cmd_socket.close()
self.__cmd_reply_socket.close()
``` |
{
"source": "jploudre/raspberry-e-ink",
"score": 3
} |
#### File: jploudre/raspberry-e-ink/bobby.py
```python
import epd1in54
import time
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
def main():
epd = epd1in54.EPD()
epd.init(epd.lut_full_update)
image = Image.open('bobby.bmp')
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(
'/usr/share/fonts/truetype/freefont/dejavu/DejaVuSans-Bold.ttf',
26
)
draw.text((0, 0), 'Bobby!', font=font, fill=0)
epd.clear_frame_memory(0xFF)
epd.set_frame_memory(image, 0, 0)
epd.display_frame()
epd.set_frame_memory(image, 0, 0)
epd.display_frame()
if __name__ == '__main__':
main()
``` |
{
"source": "JPLRalho/PyGameNatureOfCode",
"score": 3
} |
#### File: PyGameNatureOfCode/ErrorHandling/VectorDivisionByZeroError.py
```python
class VectorDivisionByZeroError(Exception):
def __init__(self, message="[Vector.division] Can't divide the vector by zero"):
self.message = message
super().__init__(self.message)
```
#### File: JPLRalho/PyGameNatureOfCode/MainLoop.py
```python
import sys
import pygame
from pygame import QUIT
from Config.InitConfig import InitConfig
# Uncomment the line below this to try the Introduction -> Walker behaviors
# from Introduction.Walker import Walker
# Uncomment the line below this to try the Introduction -> Perlin Noise Image Generation
# from Introduction.NoiseImage import NoiseImage
# Uncomment the line below this to try the Vectors -> Bouncing Ball
from Vectors.BouncingBall import BouncingBall
class MainLoop:
def __init__(self):
self.initConfig = InitConfig()
# Uncomment the line below this to try the Introduction -> Walker behaviors
# self.w = Walker(395, 395, 25, 25, (255, 0, 0))
# Uncomment the line below this to try the Introduction -> Perlin Noise Image Generation
# self.noiseImage = NoiseImage(self.initConfig.screen)
# Uncomment the line below this to try the Vectors -> Bouncing Ball
self.b = BouncingBall([200, 100], 25, (255, 0, 0), [2, 5], self.initConfig.screen.get_size())
def update(self):
# Uncomment the line below this to try the Introduction -> Walker behaviors
# self.w.update()
# Uncomment the line below this to try the Vectors -> Bouncing Ball
self.b.update()
def events(self):
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
def draw(self):
# Uncomment the line below this to try the Introduction -> Walker behaviors
# self.w.draw(self.initConfig.screen)
# Uncomment the line below this to try the Vectors -> Bouncing Ball
self.initConfig.screen.fill((255, 255, 255))
self.b.draw(self.initConfig.screen)
pygame.display.update()
self.initConfig.clock.tick(60)
```
#### File: PyGameNatureOfCode/Vectors/BouncingBall.py
```python
import pygame
from Vectors.Utils.Vector import Vector
class BouncingBall:
location = None
radius = None
recColor = None
velocity = None
acceleration = None
surf = None
limits = None
# location and velocity are [x, y] lists that are converted into Vector objects (see Vectors.Utils)
def __init__(self, location, radius, color, velocity, limits):
self.location = Vector(location[0], location[1])
self.radius = radius
self.recColor = color
self.velocity = Vector(velocity[0], velocity[1])
self.acceleration = Vector(0, 2)
self.limits = limits
def update(self):
self.accelerate()
self.move()
self.checkLimits()
def circleSurf(self):
surf = pygame.Surface((self.radius * 2, self.radius * 2), pygame.SRCALPHA)
pygame.draw.circle(surf, self.recColor, (self.radius, self.radius), self.radius)
return surf
def draw(self, screen):
self.surf = self.circleSurf()
screen.blit(self.surf, (self.location.getX(), self.location.getY()))
def checkLimits(self):
if self.location.getX() <= 0:
self.location.setX(0)
self.velocity.setX(self.velocity.getX() * -1)
elif self.location.getX() >= self.limits[0] - self.radius * 2:
self.location.setX(self.limits[0] - self.radius * 2)
self.velocity.setX(self.velocity.getX() * -1)
if self.location.getY() <= 0:
self.location.setY(0)
self.velocity.setY(self.velocity.getY() * -1)
elif self.location.getY() >= self.limits[1] - self.radius * 2:
self.location.setY(self.limits[1] - self.radius * 2)
self.velocity.setY(self.velocity.getY() * -1)
def move(self):
self.location.add(self.velocity)
def accelerate(self):
self.velocity.add(self.acceleration)
``` |
{
"source": "jplsek/git-pw",
"score": 3
} |
#### File: git-pw/tests/test_utils.py
```python
import subprocess
import os
import mock
from git_pw import utils
@mock.patch.object(utils.subprocess, 'check_output', return_value=b' bar ')
def test_git_config(mock_subprocess):
value = utils.git_config('foo')
assert value == 'bar'
mock_subprocess.assert_called_once_with(['git', 'config', 'foo'])
@mock.patch.object(utils.subprocess, 'check_output',
return_value=b'\xf0\x9f\xa4\xb7')
def test_git_config_unicode(mock_subprocess):
value = utils.git_config('foo')
assert value == u'\U0001f937'
mock_subprocess.assert_called_once_with(['git', 'config', 'foo'])
@mock.patch.object(utils.subprocess, 'check_output',
side_effect=subprocess.CalledProcessError(1, 'xyz', '123'))
def test_git_config_error(mock_subprocess):
value = utils.git_config('foo')
assert value == ''
@mock.patch.object(utils, 'git_config', return_value='bar')
@mock.patch.object(utils, '_tabulate')
@mock.patch.object(utils, '_echo_via_pager')
@mock.patch.dict(os.environ, {'GIT_PAGER': 'foo', 'PAGER': 'baz'})
def test_echo_via_pager_env_GIT_PAGER(mock_inner, mock_tabulate, mock_config):
utils.echo_via_pager('test', ('foo',), None)
mock_config.assert_not_called()
mock_tabulate.assert_called_once_with('test', ('foo',), None)
mock_inner.assert_called_once_with('foo', mock_tabulate.return_value)
@mock.patch.object(utils, 'git_config', return_value='bar')
@mock.patch.object(utils, '_tabulate')
@mock.patch.object(utils, '_echo_via_pager')
@mock.patch.dict(os.environ, {'PAGER': 'baz'})
def test_echo_via_pager_config(mock_inner, mock_tabulate, mock_config):
utils.echo_via_pager('test', ('foo',), None)
mock_config.assert_called_once_with('core.parser')
mock_tabulate.assert_called_once_with('test', ('foo',), None)
mock_inner.assert_called_once_with('bar', mock_tabulate.return_value)
@mock.patch.object(utils, 'git_config', return_value=None)
@mock.patch.object(utils, '_tabulate')
@mock.patch.object(utils, '_echo_via_pager')
@mock.patch.dict(os.environ, {'PAGER': 'baz'})
def test_echo_via_pager_env_PAGER(mock_inner, mock_tabulate, mock_config):
utils.echo_via_pager('test', ('foo',), None)
mock_config.assert_called_once_with('core.parser')
mock_tabulate.assert_called_once_with('test', ('foo',), None)
mock_inner.assert_called_once_with('baz', mock_tabulate.return_value)
@mock.patch.object(utils, 'git_config', return_value=None)
@mock.patch.object(utils, '_tabulate')
@mock.patch.object(utils, '_echo_via_pager')
def test_echo_via_pager_env_default(mock_inner, mock_tabulate, mock_config):
utils.echo_via_pager('test', ('foo',), None)
mock_config.assert_called_once_with('core.parser')
mock_tabulate.assert_called_once_with('test', ('foo',), None)
mock_inner.assert_called_once_with('less', mock_tabulate.return_value)
``` |
{
"source": "jpludens/quartrmastr",
"score": 3
} |
#### File: db/tables/equip_level_stats.py
```python
import sqlite3
from db import get_connection, get_from_datamaster, get_equip_keys
from db.tables import equips
requirements = [
equips
]
def build():
# Requires Equips
# TODO Link to stats (??? wait, but how?)
with get_connection() as con:
con.row_factory = sqlite3.Row
cur = con.cursor()
foreign_keys = get_equip_keys(cur)
cur.execute("PRAGMA foreign_keys = ON")
cur.execute("DROP TABLE IF EXISTS EquipLevels")
cur.execute("CREATE TABLE EquipLevels("
"Id INTEGER PRIMARY KEY AUTOINCREMENT, "
"Equip INTEGER, "
"Level INTEGER, "
"HP INTEGER, "
"MP INTEGER, "
"PAttack INTEGER, "
"MAttack INTEGER, "
"PDefence INTEGER, "
"MDefence INTEGER, "
"Accuracy INTEGER, "
"Evade INTEGER, "
"FOREIGN KEY(Equip) REFERENCES Equips(Id))")
for csv_row in get_from_datamaster('EquipLevelStats.csv'):
cur.execute("INSERT INTO EquipLevels ("
"Equip, Level, HP, MP, PAttack, MAttack, "
"PDefence, MDefence, Accuracy, Evade) "
"VALUES (\"{}\", \"{}\", \"{}\", \"{}\", \"{}\", \"{}\", "
"\"{}\", \"{}\", \"{}\", \"{}\")".format(
foreign_keys[csv_row.get('EquipName')],
csv_row.get('Level'),
csv_row.get('HP'),
csv_row.get('MP'),
csv_row.get('PAttack'),
csv_row.get('MAttack'),
csv_row.get('PDefence'),
csv_row.get('MDefence'),
csv_row.get('Accuracy'),
csv_row.get('Evade')))
def read():
con = get_connection()
con.row_factory = sqlite3.Row
with con:
cur = con.cursor()
cur.execute("SELECT "
"Equips.Id AS equip, "
"Level AS level, "
"HP AS HEALTH_POINTS, "
"MP AS MAGIC_POINTS, "
"PAttack AS ATTACK, "
"MAttack AS MAGIC_ATTACK, "
"PDefence AS DEFENCE, "
"MDefence AS MAGIC_DEFENCE, "
"Accuracy AS ACCURACY, "
"Evade AS EVADE "
"FROM Equips "
"JOIN EquipLevels "
"ON EquipLevels.Equip = Equips.Id ")
return [dict(row) for row in cur.fetchall()]
def load():
raw_data = read()
result = {}
for row in raw_data:
equip = row["equip"]
level = row["level"]
stats = {k: v for k, v in row.items() if k not in ["equip", "level"]}
try:
result[equip][level] = stats
except KeyError:
result[equip] = {level: stats}
return result
```
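The `build()` above interpolates CSV values straight into the INSERT statement. A parameterized variant (a sketch, not taken from the repository; `insert_equip_level` is a hypothetical helper name) lets sqlite3 handle quoting and escaping:
```python
def insert_equip_level(cur, equip_id, csv_row):
    # Sketch only: the same INSERT as in build() above, but using sqlite3
    # placeholders so the driver escapes the values.
    cur.execute(
        "INSERT INTO EquipLevels ("
        "Equip, Level, HP, MP, PAttack, MAttack, "
        "PDefence, MDefence, Accuracy, Evade) "
        "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
        (equip_id,
         csv_row.get('Level'),
         csv_row.get('HP'),
         csv_row.get('MP'),
         csv_row.get('PAttack'),
         csv_row.get('MAttack'),
         csv_row.get('PDefence'),
         csv_row.get('MDefence'),
         csv_row.get('Accuracy'),
         csv_row.get('Evade')))
```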
#### File: db/tables/equips.py
```python
import sqlite3
from db import get_connection, get_from_datamaster
from db.tables import equip_slots
requirements = [
equip_slots
]
def build():
with get_connection() as con:
cur = con.cursor()
cur.execute("SELECT EquipSlotName, Id FROM EquipSlots")
foreign_keys = {cur_row[0]: cur_row[1]
for cur_row in cur.fetchall()}
cur.execute("PRAGMA foreign_keys = ON")
cur.execute("DROP TABLE IF EXISTS Equips")
cur.execute("CREATE TABLE Equips("
"Id INTEGER PRIMARY KEY AUTOINCREMENT, "
"EquipName TEXT, "
"EquipSlot TEXT, "
"FOREIGN KEY(EquipSlot) REFERENCES EquipSlots(Id))")
for csv_row in get_from_datamaster('Equips.csv'):
cur.execute("INSERT INTO Equips ("
"EquipName, EquipSlot) "
"VALUES (\"{}\", \"{}\")".format(
csv_row.get('EquipName'),
foreign_keys[csv_row.get('EquipSlotName')]))
def read():
con = get_connection()
con.row_factory = sqlite3.Row
with con:
cur = con.cursor()
cur.execute("SELECT "
"Equips.Id AS id, "
"EquipName AS name, "
"EquipSlotName AS slot, "
"EquipSlotTypeName AS type "
"FROM Equips "
"JOIN EquipSlots "
"ON Equips.EquipSlot = EquipSlots.Id "
"JOIN EquipSlotTypes "
"ON EquipSlots.EquipSlotType = EquipSlotTypes.Id")
return [dict(row) for row in cur.fetchall()]
```
#### File: db/tables/trait_stats.py
```python
from db import get_connection, get_from_datamaster
from db.tables import traits
requirements = [traits]
def build():
datamaster = get_from_datamaster("EquipTraits.csv")
trait_rows_with_stats_by_text = {
row["Text"]: row
for row in datamaster
if row["TraitPropertyName"] == "Stat"}
with get_connection() as con:
cur = con.cursor()
cur.execute("SELECT StatName, Id FROM Stats")
stat_ids_by_name = {cur_row[0]: cur_row[1]
for cur_row in cur.fetchall()}
cur.execute("DROP TABLE IF EXISTS TraitStats")
cur.execute("CREATE TABLE TraitStats("
"Id INTEGER PRIMARY KEY AUTOINCREMENT, "
"Trait INTEGER, "
"Stat INTEGER, "
"FOREIGN KEY(Trait) REFERENCES Traits(Id) ,"
"FOREIGN KEY(Stat) REFERENCES Stats(Id))")
for trait in traits.read():
text = trait["text"]
trait_row_from_datamaster = trait_rows_with_stats_by_text.get(text)
if trait_row_from_datamaster:
trait_id = trait["id"]
stat_id = stat_ids_by_name[
trait_row_from_datamaster[
"TraitPropertyValue"]]
cur.execute("INSERT INTO TraitStats ("
"Trait, Stat) "
"VALUES (\"{}\", \"{}\")".format(
trait_id, stat_id))
```
#### File: jpludens/quartrmastr/quartrmastr.py
```python
import flask
import json
from db.loaders import characters, equips, materials
app = flask.Flask(__name__)
api_root = "localhost:5000/api/v1/"
# Api routes
@app.route('/api/v1/characters/', methods=['GET'])
def get_characters():
return json.dumps(characters.load())
@app.route('/api/v1/equips/', methods=['GET'])
def get_equips():
return json.dumps(equips.load())
@app.route('/api/v1/materials/', methods=['GET'])
def get_materials():
return json.dumps(materials.load())
# Image server
@app.route('/images', methods=['GET'])
def get_image():
image = flask.request.args.get('image')
if not image:
return flask.make_response(
'Request to /images must include image argument', 404)
if not image[-4:] == '.png':
return flask.make_response(
'/images only provides files of type(s): .png', 404)
return flask.send_from_directory('static/img/', image)
# App routes
@app.route('/', methods=['GET'])
def index():
# return flask.render_template('index.html')
return flask.render_template('equips.html')
# @app.route('/characters/', methods=['GET'])
# def characters_view():
# return flask.render_template('characters.html')
# @app.route('/equips/', methods=['GET'])
# def equips_view():
# return flask.render_template('equips.html')
# @app.route('/materials/', methods=['GET'])
# def materials_view():
# return flask.render_template('materials.html')
if __name__ == '__main__':
app.run(debug=True, threaded=True)
```
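Once the dev server above is running (`python quartrmastr.py`), the JSON endpoints can be exercised directly. A usage sketch, assuming the `requests` package is available and Flask's default port 5000:
```python
# Usage sketch (not part of the repository): fetch the equips endpoint from
# the local dev server and count the returned items.
import requests

resp = requests.get('http://localhost:5000/api/v1/equips/')
resp.raise_for_status()
equips = resp.json()
print(len(equips))
```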
#### File: tests/regression/test_regression_quartrmastr.py
```python
import quartrmastr
import json
import unittest
class CharactersTopographyTests(unittest.TestCase):
def setUp(self):
self.data = json.loads(quartrmastr.get_characters())
# TODO
class EquipsTopographyTests(unittest.TestCase):
def setUp(self):
self.data = json.loads(quartrmastr.get_equips())
def test_get_equips__equips__contains_expected_keys(self):
key_sets = [set(item.keys()) for item in self.data]
# Make sure we get something
self.assertTrue(key_sets != [])
expected_keys = {
'id',
'equipName',
'equipSlot',
'levels',
'traits',
'elementalResistances',
'statusResistances'}
expected_diffs = [
set(),
{'elementalResistances'},
{'statusResistances'},
{'elementalResistances',
'statusResistances'}]
# None of the items have keys we are not expecting
self.assertTrue(all(key_set.difference(expected_keys) == set() for key_set in key_sets))
self.assertTrue(all(expected_keys.difference(key_set) in expected_diffs for key_set in key_sets))
def test_get_equips__elemental_resistances__contains_expected_keys(self):
resistance_sets = [e['elementalResistances'] for e in self.data
if 'elementalResistances' in e]
actuals = [set(resistance.keys())
for resistance_set in resistance_sets
for resistance in resistance_set]
self.assertTrue(actuals != [])
expected = {
"elementName",
"scheme"}
self.assertTrue(all(actual == expected for actual in actuals))
def test_get_equips__ailment_resistances__contains_expected_keys(self):
resistance_sets = [e['statusResistances'] for e in self.data
if 'statusResistances' in e]
actuals = [set(resistance.keys())
for resistance_set in resistance_sets
for resistance in resistance_set]
expected = {
"statusName",
"scheme"}
self.assertTrue(all(actual == expected for actual in actuals))
def test_get_equips__levels__contains_five_elements(self):
level_lengths = [len(e['levels']) for e in self.data
if e['equipSlot'] != 'Flair']
assert level_lengths != []
self.assertTrue(all(length == 5 for length in level_lengths))
    def test_get_equips__flair_levels__contains_three_elements(self):
level_lengths = [len(e['levels']) for e in self.data
if e['equipSlot'] == 'Flair']
assert level_lengths != []
self.assertTrue(all(length == 3 for length in level_lengths))
def test_get_equips__levels_under_max__elements_contain_expected_keys(self):
equip_level_sets = [e['levels'] for e in self.data
if e['equipSlot'] != 'Flair']
actuals = [set(level.keys()) for level_set in equip_level_sets
for level in level_set if level.get('level') < 5]
self.assertTrue(actuals != [])
expected = {
'level',
'upgradeMaterials',
'stats'}
self.assertTrue(all(actual == expected for actual in actuals))
def test_get_equips__flair_levels_under_max__elements_contain_expected_keys(self):
equip_level_sets = [e['levels'] for e in self.data
if e['equipSlot'] == 'Flair']
actuals = [set(level.keys()) for level_set in equip_level_sets
for level in level_set if level.get('level') < 3]
self.assertTrue(actuals != [])
expected = {
'level',
'upgradeMaterials',
'stats'}
self.assertTrue(all(actual == expected for actual in actuals))
def test_get_equips__levels_max__elements_contain_expected_keys(self):
equip_level_sets = [e['levels'] for e in self.data
if e['equipSlot'] != 'Flair']
actuals = [set(level.keys()) for level_set in equip_level_sets
for level in level_set if level.get('level') == 5]
self.assertTrue(actuals != [])
expected = {
'level',
'stats'}
self.assertTrue(all(actual == expected for actual in actuals))
def test_get_equips__flair_levels_max__elements_contain_expected_keys(self):
equip_level_sets = [e['levels'] for e in self.data
if e['equipSlot'] == 'Flair']
actuals = [set(level.keys()) for level_set in equip_level_sets
for level in level_set if level.get('level') == 3]
self.assertTrue(actuals != [])
expected = {
'level',
'stats'}
self.assertTrue(all(actual == expected for actual in actuals))
def test_get_equips__levels__elements_sorted_by_level_key(self):
equip_level_sets = [e['levels'] for e in self.data
if e['equipSlot'] != 'Flair']
actuals = [[level.get('level') for level in level_set]
for level_set in equip_level_sets]
self.assertTrue(actuals != [])
expected = [1, 2, 3, 4, 5]
self.assertTrue(all(actual == expected for actual in actuals))
def test_get_equips__flair_levels__elements_sorted_by_level_key(self):
equip_level_sets = [e['levels'] for e in self.data
if e['equipSlot'] == 'Flair']
actuals = [[level.get('level') for level in level_set]
for level_set in equip_level_sets]
self.assertTrue(actuals != [])
expected = [1, 2, 3]
self.assertTrue(all(actual == expected for actual in actuals))
def test_get_equips__third_level__levels_materials_contains_expected_keys(self):
equip_level_sets = [e['levels'] for e in self.data
if e['equipSlot'] != 'Flair']
actuals = [set(upgradeMaterial.keys())
for level_set in equip_level_sets
for level in level_set if level.get('level') < 5
for upgradeMaterial in level.get('upgradeMaterials')]
self.assertTrue(actuals != [])
expected = {'materialAmount', 'materialName'}
self.assertTrue(all(actual == expected for actual in actuals))
def test_get_equips__third_level__flair_levels_materials_contains_expected_keys(self):
equip_level_sets = [e['levels'] for e in self.data
if e['equipSlot'] == 'Flair']
actuals = [set(upgradeMaterial.keys())
for level_set in equip_level_sets
for level in level_set if level.get('level') < 3
for upgradeMaterial in level.get('upgradeMaterials')]
self.assertTrue(actuals != [])
expected = {'materialAmount', 'materialName'}
self.assertTrue(all(actual == expected for actual in actuals))
def test_get_equips__third_level__levels_stats_contains_expected_keys(self):
equip_level_sets = [e['levels'] for e in self.data]
actuals = [set(level.get('stats').keys())
for level_set in equip_level_sets
for level in level_set]
self.assertTrue(actuals != [])
expected = {
'healthPoints',
'magicPoints',
'physicalAttack',
'magicAttack',
'physicalDefence',
'magicDefence',
'accuracy',
'evade'}
self.assertTrue(all(actual == expected for actual in actuals))
class MaterialsTopographyTests(unittest.TestCase):
def setUp(self):
self.data = json.loads(quartrmastr.get_materials())
def test_get_materials__contains_expected_keys(self):
actuals = [set(item.keys()) for item in self.data]
expected = {'id', 'name', 'gold'}
self.assertTrue(all(actual == expected for actual in actuals))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jpludens/starrlabs",
"score": 2
} |
#### File: starrlabs/build/definition_generator.py
```python
import os
import build.generators.js
import build.generators.sass
from data.sr_enums import js_enums, sass_enums, ordered_categories
def generate(path):
print 'Creating definition files at {}'.format(path)
print ' Creating js...'
js_generator = build.generators.js.JsGenerator()
for name, values in js_enums.items():
include_sort_function = name in ordered_categories
filename = js_generator.generate_file(path, name, values, include_sort_function)
print ' Created definition for {} in {}'.format(name, filename)
print ' Creating sass...'
sass_generator = build.generators.sass.SassGenerator()
variables_filepath = sass_generator.generate_variables_file(path, sass_enums)
print ' Created generated variables at {}'.format(variables_filepath)
if __name__ == "__main__":
#############################################################
# This lets imports work when running from command line
import sys
this_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(this_path, '..'))
#############################################################
from build import generation_path
generate(generation_path)
```
#### File: build/generators/__init__.py
```python
import os
import errno
from enum import Enum
from transformers import case, space
class TransformType(Enum):
case = 0
space = 1
class EnumGenerator(object):
_transformer_by_transform_type = {
TransformType.case: case,
TransformType.space: space
}
class TermType(Enum):
filename = 0
section = 1
category = 2
element_name = 3
element_value = 4
suffix = 5
def __init__(self, indent_string=' ', quote_char='"'):
self.indent_string = indent_string
self.quote_char = quote_char
self.subpath = ''
# This setup should be done by iterating over Term, except:
# 1) Python 2 enum seems to require an __order__ attribute,
# that I don't want to maintain.
# 2) PyCharm complains whether __order__ is defined or not:
# Expected collections.Iterable, got Term instead
self.transform_strategies = {
EnumGenerator.TermType.filename: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.section: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.category: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.element_name: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.element_value: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
},
EnumGenerator.TermType.suffix: {
TransformType.case: case.Strategy.as_is,
TransformType.space: space.Strategy.as_is
}
}
@classmethod
def _transform(cls, string, transform_type, strategy):
return cls._transformer_by_transform_type[transform_type]\
.transform(string, strategy)
def transform_term(self, string, term_type):
result = string
case_strategy = self.transform_strategies[term_type][TransformType.case]
result = self._transform(result, TransformType.case, case_strategy)
space_strategy = self.transform_strategies[term_type][TransformType.space]
result = self._transform(result, TransformType.space, space_strategy)
return result
def enquote(self, string):
        return '{0}{1}{0}'.format(self.quote_char, string)
```
#### File: starrlabs/build/__init__.py
```python
import os
import errno
import inspect
here = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
source_root = os.path.join('..', 'src')
generation_root = os.path.join('..', 'gen')
distribution_root = os.path.join('..', 'dst')
source_path = os.path.abspath(os.path.join(here, source_root))
generation_path = os.path.abspath(os.path.join(here, generation_root))
distribution_path = os.path.abspath(os.path.join(here, distribution_root))
def make_path(path):
    try:
        os.makedirs(path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
``` |
{
"source": "jplumlee/sqlalchemy-challenge",
"score": 3
} |
#### File: jplumlee/sqlalchemy-challenge/app.py
```python
import numpy as np
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# Import Flask
from flask import Flask, jsonify
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to the tables
measurement = Base.classes.measurement
station = Base.classes.station
# Create an app
app = Flask(__name__)
# Home page.
# List all routes that are available.
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"<a href='/api/v1.0/precipitation'>Precipitation</a><br/>"
f"<a href='/api/v1.0/stations'>Stations</a><br/>"
f"<a href='/api/v1.0/tobs'>TOBS</a><br/>"
f"<a href='/api/v1.0/<start>'>Start</a><br/>"
f"<a href='/api/v1.0/<start>/<end>'>Start/End</a><br/>"
)
# Convert the query results to a dictionary using date as the key and prcp as the value.
@app.route("/api/v1.0/precipitation")
def Precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
# Query all precipitation
results = session.query(measurement.date, measurement.prcp).filter(measurement.date >= '2016-08-23').all()
session.close()
# Create a dictionary from the row data and append to a list of all_results
all_results = []
for item in results:
item_dict = {}
item_dict["date"] = item[0]
item_dict["prcp"] = item[1]
all_results.append(item_dict)
# Return the JSON representation of your dictionary.
return jsonify(all_results)
# /api/v1.0/stations
@app.route("/api/v1.0/stations")
def Stations():
session = Session(engine)
# Query all stations
results = session.query(station.station).all()
session.close()
all_stations = list(np.ravel(results))
# Return a JSON list of stations from the dataset.
return jsonify(all_stations)
# /api/v1.0/tobs
@app.route("/api/v1.0/tobs")
def TOBS():
session = Session(engine)
# Query the dates and temperature observations of the most active station for the last year of data.
results = session.query(measurement.date,measurement.tobs).filter(measurement.date >= '2016-08-23').filter(measurement.station == 'USC00519281').all()
session.close()
tobs_results = list(np.ravel(results))
# Return a JSON list of temperature observations (TOBS) for the previous year.
return jsonify(tobs_results)
# /api/v1.0/<start>
# When given the start only, calculate TMIN, TAVG, and TMAX for all dates greater than and equal to the start date.
@app.route("/api/v1.0/<start>")
def temperature_start(start):
session = Session(engine)
# Query the dates and temperature observations
    results = session.query(func.min(measurement.tobs), func.max(measurement.tobs), func.avg(measurement.tobs)).filter(measurement.date >= start).all()
    session.close()
    temperature_start = list(np.ravel(results))
# Return a JSON list of temperature observations (TOBS).
return jsonify(temperature_start)
# Return a JSON list of the minimum temperature, the average temperature, and the max temperature for a given start or start-end range.
# When given the start and the end date, calculate the TMIN, TAVG, and TMAX for dates between the start and end date inclusive.
# /api/v1.0/<start>/<end>
@app.route("/api/v1.0/<start>/<end>")
def temperature_start_end(start, end):
session = Session(engine)
# Query the dates and temperature observations
    results = session.query(func.min(measurement.tobs), func.max(measurement.tobs), func.avg(measurement.tobs)).filter(measurement.date >= start).filter(measurement.date <= end).all()
    session.close()
    temperature_start_end = list(np.ravel(results))
# Return a JSON list of temperature observations (TOBS).
return jsonify(temperature_start_end)
if __name__ == "__main__":
app.run(debug=True)
``` |
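With the route parameters wired into the queries as above, the start and start/end endpoints accept ISO dates directly in the URL. A usage sketch against the local dev server (the dates below are examples, not values from the source):
```python
# Usage sketch (not part of the repository): query the start/end route once
# `python app.py` is running on Flask's default port 5000.
import json
import urllib.request

url = "http://localhost:5000/api/v1.0/2016-08-23/2017-08-23"
with urllib.request.urlopen(url) as resp:
    tmin, tmax, tavg = json.loads(resp.read())
print(f"TMIN={tmin} TMAX={tmax} TAVG={tavg}")
```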
{
"source": "jplummer01/azureml-examples",
"score": 2
} |
#### File: components/rai_analyse/create_counterfactual.py
```python
import argparse
import json
import logging
from responsibleai import RAIInsights
from azureml.core import Run
from constants import RAIToolType
from rai_component_utilities import (
create_rai_insights_from_port_path,
save_to_output_port,
copy_dashboard_info_file,
)
from arg_helpers import (
boolean_parser,
str_or_int_parser,
str_or_list_parser,
json_empty_is_none_parser,
)
_logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.INFO)
def parse_args():
# setup arg parser
parser = argparse.ArgumentParser()
parser.add_argument("--rai_insights_dashboard", type=str, required=True)
parser.add_argument("--total_CFs", type=int, required=True)
parser.add_argument("--method", type=str)
parser.add_argument("--desired_class", type=str_or_int_parser)
parser.add_argument("--desired_range", type=json_empty_is_none_parser, help="List")
parser.add_argument(
"--permitted_range", type=json_empty_is_none_parser, help="Dict"
)
parser.add_argument("--features_to_vary", type=str_or_list_parser)
parser.add_argument("--feature_importance", type=boolean_parser)
parser.add_argument("--counterfactual_path", type=str)
# parse args
args = parser.parse_args()
# return args
return args
def main(args):
my_run = Run.get_context()
# Load the RAI Insights object
rai_i: RAIInsights = create_rai_insights_from_port_path(
my_run, args.rai_insights_dashboard
)
# Add the counterfactual
rai_i.counterfactual.add(
total_CFs=args.total_CFs,
method=args.method,
desired_class=args.desired_class,
desired_range=args.desired_range,
permitted_range=args.permitted_range,
features_to_vary=args.features_to_vary,
feature_importance=args.feature_importance,
)
_logger.info("Added counterfactual")
# Compute
rai_i.compute()
_logger.info("Computation complete")
# Save
save_to_output_port(rai_i, args.counterfactual_path, RAIToolType.COUNTERFACTUAL)
_logger.info("Saved to output port")
# Copy the dashboard info file
copy_dashboard_info_file(args.rai_insights_dashboard, args.counterfactual_path)
_logger.info("Completing")
# run script
if __name__ == "__main__":
# add space in logs
print("*" * 60)
print("\n\n")
# parse args
args = parse_args()
# run main function
main(args)
# add space in logs
print("*" * 60)
print("\n\n")
```
#### File: components/rai_analyse/create_rai_insights.py
```python
import argparse
import json
import logging
import os
import shutil
from typing import Any
from azureml.core import Run
from responsibleai import RAIInsights, __version__ as responsibleai_version
from constants import DashboardInfo, PropertyKeyValues
from arg_helpers import get_from_args, json_empty_is_none_parser
from rai_component_utilities import load_dataset, fetch_model_id, load_mlflow_model
_logger = logging.getLogger(__file__)
logging.basicConfig(level=logging.INFO)
def parse_args():
# setup arg parser
parser = argparse.ArgumentParser()
parser.add_argument("--title", type=str, required=True)
parser.add_argument(
"--task_type", type=str, required=True, choices=["classification", "regression"]
)
parser.add_argument(
"--model_info_path", type=str, help="name:version", required=True
)
parser.add_argument("--train_dataset", type=str, required=True)
parser.add_argument("--test_dataset", type=str, required=True)
parser.add_argument("--target_column_name", type=str, required=True)
parser.add_argument("--maximum_rows_for_test_dataset", type=int, default=5000)
parser.add_argument(
"--categorical_column_names", type=str, help="Optional[List[str]]"
)
parser.add_argument("--classes", type=str, help="Optional[List[str]]")
parser.add_argument("--output_path", type=str, help="Path to output JSON")
# parse args
args = parser.parse_args()
# return args
return args
def create_constructor_arg_dict(args):
"""Create a kwarg dict for RAIInsights constructor
Only does the 'parameters' for the component, not the
input ports
"""
result = dict()
cat_col_names = get_from_args(
args, "categorical_column_names", custom_parser=json.loads, allow_none=True
)
class_names = get_from_args(
args, "classes", custom_parser=json_empty_is_none_parser, allow_none=True
)
result["target_column"] = args.target_column_name
result["task_type"] = args.task_type
result["categorical_features"] = cat_col_names
result["classes"] = class_names
result["maximum_rows_for_test"] = args.maximum_rows_for_test_dataset
return result
def copy_input_data(component_input_path: str, output_path: str):
if os.path.isdir(component_input_path):
src_path = component_input_path
else:
src_path = os.path.dirname(component_input_path)
src_path = src_path + "/"
_logger.info(f"Copying from {src_path} to {output_path}")
assert os.path.isdir(src_path), "Checking src_path"
shutil.copytree(src=src_path, dst=output_path)
def main(args):
my_run = Run.get_context()
_logger.info("Dealing with initialization dataset")
train_df = load_dataset(args.train_dataset)
_logger.info("Dealing with evaluation dataset")
test_df = load_dataset(args.test_dataset)
model_id = fetch_model_id(args.model_info_path)
_logger.info("Loading model: {0}".format(model_id))
model_estimator = load_mlflow_model(my_run.experiment.workspace, model_id)
constructor_args = create_constructor_arg_dict(args)
# Make sure that it actually loads
_logger.info("Creating RAIInsights object")
_ = RAIInsights(
model=model_estimator, train=train_df, test=test_df, **constructor_args
)
_logger.info("Saving JSON for tool components")
output_dict = {
DashboardInfo.RAI_INSIGHTS_RUN_ID_KEY: str(my_run.id),
DashboardInfo.RAI_INSIGHTS_MODEL_ID_KEY: model_id,
DashboardInfo.RAI_INSIGHTS_CONSTRUCTOR_ARGS_KEY: constructor_args,
}
output_file = os.path.join(
args.output_path, DashboardInfo.RAI_INSIGHTS_PARENT_FILENAME
)
with open(output_file, "w") as of:
json.dump(output_dict, of)
_logger.info("Copying train data files")
copy_input_data(
args.train_dataset,
os.path.join(args.output_path, DashboardInfo.TRAIN_FILES_DIR),
)
_logger.info("Copying test data files")
copy_input_data(
args.test_dataset, os.path.join(args.output_path, DashboardInfo.TEST_FILES_DIR)
)
# run script
if __name__ == "__main__":
# add space in logs
print("*" * 60)
print("\n\n")
# parse args
args = parse_args()
# run main function
main(args)
# add space in logs
print("*" * 60)
print("\n\n")
```
#### File: mnist-pytorch/src/trainer.py
```python
import mlflow
import mlflow.pytorch
import numpy as np
from mlflow.models.signature import ModelSignature
from mlflow.types.schema import Schema, TensorSpec
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from types import SimpleNamespace
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
# Added the view for reshaping score requests
x = x.view(-1, 1, 28, 28)
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print(
"Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
epoch,
batch_idx * len(data),
len(train_loader.dataset),
100.0 * batch_idx / len(train_loader),
loss.item(),
)
)
# Use MLflow logging
mlflow.log_metric("epoch_loss", loss.item())
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
# sum up batch loss
test_loss += F.nll_loss(output, target, reduction="sum").item()
# get the index of the max log-probability
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print("\n")
print(
"Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n".format(
test_loss,
correct,
len(test_loader.dataset),
100.0 * correct / len(test_loader.dataset),
)
)
# Use MLflow logging
mlflow.log_metric("average_loss", test_loss)
def driver():
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader)
if args.save_model:
input_schema = Schema(
[
TensorSpec(np.dtype(np.uint8), (-1, 28, 28)),
]
)
output_schema = Schema(
[
TensorSpec(np.dtype(np.float32), (-1, 10)),
]
)
signature = ModelSignature(inputs=input_schema, outputs=output_schema)
mlflow.pytorch.log_model(model, "model", signature=signature)
if __name__ == "__main__":
# Training settings
args = SimpleNamespace()
args.batch_size = 64
args.test_batch_size = 1000
args.epochs = 3 # Higher number for better convergence
args.lr = 0.01
args.momentum = 0.5
args.no_cuda = True
args.seed = 1
args.log_interval = 10
args.save_model = True
torch.manual_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
loader_params = {"num_workers": 1, "pin_memory": True} if use_cuda else {}
# Use Azure Open Datasets for MNIST dataset
datasets.MNIST.mirrors = ["https://azureopendatastorage.azurefd.net/mnist/"]
datasets.MNIST.resources = [
("train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"),
("train-labels-idx1-ubyte.gz", "d53e105ee54ea40749a09fcbcd1e9432"),
("t10k-images-idx3-ubyte.gz", "9fb629c4189551a2d022fa330f9573f3"),
("t10k-labels-idx1-ubyte.gz", "ec29112dd5afa0611ce80d1b7f02629c"),
]
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"../data",
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
),
batch_size=args.batch_size,
shuffle=True,
**loader_params
)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(
"../data",
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
),
batch_size=args.test_batch_size,
shuffle=True,
**loader_params
)
driver()
```
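Because the model is logged with an explicit `ModelSignature` (uint8 inputs of shape `(-1, 28, 28)`, float32 outputs of shape `(-1, 10)`), it can be reloaded through MLflow and scored outside the training script. A sketch, with the model URI left as a placeholder:
```python
# Usage sketch (not part of the trainer): reload the logged model and score
# one 28x28 uint8 image. Replace the placeholder URI with a real run id.
import mlflow.pytorch
import numpy as np
import torch

model_uri = "runs:/<RUN_ID>/model"  # placeholder, not a real run id
model = mlflow.pytorch.load_model(model_uri)
model.eval()

image = np.zeros((1, 28, 28), dtype=np.uint8)  # matches the input schema
with torch.no_grad():
    scores = model(torch.from_numpy(image).float())
print(scores.argmax(dim=1))  # predicted digit class
```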
#### File: forecasting-many-models/scripts/data_preprocessing_file.py
```python
from pathlib import Path
from azureml.core import Run
import argparse
import os
def main(args):
output = Path(args.output)
output.mkdir(parents=True, exist_ok=True)
run_context = Run.get_context()
input_path = run_context.input_datasets["train_10_models"]
for file_name in os.listdir(input_path):
input_file = os.path.join(input_path, file_name)
with open(input_file, "r") as f:
content = f.read()
# Apply any data pre-processing techniques here
output_file = os.path.join(output, file_name)
with open(output_file, "w") as f:
f.write(content)
def my_parse_args():
parser = argparse.ArgumentParser("Test")
parser.add_argument("--input", type=str)
parser.add_argument("--output", type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = my_parse_args()
main(args)
```
#### File: forecasting-many-models/scripts/data_preprocessing_tabular.py
```python
from pathlib import Path
from azureml.core import Run
import argparse
def main(args):
output = Path(args.output)
output.mkdir(parents=True, exist_ok=True)
run_context = Run.get_context()
dataset = run_context.input_datasets["train_10_models"]
df = dataset.to_pandas_dataframe()
# Apply any data pre-processing techniques here
df.to_parquet(output / "data_prepared_result.parquet", compression=None)
def my_parse_args():
parser = argparse.ArgumentParser("Test")
parser.add_argument("--input", type=str)
parser.add_argument("--output", type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = my_parse_args()
main(args)
```
#### File: automl-standalone-jobs/automl-image-instance-segmentation-task-fridge-items/jsonl_converter.py
```python
import argparse
import os
import json
import numpy as np
import PIL.Image as Image
import xml.etree.ElementTree as ET
from simplification.cutil import simplify_coords
from skimage import measure
def convert_mask_to_polygon(
mask,
max_polygon_points=100,
score_threshold=0.5,
max_refinement_iterations=25,
edge_safety_padding=1,
):
"""Convert a numpy mask to a polygon outline in normalized coordinates.
:param mask: Pixel mask, where each pixel has an object (float) score in [0, 1], in size ([1, height, width])
:type: mask: <class 'numpy.array'>
:param max_polygon_points: Maximum number of (x, y) coordinate pairs in polygon
:type: max_polygon_points: Int
:param score_threshold: Score cutoff for considering a pixel as in object.
:type: score_threshold: Float
:param max_refinement_iterations: Maximum number of times to refine the polygon
trying to reduce the number of pixels to meet max polygon points.
:type: max_refinement_iterations: Int
:param edge_safety_padding: Number of pixels to pad the mask with
:type edge_safety_padding: Int
:return: normalized polygon coordinates
:rtype: list of list
"""
# Convert to numpy bitmask
mask = mask[0]
mask_array = np.array((mask > score_threshold), dtype=np.uint8)
image_shape = mask_array.shape
# Pad the mask to avoid errors at the edge of the mask
embedded_mask = np.zeros(
(
image_shape[0] + 2 * edge_safety_padding,
image_shape[1] + 2 * edge_safety_padding,
),
dtype=np.uint8,
)
embedded_mask[
edge_safety_padding : image_shape[0] + edge_safety_padding,
edge_safety_padding : image_shape[1] + edge_safety_padding,
] = mask_array
# Find Image Contours
contours = measure.find_contours(embedded_mask, 0.5)
simplified_contours = []
for contour in contours:
# Iteratively reduce polygon points, if necessary
if max_polygon_points is not None:
simplify_factor = 0
while (
len(contour) > max_polygon_points
and simplify_factor < max_refinement_iterations
):
contour = simplify_coords(contour, simplify_factor)
simplify_factor += 1
# Convert to [x, y, x, y, ....] coordinates and correct for padding
unwrapped_contour = [0] * (2 * len(contour))
unwrapped_contour[::2] = np.ceil(contour[:, 1]) - edge_safety_padding
unwrapped_contour[1::2] = np.ceil(contour[:, 0]) - edge_safety_padding
simplified_contours.append(unwrapped_contour)
return _normalize_contour(simplified_contours, image_shape)
def _normalize_contour(contours, image_shape):
height, width = image_shape[0], image_shape[1]
for contour in contours:
contour[::2] = [x * 1.0 / width for x in contour[::2]]
contour[1::2] = [y * 1.0 / height for y in contour[1::2]]
return contours
def binarise_mask(mask_fname):
mask = Image.open(mask_fname)
mask = np.array(mask)
# instances are encoded as different colors
obj_ids = np.unique(mask)
# first id is the background, so remove it
obj_ids = obj_ids[1:]
# split the color-encoded mask into a set of binary masks
binary_masks = mask == obj_ids[:, None, None]
return binary_masks
def parsing_mask(mask_fname):
# For this particular dataset, initially each mask was merged (based on binary mask of each object)
# in the order of the bounding boxes described in the corresponding PASCAL VOC annotation file.
# Therefore, we have to extract each binary mask which is in the order of objects in the annotation file.
# https://github.com/microsoft/computervision-recipes/blob/master/utils_cv/detection/dataset.py
binary_masks = binarise_mask(mask_fname)
polygons = []
for bi_mask in binary_masks:
if len(bi_mask.shape) == 2:
bi_mask = bi_mask[np.newaxis, :]
polygon = convert_mask_to_polygon(bi_mask)
polygons.append(polygon)
return polygons
def convert_mask_in_VOC_to_jsonl(base_dir, remote_path):
src_images = base_dir
# We'll copy each JSONL file within its related MLTable folder
training_mltable_path = "./data/training-mltable-folder/"
validation_mltable_path = "./data/validation-mltable-folder/"
train_validation_ratio = 5
# Path to the training and validation files
train_annotations_file = os.path.join(
training_mltable_path, "train_annotations.jsonl"
)
validation_annotations_file = os.path.join(
validation_mltable_path, "validation_annotations.jsonl"
)
# Path to the annotations
annotations_folder = os.path.join(src_images, "annotations")
mask_folder = os.path.join(src_images, "segmentation-masks")
# sample json line dictionary
json_line_sample = {
"image_url": remote_path,
"image_details": {"format": None, "width": None, "height": None},
"label": [],
}
# Read each annotation and convert it to jsonl line
with open(train_annotations_file, "w") as train_f:
with open(validation_annotations_file, "w") as validation_f:
for i, filename in enumerate(os.listdir(annotations_folder)):
if filename.endswith(".xml"):
print("Parsing " + os.path.join(src_images, filename))
root = ET.parse(
os.path.join(annotations_folder, filename)
).getroot()
width = int(root.find("size/width").text)
height = int(root.find("size/height").text)
# convert mask into polygon
mask_fname = os.path.join(mask_folder, filename[:-4] + ".png")
polygons = parsing_mask(mask_fname)
labels = []
for index, object in enumerate(root.findall("object")):
name = object.find("name").text
isCrowd = int(object.find("difficult").text)
labels.append(
{
"label": name,
"bbox": "null",
"isCrowd": isCrowd,
"polygon": polygons[index],
}
)
# build the jsonl file
image_filename = root.find("filename").text
_, file_extension = os.path.splitext(image_filename)
json_line = dict(json_line_sample)
json_line["image_url"] = (
json_line["image_url"] + "images/" + image_filename
)
json_line["image_details"]["format"] = file_extension[1:]
json_line["image_details"]["width"] = width
json_line["image_details"]["height"] = height
json_line["label"] = labels
if i % train_validation_ratio == 0:
# validation annotation
validation_f.write(json.dumps(json_line) + "\n")
else:
# train annotation
train_f.write(json.dumps(json_line) + "\n")
else:
print("Skipping unknown file: {}".format(filename))
if __name__ == "__main__":
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument(
"--data_path",
type=str,
help="the directory contains images, annotations, and masks",
)
args, remaining_args = parser.parse_known_args()
data_path = args.data_path
convert_mask_in_VOC_to_jsonl(data_path)
```
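`convert_mask_to_polygon` above takes a `(1, height, width)` score mask and returns polygons whose x/y coordinates are normalized to `[0, 1]`. A small self-contained check (a sketch; it assumes this file is importable as `jsonl_converter` and that its dependencies are installed):
```python
# Sketch (not part of the converter): run convert_mask_to_polygon on a toy
# mask with a single rectangular object and inspect the normalized output.
import numpy as np
from jsonl_converter import convert_mask_to_polygon

mask = np.zeros((1, 10, 10), dtype=np.float32)
mask[0, 2:8, 3:9] = 1.0  # one rectangular "object"

for polygon in convert_mask_to_polygon(mask):
    # coordinates alternate x, y and are normalized by width/height
    print(len(polygon) // 2, "points:", polygon)
```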
#### File: 2e_image_classification_keras_minist_convnet/train/train.py
```python
from tensorflow import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.utils import to_categorical
from keras.callbacks import Callback
import argparse
from pathlib import Path
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import mlflow
# Get input file
def get_file(f):
f = Path(f)
if f.is_file():
return f
else:
files = list(f.iterdir())
if len(files) == 1:
return files[0]
else:
raise Exception("********This path contains more than one file*******")
def train(train_input, model_output, epochs):
train_file = get_file(train_input)
data_train = pd.read_csv(train_file, header=None)
X = np.array(data_train.iloc[:, 1:])
y = to_categorical(np.array(data_train.iloc[:, 0]))
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
    # Split off validation data to optimize the classifier during training
X_train, X_val, y_train, y_val = train_test_split(
X, y, test_size=0.2, random_state=13
)
X_train = (
X_train.reshape(X_train.shape[0], img_rows, img_cols, 1).astype("float32") / 255
)
X_val = X_val.reshape(X_val.shape[0], img_rows, img_cols, 1).astype("float32") / 255
batch_size = 256
num_classes = 10
epochs = epochs
# Construct neuron network
model = Sequential()
model.add(
Conv2D(
32,
kernel_size=(3, 3),
activation="relu",
kernel_initializer="he_normal",
input_shape=input_shape,
)
)
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.3))
model.add(Dense(num_classes, activation="softmax"))
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=["accuracy"],
)
# Log metrics
class LogRunMetrics(Callback):
# callback at the end of every epoch
def on_epoch_end(self, epoch, log):
# log a value repeated which creates a list
mlflow.log_metric("Loss", log["loss"])
mlflow.log_metric("Accuracy", log["accuracy"])
history = model.fit(
X_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(X_val, y_val),
callbacks=[LogRunMetrics()],
)
# Log an image for training loss and accuracy
fig = plt.figure(figsize=(6, 3))
plt.title("Fashion MNIST with Keras ({} epochs)".format(epochs), fontsize=14)
plt.plot(history.history["accuracy"], "b-", label="Accuracy", lw=4, alpha=0.5)
plt.plot(history.history["loss"], "r--", label="Loss", lw=4, alpha=0.5)
plt.legend(fontsize=12)
plt.grid(True)
mlflow.log_figure(fig, "Loss v.s. Accuracy.png")
# Output model file
model.save(model_output + "/image_classification_model.h5")
```
#### File: src/pytorch_dl_train/profiling.py
```python
import os
import time
import logging
import torch
import mlflow
import tempfile
from torch.profiler import profile, record_function, ProfilerActivity
def markdown_trace_handler(dir_name: str, rank: int = 0):
"""This handler can be used inside torch.profiler call to output
tables in markdown format"""
def _handler_fn(prof) -> None:
if not os.path.isdir(dir_name):
try:
os.makedirs(dir_name, exist_ok=True)
except Exception:
raise RuntimeError("Can't create directory: " + dir_name)
# Note: trying to identify a unique name for the file
file_name = os.path.join(
dir_name,
f"stacks_rank{rank}_step{prof.step_num}_t{int(time.time() * 1000)}.ms",
)
logging.getLogger(__name__).info(
f"Exporting profiler trace as markdown at {file_name}"
)
# generate report in markdown format
markdown = ["# Pytorch Profiler report"]
markdown.append("## Average by cuda time")
markdown.append("```")
markdown.append(
prof.key_averages().table(sort_by="self_cuda_time_total", row_limit=-1)
)
markdown.append("```")
with open(file_name, "w") as out_file:
out_file.write("\n".join(markdown))
return _handler_fn
def composite_trace_handler(handler_list):
"""This can call multiple trace handlers inside one"""
def _handler_fn(prof) -> None:
for handler in handler_list:
handler(prof)
return _handler_fn
def export_stack_trace_handler(
dir_name: str, rank: int = 0, metrics=["self_cuda_time_total"]
):
"""This handler can be used inside torch.profiler call to output
tables in markdown format"""
def _handler_fn(prof) -> None:
if not os.path.isdir(dir_name):
try:
os.makedirs(dir_name, exist_ok=True)
except Exception:
raise RuntimeError("Can't create directory: " + dir_name)
# Note: trying to identify a unique name for the file
for metric in metrics:
file_name = os.path.join(
dir_name,
f"stacks_{metric}_rank{rank}_step{prof.step_num}_t{ int(time.time() * 1000)}.txt",
)
logging.getLogger(__name__).info(
f"Exporting {metric} stacks as text at {file_name}"
)
prof.export_stacks(file_name, metric)
return _handler_fn
class PyTorchProfilerHandler:
"""This class handles the initialization and setup of PyTorch profiler"""
def __init__(self, enabled=False, rank=None):
"""Constructor.
Args:
            enabled (bool): is profiling enabled?
            rank (int): rank of the current process/node
"""
self.logger = logging.getLogger(__name__)
self.enabled = enabled
self.rank = rank
self.profiler_output_tmp_dir = None
self.profiler = None
def start_profiler(self):
"""Setup and start the pytorch profiler.
Returns:
profiler (torch.profiler): the profiler
"""
if self.enabled:
self.profiler_output_tmp_dir = tempfile.TemporaryDirectory()
self.logger.info(
f"Starting profiler (enabled=True) with tmp dir {self.profiler_output_tmp_dir.name}."
)
## profiler activities CPU/GPU
activities = [ProfilerActivity.CPU]
if torch.cuda.is_available():
self.logger.info(f"Enabling CUDA in profiler.")
activities.append(ProfilerActivity.CUDA)
## handlers for exporting profile at each step
# we're creating a list to export in multiple formats
trace_handlers = []
# export in markdown
markdown_logs_export = os.path.join(
self.profiler_output_tmp_dir.name, "markdown"
)
trace_handlers.append(
markdown_trace_handler(markdown_logs_export, rank=self.rank)
)
# export stacks in txt
stacks_logs_export = os.path.join(
self.profiler_output_tmp_dir.name, "stacks"
)
stack_metrics = ["self_cpu_time_total"]
if torch.cuda.is_available():
stack_metrics.append("self_cuda_time_total")
trace_handlers.append(
export_stack_trace_handler(
stacks_logs_export, rank=self.rank, metrics=stack_metrics
)
)
# export tensorboard
tensorboard_logs_export = os.path.join(
self.profiler_output_tmp_dir.name, "tensorboard_logs"
)
trace_handlers.append(
torch.profiler.tensorboard_trace_handler(tensorboard_logs_export)
)
# profiler takes 1 handler, we're composing all above in a single handler
trace_handler = composite_trace_handler(trace_handlers)
# process every single step
profiler_schedule = torch.profiler.schedule(wait=0, warmup=0, active=1)
# initialize profiler
self.profiler = torch.profiler.profile(
schedule=profiler_schedule,
record_shapes=False,
profile_memory=True,
activities=activities,
with_stack=True, # needed to export stacks
on_trace_ready=trace_handler,
)
self.profiler.start()
else:
self.logger.info(f"Profiler not started (enabled=False).")
self.profiler = None
return self.profiler
def stop_profiler(self) -> None:
"""Stops the pytorch profiler and logs the outputs using mlflow"""
if self.profiler:
self.logger.info(f"Stopping profiler.")
self.profiler.stop()
# log via mlflow
self.logger.info(
f"MLFLOW log {self.profiler_output_tmp_dir.name} as an artifact."
)
mlflow.log_artifacts(
self.profiler_output_tmp_dir.name, artifact_path="profiler"
)
self.logger.info(
f"Clean up profiler temp dir {self.profiler_output_tmp_dir.name}"
)
self.profiler_output_tmp_dir.cleanup()
else:
self.logger.info(
"Not stopping profiler as it was not started in the first place."
)
```
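Taken together, `PyTorchProfilerHandler` is meant to wrap a training loop: start it once, call `step()` on the returned profiler each iteration, and stop it afterwards so the markdown, stack, and TensorBoard traces are logged to MLflow. A minimal usage sketch (the workload below is a placeholder, and the module is assumed to be importable as `profiling`):
```python
# Usage sketch (not part of the module): drive the profiler around a dummy loop.
import torch
from profiling import PyTorchProfilerHandler

handler = PyTorchProfilerHandler(enabled=True, rank=0)
profiler = handler.start_profiler()

for step in range(3):
    x = torch.randn(256, 256)   # placeholder work for the profiler to record
    _ = x @ x
    if profiler:
        profiler.step()         # marks a step; triggers the trace handlers

handler.stop_profiler()         # exports traces and logs them via mlflow
```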
#### File: src/pytorch_dl_train/train.py
```python
import os
import time
import json
import pickle
import logging
import argparse
from tqdm import tqdm
from distutils.util import strtobool
import mlflow
# the long list of torch imports
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.optim import lr_scheduler
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from torch.profiler import record_function
# internal imports
from model import load_model, MODEL_ARCH_LIST
from image_io import build_image_datasets
from profiling import PyTorchProfilerHandler
class PyTorchDistributedModelTrainingSequence:
"""Generic class to run the sequence for training a PyTorch model
using distributed training."""
def __init__(self):
"""Constructor"""
self.logger = logging.getLogger(__name__)
# DATA
self.training_data_sampler = None
self.training_data_loader = None
self.validation_data_loader = None
# MODEL
self.model = None
self.labels = []
self.model_signature = None
# DISTRIBUTED CONFIG
self.world_size = 1
self.world_rank = 0
self.local_world_size = 1
self.local_rank = 0
self.multinode_available = False
self.cpu_count = os.cpu_count()
self.device = None
# NOTE: if we're running multiple nodes, this indicates if we're on first node
self.self_is_main_node = True
# TRAINING CONFIGS
self.dataloading_config = None
self.training_config = None
# PROFILER
self.profiler = None
self.profiler_output_tmp_dir = None
#####################
### SETUP METHODS ###
#####################
def setup_config(self, args):
"""Sets internal variables using provided CLI arguments (see build_arguments_parser()).
In particular, sets device(cuda) and multinode parameters."""
self.dataloading_config = args
self.training_config = args
# verify parameter default values
if self.dataloading_config.num_workers is None:
self.dataloading_config.num_workers = 0
if self.dataloading_config.num_workers < 0:
self.dataloading_config.num_workers = os.cpu_count()
if self.dataloading_config.num_workers == 0:
self.logger.warning(
"You specified num_workers=0, forcing prefetch_factor to be discarded."
)
self.dataloading_config.prefetch_factor = None
        # NOTE: strtobool returns an int, converting to bool explicitly
self.dataloading_config.pin_memory = bool(self.dataloading_config.pin_memory)
self.dataloading_config.non_blocking = bool(
self.dataloading_config.non_blocking
)
# DISTRIBUTED: detect multinode config
# depending on the Azure ML distribution.type, different environment variables will be provided
# to configure DistributedDataParallel
self.distributed_backend = args.distributed_backend
if self.distributed_backend == "nccl":
self.world_size = int(os.environ.get("WORLD_SIZE", "1"))
self.world_rank = int(os.environ.get("RANK", "0"))
self.local_world_size = int(os.environ.get("LOCAL_WORLD_SIZE", "1"))
self.local_rank = int(os.environ.get("LOCAL_RANK", "0"))
self.multinode_available = self.world_size > 1
self.self_is_main_node = self.world_rank == 0
elif self.distributed_backend == "mpi":
# Note: Distributed pytorch package doesn't have MPI built in.
# MPI is only included if you build PyTorch from source on a host that has MPI installed.
self.world_size = int(os.environ.get("OMPI_COMM_WORLD_SIZE", "1"))
self.world_rank = int(os.environ.get("OMPI_COMM_WORLD_RANK", "0"))
self.local_world_size = int(
os.environ.get("OMPI_COMM_WORLD_LOCAL_SIZE", "1")
)
self.local_rank = int(os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK", "0"))
self.multinode_available = self.world_size > 1
self.self_is_main_node = self.world_rank == 0
else:
raise NotImplementedError(
f"distributed_backend={self.distributed_backend} is not implemented yet."
)
# Use CUDA if it is available
if torch.cuda.is_available():
self.logger.info(
f"Setting up torch.device for CUDA for local gpu:{self.local_rank}"
)
self.device = torch.device(self.local_rank)
else:
self.logger.info(f"Setting up torch.device for cpu")
self.device = torch.device("cpu")
if self.multinode_available:
self.logger.info(
f"Running in multinode with backend={self.distributed_backend} local_rank={self.local_rank} rank={self.world_rank} size={self.world_size}"
)
# DISTRIBUTED: this is required to initialize the pytorch backend
torch.distributed.init_process_group(
self.distributed_backend,
rank=self.world_rank,
world_size=self.world_size,
)
else:
self.logger.info(f"Not running in multinode.")
# DISTRIBUTED: in distributed mode, you want to report parameters
# only from main process (rank==0) to avoid conflict
if self.self_is_main_node:
# MLFLOW: report relevant parameters using mlflow
mlflow.log_params(
{
# log some distribution params
"nodes": self.world_size // self.local_world_size,
"instance_per_node": self.local_world_size,
"cuda_available": torch.cuda.is_available(),
"cuda_device_count": torch.cuda.device_count(),
"distributed": self.multinode_available,
"distributed_backend": self.distributed_backend,
# data loading params
"batch_size": self.dataloading_config.batch_size,
"num_workers": self.dataloading_config.num_workers,
"prefetch_factor": self.dataloading_config.prefetch_factor,
"pin_memory": self.dataloading_config.pin_memory,
"non_blocking": self.dataloading_config.non_blocking,
# training params
"model_arch": self.training_config.model_arch,
"model_arch_pretrained": self.training_config.model_arch_pretrained,
"learning_rate": self.training_config.learning_rate,
"num_epochs": self.training_config.num_epochs,
# profiling params
"enable_profiling": self.training_config.enable_profiling,
}
)
def setup_datasets(
self,
training_dataset: torch.utils.data.Dataset,
validation_dataset: torch.utils.data.Dataset,
labels: list,
):
"""Creates and sets up dataloaders for training/validation datasets."""
self.labels = labels
# DISTRIBUTED: you need to use a DistributedSampler that wraps your dataset
# it will draw a different sample on each node/process to distribute data sampling
self.training_data_sampler = DistributedSampler(
training_dataset, num_replicas=self.world_size, rank=self.world_rank
)
# setting up DataLoader with the right arguments
optional_data_loading_kwargs = {}
if self.dataloading_config.num_workers > 0:
# NOTE: this option _ONLY_ applies if num_workers > 0
            # or else DataLoader will raise an exception
optional_data_loading_kwargs[
"prefetch_factor"
] = self.dataloading_config.prefetch_factor
self.training_data_loader = DataLoader(
training_dataset,
batch_size=self.dataloading_config.batch_size,
num_workers=self.dataloading_config.num_workers, # self.cpu_count,
pin_memory=self.dataloading_config.pin_memory,
# DISTRIBUTED: the sampler needs to be provided to the DataLoader
sampler=self.training_data_sampler,
# all other args
**optional_data_loading_kwargs,
)
# DISTRIBUTED: we don't need a sampler for validation set
# it is used as-is in every node/process
self.validation_data_loader = DataLoader(
validation_dataset,
batch_size=self.dataloading_config.batch_size,
num_workers=self.dataloading_config.num_workers, # self.cpu_count,
pin_memory=self.dataloading_config.pin_memory,
)
if self.self_is_main_node:
# MLFLOW: report relevant parameters using mlflow
mlflow.log_params({"num_classes": len(labels)})
def setup_model(self, model):
"""Configures a model for training."""
self.logger.info(f"Setting up model to use device {self.device}")
self.model = model.to(self.device)
# DISTRIBUTED: the model needs to be wrapped in a DistributedDataParallel class
if self.multinode_available:
self.logger.info(f"Setting up model to use DistributedDataParallel.")
self.model = torch.nn.parallel.DistributedDataParallel(self.model)
# fun: log the number of parameters
params_count = 0
for param in model.parameters():
if param.requires_grad:
params_count += param.numel()
self.logger.info(
"MLFLOW: model_param_count={:.2f} (millions)".format(
round(params_count / 1e6, 2)
)
)
if self.self_is_main_node:
mlflow.log_params({"model_param_count": round(params_count / 1e6, 2)})
return self.model
########################
### TRAINING METHODS ###
########################
def _epoch_eval(self, epoch, criterion):
"""Called during train() for running the eval phase of one epoch."""
with torch.no_grad():
num_correct = 0
num_total_images = 0
running_loss = 0.0
for images, targets in tqdm(self.validation_data_loader):
with record_function("eval.to_device"):
images = images.to(
self.device, non_blocking=self.dataloading_config.non_blocking
)
one_hot_targets = targets.to(
self.device, non_blocking=self.dataloading_config.non_blocking
)
with record_function("eval.forward"):
outputs = self.model(images)
loss = criterion(outputs, one_hot_targets)
running_loss += loss.item() * images.size(0)
correct = torch.argmax(outputs, dim=-1) == (targets.to(self.device))
num_correct += torch.sum(correct).item()
num_total_images += len(images)
return running_loss, num_correct, num_total_images
def _epoch_train(self, epoch, optimizer, criterion):
"""Called during train() for running the train phase of one epoch."""
self.model.train()
self.training_data_sampler.set_epoch(epoch)
num_correct = 0
num_total_images = 0
running_loss = 0.0
for images, targets in tqdm(self.training_data_loader):
# PROFILER: record_function will report to the profiler (if enabled)
# here a specific wall time for a given block of code
with record_function("train.to_device"):
images = images.to(
self.device, non_blocking=self.dataloading_config.non_blocking
)
one_hot_targets = torch.nn.functional.one_hot(
targets.to(
self.device, non_blocking=self.dataloading_config.non_blocking
),
num_classes=len(self.labels),
).float()
with record_function("train.forward"):
# zero the parameter gradients
optimizer.zero_grad()
outputs = self.model(images)
loss = criterion(outputs, one_hot_targets)
correct = torch.argmax(outputs, dim=-1) == (targets.to(self.device))
running_loss += loss.item() * images.size(0)
num_correct += torch.sum(correct).item()
num_total_images += len(images)
# PROFILER: record_function will report to the profiler (if enabled)
# here a specific wall time for a given block of code
with record_function("train.backward"):
loss.backward()
optimizer.step()
return running_loss, num_correct, num_total_images
def train(self, epochs=None):
"""Trains the model.
Args:
epochs (int, optional): if not provided uses internal config
"""
if epochs is None:
epochs = self.training_config.num_epochs
# Observe that all parameters are being optimized
optimizer = optim.SGD(
self.model.parameters(),
lr=self.training_config.learning_rate,
momentum=self.training_config.momentum,
nesterov=True,
weight_decay=1e-4,
)
# criterion = nn.BCEWithLogitsLoss()
criterion = nn.CrossEntropyLoss()
# Decay LR by a factor of 0.1 every 7 epochs
scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
# DISTRIBUTED: you'll note that this loop has nothing specifically "distributed"
# that's because most of the changes are in the backend (DistributedDataParallel)
for epoch in range(epochs):
self.logger.info(f"Starting epoch={epoch}")
# start timer for epoch time metric
epoch_start = time.time()
# TRAIN: loop on training set and return metrics
running_loss, num_correct, num_samples = self._epoch_train(
epoch, optimizer, criterion
)
epoch_train_loss = running_loss / num_samples
epoch_train_acc = num_correct / num_samples
# report metric values in stdout
self.logger.info(
f"MLFLOW: epoch_train_loss={epoch_train_loss} epoch_train_acc={epoch_train_acc} epoch={epoch}"
)
# MLFLOW / DISTRIBUTED: report metrics only from main node
if self.self_is_main_node:
mlflow.log_metric("epoch_train_loss", epoch_train_loss, step=epoch)
mlflow.log_metric("epoch_train_acc", epoch_train_acc, step=epoch)
# EVAL: run evaluation on validation set and return metrics
running_loss, num_correct, num_samples = self._epoch_eval(epoch, criterion)
epoch_valid_loss = running_loss / num_samples
epoch_valid_acc = num_correct / num_samples
# PROFILER: use profiler.step() to mark a step in training
# the pytorch profiler uses this internally to trigger
# saving the traces in different files
if self.profiler:
self.profiler.step()
# stop timer
epoch_train_time = time.time() - epoch_start
self.logger.info(
f"MLFLOW: epoch_valid_loss={epoch_valid_loss} epoch_valid_acc={epoch_valid_acc} epoch={epoch}"
)
self.logger.info(
f"MLFLOW: epoch_train_time={epoch_train_time} epoch={epoch}"
)
# MLFLOW / DISTRIBUTED: report metrics only from main node
if self.self_is_main_node:
mlflow.log_metric("epoch_valid_loss", epoch_valid_loss, step=epoch)
mlflow.log_metric("epoch_valid_acc", epoch_valid_acc, step=epoch)
mlflow.log_metric("epoch_train_time", epoch_train_time, step=epoch)
#################
### MODEL I/O ###
#################
def save(self, output_dir: str, name: str = "dev", register_as: str = None) -> None:
# DISTRIBUTED: you want to save the model only from the main node/process
# in data distributed mode, all models should theoretically be the same
if self.self_is_main_node:
self.logger.info(f"Saving model and classes in {output_dir}...")
# create output directory just in case
os.makedirs(output_dir, exist_ok=True)
if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
# DISTRIBUTED: to export model, you need to get it out of the DistributedDataParallel class
self.logger.info(
"Model was distibuted, we will export DistributedDataParallel.module"
)
model_to_save = self.model.module.to("cpu")
else:
model_to_save = self.model.to("cpu")
# MLFLOW: mlflow has a nice method to export the model automatically
# add tags and environment for it. You can then use it in Azure ML
# to register your model to an endpoint.
mlflow.pytorch.log_model(
model_to_save,
artifact_path="final_model",
registered_model_name=register_as, # also register it if name is provided
signature=self.model_signature,
)
def build_arguments_parser(parser: argparse.ArgumentParser = None):
"""Builds the argument parser for CLI settings"""
if parser is None:
parser = argparse.ArgumentParser()
group = parser.add_argument_group(f"Training Inputs")
group.add_argument(
"--train_images",
type=str,
required=True,
help="Path to folder containing training images",
)
group.add_argument(
"--valid_images",
type=str,
required=True,
help="path to folder containing validation images",
)
group = parser.add_argument_group(f"Training Outputs")
group.add_argument(
"--model_output",
type=str,
required=False,
default=None,
help="Path to write final model",
)
group.add_argument(
"--register_model_as",
type=str,
required=False,
default=None,
help="Name to register final model in MLFlow",
)
group = parser.add_argument_group(f"Data Loading Parameters")
group.add_argument(
"--batch_size",
type=int,
required=False,
default=64,
help="Train/valid data loading batch size (default: 64)",
)
group.add_argument(
"--num_workers",
type=int,
required=False,
default=None,
help="Num workers for data loader (default: -1 => all cpus available)",
)
group.add_argument(
"--prefetch_factor",
type=int,
required=False,
default=2,
help="Data loader prefetch factor (default: 2)",
)
group.add_argument(
"--pin_memory",
type=strtobool,
required=False,
default=True,
help="Pin Data loader prefetch factor (default: True)",
)
group.add_argument(
"--non_blocking",
type=strtobool,
required=False,
default=False,
help="Use non-blocking transfer to device (default: False)",
)
group = parser.add_argument_group(f"Model/Training Parameters")
group.add_argument(
"--model_arch",
type=str,
required=False,
choices=MODEL_ARCH_LIST,
default="resnet18",
help="Which model architecture to use (default: resnet18)",
)
group.add_argument(
"--model_arch_pretrained",
type=strtobool,
required=False,
default=True,
help="Use pretrained model (default: true)",
)
group.add_argument(
"--distributed_backend",
type=str,
required=False,
choices=["nccl", "mpi"],
default="nccl",
help="Which distributed backend to use.",
)
# DISTRIBUTED: torch.distributed.launch is passing this argument to your script
# it is likely to be deprecated in favor of os.environ['LOCAL_RANK']
# see https://pytorch.org/docs/stable/distributed.html#launch-utility
group.add_argument(
"--local_rank",
type=int,
required=False,
default=None,
help="Passed by torch.distributed.launch utility when running from cli.",
)
group.add_argument(
"--num_epochs",
type=int,
required=False,
default=1,
help="Number of epochs to train for",
)
group.add_argument(
"--learning_rate",
type=float,
required=False,
default=0.01,
help="Learning rate of optimizer",
)
group.add_argument(
"--momentum",
type=float,
required=False,
default=0.01,
help="Momentum of optimizer",
)
group = parser.add_argument_group(f"Monitoring/Profiling Parameters")
group.add_argument(
"--enable_profiling",
type=strtobool,
required=False,
default=False,
help="Enable pytorch profiler.",
)
return parser
def run(args):
"""Run the script using CLI arguments"""
logger = logging.getLogger(__name__)
logger.info(f"Running with arguments: {args}")
# MLFLOW: initialize mlflow (once in entire script)
mlflow.start_run()
# build the image folder datasets
train_dataset, valid_dataset, labels = build_image_datasets(
train_images_dir=args.train_images,
valid_images_dir=args.valid_images,
input_size=224, # size expected by the model
)
# creates the model architecture
model = load_model(
args.model_arch,
output_dimension=len(labels),
pretrained=args.model_arch_pretrained,
)
# use a handler for the training sequence
training_handler = PyTorchDistributedModelTrainingSequence()
# sets cuda and distributed config
training_handler.setup_config(args)
# PROFILER: here we use a helper class to enable profiling
# see profiling.py for the implementation details
training_profiler = PyTorchProfilerHandler(
enabled=bool(args.enable_profiling),
rank=training_handler.world_rank,
)
# PROFILER: set profiler in trainer to call profiler.step() during training
training_handler.profiler = training_profiler.start_profiler()
# creates data loaders from datasets for distributed training
training_handler.setup_datasets(train_dataset, valid_dataset, labels)
# sets the model for distributed training
training_handler.setup_model(model)
# runs training sequence
# NOTE: num_epochs is provided in args
training_handler.train()
# stops profiling (and save in mlflow)
training_profiler.stop_profiler()
# saves final model
if args.model_output:
training_handler.save(
args.model_output,
name=f"epoch-{args.num_epochs}",
register_as=args.register_model_as,
)
# MLFLOW: finalize mlflow (once in entire script)
mlflow.end_run()
def main(cli_args=None):
"""Main function of the script."""
# initialize root logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
formatter = logging.Formatter(
"%(asctime)s : %(levelname)s : %(name)s : %(message)s"
)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
# create argument parser
parser = build_arguments_parser()
# runs on cli arguments
args = parser.parse_args(cli_args) # if None, runs on sys.argv
# run the run function
run(args)
if __name__ == "__main__":
main()
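# --- Editorial note (illustrative, not part of the original script) ----------
# This script expects to be started through a distributed launcher so that one
# process runs per GPU. A hypothetical single-node, 2-GPU invocation (the
# entry-point file name and data paths are assumptions; only the CLI flags are
# the ones defined by build_arguments_parser above) could look like:
#
#   python -m torch.distributed.launch --nproc_per_node=2 train.py \
#       --train_images /data/train --valid_images /data/valid \
#       --batch_size 64 --num_epochs 5 --distributed_backend nccl
#
# Newer PyTorch releases replace --local_rank with the LOCAL_RANK/RANK/
# WORLD_SIZE environment variables set by torchrun (see the argument parser
# comment above).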
``` |
{
"source": "jplummer01/responsible-ai-toolbox",
"score": 3
} |
#### File: erroranalysis/tests/test_importances.py
```python
from common_utils import (create_binary_classification_dataset,
create_boston_data, create_cancer_data,
create_iris_data, create_models_classification,
create_models_regression, create_simple_titanic_data,
create_titanic_pipeline)
from erroranalysis._internal.constants import ModelTask
from erroranalysis._internal.error_analyzer import ModelAnalyzer
TOL = 1e-10
class TestImportances(object):
def test_importances_iris(self):
X_train, X_test, y_train, y_test, feature_names, _ = create_iris_data()
models = create_models_classification(X_train, y_train)
for model in models:
categorical_features = []
run_error_analyzer(model, X_test, y_test, feature_names,
categorical_features)
def test_importances_cancer(self):
X_train, X_test, y_train, y_test, feature_names, _ = \
create_cancer_data()
models = create_models_classification(X_train, y_train)
for model in models:
categorical_features = []
run_error_analyzer(model, X_test, y_test, feature_names,
categorical_features)
def test_importances_binary_classification(self):
X_train, y_train, X_test, y_test, _ = \
create_binary_classification_dataset()
feature_names = list(X_train.columns)
models = create_models_classification(X_train, y_train)
for model in models:
categorical_features = []
run_error_analyzer(model, X_test, y_test, feature_names,
categorical_features)
def test_importances_titanic(self):
X_train, X_test, y_train, y_test, numeric, categorical = \
create_simple_titanic_data()
feature_names = categorical + numeric
clf = create_titanic_pipeline(X_train, y_train)
categorical_features = categorical
run_error_analyzer(clf, X_test, y_test, feature_names,
categorical_features)
def test_importances_boston(self):
X_train, X_test, y_train, y_test, feature_names = \
create_boston_data()
models = create_models_regression(X_train, y_train)
for model in models:
categorical_features = []
run_error_analyzer(model, X_test, y_test, feature_names,
categorical_features)
def run_error_analyzer(model, X_test, y_test, feature_names,
categorical_features):
model_analyzer = ModelAnalyzer(model, X_test, y_test,
feature_names,
categorical_features)
scores = model_analyzer.compute_importances()
if model_analyzer.model_task == ModelTask.CLASSIFICATION:
diff = model.predict(model_analyzer.dataset) != model_analyzer.true_y
else:
diff = model.predict(model_analyzer.dataset) - model_analyzer.true_y
assert isinstance(scores, list)
assert len(scores) == len(feature_names)
# If model predicted perfectly, assert all scores are zeros
if not any(diff):
assert all(abs(score - 0) < TOL for score in scores)
else:
assert any(score != 0 for score in scores)
```
#### File: responsibleai/responsibleai/_data_validations.py
```python
from typing import List, Optional
import numpy as np
import pandas as pd
from responsibleai.exceptions import UserConfigValidationException
def validate_train_test_categories(
train_data: pd.DataFrame,
test_data: pd.DataFrame,
rai_compute_type: str,
categoricals: Optional[List[str]] = None,
):
if categoricals is None:
return
discovered = {}
for column in train_data.columns:
if column in categoricals:
train_unique = np.unique(train_data[column])
test_unique = np.unique(test_data[column])
difference = np.setdiff1d(test_unique, train_unique)
if difference.shape[0] != 0:
discovered[column] = difference.tolist()
if len(discovered) > 0:
message = ("{} requires that every category of "
"categorical features present in the test data "
"be also present in the train data. "
"Categories missing from train data: {}")
raise UserConfigValidationException(
message.format(rai_compute_type, discovered)
)
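# --- Editorial usage sketch (not part of the original module) ----------------
# A minimal illustration of the check above, using invented column and category
# names: the test set contains a category ("green") never seen in training, so
# the call raises UserConfigValidationException.
if __name__ == "__main__":
    train = pd.DataFrame({"color": ["red", "blue"], "value": [1, 2]})
    test = pd.DataFrame({"color": ["red", "green"], "value": [3, 4]})
    try:
        validate_train_test_categories(
            train, test,
            rai_compute_type="error analysis",
            categoricals=["color"])
    except UserConfigValidationException as exc:
        print(exc)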
```
#### File: responsibleai/tests/test_state_directory_management.py
```python
from pathlib import Path
from responsibleai._tools.shared.state_directory_management import \
DirectoryManager
class TestStateDirectoryManagement:
def _verify_directory_manager_operations(self, directory_manager):
# Test the create APIs
config_directory_path = directory_manager.create_config_directory()
assert isinstance(config_directory_path, Path)
assert config_directory_path.exists()
assert DirectoryManager.CONFIGURATION in str(config_directory_path)
data_directory_path = directory_manager.create_data_directory()
assert isinstance(data_directory_path, Path)
assert data_directory_path.exists()
assert DirectoryManager.DATA in str(data_directory_path)
generators_directory_path = \
directory_manager.create_generators_directory()
assert isinstance(generators_directory_path, Path)
assert generators_directory_path.exists()
assert DirectoryManager.GENERATORS in str(generators_directory_path)
# Test the get APIs
config_directory_path = directory_manager.get_config_directory()
assert isinstance(config_directory_path, Path)
assert config_directory_path.exists()
assert DirectoryManager.CONFIGURATION in str(config_directory_path)
data_directory_path = directory_manager.get_data_directory()
assert isinstance(data_directory_path, Path)
assert data_directory_path.exists()
assert DirectoryManager.DATA in str(data_directory_path)
generators_directory_path = \
directory_manager.create_generators_directory()
assert isinstance(generators_directory_path, Path)
assert generators_directory_path.exists()
assert DirectoryManager.GENERATORS in str(generators_directory_path)
def test_directory_manager(self, tmpdir):
parent_directory = tmpdir.mkdir('parent_directory')
dm_one = DirectoryManager(
parent_directory_path=parent_directory,
sub_directory_name='known')
assert dm_one.parent_directory_path.exists()
assert dm_one.sub_directory_name == 'known'
assert (dm_one.parent_directory_path /
dm_one.sub_directory_name).exists()
self._verify_directory_manager_operations(dm_one)
assert isinstance(
DirectoryManager.list_sub_directories(parent_directory),
list)
assert len(
DirectoryManager.list_sub_directories(parent_directory)) == 1
assert 'known' in\
DirectoryManager.list_sub_directories(parent_directory)
dm_two = DirectoryManager(
parent_directory_path=parent_directory)
assert dm_two.parent_directory_path.exists()
assert dm_two.sub_directory_name is not None
assert (dm_two.parent_directory_path /
dm_two.sub_directory_name).exists()
self._verify_directory_manager_operations(dm_two)
assert isinstance(
DirectoryManager.list_sub_directories(parent_directory),
list)
assert len(
DirectoryManager.list_sub_directories(parent_directory)) == 2
assert dm_two.sub_directory_name in\
DirectoryManager.list_sub_directories(parent_directory)
``` |
{
"source": "jpluscplusm/paas-cf",
"score": 3
} |
#### File: paas-cf/scripts/reset-org.py
```python
import argparse
import re
import subprocess
import sys
import time
# python2 compatibility
try: input = raw_input
except NameError: pass
class SubprocessException(Exception):
pass
class OrgReset(object):
def __init__(self, org, initial_space, org_managers, users, users_are_org_managers, users_are_space_managers, users_are_space_developers, org_quota):
self.org = org
self.initial_space = initial_space
self.org_managers = org_managers
self.users = users
self.users_are_org_managers = users_are_org_managers
self.users_are_space_managers = users_are_space_managers
self.users_are_space_developers = users_are_space_developers
self.org_quota = org_quota
def confirm(self):
if not self.confirm_user_input('Are you sure you want to delete org %s? (y/n): ' % self.org):
sys.exit("'No future change is possible.' (phew, not deleted)")
def confirm_user_input(self, question):
response = input(question)
return response == "y"
def whoami(self):
self.user = self.parse_current_user(self.run(['cf', 'target']))
def delete_org(self):
self.run_with_retry(['cf', 'delete-org', self.org, '-f'], 20, lambda err:
'one or more resources within could not be deleted' in err.output
)
def create_org(self):
self.run(['cf', 'create-org', self.org])
self.run(['cf', 'unset-org-role', self.user, self.org, 'OrgManager'])
if self.org_quota:
self.run(['cf', 'set-quota', self.org, self.org_quota])
def create_space(self):
self.run(['cf', 'create-space', self.initial_space, '-o', self.org])
self.run(['cf', 'unset-space-role', self.user, self.org, self.initial_space, 'SpaceManager'])
self.run(['cf', 'unset-space-role', self.user, self.org, self.initial_space, 'SpaceDeveloper'])
def set_roles(self):
for org_manager in self.org_managers:
self.run(['cf', 'set-org-role', org_manager, self.org, 'OrgManager'])
for user in self.users:
if self.users_are_org_managers:
self.run(['cf', 'set-org-role', user, self.org, 'OrgManager'])
if self.users_are_space_managers:
self.run(['cf', 'set-space-role', user, self.org, self.initial_space, 'SpaceManager'])
if self.users_are_space_developers:
self.run(['cf', 'set-space-role', user, self.org, self.initial_space, 'SpaceDeveloper'])
def run(self, command):
print('Running \'%s\'' % ' '.join(command))
try:
output = subprocess.check_output(command)
print(output)
return output
except subprocess.CalledProcessError as err:
raise SubprocessException('Aborting: \'%s\' failed with exit code %d\n%s' % (err.cmd, err.returncode, err.output))
def run_with_retry(self, command, retry_count, retry_func):
print('Running with retry \'%s\'' % ' '.join(command))
try:
output = subprocess.check_output(command)
print(output)
return output
except subprocess.CalledProcessError as err:
if retry_func(err):
print(err.output)
retry_count -= 1
if retry_count <= 0:
raise SubprocessException('Timed out retrying \'%s\'' % ' '.join(command))
print('Sleeping for 30 secs before retrying')
time.sleep(30)
return self.run_with_retry(command, retry_count, retry_func)
else:
raise SubprocessException('Aborting: \'%s\' failed with exit code %d\n%s' % (err.cmd, err.returncode, err.output))
def parse_current_user(self, target_output):
match = re.search(r'[Uu]ser:\s+(\S+)', target_output)
return match.group(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', metavar='ORG_NAME', help='Which org to wipe and recreate', required=True)
parser.add_argument('-s', metavar='INITIAL_SPACE', help='Initial space to create (default is sandbox)', default='sandbox')
parser.add_argument('-m', metavar='ORG_MANAGER_EMAIL', help='Emails of users who will manage the new org', nargs='*', default=[])
parser.add_argument('-u', metavar='USER_EMAIL', help='Emails of team members to add to the org', nargs='*', default=[])
parser.add_argument('--org-managers', help='Add the OrgManager role for the listed team members', action='store_true', default=False)
parser.add_argument('--space-managers', help='Add the SpaceManager role for the listed team members in the initial space', action='store_true', default=False)
parser.add_argument('--space-developers', help='Add the SpaceDeveloper role for the listed team members in the initial space', action='store_true', default=False)
parser.add_argument('--quota', help='Name of the org quota to use')
args = parser.parse_args()
org_reset = OrgReset(args.o, args.s, args.m, args.u, args.org_managers, args.space_managers, args.space_developers, args.quota)
org_reset.confirm()
org_reset.whoami()
org_reset.delete_org()
org_reset.create_org()
org_reset.create_space()
org_reset.set_roles()
``` |
{
"source": "jplusplus/feowl",
"score": 2
} |
#### File: feowl-api/feowl/forms.py
```python
from django import forms
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from django.utils.translation import ugettext_lazy as _
from models import PowerReport, Device, Contributor, Area
class ContributorAdminForm(forms.ModelForm):
name = forms.RegexField(
label=_("Name"), max_length=30, regex=r"^[\w.@+-]+$",
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password = ReadOnlyPasswordHashField(label=_("Password"),
help_text=_("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>."))
def clean_password(self):
return self.initial["password"]
class Meta:
model = Contributor
class ContributorForm(forms.ModelForm):
class Meta:
model = Contributor
class PowerReportForm(forms.ModelForm):
class Meta:
model = PowerReport
def clean_duration(self):
duration = self.cleaned_data['duration']
#ensure that duration is a positive number (PositiveInteger fields can be == 0)
if duration == 0:
raise forms.ValidationError('Duration values must be larger than 0.')
return duration
class DeviceForm(forms.ModelForm):
class Meta:
model = Device
class AreaForm(forms.ModelForm):
class Meta:
model = Area
``` |
{
"source": "jplusplus/newsworthycharts",
"score": 3
} |
#### File: newsworthycharts/lib/formatter.py
```python
from babel.numbers import format_decimal, format_percent, Locale
from babel.units import format_unit
from decimal import Decimal
class Formatter(object):
"""A formatter for a specific language and locale.
Contains some methods for number and text formatting.
Heavier i18n work should be done before involving newsworthycharts.
Usage:
>>> fmt = Formatter("sv-SE")
>>> fmt.percent(0.14)
"14 %"
"""
def __init__(self, lang, decimals: int=None, scale: str="celsius"):
"""Create formatter for specific locale."""
self.l = Locale.parse(lang.replace("-", "_")) # NOQA
self.language = self.l.language
self.decimals = decimals
self.scale = scale
def __repr__(self):
return "Formatter: " + repr(self.l)
def __str__(self):
return self.l.get_display_name()
def percent(self, x, *args, **kwargs):
if self.decimals is None:
# Show one decimal by default if values is < 1%
if abs(x) < 0.01:
x = round(x, 1 + 2)
else:
x = round(x, 2)
else:
x = round(x, self.decimals + 2)
return format_percent(x, locale=self.l, decimal_quantization=False)
def temperature_short(self, x, *args, **kwargs):
"""Format a temperature in deegrees, without scale letter."""
decimals = self.decimals
if decimals is None:
decimals = 1
x = round(Decimal(x), decimals)
str = format_unit(x, 'temperature-generic', "short", locale=self.l)
return str
def temperature(self, x, *args, **kwargs):
"""Format a temperature in deegrees, with scale letter."""
decimals = self.decimals
if decimals is None:
decimals = 1
scale = "temperature-{}".format(self.scale)
x = round(Decimal(x), decimals)
str = format_unit(x, scale, "short", locale=self.l)
return str
def number(self, x, *args, **kwargs):
"""Format as number.
:param decimals (int): number of decimals.
"""
decimals = self.decimals
if decimals is None:
# Default roundings
if abs(x) < 0.1:
decimals = 2
elif abs(x) < 1:
decimals = 1
else:
decimals = 0
x = round(Decimal(x), decimals)
return format_decimal(x, locale=self.l)
def short_month(self, x, *args, **kwargs):
"""Get a short month string, e.g. 'Jan', from a number.
Numbers above 12 will wrap
"""
if x > 12:
x = x % 12 + 1
return self.l.months['format']['abbreviated'][x]
def month(self, x, *args, **kwargs):
"""Get a month string from a number.
Numbers above 12 will wrap
"""
if x > 12:
x = x % 12 + 1
return self.l.months['format']['wide'][x]
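# --- Editorial usage sketch (not part of the original module) ----------------
# A minimal demo of the class above. It assumes Babel's Swedish locale data is
# available and passes the CLDR unit name "celsius" explicitly; no exact output
# strings are asserted.
if __name__ == "__main__":
    fmt = Formatter("sv-SE", decimals=1, scale="celsius")
    print(fmt.percent(0.142))      # percentage with one decimal, e.g. "14,2 %"
    print(fmt.number(1234.567))    # locale-aware decimal formatting
    print(fmt.temperature(21.44))  # degrees with scale letter
    print(fmt.short_month(3))      # abbreviated month name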
```
#### File: newsworthycharts/test/test_data_list.py
```python
from newsworthycharts.lib.datalist import DataList
def test_csv():
s1 = [
("2018", 1),
("2019", 2),
]
s2 = [
("2017", 3),
("2019", 4),
]
dl = DataList()
dl.append(s1)
dl.append(s2)
rows = dl.as_csv.split("\r\n")
assert rows[0] == "2017,,3"
assert rows[1] == "2018,1,"
assert rows[2] == "2019,2,4"
def test_stacked_values():
s1 = [
("A", 1),
("B", 2),
("C", 5),
]
s2 = [
("A", 2),
("B", None),
("C", 10),
]
dl = DataList()
dl.append(s1)
dl.append(s2)
stacked_values = dl.stacked_values
assert(stacked_values[0] == 1 + 2)
assert(stacked_values[1] == 2 + 0)
assert(stacked_values[2] == 5 + 10)
assert(dl.stacked_max_val == 15)
```
#### File: newsworthycharts/test/test_serial_chart.py
```python
from newsworthycharts import SerialChart
from newsworthycharts.storage import DictStorage, LocalStorage
# store test charts to this folder for visual verfication
OUTPUT_DIR = "test/rendered_charts"
local_storage = LocalStorage(OUTPUT_DIR)
def test_color_function():
container = {}
ds = DictStorage(container)
chart_obj = {
"width": 800,
"height": 600,
"data": [
[
["2016-01-01", -4],
["2017-01-01", 4],
["2018-01-01", None],
["2019-01-01", -1]
]
],
"type": "bars",
"color_fn": "positive_negative",
"highlight": "2019-01-01",
}
c = SerialChart.init_from(chart_obj, storage=ds)
c.render("test", "png")
neutral_color = c._style["neutral_color"]
pos_color = c._style["positive_color"]
neg_color = c._style["negative_color"]
bar_colors = [bar.get_facecolor() for bar in c.ax.patches]
assert(bar_colors[0] == neg_color)
assert(bar_colors[1] == pos_color)
assert(bar_colors[2] == neutral_color)
assert(bar_colors[3] == neg_color)
chart_obj["color_fn"] = "warm_cold"
c = SerialChart.init_from(chart_obj, storage=ds)
c.render("test", "png")
warm_color = c._style["warm_color"]
cold_color = c._style["cold_color"]
bar_colors = [bar.get_facecolor() for bar in c.ax.patches]
assert(bar_colors[0] == cold_color)
assert(bar_colors[1] == warm_color)
assert(bar_colors[2] == neutral_color)
assert(bar_colors[3] == cold_color)
def test_type_property():
container = {}
ds = DictStorage(container)
chart_obj = {
"width": 800,
"height": 600,
"data": [
[
["2016-01-01", -4],
["2017-01-01", 4],
["2018-01-01", 1],
["2019-01-01", -1]
]
],
"type": "bars",
}
# when type="bars"...
c = SerialChart.init_from(chart_obj, storage=ds)
c.render("test", "png")
bars = c.ax.patches
# ...4 bars should be rendered
assert(len(bars) == 4)
# while a type=line...
chart_obj["type"] = "line"
c = SerialChart.init_from(chart_obj, storage=ds)
c.render("test", "png")
#lines = c.ax.patches
# ... should only render one element
# assert(len(lines) == 1)
def test_stacked_bar_chart():
chart_obj = {
"width": 800,
"height": 600,
"data": [
[
["2016-01-01", 1],
["2017-01-01", 4],
["2018-01-01", None],
["2019-01-01", 2]
],
[
["2016-01-01", 3],
["2017-01-01", 12],
["2018-01-01", 1],
["2019-01-01", None]
]
],
"labels": ["the good", "the bad"],
"type": "bars",
}
# when type="bars"...
c = SerialChart.init_from(chart_obj, storage=local_storage)
c.render("stacked_bar_chart_basic", "png")
bars = c.ax.patches
assert(len(bars) == 8)
# Should color with qualitative colors by default
qualitative_colors = c._style["qualitative_colors"]
bar_colors = [bar.get_facecolor() for bar in c.ax.patches]
assert(bar_colors[0] == qualitative_colors[0])
assert(bar_colors[-1] == qualitative_colors[1])
# now highlight
chart_obj["highlight"] = "the good"
c = SerialChart.init_from(chart_obj, storage=local_storage)
c.render("stacked_bar_chart_highlighted", "png")
bar_colors = [bar.get_facecolor() for bar in c.ax.patches]
assert(bar_colors[0] == c._style["strong_color"])
assert(bar_colors[-1] == c._style["neutral_color"])
# specify colors
chart_obj["colors"] = ["red", "green"]
chart_obj["highlight"] = None
c = SerialChart.init_from(chart_obj, storage=local_storage)
c.render("stacked_bar_chart_spec_colors", "png")
bar_colors = [bar.get_facecolor() for bar in c.ax.patches]
assert(bar_colors[0] == (1.0, 0.0, 0.0, 1.0)) # red
def test_bar_chart_with_ymax():
container = {}
ds = DictStorage(container)
# all negative values with fixed ymax to 0
chart_obj = {
"width": 800,
"height": 600,
"data": [
[
["2016-01-01", -4],
["2017-01-01", -6],
["2018-01-01", -3],
["2019-01-01", -2]
]
],
"ymax": 0,
"type": "bars",
}
c = SerialChart.init_from(chart_obj, storage=local_storage)
c.render("bar_chart_with_ymax1", "png")
assert c.ax.get_ylim()[1] == 0
# when ymax < actual max value in data
chart_obj = {
"width": 800,
"height": 600,
"data": [
[
["2016-01-01", 4],
["2017-01-01", 6],
["2018-01-01", 3],
["2019-01-01", 2]
]
],
"ymax": 3,
"type": "bars",
}
c = SerialChart.init_from(chart_obj, storage=local_storage)
c.render("bar_chart_with_ymax2", "png")
max_value = max([x[1] for x in chart_obj["data"][0]])
assert c.ax.get_ylim()[1] > max_value
def test_serial_chart_with_axis_labels():
chart_obj = {
"width": 800,
"height": 600,
"data": [
[
["2016-01-01", 12],
["2017-01-01", 14],
["2018-01-01", 8],
["2019-01-01", 2]
]
],
"title": "Make sure the ylabel fits",
"xlabel": "Point in time",
"ylabel": "Number of cats",
"note": "Read this also",
"caption": "Source: Truth",
"type": "line",
}
c = SerialChart.init_from(chart_obj, storage=local_storage)
# visually make sure y and x labels are visible
c.render("serial_chart_with_axis_labels", "png")
def test_chart_with_long_y_ticks():
chart_obj = {
"width": 800,
"height": 600,
"data": [
[
["2016-01-01", 4e6],
["2017-01-01", 6e6],
["2018-01-01", 3e6],
["2019-01-01", 2e6]
]
],
"title": "Look how large numbers!",
"type": "bars",
}
c = SerialChart.init_from(chart_obj, storage=local_storage)
# visually make sure tick labels are visible
c.render("serial_bar_chart_with_long_y_ticks", "png")
def test_weekly_chart():
container = {}
ds = DictStorage(container)
# all negative values with fixed ymax to 0
chart_obj = {
"width": 800,
"height": 600,
"data": [
[
[ "2020-06-22", 0.391 ],
[ "2020-06-29", 0.346 ],
[ "2020-07-06", 0.297 ],
[ "2020-07-13", 0.317 ],
[ "2020-07-20", 0.197 ],
[ "2020-07-27", 0.417 ],
]
],
"type": "bars",
"units": "percent",
"interval": "weekly",
}
c = SerialChart.init_from(chart_obj, storage=local_storage)
c.render("serial_chart_weekly", "png")
def test_multi_color_lines():
colors = ["red", "green", "blue"]
chart_obj = {
"width": 800,
"height": 600,
"data": [
[
["2017-01-01", -6],
["2018-01-01", -3],
["2019-01-01", -2]
],
[
["2017-01-01", -3],
["2018-01-01", -1],
["2019-01-01", 4]
],
[
["2017-01-01", 2],
["2018-01-01", 5],
["2019-01-01", -3]
]
],
"labels": colors,
"colors": colors,
"ymax": 0,
"type": "line",
}
c = SerialChart.init_from(chart_obj, storage=local_storage)
c.render("serial_chart_multi_color", "png")
for i, color in enumerate(colors):
assert c.ax.get_lines()[i].get_color() == color
``` |
{
"source": "jplusplus/skrejperpark",
"score": 3
} |
#### File: skrejperpark/statscraper/datatypes.py
```python
from glob import iglob
from itertools import chain
from csv import DictReader
from csv import reader as CsvReader
from .exceptions import NoSuchDatatype
from .DimensionValue import DimensionValue
from .ValueList import ValueList
import os
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
DATATYPES_FILE = os.path.join(DIR_PATH, "datatypes", "datatypes.csv")
VALUE_DELIMITOR = ','
class Datatype(object):
"""Represent a datatype, initiated by id."""
def __init__(self, id):
"""Id is a datatype from datatypes.csv."""
self.id = id
self.allowed_values = ValueList()
data = None
with open(DATATYPES_FILE, 'r') as csvfile:
reader = DictReader(csvfile)
for row in reader:
if row["id"] == id:
data = row
break
if data is None:
raise(NoSuchDatatype)
self.value_type = data["value_type"]
self.description = data["description"]
domain = data["allowed_values"]
if domain:
for file_ in self._get_csv_files(domain):
with open(file_, 'r') as csvfile:
reader = DictReader(csvfile)
dialect_names = [x
for x in reader.fieldnames
if x.startswith("dialect:")]
self.dialects = [d[8:] for d in dialect_names]
for row in reader:
value = DimensionValue(row["id"],
self,
label=row["label"])
dialects = {x: None for x in self.dialects}
for d in dialect_names:
# parse this cell as a csv row
csvreader = CsvReader([row[d]],
delimiter=VALUE_DELIMITOR,
skipinitialspace=True,
strict=True)
values = next(csvreader)
dialects[d[8:]] = values
value.dialects = dialects
self.allowed_values.append(value)
def _get_csv_files(self, domain):
domain = os.path.join(*domain.split("/"))
# We are fetching both by filename and dir name
# so that regions/kenya will match anything in
# `datatypes/values/regions/kenya/*.csv`
# and/or `datatypes/values/regions/kenya.csv`
#
# There is probably an easier way to do this
# FIXME the below function fetches /foo/bar/regions/kenya as well, but we probably want ^regions/kenya
value_path_1 = os.path.join(DIR_PATH, "datatypes", "values", domain)
value_path_2 = os.path.join(DIR_PATH, "datatypes", "values")
files_1 = chain.from_iterable(iglob(os.path.join(root, '*.csv'))
for root, dirs, files in os.walk(value_path_1))
files_2 = chain.from_iterable(iglob(os.path.join(root, domain + '.csv'))
for root, dirs, files in os.walk(value_path_2))
for f in chain(files_1, files_2):
yield f
def __str__(self):
return str(self.id)
def __repr__(self):
return '<Datatype: %s>' % str(self)
```
#### File: statscraper/scrapers/SCBScraper.py
```python
from .PXWebScraper import PXWeb, Dimension
class SCB(PXWeb):
"""The SCB API uses PXWeb. We just hardcode the url."""
base_url = 'http://api.scb.se/OV0104/v1/doris/sv/ssd'
COUNTIES = [
"01", "03", "04", "05", "06", "07", "08", "09", "10", "12", "13",
"14", "17", "18", "19", "20", "21", "22", "23", "24", "25"
]
MUNICIPALITIES = [
"0114", "0115", "0117", "0120", "0123", "0125", "0126", "0127", "0128",
"0136", "0138", "0139", "0140", "0160", "0162", "0163", "0180", "0181",
"0182", "0183", "0184", "0186", "0187", "0188", "0191", "0192", "0305",
"0319", "0330", "0331", "0360", "0380", "0381", "0382", "0428", "0461",
"0480", "0481", "0482", "0483", "0484", "0486", "0488", "0509", "0512",
"0513", "0560", "0561", "0562", "0563", "0580", "0581", "0582", "0583",
"0584", "0586", "0604", "0617", "0642", "0643", "0662", "0665", "0680",
"0682", "0683", "0684", "0685", "0686", "0687", "0760", "0761", "0763",
"0764", "0765", "0767", "0780", "0781", "0821", "0834", "0840", "0860",
"0861", "0862", "0880", "0881", "0882", "0883", "0884", "0885", "0980",
"1060", "1080", "1081", "1082", "1083", "1214", "1230", "1231", "1233",
"1256", "1257", "1260", "1261", "1262", "1263", "1264", "1265", "1266",
"1267", "1270", "1272", "1273", "1275", "1276", "1277", "1278", "1280",
"1281", "1282", "1283", "1284", "1285", "1286", "1287", "1290", "1291",
"1292", "1293", "1315", "1380", "1381", "1382", "1383", "1384", "1401",
"1402", "1407", "1415", "1419", "1421", "1427", "1430", "1435", "1438",
"1439", "1440", "1441", "1442", "1443", "1444", "1445", "1446", "1447",
"1452", "1460", "1461", "1462", "1463", "1465", "1466", "1470", "1471",
"1472", "1473", "1480", "1481", "1482", "1484", "1485", "1486", "1487",
"1488", "1489", "1490", "1491", "1492", "1493", "1494", "1495", "1496",
"1497", "1498", "1499", "1715", "1730", "1737", "1760", "1761", "1762",
"1763", "1764", "1765", "1766", "1780", "1781", "1782", "1783", "1784",
"1785", "1814", "1860", "1861", "1862", "1863", "1864", "1880", "1881",
"1882", "1883", "1884", "1885", "1904", "1907", "1960", "1961", "1962",
"1980", "1981", "1982", "1983", "1984", "2021", "2023", "2026", "2029",
"2031", "2034", "2039", "2061", "2062", "2080", "2081", "2082", "2083",
"2084", "2085", "2101", "2104", "2121", "2132", "2161", "2180", "2181",
"2182", "2183", "2184", "2260", "2262", "2280", "2281", "2282", "2283",
"2284", "2303", "2305", "2309", "2313", "2321", "2326", "2361", "2380",
"2401", "2403", "2404", "2409", "2417", "2418", "2421", "2422", "2425",
"2460", "2462", "2463", "2480", "2481", "2482", "2505", "2506", "2510",
"2513", "2514", "2518", "2521", "2523", "2560", "2580", "2581", "2582",
"2583", "2584"
]
def _fetch_dimensions(self, dataset):
"""Yield all dimensions.
We override this method just to set the correct datatype
and dialect for regions.
"""
for dimension in super()._fetch_dimensions(dataset):
if dimension.id == "Region":
yield Dimension(dimension.id,
datatype="region",
dialect="skatteverket",
label=dimension.label)
else:
yield dimension
def _fetch_data(self, dataset, query={}, by=None):
"""Allow adding municipalities or counties to a query."""
if by == "municipality":
query["Region"] = ("vs:RegionKommun07EjAggr", self.MUNICIPALITIES)
elif by == "county":
query["Region"] = ("vs:RegionLän07EjAggr", self.COUNTIES)
return super()._fetch_data(dataset, query)
``` |
{
"source": "jplusplus/viltolyckor_scraper",
"score": 3
} |
#### File: viltolyckor_scraper/viltolyckor/scraper.py
```python
import requests
from bs4 import BeautifulSoup
from statscraper import (BaseScraper, Collection, DimensionValue,
Dataset, Dimension, Result)
from viltolyckor.utils import parse_result_page
URL = "https://www.viltolycka.se/statistik/viltolyckor-for-respektive-viltslag/"
class ViltolyckorScraper(BaseScraper):
def _fetch_itemslist(self, current_item):
"""This scraper has only one dataset."""
yield ViltolyckorDataset("viltolyckor per viltslag")
def _fetch_allowed_values(self, dimension):
"""Allowed values are only implemented for regions.
I.e. units would need to be fetched through a JSON API.
"""
dataset = dimension.dataset
if dimension.id == "year":
for option in dataset.soup.select("#ctl11_lstYearInterval option"):
value = option.get("value")
yield DimensionValue(value, dimension)
elif dimension.id == "region":
for option in dataset.soup.select("#ctl11_lstCounties option"):
value = option.get("value")
label = option.text.strip()
yield DimensionValue(value, dimension, label)
elif dimension.id == "viltslag":
tds = dataset.soup.select_one(".statistics-report")\
.select("td.title")
for td in tds:
value = td.text.strip()
yield DimensionValue(value, dimension)
def _fetch_dimensions(self, dataset):
yield Dimension("region")
yield Dimension("year")
yield Dimension("month")
yield Dimension("viltslag")
def _fetch_data(self, dataset, query):
"""Make the actual query.
"""
if query is None:
query = {}
# default query
_query = {
"year": [dataset.latest_year],
"region": "Hela landet"
}
_query.update(query)
allowed_query_dims = ["year", "region"]
# Validate query
for dim in query.keys():
if dim not in allowed_query_dims:
msg = "Querying on {} is not implemented yet".format(dim)
raise NotImplementedError(msg)
for dim, value in _query.iteritems():
if value == "*":
_query[dim] = [x.value for x in dataset.dimensions[dim].allowed_values]
elif not isinstance(value, list):
_query[dim] = [value]
# get all input elem values
payload = {}
for input_elem in dataset.soup.select("input"):
payload[input_elem["name"]] = input_elem.get("value")
for region in _query["region"]:
region_id = dataset._get_region_id(region)
for year in _query["year"]:
payload.update({
"ctl01$ctl11$lstCounties": region_id,
"ctl01$ctl11$lstYearInterval": year,
})
result_page = self._post_html(URL, payload)
for datapoint in parse_result_page(result_page):
value = datapoint["value"]
del datapoint["value"]
yield Result(value, datapoint)
###
# HELPER METHODS
###
@property
def session(self):
"""
"""
if not hasattr(self, "_session"):
self._session = requests.Session()
return self._session
def _get_html(self, url):
""" Get html from url
"""
self.log.info(u"/GET {}".format(url))
r = self.session.get(url)
if hasattr(r, 'from_cache'):
if r.from_cache:
self.log.info("(from cache)")
r.raise_for_status()
return r.content
def _post_html(self, url, payload):
self.log.info(u"/POST {} with {}".format(url, payload))
r = self.session.post(url, payload)
r.raise_for_status()
return r.content
@property
def log(self):
if not hasattr(self, "_logger"):
self._logger = PrintLogger()
return self._logger
class ViltolyckorDataset(Dataset):
@property
def latest_year(self):
"""Get the latest available year."""
return self.years[-1]
@property
def years(self):
"""Get all years."""
return [x.value for x in self.dimensions["year"].allowed_values]
def _get_region_id(self, region_label_or_id):
"""Get region id.
:param region_label_or_id: an id or label of region.
"""
regions = self.dimensions["region"].allowed_values
if region_label_or_id in regions:
return region_label_or_id
else:
return regions.get_by_label(region_label_or_id).value
@property
def html(self):
if not hasattr(self, "_html"):
self._html = self.scraper._get_html(URL)
return self._html
@property
def soup(self):
if not hasattr(self, "_soup"):
self._soup = BeautifulSoup(self.html, 'html.parser')
return self._soup
class PrintLogger():
""" Empyt "fake" logger
"""
def log(self, msg, *args, **kwargs):
print msg
def debug(self, msg, *args, **kwargs):
print msg
def info(self, msg, *args, **kwargs):
print msg
def warning(self, msg, *args, **kwargs):
print msg
def error(self, msg, *args, **kwargs):
print msg
def critical(self, msg, *args, **kwargs):
print msg
``` |
{
"source": "jplu/visual-search-scripts",
"score": 3
} |
#### File: jplu/visual-search-scripts/download_amazon_dataset.py
```python
import gzip
import math
import os
import sys
import multiprocessing
import urllib.request
import requests
import glob
import imghdr
from tqdm import tqdm
import shutil
data_path = 'metadata.json'
compressed_data_path = data_path + '.gz'
images_path = 'images'
NUM_CPU = multiprocessing.cpu_count()
metadata_url = "<amazon_archive_address>"
if not os.path.isfile(compressed_data_path):
file_size = int(requests.head(metadata_url).headers["Content-Length"])
header = {"Range": "bytes=%s-%s" % (0, file_size)}
pbar_1 = tqdm(total=file_size, initial=0, unit='B', unit_scale=True, desc=metadata_url.split('/')[-1])
r = requests.get(metadata_url, headers=header, stream=True)
print("Start downloading metadata.json.gz")
with open("metadata.json.gz", "wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
pbar_1.update(1024)
pbar_1.close()
print("Download done")
if os.path.isfile(compressed_data_path) and not os.path.isfile(data_path):
print("Start uncompress metadata.json.gz")
with gzip.open('metadata.json.gz', 'r') as f_in, open('metadata.json', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
print("Uncompress done")
if not os.path.isdir(images_path):
os.makedirs(images_path)
def process(line):
data = eval(line)
if 'imUrl' in data and data['imUrl'] is not None and 'categories' in data and data['imUrl'].split('.')[-1] == 'jpg' and data['asin'].isdigit():
url = data['imUrl']
try:
path = os.path.join(images_path, str(int(data['asin'])) + '.jpg')
if not os.path.isfile(path):
urllib.request.urlretrieve(url, path)
if imghdr.what(path) != 'jpeg':
print('Removed {} it is a {}'.format(path, imghdr.what(path)))
sys.stdout.flush()
os.remove(path)
else:
print(path + " already exists")
sys.stdout.flush()
except Exception as e:
print("Error downloading {}".format(url))
sys.stdout.flush()
def process_wrapper(chunk_start, chunk_size):
with open(data_path) as f:
f.seek(chunk_start)
lines = f.read(chunk_size).splitlines()
for line in lines:
process(line)
def chunkify(size=1024*1024):
file_end = os.path.getsize(data_path)
with open(data_path, 'rb+') as f:
chunk_end = f.tell()
while True:
chunk_start = chunk_end
f.seek(size,1)
f.readline()
chunk_end = f.tell()
yield chunk_start, chunk_end - chunk_start
if chunk_end > file_end:
break
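# NOTE (editorial): image_upload() below is never called in this script and
# relies on a `bucket` object (e.g. a pre-configured google-cloud-storage
# Bucket) that is not defined here; it is kept for reference only.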
def image_upload(blobname, filename):
blob = bucket.blob(blobname)
blob.upload_from_filename(filename)
pool = multiprocessing.Pool(processes=NUM_CPU)
for chunk_start, chunk_size in chunkify():
pool.apply_async(process_wrapper, args=(chunk_start, chunk_size))
pool.close()
pool.join()
``` |
{
"source": "jpM1-46/SA",
"score": 2
} |
#### File: controls/lib/alertmanager.py
```python
import copy
import os
import json
from collections import defaultdict
from dataclasses import dataclass
from typing import List, Dict, Optional
from common.basedir import BASEDIR
from common.params import Params
from common.realtime import DT_CTRL
from selfdrive.controls.lib.events import Alert
from selfdrive.controls.lib.events import EVENTS, ET
with open(os.path.join(BASEDIR, "selfdrive/controls/lib/alerts_offroad.json")) as f:
OFFROAD_ALERTS = json.load(f)
def set_offroad_alert(alert: str, show_alert: bool, extra_text: Optional[str] = None) -> None:
if show_alert:
a = OFFROAD_ALERTS[alert]
if extra_text is not None:
a = copy.copy(OFFROAD_ALERTS[alert])
a['text'] += extra_text
Params().put(alert, json.dumps(a))
else:
Params().delete(alert)
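# Editorial usage sketch (the alert key is hypothetical; real keys are the
# ones defined in selfdrive/controls/lib/alerts_offroad.json):
#   set_offroad_alert("Offroad_ExampleAlert", True, extra_text=" (details)")
#   set_offroad_alert("Offroad_ExampleAlert", False)  # clears the alert again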
@dataclass
class AlertEntry:
alert: Optional[Alert] = None
start_frame: int = -1
end_frame: int = -1
def active(self, frame: int) -> bool:
return frame <= self.end_frame
class AlertManager:
def __init__(self):
self.alerts: Dict[str, AlertEntry] = defaultdict(AlertEntry)
def add_many(self, frame: int, alerts: List[Alert]) -> None:
for alert in alerts:
entry = self.alerts[alert.alert_type]
entry.alert = alert
if not entry.active(frame):
entry.start_frame = frame
min_end_frame = entry.start_frame + alert.duration
entry.end_frame = max(frame + 1, min_end_frame)
def SA_set_frame(self, frame):
self.SA_frame = frame
def SA_set_enabled(self, enabled):
self.SA_enabled = enabled
def SA_add(self, alert_name, extra_text_1='', extra_text_2=''):
alert = EVENTS[alert_name][ET.PERMANENT] # assume permanent (to display in all states)
added_alert = copy.copy(alert)
added_alert.start_time = self.SA_frame * DT_CTRL
added_alert.alert_text_1 += extra_text_1
added_alert.alert_text_2 += extra_text_2
added_alert.alert_type = f"{alert_name}/{ET.PERMANENT}" # fixes alerts being silent
added_alert.event_type = ET.PERMANENT
self.alerts[alert.alert_type].alert = added_alert
if not self.alerts[alert.alert_type].active(self.SA_frame):
self.alerts[alert.alert_type].start_frame = self.SA_frame
min_end_frame = self.alerts[alert.alert_type].start_frame + alert.duration
self.alerts[alert.alert_type].end_frame = max(self.SA_frame + 1, min_end_frame)
def process_alerts(self, frame: int, clear_event_types: set) -> Optional[Alert]:
current_alert = AlertEntry()
for v in self.alerts.values():
if not v.alert:
continue
if v.alert.event_type in clear_event_types:
v.end_frame = -1
# sort by priority first and then by start_frame
greater = current_alert.alert is None or (v.alert.priority, v.start_frame) > (current_alert.alert.priority, current_alert.start_frame)
if v.active(frame) and greater:
current_alert = v
return current_alert.alert
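# --- Editorial note (illustrative, not part of the original module) ----------
# Typical per-frame flow, kept as comments because real Alert objects come from
# the selfdrive event definitions:
#   am = AlertManager()
#   am.add_many(frame, currently_raised_alerts)       # refresh active entries
#   alert = am.process_alerts(frame, clear_event_types)
# process_alerts() picks the highest-priority active alert, breaking ties in
# favour of the alert that became active most recently (larger start_frame).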
``` |
{
"source": "jpm343/RetinaX",
"score": 2
} |
#### File: neuron_simulator_service/SAC_network/calculate_gaba_init.py
```python
import SACnetwork
import iodef as io
ginit = SACnetwork.gabaInit(SACnetwork.excGmin)
print "Saving initial GABA parameters to json file"
io.save_gabaInit_file(ginit, SACnetwork.all_params, './gabaInit.json')
```
#### File: neuron_simulator_service/SAC_network/plot_tools.py
```python
import numpy as np
import pandas as pd
import os
import matplotlib.cm as cmx
import matplotlib.pyplot as plt
import json
import glob
def heat_map(datafile, outfolder,
thevar="DSsc", varx="# speed", vary="gmax",
get_average=False,
sac_number=None):
data = pd.read_csv(datafile)
if '# vel' in data and varx == "# speed":
varx = 'vel'
x = data[varx].values
y = data[vary].values
ux = np.unique(x)
uy = np.unique(y)
X, Y = np.meshgrid(ux, uy)
if not os.path.exists(outfolder):
os.mkdir(outfolder)
if sac_number is None:
if thevar == "dsi":
sac_number = ("-1",)
else:
fileparts = datafile.split('_')
sac_number = [f for f in fileparts if f.isdigit()]
si = 0
Z_avg = []
for sac in sac_number:
datakey = thevar
if thevar != 'dsi':
datakey = thevar + str(si)
z = np.array(data[datakey])
z = np.ma.masked_invalid(z.astype(float))
Z = np.empty(X.shape)
# The following is to fill matrix with NaN's in case not all
# simulations are available
for i in range(0, len(ux)):
for j in range(0, len(uy)):
zaux = z[(x == X[i, j]) & (y == Y[i, j])]
if zaux.size > 0:
Z[i, j] = zaux
else:
Z[i, j] = np.nan
if get_average:
Z_avg.append(Z)
fig, ax = plt.subplots()
plt.set_cmap('jet')
plt.title('SAC number: ' + str(sac))
plt.set_cmap('jet')
cax = ax.pcolormesh(X, Y, Z, vmin=0, vmax=1, snap=True)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel('$Stim\/\ intensity,\/\ g_{max} \/\ (\mu S)$',
fontsize=15)
ax.set_ylim(min(y), max(y))
ax.set_xlim(min(x), max(x))
ax.set_xlabel('$Speed\/\ (mm/s)$', fontsize=15)
cbar = fig.colorbar(cax, ticks=[0, 0.5, 1])
cbar.set_label('$DSI$', fontsize=15)
plt.tight_layout()
plt.savefig(os.path.join(outfolder,
'heatmap-' + thevar + str(sac) + '.png'),
dpi=300,
transparent=True)
plt.close()
si += 1
plt.close(fig)
if get_average:
Z_avg = np.mean(Z_avg, axis=0)
fig, ax = plt.subplots()
plt.set_cmap('jet')
plt.title('Average of SACs')
plt.set_cmap('jet')
cax = ax.pcolormesh(X, Y, Z_avg, vmin=0, vmax=1, snap=True)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel('$Stim\/\ intensity,\/\ g_{max} \/\ (\mu S)$',
fontsize=15)
ax.set_ylim(min(y), max(y))
ax.set_xlim(min(x), max(x))
ax.set_xlabel('$Speed\/\ (mm/s)$', fontsize=15)
cbar = fig.colorbar(cax, ticks=[0, 0.5, 1])
cbar.set_label('$DSI$', fontsize=15)
plt.tight_layout()
plt.savefig(os.path.join(outfolder,
'heatmap-' + thevar + '_average.png'),
dpi=300,
transparent=True)
plt.close()
plt.close(fig)
def print_param(file):
if not os.path.exists(file):
print "No parameters file. Default parameters were used..."
return
with open(file, "r") as fp:
param = json.load(fp)
for key in param:
print key + " = " + str(param[key])
def move_figs(src, dest):
flist = glob.glob(src + '*.png')
if not os.path.exists(dest):
os.mkdir(dest)
from shutil import copyfile
for f in flist:
copyfile(f, os.path.join(dest, os.path.basename(f)))
def gather_results(folder, ext="txt", selection=None):
filelist = glob.glob(os.path.join(folder, "*" + ext))
s = list()
for f in filelist:
r = f.replace(ext, '').split('_')[-1] # remove ext
s.append(int(''.join(x for x in r if x.isdigit())))
notfound = [i for i in range(max(s) + 1) if i not in s]
found = [i for i in range(max(s) + 1) if i not in notfound]
if notfound:
print "Warning! Not found: ", notfound
rootstr = f.split('_array')[0] + '.txt' # to be used as output filename
if selection is not None:
found = found and selection
filelist = [filelist[s.index(i)] for i in found] # sorted
df = pd.DataFrame()
for f in filelist:
df = df.append(pd.read_csv(f), ignore_index=True)
df.to_csv(rootstr)
return rootstr
def gather_results_special(folder,
ext="txt",
ext_sp="_dsi.txt",
col1="# speed",
col2="gmax"):
filelist = glob.glob(os.path.join(folder, "*" + ext))
filelist_sp = glob.glob(os.path.join(folder, "*" + ext_sp))
filelist = [x for x in filelist if x not in filelist_sp]
s = list()
s_sp = list()
for f in filelist:
r = f.replace(ext, '').split('_')[-1]
s.append(int(''.join(x for x in r if x.isdigit())))
for f in filelist_sp:
r = f.replace(ext_sp, '').split('_')[-1] # remove ext
s_sp.append(int(''.join(x for x in r if x.isdigit())))
notfound = [i for i in range(max(s) + 1) if i not in s]
if notfound:
print "Warning! Not found: ", notfound
if len(filelist) != len(filelist_sp):
print "Warning! Lists of different length"
rootstr = f.split('_array')[0] + '.txt' # to be used as output filename
filelist = [filelist[s.index(i)] for i in range(len(s))] # sorted
filelist_sp = [filelist_sp[s_sp.index(i)] for i in range(len(s_sp))]
df = pd.DataFrame(columns=[col1, col2, "dsi"])
for (f, f_sp) in zip(filelist, filelist_sp):
rf = pd.read_csv(f)
df = df.append({col1: rf[col1][0],
col2: rf[col2][0],
"dsi": np.loadtxt(f_sp)}, ignore_index=True)
df.to_csv(rootstr)
return rootstr
def bipolar_SAC_map(SACntw,
outfolder,
DSGC=None,
BPpos=None,
SACpos=None,
whichSAC=None):
celldata = SACntw.celldata
labels = ['SAC {0}'.format(j) for j in range(len(celldata))]
BPcoord = np.array(SACntw.BPpos)
fig, ax = plt.subplots(figsize=(12, 12))
ax.set_position([0.1, 0.1, 0.7, 0.8])
colrs = cmx.gist_rainbow(np.linspace(0, 1, len(celldata)))
for i in range(len(celldata)):
ax.scatter(celldata[i, 0], celldata[i, 1], s=celldata[i, 2],
c=colrs[i], alpha=0.8, label=labels[i])
ax.legend(scatterpoints=1,
loc='upper center', bbox_to_anchor=(0.5, -0.05),
fancybox=False, shadow=False, ncol=10, fontsize=8)
ax.scatter(BPcoord[:, 0], BPcoord[:, 1], s=1, color='grey')
ax.axis('scaled')
plt.xlabel('x coordinate ($\mu$m)')
plt.ylabel('y coordinate ($\mu$m)')
if DSGC is not None:
for sec in DSGC.all:
n3d = SACntw.h.n3d(sec=sec)
xtuple = (SACntw.h.x3d(0, sec=sec),
SACntw.h.x3d(n3d - 1, sec=sec))
ytuple = (SACntw.h.y3d(0, sec=sec),
SACntw.h.y3d(n3d - 1, sec=sec))
plt.plot(xtuple, ytuple, color='black')
if BPpos is not None:
ax.scatter(BPpos[:, 0], BPpos[:, 1], s=5, color='green')
if SACpos is not None:
ax.scatter(SACpos[:, 0], SACpos[:, 1], s=10, color='red')
if whichSAC is not None:
for ws in whichSAC:
ax.scatter(celldata[ws, 0], celldata[ws, 1],
marker="x", c="red", alpha=1,
label=labels[ws])
import ntpath
fn = (ntpath.basename(SACntw.celldatafilename).replace('.txt', '_') +
ntpath.basename(SACntw.syndatafilename).replace('.txt', '_') +
'network.png')
plt.savefig(os.path.join(outfolder, fn), dpi=300, transparent=True)
def plot_vec_recorded(SACntw, Vvecs, outfolder='', vec_type='v', dsi='',
**kargs):
if vec_type == 'v':
ylim = (-70, 0)
ylabel = 'Voltage (mV)'
elif vec_type == 'sc':
ylim = (0, 1)
ylabel = '$\mathrm{G_{GABA}/G_{max}}$'
amac_rec = SACntw.amac_rec
colrs = cmx.gist_rainbow(np.linspace(0, 1, len(SACntw.celldata)))
# plt.clf()
# plt.subplots(figsize=(20, 20))
label_null = ['SAC %g null' % (i) for i in amac_rec]
label_pref = ['SAC %g preferred' % (i) for i in amac_rec]
if 'subplot' in kargs.keys():
plt.subplot(kargs['subplot'])
else:
plt.figure()
plt.subplot(111)
if dsi != '':
title = '\n'.join([('SAC%d DSI= %.2f' % (k, n))
for n, k in zip(dsi, amac_rec)])
plt.title(title)
for i in range(0, len(amac_rec)):
for j in range(0, len(SACntw.x_rec)):
vvec = Vvecs[i][2 * j + 1].as_numpy()[0:int(SACntw.h.tstop /
SACntw.sampinvl)]
plt.plot(np.arange(0, SACntw.h.tstop, SACntw.sampinvl), vvec,
color=colrs[amac_rec[i]] * (j + 1) / len(SACntw.x_rec),
ls='--', lw=2, label=label_null[i])
vvec = Vvecs[i][2 * j].as_numpy()[0:int(SACntw.h.tstop /
SACntw.sampinvl)]
plt.plot(np.arange(0, SACntw.h.tstop, SACntw.sampinvl),
vvec, color=colrs[amac_rec[i]] * (j + 1) / len(SACntw.x_rec),
ls='-', lw=2, label=label_pref[i])
plt.ylim(ylim)
plt.ylabel(ylabel, fontsize=16)
plt.xlim(50, SACntw.h.tstop)
plt.xlabel('Time (ms)')
plt.legend(prop={'size': 12}, loc='best',
fancybox=False, ncol=len(Vvecs[0]),
frameon=False, facecolor='inherit')
if outfolder != '': # Save to file if outfolder not empty
name = ','.join(str(n) for n in amac_rec)
namefig = os.path.join(outfolder,
(SACntw.synapse_type +
"_%s_%s_%s.png"))
plt.savefig(namefig % (str(SACntw.today),
name, vec_type), dpi=300, transparent=True)
def SAC_map(SACntw, outfolder, DSGC=None):
celldata = SACntw.celldata
amac_rec = SACntw.amac_rec
labels = ['SAC {0}'.format(j + 1) for j in range(len(celldata))]
BPsyn = SACntw.BPsyn
fig, ax = plt.subplots(figsize=(12, 12))
ax.set_position([0.1, 0.1, 0.7, 0.8])
colrs = cmx.gist_rainbow(np.linspace(0, 1, len(celldata)))
for i in range(len(celldata)):
ax.scatter(celldata[i, 0], celldata[i, 1], s=celldata[i, 2],
c=0, alpha=0.5)
for i in amac_rec:
x = celldata[i, 0]
y = celldata[i, 1]
L = celldata[i, 2]
ax.scatter(x, y, s=L,
c=colrs[i], alpha=0.8, label=labels[i])
ax.plot([x - L, x + L], [y, y], '-', c=colrs[i])
ax.plot([x, x], [y + L, y - L], '-', c=colrs[i])
d = 0
for dend in SACntw.amac[i].dend:
synapse_per_seg = get_dendrite_synapses([BP[0] for BP in BPsyn],
dend)
gaba_per_seg = get_dendrite_synapses(SACntw.gabasyn, dend)
Lseg = dend.L / dend.nseg
dend_angle = 2 * np.pi * d / len(SACntw.amac[i].dend)
if d == 0 or d == 2:
mrk = '|'
else:
mrk = '_'
for j in range(dend.nseg):
xs = np.cos(dend_angle) * np.linspace(j * Lseg,
(j + 1) * Lseg,
int(synapse_per_seg[j]))
ys = np.sin(dend_angle) * np.linspace(j * Lseg,
(j + 1) * Lseg,
int(synapse_per_seg[j]))
xsgaba = np.cos(dend_angle) * np.linspace(j * Lseg,
(j + 1) * Lseg,
int(gaba_per_seg[j]))
ysgaba = np.sin(dend_angle) * np.linspace(j * Lseg,
(j + 1) * Lseg,
int(gaba_per_seg[j]))
ax.scatter(xs + x * np.ones(len(xs)),
ys + y * np.ones(len(ys)),
marker=mrk,
c='k')
ax.scatter(xsgaba + x * np.ones(len(xsgaba)),
ysgaba + y * np.ones(len(ysgaba)),
marker=mrk,
c='r')
d += 1
ax.legend(scatterpoints=1,
loc='upper center', bbox_to_anchor=(0.5, -0.05),
fancybox=False, shadow=False, ncol=10, fontsize=8)
ax.axis('scaled')
plt.xlabel('x coordinate ($\mu$m)')
plt.ylabel('y coordinate ($\mu$m)')
import ntpath
fn = (ntpath.basename(SACntw.celldatafilename).replace('.txt', '_') +
ntpath.basename(SACntw.syndatafilename).replace('.txt', '_') +
'subnetwork.png')
plt.savefig(os.path.join(outfolder, fn), dpi=300, transparent=True)
def get_dendrite_synapses(syn_array, dendrite):
nseg = dendrite.nseg
segments = np.zeros(nseg)
for syn in syn_array:
if syn.get_segment().sec.name() == dendrite.name():
segments[int(syn.get_segment().x * nseg)] += 1
return segments
```
#### File: neuron_simulator_service/SAC_network/stimulus.py
```python
from __future__ import division
import numpy as np
# Set bar stimuli speed
def update_bar_speed(BPsyn, delay, width, speed, d_init,
synapse_type="alphaCSyn", angle=0):
print "Updating bar speed to: %f mm/s" % speed
angrad = angle * np.pi / 180.0
angcos = np.cos(angrad)
angsin = np.sin(angrad)
for BPi in BPsyn:
# Note that the initial bar position, d_init, should be far enough to
# ensure all BP are activated
# (xprime, yprime): rotated and translated axes centered on the bar
# xprime: axis of bar, yprime: normal to bar
# xprime = BPi[1] * angsin - BPi[2] * angcos
yprime = BPi[1] * angcos + BPi[2] * angsin + d_init
synapse_onset = delay + yprime / speed
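        # Worked example with illustrative numbers: delay = 100, d_init = 50,
        # angle = 0 and speed = 1 give, for a BP with BPi[1] = 25,
        # yprime = 25 + 50 = 75 and synapse_onset = 100 + 75 / 1 = 175.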
if ((speed > 0 and yprime < (0 - width)) or
(speed < 0 and yprime > 0)):
# Bar won't pass over BP location (yprime)
deactivate_BP_synapse(BPi, synapse_type, synapse_onset)
continue
duration = None
if synapse_type == "BPexc":
duration = abs(width / speed)
activate_BP_synapse(BPi, synapse_type, synapse_onset, duration)
return BPsyn
def set_stimulus(BPsyn, stimulus_type, delay, synapse_type, **kwargs):
if stimulus_type == "bar":
width = kwargs['bar_width']
speed = kwargs['bar_speed']
x_init = kwargs['bar_x_init']
if 'bar_angle' in kwargs:
update_bar_speed(BPsyn, delay, width, speed, x_init, synapse_type,
kwargs['bar_angle'])
else:
update_bar_speed(BPsyn, delay, width, speed, x_init, synapse_type)
elif stimulus_type == "annulus":
center = kwargs['center']
ri = kwargs['inner_diam']
ro = kwargs['outer_diam']
dur = kwargs['duration']
print "Setting up annulus stimulus with delay: %1.1f (ms)" % delay
for BPi in BPsyn:
if in_annulus((BPi[1], BPi[2]), center, ri, ro):
activate_BP_synapse(BPi, synapse_type, delay, dur)
elif stimulus_type == "grating":
width = kwargs['bar_width']
speed = kwargs['bar_speed']
x_init = kwargs['bar_x_init']
x_freq = kwargs['spatial_freq']
N_bars = kwargs['N_bars']
dur = width / speed
period = x_freq / speed
for BPi in BPsyn:
            if BPi[1] < (x_init - width):  # Grating won't pass over BP location
continue
synapse_onset = delay + (BPi[1] - x_init) / speed
activate_BP_synapse(BPi, synapse_type, synapse_onset, dur, period,
N_bars)
elif stimulus_type == "bar_with_circular_mask":
width = kwargs['bar_width']
speed = kwargs['bar_speed']
x_init = kwargs['bar_x_init']
mask_center = kwargs['mask_center']
mask_diam = kwargs['mask_diam']
dur = width / speed
for BPi in BPsyn:
if BPi[1] < (x_init - width) or not in_annulus((BPi[1], BPi[2]),
mask_center, 0, mask_diam / 2):
continue
synapse_onset = delay + (BPi[1] - x_init) / speed
activate_BP_synapse(BPi, synapse_type, synapse_onset, dur)
return BPsyn
def in_annulus(point, center, inner_diam, outer_diam):
dist = np.linalg.norm(np.array(point) - np.array(center))
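    # Note: the raw Euclidean distance (a radius) is compared against the
    # inner_diam/outer_diam arguments, so callers need to pass radii (the
    # circular-mask caller above passes mask_diam / 2 for that reason).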
return dist >= inner_diam and dist <= outer_diam
def activate_BP_synapse(BPsynapse, synapse_type, synapse_onset, dur=None,
period=0, n_events=1):
if synapse_type in ("alphaCSyn", "expCSyn"):
BPsynapse[0].onset = synapse_onset
BPsynapse[0].dur = BPsynapse[0].default_dur
if dur is not None:
BPsynapse[0].dur = dur
elif synapse_type in ("Exp2Syn", "BPexc"):
BPsynapse[-2].number = n_events
BPsynapse[-2].interval = period
BPsynapse[-2].start = synapse_onset
        BPsynapse[-2].noise = 0  # Default should be 0 anyway
if synapse_type == "BPexc":
BPsynapse[0].dur = dur
def deactivate_BP_synapse(BPsynapse, synapse_type, synapse_onset):
if synapse_type in ("alphaCSyn", "expCSyn"):
BPsynapse[0].onset = synapse_onset
BPsynapse[0].dur = 0
elif synapse_type in ("Exp2Syn", "BPexc"):
BPsynapse[-2].number = 0
BPsynapse[-2].interval = 0
BPsynapse[-2].start = synapse_onset
        BPsynapse[-2].noise = 0  # Default should be 0 anyway
if synapse_type == "BPexc":
BPsynapse[0].dur = 0
def insert_voltage_clamp(nrnobj, nrnsec, xsec, voltage_amp, dur):
vclamp = nrnobj.SEClamp(xsec, sec=nrnsec)
vclamp.amp1 = voltage_amp
vclamp.dur1 = dur
return vclamp
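# Illustrative usage (assuming `from neuron import h` and an existing section
# `sec`): vclamp = insert_voltage_clamp(h, sec, 0.5, -60.0, 500.0)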
```
#### File: neuron_simulator_service/SAC_network/submit-SACnetwork.py
```python
from __future__ import division
import SACnetwork
import numpy as np
import time
from mpi4py import MPI
# import neuron as h
# import matplotlib as pl
import json
import datetime
import os
import iodef as io
# Parameters
###############################################################################
G_MIN_EXP = -6
G_MAX_EXP = -3
G_NUMBER = 20
VELOC_MIN_EXP = -1
VELOC_MAX_EXP = 2
VELOC_NUMBER = 20
###############################################################################
if os.path.exists('./submit-SACnetwork.json'):
io.load_params_json('./submit-SACnetwork.json', globals())
gmax = np.logspace(G_MIN_EXP, G_MAX_EXP, G_NUMBER)
stimuliSpeed = np.logspace(VELOC_MIN_EXP, VELOC_MAX_EXP, VELOC_NUMBER)
synapse_type = SACnetwork.synapse_type # Note this param is in SACnetwork.py
t = time.time()
comm = MPI.COMM_WORLD
numproc = comm.size # Number of processes
start_time = time.time()
i = 0
today = datetime.date.today()
print "Simulation using " + synapse_type
print "Starting simulation using ", numproc, " process"
simPath = "./results/%s/" % today
# Parallelizing simulations
if comm.rank == 0: # Process number
if not os.path.exists(simPath): # only process 0 attempts to create folder
os.makedirs(simPath)
for g in gmax: # ,50,100,300,500,1000:
for sp in stimuliSpeed:
comm.send((g, sp), dest=i % numproc)
i += 1
for i in range(numproc):
comm.send(None, dest=i)
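# Rank 0 above hands out (gmax, speed) pairs round-robin and then sends one None
# per rank as a stop signal; every rank, including rank 0, also runs the worker
# loop below, receiving parameters until it gets None.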
# Call functions to parallelize
# Steady-state function
sc1i, sc2i = SACnetwork.gabaInit(SACnetwork.excGmin)
params = comm.recv(source=0)
results = []
while (params is not None):
g, sp = params
print g, sp, 'proc', comm.rank
res = SACnetwork.main(stimvel=sp, gabaI=(
sc1i, sc2i), excmax=g, rank=comm.rank)
results.append(np.r_[sp, g, res])
    print res, 'proc', comm.rank, \
        "Time elapsed: ", time.time() - start_time, "s"
comm.send(res, dest=0)
params = comm.recv(source=0)
results2 = comm.gather(results, root=0)
comm.send(None, dest=0)
if comm.rank == 0:
reslist = np.array([item1 for item2 in results2 for item1 in item2])
print reslist
print "Time elapsed: ", time.time() - start_time, "s"
numv = len(SACnetwork.amac_rec)
numsc = len(SACnetwork.syn_rec)
head_entries = ['v%dp' % n for n in range(numv)] + \
['DSv%d' % n for n in range(numv)] + \
['sc%dp' % n for n in range(numsc)] +\
['DSsc%d' % n for n in range(numsc)] +\
['sc%dpa' % n for n in range(numsc)] +\
['DSsca%d' % n for n in range(numsc)]
head = 'vel,gmax,ancho,vel,' + ','.join(head_entries)
np.savetxt(simPath +
synapse_type + '_%s_%s_%s_gmax_data%s_heat-grande_%s.txt' %
(SACnetwork.amac_rec[0], SACnetwork.amac_rec[1],
SACnetwork.amac_rec[2], SACnetwork.dataidx, SACnetwork.today),
np.array(reslist), fmt='%6g', delimiter=',', header=head)
# Parameters
grid = {'Initial speed': stimuliSpeed[0],
'Final Speed': stimuliSpeed[-1],
'Speed points': len(stimuliSpeed),
'Initial conductance': gmax[0],
'Final conductance': gmax[-1],
'Conductance points': len(gmax)}
with open(simPath +
synapse_type + '_simulation_parameters_%s.json' %
SACnetwork.today, 'w') as fp:
json.dump(grid, fp)
json.dump(SACnetwork.dataFiles, fp)
json.dump(SACnetwork.recording, fp)
json.dump(SACnetwork.neuron_sim_opts, fp)
json.dump(SACnetwork.amacrines, fp)
json.dump(SACnetwork.bipolar, fp)
json.dump(SACnetwork.gaba_sacs, fp)
elapsed = time.time() - t
print "Total Execution time: %s seconds" % elapsed
# Plots
# pl.clf()
#
# pl.subplot(211)
# for vec in Vvecs:
# vvec=vec[0].as_numpy()[0:int(h.tstop/sampinvl)]
# pl.plot(np.arange(0,h.tstop,sampinvl),vvec)
# vvec=vec[1].as_numpy()[0:int(h.tstop/sampinvl)]
# pl.plot(np.arange(0,h.tstop,sampinvl),vvec)
#
# pl.legend(np.hstack([('Vnull%g'%(i+1),
# 'Vpref%g'%(i+1)) for i in range(len(Vvecs))]))
#
# pl.subplot(212)
# for vec in scvecs:
# svec=vec[0].as_numpy()[0:int(h.tstop/sampinvl)]
# pl.plot(np.arange(0,h.tstop,sampinvl),svec)
# svec=vec[1].as_numpy()[0:int(h.tstop/sampinvl)]
# pl.plot(np.arange(0,h.tstop,sampinvl),svec)
#
# pl.legend(np.hstack([('scnull%g'%(i+1),
# 'scpref%g'%(i+1)) for i in range(len(scvecs))]))
# pl.savefig("%d.png"%())
```
#### File: neuron_simulator_service/SAC_network/tools.py
```python
import numpy as np
import glob
import pandas as pd
# Direction selectivity index for a spiking cell like DSGC
def DSI(angles, nspk, ref_angle=None):
# angles: the angles of the applied stimuli
# nspk: number of spikes obtained for the applied angles
# ref_angle: reference angle to obtain DSI for a particular direction
DSIx = 0
DSIy = 0
ntot = 0
for (ang, n) in zip(angles, nspk):
DSIx += n * np.cos(np.deg2rad(ang))
DSIy += n * np.sin(np.deg2rad(ang))
ntot += n
if ntot == 0:
# no selectivity if no spike
return 0
if ref_angle is not None:
# DSI w/r a reference angle
# The following rotates (DSIx,DSIy) and gets the DSIx of
# the rotated axes
ref_angle = np.deg2rad(ref_angle)
return (DSIx * np.cos(ref_angle) - DSIy * np.sin(ref_angle)) / ntot
return np.linalg.norm((DSIx, DSIy)) / ntot
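# Worked example with illustrative numbers: angles = [0, 90, 180, 270] and
# nspk = [10, 0, 2, 0] give DSIx = 10 - 2 = 8, DSIy = 0 and ntot = 12, so the
# vector-sum DSI is 8 / 12, about 0.67.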
# Direction selectivity index
def DS(t_es, nullvec, prefvec, sampinvl, area=0):
init_idx = int(t_es / sampinvl) # Time step
end_idx = len(nullvec) - 1
    if area == 1:  # Compute the area under the curve
resp_pref = sum(prefvec[init_idx:end_idx]) / (end_idx - init_idx)
resp_null = sum(nullvec[init_idx:end_idx]) / (end_idx - init_idx)
base_pref = sum(prefvec[init_idx - 10:init_idx]) / 10
base_null = sum(nullvec[init_idx - 10:init_idx]) / 10
    else:  # Compute the DSI
resp_pref = max(prefvec[init_idx:])
resp_null = max(nullvec[init_idx:])
base_pref = np.mean(prefvec[init_idx - 50:init_idx])
base_null = np.mean(nullvec[init_idx - 50:init_idx])
return [resp_pref - base_pref,
((resp_pref - base_pref) - (resp_null - base_null)) /
((resp_pref - base_pref) + (resp_null - base_null))]
def return_syn_rec(amac_rec, synM, pref_dend=0, null_dend=2):
syn_rec = []
for amac in amac_rec:
a = np.where((synM[:, 0] == amac) & (synM[:, 2] == null_dend))[0][0]
b = np.where((synM[:, 0] == amac) & (synM[:, 2] == pref_dend))[0][0]
syn_rec.append((a, b))
return syn_rec
def make_dictionary(**kwargs):
d = dict((name, value) for name, value in kwargs.iteritems())
return d
def gather_results_array_job(folder, ext='.txt'):
filelist = glob.glob(folder + '*' + ext)
s = []
print "Gathering result files..."
for f in filelist:
r = f.replace(ext, '').split('_')[-1]
s.append(int(''.join(x for x in r if x.isdigit())))
rootstr = f.split('_array')[0] + '.txt'
# sort filelist by array number
filelist = [filelist[s.index(i)] for i in range(len(s))]
df = pd.DataFrame()
for f in filelist:
df = df.append(pd.read_csv(f))
df.to_csv(rootstr)
return rootstr
``` |
{
"source": "jpmaddox97/trend_lines",
"score": 3
} |
#### File: jpmaddox97/trend_lines/pull_datacb.py
```python
import pandas as pd
from datetime import datetime, timedelta
import cbpro
# Import client and init dictionary of granularity
c = cbpro.PublicClient()
def _concat(dataframe, data):
return pd.concat([dataframe, data])
def _make_df(df):
columns = ['time', 'Low', 'High', 'Open', 'Close', 'volume']
return pd.DataFrame(df, columns=columns)
def getCBHistory(symbol: str, period: int, cycles: int = 5):
'''
Returns historical OHLCV price data from coinbase.
Parameters:
symbol (str): The symbol of the cryptocurrency
period (int): Granularity of data in seconds.
Accepts one minute, five minutes, 15 minutes,
            one hour, six hours and one day
min = 60
5min = 300
15min = 900
hour = 3600
            6hour = 21600
day = 86400
cycles (int): data is returned in groups of 300 data points.
If you want more than 300 points of data you will need
to increase the number of cycles.
            Default is 5 cycles.
Returns:
data_df (dataframe): A pandas dataframe
'''
# Initialize count
count = 1
# Set initial start and end time
timeEnd = datetime.now()
delta = timedelta(seconds = int(period))
timeStart = timeEnd - (300*delta)
# Iterate through cycles and return dataframe
# if cycles != 1:
while count <= cycles:
if count == 1:
start = timeStart.isoformat()
end = timeEnd.isoformat()
dataframe = c.get_product_historic_rates(f'{symbol.upper()}-USD', start, end, period)
dataframe = _make_df(dataframe)
df = dataframe
else:
# timeEnd = timeStart - (delta)
timeEnd = timeStart
timeStart = timeEnd - (300*delta)
end = timeEnd.isoformat()
start = timeStart.isoformat()
data = c.get_product_historic_rates(f'{symbol.upper()}-USD', start, end, period)
data = _make_df(data)
if data.empty:
break
df = _concat(dataframe, data)
dataframe = df
count += 1
# Check if symbol is listed and format dataframe
if df.empty:
return 'Error: That symbol is not listed on Coinbase'
else:
df['Date'] = pd.to_datetime(df['time'], unit='s')
data_df = df[['Date', 'Open', 'High', 'Low', 'Close']]
data_df = data_df[::-1].reset_index()
data_df.drop('index', axis=1, inplace=True)
data_df = data_df.set_index(['Date'])
return data_df
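# Illustrative usage (assumes network access to the public Coinbase API):
# df = getCBHistory('BTC', 3600, cycles=2)  # roughly 600 hourly candles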
def getLastRow(symbol: str, period: int):
'''
Returns most recent candle
Parameters:
symbol (str): The symbol of the cryptocurrency
period (int): Granularity of data in seconds.
Accepts one minute, five minutes, 15 minutes,
            one hour, six hours and one day.
Min = 60,
5Min = 300,
15Min = 900,
Hour = 3600,
            6Hour = 21600,
Day = 86400,
Returns:
data_df (dataframe): A pandas dataframe containing one row
'''
# Time formatting
delta = timedelta(seconds = int(period))
timeEnd = datetime.now() - delta
timeStart = timeEnd - (delta)
start = timeStart.isoformat()
end = timeEnd.isoformat()
# Row formatting
row = c.get_product_historic_rates(f'{symbol.upper()}-USD', start, end, period)
row = _make_df(row)
row['Date'] = pd.to_datetime(row['time'], unit='s')
row = row[['Date', 'Open', 'High', 'Low', 'Close']]
row = row.reset_index()
row.drop('index', axis=1, inplace=True)
row = row.set_index(['Date'])
# row = row.set_index(['Date'])
return row
```
#### File: jpmaddox97/trend_lines/slope.py
```python
from max_min import MaxMin
import pandas as pd
def get_maxmin(df: pd.DataFrame, period: int):
max_min = MaxMin(df, period)
return max_min.output()
def slope(dataframe):
# Get slope of last two minimum values
minimums = dataframe.loc[dataframe['Min'] == 1]
last_four = minimums.tail(2)
last_four.reset_index(inplace=True)
first_point = []
second_point = []
for i, row in last_four.iterrows():
if i == 0:
dt = last_four.iloc[i]['Date']
first_point.append(dt)
price = last_four.iloc[i]['Close']
first_point.append(price)
if i == 1:
dt = last_four.iloc[i]['Date']
second_point.append(dt)
price = last_four.iloc[i]['Close']
second_point.append(price)
change_y = second_point[1] - first_point[1]
change_x = (second_point[0] - first_point[0]).total_seconds()
return change_y/change_x
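# Worked example with illustrative numbers: minima at 10:00 with Close = 100 and
# at 11:00 with Close = 103 give change_y = 3 and change_x = 3600 s, so the
# returned slope is 3 / 3600, about 0.00083 price units per second.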
def slope_tick(dataframe):
# Get slope of last two minimum values
minimums = dataframe.loc[dataframe['Min'] == 1]
last_four = minimums.tail(1)
last_four.reset_index(inplace=True)
last_call = dataframe.tail(1)
first_point = []
dt = last_four.iloc[0]['Date']
# first_point.append(dt)
price = last_four.iloc[0]['Close']
# first_point.append(price)
dt_2 = last_call.iloc[0].name
price_2 = last_call.iloc[0]['Close']
print(f'Change in dt_2: {dt_2}')
print(f'Change in price_2: {price_2}')
change_y = price_2 - price
change_x = (dt_2 - dt).total_seconds()
print(f'Change in y: {change_y}')
print(f'Change in x: {change_x}')
return change_y/change_x
``` |
{
"source": "jpmaldonado/indoor-outdoor-classifier",
"score": 3
} |
#### File: jpmaldonado/indoor-outdoor-classifier/cli.py
```python
import os
import cv2
import argparse
# Disable keras "Using Tensorflow backend" message
import sys
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
from keras import Sequential
from keras.layers import Flatten, Dense
from keras.preprocessing.image import ImageDataGenerator
sys.stderr = stderr
# Disable tf warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def load_model():
# Initialize the saved model
model = Sequential([
Flatten(input_shape=(100, 100,3)),
Dense(128, activation='relu'),
Dense(10, activation='relu'),
Dense(1, activation='sigmoid')
])
model.load_weights("model.h5")
return model
def preprocess_img(image):
# Read and format the image to the model shape
img = cv2.imread(image)
img = cv2.resize(img,(100,100))
img = img.reshape(1,100,100,3)
return img
def main(image):
model = load_model()
img = preprocess_img(image)
pred = model.predict(img)
    label = 'indoor' if pred[0][0] < 0.5 else 'outdoor'  # Classes are stored alphabetically: indoor=0, outdoor=1
return label
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('image', help="Path to your test image")
args = parser.parse_args()
label = main(args.image)
print(label)
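    # Illustrative invocation: `python cli.py path/to/photo.jpg` prints either
    # 'indoor' or 'outdoor' for the given image.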
``` |
{
"source": "jpmaldonado/lid-optimizer",
"score": 2
} |
#### File: lid-optimizer/lidopt/model.py
```python
import numpy as np
from pyswmm import Simulation, LidGroups
from pyswmm.lidlayers import Soil
from pyswmm.lidcontrols import LidControls
from .parsers import parse_experiment, parse_report, merge_and_correct
from . import EXP, SIM, METRICS
def evaluate(inputfile=SIM['file'], experiment=None, reportfile='report.txt', params=None):
with Simulation(inputfile=inputfile) as simulation:
lid=LidControls(simulation)[SIM['lid.name']]
lid.drain.coefficient = params['drain.coefficient']
lid.drain.exponent = params['drain.exponent']
lid.drain.offset = params['drain.offset']
lid.drain.delay = params['drain.delay']
lid.soil.thickness = params['soil.thickness']
lid.soil.porosity = params['soil.porosity']
lid.soil.field_capacity = params['soil.field_capacity']
lid.soil.wilting_point = params['soil.wilting_point']
lid.soil.k_saturated = params['soil.k_saturated']
lid.soil.k_slope = params['soil.k_slope']
lid.soil.suction_head = params['soil.suction_head']
lid.surface.thickness = params['surface.thickness']
lid.surface.void_fraction = params['surface.void_fraction']
lid.surface.roughness = params['surface.roughness']
lid.surface.slope = params['surface.slope']
lid.storage.thickness = params['storage.thickness']
lid.storage.void_fraction = params['storage.void_fraction']
lid.storage.k_saturated = params['storage.k_saturated']
lid.storage.clog_factor = params['storage.clog_factor']
for step in simulation:
pass
print("\n")
print('Simulation executed')
metrics = {}
try:
# Read report and compare with experiment
report = parse_report('report.txt')
if experiment is None:
experiment = parse_experiment(EXP['file'])
out = merge_and_correct(experiment=experiment, report=report)
out.to_csv(reportfile)
    except Exception:
for metric in METRICS:
metrics[metric] = -1
return metrics
# Recover values from simulation and exp
sim_inflow = out[SIM['inflow_mm_hr']]
sim_outflow = out[SIM['outflow_mm_hr']]
exp_inflow = out[EXP['inflow']]
exp_outflow = out[EXP['outflow']]
metrics = calculate_metrics(sim_inflow, sim_outflow, exp_inflow, exp_outflow)
return metrics
#############################
# METRICS
#############################
def calculate_metrics(sim_inflow, sim_outflow, exp_inflow, exp_outflow):
metrics = {}
####################################################
# Inflow NSE
residuals = np.sum((sim_inflow-exp_inflow)**2)
ss = np.sum((exp_inflow-exp_inflow.mean())**2)
nse_inflow = (1-residuals/ss)
metrics['nse_inflow'] = nse_inflow
# Outflow NSE
residuals = np.sum((sim_outflow-exp_outflow)**2)
ss = np.sum((exp_outflow-exp_outflow.mean())**2)
nse_outflow = (1-residuals/ss)
metrics['nse_outflow'] = nse_outflow
# Inflow vol sim
volume_inflow_sim = np.sum(sim_inflow)
metrics['volume_inflow_sim'] = volume_inflow_sim
#Outflow vol sim
volume_outflow_sim = np.sum(sim_outflow)
metrics['volume_outflow_sim'] = volume_outflow_sim
# Inflow vol exp
volume_inflow_exp = np.sum(exp_inflow)
metrics['volume_inflow_exp'] = volume_inflow_exp
#Outflow vol sim
volume_outflow_exp = np.sum(exp_outflow)
metrics['volume_outflow_exp'] = volume_outflow_exp
# Percent bias
metrics['pbias'] = 100*(exp_outflow-sim_outflow).sum()/exp_outflow.sum()
# Peak flow
metrics['peak_flow'] = np.abs(exp_outflow.max()-sim_outflow.max())
# Time peak
metrics['time_peak'] = np.argmax(exp_outflow.values)-np.argmax(sim_outflow.values)
# Systematic deviation
metrics['sd'] = (exp_outflow-sim_outflow).mean()
    # Absolute deviation
metrics['ad'] = (exp_outflow-sim_outflow).abs().mean()
# Quadratic deviation
metrics['qd'] = np.sqrt(np.sum((exp_outflow.values-sim_outflow.values)**2)/len(exp_outflow))
# deltaV
metrics['deltaV'] = (volume_inflow_exp - volume_inflow_sim) / volume_inflow_exp
return metrics
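# Worked NSE example with illustrative numbers: sim = [1, 2, 3] against
# exp = [1, 2, 4] gives residuals = 1 and ss = sum((exp - mean(exp))**2) ~ 4.67,
# so NSE = 1 - 1 / 4.67 ~ 0.79 (NSE = 1 would be a perfect match).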
```
#### File: lid-optimizer/lidopt/parsers.py
```python
import numpy as np
import pandas as pd
from . import EXP, SIM, START_DATE, END_DATE, CYLINDER, CYLINDER_DELAY_EXP, CYLINDER_DELAY_SIM
def parse_experiment(experiment_file=EXP['file']):
exp = pd.read_csv(experiment_file
, sep='\t'
, parse_dates=True
)
exp['DateTime'] = exp['Date']+' '+exp['Time']
exp.index = pd.to_datetime(exp['DateTime'], format='%m/%d/%Y %H:%M')
exp.sort_index(ascending=True, inplace=True)
return exp.loc[:,[EXP['inflow'], EXP['outflow']]]
def parse_report(report_file='report.txt', names=SIM['headers']):
report = pd.read_csv(report_file
, skiprows=9
, header=None
, parse_dates=True
, index_col=0
, sep='\t'
, names=names)
report.sort_index(ascending=True, inplace=True)
return report
def assign_events(df, t, C):
    # Assigns an event number (event_num) to each row of the dataset df
    # df = dataset
    # t = minimum time gap between two events, in seconds (rows separated by
    #     more than t start a new event)
    # C = the column with rainfall data
df = df[df[C] != 0].reset_index()
df['event_num'] = 0
for i in range(1, df.shape[0]):
td = df.loc[i, 'DateTime'] - df.loc[i-1, 'DateTime']
        if td.total_seconds() > t:
df.loc[i, 'event_num'] = df.loc[i-1, 'event_num'] + 1
else:
df.loc[i, 'event_num'] = df.loc[i-1, 'event_num']
return df
def merge_and_correct(report, experiment):
if CYLINDER:
# Merging to account for cylinder experiment
exp_cols = [EXP['inflow'], EXP['outflow']]
rain = assign_events(experiment, CYLINDER_DELAY_EXP, EXP['inflow'])
rain = rain[['DateTime', 'event_num']]
rain = pd.merge(rain, experiment, how='outer', on='DateTime')
rain = rain[['DateTime', 'event_num']+exp_cols]
rain = rain.fillna(method='ffill')
# Same shift for simulation
sim = assign_events(report, CYLINDER_DELAY_SIM, SIM['inflow_mm_hr'])
sim_cols = [SIM['inflow_mm_hr'], SIM['outflow_mm_hr']]
sim = sim[['DateTime', 'event_num']]
sim = pd.merge(sim, report, how='outer', on='DateTime')
sim = sim[['DateTime', 'event_num']+sim_cols]
sim = sim.fillna(method='ffill')
sim.drop_duplicates(inplace=True)
out = pd.merge(sim, rain, on=['DateTime','event_num'], how='outer')
cols = ['DateTime']+sim_cols+exp_cols
out = out[cols]
out.drop_duplicates(keep='first', inplace=True)
out.set_index('DateTime', inplace=True)
out.fillna(0, inplace=True)
else:
out = pd.merge(report, experiment, left_index=True, right_index=True, how='outer')
# Correct NAs
out.fillna(0, inplace=True)
out = out.loc[START_DATE:END_DATE]
if out.shape[0]==0:
print("Check the dates! Invalid date range/format")
return out[[SIM['inflow_mm_hr'], SIM['outflow_mm_hr'], EXP['inflow'], EXP['outflow']]]
def convert_units(flow):
'''
Input: flow vector in mm/hr
Output: flow vector in ml/min
'''
diameter = EXP['diameter']
area = diameter**2*np.pi/4
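    # NOTE: the mm/hr -> ml/min conversion described in the docstring is not
    # applied; 'area' is computed but unused and the flow is returned unchanged.
    # Assuming the diameter is given in mm, the full conversion would be roughly
    # flow * area * 1e-3 / 60 (mm/hr * mm^2 -> mm^3/hr -> ml/min).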
return flow
``` |
{
"source": "jpmaldonado/nb-hadoop",
"score": 3
} |
#### File: jpmaldonado/nb-hadoop/naive_bayes.py
```python
import re
import math
class classifier:
def __init__(self,getfeatures, filename = None):
# counts of feature/category combinations
self.fc={}
# counts of document in each category
self.cc = {}
self.getfeatures = getfeatures
# increase the count of a feature/category pair
def incf(self, f, cat):
self.fc.setdefault(f,{})
self.fc[f].setdefault(cat,0)
self.fc[f][cat]+=1
def incc(self,cat):
self.cc.setdefault(cat,0)
self.cc[cat] += 1
# The number of times a feature has appeared in a category
def fcount(self,f,cat):
if f in self.fc and cat in self.fc[f]:
return float(self.fc[f][cat])
return 0.0
# The number of items in a category
def catcount(self,cat):
if cat in self.cc:
return float(self.cc[cat])
return 0.0
#The total number of items
def totalcount(self):
return sum(self.cc.values())
def categories(self):
return self.cc.keys()
def train(self,item,cat):
features = self.getfeatures(item)
# Increment the count for every feature with this category
for f in features:
self.incf(f,cat)
self.incc(cat)
def fprob(self,f,cat):
if self.catcount(cat)==0:
return 0.0
return self.fcount(f,cat)/self.catcount(cat)
def weightedprob(self, f, cat, prf, weight = 1.0, ap=0.5):
# Calculate current probability
basicprob = prf(f,cat)
# Count the number of items the feature has appeared in all categories
totals = sum([self.fcount(f,c) for c in self.categories()])
# Calculate the weighted average
bp = (weight*ap+totals*basicprob)/(weight+totals)
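        # e.g. with weight = 1.0, ap = 0.5, totals = 4 and basicprob = 0.75 this
        # gives bp = (1*0.5 + 4*0.75) / (1 + 4) = 0.7, pulling sparse counts
        # toward the assumed prior ap.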
return bp
def classify(self,item,default = None):
probs = {}
# Find the category with the highest probability
max = 0.0
for cat in self.categories():
probs[cat] = self.prob(item,cat)
if probs[cat]>max:
max = probs[cat]
best = cat
if probs[best]<0.1: return default
        # Make sure that the probability exceeds threshold*next best
for cat in probs:
if cat==best: continue
if probs[cat]*self.getthreshold(best)>probs[best]: return default
return best
class naivebayes(classifier):
def __init__(self,getfeatures):
classifier.__init__(self,getfeatures)
self.thresholds = {}
def setthreshold(self,cat,t):
self.thresholds[cat] = t
def getthreshold(self,cat):
if cat not in self.thresholds: return 1.0
return self.thresholds[cat]
def docprob(self,item,cat):
features = self.getfeatures(item)
# Multiply the probabilities for all the features together
p = 1.0
for f in features: p*=self.weightedprob(f,cat,self.fprob)
return p
def prob(self,item,cat):
catprob = self.catcount(cat)/self.totalcount()
docprob = self.docprob(item,cat)
return docprob*catprob
def sampletrain(cl):
cl.train('sport hokej soccer videohry','M')
cl.train('boty fashion dieta kosmetika', 'F')
def getwords(url):
splitter = re.compile('\W*')
words = [s.lower() for s in splitter.split(url) if len(s)>2]
return dict([(w,1) for w in words])
#@outputSchema('num:long')
#def predict(feature_vector):
# cl = naivebayes(getwords)
# sampletrain(cl)
#
#
# return len(cl.classify(feature_vector,default='unknown'))
if __name__ == '__main__':
cl = naivebayes(getwords)
# cl.train('the quick brown fox jumps','good')
sampletrain(cl)
print cl.prob('pubcrawl.cz/en/prague-pub-crawl/','M')
print cl.classify('pubcrawl.cz/en/prague-pub-crawl/',default = 'unknown')
``` |
{
"source": "jpmaldonado/opencv-np-workshop",
"score": 3
} |
#### File: opencv-np-workshop/pyqt-samples/process image.py
```python
import sys
import cv2
import numpy
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QVBoxLayout, QHBoxLayout, QPushButton, QFileDialog
from PyQt5.QtCore import Qt
class Example(QWidget):
def __init__(self):
super().__init__()
self.image = None
self.label = QLabel()
self.initUI()
def initUI(self):
self.label.setText('OpenCV Image')
self.label.setAlignment(Qt.AlignCenter)
self.label.setStyleSheet('border: gray; border-style:solid; border-width: 1px;')
btn_open = QPushButton('Open Image...')
btn_open.clicked.connect(self.openImage)
btn_process = QPushButton('Process Image')
btn_process.clicked.connect(self.processImage)
top_bar = QHBoxLayout()
top_bar.addWidget(btn_open)
top_bar.addWidget(btn_process)
root = QVBoxLayout(self)
root.addLayout(top_bar)
root.addWidget(self.label)
self.resize(540, 574)
self.setWindowTitle('OpenCV & PyQT 5')
def openImage(self):
filename, _ = QFileDialog.getOpenFileName(None, 'Search Image', '.', 'Image Files (*.png *.jpg *.jpeg *.bmp)')
if filename:
with open(filename, "rb") as file:
data = numpy.array(bytearray(file.read()))
self.image = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
# self.image = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
self.showImage()
def processImage(self):
if self.image is not None:
# self.image = cv2.GaussianBlur(self.image, (5, 5), 0)
# self.image = cv2.Canny(self.image, 100, 200)
gray = cv2.cvtColor(self.image, cv2.COLOR_RGB2GRAY) \
if len(self.image.shape) >= 3 else self.image
blur = cv2.GaussianBlur(gray, (21, 21), 0, 0)
self.image = cv2.divide(gray, blur, scale=256)
self.showImage()
def showImage(self):
size = self.image.shape
        step = int(self.image.size / size[0])
qformat = QImage.Format_Indexed8
if len(size) == 3:
if size[2] == 4:
qformat = QImage.Format_RGBA8888
else:
qformat = QImage.Format_RGB888
img = QImage(self.image, size[1], size[0], step, qformat)
img = img.rgbSwapped()
self.label.setPixmap(QPixmap.fromImage(img))
self.resize(self.label.pixmap().size())
if __name__ == '__main__':
app = QApplication(sys.argv)
win = Example()
win.show()
sys.exit(app.exec_())
``` |
{
"source": "jpmaldonado/packt-rl",
"score": 3
} |
#### File: packt-rl/Day 1/05 fa-cartpole-sklearn.py
```python
import numpy as np
from collections import deque
from sklearn.neural_network import MLPRegressor
# The estimator is of the form (s,a) -> scalar value
class FunctionEstimator:
def __init__(self,n_actions):
self.n_actions = n_actions
self.initial_state = env.reset()
self.model = self._build_model()
self.memory_buffer = deque(maxlen=2000)
self.update_buffer = []
def _concat(self, state, action):
return np.hstack([state,action])
def _build_model(self):
model = MLPRegressor(
tol = 1e-3
, hidden_layer_sizes=(24,24,)
, activation='tanh'
, learning_rate='adaptive'
, solver = 'sgd'
, warm_start=True
#, batch_size=1
)
#initialize agent to the correct dimensions
model.partial_fit(
[self._concat(self.initial_state,a) for a in range(self.n_actions)]
,range(self.n_actions)
)
return model
def update(self,buffer):
states = [buffer[ix][0] for ix in range(len(buffer))]
actions = [buffer[ix][1] for ix in range(len(buffer))]
td_targets = [buffer[ix][2] for ix in range(len(buffer))]
for state, action, target in zip(states, actions, td_targets):
            self.model.fit(self._concat(state, action).reshape(1, -1), [target])
def predict(self,state):
concats = [np.array([self._concat(state, a)]).reshape(1,-1) for a in range(self.n_actions)]
return [self.model.predict(c) for c in concats]
def remember(self, state, action,td_target):
self.memory_buffer.append((state,action,td_target))
def replay(self,batch_size):
# Experience replay
# choose only a sample from the collected experience
update_buffer_idxs = np.random.choice(len(self.memory_buffer)
, size=min(len(self.memory_buffer), batch_size)
, replace=False
)
update_buffer_idxs = np.ravel(update_buffer_idxs)
for ix in range(len(update_buffer_idxs)):
saved_ix = update_buffer_idxs[ix]
self.update_buffer.append(self.memory_buffer[saved_ix])
self.update(self.update_buffer)
## Auxiliary function for the policy
def make_policy(estimator, n_actions, ep):
def policy_fn(state):
preds = np.ravel(estimator.predict(state))
noise = np.ravel(np.random.randn(1,n_actions)*(100./(ep+1)))
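        # The noise term decays as 100 / (ep + 1): early episodes explore almost
        # at random, later episodes mostly follow the estimator's greedy action.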
action = np.argmax(preds+noise)
return action
return policy_fn
if __name__ == "__main__":
import gym
gym.logger.set_level(40)
env = gym.make('CartPole-v0')
n_episodes = 2000
gamma = 1
estimator = FunctionEstimator(env.action_space.n)
score = []
for ep in range(n_episodes):
state = env.reset()
done = False
policy = make_policy(estimator, env.action_space.n, ep)
ep_reward = 0
while not done:
action = policy(state)
new_state, reward, done, _ = env.step(action)
ep_reward += reward
# Update the Q-function
if done:
td_target = reward
else:
                td_target = reward + gamma*np.max(estimator.predict(new_state))
estimator.remember(state,action, td_target)
# Update the state
state = new_state
#
estimator.replay(32)
# Show stats
if done:
if len(score) < 100:
score.append(ep_reward)
else:
score[ep % 100] = ep_reward
if (ep+1) % 100 == 0:
print("Number of episodes: {} . Average 100-episode reward: {}".format(ep+1, np.mean(score)))
``` |
{
"source": "jpmaldonado/python4finance",
"score": 4
} |
#### File: jpmaldonado/python4finance/01 Data structures.py
```python
print("Hello World")
###########################
# DATA TYPES
###########################
# First type: Boolean
x = True
y = 100 < 10
y
x * y
x + y
# Numeric data (integers and floats)
a = 3
b = 2
a + b
a * b
# Exact division (real numbers)
a / b
# Integer division
a // b
c, d = 2.5, 10.0
# Complex numbers
z = complex(1, 2)
z
# Strings
x = "I am a string"
y = "... and I am a string too"
# String methods:
x.upper()
x.lower()
x.capitalize()
x.split(" ")
x.find('string')
y.replace('.', '*')
# Tells us the type of the object
type(x)
x + y
x * 2
'test ' * 2
"test " * 2
###########################
# CONTAINERS
###########################
# These are objects that contain other objects.
# LISTS
x = [1, 'a', 2.0]
x
x[0]
x[2]
x[-1]
x[-2]
x[0] = 'pablo'
x # The new list is now modified
# List can be used for arrays
v = [0.5, 0.75, 1.0, 1.5]
m = [v, v, v]
m[0][0], m[1][0]
# Slicing
v[1:]
# Last index not included
v[1:3]
# Last but one
v[:-1]
# Read the list in reverse order
v[::-1]
# Lists copy values by reference!
v[0] = 'hi'
# Values of m have now changed
m
# To avoid this, you can use deepcopy
from copy import deepcopy
v = [0.5, 0.75, 1.0, 1.5]
m = [deepcopy(v), ]
print(m)
v[0] = 'hi'
print(m)
# TUPLES
y = (1, 'a', 2.0)
y
type(y)
y[0]
y[0] = 'something' # Error! You can not overwrite tuples
# Unpacking: Storing the information of an object in different parts
names = ('Juan', 'Pablo', 'Maldonado')
first_name, second_name, last_name = names
first_name
# Parse a string into a list
single_line = 'pablo,maldonado,zizkov'
my_data = single_line.split(',')
my_data
# put it again together
new_single_line = ','.join(my_data)
new_single_line
# SETS
s = set(['u','d', 'ud', 'd'])
t = set(['d', 'du'])
s.intersection(t)
s.union(t)
s.difference(t)
s.symmetric_difference(t)
# Sets are useful for removing duplicates
from random import randint
l = [randint(0,10) for i in range(1000)]
len(l)
set(l)
# DICTIONARIES
d = {'name': 'Pablo', 'last_name': 'M'}
type(d)
d['name']
d['last_name']
d.keys()
d.items()
del d['name']
################################
# CONTROL STRUCTURES
################################
x_vals = [1, 2, 3, 4, 5]
for x in x_vals:
print(x * x)
# Sum of squares
total = 0
for x in x_vals:
total = total + x * x
total
# The Python way: Using list comprehension!
sum([x * x for x in x_vals])
# List comprehension is a very useful way of doing loops "faster"
[x * x for x in x_vals]
# Ranges of numbers:
my_vals = range(1, 20)
# Run the following. What does it print?
for i in my_vals:
print(i)
my_vals
# Calculating squares in two different ways
sum([x * x for x in my_vals])
sum([x ** 2 for x in my_vals])
# Example: Calculate the distance between two vectors
####
from math import sqrt
x = [3, 0]
y = [0, 4]
dist = 0
for i in range(len(x)):
dist += (x[i] - y[i]) ** 2
dist = sqrt(dist)
dist
# How can we re-write this?
def my_dist(x, y):
dist2 = sum([(x[i] - y[i]) ** 2 for i in range(len(x))])
dist = sqrt(dist2)
return dist
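my_dist(x, y)  # 5.0 (the 3-4-5 triangle defined above)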
```
#### File: jpmaldonado/python4finance/02 Code vectorization and numpy.py
```python
import numpy as np
a = np.array([0, 0.5, 1.0, 1.5, 2.0])
type(a)
a[:2] # Slicing works as for lists
# Built in methods
a.sum()
a.std()
a.cumsum()
a.max()
a.argmax()
# Careful with np.max!
np.max(2, 0)
np.max(-2, 0) # silent fail :) second argument is the axis
np.max(0, 2) # fail
np.maximum(-2, 0)
# Vectorized operations: operations are applied to each element
a*2
a**2
np.sqrt(a)
np.log2(a+1)
b = np.array([a, a*2])
b
b.sum(axis=0) # sum along axis 0 ==> columns
b.sum()
b.sum(axis=1)
eye = np.identity(4)
eye
np.ones_like(eye)
np.ones((2,3))
zeros = np.zeros((2,3,4))
zeros.shape
zeros[1]
# Optimized for speed!
import time
start = time.time()
acc = 0
for i in range(1000):
for j in range(1000):
acc += np.random.standard_normal()
end = time.time()
print("It took (ms): ", (end-start)*1000)
# Numpy outsources the loops to underlying C code for performance
# %timeit test = np.random.standard_normal((1000,1000)).sum()
# CODE VECTORIZATION
r = np.random.standard_normal((4,3))
s = np.random.standard_normal((4,3))
r+s
# Broadcasting
2*r+3 # same as 2*r+3*np.ones_like(r)
########################################
########################################
######## MOVE TO BROWSER HERE
########################################
########################################
# Functions are applied element-wise.
def f(x):
return 3*x+5
f(3)
f(r)
import math
math.sin(math.pi)
math.sin(r) # Error: this function only takes real numbers!
np.sin(r)
type(np.sin) # ufunc: universal function (works with arrays too)
```
#### File: jpmaldonado/python4finance/07 OOP.py
```python
import numpy as np
import matplotlib.pyplot as plt
# ### Basics of Python Classes
class ExampleOne(object):
pass
c = ExampleOne()
c.__str__()
type(c)
class ExampleTwo(object):
def __init__(self, a, b):
self.a = a
self.b = b
c = ExampleTwo(1, 'text')
c.a
c.b
c.a = 100
c.a
class Math(object):
def __init__(self, a, b):
self.a = a
self.b = b
def addition(self):
return self.a + self.b
def difference(self):
return self.a-self.b
c = Math(10, 15)
c.addition()
c.difference()
class MoreMath(Math):
def multiplication(self):
return self.a * self.b
def sumproduct(self):
return self.multiplication()+self.addition()
c = MoreMath(10, 15)
# ### Simple Short Rate Class
class ShortRate(object):
''' Class to model a constant short rate object.
Parameters
==========
name : string
name of the object
rate : float
positive, constant short rate
Methods
=======
get_discount_factors :
returns discount factors for given list/array
of dates/times (as year fractions)
'''
    def __init__(self, name, rate):
        self.name = name
        self.rate = rate
def get_discount_factors(self, times):
''' times : list/array-like '''
times = np.array(times)
return np.exp(-self.rate * times)
sr = ShortRate('sr', 0.05)
sr.name, sr.rate
times = [0.0, 0.5, 1.0, 1.25, 1.75, 2.0] # in year fractions
sr.get_discount_factors(times)
# Discount factors for different short rates over 5 years
for r in [0.025, 0.05, 0.1, 0.15]:
sr.rate = r
plt.plot(times, sr.get_discount_factors(times),
label='r=%4.2f' % sr.rate, lw=1.5)
plt.xlabel('years')
plt.ylabel('discount factor')
plt.grid(True)
plt.legend(loc=0)
# Calculate present values of future cash flows
sr.rate = 0.05
cash_flows = np.array([-100, 50, 75])
times = [0.0, 1.0, 2.0]
disc_facts = sr.get_discount_factors(times)
disc_facts
# present value list
disc_facts * cash_flows
# net present value
np.sum(disc_facts * cash_flows)
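# With r = 0.05 the discount factors are roughly [1.0, 0.9512, 0.9048], so the
# present values are about [-100, 47.56, 67.86] and the NPV is about 15.42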
sr.rate = 0.15
np.sum(sr.get_discount_factors(times) * cash_flows)
# EXERCISES:
# 1) Make a class that wraps up these computations
# Your class should take as attributes
# a name, times, cash_flows, instance of ShortRate class
# and methods to calculate a present value list and the net present value
# 2) Extend your class to analyze the sensitivity to the short rate
# ### SOLUTION: Cash Flow Series Class
#
class CashFlowSeries:
''' Class to model a cash flows series.
Attributes
==========
name : string
name of the object
times : list/array-like
list of (positive) year fractions
cash_flows : list/array-like
corresponding list of cash flow values
short_rate : instance of short_rate class
short rate object used for discounting
Methods
=======
present_value_list :
returns an array with present values
net_present_value :
returns NPV for cash flow series
'''
def __init__(self, name, times, cash_flows, short_rate):
self.name = name
self.times = times
self.cash_flows = cash_flows
self.short_rate = short_rate
def present_value_list(self):
df = self.short_rate.get_discount_factors(self.times)
return np.array(self.cash_flows) * df
def net_present_value(self):
return np.sum(self.present_value_list())
sr.rate = 0.05
cfs = CashFlowSeries('cfs', times, cash_flows, sr)
cfs.cash_flows
cfs.times
cfs.present_value_list()
cfs.net_present_value()
class CashFlowSeriesSensitivity(CashFlowSeries):
def npv_sensitivity(self, short_rates):
npvs = []
for rate in short_rates:
sr.rate = rate
npvs.append(self.net_present_value())
return np.array(npvs)
cfs_sens = CashFlowSeriesSensitivity('cfs', times, cash_flows, sr)
short_rates = [0.01, 0.025, 0.05, 0.075, 0.1, 0.125, 0.15, 0.2]
npvs = cfs_sens.npv_sensitivity(short_rates)
npvs
plt.plot(short_rates, npvs, 'b')
plt.plot(short_rates, npvs, 'ro')
plt.plot((0, max(short_rates)), (0, 0), 'r', lw=2)
plt.grid(True)
plt.xlabel('short rate')
plt.ylabel('net present value')
plt.show()
``` |
{
"source": "jpmallarino/django",
"score": 2
} |
#### File: django/middleware/csrf.py
```python
import logging
import string
from collections import defaultdict
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.http import UnreadablePostError
from django.http.request import HttpHeaders
from django.urls import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.crypto import constant_time_compare, get_random_string
from django.utils.deprecation import MiddlewareMixin
from django.utils.functional import cached_property
from django.utils.http import is_same_domain
from django.utils.log import log_response
from django.utils.regex_helper import _lazy_re_compile
logger = logging.getLogger("django.security.csrf")
# This matches if any character is not in CSRF_ALLOWED_CHARS.
invalid_token_chars_re = _lazy_re_compile("[^a-zA-Z0-9]")
REASON_BAD_ORIGIN = "Origin checking failed - %s does not match any trusted origins."
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match any trusted origins."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_CSRF_TOKEN_MISSING = "CSRF token missing."
REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed."
REASON_INSECURE_REFERER = (
"Referer checking failed - Referer is insecure while host is secure."
)
# The reason strings below are for passing to InvalidTokenFormat. They are
# phrases without a subject because they can be in reference to either the CSRF
# cookie or non-cookie token.
REASON_INCORRECT_LENGTH = "has incorrect length"
REASON_INVALID_CHARACTERS = "has invalid characters"
CSRF_SECRET_LENGTH = 32
CSRF_TOKEN_LENGTH = 2 * CSRF_SECRET_LENGTH
CSRF_ALLOWED_CHARS = string.ascii_letters + string.digits
CSRF_SESSION_KEY = "_csrftoken"
def _get_failure_view():
"""Return the view to be used for CSRF rejections."""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_string():
return get_random_string(CSRF_SECRET_LENGTH, allowed_chars=CSRF_ALLOWED_CHARS)
def _mask_cipher_secret(secret):
"""
Given a secret (assumed to be a string of CSRF_ALLOWED_CHARS), generate a
token by adding a mask and applying it to the secret.
"""
mask = _get_new_csrf_string()
chars = CSRF_ALLOWED_CHARS
pairs = zip((chars.index(x) for x in secret), (chars.index(x) for x in mask))
cipher = "".join(chars[(x + y) % len(chars)] for x, y in pairs)
return mask + cipher
def _unmask_cipher_token(token):
"""
Given a token (assumed to be a string of CSRF_ALLOWED_CHARS, of length
CSRF_TOKEN_LENGTH, and that its first half is a mask), use it to decrypt
the second half to produce the original secret.
"""
mask = token[:CSRF_SECRET_LENGTH]
token = token[CSRF_SECRET_LENGTH:]
chars = CSRF_ALLOWED_CHARS
pairs = zip((chars.index(x) for x in token), (chars.index(x) for x in mask))
return "".join(chars[x - y] for x, y in pairs) # Note negative values are ok
def _add_new_csrf_cookie(request):
"""Generate a new random CSRF_COOKIE value, and add it to request.META."""
csrf_secret = _get_new_csrf_string()
request.META.update(
{
# RemovedInDjango50Warning: when the deprecation ends, replace
# with: 'CSRF_COOKIE': csrf_secret
"CSRF_COOKIE": (
_mask_cipher_secret(csrf_secret)
if settings.CSRF_COOKIE_MASKED
else csrf_secret
),
"CSRF_COOKIE_NEEDS_UPDATE": True,
}
)
return csrf_secret
def get_token(request):
"""
Return the CSRF token required for a POST form. The token is an
alphanumeric value. A new token is created if one is not already set.
A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
if "CSRF_COOKIE" in request.META:
csrf_secret = request.META["CSRF_COOKIE"]
# Since the cookie is being used, flag to send the cookie in
# process_response() (even if the client already has it) in order to
# renew the expiry timer.
request.META["CSRF_COOKIE_NEEDS_UPDATE"] = True
else:
csrf_secret = _add_new_csrf_cookie(request)
return _mask_cipher_secret(csrf_secret)
def rotate_token(request):
"""
Change the CSRF token in use for a request - should be done on login
for security purposes.
"""
_add_new_csrf_cookie(request)
class InvalidTokenFormat(Exception):
def __init__(self, reason):
self.reason = reason
def _check_token_format(token):
"""
Raise an InvalidTokenFormat error if the token has an invalid length or
characters that aren't allowed. The token argument can be a CSRF cookie
secret or non-cookie CSRF token, and either masked or unmasked.
"""
if len(token) not in (CSRF_TOKEN_LENGTH, CSRF_SECRET_LENGTH):
raise InvalidTokenFormat(REASON_INCORRECT_LENGTH)
# Make sure all characters are in CSRF_ALLOWED_CHARS.
if invalid_token_chars_re.search(token):
raise InvalidTokenFormat(REASON_INVALID_CHARACTERS)
def _does_token_match(request_csrf_token, csrf_secret):
"""
Return whether the given CSRF token matches the given CSRF secret, after
unmasking the token if necessary.
This function assumes that the request_csrf_token argument has been
validated to have the correct length (CSRF_SECRET_LENGTH or
CSRF_TOKEN_LENGTH characters) and allowed characters, and that if it has
length CSRF_TOKEN_LENGTH, it is a masked secret.
"""
# Only unmask tokens that are exactly CSRF_TOKEN_LENGTH characters long.
if len(request_csrf_token) == CSRF_TOKEN_LENGTH:
request_csrf_token = _unmask_cipher_token(request_csrf_token)
assert len(request_csrf_token) == CSRF_SECRET_LENGTH
return constant_time_compare(request_csrf_token, csrf_secret)
class RejectRequest(Exception):
def __init__(self, reason):
self.reason = reason
class CsrfViewMiddleware(MiddlewareMixin):
"""
Require a present and correct csrfmiddlewaretoken for POST requests that
have a CSRF cookie, and set an outgoing CSRF cookie.
This middleware should be used in conjunction with the {% csrf_token %}
template tag.
"""
@cached_property
def csrf_trusted_origins_hosts(self):
return [
urlparse(origin).netloc.lstrip("*")
for origin in settings.CSRF_TRUSTED_ORIGINS
]
@cached_property
def allowed_origins_exact(self):
return {origin for origin in settings.CSRF_TRUSTED_ORIGINS if "*" not in origin}
@cached_property
def allowed_origin_subdomains(self):
"""
A mapping of allowed schemes to list of allowed netlocs, where all
subdomains of the netloc are allowed.
"""
allowed_origin_subdomains = defaultdict(list)
for parsed in (
urlparse(origin)
for origin in settings.CSRF_TRUSTED_ORIGINS
if "*" in origin
):
allowed_origin_subdomains[parsed.scheme].append(parsed.netloc.lstrip("*"))
return allowed_origin_subdomains
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
response = _get_failure_view()(request, reason=reason)
log_response(
"Forbidden (%s): %s",
reason,
request.path,
response=response,
request=request,
logger=logger,
)
return response
def _get_secret(self, request):
"""
Return the CSRF secret originally associated with the request, or None
if it didn't have one.
If the CSRF_USE_SESSIONS setting is false, raises InvalidTokenFormat if
the request's secret has invalid characters or an invalid length.
"""
if settings.CSRF_USE_SESSIONS:
try:
csrf_secret = request.session.get(CSRF_SESSION_KEY)
except AttributeError:
raise ImproperlyConfigured(
"CSRF_USE_SESSIONS is enabled, but request.session is not "
"set. SessionMiddleware must appear before CsrfViewMiddleware "
"in MIDDLEWARE."
)
else:
try:
csrf_secret = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
csrf_secret = None
else:
# This can raise InvalidTokenFormat.
_check_token_format(csrf_secret)
if csrf_secret is None:
return None
# Django versions before 4.0 masked the secret before storing.
if len(csrf_secret) == CSRF_TOKEN_LENGTH:
csrf_secret = _unmask_cipher_token(csrf_secret)
return csrf_secret
def _set_csrf_cookie(self, request, response):
if settings.CSRF_USE_SESSIONS:
if request.session.get(CSRF_SESSION_KEY) != request.META["CSRF_COOKIE"]:
request.session[CSRF_SESSION_KEY] = request.META["CSRF_COOKIE"]
else:
response.set_cookie(
settings.CSRF_COOKIE_NAME,
request.META["CSRF_COOKIE"],
max_age=settings.CSRF_COOKIE_AGE,
domain=settings.CSRF_COOKIE_DOMAIN,
path=settings.CSRF_COOKIE_PATH,
secure=settings.CSRF_COOKIE_SECURE,
httponly=settings.CSRF_COOKIE_HTTPONLY,
samesite=settings.CSRF_COOKIE_SAMESITE,
)
# Set the Vary header since content varies with the CSRF cookie.
patch_vary_headers(response, ("Cookie",))
def _origin_verified(self, request):
request_origin = request.META["HTTP_ORIGIN"]
try:
good_host = request.get_host()
except DisallowedHost:
pass
else:
good_origin = "%s://%s" % (
"https" if request.is_secure() else "http",
good_host,
)
if request_origin == good_origin:
return True
if request_origin in self.allowed_origins_exact:
return True
try:
parsed_origin = urlparse(request_origin)
except ValueError:
return False
request_scheme = parsed_origin.scheme
request_netloc = parsed_origin.netloc
return any(
is_same_domain(request_netloc, host)
for host in self.allowed_origin_subdomains.get(request_scheme, ())
)
def _check_referer(self, request):
referer = request.META.get("HTTP_REFERER")
if referer is None:
raise RejectRequest(REASON_NO_REFERER)
try:
referer = urlparse(referer)
except ValueError:
raise RejectRequest(REASON_MALFORMED_REFERER)
# Make sure we have a valid URL for Referer.
if "" in (referer.scheme, referer.netloc):
raise RejectRequest(REASON_MALFORMED_REFERER)
# Ensure that our Referer is also secure.
if referer.scheme != "https":
raise RejectRequest(REASON_INSECURE_REFERER)
if any(
is_same_domain(referer.netloc, host)
for host in self.csrf_trusted_origins_hosts
):
return
# Allow matching the configured cookie domain.
good_referer = (
settings.SESSION_COOKIE_DOMAIN
if settings.CSRF_USE_SESSIONS
else settings.CSRF_COOKIE_DOMAIN
)
if good_referer is None:
# If no cookie domain is configured, allow matching the current
# host:port exactly if it's permitted by ALLOWED_HOSTS.
try:
# request.get_host() includes the port.
good_referer = request.get_host()
except DisallowedHost:
raise RejectRequest(REASON_BAD_REFERER % referer.geturl())
else:
server_port = request.get_port()
if server_port not in ("443", "80"):
good_referer = "%s:%s" % (good_referer, server_port)
if not is_same_domain(referer.netloc, good_referer):
raise RejectRequest(REASON_BAD_REFERER % referer.geturl())
def _bad_token_message(self, reason, token_source):
if token_source != "POST":
# Assume it is a settings.CSRF_HEADER_NAME value.
header_name = HttpHeaders.parse_header_name(token_source)
token_source = f"the {header_name!r} HTTP header"
return f"CSRF token from {token_source} {reason}."
def _check_token(self, request):
# Access csrf_secret via self._get_secret() as rotate_token() may have
# been called by an authentication middleware during the
# process_request() phase.
try:
csrf_secret = self._get_secret(request)
except InvalidTokenFormat as exc:
raise RejectRequest(f"CSRF cookie {exc.reason}.")
if csrf_secret is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
raise RejectRequest(REASON_NO_CSRF_COOKIE)
# Check non-cookie token for match.
request_csrf_token = ""
if request.method == "POST":
try:
request_csrf_token = request.POST.get("csrfmiddlewaretoken", "")
except UnreadablePostError:
# Handle a broken connection before we've completed reading the
# POST data. process_view shouldn't raise any exceptions, so
# we'll ignore and serve the user a 403 (assuming they're still
# listening, which they probably aren't because of the error).
pass
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX, and
# possible for PUT/DELETE.
try:
# This can have length CSRF_SECRET_LENGTH or CSRF_TOKEN_LENGTH,
# depending on whether the client obtained the token from
# the DOM or the cookie (and if the cookie, whether the cookie
# was masked or unmasked).
request_csrf_token = request.META[settings.CSRF_HEADER_NAME]
except KeyError:
raise RejectRequest(REASON_CSRF_TOKEN_MISSING)
token_source = settings.CSRF_HEADER_NAME
else:
token_source = "POST"
try:
_check_token_format(request_csrf_token)
except InvalidTokenFormat as exc:
reason = self._bad_token_message(exc.reason, token_source)
raise RejectRequest(reason)
if not _does_token_match(request_csrf_token, csrf_secret):
reason = self._bad_token_message("incorrect", token_source)
raise RejectRequest(reason)
def process_request(self, request):
try:
csrf_secret = self._get_secret(request)
except InvalidTokenFormat:
_add_new_csrf_cookie(request)
else:
if csrf_secret is not None:
# Use the same secret next time. If the secret was originally
# masked, this also causes it to be replaced with the unmasked
# form, but only in cases where the secret is already getting
# saved anyways.
request.META["CSRF_COOKIE"] = csrf_secret
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, "csrf_processing_done", False):
return None
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, "csrf_exempt", False):
return None
# Assume that anything not defined as 'safe' by RFC7231 needs protection
if request.method in ("GET", "HEAD", "OPTIONS", "TRACE"):
return self._accept(request)
if getattr(request, "_dont_enforce_csrf_checks", False):
# Mechanism to turn off CSRF checks for test suite. It comes after
# the creation of CSRF cookies, so that everything else continues
# to work exactly the same (e.g. cookies are sent, etc.), but
# before any branches that call the _reject method.
return self._accept(request)
# Reject the request if the Origin header doesn't match an allowed
# value.
if "HTTP_ORIGIN" in request.META:
if not self._origin_verified(request):
return self._reject(
request, REASON_BAD_ORIGIN % request.META["HTTP_ORIGIN"]
)
elif request.is_secure():
# If the Origin header wasn't provided, reject HTTPS requests if
# the Referer header doesn't match an allowed value.
#
# Suppose user visits http://example.com/
# An active network attacker (man-in-the-middle, MITM) sends a
# POST form that targets https://example.com/detonate-bomb/ and
# submits it via JavaScript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that's no problem for a MITM and the session-independent secret
# we're using. So the MITM can circumvent the CSRF protection. This
# is true for any HTTP connection, but anyone using HTTPS expects
# better! For this reason, for https://example.com/ we need
# additional protection that treats http://example.com/ as
# completely untrusted. Under HTTPS, Barth et al. found that the
# Referer header is missing for same-domain requests in only about
# 0.2% of cases or less, so we can use strict Referer checking.
try:
self._check_referer(request)
except RejectRequest as exc:
return self._reject(request, exc.reason)
try:
self._check_token(request)
except RejectRequest as exc:
return self._reject(request, exc.reason)
return self._accept(request)
def process_response(self, request, response):
if request.META.get("CSRF_COOKIE_NEEDS_UPDATE"):
self._set_csrf_cookie(request, response)
# Unset the flag to prevent _set_csrf_cookie() from being
# unnecessarily called again in process_response() by other
# instances of CsrfViewMiddleware. This can happen e.g. when both a
# decorator and middleware are used. However,
# CSRF_COOKIE_NEEDS_UPDATE is still respected in subsequent calls
# e.g. in case rotate_token() is called in process_response() later
# by custom middleware but before those subsequent calls.
request.META["CSRF_COOKIE_NEEDS_UPDATE"] = False
return response
```
#### File: management/commands/noargs_command.py
```python
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = "Test No-args commands"
requires_system_checks = []
def handle(self, **options):
print("EXECUTE: noargs_command options=%s" % sorted(options.items()))
```
#### File: tests/admin_views/test_forms.py
```python
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.admin.helpers import AdminForm
from django.contrib.auth.models import User
from django.test import SimpleTestCase, TestCase, override_settings
from .admin import ArticleForm
# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(
AUTHENTICATION_BACKENDS=["django.contrib.auth.backends.AllowAllUsersModelBackend"]
)
class AdminAuthenticationFormTests(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create_user(
username="inactive", password="password", is_active=False
)
def test_inactive_user(self):
data = {
"username": "inactive",
"password": "password",
}
form = AdminAuthenticationForm(None, data)
self.assertEqual(form.non_field_errors(), ["This account is inactive."])
class AdminFormTests(SimpleTestCase):
def test_repr(self):
fieldsets = (
(
"My fields",
{
"classes": ["collapse"],
"fields": ("url", "title", "content", "sites"),
},
),
)
form = ArticleForm()
admin_form = AdminForm(form, fieldsets, {})
self.assertEqual(
repr(admin_form),
"<AdminForm: form=ArticleForm fieldsets=(('My fields', "
"{'classes': ['collapse'], "
"'fields': ('url', 'title', 'content', 'sites')}),)>",
)
```
#### File: auth_tests/models/with_foreign_key.py
```python
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, Group
from django.db import models
class Email(models.Model):
email = models.EmailField(verbose_name="email address", max_length=255, unique=True)
class CustomUserWithFKManager(BaseUserManager):
def create_superuser(self, username, email, group, password):
user = self.model(username_id=username, email_id=email, group_id=group)
user.set_password(password)
user.save(using=self._db)
return user
class CustomUserWithFK(AbstractBaseUser):
username = models.ForeignKey(Email, models.CASCADE, related_name="primary")
email = models.ForeignKey(
Email, models.CASCADE, to_field="email", related_name="secondary"
)
group = models.ForeignKey(Group, models.CASCADE)
custom_objects = CustomUserWithFKManager()
USERNAME_FIELD = "username"
REQUIRED_FIELDS = ["email", "group"]
```
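A hypothetical usage sketch of the manager above, showing which values the `*_id` keyword arguments expect (the Email pk, the email string because of `to_field="email"`, and the Group pk); the object values here are made up:
```python
email = Email.objects.create(email="admin@example.com")
group = Group.objects.create(name="admins")
user = CustomUserWithFK.custom_objects.create_superuser(
    username=email.pk,  # username_id -> Email primary key
    email=email.email,  # email_id -> Email.email (to_field="email")
    group=group.pk,     # group_id -> Group primary key
    password="secret",
)
```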
#### File: tests/auth_tests/urls_custom_user_admin.py
```python
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from django.urls import path
site = admin.AdminSite(name="custom_user_admin")
class CustomUserAdmin(UserAdmin):
def log_change(self, request, obj, message):
# LogEntry.user column doesn't get altered to expect a UUID, so set an
# integer manually to avoid causing an error.
original_pk = request.user.pk
request.user.pk = 1
super().log_change(request, obj, message)
request.user.pk = original_pk
site.register(get_user_model(), CustomUserAdmin)
urlpatterns = [
path("admin/", site.urls),
]
```
#### File: tests/backends/test_ddl_references.py
```python
from django.db import connection
from django.db.backends.ddl_references import (
Columns,
Expressions,
ForeignKeyName,
IndexName,
Statement,
Table,
)
from django.db.models import ExpressionList, F
from django.db.models.functions import Upper
from django.db.models.indexes import IndexExpression
from django.db.models.sql import Query
from django.test import SimpleTestCase, TransactionTestCase
from .models import Person
class TableTests(SimpleTestCase):
def setUp(self):
self.reference = Table("table", lambda table: table.upper())
def test_references_table(self):
self.assertIs(self.reference.references_table("table"), True)
self.assertIs(self.reference.references_table("other"), False)
def test_rename_table_references(self):
self.reference.rename_table_references("other", "table")
self.assertIs(self.reference.references_table("table"), True)
self.assertIs(self.reference.references_table("other"), False)
self.reference.rename_table_references("table", "other")
self.assertIs(self.reference.references_table("table"), False)
self.assertIs(self.reference.references_table("other"), True)
def test_repr(self):
self.assertEqual(repr(self.reference), "<Table 'TABLE'>")
def test_str(self):
self.assertEqual(str(self.reference), "TABLE")
class ColumnsTests(TableTests):
def setUp(self):
self.reference = Columns(
"table", ["first_column", "second_column"], lambda column: column.upper()
)
def test_references_column(self):
self.assertIs(self.reference.references_column("other", "first_column"), False)
self.assertIs(self.reference.references_column("table", "third_column"), False)
self.assertIs(self.reference.references_column("table", "first_column"), True)
def test_rename_column_references(self):
self.reference.rename_column_references("other", "first_column", "third_column")
self.assertIs(self.reference.references_column("table", "first_column"), True)
self.assertIs(self.reference.references_column("table", "third_column"), False)
self.assertIs(self.reference.references_column("other", "third_column"), False)
self.reference.rename_column_references("table", "third_column", "first_column")
self.assertIs(self.reference.references_column("table", "first_column"), True)
self.assertIs(self.reference.references_column("table", "third_column"), False)
self.reference.rename_column_references("table", "first_column", "third_column")
self.assertIs(self.reference.references_column("table", "first_column"), False)
self.assertIs(self.reference.references_column("table", "third_column"), True)
def test_repr(self):
self.assertEqual(
repr(self.reference), "<Columns 'FIRST_COLUMN, SECOND_COLUMN'>"
)
def test_str(self):
self.assertEqual(str(self.reference), "FIRST_COLUMN, SECOND_COLUMN")
class IndexNameTests(ColumnsTests):
def setUp(self):
def create_index_name(table_name, column_names, suffix):
return ", ".join(
"%s_%s_%s" % (table_name, column_name, suffix)
for column_name in column_names
)
self.reference = IndexName(
"table", ["first_column", "second_column"], "suffix", create_index_name
)
def test_repr(self):
self.assertEqual(
repr(self.reference),
"<IndexName 'table_first_column_suffix, table_second_column_suffix'>",
)
def test_str(self):
self.assertEqual(
str(self.reference), "table_first_column_suffix, table_second_column_suffix"
)
class ForeignKeyNameTests(IndexNameTests):
def setUp(self):
def create_foreign_key_name(table_name, column_names, suffix):
return ", ".join(
"%s_%s_%s" % (table_name, column_name, suffix)
for column_name in column_names
)
self.reference = ForeignKeyName(
"table",
["first_column", "second_column"],
"to_table",
["to_first_column", "to_second_column"],
"%(to_table)s_%(to_column)s_fk",
create_foreign_key_name,
)
def test_references_table(self):
super().test_references_table()
self.assertIs(self.reference.references_table("to_table"), True)
def test_references_column(self):
super().test_references_column()
self.assertIs(
self.reference.references_column("to_table", "second_column"), False
)
self.assertIs(
self.reference.references_column("to_table", "to_second_column"), True
)
def test_rename_table_references(self):
super().test_rename_table_references()
self.reference.rename_table_references("to_table", "other_to_table")
self.assertIs(self.reference.references_table("other_to_table"), True)
self.assertIs(self.reference.references_table("to_table"), False)
def test_rename_column_references(self):
super().test_rename_column_references()
self.reference.rename_column_references(
"to_table", "second_column", "third_column"
)
self.assertIs(self.reference.references_column("table", "second_column"), True)
self.assertIs(
self.reference.references_column("to_table", "to_second_column"), True
)
self.reference.rename_column_references(
"to_table", "to_first_column", "to_third_column"
)
self.assertIs(
self.reference.references_column("to_table", "to_first_column"), False
)
self.assertIs(
self.reference.references_column("to_table", "to_third_column"), True
)
def test_repr(self):
self.assertEqual(
repr(self.reference),
"<ForeignKeyName 'table_first_column_to_table_to_first_column_fk, "
"table_second_column_to_table_to_first_column_fk'>",
)
def test_str(self):
self.assertEqual(
str(self.reference),
"table_first_column_to_table_to_first_column_fk, "
"table_second_column_to_table_to_first_column_fk",
)
class MockReference:
def __init__(self, representation, referenced_tables, referenced_columns):
self.representation = representation
self.referenced_tables = referenced_tables
self.referenced_columns = referenced_columns
def references_table(self, table):
return table in self.referenced_tables
def references_column(self, table, column):
return (table, column) in self.referenced_columns
def rename_table_references(self, old_table, new_table):
if old_table in self.referenced_tables:
self.referenced_tables.remove(old_table)
self.referenced_tables.add(new_table)
def rename_column_references(self, table, old_column, new_column):
column = (table, old_column)
if column in self.referenced_columns:
self.referenced_columns.remove(column)
self.referenced_columns.add((table, new_column))
def __str__(self):
return self.representation
class StatementTests(SimpleTestCase):
def test_references_table(self):
statement = Statement(
"", reference=MockReference("", {"table"}, {}), non_reference=""
)
self.assertIs(statement.references_table("table"), True)
self.assertIs(statement.references_table("other"), False)
def test_references_column(self):
statement = Statement(
"", reference=MockReference("", {}, {("table", "column")}), non_reference=""
)
self.assertIs(statement.references_column("table", "column"), True)
self.assertIs(statement.references_column("other", "column"), False)
def test_rename_table_references(self):
reference = MockReference("", {"table"}, {})
statement = Statement("", reference=reference, non_reference="")
statement.rename_table_references("table", "other")
self.assertEqual(reference.referenced_tables, {"other"})
def test_rename_column_references(self):
reference = MockReference("", {}, {("table", "column")})
statement = Statement("", reference=reference, non_reference="")
statement.rename_column_references("table", "column", "other")
self.assertEqual(reference.referenced_columns, {("table", "other")})
def test_repr(self):
reference = MockReference("reference", {}, {})
statement = Statement(
"%(reference)s - %(non_reference)s",
reference=reference,
non_reference="non_reference",
)
self.assertEqual(repr(statement), "<Statement 'reference - non_reference'>")
def test_str(self):
reference = MockReference("reference", {}, {})
statement = Statement(
"%(reference)s - %(non_reference)s",
reference=reference,
non_reference="non_reference",
)
self.assertEqual(str(statement), "reference - non_reference")
class ExpressionsTests(TransactionTestCase):
available_apps = []
def setUp(self):
compiler = Person.objects.all().query.get_compiler(connection.alias)
self.editor = connection.schema_editor()
self.expressions = Expressions(
table=Person._meta.db_table,
expressions=ExpressionList(
IndexExpression(F("first_name")),
IndexExpression(F("last_name").desc()),
IndexExpression(Upper("last_name")),
).resolve_expression(compiler.query),
compiler=compiler,
quote_value=self.editor.quote_value,
)
def test_references_table(self):
self.assertIs(self.expressions.references_table(Person._meta.db_table), True)
self.assertIs(self.expressions.references_table("other"), False)
def test_references_column(self):
table = Person._meta.db_table
self.assertIs(self.expressions.references_column(table, "first_name"), True)
self.assertIs(self.expressions.references_column(table, "last_name"), True)
self.assertIs(self.expressions.references_column(table, "other"), False)
def test_rename_table_references(self):
table = Person._meta.db_table
self.expressions.rename_table_references(table, "other")
self.assertIs(self.expressions.references_table(table), False)
self.assertIs(self.expressions.references_table("other"), True)
self.assertIn(
"%s.%s"
% (
self.editor.quote_name("other"),
self.editor.quote_name("first_name"),
),
str(self.expressions),
)
def test_rename_table_references_without_alias(self):
compiler = Query(Person, alias_cols=False).get_compiler(connection=connection)
table = Person._meta.db_table
expressions = Expressions(
table=table,
expressions=ExpressionList(
IndexExpression(Upper("last_name")),
IndexExpression(F("first_name")),
).resolve_expression(compiler.query),
compiler=compiler,
quote_value=self.editor.quote_value,
)
expressions.rename_table_references(table, "other")
self.assertIs(expressions.references_table(table), False)
self.assertIs(expressions.references_table("other"), True)
expected_str = "(UPPER(%s)), %s" % (
self.editor.quote_name("last_name"),
self.editor.quote_name("first_name"),
)
self.assertEqual(str(expressions), expected_str)
def test_rename_column_references(self):
table = Person._meta.db_table
self.expressions.rename_column_references(table, "first_name", "other")
self.assertIs(self.expressions.references_column(table, "other"), True)
self.assertIs(self.expressions.references_column(table, "first_name"), False)
self.assertIn(
"%s.%s" % (self.editor.quote_name(table), self.editor.quote_name("other")),
str(self.expressions),
)
def test_str(self):
table_name = self.editor.quote_name(Person._meta.db_table)
expected_str = "%s.%s, %s.%s DESC, (UPPER(%s.%s))" % (
table_name,
self.editor.quote_name("first_name"),
table_name,
self.editor.quote_name("last_name"),
table_name,
self.editor.quote_name("last_name"),
)
self.assertEqual(str(self.expressions), expected_str)
```
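As a standalone illustration of the interface these tests exercise, here is a small sketch of Statement/Table interpolation and renaming; the table names and quoting function are made up:
```python
from django.db.backends.ddl_references import Statement, Table


def quote(name):
    return '"%s"' % name


stmt = Statement(
    "ALTER TABLE %(table)s RENAME TO %(new_table)s",
    table=Table("old_name", quote),
    new_table=quote("new_name"),
)
print(stmt)  # ALTER TABLE "old_name" RENAME TO "new_name"
stmt.rename_table_references("old_name", "renamed")
print(stmt)  # ALTER TABLE "renamed" RENAME TO "new_name"
```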
#### File: check_framework/urls/bad_class_based_error_handlers.py
```python
urlpatterns = []
class HandlerView:
@classmethod
def as_view(cls):
def view():
pass
return view
handler400 = HandlerView.as_view()
handler403 = HandlerView.as_view()
handler404 = HandlerView.as_view()
handler500 = HandlerView.as_view()
```
#### File: tests/conditional_processing/views.py
```python
from django.http import HttpResponse
from django.views.decorators.http import condition, etag, last_modified
from .tests import ETAG, FULL_RESPONSE, LAST_MODIFIED, WEAK_ETAG
@condition(lambda r: ETAG, lambda r: LAST_MODIFIED)
def index(request):
return HttpResponse(FULL_RESPONSE)
@condition(last_modified_func=lambda r: LAST_MODIFIED)
def last_modified_view1(request):
return HttpResponse(FULL_RESPONSE)
@last_modified(lambda r: LAST_MODIFIED)
def last_modified_view2(request):
return HttpResponse(FULL_RESPONSE)
@condition(etag_func=lambda r: ETAG)
def etag_view1(request):
return HttpResponse(FULL_RESPONSE)
@etag(lambda r: ETAG)
def etag_view2(request):
return HttpResponse(FULL_RESPONSE)
@condition(etag_func=lambda r: ETAG.strip('"'))
def etag_view_unquoted(request):
"""
Use an etag_func() that returns an unquoted ETag.
"""
return HttpResponse(FULL_RESPONSE)
@condition(etag_func=lambda r: WEAK_ETAG)
def etag_view_weak(request):
"""
Use an etag_func() that returns a weak ETag.
"""
return HttpResponse(FULL_RESPONSE)
@condition(etag_func=lambda r: None)
def etag_view_none(request):
"""
Use an etag_func() that returns None, as opposed to setting etag_func=None.
"""
return HttpResponse(FULL_RESPONSE)
```
#### File: db_functions/math/test_mod.py
```python
import math
from decimal import Decimal
from django.db.models.functions import Mod
from django.test import TestCase
from ..models import DecimalModel, FloatModel, IntegerModel
class ModTests(TestCase):
def test_null(self):
IntegerModel.objects.create(big=100)
obj = IntegerModel.objects.annotate(
null_mod_small=Mod("small", "normal"),
null_mod_normal=Mod("normal", "big"),
).first()
self.assertIsNone(obj.null_mod_small)
self.assertIsNone(obj.null_mod_normal)
def test_decimal(self):
DecimalModel.objects.create(n1=Decimal("-9.9"), n2=Decimal("4.6"))
obj = DecimalModel.objects.annotate(n_mod=Mod("n1", "n2")).first()
self.assertIsInstance(obj.n_mod, Decimal)
self.assertAlmostEqual(obj.n_mod, Decimal(math.fmod(obj.n1, obj.n2)))
def test_float(self):
FloatModel.objects.create(f1=-25, f2=0.33)
obj = FloatModel.objects.annotate(f_mod=Mod("f1", "f2")).first()
self.assertIsInstance(obj.f_mod, float)
self.assertAlmostEqual(obj.f_mod, math.fmod(obj.f1, obj.f2))
def test_integer(self):
IntegerModel.objects.create(small=20, normal=15, big=1)
obj = IntegerModel.objects.annotate(
small_mod=Mod("small", "normal"),
normal_mod=Mod("normal", "big"),
big_mod=Mod("big", "small"),
).first()
self.assertIsInstance(obj.small_mod, float)
self.assertIsInstance(obj.normal_mod, float)
self.assertIsInstance(obj.big_mod, float)
self.assertEqual(obj.small_mod, math.fmod(obj.small, obj.normal))
self.assertEqual(obj.normal_mod, math.fmod(obj.normal, obj.big))
self.assertEqual(obj.big_mod, math.fmod(obj.big, obj.small))
```
#### File: db_functions/text/test_upper.py
```python
from django.db.models import CharField
from django.db.models.functions import Upper
from django.test import TestCase
from django.test.utils import register_lookup
from ..models import Author
class UpperTests(TestCase):
def test_basic(self):
Author.objects.create(name="<NAME>", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.annotate(upper_name=Upper("name"))
self.assertQuerysetEqual(
authors.order_by("name"),
[
"<NAME>",
"RHONDA",
],
lambda a: a.upper_name,
)
Author.objects.update(name=Upper("name"))
self.assertQuerysetEqual(
authors.order_by("name"),
[
("<NAME>", "<NAME>"),
("RHONDA", "RHONDA"),
],
lambda a: (a.upper_name, a.name),
)
def test_transform(self):
with register_lookup(CharField, Upper):
Author.objects.create(name="<NAME>", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.filter(name__upper__exact="<NAME>")
self.assertQuerysetEqual(
authors.order_by("name"),
[
"<NAME>",
],
lambda a: a.name,
)
```
#### File: tests/empty/tests.py
```python
from django.test import TestCase
from .models import Empty
class EmptyModelTests(TestCase):
def test_empty(self):
m = Empty()
self.assertIsNone(m.id)
m.save()
Empty.objects.create()
self.assertEqual(len(Empty.objects.all()), 2)
self.assertIsNotNone(m.id)
existing = Empty(m.id)
existing.save()
```
#### File: forms_tests/field_tests/test_jsonfield.py
```python
import json
import uuid
from django.core.serializers.json import DjangoJSONEncoder
from django.forms import (
CharField,
Form,
JSONField,
Textarea,
TextInput,
ValidationError,
)
from django.test import SimpleTestCase
class JSONFieldTest(SimpleTestCase):
def test_valid(self):
field = JSONField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {"a": "b"})
def test_valid_empty(self):
field = JSONField(required=False)
self.assertIsNone(field.clean(""))
self.assertIsNone(field.clean(None))
def test_invalid(self):
field = JSONField()
with self.assertRaisesMessage(ValidationError, "Enter a valid JSON."):
field.clean("{some badly formed: json}")
def test_prepare_value(self):
field = JSONField()
self.assertEqual(field.prepare_value({"a": "b"}), '{"a": "b"}')
self.assertEqual(field.prepare_value(None), "null")
self.assertEqual(field.prepare_value("foo"), '"foo"')
self.assertEqual(field.prepare_value("你好,世界"), '"你好,世界"')
self.assertEqual(field.prepare_value({"a": "😀🐱"}), '{"a": "😀🐱"}')
self.assertEqual(
field.prepare_value(["你好,世界", "jaźń"]),
'["你好,世界", "jaźń"]',
)
def test_widget(self):
field = JSONField()
self.assertIsInstance(field.widget, Textarea)
def test_custom_widget_kwarg(self):
field = JSONField(widget=TextInput)
self.assertIsInstance(field.widget, TextInput)
def test_custom_widget_attribute(self):
"""The widget can be overridden with an attribute."""
class CustomJSONField(JSONField):
widget = TextInput
field = CustomJSONField()
self.assertIsInstance(field.widget, TextInput)
def test_converted_value(self):
field = JSONField(required=False)
tests = [
'["a", "b", "c"]',
'{"a": 1, "b": 2}',
"1",
"1.5",
'"foo"',
"true",
"false",
"null",
]
for json_string in tests:
with self.subTest(json_string=json_string):
val = field.clean(json_string)
self.assertEqual(field.clean(val), val)
def test_has_changed(self):
field = JSONField()
self.assertIs(field.has_changed({"a": True}, '{"a": 1}'), True)
self.assertIs(field.has_changed({"a": 1, "b": 2}, '{"b": 2, "a": 1}'), False)
def test_custom_encoder_decoder(self):
class CustomDecoder(json.JSONDecoder):
def __init__(self, object_hook=None, *args, **kwargs):
return super().__init__(object_hook=self.as_uuid, *args, **kwargs)
def as_uuid(self, dct):
if "uuid" in dct:
dct["uuid"] = uuid.UUID(dct["uuid"])
return dct
value = {"uuid": uuid.UUID("{c141e152-6550-4172-a784-05448d98204b}")}
encoded_value = '{"uuid": "c141e152-6550-4172-a784-05448d98204b"}'
field = JSONField(encoder=DjangoJSONEncoder, decoder=CustomDecoder)
self.assertEqual(field.prepare_value(value), encoded_value)
self.assertEqual(field.clean(encoded_value), value)
def test_formfield_disabled(self):
class JSONForm(Form):
json_field = JSONField(disabled=True)
form = JSONForm({"json_field": '["bar"]'}, initial={"json_field": ["foo"]})
self.assertIn("["foo"]</textarea>", form.as_p())
def test_redisplay_none_input(self):
class JSONForm(Form):
json_field = JSONField(required=True)
tests = [
{},
{"json_field": None},
]
for data in tests:
with self.subTest(data=data):
form = JSONForm(data)
self.assertEqual(form["json_field"].value(), "null")
self.assertIn("null</textarea>", form.as_p())
self.assertEqual(form.errors["json_field"], ["This field is required."])
def test_redisplay_wrong_input(self):
"""
Displaying a bound form (typically due to invalid input). The form
should not overquote JSONField inputs.
"""
class JSONForm(Form):
name = CharField(max_length=2)
json_field = JSONField()
# JSONField input is valid, name is too long.
form = JSONForm({"name": "xyz", "json_field": '["foo"]'})
self.assertNotIn("json_field", form.errors)
self.assertIn("["foo"]</textarea>", form.as_p())
# Invalid JSONField.
form = JSONForm({"name": "xy", "json_field": '{"foo"}'})
self.assertEqual(form.errors["json_field"], ["Enter a valid JSON."])
self.assertIn("{"foo"}</textarea>", form.as_p())
```
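The custom decoder test above relies on the standard library's object_hook mechanism; the same idea works outside the form field, as this self-contained sketch shows:
```python
import json
import uuid


class UUIDDecoder(json.JSONDecoder):
    def __init__(self, *args, **kwargs):
        super().__init__(object_hook=self.as_uuid, *args, **kwargs)

    def as_uuid(self, dct):
        # Convert any "uuid" key from its string form into a uuid.UUID.
        if "uuid" in dct:
            dct["uuid"] = uuid.UUID(dct["uuid"])
        return dct


data = json.loads('{"uuid": "c141e152-6550-4172-a784-05448d98204b"}', cls=UUIDDecoder)
assert isinstance(data["uuid"], uuid.UUID)
```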
#### File: forms_tests/tests/test_deprecation_forms.py
```python
from django.forms import CharField, EmailField, Form, HiddenInput
from django.forms.utils import ErrorList
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango50Warning
from .test_forms import Person
class DivErrorList(ErrorList):
def __str__(self):
return self.as_divs()
def as_divs(self):
if not self:
return ""
return '<div class="errorlist">%s</div>' % "".join(
f'<div class="error">{error}</div>' for error in self
)
class DeprecationTests(SimpleTestCase):
def test_deprecation_warning_html_output(self):
msg = (
"django.forms.BaseForm._html_output() is deprecated. Please use "
".render() and .get_context() instead."
)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
form = Person()
form._html_output(
normal_row='<p id="p_%(field_name)s"></p>',
error_row="%s",
row_ender="</p>",
help_text_html=" %s",
errors_on_separate_row=True,
)
def test_deprecation_warning_error_list(self):
class EmailForm(Form):
email = EmailField()
comment = CharField()
data = {"email": "invalid"}
f = EmailForm(data, error_class=DivErrorList)
msg = (
"Returning a plain string from DivErrorList is deprecated. Please "
"customize via the template system instead."
)
with self.assertRaisesMessage(RemovedInDjango50Warning, msg):
f.as_p()
@ignore_warnings(category=RemovedInDjango50Warning)
class DeprecatedTests(SimpleTestCase):
def test_errorlist_override_str(self):
class CommentForm(Form):
name = CharField(max_length=50, required=False)
email = EmailField()
comment = CharField()
data = {"email": "invalid"}
f = CommentForm(data, auto_id=False, error_class=DivErrorList)
self.assertHTMLEqual(
f.as_p(),
'<p>Name: <input type="text" name="name" maxlength="50"></p>'
'<div class="errorlist">'
'<div class="error">Enter a valid email address.</div></div>'
'<p>Email: <input type="email" name="email" value="invalid" required></p>'
'<div class="errorlist">'
'<div class="error">This field is required.</div></div>'
'<p>Comment: <input type="text" name="comment" required></p>',
)
def test_field_name(self):
"""#5749 - `field_name` may be used as a key in _html_output()."""
class SomeForm(Form):
some_field = CharField()
def as_p(self):
return self._html_output(
normal_row='<p id="p_%(field_name)s"></p>',
error_row="%s",
row_ender="</p>",
help_text_html=" %s",
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(form.as_p(), '<p id="p_some_field"></p>')
def test_field_without_css_classes(self):
"""
`css_classes` may be used as a key in _html_output() (empty classes).
"""
class SomeForm(Form):
some_field = CharField()
def as_p(self):
return self._html_output(
normal_row='<p class="%(css_classes)s"></p>',
error_row="%s",
row_ender="</p>",
help_text_html=" %s",
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(form.as_p(), '<p class=""></p>')
def test_field_with_css_class(self):
"""
`css_classes` may be used as a key in _html_output() (class comes
from required_css_class in this case).
"""
class SomeForm(Form):
some_field = CharField()
required_css_class = "foo"
def as_p(self):
return self._html_output(
normal_row='<p class="%(css_classes)s"></p>',
error_row="%s",
row_ender="</p>",
help_text_html=" %s",
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(form.as_p(), '<p class="foo"></p>')
def test_field_name_with_hidden_input(self):
"""
BaseForm._html_output() should merge all the hidden input fields and
put them in the last row.
"""
class SomeForm(Form):
hidden1 = CharField(widget=HiddenInput)
custom = CharField()
hidden2 = CharField(widget=HiddenInput)
def as_p(self):
return self._html_output(
normal_row="<p%(html_class_attr)s>%(field)s %(field_name)s</p>",
error_row="%s",
row_ender="</p>",
help_text_html=" %s",
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(
form.as_p(),
'<p><input id="id_custom" name="custom" type="text" required> custom'
'<input id="id_hidden1" name="hidden1" type="hidden">'
'<input id="id_hidden2" name="hidden2" type="hidden"></p>',
)
def test_field_name_with_hidden_input_and_non_matching_row_ender(self):
"""
BaseForm._html_output() should merge all the hidden input fields and
put them in the last row ended with the specific row ender.
"""
class SomeForm(Form):
hidden1 = CharField(widget=HiddenInput)
custom = CharField()
hidden2 = CharField(widget=HiddenInput)
def as_p(self):
return self._html_output(
normal_row="<p%(html_class_attr)s>%(field)s %(field_name)s</p>",
error_row="%s",
row_ender="<hr><hr>",
help_text_html=" %s",
errors_on_separate_row=True,
)
form = SomeForm()
self.assertHTMLEqual(
form.as_p(),
'<p><input id="id_custom" name="custom" type="text" required> custom</p>\n'
'<input id="id_hidden1" name="hidden1" type="hidden">'
'<input id="id_hidden2" name="hidden2" type="hidden"><hr><hr>',
)
```
#### File: forms_tests/tests/test_validators.py
```python
import re
import types
from unittest import TestCase
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.files.uploadedfile import SimpleUploadedFile
class TestFieldWithValidators(TestCase):
def test_all_errors_get_reported(self):
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=[
validators.validate_integer,
validators.validate_email,
],
)
string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex="^[a-zA-Z]*$",
message="Letters only.",
)
],
)
ignore_case_string = forms.CharField(
max_length=50,
validators=[
validators.RegexValidator(
regex="^[a-z]*$",
message="Letters only.",
flags=re.IGNORECASE,
)
],
)
form = UserForm(
{
"full_name": "not int nor mail",
"string": "2 is not correct",
"ignore_case_string": "IgnORE Case strIng",
}
)
with self.assertRaises(ValidationError) as e:
form.fields["full_name"].clean("not int nor mail")
self.assertEqual(2, len(e.exception.messages))
self.assertFalse(form.is_valid())
self.assertEqual(form.errors["string"], ["Letters only."])
self.assertEqual(form.errors["string"], ["Letters only."])
def test_field_validators_can_be_any_iterable(self):
class UserForm(forms.Form):
full_name = forms.CharField(
max_length=50,
validators=(
validators.validate_integer,
validators.validate_email,
),
)
form = UserForm({"full_name": "not int nor mail"})
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors["full_name"],
["Enter a valid integer.", "Enter a valid email address."],
)
class ValidatorCustomMessageTests(TestCase):
def test_value_placeholder_with_char_field(self):
cases = [
(validators.validate_integer, "-42.5", "invalid"),
(validators.validate_email, "a", "invalid"),
(validators.validate_email, "<EMAIL>", "invalid"),
(validators.validate_email, "<EMAIL>", "invalid"),
(validators.validate_slug, "你 好", "invalid"),
(validators.validate_unicode_slug, "你 好", "invalid"),
(validators.validate_ipv4_address, "256.1.1.1", "invalid"),
(validators.validate_ipv6_address, "1:2", "invalid"),
(validators.validate_ipv46_address, "256.1.1.1", "invalid"),
(validators.validate_comma_separated_integer_list, "a,b,c", "invalid"),
(validators.int_list_validator(), "-1,2,3", "invalid"),
(validators.MaxLengthValidator(10), 11 * "x", "max_length"),
(validators.MinLengthValidator(10), 9 * "x", "min_length"),
(validators.URLValidator(), "no_scheme", "invalid"),
(validators.URLValidator(), "http://test[.com", "invalid"),
(validators.URLValidator(), "http://[::1:2::3]/", "invalid"),
(
validators.URLValidator(),
"http://" + ".".join(["a" * 35 for _ in range(9)]),
"invalid",
),
(validators.RegexValidator("[0-9]+"), "xxxxxx", "invalid"),
]
for validator, value, code in cases:
if isinstance(validator, types.FunctionType):
name = validator.__name__
else:
name = type(validator).__name__
with self.subTest(name, value=value):
class MyForm(forms.Form):
field = forms.CharField(
validators=[validator],
error_messages={code: "%(value)s"},
)
form = MyForm({"field": value})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": [value]})
def test_value_placeholder_with_null_character(self):
class MyForm(forms.Form):
field = forms.CharField(
error_messages={"null_characters_not_allowed": "%(value)s"},
)
form = MyForm({"field": "a\0b"})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": ["a\x00b"]})
def test_value_placeholder_with_integer_field(self):
cases = [
(validators.MaxValueValidator(0), 1, "max_value"),
(validators.MinValueValidator(0), -1, "min_value"),
(validators.URLValidator(), "1", "invalid"),
]
for validator, value, code in cases:
with self.subTest(type(validator).__name__, value=value):
class MyForm(forms.Form):
field = forms.IntegerField(
validators=[validator],
error_messages={code: "%(value)s"},
)
form = MyForm({"field": value})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": [str(value)]})
def test_value_placeholder_with_decimal_field(self):
cases = [
("NaN", "invalid"),
("123", "max_digits"),
("0.12", "max_decimal_places"),
("12", "max_whole_digits"),
]
for value, code in cases:
with self.subTest(value=value):
class MyForm(forms.Form):
field = forms.DecimalField(
max_digits=2,
decimal_places=1,
error_messages={code: "%(value)s"},
)
form = MyForm({"field": value})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": [value]})
def test_value_placeholder_with_file_field(self):
class MyForm(forms.Form):
field = forms.FileField(
validators=[validators.validate_image_file_extension],
error_messages={"invalid_extension": "%(value)s"},
)
form = MyForm(files={"field": SimpleUploadedFile("myfile.txt", b"abc")})
self.assertIs(form.is_valid(), False)
self.assertEqual(form.errors, {"field": ["myfile.txt"]})
```
#### File: forms_tests/widget_tests/test_input.py
```python
from django.forms.widgets import Input
from .base import WidgetTest
class InputTests(WidgetTest):
def test_attrs_with_type(self):
attrs = {"type": "date"}
widget = Input(attrs)
self.check_html(
widget, "name", "value", '<input type="date" name="name" value="value">'
)
# reuse the same attrs for another widget
self.check_html(
Input(attrs),
"name",
"value",
'<input type="date" name="name" value="value">',
)
attrs["type"] = "number" # shouldn't change the widget type
self.check_html(
widget, "name", "value", '<input type="date" name="name" value="value">'
)
```
#### File: forms_tests/widget_tests/test_select.py
```python
import copy
import datetime
from django.forms import Select
from django.test import override_settings
from django.utils.safestring import mark_safe
from .base import WidgetTest
class SelectTest(WidgetTest):
widget = Select
nested_widget = Select(
choices=(
("outer1", "Outer 1"),
('Group "1"', (("inner1", "Inner 1"), ("inner2", "Inner 2"))),
)
)
def test_render(self):
self.check_html(
self.widget(choices=self.beatles),
"beatle",
"J",
html=(
"""<select name="beatle">
<option value="J" selected>John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>"""
),
)
def test_render_none(self):
"""
If the value is None, none of the options are selected.
"""
self.check_html(
self.widget(choices=self.beatles),
"beatle",
None,
html=(
"""<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>"""
),
)
def test_render_label_value(self):
"""
If the value corresponds to a label (but not to an option value), none
of the options are selected.
"""
self.check_html(
self.widget(choices=self.beatles),
"beatle",
"John",
html=(
"""<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>"""
),
)
def test_render_selected(self):
"""
Only one option can be selected (#8103).
"""
choices = [("0", "0"), ("1", "1"), ("2", "2"), ("3", "3"), ("0", "extra")]
self.check_html(
self.widget(choices=choices),
"choices",
"0",
html=(
"""<select name="choices">
<option value="0" selected>0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="0">extra</option>
</select>"""
),
)
def test_constructor_attrs(self):
"""
Select options shouldn't inherit the parent widget attrs.
"""
widget = Select(
attrs={"class": "super", "id": "super"},
choices=[(1, 1), (2, 2), (3, 3)],
)
self.check_html(
widget,
"num",
2,
html=(
"""<select name="num" class="super" id="super">
<option value="1">1</option>
<option value="2" selected>2</option>
<option value="3">3</option>
</select>"""
),
)
def test_compare_to_str(self):
"""
The value is compared to its str().
"""
self.check_html(
self.widget(choices=[("1", "1"), ("2", "2"), ("3", "3")]),
"num",
2,
html=(
"""<select name="num">
<option value="1">1</option>
<option value="2" selected>2</option>
<option value="3">3</option>
</select>"""
),
)
self.check_html(
self.widget(choices=[(1, 1), (2, 2), (3, 3)]),
"num",
"2",
html=(
"""<select name="num">
<option value="1">1</option>
<option value="2" selected>2</option>
<option value="3">3</option>
</select>"""
),
)
self.check_html(
self.widget(choices=[(1, 1), (2, 2), (3, 3)]),
"num",
2,
html=(
"""<select name="num">
<option value="1">1</option>
<option value="2" selected>2</option>
<option value="3">3</option>
</select>"""
),
)
def test_choices_constructor(self):
widget = Select(choices=[(1, 1), (2, 2), (3, 3)])
self.check_html(
widget,
"num",
2,
html=(
"""<select name="num">
<option value="1">1</option>
<option value="2" selected>2</option>
<option value="3">3</option>
</select>"""
),
)
def test_choices_constructor_generator(self):
"""
If choices is passed to the constructor and is a generator, it can be
iterated over multiple times without getting consumed.
"""
def get_choices():
for i in range(5):
yield (i, i)
widget = Select(choices=get_choices())
self.check_html(
widget,
"num",
2,
html=(
"""<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected>2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>"""
),
)
self.check_html(
widget,
"num",
3,
html=(
"""<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3" selected>3</option>
<option value="4">4</option>
</select>"""
),
)
def test_choices_escaping(self):
choices = (("bad", "you & me"), ("good", mark_safe("you > me")))
self.check_html(
self.widget(choices=choices),
"escape",
None,
html=(
"""<select name="escape">
<option value="bad">you & me</option>
<option value="good">you > me</option>
</select>"""
),
)
def test_choices_unicode(self):
self.check_html(
self.widget(choices=[("ŠĐĆŽćžšđ", "ŠĐabcĆŽćžšđ"), ("ćžšđ", "abcćžšđ")]),
"email",
"ŠĐĆŽćžšđ",
html=(
"""
<select name="email">
<option value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111"
selected>
\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111
</option>
<option value="\u0107\u017e\u0161\u0111">abc\u0107\u017e\u0161\u0111
</option>
</select>
"""
),
)
def test_choices_optgroup(self):
"""
Choices can be nested one level in order to create HTML optgroups.
"""
self.check_html(
self.nested_widget,
"nestchoice",
None,
html=(
"""<select name="nestchoice">
<option value="outer1">Outer 1</option>
            <optgroup label="Group &quot;1&quot;">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>"""
),
)
def test_choices_select_outer(self):
self.check_html(
self.nested_widget,
"nestchoice",
"outer1",
html=(
"""<select name="nestchoice">
<option value="outer1" selected>Outer 1</option>
            <optgroup label="Group &quot;1&quot;">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>"""
),
)
def test_choices_select_inner(self):
self.check_html(
self.nested_widget,
"nestchoice",
"inner1",
html=(
"""<select name="nestchoice">
<option value="outer1">Outer 1</option>
            <optgroup label="Group &quot;1&quot;">
<option value="inner1" selected>Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>"""
),
)
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_doesnt_localize_option_value(self):
choices = [
(1, "One"),
(1000, "One thousand"),
(1000000, "One million"),
]
html = """
<select name="number">
<option value="1">One</option>
<option value="1000">One thousand</option>
<option value="1000000">One million</option>
</select>
"""
self.check_html(self.widget(choices=choices), "number", None, html=html)
choices = [
(datetime.time(0, 0), "midnight"),
(datetime.time(12, 0), "noon"),
]
html = """
<select name="time">
<option value="00:00:00">midnight</option>
<option value="12:00:00">noon</option>
</select>
"""
self.check_html(self.widget(choices=choices), "time", None, html=html)
def test_options(self):
options = list(
self.widget(choices=self.beatles).options(
"name",
["J"],
attrs={"class": "super"},
)
)
self.assertEqual(len(options), 4)
self.assertEqual(options[0]["name"], "name")
self.assertEqual(options[0]["value"], "J")
self.assertEqual(options[0]["label"], "John")
self.assertEqual(options[0]["index"], "0")
self.assertIs(options[0]["selected"], True)
# Template-related attributes
self.assertEqual(options[1]["name"], "name")
self.assertEqual(options[1]["value"], "P")
self.assertEqual(options[1]["label"], "Paul")
self.assertEqual(options[1]["index"], "1")
self.assertIs(options[1]["selected"], False)
def test_optgroups(self):
choices = [
(
"Audio",
[
("vinyl", "Vinyl"),
("cd", "CD"),
],
),
(
"Video",
[
("vhs", "VHS Tape"),
("dvd", "DVD"),
],
),
("unknown", "Unknown"),
]
groups = list(
self.widget(choices=choices).optgroups(
"name",
["vhs"],
attrs={"class": "super"},
)
)
audio, video, unknown = groups
label, options, index = audio
self.assertEqual(label, "Audio")
self.assertEqual(
options,
[
{
"value": "vinyl",
"type": "select",
"attrs": {},
"index": "0_0",
"label": "Vinyl",
"template_name": "django/forms/widgets/select_option.html",
"name": "name",
"selected": False,
"wrap_label": True,
},
{
"value": "cd",
"type": "select",
"attrs": {},
"index": "0_1",
"label": "CD",
"template_name": "django/forms/widgets/select_option.html",
"name": "name",
"selected": False,
"wrap_label": True,
},
],
)
self.assertEqual(index, 0)
label, options, index = video
self.assertEqual(label, "Video")
self.assertEqual(
options,
[
{
"value": "vhs",
"template_name": "django/forms/widgets/select_option.html",
"label": "VHS Tape",
"attrs": {"selected": True},
"index": "1_0",
"name": "name",
"selected": True,
"type": "select",
"wrap_label": True,
},
{
"value": "dvd",
"template_name": "django/forms/widgets/select_option.html",
"label": "DVD",
"attrs": {},
"index": "1_1",
"name": "name",
"selected": False,
"type": "select",
"wrap_label": True,
},
],
)
self.assertEqual(index, 1)
label, options, index = unknown
self.assertIsNone(label)
self.assertEqual(
options,
[
{
"value": "unknown",
"selected": False,
"template_name": "django/forms/widgets/select_option.html",
"label": "Unknown",
"attrs": {},
"index": "2",
"name": "name",
"type": "select",
"wrap_label": True,
}
],
)
self.assertEqual(index, 2)
def test_optgroups_integer_choices(self):
"""The option 'value' is the same type as what's in `choices`."""
groups = list(
self.widget(choices=[[0, "choice text"]]).optgroups("name", ["vhs"])
)
label, options, index = groups[0]
self.assertEqual(options[0]["value"], 0)
def test_deepcopy(self):
"""
__deepcopy__() should copy all attributes properly (#25085).
"""
widget = Select()
obj = copy.deepcopy(widget)
self.assertIsNot(widget, obj)
self.assertEqual(widget.choices, obj.choices)
self.assertIsNot(widget.choices, obj.choices)
self.assertEqual(widget.attrs, obj.attrs)
self.assertIsNot(widget.attrs, obj.attrs)
def test_doesnt_render_required_when_impossible_to_select_empty_field(self):
widget = self.widget(choices=[("J", "John"), ("P", "Paul")])
self.assertIs(widget.use_required_attribute(initial=None), False)
def test_renders_required_when_possible_to_select_empty_field_str(self):
widget = self.widget(choices=[("", "select please"), ("P", "Paul")])
self.assertIs(widget.use_required_attribute(initial=None), True)
def test_renders_required_when_possible_to_select_empty_field_list(self):
widget = self.widget(choices=[["", "select please"], ["P", "Paul"]])
self.assertIs(widget.use_required_attribute(initial=None), True)
def test_renders_required_when_possible_to_select_empty_field_none(self):
widget = self.widget(choices=[(None, "select please"), ("P", "Paul")])
self.assertIs(widget.use_required_attribute(initial=None), True)
def test_doesnt_render_required_when_no_choices_are_available(self):
widget = self.widget(choices=[])
self.assertIs(widget.use_required_attribute(initial=None), False)
```
#### File: gis_tests/gdal_tests/tests.py
```python
import unittest
from django.contrib.gis.gdal import GDAL_VERSION, gdal_full_version, gdal_version
class GDALTest(unittest.TestCase):
def test_gdal_version(self):
if GDAL_VERSION:
self.assertEqual(gdal_version(), ("%s.%s.%s" % GDAL_VERSION).encode())
else:
self.assertIn(b".", gdal_version())
def test_gdal_full_version(self):
full_version = gdal_full_version()
self.assertIn(gdal_version(), full_version)
self.assertTrue(full_version.startswith(b"GDAL"))
```
#### File: gis_tests/gdal_tests/test_srs.py
```python
from unittest import skipIf
from django.contrib.gis.gdal import (
GDAL_VERSION,
AxisOrder,
CoordTransform,
GDALException,
SpatialReference,
SRSException,
)
from django.contrib.gis.geos import GEOSGeometry
from django.test import SimpleTestCase
class TestSRS:
def __init__(self, wkt, **kwargs):
self.wkt = wkt
for key, value in kwargs.items():
setattr(self, key, value)
WGS84_proj = "+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs "
# Some Spatial Reference examples
srlist = (
TestSRS(
'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,'
'AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",'
'0.0174532925199433,AUTHORITY["EPSG","9122"]],AXIS["Latitude",NORTH],'
'AXIS["Longitude",EAST],AUTHORITY["EPSG","4326"]]',
epsg=4326,
projected=False,
geographic=True,
local=False,
lin_name="unknown",
ang_name="degree",
lin_units=1.0,
ang_units=0.0174532925199,
auth={"GEOGCS": ("EPSG", "4326"), "spheroid": ("EPSG", "7030")},
attr=(
("DATUM", "WGS_1984"),
(("SPHEROID", 1), "6378137"),
("primem|authority", "EPSG"),
),
),
TestSRS(
'PROJCS["NAD83 / Texas South Central",'
'GEOGCS["NAD83",DATUM["North_American_Datum_1983",'
'SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],'
'AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],'
'PARAMETER["standard_parallel_1",30.2833333333333],'
'PARAMETER["standard_parallel_2",28.3833333333333],'
'PARAMETER["latitude_of_origin",27.8333333333333],'
'PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],'
'PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],'
'AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","32140"]]',
epsg=32140,
projected=True,
geographic=False,
local=False,
lin_name="metre",
ang_name="degree",
lin_units=1.0,
ang_units=0.0174532925199,
auth={
"PROJCS": ("EPSG", "32140"),
"spheroid": ("EPSG", "7019"),
"unit": ("EPSG", "9001"),
},
attr=(
("DATUM", "North_American_Datum_1983"),
(("SPHEROID", 2), "298.257222101"),
("PROJECTION", "Lambert_Conformal_Conic_2SP"),
),
),
TestSRS(
'PROJCS["NAD83 / Texas South Central (ftUS)",'
'GEOGCS["NAD83",DATUM["North_American_Datum_1983",'
'SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],'
'AUTHORITY["EPSG","6269"]],'
'PRIMEM["Greenwich",0],'
'UNIT["Degree",0.0174532925199433]],PROJECTION["Lambert_Conformal_Conic_2SP"],'
'PARAMETER["false_easting",1968500],'
'PARAMETER["false_northing",13123333.3333333],'
'PARAMETER["central_meridian",-99],'
'PARAMETER["standard_parallel_1",28.3833333333333],'
'PARAMETER["standard_parallel_2",30.2833333333333],'
'PARAMETER["latitude_of_origin",27.8333333333333],'
'UNIT["US survey foot",0.304800609601219],AXIS["Easting",EAST],'
'AXIS["Northing",NORTH]]',
epsg=None,
projected=True,
geographic=False,
local=False,
lin_name="US survey foot",
ang_name="Degree",
lin_units=0.3048006096012192,
ang_units=0.0174532925199,
auth={"PROJCS": (None, None)},
attr=(
("PROJCS|GeOgCs|spheroid", "GRS 1980"),
(("projcs", 9), "UNIT"),
(("projcs", 11), "AXIS"),
),
),
# This is really ESRI format, not WKT -- but the import should work the same
TestSRS(
'LOCAL_CS["Non-Earth (Meter)",LOCAL_DATUM["Local Datum",32767],'
'UNIT["Meter",1],AXIS["X",EAST],AXIS["Y",NORTH]]',
esri=True,
epsg=None,
projected=False,
geographic=False,
local=True,
lin_name="Meter",
ang_name="degree",
lin_units=1.0,
ang_units=0.0174532925199,
attr=(("LOCAL_DATUM", "Local Datum"),),
),
)
# Well-Known Names
well_known = (
TestSRS(
'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,'
'AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.01745329251994328,'
'AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]',
wk="WGS84",
name="WGS 84",
attrs=(("GEOGCS|AUTHORITY", 1, "4326"), ("SPHEROID", "WGS 84")),
),
TestSRS(
'GEOGCS["WGS 72",DATUM["WGS_1972",SPHEROID["WGS 72",6378135,298.26,'
'AUTHORITY["EPSG","7043"]],AUTHORITY["EPSG","6322"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4322"]]',
wk="WGS72",
name="WGS 72",
attrs=(("GEOGCS|AUTHORITY", 1, "4322"), ("SPHEROID", "WGS 72")),
),
TestSRS(
'GEOGCS["NAD27",DATUM["North_American_Datum_1927",'
'SPHEROID["Clarke 1866",6378206.4,294.9786982138982,'
'AUTHORITY["EPSG","7008"]],AUTHORITY["EPSG","6267"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4267"]]',
wk="NAD27",
name="NAD27",
attrs=(("GEOGCS|AUTHORITY", 1, "4267"), ("SPHEROID", "Clarke 1866")),
),
TestSRS(
'GEOGCS["NAD83",DATUM["North_American_Datum_1983",'
'SPHEROID["GRS 1980",6378137,298.257222101,'
'AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4269"]]',
wk="NAD83",
name="NAD83",
attrs=(("GEOGCS|AUTHORITY", 1, "4269"), ("SPHEROID", "GRS 1980")),
),
TestSRS(
'PROJCS["NZGD49 / Karamea Circuit",GEOGCS["NZGD49",'
'DATUM["New_Zealand_Geodetic_Datum_1949",'
'SPHEROID["International 1924",6378388,297,'
'AUTHORITY["EPSG","7022"]],'
"TOWGS84[59.47,-5.04,187.44,0.47,-0.1,1.024,-4.5993],"
'AUTHORITY["EPSG","6272"]],PRIMEM["Greenwich",0,'
'AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,'
'AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4272"]],'
'PROJECTION["Transverse_Mercator"],'
'PARAMETER["latitude_of_origin",-41.28991152777778],'
'PARAMETER["central_meridian",172.1090281944444],'
'PARAMETER["scale_factor",1],PARAMETER["false_easting",300000],'
'PARAMETER["false_northing",700000],'
'UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","27216"]]',
wk="EPSG:27216",
name="NZGD49 / Karamea Circuit",
attrs=(
("PROJECTION", "Transverse_Mercator"),
("SPHEROID", "International 1924"),
),
),
)
bad_srlist = (
"Foobar",
'OOJCS["NAD83 / Texas South Central",GEOGCS["NAD83",'
'DATUM["North_American_Datum_1983",'
'SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],'
'AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],'
'PARAMETER["standard_parallel_1",30.28333333333333],'
'PARAMETER["standard_parallel_2",28.38333333333333],'
'PARAMETER["latitude_of_origin",27.83333333333333],'
'PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],'
'PARAMETER["false_northing",4000000],UNIT["metre",1,'
'AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]',
)
class SpatialRefTest(SimpleTestCase):
def test01_wkt(self):
"Testing initialization on valid OGC WKT."
for s in srlist:
SpatialReference(s.wkt)
def test02_bad_wkt(self):
"Testing initialization on invalid WKT."
for bad in bad_srlist:
try:
srs = SpatialReference(bad)
srs.validate()
except (SRSException, GDALException):
pass
else:
                self.fail('Should not have initialized on bad WKT "%s"!' % bad)
def test03_get_wkt(self):
"Testing getting the WKT."
for s in srlist:
srs = SpatialReference(s.wkt)
# GDAL 3 strips UNIT part in the last occurrence.
self.assertEqual(
s.wkt.replace(',UNIT["Meter",1]', ""),
srs.wkt.replace(',UNIT["Meter",1]', ""),
)
def test04_proj(self):
"""PROJ import and export."""
proj_parts = [
"+proj=longlat",
"+ellps=WGS84",
"+towgs84=0,0,0,0,0,0,0",
"+datum=WGS84",
"+no_defs",
]
srs1 = SpatialReference(srlist[0].wkt)
srs2 = SpatialReference(WGS84_proj)
self.assertTrue(all(part in proj_parts for part in srs1.proj.split()))
self.assertTrue(all(part in proj_parts for part in srs2.proj.split()))
def test05_epsg(self):
"Test EPSG import."
for s in srlist:
if s.epsg:
srs1 = SpatialReference(s.wkt)
srs2 = SpatialReference(s.epsg)
srs3 = SpatialReference(str(s.epsg))
srs4 = SpatialReference("EPSG:%d" % s.epsg)
for srs in (srs1, srs2, srs3, srs4):
for attr, expected in s.attr:
self.assertEqual(expected, srs[attr])
def test07_boolean_props(self):
"Testing the boolean properties."
for s in srlist:
srs = SpatialReference(s.wkt)
self.assertEqual(s.projected, srs.projected)
self.assertEqual(s.geographic, srs.geographic)
def test08_angular_linear(self):
"Testing the linear and angular units routines."
for s in srlist:
srs = SpatialReference(s.wkt)
self.assertEqual(s.ang_name, srs.angular_name)
self.assertEqual(s.lin_name, srs.linear_name)
self.assertAlmostEqual(s.ang_units, srs.angular_units, 9)
self.assertAlmostEqual(s.lin_units, srs.linear_units, 9)
def test09_authority(self):
"Testing the authority name & code routines."
for s in srlist:
if hasattr(s, "auth"):
srs = SpatialReference(s.wkt)
for target, tup in s.auth.items():
self.assertEqual(tup[0], srs.auth_name(target))
self.assertEqual(tup[1], srs.auth_code(target))
def test10_attributes(self):
"Testing the attribute retrieval routines."
for s in srlist:
srs = SpatialReference(s.wkt)
for tup in s.attr:
att = tup[0] # Attribute to test
exp = tup[1] # Expected result
self.assertEqual(exp, srs[att])
def test11_wellknown(self):
"Testing Well Known Names of Spatial References."
for s in well_known:
srs = SpatialReference(s.wk)
self.assertEqual(s.name, srs.name)
for tup in s.attrs:
if len(tup) == 2:
key = tup[0]
exp = tup[1]
elif len(tup) == 3:
key = tup[:2]
exp = tup[2]
self.assertEqual(srs[key], exp)
def test12_coordtransform(self):
"Testing initialization of a CoordTransform."
target = SpatialReference("WGS84")
CoordTransform(SpatialReference(srlist[0].wkt), target)
def test13_attr_value(self):
"Testing the attr_value() method."
s1 = SpatialReference("WGS84")
with self.assertRaises(TypeError):
s1.__getitem__(0)
with self.assertRaises(TypeError):
s1.__getitem__(("GEOGCS", "foo"))
self.assertEqual("WGS 84", s1["GEOGCS"])
self.assertEqual("WGS_1984", s1["DATUM"])
self.assertEqual("EPSG", s1["AUTHORITY"])
self.assertEqual(4326, int(s1["AUTHORITY", 1]))
self.assertIsNone(s1["FOOBAR"])
def test_unicode(self):
wkt = (
'PROJCS["DHDN / Soldner 39 Langschoß",'
'GEOGCS["DHDN",DATUM["Deutsches_Hauptdreiecksnetz",'
'SPHEROID["Bessel 1841",6377397.155,299.1528128,AUTHORITY["EPSG","7004"]],'
'AUTHORITY["EPSG","6314"]],'
'PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],'
'UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],'
'AUTHORITY["EPSG","4314"]],PROJECTION["Cassini_Soldner"],'
'PARAMETER["latitude_of_origin",50.66738711],'
'PARAMETER["central_meridian",6.28935703],'
'PARAMETER["false_easting",0],PARAMETER["false_northing",0],'
'UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",NORTH],AXIS["Y",EAST],'
'AUTHORITY["mj10777.de","187939"]]'
)
srs = SpatialReference(wkt)
srs_list = [srs, srs.clone()]
srs.import_wkt(wkt)
for srs in srs_list:
self.assertEqual(srs.name, "DHDN / Soldner 39 Langschoß")
self.assertEqual(srs.wkt, wkt)
self.assertIn("Langschoß", srs.pretty_wkt)
self.assertIn("Langschoß", srs.xml)
@skipIf(GDAL_VERSION < (3, 0), "GDAL >= 3.0 is required")
def test_axis_order(self):
wgs84_trad = SpatialReference(4326, axis_order=AxisOrder.TRADITIONAL)
wgs84_auth = SpatialReference(4326, axis_order=AxisOrder.AUTHORITY)
# Coordinate interpretation may depend on the srs axis predicate.
pt = GEOSGeometry("POINT (992385.4472045 481455.4944650)", 2774)
pt_trad = pt.transform(wgs84_trad, clone=True)
self.assertAlmostEqual(pt_trad.x, -104.609, 3)
self.assertAlmostEqual(pt_trad.y, 38.255, 3)
pt_auth = pt.transform(wgs84_auth, clone=True)
self.assertAlmostEqual(pt_auth.x, 38.255, 3)
self.assertAlmostEqual(pt_auth.y, -104.609, 3)
# clone() preserves the axis order.
pt_auth = pt.transform(wgs84_auth.clone(), clone=True)
self.assertAlmostEqual(pt_auth.x, 38.255, 3)
self.assertAlmostEqual(pt_auth.y, -104.609, 3)
def test_axis_order_invalid(self):
msg = "SpatialReference.axis_order must be an AxisOrder instance."
with self.assertRaisesMessage(ValueError, msg):
SpatialReference(4326, axis_order="other")
@skipIf(GDAL_VERSION > (3, 0), "GDAL < 3.0 doesn't support authority.")
def test_axis_order_non_traditional_invalid(self):
msg = "AxisOrder.AUTHORITY is not supported in GDAL < 3.0."
with self.assertRaisesMessage(ValueError, msg):
SpatialReference(4326, axis_order=AxisOrder.AUTHORITY)
def test_esri(self):
srs = SpatialReference("NAD83")
pre_esri_wkt = srs.wkt
srs.to_esri()
self.assertNotEqual(srs.wkt, pre_esri_wkt)
self.assertIn('DATUM["D_North_American_1983"', srs.wkt)
srs.from_esri()
self.assertIn('DATUM["North_American_Datum_1983"', srs.wkt)
```
#### File: gis_tests/rasterapp/models.py
```python
from django.contrib.gis.db import models
class RasterModel(models.Model):
rast = models.RasterField(
"A Verbose Raster Name", null=True, srid=4326, spatial_index=True, blank=True
)
rastprojected = models.RasterField("A Projected Raster Table", srid=3086, null=True)
geom = models.PointField(null=True)
class Meta:
required_db_features = ["supports_raster"]
def __str__(self):
return str(self.id)
class RasterRelatedModel(models.Model):
rastermodel = models.ForeignKey(RasterModel, models.CASCADE)
class Meta:
required_db_features = ["supports_raster"]
def __str__(self):
return str(self.id)
```
#### File: tests/handlers/tests_custom_error_handlers.py
```python
from django.core.exceptions import PermissionDenied
from django.template.response import TemplateResponse
from django.test import SimpleTestCase, modify_settings, override_settings
from django.urls import path
class MiddlewareAccessingContent:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
# Response.content should be available in the middleware even with a
# TemplateResponse-based exception response.
assert response.content
return response
def template_response_error_handler(request, exception=None):
return TemplateResponse(request, "test_handler.html", status=403)
def permission_denied_view(request):
raise PermissionDenied
urlpatterns = [
path("", permission_denied_view),
]
handler403 = template_response_error_handler
@override_settings(ROOT_URLCONF="handlers.tests_custom_error_handlers")
@modify_settings(
MIDDLEWARE={
"append": "handlers.tests_custom_error_handlers.MiddlewareAccessingContent"
}
)
class CustomErrorHandlerTests(SimpleTestCase):
def test_handler_renders_template_response(self):
"""
BaseHandler should render TemplateResponse if necessary.
"""
response = self.client.get("/")
self.assertContains(response, "Error handler content", status_code=403)
```
#### File: tests/logging_tests/logconfig.py
```python
import logging
from django.conf import settings
from django.core.mail.backends.base import BaseEmailBackend
from django.views.debug import ExceptionReporter
class MyHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.config = settings.LOGGING
class MyEmailBackend(BaseEmailBackend):
def send_messages(self, email_messages):
pass
class CustomExceptionReporter(ExceptionReporter):
def get_traceback_text(self):
return "custom traceback text"
```
#### File: tests/lookup/models.py
```python
from django.db import models
from django.db.models.lookups import IsNull
class Alarm(models.Model):
desc = models.CharField(max_length=100)
time = models.TimeField()
def __str__(self):
return "%s (%s)" % (self.time, self.desc)
class Author(models.Model):
name = models.CharField(max_length=100)
alias = models.CharField(max_length=50, null=True, blank=True)
class Meta:
ordering = ("name",)
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateTimeField()
author = models.ForeignKey(Author, models.SET_NULL, blank=True, null=True)
slug = models.SlugField(unique=True, blank=True, null=True)
class Meta:
ordering = ("-pub_date", "headline")
def __str__(self):
return self.headline
class Tag(models.Model):
articles = models.ManyToManyField(Article)
name = models.CharField(max_length=100)
class Meta:
ordering = ("name",)
class NulledTextField(models.TextField):
def get_prep_value(self, value):
return None if value == "" else value
@NulledTextField.register_lookup
class NulledTransform(models.Transform):
lookup_name = "nulled"
template = "NULL"
@NulledTextField.register_lookup
class IsNullWithNoneAsRHS(IsNull):
lookup_name = "isnull_none_rhs"
can_use_none_as_rhs = True
class Season(models.Model):
year = models.PositiveSmallIntegerField()
gt = models.IntegerField(null=True, blank=True)
nulled_text_field = NulledTextField(null=True)
class Meta:
constraints = [
models.UniqueConstraint(fields=["year"], name="season_year_unique"),
]
def __str__(self):
return str(self.year)
class Game(models.Model):
season = models.ForeignKey(Season, models.CASCADE, related_name="games")
home = models.CharField(max_length=100)
away = models.CharField(max_length=100)
class Player(models.Model):
name = models.CharField(max_length=100)
games = models.ManyToManyField(Game, related_name="players")
class Product(models.Model):
name = models.CharField(max_length=80)
qty_target = models.DecimalField(max_digits=6, decimal_places=2)
class Stock(models.Model):
product = models.ForeignKey(Product, models.CASCADE)
short = models.BooleanField(default=False)
qty_available = models.DecimalField(max_digits=6, decimal_places=2)
class Freebie(models.Model):
gift_product = models.ForeignKey(Product, models.CASCADE)
stock_id = models.IntegerField(blank=True, null=True)
stock = models.ForeignObject(
Stock,
from_fields=["stock_id", "gift_product"],
to_fields=["id", "product"],
on_delete=models.CASCADE,
)
```
#### File: tests/messages_tests/test_middleware.py
```python
import unittest
from django.contrib.messages.middleware import MessageMiddleware
from django.http import HttpRequest, HttpResponse
class MiddlewareTests(unittest.TestCase):
def test_response_without_messages(self):
"""
MessageMiddleware is tolerant of messages not existing on request.
"""
request = HttpRequest()
response = HttpResponse()
MessageMiddleware(lambda req: HttpResponse()).process_response(
request, response
)
```
#### File: tests/messages_tests/tests.py
```python
from unittest import mock
from django.contrib.messages import constants
from django.contrib.messages.storage import base
from django.contrib.messages.storage.base import Message
from django.test import SimpleTestCase, override_settings
class MessageTests(SimpleTestCase):
def test_eq(self):
msg_1 = Message(constants.INFO, "Test message 1")
msg_2 = Message(constants.INFO, "Test message 2")
msg_3 = Message(constants.WARNING, "Test message 1")
self.assertEqual(msg_1, msg_1)
self.assertEqual(msg_1, mock.ANY)
self.assertNotEqual(msg_1, msg_2)
self.assertNotEqual(msg_1, msg_3)
self.assertNotEqual(msg_2, msg_3)
class TestLevelTags(SimpleTestCase):
message_tags = {
constants.INFO: "info",
constants.DEBUG: "",
constants.WARNING: "",
constants.ERROR: "bad",
constants.SUCCESS: "",
12: "custom",
}
@override_settings(MESSAGE_TAGS=message_tags)
def test_override_settings_level_tags(self):
self.assertEqual(base.LEVEL_TAGS, self.message_tags)
```
#### File: tests/migration_test_data_persistence/tests.py
```python
from django.test import TestCase, TransactionTestCase
from .models import Book
class MigrationDataPersistenceTestCase(TransactionTestCase):
"""
Data loaded in migrations is available if
TransactionTestCase.serialized_rollback = True.
"""
available_apps = ["migration_test_data_persistence"]
serialized_rollback = True
def test_persistence(self):
self.assertEqual(
Book.objects.count(),
1,
)
class MigrationDataNormalPersistenceTestCase(TestCase):
"""
Data loaded in migrations is available on TestCase
"""
def test_persistence(self):
self.assertEqual(
Book.objects.count(),
1,
)
```
#### File: tests/model_fields/test_booleanfield.py
```python
from django import forms
from django.core.exceptions import ValidationError
from django.db import IntegrityError, models, transaction
from django.test import SimpleTestCase, TestCase
from .models import BooleanModel, FksToBooleans, NullBooleanModel
class BooleanFieldTests(TestCase):
def _test_get_prep_value(self, f):
self.assertIs(f.get_prep_value(True), True)
self.assertIs(f.get_prep_value("1"), True)
self.assertIs(f.get_prep_value(1), True)
self.assertIs(f.get_prep_value(False), False)
self.assertIs(f.get_prep_value("0"), False)
self.assertIs(f.get_prep_value(0), False)
self.assertIsNone(f.get_prep_value(None))
def _test_to_python(self, f):
self.assertIs(f.to_python(1), True)
self.assertIs(f.to_python(0), False)
def test_booleanfield_get_prep_value(self):
self._test_get_prep_value(models.BooleanField())
def test_nullbooleanfield_get_prep_value(self):
self._test_get_prep_value(models.BooleanField(null=True))
def test_booleanfield_to_python(self):
self._test_to_python(models.BooleanField())
def test_nullbooleanfield_to_python(self):
self._test_to_python(models.BooleanField(null=True))
def test_booleanfield_choices_blank(self):
"""
BooleanField with choices and defaults doesn't generate a formfield
with the blank option (#9640, #10549).
"""
choices = [(1, "Si"), (2, "No")]
f = models.BooleanField(choices=choices, default=1, null=False)
self.assertEqual(f.formfield().choices, choices)
def test_booleanfield_choices_blank_desired(self):
"""
BooleanField with choices and no default should generate a formfield
with the blank option.
"""
choices = [(1, "Si"), (2, "No")]
f = models.BooleanField(choices=choices)
self.assertEqual(f.formfield().choices, [("", "---------")] + choices)
def test_nullbooleanfield_formfield(self):
f = models.BooleanField(null=True)
self.assertIsInstance(f.formfield(), forms.NullBooleanField)
def test_return_type(self):
b = BooleanModel.objects.create(bfield=True)
b.refresh_from_db()
self.assertIs(b.bfield, True)
b2 = BooleanModel.objects.create(bfield=False)
b2.refresh_from_db()
self.assertIs(b2.bfield, False)
b3 = NullBooleanModel.objects.create(nbfield=True)
b3.refresh_from_db()
self.assertIs(b3.nbfield, True)
b4 = NullBooleanModel.objects.create(nbfield=False)
b4.refresh_from_db()
self.assertIs(b4.nbfield, False)
# When an extra clause exists, the boolean conversions are applied with
# an offset (#13293).
b5 = BooleanModel.objects.all().extra(select={"string_col": "string"})[0]
self.assertNotIsInstance(b5.pk, bool)
def test_select_related(self):
"""
Boolean fields retrieved via select_related() should return booleans.
"""
bmt = BooleanModel.objects.create(bfield=True)
bmf = BooleanModel.objects.create(bfield=False)
nbmt = NullBooleanModel.objects.create(nbfield=True)
nbmf = NullBooleanModel.objects.create(nbfield=False)
m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt)
m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf)
# select_related('fk_field_name')
ma = FksToBooleans.objects.select_related("bf").get(pk=m1.id)
self.assertIs(ma.bf.bfield, True)
self.assertIs(ma.nbf.nbfield, True)
# select_related()
mb = FksToBooleans.objects.select_related().get(pk=m1.id)
mc = FksToBooleans.objects.select_related().get(pk=m2.id)
self.assertIs(mb.bf.bfield, True)
self.assertIs(mb.nbf.nbfield, True)
self.assertIs(mc.bf.bfield, False)
self.assertIs(mc.nbf.nbfield, False)
def test_null_default(self):
"""
A BooleanField defaults to None, which isn't a valid value (#15124).
"""
boolean_field = BooleanModel._meta.get_field("bfield")
self.assertFalse(boolean_field.has_default())
b = BooleanModel()
self.assertIsNone(b.bfield)
with transaction.atomic():
with self.assertRaises(IntegrityError):
b.save()
nb = NullBooleanModel()
self.assertIsNone(nb.nbfield)
nb.save() # no error
class ValidationTest(SimpleTestCase):
def test_boolean_field_doesnt_accept_empty_input(self):
f = models.BooleanField()
with self.assertRaises(ValidationError):
f.clean(None, None)
def test_nullbooleanfield_blank(self):
"""
NullBooleanField shouldn't throw a validation error when given a value
of None.
"""
nullboolean = NullBooleanModel(nbfield=None)
nullboolean.full_clean()
```
#### File: tests/model_regress/test_state.py
```python
from django.db.models.base import ModelState, ModelStateCacheDescriptor
from django.test import SimpleTestCase
class ModelStateTests(SimpleTestCase):
def test_fields_cache_descriptor(self):
self.assertIsInstance(ModelState.fields_cache, ModelStateCacheDescriptor)
def test_related_managers_descriptor(self):
self.assertIsInstance(
ModelState.related_managers_cache, ModelStateCacheDescriptor
)
```
#### File: tests/queries/test_query.py
```python
from datetime import datetime
from django.core.exceptions import FieldError
from django.db.models import BooleanField, CharField, F, Q
from django.db.models.expressions import Col, Func
from django.db.models.fields.related_lookups import RelatedIsNull
from django.db.models.functions import Lower
from django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan
from django.db.models.sql.query import JoinPromoter, Query
from django.db.models.sql.where import OR
from django.test import SimpleTestCase
from django.test.utils import register_lookup
from .models import Author, Item, ObjectC, Ranking
class TestQuery(SimpleTestCase):
def test_simple_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
def test_non_alias_cols_query(self):
query = Query(Author, alias_cols=False)
where = query.build_where(Q(num__gt=2, name__isnull=False) | Q(num__lt=F("id")))
name_isnull_lookup, num_gt_lookup = where.children[0].children
self.assertIsInstance(num_gt_lookup, GreaterThan)
self.assertIsInstance(num_gt_lookup.lhs, Col)
self.assertIsNone(num_gt_lookup.lhs.alias)
self.assertIsInstance(name_isnull_lookup, IsNull)
self.assertIsInstance(name_isnull_lookup.lhs, Col)
self.assertIsNone(name_isnull_lookup.lhs.alias)
num_lt_lookup = where.children[1]
self.assertIsInstance(num_lt_lookup, LessThan)
self.assertIsInstance(num_lt_lookup.rhs, Col)
self.assertIsNone(num_lt_lookup.rhs.alias)
self.assertIsInstance(num_lt_lookup.lhs, Col)
self.assertIsNone(num_lt_lookup.lhs.alias)
def test_complex_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2) | Q(num__lt=0))
self.assertEqual(where.connector, OR)
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
lookup = where.children[1]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.rhs, 0)
self.assertEqual(lookup.lhs.target, Author._meta.get_field("num"))
def test_multiple_fields(self):
query = Query(Item, alias_cols=False)
where = query.build_where(Q(modified__gt=F("created")))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertIsInstance(lookup.rhs, Col)
self.assertIsNone(lookup.rhs.alias)
self.assertIsInstance(lookup.lhs, Col)
self.assertIsNone(lookup.lhs.alias)
self.assertEqual(lookup.rhs.target, Item._meta.get_field("created"))
self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
def test_transform(self):
query = Query(Author, alias_cols=False)
with register_lookup(CharField, Lower):
where = query.build_where(~Q(name__lower="foo"))
lookup = where.children[0]
self.assertIsInstance(lookup, Exact)
self.assertIsInstance(lookup.lhs, Lower)
self.assertIsInstance(lookup.lhs.lhs, Col)
self.assertIsNone(lookup.lhs.lhs.alias)
self.assertEqual(lookup.lhs.lhs.target, Author._meta.get_field("name"))
def test_negated_nullable(self):
query = Query(Item)
where = query.build_where(~Q(modified__lt=datetime(2017, 1, 1)))
self.assertTrue(where.negated)
lookup = where.children[0]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
lookup = where.children[1]
self.assertIsInstance(lookup, IsNull)
self.assertEqual(lookup.lhs.target, Item._meta.get_field("modified"))
def test_foreign_key(self):
query = Query(Item)
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
query.build_where(Q(creator__num__gt=2))
def test_foreign_key_f(self):
query = Query(Ranking)
with self.assertRaises(FieldError):
query.build_where(Q(rank__gt=F("author__num")))
def test_foreign_key_exclusive(self):
query = Query(ObjectC, alias_cols=False)
where = query.build_where(Q(objecta=None) | Q(objectb=None))
a_isnull = where.children[0]
self.assertIsInstance(a_isnull, RelatedIsNull)
self.assertIsInstance(a_isnull.lhs, Col)
self.assertIsNone(a_isnull.lhs.alias)
self.assertEqual(a_isnull.lhs.target, ObjectC._meta.get_field("objecta"))
b_isnull = where.children[1]
self.assertIsInstance(b_isnull, RelatedIsNull)
self.assertIsInstance(b_isnull.lhs, Col)
self.assertIsNone(b_isnull.lhs.alias)
self.assertEqual(b_isnull.lhs.target, ObjectC._meta.get_field("objectb"))
def test_clone_select_related(self):
query = Query(Item)
query.add_select_related(["creator"])
clone = query.clone()
clone.add_select_related(["note", "creator__extra"])
self.assertEqual(query.select_related, {"creator": {}})
def test_iterable_lookup_value(self):
query = Query(Item)
where = query.build_where(Q(name=["a", "b"]))
name_exact = where.children[0]
self.assertIsInstance(name_exact, Exact)
self.assertEqual(name_exact.rhs, "['a', 'b']")
def test_filter_conditional(self):
query = Query(Item)
where = query.build_where(Func(output_field=BooleanField()))
exact = where.children[0]
self.assertIsInstance(exact, Exact)
self.assertIsInstance(exact.lhs, Func)
self.assertIs(exact.rhs, True)
def test_filter_conditional_join(self):
query = Query(Item)
filter_expr = Func("note__note", output_field=BooleanField())
msg = "Joined field references are not permitted in this query"
with self.assertRaisesMessage(FieldError, msg):
query.build_where(filter_expr)
def test_filter_non_conditional(self):
query = Query(Item)
msg = "Cannot filter against a non-conditional expression."
with self.assertRaisesMessage(TypeError, msg):
query.build_where(Func(output_field=CharField()))
class JoinPromoterTest(SimpleTestCase):
def test_repr(self):
self.assertEqual(
repr(JoinPromoter("AND", 3, True)),
"JoinPromoter(connector='AND', num_children=3, negated=True)",
)
```
#### File: tests/serializers/test_xml.py
```python
from xml.dom import minidom
from django.core import serializers
from django.core.serializers.xml_serializer import DTDForbidden
from django.test import TestCase, TransactionTestCase
from .tests import SerializersTestBase, SerializersTransactionTestBase
class XmlSerializerTestCase(SerializersTestBase, TestCase):
serializer_name = "xml"
pkless_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.category">
<field type="CharField" name="name">Reference</field>
</object>
<object model="serializers.category">
<field type="CharField" name="name">Non-fiction</field>
</object>
</django-objects>"""
mapping_ordering_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object model="serializers.article" pk="%(article_pk)s">
<field name="author" rel="ManyToOneRel" to="serializers.author">%(author_pk)s</field>
<field name="headline" type="CharField">Poker has no place on ESPN</field>
<field name="pub_date" type="DateTimeField">2006-06-16T11:00:00</field>
<field name="categories" rel="ManyToManyRel" to="serializers.category"><object pk="%(first_category_pk)s"></object><object pk="%(second_category_pk)s"></object></field>
<field name="meta_data" rel="ManyToManyRel" to="serializers.categorymetadata"></field>
</object>
</django-objects>""" # NOQA
@staticmethod
def _validate_output(serial_str):
try:
minidom.parseString(serial_str)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("object")
for field in fields:
ret_list.append(field.getAttribute("pk"))
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
dom = minidom.parseString(serial_str)
fields = dom.getElementsByTagName("field")
for field in fields:
if field.getAttribute("name") == field_name:
temp = []
for child in field.childNodes:
temp.append(child.nodeValue)
ret_list.append("".join(temp))
return ret_list
def test_control_char_failure(self):
"""
Serializing control characters with XML should fail as those characters
are not supported in the XML 1.0 standard (except HT, LF, CR).
"""
self.a1.headline = "This contains \u0001 control \u0011 chars"
msg = "Article.headline (pk:%s) contains unserializable characters" % self.a1.pk
with self.assertRaisesMessage(ValueError, msg):
serializers.serialize(self.serializer_name, [self.a1])
self.a1.headline = "HT \u0009, LF \u000A, and CR \u000D are allowed"
self.assertIn(
"HT \t, LF \n, and CR \r are allowed",
serializers.serialize(self.serializer_name, [self.a1]),
)
def test_no_dtd(self):
"""
The XML deserializer shouldn't allow a DTD.
This is the most straightforward way to prevent all entity definitions
and avoid both external entities and entity-expansion attacks.
"""
xml = (
'<?xml version="1.0" standalone="no"?>'
'<!DOCTYPE example SYSTEM "http://example.com/example.dtd">'
)
with self.assertRaises(DTDForbidden):
next(serializers.deserialize("xml", xml))
class XmlSerializerTransactionTestCase(
SerializersTransactionTestBase, TransactionTestCase
):
serializer_name = "xml"
fwd_ref_str = """<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="1" model="serializers.article">
<field to="serializers.author" name="author" rel="ManyToOneRel">1</field>
<field type="CharField" name="headline">Forward references pose no problem</field>
<field type="DateTimeField" name="pub_date">2006-06-16T15:00:00</field>
<field to="serializers.category" name="categories" rel="ManyToManyRel">
<object pk="1"></object>
</field>
<field to="serializers.categorymetadata" name="meta_data" rel="ManyToManyRel"></field>
</object>
<object pk="1" model="serializers.author">
<field type="CharField" name="name">Agnes</field>
</object>
<object pk="1" model="serializers.category">
<field type="CharField" name="name">Reference</field></object>
</django-objects>""" # NOQA
```
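The `test_no_dtd` docstring above explains why the deserializer rejects any DTD outright. As a hedged illustration (not part of Django's test suite), the sketch below shows the kind of entity-expansion payload that blanket DTD rejection stops before any entity is ever expanded; it assumes it runs inside a configured Django project.
```python
# Illustrative only: a truncated "billion laughs" payload. Because the document
# declares a DTD, Django's XML deserializer raises DTDForbidden immediately,
# before any entity expansion can happen.
from django.core import serializers
from django.core.serializers.xml_serializer import DTDForbidden

payload = (
    '<?xml version="1.0"?>'
    "<!DOCTYPE lolz ["
    '<!ENTITY lol "lol">'
    '<!ENTITY lol2 "&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;&lol;">'
    "]>"
    '<django-objects version="1.0">&lol2;</django-objects>'
)

try:
    next(serializers.deserialize("xml", payload))
except DTDForbidden:
    print("DTD rejected before any entities were expanded")
```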
#### File: syntax_tests/i18n/test_language.py
```python
from template_tests.utils import setup
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
class I18nLanguageTagTests(SimpleTestCase):
libraries = {"i18n": "django.templatetags.i18n"}
@setup({"i18n_language": "{% load i18n %} {% language %} {% endlanguage %}"})
def test_no_arg(self):
with self.assertRaisesMessage(
TemplateSyntaxError, "'language' takes one argument (language)"
):
self.engine.render_to_string("i18n_language")
```
#### File: template_tests/syntax_tests/test_filter_tag.py
```python
from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class FilterTagTests(SimpleTestCase):
@setup({"filter01": "{% filter upper %}{% endfilter %}"})
def test_filter01(self):
output = self.engine.render_to_string("filter01")
self.assertEqual(output, "")
@setup({"filter02": "{% filter upper %}django{% endfilter %}"})
def test_filter02(self):
output = self.engine.render_to_string("filter02")
self.assertEqual(output, "DJANGO")
@setup({"filter03": "{% filter upper|lower %}django{% endfilter %}"})
def test_filter03(self):
output = self.engine.render_to_string("filter03")
self.assertEqual(output, "django")
@setup({"filter04": "{% filter cut:remove %}djangospam{% endfilter %}"})
def test_filter04(self):
output = self.engine.render_to_string("filter04", {"remove": "spam"})
self.assertEqual(output, "django")
@setup({"filter05": "{% filter safe %}fail{% endfilter %}"})
def test_filter05(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("filter05")
@setup({"filter05bis": "{% filter upper|safe %}fail{% endfilter %}"})
def test_filter05bis(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("filter05bis")
@setup({"filter06": "{% filter escape %}fail{% endfilter %}"})
def test_filter06(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("filter06")
@setup({"filter06bis": "{% filter upper|escape %}fail{% endfilter %}"})
def test_filter06bis(self):
with self.assertRaises(TemplateSyntaxError):
self.engine.get_template("filter06bis")
```
#### File: template_tests/syntax_tests/test_multiline.py
```python
from django.test import SimpleTestCase
from ..utils import setup
multiline_string = """
Hello,
boys.
How
are
you
gentlemen.
"""
class MultilineTests(SimpleTestCase):
@setup({"multiline01": multiline_string})
def test_multiline01(self):
output = self.engine.render_to_string("multiline01")
self.assertEqual(output, multiline_string)
```
#### File: tests/template_tests/test_library.py
```python
import functools
from django.template import Library
from django.template.base import Node
from django.test import SimpleTestCase
class FilterRegistrationTests(SimpleTestCase):
def setUp(self):
self.library = Library()
def test_filter(self):
@self.library.filter
def func():
return ""
self.assertEqual(self.library.filters["func"], func)
def test_filter_parens(self):
@self.library.filter()
def func():
return ""
self.assertEqual(self.library.filters["func"], func)
def test_filter_name_arg(self):
@self.library.filter("name")
def func():
return ""
self.assertEqual(self.library.filters["name"], func)
def test_filter_name_kwarg(self):
@self.library.filter(name="name")
def func():
return ""
self.assertEqual(self.library.filters["name"], func)
def test_filter_call(self):
def func():
return ""
self.library.filter("name", func)
self.assertEqual(self.library.filters["name"], func)
def test_filter_invalid(self):
msg = "Unsupported arguments to Library.filter: (None, '')"
with self.assertRaisesMessage(ValueError, msg):
self.library.filter(None, "")
class InclusionTagRegistrationTests(SimpleTestCase):
def setUp(self):
self.library = Library()
def test_inclusion_tag(self):
@self.library.inclusion_tag("template.html")
def func():
return ""
self.assertIn("func", self.library.tags)
def test_inclusion_tag_name(self):
@self.library.inclusion_tag("template.html", name="name")
def func():
return ""
self.assertIn("name", self.library.tags)
def test_inclusion_tag_wrapped(self):
@self.library.inclusion_tag("template.html")
@functools.lru_cache(maxsize=32)
def func():
return ""
func_wrapped = self.library.tags["func"].__wrapped__
self.assertIs(func_wrapped, func)
self.assertTrue(hasattr(func_wrapped, "cache_info"))
class SimpleTagRegistrationTests(SimpleTestCase):
def setUp(self):
self.library = Library()
def test_simple_tag(self):
@self.library.simple_tag
def func():
return ""
self.assertIn("func", self.library.tags)
def test_simple_tag_parens(self):
@self.library.simple_tag()
def func():
return ""
self.assertIn("func", self.library.tags)
def test_simple_tag_name_kwarg(self):
@self.library.simple_tag(name="name")
def func():
return ""
self.assertIn("name", self.library.tags)
def test_simple_tag_invalid(self):
msg = "Invalid arguments provided to simple_tag"
with self.assertRaisesMessage(ValueError, msg):
self.library.simple_tag("invalid")
def test_simple_tag_wrapped(self):
@self.library.simple_tag
@functools.lru_cache(maxsize=32)
def func():
return ""
func_wrapped = self.library.tags["func"].__wrapped__
self.assertIs(func_wrapped, func)
self.assertTrue(hasattr(func_wrapped, "cache_info"))
class TagRegistrationTests(SimpleTestCase):
def setUp(self):
self.library = Library()
def test_tag(self):
@self.library.tag
def func(parser, token):
return Node()
self.assertEqual(self.library.tags["func"], func)
def test_tag_parens(self):
@self.library.tag()
def func(parser, token):
return Node()
self.assertEqual(self.library.tags["func"], func)
def test_tag_name_arg(self):
@self.library.tag("name")
def func(parser, token):
return Node()
self.assertEqual(self.library.tags["name"], func)
def test_tag_name_kwarg(self):
@self.library.tag(name="name")
def func(parser, token):
return Node()
self.assertEqual(self.library.tags["name"], func)
def test_tag_call(self):
def func(parser, token):
return Node()
self.library.tag("name", func)
self.assertEqual(self.library.tags["name"], func)
def test_tag_invalid(self):
msg = "Unsupported arguments to Library.tag: (None, '')"
with self.assertRaisesMessage(ValueError, msg):
self.library.tag(None, "")
```
#### File: tests/utils_tests/test_inspect.py
```python
import unittest
from django.utils import inspect
class Person:
def no_arguments(self):
return None
def one_argument(self, something):
return something
def just_args(self, *args):
return args
def all_kinds(self, name, address="home", age=25, *args, **kwargs):
return kwargs
@classmethod
def cls_all_kinds(cls, name, address="home", age=25, *args, **kwargs):
return kwargs
class TestInspectMethods(unittest.TestCase):
def test_get_callable_parameters(self):
self.assertIs(
inspect._get_callable_parameters(Person.no_arguments),
inspect._get_callable_parameters(Person.no_arguments),
)
self.assertIs(
inspect._get_callable_parameters(Person().no_arguments),
inspect._get_callable_parameters(Person().no_arguments),
)
def test_get_func_full_args_no_arguments(self):
self.assertEqual(inspect.get_func_full_args(Person.no_arguments), [])
self.assertEqual(inspect.get_func_full_args(Person().no_arguments), [])
def test_get_func_full_args_one_argument(self):
self.assertEqual(
inspect.get_func_full_args(Person.one_argument), [("something",)]
)
self.assertEqual(
inspect.get_func_full_args(Person().one_argument),
[("something",)],
)
def test_get_func_full_args_all_arguments_method(self):
arguments = [
("name",),
("address", "home"),
("age", 25),
("*args",),
("**kwargs",),
]
self.assertEqual(inspect.get_func_full_args(Person.all_kinds), arguments)
self.assertEqual(inspect.get_func_full_args(Person().all_kinds), arguments)
def test_get_func_full_args_all_arguments_classmethod(self):
arguments = [
("name",),
("address", "home"),
("age", 25),
("*args",),
("**kwargs",),
]
self.assertEqual(inspect.get_func_full_args(Person.cls_all_kinds), arguments)
self.assertEqual(inspect.get_func_full_args(Person().cls_all_kinds), arguments)
def test_func_accepts_var_args_has_var_args(self):
self.assertIs(inspect.func_accepts_var_args(Person.just_args), True)
self.assertIs(inspect.func_accepts_var_args(Person().just_args), True)
def test_func_accepts_var_args_no_var_args(self):
self.assertIs(inspect.func_accepts_var_args(Person.one_argument), False)
self.assertIs(inspect.func_accepts_var_args(Person().one_argument), False)
def test_method_has_no_args(self):
self.assertIs(inspect.method_has_no_args(Person.no_arguments), True)
self.assertIs(inspect.method_has_no_args(Person().no_arguments), True)
self.assertIs(inspect.method_has_no_args(Person.one_argument), False)
self.assertIs(inspect.method_has_no_args(Person().one_argument), False)
def test_func_supports_parameter(self):
self.assertIs(
inspect.func_supports_parameter(Person.all_kinds, "address"), True
)
self.assertIs(
inspect.func_supports_parameter(Person().all_kinds, "address"),
True,
)
self.assertIs(inspect.func_supports_parameter(Person.all_kinds, "zone"), False)
self.assertIs(
inspect.func_supports_parameter(Person().all_kinds, "zone"),
False,
)
def test_func_accepts_kwargs(self):
self.assertIs(inspect.func_accepts_kwargs(Person.just_args), False)
self.assertIs(inspect.func_accepts_kwargs(Person().just_args), False)
self.assertIs(inspect.func_accepts_kwargs(Person.all_kinds), True)
self.assertIs(inspect.func_accepts_kwargs(Person().all_kinds), True)
```
#### File: tests/utils_tests/test_simplelazyobject.py
```python
import pickle
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils.functional import SimpleLazyObject
class TestUtilsSimpleLazyObjectDjangoTestCase(TestCase):
def test_pickle(self):
user = User.objects.create_user("johndoe", "<EMAIL>", "<PASSWORD>")
x = SimpleLazyObject(lambda: user)
pickle.dumps(x)
# Try the variant protocol levels.
pickle.dumps(x, 0)
pickle.dumps(x, 1)
pickle.dumps(x, 2)
```
#### File: tests/utils_tests/test_timesince.py
```python
import datetime
from django.test import TestCase
from django.test.utils import requires_tz_support
from django.utils import timezone, translation
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import npgettext_lazy
class TimesinceTests(TestCase):
def setUp(self):
self.t = datetime.datetime(2007, 8, 14, 13, 46, 0)
self.onemicrosecond = datetime.timedelta(microseconds=1)
self.onesecond = datetime.timedelta(seconds=1)
self.oneminute = datetime.timedelta(minutes=1)
self.onehour = datetime.timedelta(hours=1)
self.oneday = datetime.timedelta(days=1)
self.oneweek = datetime.timedelta(days=7)
self.onemonth = datetime.timedelta(days=30)
self.oneyear = datetime.timedelta(days=365)
def test_equal_datetimes(self):
"""equal datetimes."""
# NOTE: \xa0 avoids wrapping between value and unit
self.assertEqual(timesince(self.t, self.t), "0\xa0minutes")
def test_ignore_microseconds_and_seconds(self):
"""Microseconds and seconds are ignored."""
self.assertEqual(
timesince(self.t, self.t + self.onemicrosecond), "0\xa0minutes"
)
self.assertEqual(timesince(self.t, self.t + self.onesecond), "0\xa0minutes")
def test_other_units(self):
"""Test other units."""
self.assertEqual(timesince(self.t, self.t + self.oneminute), "1\xa0minute")
self.assertEqual(timesince(self.t, self.t + self.onehour), "1\xa0hour")
self.assertEqual(timesince(self.t, self.t + self.oneday), "1\xa0day")
self.assertEqual(timesince(self.t, self.t + self.oneweek), "1\xa0week")
self.assertEqual(timesince(self.t, self.t + self.onemonth), "1\xa0month")
self.assertEqual(timesince(self.t, self.t + self.oneyear), "1\xa0year")
def test_multiple_units(self):
"""Test multiple units."""
self.assertEqual(
timesince(self.t, self.t + 2 * self.oneday + 6 * self.onehour),
"2\xa0days, 6\xa0hours",
)
self.assertEqual(
timesince(self.t, self.t + 2 * self.oneweek + 2 * self.oneday),
"2\xa0weeks, 2\xa0days",
)
def test_display_first_unit(self):
"""
If the two differing units aren't adjacent, only the first unit is
displayed.
"""
self.assertEqual(
timesince(
self.t,
self.t + 2 * self.oneweek + 3 * self.onehour + 4 * self.oneminute,
),
"2\xa0weeks",
)
self.assertEqual(
timesince(self.t, self.t + 4 * self.oneday + 5 * self.oneminute),
"4\xa0days",
)
def test_display_second_before_first(self):
"""
When the second date occurs before the first, we should always
get 0 minutes.
"""
self.assertEqual(
timesince(self.t, self.t - self.onemicrosecond), "0\xa0minutes"
)
self.assertEqual(timesince(self.t, self.t - self.onesecond), "0\xa0minutes")
self.assertEqual(timesince(self.t, self.t - self.oneminute), "0\xa0minutes")
self.assertEqual(timesince(self.t, self.t - self.onehour), "0\xa0minutes")
self.assertEqual(timesince(self.t, self.t - self.oneday), "0\xa0minutes")
self.assertEqual(timesince(self.t, self.t - self.oneweek), "0\xa0minutes")
self.assertEqual(timesince(self.t, self.t - self.onemonth), "0\xa0minutes")
self.assertEqual(timesince(self.t, self.t - self.oneyear), "0\xa0minutes")
self.assertEqual(
timesince(self.t, self.t - 2 * self.oneday - 6 * self.onehour),
"0\xa0minutes",
)
self.assertEqual(
timesince(self.t, self.t - 2 * self.oneweek - 2 * self.oneday),
"0\xa0minutes",
)
self.assertEqual(
timesince(
self.t,
self.t - 2 * self.oneweek - 3 * self.onehour - 4 * self.oneminute,
),
"0\xa0minutes",
)
self.assertEqual(
timesince(self.t, self.t - 4 * self.oneday - 5 * self.oneminute),
"0\xa0minutes",
)
def test_second_before_equal_first_humanize_time_strings(self):
time_strings = {
"minute": npgettext_lazy(
"naturaltime-future",
"%(num)d minute",
"%(num)d minutes",
"num",
),
}
with translation.override("cs"):
for now in [self.t, self.t - self.onemicrosecond, self.t - self.oneday]:
with self.subTest(now):
self.assertEqual(
timesince(self.t, now, time_strings=time_strings),
"0\xa0minut",
)
@requires_tz_support
def test_different_timezones(self):
"""When using two different timezones."""
now = datetime.datetime.now()
now_tz = timezone.make_aware(now, timezone.get_default_timezone())
now_tz_i = timezone.localtime(now_tz, timezone.get_fixed_timezone(195))
self.assertEqual(timesince(now), "0\xa0minutes")
self.assertEqual(timesince(now_tz), "0\xa0minutes")
self.assertEqual(timesince(now_tz_i), "0\xa0minutes")
self.assertEqual(timesince(now_tz, now_tz_i), "0\xa0minutes")
self.assertEqual(timeuntil(now), "0\xa0minutes")
self.assertEqual(timeuntil(now_tz), "0\xa0minutes")
self.assertEqual(timeuntil(now_tz_i), "0\xa0minutes")
self.assertEqual(timeuntil(now_tz, now_tz_i), "0\xa0minutes")
def test_date_objects(self):
"""Both timesince and timeuntil should work on date objects (#17937)."""
today = datetime.date.today()
self.assertEqual(timesince(today + self.oneday), "0\xa0minutes")
self.assertEqual(timeuntil(today - self.oneday), "0\xa0minutes")
def test_both_date_objects(self):
"""Timesince should work with both date objects (#9672)"""
today = datetime.date.today()
self.assertEqual(timeuntil(today + self.oneday, today), "1\xa0day")
self.assertEqual(timeuntil(today - self.oneday, today), "0\xa0minutes")
self.assertEqual(timeuntil(today + self.oneweek, today), "1\xa0week")
def test_leap_year(self):
start_date = datetime.date(2016, 12, 25)
self.assertEqual(timeuntil(start_date + self.oneweek, start_date), "1\xa0week")
self.assertEqual(timesince(start_date, start_date + self.oneweek), "1\xa0week")
def test_leap_year_new_years_eve(self):
t = datetime.date(2016, 12, 31)
now = datetime.datetime(2016, 12, 31, 18, 0, 0)
self.assertEqual(timesince(t + self.oneday, now), "0\xa0minutes")
self.assertEqual(timeuntil(t - self.oneday, now), "0\xa0minutes")
def test_naive_datetime_with_tzinfo_attribute(self):
class naive(datetime.tzinfo):
def utcoffset(self, dt):
return None
future = datetime.datetime(2080, 1, 1, tzinfo=naive())
self.assertEqual(timesince(future), "0\xa0minutes")
past = datetime.datetime(1980, 1, 1, tzinfo=naive())
self.assertEqual(timeuntil(past), "0\xa0minutes")
def test_thousand_years_ago(self):
t = datetime.datetime(1007, 8, 14, 13, 46, 0)
self.assertEqual(timesince(t, self.t), "1000\xa0years")
self.assertEqual(timeuntil(self.t, t), "1000\xa0years")
def test_depth(self):
t = (
self.t
+ self.oneyear
+ self.onemonth
+ self.oneweek
+ self.oneday
+ self.onehour
)
tests = [
(t, 1, "1\xa0year"),
(t, 2, "1\xa0year, 1\xa0month"),
(t, 3, "1\xa0year, 1\xa0month, 1\xa0week"),
(t, 4, "1\xa0year, 1\xa0month, 1\xa0week, 1\xa0day"),
(t, 5, "1\xa0year, 1\xa0month, 1\xa0week, 1\xa0day, 1\xa0hour"),
(t, 6, "1\xa0year, 1\xa0month, 1\xa0week, 1\xa0day, 1\xa0hour"),
(self.t + self.onehour, 5, "1\xa0hour"),
(self.t + (4 * self.oneminute), 3, "4\xa0minutes"),
(self.t + self.onehour + self.oneminute, 1, "1\xa0hour"),
(self.t + self.oneday + self.onehour, 1, "1\xa0day"),
(self.t + self.oneweek + self.oneday, 1, "1\xa0week"),
(self.t + self.onemonth + self.oneweek, 1, "1\xa0month"),
(self.t + self.oneyear + self.onemonth, 1, "1\xa0year"),
(self.t + self.oneyear + self.oneweek + self.oneday, 3, "1\xa0year"),
]
for value, depth, expected in tests:
with self.subTest():
self.assertEqual(timesince(self.t, value, depth=depth), expected)
self.assertEqual(timeuntil(value, self.t, depth=depth), expected)
def test_depth_invalid(self):
msg = "depth must be greater than 0."
with self.assertRaisesMessage(ValueError, msg):
timesince(self.t, self.t, depth=0)
```
#### File: tests/utils_tests/utils.py
```python
import platform
def on_macos_with_hfs():
"""
MacOS 10.13 (High Sierra) and lower can use HFS+ as a filesystem.
HFS+ has a time resolution of only one second which can be too low for
some of the tests.
"""
macos_version = platform.mac_ver()[0]
if macos_version != "":
parsed_macos_version = tuple(int(x) for x in macos_version.split("."))
return parsed_macos_version < (10, 14)
return False
```
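A brief, hypothetical sketch of how a helper like this might gate a timing-sensitive test; the module path, test class, and skip message below are illustrative, not taken from the source.
```python
# Hypothetical usage of on_macos_with_hfs(); module path and test are illustrative.
import unittest

from utils_tests.utils import on_macos_with_hfs


class FileTimestampTests(unittest.TestCase):
    @unittest.skipIf(
        on_macos_with_hfs(),
        "HFS+ only stores timestamps with one-second resolution.",
    )
    def test_subsecond_mtime(self):
        ...  # a test that relies on sub-second file timestamps
```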
#### File: tests/validation/test_custom_messages.py
```python
from django.test import SimpleTestCase
from . import ValidationAssertions
from .models import CustomMessagesModel
class CustomMessagesTests(ValidationAssertions, SimpleTestCase):
def test_custom_simple_validator_message(self):
cmm = CustomMessagesModel(number=12)
self.assertFieldFailsValidationWithMessage(cmm.full_clean, "number", ["AAARGH"])
def test_custom_null_message(self):
cmm = CustomMessagesModel()
self.assertFieldFailsValidationWithMessage(cmm.full_clean, "number", ["NULL"])
```
#### File: tests/validation/test_validators.py
```python
from django.test import SimpleTestCase
from . import ValidationAssertions
from .models import ModelToValidate
class TestModelsWithValidators(ValidationAssertions, SimpleTestCase):
def test_custom_validator_passes_for_correct_value(self):
mtv = ModelToValidate(
number=10,
name="<NAME>",
f_with_custom_validator=42,
f_with_iterable_of_validators=42,
)
self.assertIsNone(mtv.full_clean())
def test_custom_validator_raises_error_for_incorrect_value(self):
mtv = ModelToValidate(
number=10,
name="<NAME>",
f_with_custom_validator=12,
f_with_iterable_of_validators=42,
)
self.assertFailsValidation(mtv.full_clean, ["f_with_custom_validator"])
self.assertFieldFailsValidationWithMessage(
mtv.full_clean,
"f_with_custom_validator",
["This is not the answer to life, universe and everything!"],
)
def test_field_validators_can_be_any_iterable(self):
mtv = ModelToValidate(
number=10,
name="<NAME>",
f_with_custom_validator=42,
f_with_iterable_of_validators=12,
)
self.assertFailsValidation(mtv.full_clean, ["f_with_iterable_of_validators"])
self.assertFieldFailsValidationWithMessage(
mtv.full_clean,
"f_with_iterable_of_validators",
["This is not the answer to life, universe and everything!"],
)
```
{
"source": "jpmallarino/kadalu",
"score": 3
} |
#### File: kadalu/server/exporter.py
```python
import logging
import os
import uvicorn
from fastapi import FastAPI
from kadalulib import logf, logging_setup
metrics_app = FastAPI()
@metrics_app.get("/_api/metrics")
def metrics():
"""
Gathers storage and PVC metrics.
The collected data is exposed on port 8050 at '/_api/metrics'.
"""
data = {
"pod": {}
}
# Default to -1 when 'cgroup' data cannot be read, until a better way to
# collect CPU & memory data inside LXD containers is available.
memory_usage_in_bytes = -1
cpu_usage_in_nanoseconds = -1
memory_usage_file_path = '/sys/fs/cgroup/memory/memory.usage_in_bytes'
if os.path.exists(memory_usage_file_path):
with open(memory_usage_file_path, 'r') as memory_fd:
memory_usage_in_bytes = int(memory_fd.read().strip())
cpu_usage_file_path = '/sys/fs/cgroup/cpu/cpuacct.usage'
if os.path.exists(cpu_usage_file_path):
with open(cpu_usage_file_path, 'r') as cpu_fd:
cpu_usage_in_nanoseconds = int(cpu_fd.read().strip())
data["pod"] = {
"memory_usage_in_bytes": memory_usage_in_bytes,
"cpu_usage_in_nanoseconds": cpu_usage_in_nanoseconds
}
return data
if __name__ == "__main__":
logging_setup()
logging.info(logf(
"Started metrics exporter process at port 8050"
))
uvicorn.run("exporter:metrics_app", host="0.0.0.0", port=8050, log_level="info")
```
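Because the exporter serves plain JSON over HTTP, a client only needs a GET request. A minimal sketch, assuming the process above is running locally on port 8050 as started in `__main__`:
```python
# Minimal client sketch for the metrics endpoint above (assumes the exporter
# is reachable on localhost:8050).
import requests

resp = requests.get("http://localhost:8050/_api/metrics", timeout=5)
resp.raise_for_status()
pod = resp.json()["pod"]
print("memory (bytes):", pod["memory_usage_in_bytes"])
print("cpu (ns):", pod["cpu_usage_in_nanoseconds"])
```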
{
"source": "jpmarques19/tensorflwo-test",
"score": 3
} |
#### File: fairseq_translation/fairseq/predictor.py
```python
from sagemaker_translate import model_fn, input_fn, output_fn, predict_fn
import flask
import os
prefix = '/opt/ml/'
model_path = os.path.join(prefix, 'model')
print("in predictor.py")
# A singleton for holding the model. This simply loads the model and holds it.
# It has a predict function that does a prediction based on the model and the input data.
class ScoringService(object):
model = None # Where we keep the model when it's loaded
@classmethod
def get_model(cls):
"""Get the model object for this instance, loading it if it's not already loaded."""
if cls.model is None:
cls.model = model_fn(model_path)
return cls.model
@classmethod
def predict(cls, serialized_input_data):
"""For the input, do the predictions and return them.
Args:
input (a pandas dataframe): The data on which to do the predictions. There will be
one prediction per row in the dataframe"""
clf = cls.get_model()
input_data = input_fn(serialized_input_data)
output = predict_fn(input_data, clf)
return output_fn(output)
# The flask app for serving predictions
app = flask.Flask(__name__)
@app.route('/ping', methods=['GET'])
def ping():
"""Determine if the container is working and healthy. In this sample container, we declare
it healthy if we can load the model successfully."""
health = ScoringService.get_model() is not None # You can insert a health check here
status = 200 if health else 404
return flask.Response(response='\n', status=status, mimetype='application/json')
@app.route('/invocations', methods=['POST'])
def transformation():
"""Do an inference on a single batch of data.
"""
data = None
data = flask.request.data.decode('utf-8')
# Do the prediction
result, accept = ScoringService.predict(data)
return flask.Response(response=result, status=200, mimetype='text/json')
```
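SageMaker inference containers receive health checks on `/ping` and prediction requests on `/invocations`, conventionally on port 8080. A hedged sketch of exercising this Flask app once the container is running locally (for example via `docker run -p 8080:8080 <image> serve`); the payload is illustrative:
```python
# Sketch only: assumes the serving container above is listening on localhost:8080.
import requests

assert requests.get("http://localhost:8080/ping", timeout=5).status_code == 200

resp = requests.post(
    "http://localhost:8080/invocations",
    data="Hello world!".encode("utf-8"),
    headers={"Content-Type": "text/plain"},
    timeout=30,
)
print(resp.text)  # the translated text produced by output_fn()
```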
#### File: advanced_functionality/inference_pipeline_sparkml_blazingtext_dbpedia/dbpedia_processing.py
```python
from __future__ import print_function
from __future__ import unicode_literals
import time
import sys
import os
import shutil
import csv
import boto3
from awsglue.utils import getResolvedOptions
import pyspark
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType, StructField, StructType, StringType
from pyspark.ml.feature import Tokenizer
from pyspark.sql.functions import *
from mleap.pyspark.spark_support import SimpleSparkSerializer
def csv_line(data):
r = ' '.join(d for d in data[1])
return ('__label__' + str(data[0])) + " " + r
def main():
spark = SparkSession.builder.appName("DBPediaSpark").getOrCreate()
args = getResolvedOptions(sys.argv, ['S3_INPUT_BUCKET',
'S3_INPUT_KEY_PREFIX',
'S3_OUTPUT_BUCKET',
'S3_OUTPUT_KEY_PREFIX',
'S3_MODEL_BUCKET',
'S3_MODEL_KEY_PREFIX'])
# This is needed to save RDDs which is the only way to write nested Dataframes into CSV format
spark.sparkContext._jsc.hadoopConfiguration().set("mapred.output.committer.class",
"org.apache.hadoop.mapred.FileOutputCommitter")
# Defining the schema corresponding to the input data. The input data does not contain the headers
schema = StructType([StructField("label", IntegerType(), True),
StructField("title", StringType(), True),
StructField("abstract", StringType(), True)])
# Download the data from S3 into two separate Dataframes
traindf = spark.read.csv(('s3://' + os.path.join(args['S3_INPUT_BUCKET'], args['S3_INPUT_KEY_PREFIX'],
'train.csv')), header=False, schema=schema, encoding='UTF-8')
validationdf = spark.read.csv(('s3://' + os.path.join(args['S3_INPUT_BUCKET'], args['S3_INPUT_KEY_PREFIX'],
'test.csv')), header=False, schema=schema, encoding='UTF-8')
# Tokenize the abstract column which contains the input text
tokenizer = Tokenizer(inputCol="abstract", outputCol="tokenized_abstract")
# Save transformed training data to CSV in S3 by converting to RDD.
transformed_traindf = tokenizer.transform(traindf)
transformed_train_rdd = transformed_traindf.rdd.map(lambda x: (x.label, x.tokenized_abstract))
lines = transformed_train_rdd.map(csv_line)
lines.coalesce(1).saveAsTextFile('s3://' + os.path.join(args['S3_OUTPUT_BUCKET'], args['S3_OUTPUT_KEY_PREFIX'], 'train'))
# Similar data processing for validation dataset.
transformed_validation = tokenizer.transform(validationdf)
transformed_validation_rdd = transformed_validation.rdd.map(lambda x: (x.label, x.tokenized_abstract))
lines = transformed_validation_rdd.map(csv_line)
lines.coalesce(1).saveAsTextFile('s3://' + os.path.join(args['S3_OUTPUT_BUCKET'], args['S3_OUTPUT_KEY_PREFIX'], 'validation'))
# Serialize the tokenizer via MLeap and upload to S3
SimpleSparkSerializer().serializeToBundle(tokenizer, "jar:file:/tmp/model.zip", transformed_validation)
# Unzip as SageMaker expects a .tar.gz file but MLeap produces a .zip file.
import zipfile
with zipfile.ZipFile("/tmp/model.zip") as zf:
zf.extractall("/tmp/model")
# Write back the content as a .tar.gz file
import tarfile
with tarfile.open("/tmp/model.tar.gz", "w:gz") as tar:
tar.add("/tmp/model/bundle.json", arcname='bundle.json')
tar.add("/tmp/model/root", arcname='root')
s3 = boto3.resource('s3')
file_name = os.path.join(args['S3_MODEL_KEY_PREFIX'], 'model.tar.gz')
s3.Bucket(args['S3_MODEL_BUCKET']).upload_file('/tmp/model.tar.gz', file_name)
if __name__ == "__main__":
main()
```
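For reference, `csv_line` produces the `__label__<class> token ...` layout that BlazingText's supervised mode (following the fastText convention) expects. A self-contained sketch of the output on a toy row:
```python
# Mirrors csv_line() above on a toy (label, tokens) pair to show the format.
def csv_line(data):
    r = ' '.join(d for d in data[1])
    return ('__label__' + str(data[0])) + " " + r

print(csv_line((14, ["the", "abstract", "tokens"])))
# -> "__label__14 the abstract tokens"
```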
#### File: advanced_functionality/inference_pipeline_sparkml_xgboost_car_evaluation/preprocessor.py
```python
from __future__ import print_function
import time
import sys
import os
import shutil
import csv
import boto3
from awsglue.utils import getResolvedOptions
import pyspark
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, VectorIndexer, OneHotEncoder, VectorAssembler, IndexToString
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.sql.functions import *
from mleap.pyspark.spark_support import SimpleSparkSerializer
def toCSVLine(data):
r = ','.join(str(d) for d in data[1])
return str(data[0]) + "," + r
def main():
spark = SparkSession.builder.appName("PySparkTitanic").getOrCreate()
args = getResolvedOptions(sys.argv, ['s3_input_data_location',
's3_output_bucket',
's3_output_bucket_prefix',
's3_model_bucket',
's3_model_bucket_prefix'])
# This is needed to write RDDs to file which is the only way to write nested Dataframes into CSV.
spark.sparkContext._jsc.hadoopConfiguration().set("mapred.output.committer.class",
"org.apache.hadoop.mapred.FileOutputCommitter")
train = spark.read.csv(args['s3_input_data_location'], header=False)
oldColumns = train.schema.names
newColumns = ['buying', 'maint', 'doors', 'persons', 'lug_boot', 'safety', 'cat']
train = reduce(lambda train, idx: train.withColumnRenamed(oldColumns[idx], newColumns[idx]), xrange(len(oldColumns)), train)
# dropping null values
train = train.dropna()
# Target label
catIndexer = StringIndexer(inputCol="cat", outputCol="label")
labelIndexModel = catIndexer.fit(train)
train = labelIndexModel.transform(train)
converter = IndexToString(inputCol="label", outputCol="cat")
# Spliting in train and test set. Beware : It sorts the dataset
(traindf, validationdf) = train.randomSplit([0.8, 0.2])
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
buyingIndexer = StringIndexer(inputCol="buying", outputCol="indexedBuying")
maintIndexer = StringIndexer(inputCol="maint", outputCol="indexedMaint")
doorsIndexer = StringIndexer(inputCol="doors", outputCol="indexedDoors")
personsIndexer = StringIndexer(inputCol="persons", outputCol="indexedPersons")
lug_bootIndexer = StringIndexer(inputCol="lug_boot", outputCol="indexedLug_boot")
safetyIndexer = StringIndexer(inputCol="safety", outputCol="indexedSafety")
# One Hot Encoder on indexed features
buyingEncoder = OneHotEncoder(inputCol="indexedBuying", outputCol="buyingVec")
maintEncoder = OneHotEncoder(inputCol="indexedMaint", outputCol="maintVec")
doorsEncoder = OneHotEncoder(inputCol="indexedDoors", outputCol="doorsVec")
personsEncoder = OneHotEncoder(inputCol="indexedPersons", outputCol="personsVec")
lug_bootEncoder = OneHotEncoder(inputCol="indexedLug_boot", outputCol="lug_bootVec")
safetyEncoder = OneHotEncoder(inputCol="indexedSafety", outputCol="safetyVec")
# Create the vector structured data (label,features(vector))
assembler = VectorAssembler(inputCols=["buyingVec", "maintVec", "doorsVec", "personsVec", "lug_bootVec", "safetyVec"], outputCol="features")
# Chain featurizers in a Pipeline
pipeline = Pipeline(stages=[buyingIndexer, maintIndexer, doorsIndexer, personsIndexer, lug_bootIndexer, safetyIndexer, buyingEncoder, maintEncoder, doorsEncoder, personsEncoder, lug_bootEncoder, safetyEncoder, assembler])
# Train model. This also runs the indexers.
model = pipeline.fit(traindf)
# Delete previous data from output
s3 = boto3.resource('s3')
bucket = s3.Bucket(args['s3_output_bucket'])
bucket.objects.filter(Prefix=args['s3_output_bucket_prefix']).delete()
# Save transformed training data to CSV in S3 by converting to RDD.
transformed_traindf = model.transform(traindf)
transformed_train_rdd = transformed_traindf.rdd.map(lambda x: (x.label, x.features))
lines = transformed_train_rdd.map(toCSVLine)
lines.saveAsTextFile('s3a://' + args['s3_output_bucket'] + '/' +args['s3_output_bucket_prefix'] + '/' + 'train')
# Similar data processing for validation dataset.
predictions = model.transform(validationdf)
transformed_train_rdd = predictions.rdd.map(lambda x: (x.label, x.features))
lines = transformed_train_rdd.map(toCSVLine)
lines.saveAsTextFile('s3a://' + args['s3_output_bucket'] + '/' +args['s3_output_bucket_prefix'] + '/' + 'validation')
# Serialize and store via MLeap
SimpleSparkSerializer().serializeToBundle(model, "jar:file:/tmp/model.zip", predictions)
# Unzipping as SageMaker expects a .tar.gz file but MLeap produces a .zip file.
import zipfile
with zipfile.ZipFile("/tmp/model.zip") as zf:
zf.extractall("/tmp/model")
# Writing back the content as a .tar.gz file
import tarfile
with tarfile.open("/tmp/model.tar.gz", "w:gz") as tar:
tar.add("/tmp/model/bundle.json", arcname='bundle.json')
tar.add("/tmp/model/root", arcname='root')
s3 = boto3.resource('s3')
file_name = args['s3_model_bucket_prefix'] + '/' + 'model.tar.gz'
s3.Bucket(args['s3_model_bucket']).upload_file('/tmp/model.tar.gz', file_name)
os.remove('/tmp/model.zip')
os.remove('/tmp/model.tar.gz')
shutil.rmtree('/tmp/model')
# Save postprocessor
SimpleSparkSerializer().serializeToBundle(converter, "jar:file:/tmp/postprocess.zip", predictions)
with zipfile.ZipFile("/tmp/postprocess.zip") as zf:
zf.extractall("/tmp/postprocess")
# Writing back the content as a .tar.gz file
import tarfile
with tarfile.open("/tmp/postprocess.tar.gz", "w:gz") as tar:
tar.add("/tmp/postprocess/bundle.json", arcname='bundle.json')
tar.add("/tmp/postprocess/root", arcname='root')
file_name = args['s3_model_bucket_prefix'] + '/' + 'postprocess.tar.gz'
s3.Bucket(args['s3_model_bucket']).upload_file('/tmp/postprocess.tar.gz', file_name)
os.remove('/tmp/postprocess.zip')
os.remove('/tmp/postprocess.tar.gz')
shutil.rmtree('/tmp/postprocess')
if __name__ == "__main__":
main()
```
#### File: advanced_functionality/jax_bring_your_own/sagemaker_jax.py
```python
from sagemaker.estimator import Framework
from sagemaker.vpc_utils import VPC_CONFIG_DEFAULT
from sagemaker.tensorflow.model import TensorFlowModel
class JaxEstimator(Framework):
def __init__(
self,
entry_point,
source_dir=None,
hyperparameters=None,
image_name=None,
**kwargs
):
super(JaxEstimator, self).__init__(
entry_point, source_dir, hyperparameters, image_name=image_name, **kwargs
)
def create_model(
self,
role=None,
vpc_config_override=VPC_CONFIG_DEFAULT,
entry_point=None,
source_dir=None,
dependencies=None,
**kwargs
):
"""Creates ``TensorFlowModel`` object to be used for creating SageMaker model entities
"""
kwargs["name"] = self._get_or_create_name(kwargs.get("name"))
if "enable_network_isolation" not in kwargs:
kwargs["enable_network_isolation"] = self.enable_network_isolation()
return TensorFlowModel(
model_data=self.model_data,
role=role or self.role,
container_log_level=self.container_log_level,
framework_version='2.3.1',
sagemaker_session=self.sagemaker_session,
vpc_config=self.get_vpc_config(vpc_config_override),
entry_point=entry_point,
source_dir=source_dir,
dependencies=dependencies,
**kwargs
)
```
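A hedged sketch of how this estimator might be used; every value below is a placeholder, and the exact keyword names (`image_name`, `instance_type`, and so on) depend on the SageMaker Python SDK version this class was written against.
```python
# Hypothetical usage of JaxEstimator; all values are placeholders and the
# keyword names assume a SageMaker Python SDK version compatible with the
# class definition above.
from sagemaker_jax import JaxEstimator

estimator = JaxEstimator(
    entry_point="train_jax.py",  # hypothetical training script
    role="arn:aws:iam::123456789012:role/SageMakerRole",  # placeholder
    image_name="<account>.dkr.ecr.<region>.amazonaws.com/sagemaker-jax:latest",
    instance_count=1,
    instance_type="ml.p3.2xlarge",
)
estimator.fit({"train": "s3://my-bucket/jax/train"})

# create_model() returns a TensorFlowModel, so the exported SavedModel can be
# served by the stock TensorFlow Serving container on deploy().
predictor = estimator.deploy(initial_instance_count=1, instance_type="ml.m5.xlarge")
```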
#### File: advanced_functionality/multi_model_linear_learner_home_value/sklearn_preprocessor.py
```python
from __future__ import print_function
import time
import sys
from io import StringIO
import os
import shutil
import argparse
import csv
import json
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.externals import joblib
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder
from sagemaker_containers.beta.framework import (
content_types, encoders, env, modules, transformer, worker)
# Since we get a headerless CSV file we specify the column names here.
feature_columns_names = [
'YEAR_BUILT',
'SQUARE_FEET',
'NUM_BEDROOMS',
'NUM_BATHROOMS',
'LOT_ACRES',
'GARAGE_SPACES',
'FRONT_PORCH',
'DECK']
label_column = 'PRICE'
feature_columns_dtype = {
'YEAR_BUILT': str,
'SQUARE_FEET': np.float64,
'NUM_BEDROOMS': np.float64,
'NUM_BATHROOMS': np.float64,
'LOT_ACRES': np.float64,
'GARAGE_SPACES': np.float64,
'FRONT_PORCH': str,
'DECK': str}
label_column_dtype = {'PRICE': np.float64}
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Sagemaker specific arguments. Defaults are set in the environment variables.
parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
args = parser.parse_args()
# Take the set of files and read them all into a single pandas dataframe
input_files = [os.path.join(args.train, file) for file in os.listdir(args.train)]
if len(input_files) == 0:
raise ValueError(('There are no files in {}.\n' +
'This usually indicates that the train channel was incorrectly specified,\n' +
'the data specification in S3 was incorrectly specified or the role specified\n' +
'does not have permission to access the data.').format(args.train))
for file in input_files:
print("file :", file)
raw_data = [pd.read_csv(
file,
header=None,
names=feature_columns_names + [label_column]
)]
concat_data = pd.concat(raw_data)
print(concat_data)
# This section is adapted from the scikit-learn example of using preprocessing pipelines:
#
# https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html
#
numeric_features = list(feature_columns_names)
numeric_features.remove('FRONT_PORCH')
numeric_features.remove('DECK')
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
categorical_features = ['FRONT_PORCH','DECK']
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)],
remainder="drop")
preprocessor.fit(concat_data)
joblib.dump(preprocessor, os.path.join(args.model_dir, "model.joblib"))
print("saved model!")
def input_fn(input_data, content_type):
"""Parse input data payload
We currently only take csv input. Since we need to process both labelled
and unlabelled data we first determine whether the label column is present
by looking at how many columns were provided.
"""
if content_type == 'text/csv':
# Read the raw input data as CSV.
df = pd.read_csv(StringIO(input_data),
header=None)
if len(df.columns) == len(feature_columns_names) + 1:
# This is a labelled example, which includes the price label.
df.columns = feature_columns_names + [label_column]
elif len(df.columns) == len(feature_columns_names):
# This is an unlabelled example.
df.columns = feature_columns_names
return df
else:
raise ValueError("{} not supported by script!".format(content_type))
def output_fn(prediction, accept):
"""Format prediction output
The default accept/content-type between containers for serial inference is JSON.
We also want to set the ContentType or mimetype as the same value as accept so the next
container can read the response payload correctly.
"""
if accept == "application/json":
instances = []
for row in prediction.tolist():
instances.append({"features": row})
json_output = {"instances": instances}
return worker.Response(json.dumps(json_output), mimetype=accept)
elif accept == 'text/csv':
return worker.Response(encoders.encode(prediction, accept), mimetype=accept)
else:
raise RuntimeError("{} accept type is not supported by this script.".format(accept))
def predict_fn(input_data, model):
"""Preprocess input data
We implement this because the default uses .predict(), but our model is a preprocessor
so we want to use .transform().
The output is returned in the following order:
the label (if present), followed by the features, either one-hot encoded or standardized.
"""
print("Input data type ", type(input_data))
print(input_data)
features = model.transform(input_data)
print("features type ", type(features))
print(features)
features_array = features
print("features_array ", type(features_array))
print(features_array)
if label_column in input_data:
# Return the label (as the first column) and the set of features.
return np.insert(features_array, 0, input_data[label_column], axis=1)
else:
# Return only the set of features
return features
def model_fn(model_dir):
"""Deserialize fitted model
"""
preprocessor = joblib.load(os.path.join(model_dir, "model.joblib"))
return preprocessor
```
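The script above doubles as a SageMaker Scikit-learn entry point: training runs through `__main__`, and serving goes through `model_fn`/`input_fn`/`predict_fn`/`output_fn`. A minimal launcher sketch follows; it is not part of the original files, and the entry-point filename, role ARN, bucket, and framework version are assumptions (SageMaker Python SDK v2 is also assumed).
```python
# Hypothetical launcher for the preprocessing script above; all names are placeholders.
from sagemaker.sklearn.estimator import SKLearn

sklearn_preprocessor = SKLearn(
    entry_point="preprocessing.py",  # assumed filename of the script above
    role="arn:aws:iam::111122223333:role/SageMakerRole",  # placeholder role ARN
    instance_type="ml.c4.xlarge",
    framework_version="0.23-1",
)
# The 'train' channel becomes SM_CHANNEL_TRAIN inside the training container.
sklearn_preprocessor.fit({"train": "s3://example-bucket/housing/train/"})
```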
#### File: unity_ml_agents/src/train.py
```python
import argparse
import numpy as np
import os
import tensorflow as tf
from mlagents_envs.environment import UnityEnvironment
import mlagents
import subprocess
import yaml
import json
def parse_args():
parser = argparse.ArgumentParser()
# hyperparameters sent by the client are passed as command-line arguments to the script
parser.add_argument('--epochs', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--learning_rate', type=float, default=0.1)
# data directories
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))
parser.add_argument('--test', type=str, default=os.environ.get('SM_CHANNEL_TEST'))
parser.add_argument('--env_name', type=str, default=os.environ.get('SM_HP_ENV_NAME'))
parser.add_argument('--yaml_file', type=str, default=os.environ.get('SM_HP_YAML_FILE'))
#parser.add_argument('--train_config', type=str, default=os.environ.get('SM_HP_TRAIN_CONFIG'))
# model directory: we will use the default set by SageMaker, /opt/ml/model
parser.add_argument('--model_dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
return parser.parse_known_args()
if __name__ == "__main__":
args, _ = parse_args()
subprocess.call(f'chmod 755 {args.train}/{args.env_name}'.split())
subprocess.call(f'mlagents-learn --env={args.train}/{args.env_name} --train /opt/ml/code/{args.yaml_file}'.split())
subprocess.call(f'cp -arf ./models {args.model_dir}'.split())
subprocess.call(f'cp -arf ./summaries {args.model_dir}'.split())
```
#### File: frameworks/tensorflow_mnist/mnist.py
```python
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import numpy as np
import tensorflow as tf
def cnn_model_fn(features, labels, mode):
"""Model function for CNN."""
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
# MNIST images are 28x28 pixels, and have one color channel
input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])
# Convolutional Layer #1
# Computes 32 features using a 5x5 filter with ReLU activation.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 28, 28, 1]
# Output Tensor Shape: [batch_size, 28, 28, 32]
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu
)
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 28, 28, 32]
# Output Tensor Shape: [batch_size, 14, 14, 32]
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2
# Computes 64 features using a 5x5 filter.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 14, 14, 32]
# Output Tensor Shape: [batch_size, 14, 14, 64]
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding='same',
activation=tf.nn.relu
)
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 14, 14, 64]
# Output Tensor Shape: [batch_size, 7, 7, 64]
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Flatten tensor into a batch of vectors
# Input Tensor Shape: [batch_size, 7, 7, 64]
# Output Tensor Shape: [batch_size, 7 * 7 * 64]
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
# Dense Layer
# Densely connected layer with 1024 neurons
# Input Tensor Shape: [batch_size, 7 * 7 * 64]
# Output Tensor Shape: [batch_size, 1024]
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# Add dropout operation; 0.6 probability that element will be kept
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, 10]
logits = tf.layers.dense(inputs=dropout, units=10)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
'classes': tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions['classes'])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def _load_training_data(base_dir):
x_train = np.load(os.path.join(base_dir, 'train_data.npy'))
y_train = np.load(os.path.join(base_dir, 'train_labels.npy'))
return x_train, y_train
def _load_testing_data(base_dir):
x_test = np.load(os.path.join(base_dir, 'eval_data.npy'))
y_test = np.load(os.path.join(base_dir, 'eval_labels.npy'))
return x_test, y_test
def _parse_args():
parser = argparse.ArgumentParser()
# Data, model, and output directories.
# model_dir is always passed in from SageMaker.
# By default this is a S3 path under the default bucket.
parser.add_argument('--model_dir', type=str)
parser.add_argument('--sm-model-dir', type=str, default=os.environ.get('SM_MODEL_DIR'))
parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAINING'))
parser.add_argument('--hosts', type=list, default=json.loads(os.environ.get('SM_HOSTS')))
parser.add_argument('--current-host', type=str, default=os.environ.get('SM_CURRENT_HOST'))
return parser.parse_known_args()
def serving_input_fn():
inputs = {'x': tf.placeholder(tf.float32, [None, 784])}
return tf.estimator.export.ServingInputReceiver(inputs, inputs)
if __name__ == '__main__':
args, _ = _parse_args()
train_data, train_labels = _load_training_data(args.train)
eval_data, eval_labels = _load_testing_data(args.train)
# Create the Estimator
mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir=args.model_dir)
# Set up logging for predictions
# Log the values in the 'Softmax' tensor with label 'probabilities'
tensors_to_log = {'probabilities': 'softmax_tensor'}
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': train_data},
y=train_labels,
batch_size=100,
num_epochs=None,
shuffle=True
)
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={'x': eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False
)
train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=20000)
eval_spec = tf.estimator.EvalSpec(eval_input_fn)
tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)
if args.current_host == args.hosts[0]:
mnist_classifier.export_savedmodel(args.sm_model_dir, serving_input_fn)
```
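A sketch of launching this script with the SageMaker TensorFlow estimator (not from the original repo; the role ARN, bucket, and instance settings are placeholders). Passing a single S3 URI to `fit` creates the default `training` channel, which the script reads through `SM_CHANNEL_TRAINING`.
```python
# Hypothetical launcher for mnist.py (SageMaker Python SDK v2 assumed).
from sagemaker.tensorflow import TensorFlow

mnist_estimator = TensorFlow(
    entry_point="mnist.py",
    role="arn:aws:iam::111122223333:role/SageMakerRole",  # placeholder role ARN
    instance_count=2,                  # the script exports only on hosts[0]
    instance_type="ml.c5.xlarge",
    framework_version="1.15.2",        # tf.layers / tf.estimator require a 1.x image
    py_version="py3",
)
mnist_estimator.fit("s3://example-bucket/mnist/")  # maps to SM_CHANNEL_TRAINING
```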
#### File: docker/code/predictor.py
```python
from __future__ import print_function
import os
import sys
import stat
import json
import shutil
import flask
from flask import Flask, jsonify, request, make_response, Response
import glob
import pandas as pd
import numpy as np
import csv
from io import StringIO
from joblib import dump, load
from sagemaker_containers.beta.framework import (
content_types, encoders, env, modules, transformer, worker)
from utils import write_failure_file, print_json_object, load_json_object, save_model_artifacts, print_files_in_path
model_artifacts_path = "/opt/ml/model/"
feature_column = "words"
label_column = "label"
preprocessor = None
le = None
# The flask app for serving predictions
app = flask.Flask(__name__)
def load_model():
global preprocessor
global le
if not preprocessor:
preprocessor = load(os.path.join(model_artifacts_path, "model.joblib"))
if not le:
le = load(os.path.join(model_artifacts_path, "label.joblib"))
@app.route('/ping', methods=['GET'])
def ping():
"""Determine if the container is working and healthy. In this sample container, we declare
it healthy if we can load the model successfully."""
load_model()
health = preprocessor is not None and le is not None
status = 200 if health else 404
return flask.Response(response='\n', status=status, mimetype='application/json')
@app.route('/invocations', methods=['POST'])
def transformation():
print("data: ", request.data[:100])
print("cookies: ", request.cookies)
print("headers: ", dict(request.headers))
print("args: ", request.args)
load_model()
content_type = request.headers['Content-Type']
print("Content type", content_type)
accept = request.headers['Accept']
print("Accept", accept)
input_data = request.data.decode()
first_entry = input_data.split('\n', 1)[0].split(',', 1)[0]
print("First entry is: ", first_entry)
df = None
if first_entry == "label" or first_entry.startswith("category_"):
recs = [(row[0], set(row[1:]))
for row in csv.reader(StringIO(input_data))]
if first_entry == "label":
df = pd.DataFrame.from_records(
recs[1:], columns=[label_column, feature_column])
else:
df = pd.DataFrame.from_records(
recs, columns=[label_column, feature_column])
# This is a labelled example; the first field carries the label (or the header).
print("First field indicates that the label is included")
else:
print("Length indicates that label is not included.")
# This is an unlabelled example.
recs = [(set(row),) for row in csv.reader(StringIO(input_data))]
df = pd.DataFrame.from_records(recs, columns=[feature_column])
print("merged df", df.head())
features = preprocessor.transform(df["words"])
prediction = None
if label_column in df:
print("label_column in input_data")
labels = le.transform(df[label_column])
# Return the label (as the first column) and the set of features.
prediction = np.insert(features.todense(), 0, labels, axis=1)
else:
print("label_column not in input_data")
# Return only the set of features
prediction = features.todense()
if accept == "application/json":
instances = []
for row in prediction.tolist():
instances.append({"features": row})
json_output = {"instances": instances}
return Response(json.dumps(json_output), mimetype=accept)
# TODO: use custom flag to indicate that this is in a pipeline rather than relying on the '*/*'
elif accept == 'text/csv' or accept == '*/*':
return Response(encoders.encode(prediction, "text/csv"), mimetype="text/csv")
else:
raise RuntimeError(
"{} accept type is not supported by this script.".format(accept))
```
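For a quick local check of the Flask service above, one can POST a small CSV payload and vary the `Accept` header. The sketch below is not part of the original file; the endpoint URL, port, and payload contents are assumptions, and it presumes the container is already serving with its joblib artifacts in place.
```python
# Local smoke test for /ping and /invocations; values are placeholders.
import requests

base = "http://localhost:8080"
print(requests.get(base + "/ping").status_code)  # 200 once both joblib artifacts load

csv_payload = 'category_news,"fed revises guidelines sending stocks up"\n'
resp = requests.post(
    base + "/invocations",
    data=csv_payload,
    headers={"Content-Type": "text/csv", "Accept": "application/json"},
)
print(resp.status_code, resp.json())  # e.g. {"instances": [{"features": [...]}]}
```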
#### File: frameworks/utils/mnist.py
```python
import numpy as np
from urllib import request
import gzip
import os
import boto3
import json
dirname = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dirname, "config.json"), "r") as f:
CONFIG = json.load(f)
def mnist_to_numpy(data_dir='/tmp/data', train=True):
"""Download MNIST dataset and convert it to numpy array
Args:
data_dir (str): directory to save the data
train (bool): download training set
Returns:
tuple of images and labels as numpy arrays
"""
if not os.path.exists(data_dir):
os.makedirs(data_dir)
if train:
images_file = "train-images-idx3-ubyte.gz"
labels_file = "train-labels-idx1-ubyte.gz"
else:
images_file = "t10k-images-idx3-ubyte.gz"
labels_file = "t10k-labels-idx1-ubyte.gz"
# download objects
s3 = boto3.client('s3')
bucket = CONFIG["public_bucket"]
for obj in [images_file, labels_file]:
key = os.path.join("datasets/image/MNIST", obj)
dest = os.path.join(data_dir, obj)
if not os.path.exists(dest):
s3.download_file(bucket, key, dest)
return _convert_to_numpy(data_dir, images_file, labels_file)
def _convert_to_numpy(data_dir, images_file, labels_file):
"""Byte string to numpy arrays"""
with gzip.open(os.path.join(data_dir, images_file), 'rb') as f:
images = np.frombuffer(f.read(), np.uint8, offset=16).reshape(-1, 28, 28)
with gzip.open(os.path.join(data_dir, labels_file), 'rb') as f:
labels = np.frombuffer(f.read(), np.uint8, offset=8)
return (images, labels)
def normalize(x, axis):
eps = np.finfo(float).eps
mean = np.mean(x, axis=axis, keepdims=True)
# avoid division by zero
std = np.std(x, axis=axis, keepdims=True) + eps
return (x - mean) / std
def adjust_to_framework(x, framework='pytorch'):
"""Adjust a ``numpy.ndarray`` to be used as input for specified framework
Args:
x (numpy.ndarray): Batch of images to be adjusted
to follow the convention in pytorch / tensorflow / mxnet
framework (str): Framework to use. Takes value in
``pytorch``, ``tensorflow`` or ``mxnet``
Return:
numpy.ndarray following the convention of tensors in the given
framework
"""
if x.ndim == 3:
# input is gray-scale
x = np.expand_dims(x, 1)
if framework in ['pytorch', 'mxnet']:
# depth-major
return x
elif framework == 'tensorflow':
# depth-minor (channels last); mxnet is already handled together with pytorch above
return np.transpose(x, (0, 2, 3, 1))
else:
raise ValueError('framework must be one of ' + \
'[pytorch, tensorflow, mxnet], got {}'.format(framework))
if __name__ == '__main__':
X, Y = mnist_to_numpy()
X, Y = X.astype(np.float32), Y.astype(np.int8)
```
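A short usage sketch for the helpers above, assuming `config.json` points at a reachable bucket and the module is importable as `mnist` (both assumptions, taken from the file path).
```python
# Download, standardize, and reshape the MNIST test split.
import numpy as np
from mnist import mnist_to_numpy, normalize, adjust_to_framework  # module name assumed

X, Y = mnist_to_numpy(data_dir="/tmp/data", train=False)
X = normalize(X.astype(np.float32), axis=(1, 2))    # per-image zero mean, unit std
X = adjust_to_framework(X, framework="tensorflow")  # NCHW -> NHWC
print(X.shape, Y.shape)                              # (10000, 28, 28, 1) (10000,)
```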
#### File: src/ActiveLearning/helper.py
```python
from datetime import datetime
import random
AUTOANNOTATION_THRESHOLD = 0.50
JOB_TYPE = "groundtruth/text-classification"
class SimpleActiveLearning:
def __init__(self, job_name, label_category_name,
label_names, max_selections):
self.job_name = job_name
self.label_category_name = label_category_name
self.label_names = label_names
self.max_selections = max_selections
def compute_margin(self, probabilities, labels):
"""
compute the confidence and the best label given the probability distribution.
"""
max_probability = max(probabilities)
max_prob_index = probabilities.index(max_probability)
best_label = labels[max_prob_index]
remaining_probs = [prob for i,prob in enumerate(probabilities) if i!=max_prob_index]
second_probability = max(remaining_probs, default=0.0)
return max_probability - second_probability, best_label
def get_label_index(self, inference_label_output):
"""
inference_label_output is of the format "__label__0".
This method gets an integer suffix from the end of the string.
For this example, "__label__0" the function returns 0.
"""
return int(inference_label_output.split('_')[-1])
def make_metadata(self, margin, best_label):
"""
make required metadata to match the output label.
"""
return {
'confidence': float(f'{margin: 1.2f}'),
'job-name': self.job_name,
'class-name': self.label_names[self.get_label_index(best_label)],
'human-annotated': 'no',
'creation-date': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f'),
'type': JOB_TYPE
}
def make_autoannotation(self, prediction, source, margin, best_label):
"""
generate the final output prediction with the label and confidence.
"""
return {
'source': source['source'],
'id': prediction['id'],
f'{self.label_category_name}': self.get_label_index(best_label),
f'{self.label_category_name}-metadata': self.make_metadata(margin,
best_label)
}
def autoannotate(self, predictions, sources):
"""
auto annotate all unlabeled data with confidence above AUTOANNOTATION_THRESHOLD.
"""
sources_by_id = {
source['id']: source for source in sources
}
autoannotations = []
for prediction in predictions:
probabilities = prediction['prob']
labels = prediction['label']
margin, best_label = self.compute_margin(probabilities, labels)
if margin > AUTOANNOTATION_THRESHOLD:
autoannotations.append(self.make_autoannotation(
prediction, sources_by_id[prediction['id']],
margin, best_label
))
return autoannotations
def select_for_labeling(self, predictions, autoannotations):
"""
Select the next set of records to be labeled by humans.
"""
initial_ids = {
prediction['id'] for prediction in predictions
}
autoannotation_ids = {
autoannotation['id'] for autoannotation in autoannotations
}
remaining_ids = initial_ids - autoannotation_ids
selections = random.sample(
list(remaining_ids), min(self.max_selections, len(remaining_ids))
)
return selections
```
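A toy walk-through of the margin-based auto-annotation above; every value is made up, and the module is assumed to be importable as `helper` (from the file path).
```python
from helper import SimpleActiveLearning  # module name assumed

al = SimpleActiveLearning(job_name="demo-job", label_category_name="category",
                          label_names=["sports", "politics"], max_selections=2)
predictions = [{"id": 0, "prob": [0.9, 0.1], "label": ["__label__0", "__label__1"]},
               {"id": 1, "prob": [0.55, 0.45], "label": ["__label__0", "__label__1"]}]
sources = [{"id": 0, "source": "goal scored in the final minute"},
           {"id": 1, "source": "parliament debates the new bill"}]
auto = al.autoannotate(predictions, sources)        # id 0: margin 0.8 > 0.5, auto-labeled
humans = al.select_for_labeling(predictions, auto)  # id 1: margin 0.1, sent to humans
print(len(auto), humans)                            # 1 [1]
```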
#### File: src/ActiveLearning/prepare_for_training.py
```python
import json
from functools import partial
from io import StringIO
from s3_helper import S3Ref, copy_with_query_and_transform, download_with_query, create_ref_at_parent_key
from string_helper import generate_job_id_and_s3_path
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def remove_by_ids(s3_blacklist_uri, label_attribute_name, manifest_file):
"""
helper method to remove the selected ids from the given input file.
This is used to create a training set which has no elements from the given validation set.
"""
logger.info("Remove validation set ids from training data.")
blacklist = S3Ref.from_uri(s3_blacklist_uri)
validation_id_query = """select s."id" from s3object[*] s where s."{}-metadata"."human-annotated" IN ('yes')""".format(
label_attribute_name)
validation_id_file = download_with_query(blacklist, validation_id_query)
validation_ids = set()
for line in validation_id_file:
data = json.loads(line)
validation_ids.add(data["id"])
training_only_file = StringIO()
training_set_size = 0
for line in manifest_file:
data = json.loads(line)
if data["id"] not in validation_ids:
training_set_size += 1
training_only_file.write(json.dumps(data) + "\n")
logger.info("Remove ids complete. training set size = {} Validation set size = {}".format(
training_set_size, len(validation_ids)))
return training_only_file
class TrainingJobParameters:
def __init__(self, event, training_folder_uri):
self.event = event
self.training_folder_uri = training_folder_uri
@property
def attribute_names(self):
"""
attribute names to be parsed from the manifest file during training.
"""
label_attribute_name = self.event['LabelAttributeName']
input_mode = "source"
return [input_mode, label_attribute_name]
@property
def training_input(self):
"""
Generates the training input in an s3 location and returns the s3 uri.
"""
label_attribute_name = self.event['LabelAttributeName']
s3_input_uri = self.event['ManifestS3Uri']
meta_data = self.event['meta_data']
source = S3Ref.from_uri(s3_input_uri)
dest = S3Ref.from_uri(self.training_folder_uri + "training_input.manifest")
logger.info("Creating training input at {} from human labeled data.".format(
dest.get_uri()))
removeValidationIds = partial(remove_by_ids, meta_data['ValidationS3Uri'],
label_attribute_name)
training_labeled_query = """select * from s3object[*] s where s."{}-metadata"."human-annotated" IN ('yes')""".format(
label_attribute_name)
copy_with_query_and_transform(source, dest, training_labeled_query, removeValidationIds)
logger.info("Uploaded training input at {}.".format(dest.get_uri()))
return dest.get_uri()
@property
def resource_config(self):
"""
configure the instance where training will be run.
"""
return {
"InstanceCount": 1,
"InstanceType": "ml.c5.2xlarge",
"VolumeSizeInGB": 60
}
@property
def algorithm_specification(self):
"""
configure the docker container uri for the training algorithm.
"""
return {
# This assumes we are running in us-east-1 (IAD).
# Refer to this doc to tweak this model if you run it in other regions.
# https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-algo-docker-registry-paths.html
"TrainingImage": "811284229777.dkr.ecr.us-east-1.amazonaws.com/blazingtext:latest",
"TrainingInputMode":"Pipe"
}
@property
def hyper_parameters(self):
"""
configure hyper parameters used for training.
"""
return {
"early_stopping":"True",
"epochs":"20",
"learning_rate":"0.05",
"min_count":"5",
"min_epochs":"1",
"mode":"supervised",
"patience":"5",
"vector_dim":"20",
"word_ngrams":"2"
}
def lambda_handler(event, context):
"""
This function sets up all the input parameters required for the training job.
"""
training_job_name_prefix = event['LabelingJobNamePrefix']
intermediate_folder_uri = event["meta_data"]["IntermediateFolderUri"]
training_job_name, training_folder_uri = generate_job_id_and_s3_path(
training_job_name_prefix, intermediate_folder_uri)
training_job_parameters = TrainingJobParameters(event, training_folder_uri)
return {
"TrainingJobName": training_job_name,
"trainS3Uri":training_job_parameters.training_input,
"ResourceConfig":training_job_parameters.resource_config,
'AlgorithmSpecification':training_job_parameters.algorithm_specification,
"HyperParameters":training_job_parameters.hyper_parameters,
"S3OutputPath": training_job_parameters.training_folder_uri,
"AttributeNames": training_job_parameters.attribute_names
}
```
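The keys read by `lambda_handler` and `TrainingJobParameters` imply an event of roughly the following shape; the bucket names and prefixes below are placeholders, not values from the original source.
```python
# Reconstructed from the keys the lambda accesses; values are placeholders.
example_event = {
    "LabelingJobNamePrefix": "text-clf",
    "LabelAttributeName": "category",
    "ManifestS3Uri": "s3://example-bucket/intermediate/input.manifest",
    "meta_data": {
        "IntermediateFolderUri": "s3://example-bucket/intermediate/",
        "ValidationS3Uri": "s3://example-bucket/intermediate/validation.manifest",
    },
}
# lambda_handler(example_event, None) would return the TrainingJobName, training input URI,
# resource config, algorithm specification, and hyperparameters for the training job.
```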
#### File: src/tests/test_copy_input_manifest.py
```python
import pytest
import boto3
from moto import mock_s3
from Bootstrap.copy_input_manifest import lambda_handler
@mock_s3
def test_copy_input_manifest():
manifest_content = b'{"source":"Fed revises guidelines sending stocks up."}'
s3r = boto3.resource('s3', region_name='us-east-1')
s3r.create_bucket(Bucket='source_bucket')
s3r.Object('source_bucket', 'input.manifest').put(Body=manifest_content)
s3r.create_bucket(Bucket='output_bucket')
event = {
'ManifestS3Uri': 's3://source_bucket/input.manifest',
'S3OutputPath': 's3://output_bucket/'
}
output = lambda_handler(event, {})
intermediate_body = s3r.Object('output_bucket', 'intermediate/input.manifest').get()['Body'].read()
assert intermediate_body == manifest_content
assert output['IntermediateFolderUri'] == "s3://output_bucket/intermediate/"
assert output['IntermediateManifestS3Uri'] == "s3://output_bucket/intermediate/input.manifest"
def test_copy_input_manifest_invalid_output():
with pytest.raises(Exception, match=r"S3OutputPath should end with '/'"):
event = {
'ManifestS3Uri': 's3://source_bucket/input.manifest',
'S3OutputPath': 's3://output_bucket'
}
lambda_handler(event, {})
```
#### File: tensorflwo-test/ground_truth_labeling_jobs/sagemaker_ground_truth_semantic_segmentation_mask_index_correction.py
```python
import argparse
import base64
from collections import defaultdict
from io import BytesIO
import json
from pathlib import Path
from urllib.parse import urlparse
import boto3
import numpy as np
from PIL import Image
s3 = boto3.resource('s3')
sagemaker = boto3.client('sagemaker')
try:
next(iter(s3.buckets.all()))
except Exception as e:
raise Exception(
'Could not access your s3 resources. '
'Please verify that your AWS credentials are correctly configured and '
'try again.'
) from e
class SemSegParser(object):
def __init__(self, annotation, label_categories):
self._annotation = annotation
image_bytes = base64.b64decode(self._annotation)
img = np.asarray(Image.open(BytesIO(image_bytes)).convert("RGBA"))
self.hex_colors = defaultdict(str)
self.hex_colors['BACKGROUND'] = '#ffffff'
self._img_array, self._class_names, self._label_map = \
self.get_class_masks(img, label_categories)
def get_class_masks(self, img, label_categories):
img_no_alpha = img[:, :, 0:3]
masks = []
class_names = ['BACKGROUND']
rgb_label_maps = self.initialize_label_map()
for idx_str, category_info in label_categories.items():
i = int(idx_str)
class_name = category_info['class-name']
if class_name == 'BACKGROUND':
continue
class_names.append(class_name)
class_hex_color = category_info['hex-color']
self.hex_colors[class_name] = class_hex_color
class_rgb_color = self.hex_to_rgb(class_hex_color)
rgb_label_maps.append(class_rgb_color)
class_mask = np.all(img_no_alpha == class_rgb_color, axis=-1)
class_mask = class_mask * i
masks.append(class_mask)
masks = np.array(masks)
masks = masks.sum(axis=0)
return masks.astype(np.uint8), class_names, rgb_label_maps
# Set background to white
def initialize_label_map(self):
return [(255, 255, 255)]
@property
def class_names(self):
return self._class_names
@property
def img_array(self):
return self._img_array
@staticmethod
def hex_to_rgb(hexcode):
h = hexcode.lstrip('#')
return tuple(int(h[i:i + 2], 16) for i in (0, 2, 4))
@property
def img_w_palette(self):
im = Image.fromarray(np.uint8(self._img_array))
num_classes = len(self._label_map)
palette = self._label_map + [(255, 255, 255) for i
in range(256 - num_classes)]
palette = [item for rgb in palette for item in rgb] + ([255, ])
im.putpalette(palette)
return im
def get_bucket_and_key(s3uri):
"""Get the bucket name and key associated with an s3 object.
Args:
s3uri (str): The s3 uri.
Return:
bucket_name and key strings.
"""
url = urlparse(s3uri)
bucket_name = url.netloc
key = url.path.lstrip('/')
return bucket_name, key
def get_object_bytes(s3_obj):
"""Get bytes for an object stored in s3.
Arg:
s3_obj (boto3.resources.factory.s3.ObjectSummary): object for thing in s3
"""
body = s3_obj.get()['Body']
return body.read()
def get_metadata(entry, label_attribute_name):
metadata_key = '{}-metadata'.format(label_attribute_name)
try:
metadata = entry[metadata_key]
except KeyError as e:
raise KeyError(
'The metadata_key (derived from the label-attribute-name or '
'job-name) is missing from this manifest entry. Please specify a '
'different label-attribute-name.'
) from e
return metadata
def get_output_manifest(labeling_job_name):
description = sagemaker.describe_labeling_job(
LabelingJobName=labeling_job_name
)
label_attribute_name = description['LabelAttributeName']
manifest_path = description['LabelingJobOutput']['OutputDatasetS3Uri']
bucket_name, key = get_bucket_and_key(manifest_path)
manifest_bytes = get_object_bytes(s3.Bucket(bucket_name).Object(key))
manifest = []
for line in manifest_bytes.decode().splitlines():
manifest.append(json.loads(line))
return manifest, label_attribute_name, bucket_name, key
def fix_annotations(labeling_job_name):
manifest, label_attribute_name, output_manifest_bucket_name, \
output_manifest_key = get_output_manifest(labeling_job_name)
fixed_manifest = []
for entry in manifest:
metadata = get_metadata(entry, label_attribute_name)
try:
job_name = metadata['job-name'].replace('labeling-job/', '')
label_uri = entry[label_attribute_name]
bucket_name, key = get_bucket_and_key(label_uri)
key_for_corrected_png = key.replace(
'consolidated-annotation/output',
'consolidated-annotation/output-corrected'
)
bucket = s3.Bucket(bucket_name)
annotation_bytes = get_object_bytes(bucket.Object(key))
annotation_b64 = base64.b64encode(annotation_bytes)
color_map = metadata['internal-color-map']
parser = SemSegParser(annotation_b64, color_map)
png_img = parser.img_w_palette
with BytesIO() as in_mem_file:
png_img.save(in_mem_file, format='png')
in_mem_file.seek(0)
obj = bucket.Object(key_for_corrected_png)
obj.upload_fileobj(in_mem_file)
entry['original-label-ref'] = entry[label_attribute_name]
entry['corrected-label-ref'] = \
's3://' + bucket_name + '/' + key_for_corrected_png
except KeyError:
continue
finally:
fixed_manifest.append(entry)
# Upload corrected manifest to S3
with BytesIO() as in_mem_file:
for line in fixed_manifest:
line_to_write = json.dumps(line) + '\n'
in_mem_file.write(bytes(line_to_write.encode('utf-8')))
in_mem_file.seek(0)
corrected_manifest_bucket = s3.Bucket(output_manifest_bucket_name)
corrected_manifest_key = output_manifest_key.replace(
'output.manifest', 'output-corrected.manifest')
obj = corrected_manifest_bucket.Object(corrected_manifest_key)
obj.upload_fileobj(in_mem_file)
def parse_args():
parser = argparse.ArgumentParser(
description=(
'Correct semantic segmentation masks from Amazon SageMaker Ground '
'Truth.'
)
)
parser.add_argument(
'--labeling-job-name', type=str, required=True,
help=(
'Your labeling job.'
)
)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
print(
'Fixing annotations from {}'.format(
args.labeling_job_name
)
)
fix_annotations(args.labeling_job_name)
print('Done.')
```
#### File: clients/ddb/experiment_db_client.py
```python
import logging
from boto3.dynamodb.conditions import Key
from orchestrator.exceptions.ddb_client_exceptions import RecordAlreadyExistsException
logger=logging.getLogger(__name__)
class ExperimentDbClient(object):
def __init__(self, table_session):
self.table_session = table_session
def get_experiment_record(self, experiment_id):
response = self.table_session.query(
ConsistentRead=True,
KeyConditionExpression=Key('experiment_id').eq(experiment_id)
)
for i in response['Items']:
return i
return None
def create_new_experiment_record(self, record):
try:
self.table_session.put_item(
Item=record,
ConditionExpression='attribute_not_exists(experiment_id)'
)
except Exception as e:
if "ConditionalCheckFailedException" in str(e):
raise RecordAlreadyExistsException()
raise e
def update_experiment_record(self, record):
self.table_session.put_item(
Item=record
)
def delete_item(self, experiment_id):
logger.warning("Deleting experiment record...")
self.table_session.delete_item(
Key={
"experiment_id": experiment_id
}
)
#### Update states for training workflow
def update_training_workflow_metadata_with_validation(
self,
experiment_id,
training_workflow_metadata,
expected_current_next_model_to_train_id
):
'''
Updates the ExperimentDb record for experiment_id with new training_workflow_metadata,
while validating that next_model_to_train_id matches the expected value in the old record.
'''
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET training_workflow_metadata = :new_val',
ConditionExpression='training_workflow_metadata.next_model_to_train_id = :exp_model_id',
ExpressionAttributeValues={
':new_val': training_workflow_metadata,
':exp_model_id': expected_current_next_model_to_train_id
}
)
def update_experiment_training_state(self, experiment_id, training_state):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET training_workflow_metadata.training_state = :val',
ExpressionAttributeValues={':val': training_state}
)
def update_experiment_last_trained_model_id(self, experiment_id, last_trained_model_id):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET training_workflow_metadata.last_trained_model_id = :val',
ExpressionAttributeValues={':val': last_trained_model_id}
)
def update_experiment_next_model_to_train_id(self, experiment_id, next_model_to_train_id):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET training_workflow_metadata.next_model_to_train_id = :val',
ExpressionAttributeValues={':val': next_model_to_train_id}
)
#### Update states for hosting workflow
def update_experiment_hosting_state(self, experiment_id, hosting_state):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET hosting_workflow_metadata.hosting_state = :val',
ExpressionAttributeValues={':val': hosting_state}
)
def update_experiment_last_hosted_model_id(self, experiment_id, last_hosted_model_id):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET hosting_workflow_metadata.last_hosted_model_id = :val',
ExpressionAttributeValues={':val': last_hosted_model_id}
)
def update_experiment_next_model_to_host_id(self, experiment_id, next_model_to_host_id):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET hosting_workflow_metadata.next_model_to_host_id = :val',
ExpressionAttributeValues={':val': next_model_to_host_id}
)
def update_experiment_hosting_endpoint(self, experiment_id, hosting_endpoint):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET hosting_workflow_metadata.hosting_endpoint = :val',
ExpressionAttributeValues={':val': hosting_endpoint}
)
#### Update states for joining workflow
def update_experiment_joining_state(self, experiment_id, joining_state):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET joining_workflow_metadata.joining_state = :val',
ExpressionAttributeValues={':val': joining_state}
)
def update_experiment_last_joined_job_id(self, experiment_id, last_joined_job_id):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET joining_workflow_metadata.last_joined_job_id = :val',
ExpressionAttributeValues={':val': last_joined_job_id}
)
def update_experiment_next_join_job_id(self, experiment_id, next_join_job_id):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET joining_workflow_metadata.next_join_job_id = :val',
ExpressionAttributeValues={':val': next_join_job_id}
)
#### Update states for evaluation workflow
def update_experiment_evaluation_state(self, experiment_id, evaluation_state):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET evaluation_workflow_metadata.evaluation_state = :val',
ExpressionAttributeValues={':val': evaluation_state}
)
def update_experiment_last_evaluation_job_id(self, experiment_id, last_evaluation_job_id):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET evaluation_workflow_metadata.last_evaluation_job_id = :val',
ExpressionAttributeValues={':val': last_evaluation_job_id}
)
def update_experiment_next_evaluation_job_id(self, experiment_id, next_evaluation_job_id):
self.table_session.update_item(
Key={'experiment_id': experiment_id},
UpdateExpression=f'SET evaluation_workflow_metadata.next_evaluation_job_id = :val',
ExpressionAttributeValues={':val': next_evaluation_job_id}
)
```
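Wiring the client to a real table is a one-liner with boto3. A sketch follows; the module import name and the table name `ExperimentTable` are assumptions.
```python
import boto3
from experiment_db_client import ExperimentDbClient  # module name assumed from the file path

table = boto3.resource("dynamodb", region_name="us-east-1").Table("ExperimentTable")
client = ExperimentDbClient(table)
client.create_new_experiment_record({"experiment_id": "exp-001",
                                     "training_workflow_metadata": {"training_state": "PENDING"}})
client.update_experiment_training_state("exp-001", "TRAINING")
print(client.get_experiment_record("exp-001"))
```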
#### File: workflow/datatypes/experiment_record.py
```python
class ExperimentRecord():
'''
This class captures all the data that is needed to run an experiment
for continuously training and updating models on SageMaker
'''
def __init__(
self,
experiment_id,
training_workflow_metadata=None,
hosting_workflow_metadata=None,
joining_workflow_metadata=None,
evaluation_workflow_metadata=None
):
# avoid shared mutable default arguments; fall back to fresh dicts per instance
training_workflow_metadata = training_workflow_metadata or {}
hosting_workflow_metadata = hosting_workflow_metadata or {}
joining_workflow_metadata = joining_workflow_metadata or {}
evaluation_workflow_metadata = evaluation_workflow_metadata or {}
# unique id common across all experiments in the account
self.experiment_id = experiment_id
# training workflow metadata
self.training_workflow_metadata = training_workflow_metadata
self._training_state = training_workflow_metadata.get("training_state", None)
self._last_trained_model_id = training_workflow_metadata.get("last_trained_model_id", None)
self._next_model_to_train_id = training_workflow_metadata.get("next_model_to_train_id", None)
# hosting workflow metadata
self.hosting_workflow_metadata = hosting_workflow_metadata
self._hosting_state = hosting_workflow_metadata.get("hosting_state", None)
self._last_hosted_model_id = hosting_workflow_metadata.get("last_hosted_model_id", None)
self._next_model_to_host_id = hosting_workflow_metadata.get("next_model_to_host_id", None)
self._hosting_endpoint = hosting_workflow_metadata.get("hosting_endpoint", None)
# joining workflow metadata
self.joining_workflow_metadata = joining_workflow_metadata
self._joining_state = joining_workflow_metadata.get("joining_state", None)
self._last_joined_job_id = joining_workflow_metadata.get("last_joined_job_id", None)
self._next_join_job_id = joining_workflow_metadata.get("next_join_job_id", None)
# evaluation workflow metadata
self.evaluation_workflow_metadata = evaluation_workflow_metadata
self._evaluation_state = evaluation_workflow_metadata.get("evaluation_state", None)
self._last_evaluation_job_id = evaluation_workflow_metadata.get("last_evaluation_job_id", None)
self._next_evaluation_job_id = evaluation_workflow_metadata.get("next_evaluation_job_id", None)
def to_ddb_record(self):
self.training_workflow_metadata["training_state"] = self._training_state
self.training_workflow_metadata["last_trained_model_id"] = self._last_trained_model_id
self.training_workflow_metadata["next_model_to_train_id"] = self._next_model_to_train_id
self.hosting_workflow_metadata["hosting_state"] = self._hosting_state
self.hosting_workflow_metadata["last_hosted_model_id"] = self._last_hosted_model_id
self.hosting_workflow_metadata["next_model_to_host_id"] = self._next_model_to_host_id
self.hosting_workflow_metadata["hosting_endpoint"] = self._hosting_endpoint
self.joining_workflow_metadata["joining_state"] = self._joining_state
self.joining_workflow_metadata["last_joined_job_id"] = self._last_joined_job_id
self.joining_workflow_metadata["next_join_job_id"] = self._next_join_job_id
self.evaluation_workflow_metadata["evaluation_state"] = self._evaluation_state
self.evaluation_workflow_metadata["last_evaluation_job_id"] = self._last_evaluation_job_id
self.evaluation_workflow_metadata["next_evaluation_job_id"] = self._next_evaluation_job_id
return {
'experiment_id': self.experiment_id,
'training_workflow_metadata': self.training_workflow_metadata,
'hosting_workflow_metadata': self.hosting_workflow_metadata,
'joining_workflow_metadata': self.joining_workflow_metadata,
'evaluation_workflow_metadata': self.evaluation_workflow_metadata
}
@classmethod
def load_from_ddb_record(cls, record):
return ExperimentRecord(
record["experiment_id"],
record["training_workflow_metadata"],
record["hosting_workflow_metadata"],
record["joining_workflow_metadata"],
record["evaluation_workflow_metadata"]
)
```
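A round-trip sketch for the record class above: build it, serialize it for DynamoDB, and load it back. The module import name is assumed from the file path.
```python
from experiment_record import ExperimentRecord  # module name assumed

record = ExperimentRecord("exp-001",
                          training_workflow_metadata={"training_state": "PENDING"})
item = record.to_ddb_record()          # fills in the per-workflow metadata keys
restored = ExperimentRecord.load_from_ddb_record(item)
assert restored.experiment_id == "exp-001"
assert restored.training_workflow_metadata["training_state"] == "PENDING"
```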
#### File: markov/architecture/contrib.py
```python
import tensorflow as tf
class Attention(object):
'''Attention layer implementation. This layer needs to be placed after a 2D convolutional
layer.
'''
def __init__(self, units: int):
'''units - number of hidden units to use in the score weights'''
self.units = units
def __call__(self, input_layer, name: str = None, kernel_initializer=None,
activation=None, is_training=None):
'''input_layer - Input layer to the attention layer, this should be a conv2D layer
name - Base name for the dense layers
kernel_initializer = Initializer for the weights
activation - Activation function to use
is_training - This is to adhere to rl_coach's expected function signature, it is
not used in this layer.
'''
score_weights = tf.layers.dense(input_layer, self.units, name="Score_{}".format(name),
kernel_initializer=kernel_initializer, activation=None)
score_activation = tf.nn.tanh(score_weights)
attention_weights = tf.layers.dense(score_activation, 1, name="Attention_{}".format(name),
kernel_initializer=kernel_initializer, activation=None)
attention_weights_activation = tf.nn.softmax(attention_weights)
context_vector = tf.multiply(attention_weights_activation, input_layer)
context_conv = tf.reduce_sum(context_vector, axis=1, keepdims=True)
return context_conv
def __str__(self):
'''Returns a string with the number of hidden units for the score weights'''
return "Conv2dWithAttention (num outputs = {})".format(self.units)
```
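A minimal TF1 graph showing where the layer sits, using the same `tf.layers` API as the rest of this repository; the input shape and unit counts are arbitrary, and the import path is taken from the file header.
```python
import tensorflow as tf
from markov.architecture.contrib import Attention  # import path from the file header

inputs = tf.placeholder(tf.float32, [None, 84, 84, 3])
conv = tf.layers.conv2d(inputs, filters=32, kernel_size=3, activation=tf.nn.relu)
attention = Attention(units=64)
context = attention(conv, name="top")  # reduces the conv output over axis 1, keepdims=True
print(context.shape)                   # (?, 1, 82, 32)
```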
#### File: cameras/handlers/top_camera.py
```python
import math
import logging
import xml.etree.ElementTree as ET
import rospy
from deepracer_simulation_environment.srv import TopCamDataSrvResponse, TopCamDataSrv
from gazebo_msgs.msg import ModelState
from geometry_msgs.msg import Pose
from markov.track_geom.track_data import TrackData
from markov.track_geom.utils import euler_to_quaternion
from markov.cameras.abs_camera import AbstractCamera
from markov.cameras.constants import CameraSettings
from markov.log_handler.logger import Logger
from markov.gazebo_tracker.trackers.set_model_state_tracker import SetModelStateTracker
# Height value is determined from AWS track and is maintained to prevent z fighting in top down
# view
CAMERA_HEIGHT = 6.0
# Percentage to pad the image so that the frame boundary is not exactly on the track
PADDING_PCT = 0.25
# The default horizontal field of view
DEFAULT_H_FOV = 1.13
# Default resolution
DEFAULT_RESOLUTION = (640, 480)
# Logger object
LOG = Logger(__name__, logging.INFO).get_logger()
class TopCamera(AbstractCamera):
"""this module is for top camera"""
name = "top_camera"
def __init__(self, namespace=None, model_name=None):
super(TopCamera, self).__init__(TopCamera.name, namespace=namespace,
model_name=model_name)
self.track_data = TrackData.get_instance()
x_min, y_min, x_max, y_max = self.track_data.outer_border.bounds
horizontal_width = (x_max - x_min) * (1.0 + PADDING_PCT)
vertical_width = (y_max - y_min) * (1.0 + PADDING_PCT)
horizontal_fov = DEFAULT_H_FOV
try:
if horizontal_width >= vertical_width:
horizontal_fov = 2.0 * math.atan(0.5 * horizontal_width / CAMERA_HEIGHT)
else:
vertical_fov = math.atan(0.5 * vertical_width / CAMERA_HEIGHT)
aspect_ratio = float(DEFAULT_RESOLUTION[0]) / float(DEFAULT_RESOLUTION[1])
horizontal_fov = 2.0 * math.atan(aspect_ratio * math.tan(vertical_fov))
except Exception as ex:
LOG.info('Unable to compute top camera fov, using default: %s', ex)
self.camera_settings_dict = CameraSettings.get_empty_dict()
self.camera_settings_dict[CameraSettings.HORZ_FOV] = horizontal_fov
self.camera_settings_dict[CameraSettings.PADDING_PCT] = PADDING_PCT
self.camera_settings_dict[CameraSettings.IMG_WIDTH] = DEFAULT_RESOLUTION[0]
self.camera_settings_dict[CameraSettings.IMG_HEIGHT] = DEFAULT_RESOLUTION[1]
rospy.Service('get_top_cam_data', TopCamDataSrv, self._handle_get_top_cam_data)
def _handle_get_top_cam_data(self, req):
'''Response handler for clients requesting the camera settings data
req - Client request, which should be an empty request
'''
return TopCamDataSrvResponse(self.camera_settings_dict[CameraSettings.HORZ_FOV],
self.camera_settings_dict[CameraSettings.PADDING_PCT],
self.camera_settings_dict[CameraSettings.IMG_WIDTH],
self.camera_settings_dict[CameraSettings.IMG_HEIGHT])
def _get_sdf_string(self, camera_sdf_path):
tree = ET.parse(camera_sdf_path)
root = tree.getroot()
for fov in root.iter('horizontal_fov'):
fov.text = str(self.camera_settings_dict[CameraSettings.HORZ_FOV])
return '<?xml version="1.0"?>\n {}'.format(ET.tostring(root, encoding='unicode'))
def _get_initial_camera_pose(self, car_pose):
# get the bounds
x_min, y_min, x_max, y_max = self.track_data.outer_border.bounds
# update camera position
model_pose = Pose()
model_pose.position.x = (x_min+x_max) / 2.0
model_pose.position.y = (y_min+y_max) / 2.0
model_pose.position.z = CAMERA_HEIGHT
x, y, z, w = euler_to_quaternion(roll=1.57079, pitch=1.57079, yaw=3.14159)
model_pose.orientation.x = x
model_pose.orientation.y = y
model_pose.orientation.z = z
model_pose.orientation.w = w
return model_pose
def _reset(self, car_pose):
"""Reset camera position based on the track size"""
if self.is_reset_called:
return
# update camera position
model_pose = self._get_initial_camera_pose(car_pose)
camera_model_state = ModelState()
camera_model_state.model_name = self.model_name
camera_model_state.pose = model_pose
SetModelStateTracker.get_instance().set_model_state(camera_model_state)
def _update(self, model_state, delta_time):
pass
```
#### File: src/markov/camera_utils.py
```python
import rospy
import time
import threading
from markov.cameras.camera_factory import CameraFactory
from markov.log_handler.deepracer_exceptions import GenericRolloutException
from markov.defaults import DEFAULT_MAIN_CAMERA, DEFAULT_SUB_CAMERA
from markov.gazebo_tracker.trackers.get_model_state_tracker import GetModelStateTracker
is_configure_camera_called = False
configure_camera_function_lock = threading.Lock()
# Amount of time (in seconds) to wait, in order to prevent model state from
# spamming logs while the model is loading
WAIT_TO_PREVENT_SPAM = 2
def wait_for_model(model_name, relative_entity_name):
model = GetModelStateTracker.get_instance().get_model_state(model_name,
relative_entity_name,
blocking=True,
auto_sync=False)
should_wait_for_model = not model.success
while should_wait_for_model:
time.sleep(WAIT_TO_PREVENT_SPAM)
model = GetModelStateTracker.get_instance().get_model_state(model_name,
relative_entity_name,
blocking=True,
auto_sync=False)
should_wait_for_model = not model.success
def configure_camera(namespaces=None):
namespaces = namespaces or ["racecar"]
global is_configure_camera_called
with configure_camera_function_lock:
if not is_configure_camera_called:
is_configure_camera_called = True
main_camera_type = rospy.get_param("MAIN_CAMERA", DEFAULT_MAIN_CAMERA)
sub_camera_type = rospy.get_param("SUB_CAMERA", DEFAULT_SUB_CAMERA)
main_cameras = dict()
for namespace in namespaces:
wait_for_model(model_name=namespace, relative_entity_name="")
main_cameras[namespace] = CameraFactory.create_instance(camera_type=main_camera_type,
model_name="/{}/{}".format(namespace,
"main_camera"),
namespace=namespace)
sub_camera = CameraFactory.create_instance(camera_type=sub_camera_type,
model_name="/{}".format("sub_camera"),
namespace=namespace)
return main_cameras, sub_camera
else:
err_msg = "configure_camera called more than once. configure_camera MUST be called ONLY once!"
raise GenericRolloutException(err_msg)
```
#### File: domain_randomizations/visual/model_visual_randomizer.py
```python
import logging
import numpy as np
from markov.rospy_wrappers import ServiceProxyWrapper
from markov.domain_randomizations.abs_randomizer import AbstractRandomizer
from markov.log_handler.logger import Logger
from markov.domain_randomizations.constants import (ModelRandomizerType, GazeboServiceName,
Color,
RANGE_MIN, RANGE_MAX)
from markov.gazebo_tracker.trackers.set_visual_color_tracker import SetVisualColorTracker
import rospy
from gazebo_msgs.srv import GetModelProperties, GetModelPropertiesRequest
from std_msgs.msg import ColorRGBA
from deepracer_msgs.srv import (GetVisualNames, GetVisualNamesRequest)
logger = Logger(__name__, logging.INFO).get_logger()
class ModelVisualRandomizer(AbstractRandomizer):
"""Model Visual Randomizer class"""
def __init__(self, model_name, model_randomizer_type, num_selection=-1,
link_name_filter=None, visual_name_filter=None,
color_range=None):
"""
Constructor
- A note on model_randomizer_type:
- There are 3 possible types (MODEL, LINK, VISUAL), corresponding to the level at which randomization is applied.
A model's visuals are represented in three levels, related as follows:
- 1 Model has N Links, and 1 Link has M Visuals.
- A LINK may contain multiple VISUALs because Gazebo merges links that are connected
by fixed joints for the sake of physics performance. Even when links are merged,
their visuals are kept separate so that each can be displayed with its own material.
Thus, a single merged link can contain multiple visuals from the links that existed before the merge.
Args:
model_name (str): name of the model
model_randomizer_type (ModelRandomizerType): Model Randomizer Type
num_selection (int): Number of visuals or link to select on each randomize. (-1 means all)
(Only used for ModelRandomizerType.LINK or ModelRandomizerType.VISUAL)
link_name_filter (set or list): If link_name_filter are provided,
randomization will only apply to given links.
visual_name_filter (set or list): If visual_name_filter are provided,
randomization will only apply to given visuals.
color_range (dict): min-max of each color component (r, g, b).
Valid format: {'r': {'min':0.0, 'max':1.0},
'g': {'min':0.0, 'max':1.0},
'b': {'min':0.0, 'max':1.0}}
"""
super(ModelVisualRandomizer, self).__init__()
self.model_name = model_name
self.model_randomizer_type = model_randomizer_type
self.num_selection = num_selection
self.color_range = {Color.R.value: {RANGE_MIN: 0.0, RANGE_MAX: 1.0},
Color.G.value: {RANGE_MIN: 0.0, RANGE_MAX: 1.0},
Color.B.value: {RANGE_MIN: 0.0, RANGE_MAX: 1.0}}
if color_range:
self.color_range.update(color_range)
# ROS Services Setup
rospy.wait_for_service(GazeboServiceName.GET_MODEL_PROPERTIES.value)
rospy.wait_for_service(GazeboServiceName.GET_VISUAL_NAMES.value)
get_model_prop = ServiceProxyWrapper(GazeboServiceName.GET_MODEL_PROPERTIES.value, GetModelProperties)
get_visual_names = ServiceProxyWrapper(GazeboServiceName.GET_VISUAL_NAMES.value, GetVisualNames)
# Get all model's link names
body_names = get_model_prop(GetModelPropertiesRequest(model_name=self.model_name)).body_names
link_names = ["%s::%s" % (model_name, b) for b in body_names]
# Convert filters to sets
link_name_filter = set(link_name_filter) if link_name_filter is not None else None
visual_name_filter = set(visual_name_filter) if visual_name_filter is not None else None
if link_name_filter is not None:
# If link_name_filter is not None then grab the link_name that is in link_name_filter only.
link_names = [link_name for link_name in link_names if link_name in link_name_filter]
self.link_visuals_map = {}
res = get_visual_names(GetVisualNamesRequest(link_names=link_names))
for idx, visual_name in enumerate(res.visual_names):
if visual_name_filter is not None and visual_name not in visual_name_filter:
continue
link_name = res.link_names[idx]
if link_name not in self.link_visuals_map:
self.link_visuals_map[link_name] = []
self.link_visuals_map[link_name].append(visual_name)
#logger.info('link_visuals_map: {}'.format({"model_name:": self.model_name, "links": self.link_visuals_map}))
def _get_random_color(self):
return ColorRGBA(*[np.random.uniform(self.color_range[Color.R.value][RANGE_MIN],
self.color_range[Color.R.value][RANGE_MAX]),
np.random.uniform(self.color_range[Color.G.value][RANGE_MIN],
self.color_range[Color.G.value][RANGE_MAX]),
np.random.uniform(self.color_range[Color.B.value][RANGE_MIN],
self.color_range[Color.B.value][RANGE_MAX]),
1.0])
def _randomize(self):
link_names = list(self.link_visuals_map.keys())
# Unroll all visual names
visual_names = [visual_name for visual_names in self.link_visuals_map.values()
for visual_name in visual_names]
if self.model_randomizer_type == ModelRandomizerType.LINK and self.num_selection > 0:
# Select links to randomize if model_randomizer_type is ModelRandomizerType.LINK
link_names = np.random.choice(link_names,
size=self.num_selection,
replace=False)
elif self.model_randomizer_type == ModelRandomizerType.VISUAL and self.num_selection > 0:
# Select visuals to randomize if model_randomizer_type is ModelRandomizerType.VISUAL
visual_names = np.random.choice(visual_names,
size=self.num_selection,
replace=False)
# Convert to set
visual_names = set(visual_names)
# Model-level random color
color = self._get_random_color()
ambient = color
diffuse = color
specular = ColorRGBA(0.0, 0.0, 0.0, 1.0)
emissive = ColorRGBA(0.0, 0.0, 0.0, 1.0)
for link_name in link_names:
for idx, visual_name in enumerate(self.link_visuals_map[link_name]):
if visual_name not in visual_names:
continue
SetVisualColorTracker.get_instance().set_visual_color(visual_name=visual_name,
link_name=link_name,
ambient=ambient,
diffuse=diffuse,
specular=specular,
emissive=emissive)
if self.model_randomizer_type == ModelRandomizerType.VISUAL:
# Visual-level random color
color = self._get_random_color()
ambient = color
diffuse = color
if self.model_randomizer_type == ModelRandomizerType.LINK:
# Link-level random color
color = self._get_random_color()
ambient = color
diffuse = color
```
#### File: src/markov/rospy_wrappers.py
```python
import time
import rospy
from markov.log_handler.exception_handler import log_and_exit
from markov.log_handler.constants import (SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
from markov.constants import ROBOMAKER_CANCEL_JOB_WAIT_TIME
class ServiceProxyWrapper(object):
'''This class wraps rospy's ServiceProxy method so that we can wait
5 minutes if a service throws an exception. This is required to prevent
our metrics from being flooded since an exception is thrown by service
calls when the cancel simulation API is called. Because robomaker gives
us no way of knowing whether or not the exception is real or because the
sim app is shutting down, we have to wait 5 minutes prior to logging the exception
and exiting.
'''
def __init__(self, service_name, object_type, persistent=False):
'''service_name - Name of the service to create a client for
object_type - The object type for making a service request
persistent - flag to whether keep the connection open or not
'''
self.client = rospy.ServiceProxy(service_name, object_type, persistent)
def __call__(self, *argv):
''' Makes a client call for the stored service
argv - Arguments to pass into the client object
'''
try:
return self.client(*argv)
except TypeError as err:
log_and_exit("Invalid arguments for client {}"
.format(err),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
except Exception as ex:
time.sleep(ROBOMAKER_CANCEL_JOB_WAIT_TIME)
log_and_exit("Unable to call service {}"
.format(ex),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
```
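A usage sketch inside a ROS node; pairing the wrapper with the Gazebo pause service is an assumption for illustration, not taken from the original file.
```python
import rospy
from std_srvs.srv import Empty
from markov.rospy_wrappers import ServiceProxyWrapper  # import path from the file header

rospy.init_node("pause_demo")
rospy.wait_for_service("/gazebo/pause_physics")
pause_physics = ServiceProxyWrapper("/gazebo/pause_physics", Empty)
pause_physics()  # on failure the wrapper waits, logs through log_and_exit, and exits
```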
#### File: s3/files/model_metadata.py
```python
import os
import logging
import json
from markov.log_handler.logger import Logger
from markov.log_handler.constants import (SIMAPP_EVENT_ERROR_CODE_500,
SIMAPP_SIMULATION_WORKER_EXCEPTION)
from markov.log_handler.exception_handler import log_and_exit
from markov.architecture.constants import Input, NeuralNetwork
from markov.constants import SIMAPP_VERSION_2, SIMAPP_VERSION_1
from markov.s3.s3_client import S3Client
LOG = Logger(__name__, logging.INFO).get_logger()
class ModelMetadata():
'''model metadata file upload, download, and parse
'''
def __init__(self, bucket, s3_key, region_name="us-east-1",
s3_endpoint_url=None,
local_path="./custom_files/agent/model_metadata.json",
max_retry_attempts=5, backoff_time_sec=1.0):
'''Model metadata upload, download, and parse
Args:
bucket (str): S3 bucket string
s3_key: (str): S3 key string.
region_name (str): S3 region name
local_path (str): file local path
max_retry_attempts (int): maximum number of retry attempts for S3 download/upload
backoff_time_sec (float): backoff second between each retry
'''
# check s3 key and s3 bucket exist
if not bucket or not s3_key:
log_and_exit("model_metadata S3 key or bucket not available for S3. \
bucket: {}, key {}"
.format(bucket, s3_key),
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_ERROR_CODE_500)
self._bucket = bucket
# Strip the s3://<bucket> from the uri, if s3_key was passed in as a uri
self._s3_key = s3_key.replace('s3://{}/'.format(self._bucket), '')
self._local_path = local_path
self._local_dir = os.path.dirname(self._local_path)
self._model_metadata = None
self._s3_client = S3Client(region_name,
s3_endpoint_url,
max_retry_attempts,
backoff_time_sec)
@property
def local_dir(self):
'''return local dir of model metadata'''
return self._local_dir
@property
def local_path(self):
'''return local path of model metadata'''
return self._local_path
def get_model_metadata_info(self):
'''retrieve the model metadata info
Returns:
tuple (str, str, str): string of sensor, network, simapp_version
'''
# download model_metadata.json
if not self._model_metadata:
self._download()
# after successfully downloading (or using the default model metadata), parse it
self._model_metadata = self.parse_model_metadata(self._local_path)
return self._model_metadata
def persist(self, s3_kms_extra_args):
'''upload local model_metadata.json into S3 bucket
Args:
s3_kms_extra_args (dict): s3 key management service extra argument
'''
# persist model metadata
# if retry failed, s3_client upload_file will log and exit 500
self._s3_client.upload_file(bucket=self._bucket,
s3_key=self._s3_key,
local_path=self._local_path,
s3_kms_extra_args=s3_kms_extra_args)
def _download(self):
'''download model_metadata.json with retry from s3 bucket'''
# check and make local directory
if self._local_dir and not os.path.exists(self._local_dir):
os.makedirs(self._local_dir)
# download model metadata
# if retry failed, each worker.py and download_params_and_roslaunch_agent.py
# will handle 400 and 500 separately
self._s3_client.download_file(bucket=self._bucket,
s3_key=self._s3_key,
local_path=self._local_path)
LOG.info("[s3] Successfully downloaded model metadata \
from s3 key {} to local {}.".format(self._s3_key, self._local_path))
@staticmethod
def parse_model_metadata(local_model_metadata_path):
"""parse model metadata give the local path
Args:
local_model_metadata_path (str): local model metadata file path
Returns:
tuple (list, str, str): list of sensor, network, simapp_version
"""
try:
with open(local_model_metadata_path, "r") as json_file:
data = json.load(json_file)
# simapp_version 2.0+ should contain version as key in
# model_metadata.json
if 'action_space' not in data:
raise ValueError("no action space defined")
if 'version' in data:
simapp_version = float(data['version'])
if simapp_version >= SIMAPP_VERSION_2:
sensor = data['sensor']
else:
sensor = [Input.OBSERVATION.value]
else:
if 'sensor' in data:
sensor = data['sensor']
simapp_version = SIMAPP_VERSION_2
else:
sensor = [Input.OBSERVATION.value]
simapp_version = SIMAPP_VERSION_1
if 'neural_network' in data:
network = data['neural_network']
else:
network = NeuralNetwork.DEEP_CONVOLUTIONAL_NETWORK_SHALLOW.value
LOG.info("Sensor list %s, network %s, simapp_version %s", sensor, network, simapp_version)
return sensor, network, simapp_version
except ValueError as ex:
raise ValueError('model_metadata ValueError: {}'.format(ex))
except Exception as ex:
raise Exception('Model metadata does not exist: {}'.format(ex))
```
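A hedged sketch of how the static parser above behaves on a minimal metadata file. The sensor value and JSON contents are assumptions for illustration, and the snippet presumes the `markov` package (for `Input`, `NeuralNetwork`, and the SIMAPP version constants) is importable alongside the class above.
```python
# Hypothetical usage of ModelMetadata.parse_model_metadata; values are illustrative only.
import json
import tempfile

metadata = {
    "action_space": [{"steering_angle": 30, "speed": 1.0}],  # required, else ValueError
    "sensor": ["FRONT_FACING_CAMERA"],                       # assumed sensor name
    "neural_network": "DEEP_CONVOLUTIONAL_NETWORK_SHALLOW",
    "version": "2.0",
}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(metadata, f)
    local_path = f.name

sensor, network, simapp_version = ModelMetadata.parse_model_metadata(local_path)
# sensor -> ["FRONT_FACING_CAMERA"], network -> "DEEP_CONVOLUTIONAL_NETWORK_SHALLOW",
# simapp_version -> 2.0 (taken from the "version" key)
```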
#### File: markov/tests/conftest.py
```python
import pytest
from markov.tests import test_constant
@pytest.fixture
def aws_region():
return test_constant.AWS_REGION
@pytest.fixture
def model_metadata_s3_key():
return test_constant.MODEL_METADATA_S3_KEY
@pytest.fixture
def reward_function_s3_source():
return test_constant.REWARD_FUNCTION_S3_SOURCE
@pytest.fixture
def s3_bucket():
return test_constant.S3_BUCKET
@pytest.fixture
def s3_prefix():
return test_constant.S3_PREFIX
```
#### File: tests/log_handler/test_exception_handler.py
```python
import pytest
import os
import multiprocessing
import json
from markov.log_handler import exception_handler
from markov.log_handler.constants import (SIMAPP_TRAINING_WORKER_EXCEPTION,
SIMAPP_EVENT_SYSTEM_ERROR, SIMAPP_S3_DATA_STORE_EXCEPTION,
SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_USER_ERROR, SIMAPP_EVENT_ERROR_CODE_500,
SIMAPP_EVENT_ERROR_CODE_400, EXCEPTION_HANDLER_SYNC_FILE)
@pytest.mark.robomaker
@pytest.mark.parametrize("message, exceptionType, eventType, errorCode",
[("Sample Simulation Worker Exception", SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_SYSTEM_ERROR, SIMAPP_EVENT_ERROR_CODE_500),
("Sample Simulation Worker Exception", SIMAPP_SIMULATION_WORKER_EXCEPTION,
SIMAPP_EVENT_USER_ERROR, SIMAPP_EVENT_ERROR_CODE_400)])
def test_log_and_exit_robomaker(message, exceptionType, eventType, errorCode):
"""The test function checks if the log_and_exit() function from exception_handler.py
once called inside robomaker environment, is able to log the appropriate error message and
abort the entire program.
The log_and_exit is tested using another process since we abort the program
with os exit when we call the function with exit code 1.
The exception stored in the sync file "EXCEPTION_HANDLER_SYNC_FILE"
is parsed to check whether the appropriate message is logged.
The sync file is useful to do this because when we run log_and_exit in multiprocess,
once the program aborts, all information along with the error logged on stderr is lost.
Args:
message: Error message that is to be logged
exceptionType: The exception type
eventType: Whether it's a system or user error (the test checks this is decided properly)
errorCode: Error code (400 or 500)
"""
# Remove any sync file generated because of other tests generating exceptions
if os.path.isfile(EXCEPTION_HANDLER_SYNC_FILE):
os.remove(EXCEPTION_HANDLER_SYNC_FILE)
proc = multiprocessing.Process(target=exception_handler.log_and_exit,
args=(message, exceptionType, errorCode))
proc.start()
proc.join()
assert os.path.isfile(EXCEPTION_HANDLER_SYNC_FILE)
try:
with open(EXCEPTION_HANDLER_SYNC_FILE, 'r') as sync_file:
captured_log = json.loads(sync_file.read())
finally:
# Remove the sync file for next test
os.remove(EXCEPTION_HANDLER_SYNC_FILE)
assert not proc.is_alive()
assert proc.exitcode == 1
assert captured_log['simapp_exception']['message'] == message
assert captured_log['simapp_exception']['exceptionType'] == exceptionType
assert captured_log['simapp_exception']['eventType'] == eventType
assert captured_log['simapp_exception']['errorCode'] == errorCode
@pytest.mark.sagemaker
@pytest.mark.parametrize("message, exceptionType, eventType, errorCode",
[("Sample Training Worker Exception", SIMAPP_TRAINING_WORKER_EXCEPTION,
SIMAPP_EVENT_SYSTEM_ERROR, SIMAPP_EVENT_ERROR_CODE_500),
("Sample Training Worker Exception", SIMAPP_TRAINING_WORKER_EXCEPTION,
SIMAPP_EVENT_USER_ERROR, SIMAPP_EVENT_ERROR_CODE_400)])
def test_log_and_exit_sagemaker(message, exceptionType, eventType, errorCode):
"""The test function checks if the log_and_exit() function from exception_handler.py
once called inside sagemaker environment, is able to log the appropriate error message and
abort the entire program.
The log_and_exit is tested using another process since we abort the program
with os exit when we call the function with exit code 1.
The exception stored in the sync file "EXCEPTION_HANDLER_SYNC_FILE"
is parsed to check whether the appropriate message is logged.
The sync file is useful to do this because when we run log_and_exit in multiprocess,
once the program aborts, all information along with the error logged on stderr is lost.
Args:
message: Error message that is to be logged
exceptionType: The exception type
eventType: Whether it's a system or user error (the test checks this is decided properly)
errorCode: Error code (400 or 500)
"""
# Remove any sync file generated because of other tests generating exceptions
if os.path.isfile(EXCEPTION_HANDLER_SYNC_FILE):
os.remove(EXCEPTION_HANDLER_SYNC_FILE)
proc = multiprocessing.Process(target=exception_handler.log_and_exit,
args=(message, exceptionType, errorCode))
proc.start()
proc.join()
assert os.path.isfile(EXCEPTION_HANDLER_SYNC_FILE)
try:
with open(EXCEPTION_HANDLER_SYNC_FILE, 'r') as sync_file:
captured_log = json.loads(sync_file.read())
finally:
# Remove the sync file for next test
os.remove(EXCEPTION_HANDLER_SYNC_FILE)
assert not proc.is_alive()
assert proc.exitcode == 1
assert captured_log['simapp_exception']['message'] == message
assert captured_log['simapp_exception']['exceptionType'] == exceptionType
assert captured_log['simapp_exception']['eventType'] == eventType
assert captured_log['simapp_exception']['errorCode'] == errorCode
@pytest.mark.robomaker
@pytest.mark.sagemaker
def test_log_and_exit_multiple():
"""The test function checks if multiple exceptions are thrown, only the first exception
thrown should get logged.
"""
# Remove any sync file generated because of other tests generating exceptions
if os.path.isfile(EXCEPTION_HANDLER_SYNC_FILE):
os.remove(EXCEPTION_HANDLER_SYNC_FILE)
message1 = "Sample DataStore Exception 1"
exceptionType1 = SIMAPP_S3_DATA_STORE_EXCEPTION
eventType1 = SIMAPP_EVENT_SYSTEM_ERROR
errorCode1 = SIMAPP_EVENT_ERROR_CODE_500
# Throwing the first exception and logging it
proc1 = multiprocessing.Process(target=exception_handler.log_and_exit,
args=(message1, exceptionType1, errorCode1))
proc1.start()
proc1.join()
assert os.path.isfile(EXCEPTION_HANDLER_SYNC_FILE)
with open(EXCEPTION_HANDLER_SYNC_FILE, 'r') as sync_file:
captured_log = json.loads(sync_file.read())
assert not proc1.is_alive()
assert proc1.exitcode == 1
assert captured_log['simapp_exception']['message'] == message1
assert captured_log['simapp_exception']['exceptionType'] == exceptionType1
assert captured_log['simapp_exception']['eventType'] == eventType1
assert captured_log['simapp_exception']['errorCode'] == errorCode1
# Throwing the second exception without removing the sync file
# The error shouldn't be logged; instead SIMAPP should exit directly
message2 = "Sample DataStore Exception 2"
exceptionType2 = SIMAPP_S3_DATA_STORE_EXCEPTION
eventType2 = SIMAPP_EVENT_SYSTEM_ERROR
errorCode2 = SIMAPP_EVENT_ERROR_CODE_400
proc2 = multiprocessing.Process(target=exception_handler.log_and_exit,
args=(message2, exceptionType2, errorCode2))
proc2.start()
proc2.join()
assert os.path.isfile(EXCEPTION_HANDLER_SYNC_FILE)
try:
with open(EXCEPTION_HANDLER_SYNC_FILE, 'r') as sync_file:
captured_log = json.loads(sync_file.read())
finally:
# Remove the sync file
os.remove(EXCEPTION_HANDLER_SYNC_FILE)
assert not proc2.is_alive()
assert proc2.exitcode == 1
assert captured_log['simapp_exception']['message'] == message1
assert captured_log['simapp_exception']['exceptionType'] == exceptionType1
assert captured_log['simapp_exception']['eventType'] == eventType1
assert captured_log['simapp_exception']['errorCode'] == errorCode1
@pytest.mark.robomaker
@pytest.mark.sagemaker
@pytest.mark.parametrize("message, fault_code",
[("User modified ckpt, unrecoverable dataloss or corruption:", "61"),
("Unseen error while testing", "0")])
def test_get_fault_code_for_error(message, fault_code):
"""The test function checks if get_fault_code_for_error() in exception_handler.py appropriately
matches the error message with the fault_code from FAULT_MAP.
In case of an unmapped exception, it should provide the fault_code 0
Args:
message: The error message generated
fault_code: Corresponding fault_code
"""
assert exception_handler.get_fault_code_for_error(message) == fault_code
@pytest.mark.robomaker
@pytest.mark.sagemaker
def test_simapp_exit_gracefully():
"""This function tests if the simapp_exit_gracefully() function in exception_handler.py
exits and aborts the program.
simapp_exit_gracefully is exercised in a separate process because it aborts the
program with an os exit and exit code 1.
"""
proc = multiprocessing.Process(target=exception_handler.simapp_exit_gracefully)
proc.start()
proc.join()
assert not proc.is_alive()
assert proc.exitcode == 1
```
#### File: markov/tests/test_training_worker.py
```python
import pytest
@pytest.mark.sagemaker
def test_training_worker():
assert 1 == 1
```
#### File: sagemaker/src/train_gameserver_ppo.py
```python
import json
import os
import shutil  # needed by _save_tf_model below to relocate the exported model
import sys
import gym
import tensorflow as tf  # needed by _save_tf_model below (tf.train, tf.Session, tf.saved_model)
import ray
from ray.tune import run_experiments
from ray.tune.registry import register_env
from sagemaker_rl.ray_launcher import SageMakerRayLauncher
env_config={}
class MyLauncher(SageMakerRayLauncher):
def register_env_creator(self):
from gameserver_env import GameServerEnv
register_env("GameServers", lambda env_config: GameServerEnv(env_config))
def _save_tf_model(self):
print("in _save_tf_model")
ckpt_dir = '/opt/ml/output/data/checkpoint'
model_dir = '/opt/ml/model'
# Re-Initialize from the checkpoint so that you will have the latest models up.
tf.train.init_from_checkpoint(ckpt_dir,
{'main_level/agent/online/network_0/': 'main_level/agent/online/network_0'})
tf.train.init_from_checkpoint(ckpt_dir,
{'main_level/agent/online/network_1/': 'main_level/agent/online/network_1'})
# Create a new session with a new tf graph.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.global_variables_initializer()) # initialize the checkpoint.
# This is the node that will accept the input.
input_nodes = tf.get_default_graph().get_tensor_by_name('main_level/agent/main/online/' + \
'network_0/observation/observation:0')
# This is the node that will produce the output.
output_nodes = tf.get_default_graph().get_operation_by_name('main_level/agent/main/online/' + \
'network_1/ppo_head_0/policy_mean/BiasAdd')
# Save the model as a servable model.
tf.saved_model.simple_save(session=sess,
export_dir='model',
inputs={"observation": input_nodes},
outputs={"policy": output_nodes.outputs[0]})
# Move to the appropriate folder.
shutil.move('model/', model_dir + '/model/tf-model/00000001/')
# SageMaker will pick it up and upload to the right path.
print("in _save_tf_model Success")
def get_experiment_config(self):
print('get_experiment_config')
print(env_config)
# reserve 4600 seconds at the end of the job to stop and save the model
time_total_s=float(env_config["time_total_s"])-4600
print("time_total_s="+str(time_total_s))
return {
"training": {
"env": "GameServers",
"run": "PPO",
"stop": {
"time_total_s": time_total_s
},
"config": {
"ignore_worker_failures": True,
"gamma": 0,
"kl_coeff": 1.0,
"num_sgd_iter": 10,
"lr": 0.0001,
"sgd_minibatch_size": 32,
"train_batch_size": 128,
"model": {
# "free_log_std": True,
# "fcnet_hiddens": [512, 512],
},
"use_gae": True,
#"num_workers": (self.num_cpus-1),
"num_gpus": self.num_gpus,
#"batch_mode": "complete_episodes",
"num_workers": 1,
"env_config": env_config,
#'observation_filter': 'MeanStdFilter',
}
}
}
if __name__ == "__main__":
for i in range(len(sys.argv)):
if i==0:
continue
if i % 2 > 0:
env_config[sys.argv[i].split('--',1)[1]]=sys.argv[i+1]
MyLauncher().train_main()
```
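The `__main__` block above fills `env_config` from alternating `--key value` pairs on the command line (the convention used to forward hyperparameters to the entry point). A standalone sketch of that parsing logic, reimplemented here purely for illustration:
```python
# Illustrative re-implementation of the CLI parsing loop in __main__ above.
def parse_env_config(argv):
    env_config = {}
    # argv[0] is the script name; the remainder alternates --key value --key value ...
    for i in range(1, len(argv), 2):
        key = argv[i].split('--', 1)[1]   # strip the leading "--"
        env_config[key] = argv[i + 1]     # values stay strings, e.g. "18000"
    return env_config

print(parse_env_config(['train_gameserver_ppo.py', '--time_total_s', '18000']))
# {'time_total_s': '18000'}
```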
#### File: eplus/envs/data_center_env.py
```python
import gym
from gym import error, spaces, utils
from gym.utils import seeding
from eplus.envs import pyEp
import socket
from eplus.envs.socket_builder import socket_builder
import numpy as np
import os
class DataCenterEnv(gym.Env):
def __init__(self, config):
#timestep=12, days=1, eplus_path=None,
# weather_file = 'weather/SPtMasterTable_587017_2012_amy.epw'):
cur_dir = os.path.dirname(__file__)
#print("File directory: ", cur_dir)
# buildings/1ZoneDataCenter/1ZoneDataCenter.idf is the EnergyPlus file
# used for this environment. The 1ZoneDataCenter folder also contains
# variables.cfg which configures the external input and output
# variables
self.idf_file = cur_dir + '/buildings/1ZoneDataCenter/1ZoneDataCenter.idf'
# EnergyPlus weather file
if "weather_file" in config:
self.weather_file = cur_dir + '/' + config["weather_file"]
else:
self.weather_file = cur_dir + '/weather/SPtMasterTable_587017_2012_amy.epw'
#self.weather_file = cur_dir + '/weather/SPtMasterTable_587017_2012_amy.epw'
if "eplus_path" in config:
self.eplus_path = config["eplus_path"]
else:
# Using EnergyPlus version 8.8.0, path to the executable
# Assuming Mac
self.eplus_path = '/Applications/EnergyPlus-8-8-0/'
# EnergyPlus number of timesteps in an hour
if "timestep" in config:
self.epTimeStep = config["timestep"]
else:
self.epTimeStep = 12
# EnergyPlus number of simulation days
if "days" in config:
self.simDays = config["days"]
else:
self.simDays = 1
# Number of steps per day
self.DAYSTEPS = int(24 * self.epTimeStep)
# Total number of steps
self.MAXSTEPS = int(self.simDays * self.DAYSTEPS)
# Time difference between each step in seconds
self.deltaT = (60/self.epTimeStep)*60
# Outputs given by EnergyPlus, defined in variables.cfg
self.outputs = []
# Inputs expected by EnergyPlus, defined in variables.cfg
self.inputs = []
# Current step of the simulation
self.kStep = 0
# Instance of EnergyPlus simulation
self.ep = None
# state can be all the inputs required to make a control decision
# getting all the outputs coming from EnergyPlus for the time being
self.observation_space = spaces.Box(np.array([0, -50, 0]), #zone temp, outdoor drybulb temp, relative humidity
np.array([60, 70, 100]), dtype=np.float32)
# actions are all the control inputs
#self.action_space = spaces.Tuple(( #spaces.Box(low=22, high=27, shape=(1,),dtype=np.float32), #cooling setpoint
# spaces.Box(low=6, high=7, shape=(1,),dtype=np.float32), #chiller setpoint
# spaces.Box(low=0, high=1, shape=(1,),dtype=np.float32) #lighting setpoint
# ))
self.clg_min = 20 #cooling setpoint min in celsius
self.clg_max = 35 #cooling setpoint max in celsius
self.htg_min = 5 #heating setpoint min in celsius
self.htg_max = 20 #heating setpoint max in celsius
#self.action_space = spaces.Box(np.array([self.clg_min,self.htg_min]),
# np.array([self.clg_max, self.htg_max]), dtype=np.float32)
# Normalized action space
self.action_space = spaces.Box(np.array([0,0]),
np.array([1,1]), dtype=np.float32)
def step(self, action):
# while(self.kStep < self.MAXSTEPS):
# current time from start of simulation
time = self.kStep * self.deltaT
# current time from start of day
dayTime = time % 86400
if dayTime == 0:
print("Day: ", int(self.kStep/self.DAYSTEPS)+1)
#inputs should be same as actions
#bring the actions in the correct range
#For Ray: assuming mean 0 and std dev 1 by ray
#action[0] = action[0]*(self.clg_max - self.clg_min)+(self.clg_min+self.clg_max)/2.0
#action[1] = action[1]*(self.htg_max - self.htg_min)+(self.htg_min+self.htg_max)/2.0
#For Coach: input is 0 to 1 range
action[0] = action[0]*(self.clg_max - self.clg_min)+(self.clg_min)
action[1] = action[1]*(self.htg_max - self.htg_min)+(self.htg_min)
#force action to be within limits
cooling_setpoint = np.clip(action, self.clg_min, self.clg_max)[0]
heating_setpoint = np.clip(action, self.htg_min, self.htg_max)[1]
self.inputs = [cooling_setpoint, heating_setpoint]
input_packet = self.ep.encode_packet_simple(self.inputs, time)
self.ep.write(input_packet)
#after EnergyPlus runs the simulation step, it returns the outputs
output_packet = self.ep.read()
self.outputs = self.ep.decode_packet_simple(output_packet)
#print("Outputs:", self.outputs)
if not self.outputs:
print("Outputs:", self.outputs)
print("Actions:", action)
next_state = self.reset()
return next_state, 0, False, {}
# reward needs to be a combination of energy and comfort requirement
energy_coeff = -0.00001
heating_coeff = -100
cooling_coeff = -100
energy = self.outputs[0]
zone_temperature = self.outputs[1] #taking mid-zone 2 as an example
heating_setpoint = 15 #fixed lower limit in celsius
cooling_setpoint = 30 #fixed upper limit in celsius
heating_penalty = max(heating_setpoint - zone_temperature, 0)
cooling_penalty = max(zone_temperature - cooling_setpoint, 0)
# punish if action is out of limits
action_penalty_coeff = -100
max_penalty = max(self.clg_min - action[0], 0)
min_penalty = max(action[0] - self.clg_max, 0)
action_penalty = action_penalty_coeff * (max_penalty + min_penalty)
max_penalty = max(self.htg_min - action[1], 0)
min_penalty = max(action[1] - self.htg_max, 0)
action_penalty += action_penalty_coeff * (max_penalty + min_penalty)
# final reward
reward = energy_coeff * energy \
+ heating_coeff * heating_penalty \
+ cooling_coeff * cooling_penalty \
+ action_penalty
# state can be all the inputs required to make a control decision
# zone temp, outside drybulb temp, relative humidity
next_state = np.array([self.outputs[1], self.outputs[2], self.outputs[4]])
# fake state space
#next_state = np.array([3, 2, 1, 0])
#print("energy: %.2f, reward: %.2f, action: %.2f, %.2f" \
# % (energy, reward, action[0], action[1]))
#print("zone temp: %.2f, drybulb: %.2f, humidity: %.2f"\
# %tuple(next_state))
# increment simulation step count
self.kStep += 1
# done when number of steps of simulation reaches its maximum (e.g. 1 day)
done = False
if self.kStep >= (self.MAXSTEPS):
#requires one more step to close the simulation
input_packet = self.ep.encode_packet_simple(self.inputs, time)
self.ep.write(input_packet)
#output is empty in the final step
#but it is required to read this output for termination
output_packet = self.ep.read()
last_output = self.ep.decode_packet_simple(output_packet)
print("Finished simulation")
print("Last action: ", action)
print("Last reward: ", reward)
done = True
self.ep.close()
self.ep = None
# extra information we want to pass
info = {}
# print("State:", next_state, "Reward:", reward)
return next_state, reward, done, info
def reset(self):
# stop existing energyplus simulation
if self.ep:
print("Closing the old simulation and socket.")
self.ep.close() #needs testing: check if it stops the simulation
self.ep = None
# start new simulation
print("Starting a new simulation..")
self.kStep = 0
idf_dir = os.path.dirname(self.idf_file)
builder = socket_builder(idf_dir)
configs = builder.build()
self.ep = pyEp.ep_process('localhost', configs[0], self.idf_file, self.weather_file, self.eplus_path)
# read the initial outputs from EnergyPlus
# these outputs are from warmup phase, so this does not count as a simulation step
self.outputs = self.ep.decode_packet_simple(self.ep.read())
return np.array([self.outputs[1], self.outputs[2], self.outputs[4]])
#return np.array([3,2,1,0])
def render(self, mode='human', close=False):
pass
```
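A minimal driver sketch for the environment above. It assumes a local EnergyPlus 8.8.0 installation, pyEp, and the bundled IDF/weather files; it runs one simulated day with random normalized setpoints.
```python
# Usage sketch only; requires EnergyPlus 8.8.0 and pyEp to actually run.
import numpy as np

env = DataCenterEnv({"days": 1, "timestep": 12})
state = env.reset()
done, total_reward = False, 0.0
while not done:
    action = np.random.uniform(0.0, 1.0, size=2)  # normalized [cooling, heating] setpoints
    state, reward, done, info = env.step(action)
    total_reward += reward
print("episode reward:", total_reward)
```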
#### File: rl_knapsack_coach_custom/src/item.py
```python
import numpy as np
class Item:
def __init__(self, weight=0, volume=0, value=0):
self.weight = weight
self.volume = volume
self.value = value
@staticmethod
def get_random_item(max_value, max_weight, max_volume=None):
weight = np.random.randint(1, max_weight // 10)
if max_volume:
volume = np.random.randint(1, max_volume // 10)
else:
volume = 0
value = np.random.randint(1, max_value)
return Item(weight, volume, value)
```
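A quick usage sketch of `get_random_item`. Note that weights and volumes are drawn from `[1, max // 10)`, so the caps below are illustrative assumptions rather than values taken from the notebook.
```python
# Sample a few random knapsack items and print their fields.
import numpy as np

np.random.seed(0)  # reproducibility for this sketch only
for _ in range(3):
    item = Item.get_random_item(max_value=100, max_weight=50, max_volume=30)
    print(item.weight, item.volume, item.value)
```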
#### File: compressor/core/fake_ops.py
```python
class Fake(object):
def __init__(self, shape):
self.shape = shape
```
#### File: compressor/core/model.py
```python
import tensorflow as tf
class Model(object):
def __init__ (self, name, dir, num_layers, params,
scope=''):
self.name = name
self.num_layers = num_layers
self.dir = dir
self.scope = scope
self.params = params
def set_name(self, name):
self.params['name'] = name
```
#### File: compressor/layers/conv.py
```python
import tensorflow as tf
import numpy as np
import logging
from ..core import Layer, Fake
from .ops import get_param_from_name, load_pkl_obj
def _fixed_padding(inputs, kernel_size, data_format):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: A tensor of size [batch, channels, height_in, width_in] or
[batch, height_in, width_in, channels] depending on data_format.
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
Should be a positive integer.
data_format: The input format ('channels_last' or 'channels_first').
Returns:
A tensor with the same format as the input with the data either intact
(if kernel_size == 1) or padded (if kernel_size > 1).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
class Conv2DFixedPadding(Layer):
""" Definition for a convolution layer.
Strided 2-D convolution with explicit padding.
Args:
name: name of the module.
inputs: the input symbol.
start: layer number at which this layer begins.
end: layer number at which this layer ends.
weights: to initialize from existing parameters, supply the numpy pickle path;
the exact same layer name will be read from it.
data_format: typically 'channels_first'.
kernel_size: size of each filter, typically 3.
filters: number of output channels required.
strides: stride of the convolution.
"""
def __init__ (self, name, inputs, filters, kernel_size, strides, data_format, start=None,
end=None, weights=None, weight_scope=None, fake=False):
super(Conv2DFixedPadding, self).__init__(name = name, start=start, end=end)
self.fake = fake
if not self.fake:
if weights is not None:
params_name = weight_scope + '/' + str(name) + '/conv2d/'
np_dict = load_pkl_obj(weights)
kernel_np = np_dict[params_name+'kernel:0']
in_shp = inputs.shape.as_list()[1]
if not kernel_np.shape[2] == in_shp:
kernel_np = np.resize(kernel_np, (kernel_size, kernel_size, in_shp, filters))
kernel_initializer = tf.constant_initializer(kernel_np)
else:
kernel_initializer = tf.variance_scaling_initializer()
with tf.variable_scope(self._name):
if strides > 1:
inputs = _fixed_padding(inputs, kernel_size, data_format)
self.output=tf.layers.conv2d(inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,
kernel_initializer=kernel_initializer,
data_format=data_format)
self._tf_name = self.output.name.split('/')[0] + '/' + self.output.name.split('/')[1]
else:
assert isinstance(inputs, Fake)
in_shp = inputs.shape
param_shp = (kernel_size, kernel_size, in_shp[1], filters)
self.param = Fake(param_shp)
if kernel_size > 1:
out_height = int(np.floor(((in_shp[2] - kernel_size + 2)/strides )) + 1)
out_width = int(np.floor(((in_shp[3] - kernel_size + 2)/strides )) + 1)
else:
out_height = in_shp[2] / strides
out_width = in_shp[3] / strides
out_shp = (None, filters, out_height, out_width)
self.output = Fake(out_shp)
self._tf_name = 'fake'
self.description.append('Conv')
self.description.append(filters)
self.description.append(kernel_size)
self.description.append(strides)
self.description.append(('SAME' if strides == 1 else 'VALID'))
self.description.append(self.get_memory_footprint())
def _get_params_real(self):
""" Returns the kernel node """
return {self.get_name(): get_param_from_name(self._tf_name + '/conv2d/kernel:0')}
def _get_memory_footprint_real(self):
""" Number of parameters in the layer """
params = self.get_params()[self.get_name()]
return int(np.prod(params.shape))
def _get_params_fake(self):
return self.param
def _get_memory_footprint_fake(self):
return int(np.prod(self.get_params().shape))
def get_memory_footprint(self):
if self.fake:
return self._get_memory_footprint_fake()
else:
return self._get_memory_footprint_real()
def get_params(self):
if self.fake:
return self._get_params_fake()
else:
return self._get_params_real()
```
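When `fake=True`, the layer only propagates shapes and parameter counts through `Fake` objects instead of building a TensorFlow graph. A sketch of that mode, assuming `Fake` (from the core module above) and `Conv2DFixedPadding` are in scope:
```python
# Shape-only ("fake") mode: no TensorFlow graph is created.
fake_input = Fake((None, 3, 32, 32))  # NCHW placeholder shape
conv = Conv2DFixedPadding('conv1', fake_input, filters=16, kernel_size=3,
                          strides=1, data_format='channels_first', fake=True)
print(conv.output.shape)             # (None, 16, 32, 32)
print(conv.get_memory_footprint())   # 3 * 3 * 3 * 16 = 432 parameters
print(conv.description)              # ends with ['Conv', 16, 3, 1, 'SAME', 432]
```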
#### File: compressor/layers/descriptions.py
```python
class LayerState:
"""
Static helper methods that convert layer descriptions into a fixed-length,
integer-only state readable by the NetworkCompression system. The last element
of every state is the memory footprint.
"""
LAYER_STATE_LENGTH = 8 # length of the layer description.
LAYER_IDS = { # each layer gets its own integer in description.
'Conv': 1,
'Dense': 2,
'ReLU': 3,
'BatchNorm': 4,
'Pool': 5
}
@staticmethod
def desc2state(desc): # Global method for converting description to state.
if desc[2] == 'Conv':
return LayerState.conv_state(desc)
elif desc[2] == 'Dense':
return LayerState.dense_state(desc)
elif desc[2] == 'ReLU':
return LayerState.relu_state(desc)
elif desc[2] == 'BatchNorm':
return LayerState.bn_state(desc)
elif desc[2] == 'Pool':
return LayerState.pool_state(desc)
@staticmethod
def conv_state(desc):
state = [0]*LayerState.LAYER_STATE_LENGTH
state[0] = desc[0] # start
state[1] = desc[1] # end
state[2] = LayerState.LAYER_IDS[desc[2]] # Layer id.
state[3] = desc[3] # filters
state[4] = desc[4] # kernel Size
state[5] = desc[5] # strides
if desc[6] == 'SAME':
state[6] = 1
else:
state[6] = 0
state[7] = desc[7] # memory footprint
return state
@staticmethod
def dense_state(desc):
state = [0]*LayerState.LAYER_STATE_LENGTH
state[0] = desc[0] # start
state[1] = desc[1] # end
state[2] = LayerState.LAYER_IDS[desc[2]] # Layer id.
state[3] = desc[3] #num_units
state[7] = desc[4] # memory footprint
return state
@staticmethod
def bn_state(desc):
state = [0]*LayerState.LAYER_STATE_LENGTH
state[0] = desc[0] # start
state[1] = desc[1] # end
state[2] = LayerState.LAYER_IDS[desc[2]] # Layer id.
state[7] = desc[3] # memory footprint
return state
@staticmethod
def relu_state(desc):
state = [0]*LayerState.LAYER_STATE_LENGTH
state[0] = desc[0] # start
state[1] = desc[1] # end
state[2] = LayerState.LAYER_IDS[desc[2]] # Layer id.
state[7] = desc[3] # memory footprint
return state
@staticmethod
def pool_state(desc):
state = [0]*LayerState.LAYER_STATE_LENGTH
state[0] = desc[0] # start
state[1] = desc[1] # end
state[2] = LayerState.LAYER_IDS[desc[2]] # Layer id.
state[3] = desc[3] # Pool size
state[4] = desc[4] # Strides
state[5] = desc[5] # padding
state[7] = desc[6] # memory footprint
return state
```
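For example, the description a conv layer builds (start, end, layer type, filters, kernel size, strides, padding, memory footprint) maps to an all-integer state like this; the numbers are hypothetical:
```python
# Hypothetical conv description -> fixed-length integer state.
desc = [0, 1, 'Conv', 64, 3, 1, 'SAME', 36864]
state = LayerState.desc2state(desc)
print(state)   # [0, 1, 1, 64, 3, 1, 1, 36864]  ('Conv' -> 1, 'SAME' -> 1)
```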
#### File: rl_predictive_autoscaling_coach_customEnv/src/train-coach.py
```python
from sagemaker_rl.coach_launcher import SageMakerCoachPresetLauncher
import tensorflow as tf
import shutil
class MyLauncher(SageMakerCoachPresetLauncher):
def default_preset_name(self):
"""This points to a .py file that configures everything about the RL job.
It can be overridden at runtime by specifying the RLCOACH_PRESET hyperparameter.
"""
return 'preset-autoscale-ppo'
def map_hyperparameter(self, name, value):
"""Here we configure some shortcut names for hyperparameters that we expect to use frequently.
Essentially anything in the preset file can be overridden through a hyperparameter with a name
like "rl.agent_params.algorithm.etc".
"""
if name == "warmup_latency":
return self.apply_hyperparameter("rl.env_params.additional_simulator_parameters.warmup_latency", value)
if name == "discount":
return self.apply_hyperparameter("rl.agent_params.algorithm.discount", value)
if name == "online_to_target_steps":
return self.apply_hyperparameter("rl.agent_params.algorithm.num_steps_between_copying_online_weights_to_target:EnvironmentSteps", value)
if name == "eval_period":
return self.apply_hyperparameter("rl.steps_between_evaluation_periods:EnvironmentSteps", value)
super().map_hyperparameter(name,value)
def _save_tf_model(self):
ckpt_dir = '/opt/ml/output/data/checkpoint'
model_dir = '/opt/ml/model'
# Re-Initialize from the checkpoint so that you will have the latest models up.
tf.train.init_from_checkpoint(ckpt_dir,
{'main_level/agent/online/network_0/': 'main_level/agent/online/network_0'})
tf.train.init_from_checkpoint(ckpt_dir,
{'main_level/agent/online/network_1/': 'main_level/agent/online/network_1'})
# Create a new session with a new tf graph.
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
sess.run(tf.global_variables_initializer()) # initialize the checkpoint.
# This is the node that will accept the input.
input_nodes = tf.get_default_graph().get_tensor_by_name('main_level/agent/main/online/' + \
'network_0/observation/observation:0')
# This is the node that will produce the output.
output_nodes = tf.get_default_graph().get_operation_by_name('main_level/agent/main/online/' + \
'network_1/ppo_head_0/policy_mean/BiasAdd')
# Save the model as a servable model.
tf.saved_model.simple_save(session=sess,
export_dir='model',
inputs={"observation": input_nodes},
outputs={"policy": output_nodes.outputs[0]})
# Move to the appropriate folder.
shutil.move('model/', model_dir + '/model/tf-model/00000001/')
# SageMaker will pick it up and upload to the right path.
print("Success")
if __name__ == '__main__':
MyLauncher.train_main()
```
#### File: rl_resource_allocation_ray_customEnv/src/train_bin_packing.py
```python
from ray.tune.registry import register_env
from model import register_actor_mask_model
from sagemaker_rl.ray_launcher import SageMakerRayLauncher
register_actor_mask_model()
class MyLauncher(SageMakerRayLauncher):
def register_env_creator(self):
from bin_packing_env import BinPackingActionMaskGymEnvironment
register_env("BinPackingActionMaskGymEnvironment-v1",
lambda env_config: BinPackingActionMaskGymEnvironment(env_config))
def get_experiment_config(self):
multi = 1
return {
"training": {
"env": "BinPackingActionMaskGymEnvironment-v1",
"run": "PPO",
"config": {
"gamma": 0.995,
"kl_coeff": 1.0,
"num_sgd_iter": 10,
"lr": 0.0001,
"sgd_minibatch_size": 32768,
"train_batch_size": 320000,
"use_gae": False,
"num_workers": (self.num_cpus - 1),
"num_gpus": self.num_gpus,
"batch_mode": "complete_episodes",
"env_config": {
'bag_capacity': 9 * multi,
'item_sizes': [2 * multi, 3 * multi],
'item_probabilities': [0.75, 0.25], # perfect pack -> SS: -20 to -100
# 'item_probabilities': [0.5, 0.5], #bounded waste -> SS: -11 to -20
# 'item_probabilities': [0.8, 0.2], #linear waste -> SS: -150 to -340
'time_horizon': 10000,
},
"model": {
"custom_model": "action_mask",
"fcnet_hiddens": [256, 256],
},
"ignore_worker_failures": True,
"entropy_coeff": 0.01
},
"checkpoint_freq": 1 # make sure at least one checkpoint is saved
}
}
if __name__ == "__main__":
MyLauncher().train_main()
```
#### File: rl_roboschool_stable_baselines/src/train_stable_baselines.py
```python
import argparse
from sagemaker_rl.mpi_launcher import MPILauncher
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--RLSTABLEBASELINES_PRESET', required=True, type=str)
parser.add_argument('--output_path', default="/opt/ml/output/intermediate/", type=str)
parser.add_argument('--instance_type', type=str)
return parser.parse_known_args()
if __name__ == "__main__":
args, unknown_args = parse_args()
print("Launching train script with MPI: {} and arguments: {}".format(args.RLSTABLEBASELINES_PRESET,
str(unknown_args)))
MPILauncher(train_script=args.RLSTABLEBASELINES_PRESET, train_script_args=unknown_args,
instance_type=args.instance_type).mpi_run()
```
#### File: rl_tic_tac_toe_coach_customEnv/src/tic_tac_toe.py
```python
import gym
from gym import spaces
import numpy as np
import os
import time
class TicTacToeEnv(gym.Env):
def __init__(self, opponent='moderate'):
self.opponent = opponent
self.episode = 0
self.observation_space = spaces.Box(low=-1, high=1, shape=(9, ), dtype=np.int)
self.action_space = spaces.Discrete(9)
def reset(self):
self.episode += 1
self.total_reward = 0
self.occupied = 0
self.board = np.zeros((3, 3))
state = self.board.flatten()
return state
def step(self, action):
# Convert action into board position
row = action // 3
col = action % 3
# If the agent repeatedly picks an occupied space, end the game and give a penalty
# Otherwise, give a small penalty and try again
if self.board[row, col] != 0:
self.occupied += 1
if self.occupied > 10:
reward = -1
self.total_reward += reward
self.save_board(self.total_reward)
return self.board.flatten(), reward, True, {'reward': reward}
else:
reward = -0.1
self.total_reward += reward
return self.board.flatten(), reward, False, {'reward': reward}
else:
self.occupied = 0
# Otherwise the agent's action updates the board and we check for a win
self.board[row, col] = 1
if check_win(self.board) == 1:
reward = 1
self.total_reward += reward
self.save_board(self.total_reward)
return self.board.flatten(), reward, True, {'reward': reward}
# Check if last move
if (self.board != 0).all():
reward = 0
self.total_reward += reward
self.save_board(self.total_reward)
return self.board.flatten(), reward, True , {'reward': reward}
# If not then opponent moves
else:
self.move_opponent()
if check_win(self.board) == -1:
reward = -1
self.total_reward += reward
self.save_board(self.total_reward)
return self.board.flatten(), reward, True, {'reward': reward}
return self.board.flatten(), 0, False, {'reward': 0}
def move_opponent(self):
if self.opponent == 'random':
options = np.argwhere(self.board == 0)
idx = np.random.randint(options.shape[0])
self.board[tuple(options[idx])] = -1
elif self.opponent == 'moderate':
move = None
options = np.argwhere(self.board == 0)
if np.random.rand() < 0.1:
idx = np.random.randint(options.shape[0])
self.board[tuple(options[idx])] = -1
else:
# Check if there's a next move that could win
for o in options:
board = self.board.copy()
board[tuple(o)] = -1
if check_win(board) == -1:
move = tuple(o)
break
# Otherwise check for a block
if not move:
for o in options:
board = self.board.copy()
board[tuple(o)] = 1
if check_win(board) == 1:
move = tuple(o)
break
# Otherwise, take a random option
if not move:
idx = np.random.randint(options.shape[0])
move = tuple(options[idx])
self.board[move] = -1
def save_board(self, reward, path='/opt/ml/output/data/'):
np.save(os.path.join(path, 'episode_{}_reward_{}.npy'.format(self.episode, reward)),
self.board)
def check_win(board):
v = board.sum(axis=0)
h = board.sum(axis=1)
dd = board[0, 0] + board[1, 1] + board[2, 2]
du = board[2, 0] + board[1, 1] + board[0, 2]
if max(v.max(), h.max()) == 3 or dd == 3 or du == 3:
return 1
elif min(v.min(), h.min()) == -3 or dd == -3 or du == -3:
return -1
else:
return 0
```
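A small sanity check of the `check_win` helper above (1 means the agent's Xs win, -1 the opponent's Os, 0 no winner yet):
```python
import numpy as np

# Xs (the agent) complete the top row -> 1
print(check_win(np.array([[1, 1, 1], [-1, -1, 0], [0, 0, 0]])))
# Os (the opponent) complete the main diagonal -> -1
print(check_win(np.array([[-1, 1, 0], [1, -1, 0], [0, 1, -1]])))
# Empty board, no winner yet -> 0
print(check_win(np.zeros((3, 3))))
```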
#### File: reinforcement_learning/rl_tic_tac_toe_coach_customEnv/tic_tac_toe_game.py
```python
from ipywidgets import widgets, HBox, VBox, Layout
from IPython.display import display
from functools import partial
import numpy as np
class TicTacToeGame(object):
'''
Tic-tac-toe game within a Jupyter Notebook
Opponent is Xs and starts the game.
This is assumed to be a predictor object from a SageMaker RL trained agent
'''
def __init__(self, agent):
self.board = np.zeros((3, 3))
self.game_over = False
self.turn = 'X'
self.agent = agent
def start(self):
self.board = np.zeros((3, 3))
self.game_over = False
self.turn = 'X'
self.draw_board()
self.move_agent()
def mark_board(self):
Xs = np.argwhere(self.board == 1)
for X in Xs:
self.spaces[X[0] * 3 + X[1]].description = 'X'
Os = np.argwhere(self.board == -1)
for O in Os:
self.spaces[O[0] * 3 + O[1]].description = 'O'
def click_space(self, action, space):
row = action // 3
col = action % 3
if self.game_over:
return
if self.board[row, col] != 0:
self.text_box.value = 'Invalid'
return
if self.turn == 'O':
self.board[row, col] = -1
self.mark_board()
if check_win(self.board) == -1:
self.text_box.value = 'Os Win'
self.game_over = True
else:
self.turn = 'X'
self.text_box.value = 'Xs Turn'
self.move_agent()
def draw_board(self):
self.text_box = widgets.Text(value='Xs Turn', layout=Layout(width='100px', height='50px'))
self.spaces = []
for i in range(9):
space = widgets.Button(description='',
disabled=False,
button_style='',
tooltip='Click to make move',
icon='',
layout=Layout(width='75px', height='75px'))
self.spaces.append(space)
space.on_click(partial(self.click_space, i))
board = VBox([HBox([self.spaces[0], self.spaces[1], self.spaces[2]]),
HBox([self.spaces[3], self.spaces[4], self.spaces[5]]),
HBox([self.spaces[6], self.spaces[7], self.spaces[8]])])
display(VBox([board, self.text_box]))
return
def move_agent(self):
if self.game_over:
return
if self.turn == 'X':
# Take the first empty space with the highest preference from the agent
for action in np.argsort(-np.array(self.agent.predict(self.board.flatten())[1][0])):
row = action // 3
col = action % 3
if self.board[row, col] == 0:
self.board[action // 3, action % 3] = 1
break
self.mark_board()
if check_win(self.board) == 1:
self.text_box.value = 'Xs Win'
self.game_over = True
elif (self.board != 0).all():
self.text_box.value = 'Draw'
else:
self.turn = 'O'
self.text_box.value = 'Os Turn'
def check_win(board):
v = board.sum(axis=0)
h = board.sum(axis=1)
dd = board[0, 0] + board[1, 1] + board[2, 2]
du = board[2, 0] + board[1, 1] + board[0, 2]
if max(v.max(), h.max()) == 3 or dd == 3 or du == 3:
return 1
elif min(v.min(), h.min()) == -3 or dd == -3 or du == -3:
return -1
else:
return 0
```
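Intended notebook usage, where `predictor` stands in for a deployed SageMaker RL predictor (hypothetical here) whose `predict()` returns per-action preferences for the flattened board:
```python
# Notebook sketch: `predictor` is assumed to come from estimator.deploy(...);
# it is not defined in this file.
game = TicTacToeGame(agent=predictor)
game.start()   # draws the board widgets; the agent (X) makes the first move
```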
#### File: rl_unity_ray/src/evaluate-unity.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import subprocess
import numpy as np
import os
import gym
from gym import wrappers
import ray
from ray.rllib.models import ModelCatalog
from ray.tune.registry import register_env
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.exception import UnityWorkerInUseException
from mlagents_envs.registry import default_registry
from gym_unity.envs import UnityToGymWrapper
OUTPUT_DIR = "/opt/ml/output/intermediate"
class UnityEnvWrapper(gym.Env):
def __init__(self, env_config):
self.worker_index = 0
if 'SM_CHANNEL_TRAIN' in os.environ:
env_name = os.environ['SM_CHANNEL_TRAIN'] +'/'+ env_config['env_name']
os.chmod(env_name, 0o755)
print("Changed environment binary into executable mode.")
# Try connecting to the Unity3D game instance.
while True:
try:
unity_env = UnityEnvironment(
env_name,
no_graphics=True,
worker_id=self.worker_index,
additional_args=['-logFile', 'unity.log'])
except UnityWorkerInUseException:
self.worker_index += 1
else:
break
else:
env_name = env_config['env_name']
while True:
try:
unity_env = default_registry[env_name].make(
no_graphics=True,
worker_id=self.worker_index,
additional_args=['-logFile', 'unity.log'])
except UnityWorkerInUseException:
self.worker_index += 1
else:
break
self.env = UnityToGymWrapper(unity_env)
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def reset(self):
return self.env.reset()
def step(self, action):
return self.env.step(action)
def create_parser(parser_creator=None):
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint",
default="/opt/ml/input/data/model/checkpoint",
type=str,
help="Checkpoint from which to roll out.")
parser.add_argument(
"--algorithm",
type=str,
required=True,
help="The algorithm or model to train. This may refer to the name "
"of a built-on algorithm (e.g. RLLib's DQN or PPO), or a "
"user-defined trainable function or class registered in the "
"tune registry.")
parser.add_argument(
"--env", type=str, help="The Unity environment to use.")
parser.add_argument(
"--evaluate_episodes", default=None, help="Number of episodes to roll out.")
parser.add_argument(
"--config",
default="{}",
type=json.loads,
help="Algorithm-specific configuration (e.g. env, hyperparams). "
"Surpresses loading of configuration from checkpoint.")
return parser
def run(args, parser):
if not args.config:
# Load configuration from file
config_dir = os.path.dirname(args.checkpoint)
# params.json is saved in the model directory during ray training by default
config_path = os.path.join(config_dir, "params.json")
with open(config_path) as f:
args.config = json.load(f)
if not args.env:
if not args.config.get("env"):
parser.error("the following arguments are required: --env")
args.env = args.config.get("env")
ray.init(webui_host="127.0.0.1")
agent_env_config = {"env_name": args.env}
register_env("unity_env", lambda config: UnityEnvWrapper(agent_env_config))
if ray.__version__ >= "0.6.5":
from ray.rllib.agents.registry import get_agent_class
else:
from ray.rllib.agents.agent import get_agent_class
cls = get_agent_class(args.algorithm)
config = args.config
config["monitor"] = False
config["num_workers"] = 0
config["num_gpus"] = 0
agent = cls(env="unity_env", config=config)
agent.restore(args.checkpoint)
num_episodes = int(args.evaluate_episodes)
env_config = {"env_name": args.env}
if ray.__version__ >= "0.6.5":
env = UnityEnvWrapper(env_config)
else:
from ray.rllib.agents.dqn.common.wrappers import wrap_dqn
if args.algorithm == "DQN":
env = UnityEnvWrapper(env_config)
env = wrap_dqn(env, args.config.get("model", {}))
else:
env = ModelCatalog.get_preprocessor_as_wrapper(UnityEnvWrapper(env_config))
env = wrappers.Monitor(env, OUTPUT_DIR, force=True, video_callable=lambda episode_id: True)
all_rewards = []
for episode in range(num_episodes):
steps = 0
state = env.reset()
done = False
reward_total = 0.0
while not done:
action = agent.compute_action(state)
next_state, reward, done, _ = env.step(action)
reward_total += reward
steps += 1
state = next_state
all_rewards.append(reward_total)
print("Episode reward: %s. Episode steps: %s" % (reward_total, steps))
print("Mean Reward:", np.mean(all_rewards))
print("Max Reward:", np.max(all_rewards))
print("Min Reward:", np.min(all_rewards))
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
run(args, parser)
import time
time.sleep(10)
```
#### File: tensorflow_cifar-10_with_inference_script/code/model_def.py
```python
import tensorflow as tf
from tensorflow.keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D, BatchNormalization
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam, SGD, RMSprop
HEIGHT = 32
WIDTH = 32
DEPTH = 3
NUM_CLASSES = 10
def get_model(learning_rate, weight_decay, optimizer, momentum, size, mpi=False, hvd=False):
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same', input_shape=(HEIGHT, WIDTH, DEPTH)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Conv2D(128, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(NUM_CLASSES))
model.add(Activation('softmax'))
if mpi:
size = hvd.size()
if optimizer.lower() == 'sgd':
opt = SGD(lr=learning_rate * size, decay=weight_decay, momentum=momentum)
elif optimizer.lower() == 'rmsprop':
opt = RMSprop(lr=learning_rate * size, decay=weight_decay)
else:
opt = Adam(lr=learning_rate * size, decay=weight_decay)
if mpi:
opt = hvd.DistributedOptimizer(opt)
model.compile(loss='categorical_crossentropy',
optimizer=opt,
metrics=['accuracy'])
return model
```
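A single-machine sketch of building the model (no Horovod/MPI), assuming a TensorFlow/Keras version that still accepts the legacy `lr`/`decay` optimizer arguments; `size=1` leaves the learning rate unscaled.
```python
# Build and inspect the CIFAR-10 CNN without distributed training.
model = get_model(learning_rate=1e-3, weight_decay=2e-4, optimizer='adam',
                  momentum=0.9, size=1, mpi=False)
model.summary()  # layer-by-layer summary for 32x32x3 inputs and 10 classes
```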
#### File: bert_attention_head_view/utils/neuron_view.py
```python
from bokeh.plotting import show, figure, gridplot
from bokeh.models.annotations import Title
from bokeh.models import ColumnDataSource, Label, Range1d
from bokeh.io import show, output_notebook, push_notebook
from bokeh.models.glyphs import Line, Image
from bokeh.models.mappers import LinearColorMapper
import numpy as np
output_notebook()
class NeuronView():
def __init__(self,
input_tokens=None,
keys=None,
queries=None,
layers=None,
step=0,
head=0,
n_tokens=20,
layer_names=None):
self.layers = layers
self.head = head
self.step = step
self.query = 0
self.p = None
self.input_tokens = input_tokens[:n_tokens]
self.n_tokens = n_tokens
self.keys = keys
self.queries = queries
self.layer_names = layer_names
self.key_name = layers[0][0]
self.query_name = layers[0][1]
self.source_key = None
self.source_query = None
self.product = None
self.create()
def update(self):
key = self.keys[self.key_name][self.step][0, self.head, :, :]
self.source_key.data["image"] = [key[:self.n_tokens,:]]
query = self.queries[self.query_name][self.step][0, self.head, :self.n_tokens, :]
query_input = np.zeros((self.n_tokens,query.shape[1]))
query_input[:,:] = np.nan
query_input[self.query,:] = query[self.n_tokens - self.query - 1, :]
self.source_query.data["image"] = [query_input]
product = np.multiply(query[self.n_tokens - self.query - 1, :], key)
self.product.data["image"] = [product[:self.n_tokens,:]]
dot_product = np.dot(key, query[self.query,:])
dot_product = dot_product.reshape((dot_product.shape[0],1))
self.dot_product.data["image"] = [dot_product[:self.n_tokens,:]]
def select_query(self, query):
self.query = query
self.update()
push_notebook()
def select_layer(self, layer):
layer_id = self.layer_names[layer]
self.key_name = self.layers[layer_id][0]
self.query_name = self.layers[layer_id][1]
self.update()
push_notebook()
def select_head(self, head):
self.head = head
self.update()
push_notebook()
def select_step(self, step):
self.step = step
self.update()
push_notebook()
def create(self):
# set size of figure
self.p = figure(width = 900,
plot_height = 35 * self.n_tokens,
x_range=Range1d(0, self.n_tokens + 100),
y_range=Range1d(-1, self.n_tokens))
self.p.xgrid.visible = False
self.p.ygrid.visible = False
self.p.axis.visible = False
x = np.zeros(self.n_tokens) + 2
y = np.flip(np.arange(0, self.n_tokens), axis=0) + 0.25
# set input tokens in plot
for token, x_i, y_i in zip(self.input_tokens, x, y):
text1 = Label(x = x_i - 1,
y = y_i,
text = token,
text_font_size = '10pt')
text2 = Label(x = x_i + 105,
y = y_i,
text = token,
text_font_size = '10pt')
self.p.add_layout(text2)
self.p.add_layout(text1)
# set plot labels
text = Label(x=17, y=-1, text="query", text_font_size = '15pt')
self.p.add_layout(text)
text = Label(x=50, y=-1, text="key", text_font_size = '15pt')
self.p.add_layout(text)
text = Label(x=80, y=-1, text="q x k", text_font_size = '15pt')
self.p.add_layout(text)
text = Label(x=98, y=-1, text="q * k", text_font_size = '15pt')
self.p.add_layout(text)
color_mapper = LinearColorMapper(palette="Blues8", nan_color='white')
#get key matrix and query vector
key = self.keys[self.key_name][self.step][0, self.head, :, :]
query = self.queries[self.query_name][self.step][0, self.head, :self.n_tokens, :]
#plot key matrix
self.source_key = ColumnDataSource(data=dict(image=[key[:self.n_tokens,:]], x=[40],y=[0], dw=[25], dh=[self.n_tokens]))
img = Image(image="image", x="x", y="y", dw="dw", dh="dh", color_mapper=color_mapper)
self.p.add_glyph(self.source_key, img)
#create an empty query matrix where only one vector is set
query_input = np.zeros((self.n_tokens, query.shape[-1]))
query_input[:,:] = np.nan
query_input[self.query,:] = query[self.n_tokens - self.query - 1, :]
self.source_query = ColumnDataSource(data=dict(image=[query_input], x=[10], y=[0], dw=[25], dh=[self.n_tokens]))
img = Image(image="image", x="x", y="y", dw="dw", dh="dh", color_mapper=color_mapper)
self.p.add_glyph(self.source_query, img)
#compute elementwise product between a query vector and key matrix
product = np.multiply(query[self.n_tokens - self.query - 1, :], key)
self.product = ColumnDataSource(data=dict(image=[product[:self.n_tokens,:]], x=[70], y=[0], dw=[25], dh=[self.n_tokens]))
img = Image(image="image", x="x", y="y", dw="dw", dh="dh", color_mapper=color_mapper)
self.p.add_glyph(self.product, img)
#compute dot product between query vector and key matrix
dot_product = np.dot(key, query[self.n_tokens - self.query - 1, :])
dot_product = dot_product.reshape((dot_product.shape[0], 1))
self.dot_product = ColumnDataSource(data=dict(image=[dot_product[:self.n_tokens,:]], x=[100], y=[0], dw=[2], dh=[self.n_tokens]))
img = Image(image="image", x="x", y="y", dw="dw", dh="dh", color_mapper=color_mapper)
self.p.add_glyph(self.dot_product, img)
show(self.p, notebook_handle=True)
```
#### File: pytorch_profiling/entry_point/horovod_test_launcher.py
```python
import argparse
import os
import subprocess
import sys
from distutils.util import strtobool
# Third Party
from torch.cuda import device_count
HOROVOD_PYTORCH_TEST_MNIST_SCRIPT = "./horovod_mnist.py"
HOROVOD_MNIST_SCRIPT_NAME = "horovod_mnist.py"
def launch_horovod_job(script_file_path, script_args, num_workers, smprofile_path, mode):
command = ["mpirun", "-np", str(num_workers)] + [sys.executable, script_file_path] + script_args
env_dict = os.environ.copy()
env_dict["HOROVOD_TIMELINE"] = f"{smprofile_path}"
if mode == "cpu":
env_dict["CUDA_VISIBLE_DEVICES"] = "-1"
subprocess.check_call(command, env=env_dict)
def main():
parser = argparse.ArgumentParser(description="Launch horovod test")
parser.add_argument("--script", type=str, default=HOROVOD_PYTORCH_TEST_MNIST_SCRIPT)
parser.add_argument("--batch_size", type=int, default=128)
parser.add_argument("--epoch", type=int, default=5)
parser.add_argument("--gpu", type=strtobool, default=1)
parser.add_argument("--profile_path", type=str, default="./hvd_timeline.json")
parser.add_argument("--model", type=str, default="resnet50")
opt = parser.parse_args()
if opt.gpu == 1:
mode = "gpu"
else:
mode = "cpu"
num_workers = 1 if bool(device_count()) is False else device_count()
print(f"Number of workers = {num_workers}")
mode_args = []
if mode == "cpu":
mode_args += ["--use_only_cpu", "true"]
mode_args += [
"--epochs",
str(opt.epoch),
"--batch_size",
str(opt.batch_size),
"--model",
str(opt.model),
]
launch_horovod_job(
script_file_path=opt.script,
script_args=mode_args,
num_workers=num_workers,
smprofile_path=opt.profile_path,
mode=mode,
)
if __name__ == "__main__":
main()
```
#### File: sagemaker-python-sdk/mxnet_onnx_superresolution/super_resolution.py
```python
from __future__ import print_function
import bisect
import json
import logging
import time
import random
import re
from collections import Counter, namedtuple
from itertools import chain, islice
import mxnet as mx
import mxnet.contrib.onnx as onnx_mxnet
import numpy as np
from mxnet import gluon, autograd, nd
from mxnet.io import DataIter, DataBatch, DataDesc
from mxnet.gluon import nn
logging.basicConfig(level=logging.DEBUG)
def model_fn(model_dir):
"""
Load the onnx model. Called once when hosting service starts.
:param: model_dir The directory where model files are stored.
:return: a model
"""
sym, arg_params, aux_params = onnx_mxnet.import_model('%s/super_resolution.onnx' % model_dir)
# create module
mod = mx.mod.Module(symbol=sym, data_names=['1'], label_names=None)
mod.bind(for_training=False, data_shapes=[('1', [1, 1, 224, 224])])
mod.set_params(arg_params=arg_params, aux_params=aux_params)
return mod
def transform_fn(mod, data, input_content_type, output_content_type):
"""
Transform a request using the Gluon model. Called once per request.
:param mod: The super resolution model.
:param data: The request payload.
:param input_content_type: The request content type.
:param output_content_type: The (desired) response content type.
:return: response payload and content type.
"""
input_data = json.loads(data)
batch = namedtuple('Batch', ['data'])
mod.forward(batch([mx.nd.array(input_data)]))
return json.dumps(mod.get_outputs()[0][0][0].asnumpy().clip(0, 255).tolist()), output_content_type
```
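A local sketch of the hosting path above, assuming MXNet is installed and `super_resolution.onnx` sits in the model directory; the payload is a nested list for a 1x1x224x224 grayscale input.
```python
# Illustrative local invocation of the serving functions; not a SageMaker endpoint call.
import json
import numpy as np

mod = model_fn(model_dir='.')  # expects ./super_resolution.onnx
payload = json.dumps(np.zeros((1, 1, 224, 224)).tolist())
response, content_type = transform_fn(mod, payload, 'application/json', 'application/json')
print(content_type, len(json.loads(response)))  # number of rows in the upscaled image
```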
#### File: sagemaker-python-sdk/pytorch_cnn_cifar10/utils_cifar.py
```python
import torch
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def _get_transform():
return transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
def get_train_data_loader():
transform = _get_transform()
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
return torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
def get_test_data_loader():
transform = _get_transform()
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
return torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
# function to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
```
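Typical notebook usage of these helpers (CIFAR-10 is downloaded to ./data on the first call); `matplotlib` is assumed to be available for display.
```python
import matplotlib.pyplot as plt
import torchvision

loader = get_train_data_loader()
images, labels = next(iter(loader))          # one batch of 4 images
imshow(torchvision.utils.make_grid(images))  # plot the batch as a grid
plt.show()
print(' '.join(classes[labels[j]] for j in range(4)))
```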
#### File: pytorch_mnist/code/deploy_ei.py
```python
from __future__ import absolute_import
import logging
import os
import sys
import torch
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
# To use new EIA inference API, customer should use attach_eia(model, eia_ordinal_number)
VERSIONS_USE_NEW_API = ["1.5.1"]
def predict_fn(input_data, model):
logger.info('Performing EIA inference with Torch JIT context with input of size {}'.format(input_data.shape))
# With EI, client instance should be CPU for cost-efficiency. Subgraphs with unsupported arguments run locally. Server runs with CUDA
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
input_data = input_data.to(device)
# Please make sure the model is loaded on CPU and has been set to eval(); in this example, we have done this step in model_fn()
with torch.no_grad():
if torch.__version__ in VERSIONS_USE_NEW_API:
# Please make sure torcheia has been imported
import torcheia
# We need to set the profiling executor for EIA
torch._C._jit_set_profiling_executor(False)
with torch.jit.optimized_execution(True):
return model.forward(input_data)
# Set the target device to the accelerator ordinal
else:
with torch.jit.optimized_execution(True, {'target_device': 'eia:0'}):
return model(input_data)
def model_fn(model_dir):
try:
loaded_model = torch.jit.load('model.pth', map_location=torch.device('cpu'))
if torch.__version__ in VERSIONS_USE_NEW_API:
import torcheia
loaded_model = loaded_model.eval()
loaded_model = torcheia.jit.attach_eia(loaded_model, 0)
return loaded_model
except Exception as e:
logger.exception(f"Exception in model fn {e}")
return None
```
#### File: automate_model_retraining_workflow/code/query_training_status.py
```python
import boto3
import logging
import json
logger = logging.getLogger()
logger.setLevel(logging.INFO)
sm_client = boto3.client('sagemaker')
#Retrieve transform job name from event and return transform job status.
def lambda_handler(event, context):
if ('TrainingJobName' in event):
job_name = event['TrainingJobName']
else:
raise KeyError('TrainingJobName key not found in function input!'+
' The input received was: {}.'.format(json.dumps(event)))
#Query boto3 API to check training status.
try:
response = sm_client.describe_training_job(TrainingJobName=job_name)
logger.info("Training job:{} has status:{}.".format(job_name,
response['TrainingJobStatus']))
except Exception as e:
response = ('Failed to read training status!'+
' The training job may not exist or the job name may be incorrect.'+
' Check SageMaker to confirm the job name.')
print(e)
print('{} Attempted to read job name: {}.'.format(response, job_name))
#We can't marshal datetime objects in a JSON response, so convert
#all datetime objects returned to Unix time.
for index, metric in enumerate(response['FinalMetricDataList']):
metric['Timestamp'] = metric['Timestamp'].timestamp()
return {
'statusCode': 200,
'trainingMetrics': response['FinalMetricDataList']
}
``` |
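A sketch of how this Lambda is driven from the retraining workflow: the event carries `TrainingJobName`, and the handler returns the job's final metrics with Unix timestamps. The job name below is hypothetical, and running this locally needs AWS credentials plus an existing training job.
```python
# Illustrative local invocation only.
event = {"TrainingJobName": "retraining-pipeline-job-2021-01-01-00-00-00"}
result = lambda_handler(event, context=None)
print(result["statusCode"], result["trainingMetrics"])
```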
{
"source": "jpmarques66/operator-service",
"score": 2
} |
#### File: operator-service/operator_service/admin_routes.py
```python
import os
import logging
import psycopg2
from flask import Blueprint, jsonify, request, Response
from kubernetes.client.rest import ApiException
from operator_service.config import Config
from operator_service.kubernetes_api import KubeAPI
adminpg_services = Blueprint('adminpg_services', __name__)
admin_services = Blueprint('admin_services', __name__)
config = Config()
@adminpg_services.route('/pgsqlinit', methods=['POST'])
def init_pgsql_compute():
"""
Init pgsql database
---
tags:
- operation
consumes:
- application/json
"""
output = ""
connection = None
cursor = None
try:
connection = psycopg2.connect(user=os.getenv("POSTGRES_USER"),
                                      password=os.getenv("POSTGRES_PASSWORD"),
host=os.getenv("POSTGRES_HOST"),
port=os.getenv("POSTGRES_PORT"),
database=os.getenv("POSTGRES_DB"))
cursor = connection.cursor()
create_table_query = """
CREATE TABLE IF NOT EXISTS jobs
(agreementId varchar(255) NOT NULL,
workflowId varchar(255) NOT NULL,
owner varchar(255),
status int,
statusText varchar(255),
dateCreated timestamp without time zone default NOW(),
dateFinished timestamp without time zone default NULL,
configlogURL text,
publishlogURL text,
algologURL text,
outputsURL text,
ddo text,
namespace varchar(255),
stopreq smallint default 0,
removed smallint default 0
);
"""
cursor.execute(create_table_query)
# queries below are for upgrade purposes
create_table_query = '''ALTER TABLE jobs ADD COLUMN IF NOT EXISTS namespace varchar(255)'''
cursor.execute(create_table_query)
create_table_query = '''ALTER TABLE jobs ADD COLUMN IF NOT EXISTS stopreq smallint default 0'''
cursor.execute(create_table_query)
create_table_query = '''ALTER TABLE jobs ADD COLUMN IF NOT EXISTS removed smallint default 0'''
cursor.execute(create_table_query)
create_index_query = '''CREATE unique INDEX IF NOT EXISTS uniq_agreementId_workflowId ON jobs (agreementId,workflowId)'''
cursor.execute(create_index_query)
connection.commit()
except (Exception, psycopg2.Error) as error:
output = output + "Error PostgreSQL:" + str(error)
finally:
# closing database connection.
if connection and cursor:
cursor.close()
connection.close()
return output, 200
@admin_services.route('/info', methods=['GET'])
def get_compute_job_info():
"""
    Get info for a job id.
---
tags:
- operation
consumes:
- application/json
parameters:
- name: jobId
in: query
description: Id of the job.
required: true
type: string
"""
try:
job_id = request.args['jobId']
api_response = KubeAPI(config).get_namespaced_custom_object(job_id)
logging.info(api_response)
return jsonify(api_response), 200
except ApiException as e:
logging.error(f'The jobId {job_id} is not registered in your namespace: {e}')
return f'The jobId {job_id} is not registered in your namespace.', 400
@admin_services.route('/list', methods=['GET'])
def list_compute_jobs():
"""
List all the compute jobs.
---
tags:
- operation
consumes:
- application/json
"""
try:
api_response = KubeAPI(config).list_namespaced_custom_object()
result = list()
for i in api_response['items']:
result.append(i['metadata']['name'])
logging.info(api_response)
return jsonify(result), 200
except ApiException as e:
logging.error(
f'Exception when calling CustomObjectsApi->list_cluster_custom_object: {e}')
return 'Error listing workflows', 400
@admin_services.route('/logs', methods=['GET'])
def get_logs():
"""
    Get the logs for a job id.
---
tags:
- operation
consumes:
- text/plain
parameters:
- name: jobId
in: query
description: Id of the job.
required: true
type: string
- name: component
in: query
description: Workflow component (configure, algorithm, publish)
required: true
type: string
responses:
200:
        description: Logs retrieved successfully
400:
description: Error consume Kubernetes API
404:
description: Pod not found for the given parameters
"""
data = request.args
kube_api = KubeAPI(config)
try:
job_id = data.get('jobId')
component = data.get('component')
# First we need to get the name of the pods
label_selector = f'workflow={job_id},component={component}'
        logging.debug(f'Looking for pods in ns {kube_api.namespace} with labels {label_selector}')
pod_response = kube_api.list_namespaced_pod(label_selector=label_selector)
except ApiException as e:
logging.error(
f'Exception when calling CustomObjectsApi->list_namespaced_pod: {e}')
return 'Error getting the logs', 400
try:
pod_name = pod_response.items[0].metadata.name
logging.debug(f'pods found: {pod_response}')
except IndexError as e:
logging.warning(f'Exception getting information about the pod with labels {label_selector}.'
f' Probably pod does not exist: {e}')
return f'Pod with workflow={job_id} and component={component} not found', 404
try:
logging.debug(f'looking logs for pod {pod_name} in namespace {kube_api.namespace}')
logs_response = kube_api.read_namespaced_pod_log(name=pod_name)
r = Response(response=logs_response, status=200, mimetype="text/plain")
r.headers["Content-Type"] = "text/plain; charset=utf-8"
return r
except ApiException as e:
logging.error(
f'Exception when calling CustomObjectsApi->read_namespaced_pod_log: {e}')
return 'Error getting the logs', 400
```
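A minimal sketch of mounting these blueprints in a Flask application; the module path matches the imports above, while the URL prefix and port are assumptions rather than values taken from the service.
```python
from flask import Flask

from operator_service.admin_routes import admin_services, adminpg_services

app = Flask(__name__)
# the '/api/v1/operator' prefix and the port are assumptions for illustration
app.register_blueprint(admin_services, url_prefix='/api/v1/operator')
app.register_blueprint(adminpg_services, url_prefix='/api/v1/operator')

if __name__ == '__main__':
    app.run(port=8050)
```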
#### File: operator-service/operator_service/routes.py
```python
import os
from os import path
import logging
import kubernetes
from flask import Blueprint, jsonify, request
from kubernetes.client.rest import ApiException
from operator_service.config import Config
from operator_service.data_store import create_sql_job, get_sql_status, get_sql_jobs, stop_sql_job, remove_sql_job
from operator_service.kubernetes_api import KubeAPI
from operator_service.utils import (
create_compute_job,
check_required_attributes,
generate_new_id,
process_signature_validation,
get_compute_resources
)
services = Blueprint('services', __name__)
# Configuration to connect to k8s.
if not path.exists('/.dockerenv'):
kubernetes.config.load_kube_config()
else:
kubernetes.config.load_incluster_config()
config = Config()
@services.route('/compute', methods=['POST'])
def start_compute_job():
"""
Create and start the compute job
---
tags:
- operation
consumes:
- application/json
parameters:
- in: body
name: body
required: false
description: Init workflow.
schema:
workflow:
nullable: true
example: {
"agreementId":"0x111111",
"owner":"0xC41808BBef371AD5CFc76466dDF9dEe228d2BdAA",
"providerSignature":"ae01",
"workflow":{
"stages": [
{
"index": 0,
"input": [
{
"id": "did:op:87bdaabb33354d2eb014af5091c604fb4b0f67dc6cca4d18a96547bffdc27bcf",
"url": [
"https://data.ok.gov/sites/default/files/unspsc%20codes_3.csv"
],
"index": 0
},
{
"id": "did:op:1384941e6f0b46299b6e515723df3d8e8e5d1fb175554467a1cb7bc613f5c72e",
"url": [
"https://data.ct.gov/api/views/2fi9-sgi3/rows.csv?accessType=DOWNLOAD"
],
"index": 1
}
],
"compute": {
"Instances": 1,
"namespace": "withgpu",
"maxtime": 3600
},
"algorithm": {
"id": "did:op:87bdaabb33354d2eb014af5091c604fb4b0f67dc6cca4d18a96547bffdc27bcf",
"url": "https://raw.githubusercontent.com/oceanprotocol/test-algorithm/master/javascript/algo.js",
"rawcode": "console.log('this is a test')",
"container": {
"image": "node",
"tag": "10",
"entrypoint": "node $ALGO"
}
},
"output": {
"nodeUri": "https://nile.dev-ocean.com",
"brizoUri": "https://brizo.marketplace.dev-ocean.com",
"brizoAddress": "0x4aaab179035dc57b35e2ce066919048686f82972",
"metadata": {
"name": "Workflow output"
},
"metadataUri": "https://aquarius.marketplace.dev-ocean.com",
"secretStoreUri": "https://secret-store.nile.dev-ocean.com",
"whitelist": [
"0x00Bd138aBD70e2F00903268F3Db08f2D25677C9e",
"0xACBd138aBD70e2F00903268F3Db08f2D25677C9e"
],
"owner":"0xC41808BBef371AD5CFc76466dDF9dEe228d2BdAA",
"publishOutput":true,
"publishAlgorithmLog":true
}
}
]
}
}
response:
201:
        description: Workflow initialized successfully.
400:
description: Some error
"""
data = request.args if request.args else request.json
required_attributes = [
'workflow',
'agreementId',
'owner',
'providerSignature'
]
msg, status = check_required_attributes(required_attributes, data, 'POST:/compute')
if msg:
return jsonify(error=msg), status
workflow = data.get('workflow')
agreement_id = data.get('agreementId')
owner = data.get('owner')
if not workflow:
return jsonify(error=f'`workflow` is required in the payload and must '
f'include workflow stages'), 400
# verify provider's signature
msg, status = process_signature_validation(data.get('providerSignature'), agreement_id)
if msg:
return jsonify(error=f'`providerSignature` of agreementId is required.'), status
stages = workflow.get('stages')
if not stages:
logging.error(f'Missing stages')
return jsonify(error='Missing stages'), 400
for _attr in ('algorithm', 'compute', 'input', 'output'):
if _attr not in stages[0]:
logging.error(f'Missing {_attr} in stage 0')
return jsonify(error=f'Missing {_attr} in stage 0'), 400
# loop through stages and add resources
timeout = int(os.getenv("ALGO_POD_TIMEOUT", 0))
compute_resources_def = get_compute_resources()
for count, astage in enumerate(workflow['stages']):
# check timeouts
if timeout > 0:
if 'maxtime' in astage['compute']:
maxtime = int(astage['compute']['maxtime'])
else:
maxtime = 0
if timeout < maxtime or maxtime <= 0:
astage['compute']['maxtime'] = timeout
logging.debug(f"Maxtime in workflow was {maxtime}. Overwritten to {timeout}")
# get resources
astage['compute']['resources'] = compute_resources_def
job_id = generate_new_id()
logging.debug(f'Got job_id: {job_id}')
body = create_compute_job(
workflow, job_id, config.group, config.version, config.namespace
)
logging.debug(f'Got body: {body}')
kube_api = KubeAPI(config)
try:
api_response = kube_api.create_namespaced_custom_object(body)
logging.info(api_response)
create_sql_job(agreement_id, str(job_id), owner)
status_list = get_sql_status(agreement_id, str(job_id), owner)
return jsonify(status_list), 200
except ApiException as e:
logging.error(f'Exception when calling CustomObjectsApi->create_namespaced_custom_object: {e}')
return jsonify(error='Unable to create job'), 400
@services.route('/compute', methods=['PUT'])
def stop_compute_job():
"""
    Stop the current compute job.
---
tags:
- operation
consumes:
- application/json
parameters:
- name: agreementId
in: query
description: agreementId
type: string
- name: jobId
in: query
description: Id of the job.
type: string
- name: owner
in: query
description: owner
type: string
"""
try:
data = request.args if request.args else request.json
agreement_id = data.get('agreementId')
owner = data.get('owner')
job_id = data.get('jobId')
if not agreement_id or len(agreement_id) < 2:
agreement_id = None
if not job_id or len(job_id) < 2:
job_id = None
if not owner or len(owner) < 2:
owner = None
if owner is None and agreement_id is None and job_id is None:
msg = f'You have to specify one of agreementId, jobId or owner'
logging.error(msg)
return jsonify(error=msg), 400
jobs_list = get_sql_jobs(agreement_id, job_id, owner)
for ajob in jobs_list:
name = ajob
logging.info(f'Stopping job : {name}')
stop_sql_job(name)
status_list = get_sql_status(agreement_id, job_id, owner)
return jsonify(status_list), 200
except ApiException as e:
logging.error(f'Exception when stopping compute job: {e}')
return jsonify(error=f'Error stopping job: {e}'), 400
@services.route('/compute', methods=['DELETE'])
def delete_compute_job():
"""
Deletes the current compute job.
---
tags:
- operation
consumes:
- application/json
parameters:
- name: agreementId
in: query
description: agreementId
type: string
- name: jobId
in: query
description: Id of the job.
type: string
- name: owner
in: query
description: owner
type: string
"""
body = kubernetes.client.V1DeleteOptions() # V1DeleteOptions |
grace_period_seconds = 56 # int | The duration in seconds before the object should be
# deleted. Value must be non-negative integer. The value zero indicates delete immediately.
# If this value is nil, the default grace period for the specified type will be used.
# Defaults to a per object value if not specified. zero means delete immediately. (optional)
orphan_dependents = True # bool | Deprecated: please use the PropagationPolicy, this field
# will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false,
# the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either
# this field or PropagationPolicy may be set, but not both. (optional)
propagation_policy = 'propagation_policy_example' # str | Whether and how garbage collection
# will be performed. Either this field or OrphanDependents may be set, but not both. The
# default policy is decided by the existing finalizer set in the metadata.finalizers and the
# resource-specific default policy. (optional)
try:
data = request.args if request.args else request.json
agreement_id = data.get('agreementId')
owner = data.get('owner')
job_id = data.get('jobId')
if not agreement_id or len(agreement_id) < 2:
agreement_id = None
if not job_id or len(job_id) < 2:
job_id = None
if not owner or len(owner) < 2:
owner = None
if owner is None and agreement_id is None and job_id is None:
msg = f'You have to specify one of agreementId, jobId or owner'
logging.error(msg)
return jsonify(error=msg), 400
kube_api = KubeAPI(config)
jobs_list = get_sql_jobs(agreement_id, job_id, owner)
logging.debug(f'Got {jobs_list}')
for ajob in jobs_list:
name = ajob
remove_sql_job(name)
api_response = kube_api.delete_namespaced_custom_object(
name,
body,
grace_period_seconds=grace_period_seconds,
orphan_dependents=orphan_dependents,
propagation_policy=propagation_policy
)
logging.debug(api_response)
status_list = get_sql_status(agreement_id, job_id, owner)
return jsonify(status_list), 200
except ApiException as e:
logging.error(f'Exception when calling CustomObjectsApi->delete_namespaced_custom_object: {e}')
return jsonify(error=f'Error deleting job {e}'), 400
@services.route('/compute', methods=['GET'])
def get_compute_job_status():
"""
    Get status for a specific job or for multiple jobs.
---
tags:
- operation
consumes:
- application/json
parameters:
- name: agreementId
in: query
description: agreementId
type: string
- name: jobId
in: query
description: Id of the job.
type: string
- name: owner
in: query
description: owner
type: string
responses:
200:
        description: Status retrieved successfully
400:
description: Error
"""
try:
data = request.args if request.args else request.json
agreement_id = data.get('agreementId')
owner = data.get('owner')
job_id = data.get('jobId')
if not agreement_id or len(agreement_id) < 2:
agreement_id = None
if not job_id or len(job_id) < 2:
job_id = None
if not owner or len(owner) < 2:
owner = None
if owner is None and agreement_id is None and job_id is None:
msg = f'You have to specify one of agreementId, jobId or owner'
logging.error(msg)
return jsonify(error=msg), 400
logging.debug("Try to start")
api_response = get_sql_status(agreement_id, job_id, owner)
return jsonify(api_response), 200
except ApiException as e:
msg = f'Error getting the status: {e}'
logging.error(msg)
return jsonify(error=msg), 400
``` |
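A hedged client-side sketch of starting a job through `POST /compute`; the host, port, URL prefix and payload values are illustrative, and a real request needs the full stage structure shown in the route's docstring example above.
```python
import requests

# illustrative values only; a real workflow needs at least one complete stage
payload = {
    "agreementId": "0x111111",
    "owner": "0xC41808BBef371AD5CFc76466dDF9dEe228d2BdAA",
    "providerSignature": "ae01",
    "workflow": {"stages": []},
}
response = requests.post("http://localhost:8050/api/v1/operator/compute",
                         json=payload)
print(response.status_code, response.json())
```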
{
"source": "jpmartinezv/DNA-adventure",
"score": 3
} |
#### File: jpmartinezv/DNA-adventure/biolib.py
```python
from Tkinter import *
import re
from math import *
class BioLib():
def __init__(self, list, parameters):
self.list = []
self.dist = int(parameters[0])
self.radio = int(parameters[1])
self.conservacion = int(parameters[2])
self.ln = []
self.len_seq = 0
for item in list:
self.list.append(item)
self.fill()
def fill(self):
max_size = 0
for item in self.list:
max_size = max(max_size, len(item))
self.ln.append(len(item))
self.len_seq = max_size
i = 0
for i in range(len(self.list)):
tmp = '-' * abs(max_size - len(self.list[i]))
self.list[i] = tmp + self.list[i]
def f(self, x):
return {
'A': "A",
'G': "G",
'C': "C",
'T': "T",
'U': "U",
'R': "[A|G]",
'Y': "[C|T]",
'N': "[A|G|C|T|U]",
'W': "A|T",
'S': "G|C",
'M': "A|C",
'K': "G|T",
'B': "G|C|T",
'H': "A|C|T",
'D': "A|G|T",
'V': "A|G|C"
}[x]
def mapMotif(self, motif):
ret = ""
for x in motif:
ret += self.f(x)
return ret
def matchMotif(self, motif = ""):
print motif
motif = r"" + self.mapMotif(motif)
self.matches = []
for seq in self.list:
it = re.finditer(motif, seq)
tmp = []
for match in it:
tmp.append( [ match.start() , match.end() ] )
self.matches.append(tmp)
def showMatches(self):
root = Tk()
text = Text(root)
line = 1
for i in range(len(self.list)):
            text.insert(INSERT, "Sequence " + str(i+1) + ":\n")
line = line + 1
text.insert(INSERT, self.list[i] + "\n\n")
for item in self.matches[i]:
text.tag_add("yellow", str(line) + "." + str(item[0]), str(line) + "." + str(item[1]))
line = line + 2
text.tag_config("yellow", background="yellow", foreground="black")
patron = self.list[0]
patron_m = self.matches[0]
var = ceil((self.ln[0]*self.dist)/100.0)
matches_valid = []
for m in patron_m:
f = True
pos = m[1]
for i in range(1, len(self.list)):
flag = False
for mi in self.matches[i]:
if abs(pos - mi[1]) < var:
flag = True
break
if not flag:
f = False
break
if f:
matches_valid.append(m)
self.matches[0] = matches_valid
        text.insert(INSERT, "Valid motifs:\n")
line = line + 1
text.insert(INSERT, self.list[0] + ":\n")
for item in self.matches[0]:
text.tag_add("red", str(line) + "." + str(item[0]), str(line) + "." + str(item[1]))
text.tag_config("red", background="red", foreground="black")
text.pack(expand = YES, fill = BOTH)
root.mainloop()
``` |
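Illustrative usage of the class above; the sequences and parameters are made up. `matchMotif` expands IUPAC codes into a regex and `showMatches` opens a Tkinter window highlighting the hits.
```python
# hypothetical example data
sequences = ["ACGTGGTCAAACGT", "TTACGTGGTCAAAC", "GGACGTGGTCAATT"]
parameters = [10, 5, 80]      # dist, radio, conservacion

bio = BioLib(sequences, parameters)
bio.matchMotif("ACGTGG")      # plain bases; IUPAC codes like R/Y/N also work
bio.showMatches()             # Tkinter window with highlighted matches
```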
{
"source": "jpmaterial/trimesh",
"score": 3
} |
#### File: trimesh/tests/test_3mf.py
```python
try:
from . import generic as g
except BaseException:
import generic as g
class MFTest(g.unittest.TestCase):
def test_3MF(self):
# an assembly with instancing
s = g.get_mesh('counterXP.3MF')
# should be 2 unique meshes
assert len(s.geometry) == 2
# should be 6 instances around the scene
assert len(s.graph.nodes_geometry) == 6
assert all(m.is_volume for m in s.geometry.values())
# a single body 3MF assembly
s = g.get_mesh('featuretype.3MF')
        # should be 1 unique mesh
        assert len(s.geometry) == 1
        # should be 1 instance in the scene
        assert len(s.graph.nodes_geometry) == 1
def test_units(self):
# test our unit conversion function
converter = g.trimesh.units.unit_conversion
# these are the units listed in the 3MF spec as valid
units = ['micron', 'millimeter',
'centimeter', 'inch', 'foot', 'meter']
# check conversion factor for all valid 3MF units
assert all(converter(u, 'inches') > 1e-12 for u in units)
def test_kwargs(self):
# check if kwargs are properly passed to geometries
s = g.get_mesh('P_XPM_0331_01.3mf')
assert(all(len(v.vertices) == 4 for v in s.geometry.values()))
s = g.get_mesh('P_XPM_0331_01.3mf', process=False)
assert(all(len(v.vertices) == 5 for v in s.geometry.values()))
def test_names(self):
# check if two different objects with the same name are correctly processed
s = g.get_mesh('cube_and_sphere_same_name.3mf')
assert(len(s.geometry) == 2)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
```
#### File: trimesh/tests/test_adjacency.py
```python
try:
from . import generic as g
except BaseException:
import generic as g
class AdjacencyTest(g.unittest.TestCase):
def test_radius(self):
for radius in [0.1, 1.0, 3.1459, 29.20]:
m = g.trimesh.creation.cylinder(
radius=radius, height=radius * 10)
# remove the cylinder cap
signs = (g.np.sign(m.vertices[:, 2]) < 0)[m.faces]
not_cap = ~g.np.logical_or(
signs.all(axis=1), ~signs.any(axis=1))
m.update_faces(not_cap)
# compare the calculated radius
radii = m.face_adjacency_radius
radii = radii[g.np.isfinite(radii)]
assert g.np.allclose(radii, radius, atol=radius / 100)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
```
#### File: trimesh/tests/test_inertia.py
```python
try:
from . import generic as g
except BaseException:
import generic as g
# tetToTris extracts outward facing triangles
# of a tetrahedron given in this order
# 1
# / | \
# 0-----2
# \ | /
# 3
triIdxs = g.np.ravel([[2, 1, 0], [3, 0, 1], [3, 2, 0], [3, 1, 2]])
def tetToTris(tet):
return g.np.reshape(tet[triIdxs], (-1, 3))
class InertiaTest(g.unittest.TestCase):
def test_inertia(self):
t0 = g.np.array([[-0.419575686853, -0.898655215203, -0.127965023308, 0.],
[0.712589964872, -0.413418145015, 0.566834172697, 0.],
[-0.562291548012, 0.146643245877, 0.813832890385, 0.],
[0., 0., 0., 1.]])
t1 = g.np.array([[0.343159553585, 0.624765521319, -0.701362648103, 0.],
[0.509982849005, -0.750986657709, -0.419447891476, 0.],
[-0.788770571525, -0.213745370274, -0.57632794673, 0.],
[0., 0., 0., 1.]])
# make sure our transformations are actually still transformations
assert g.np.abs(g.np.dot(t0, t0.T) - g.np.eye(4)).max() < 1e-10
assert g.np.abs(g.np.dot(t1, t1.T) - g.np.eye(4)).max() < 1e-10
c = g.trimesh.primitives.Cylinder(height=10,
radius=1,
sections=720, # number of slices
transform=t0)
c0m = c.moment_inertia.copy()
c0 = g.trimesh.inertia.cylinder_inertia(c.volume,
c.primitive.radius,
c.primitive.height,
c.primitive.transform)
ct = g.np.abs((c0m / c0) - 1)
# we are comparing an inertia tensor from a mesh of a cylinder
# to an inertia tensor from an actual cylinder, so allow for some
# discretization uncertainty
assert ct.max() < 1e-3
# check our principal axis calculation against this cylinder
# the direction (long axis) of the cylinder should correspond to
# the smallest principal component of inertia, AKA rotation along
# the axis, rather than the other two which are perpendicular
components, vectors = g.trimesh.inertia.principal_axis(
c.moment_inertia)
# inferred cylinder axis
inferred = vectors[components.argmin()]
# inferred cylinder axis should be close to actual cylinder axis
axis_test = g.np.allclose(g.np.abs(inferred),
g.np.abs(c.direction))
assert axis_test
# make sure Trimesh attribute is plumbed correctly
assert g.np.allclose(c.principal_inertia_components, components)
assert g.np.allclose(c.principal_inertia_vectors, vectors)
# the other two axis of the cylinder should be identical
assert g.np.abs(g.np.diff(g.np.sort(components)[-2:])).max() < 1e-8
m = g.get_mesh('featuretype.STL')
i0 = m.moment_inertia.copy()
# rotate the moment of inertia
i1 = g.trimesh.inertia.transform_inertia(
transform=t0, inertia_tensor=i0)
# rotate the mesh
m.apply_transform(t0)
# check to see if the rotated mesh + recomputed moment of inertia
# is close to the rotated moment of inertia
tf_test = g.np.abs((m.moment_inertia / i1) - 1)
assert tf_test.max() < 1e-6
# do it again with another transform
i2 = g.trimesh.inertia.transform_inertia(
transform=t1, inertia_tensor=i1)
m.apply_transform(t1)
tf_test = g.np.abs((m.moment_inertia / i2) - 1)
assert tf_test.max() < 1e-6
def test_primitives(self):
primitives = [g.trimesh.primitives.Cylinder(height=5),
g.trimesh.primitives.Box(),
g.trimesh.primitives.Sphere(radius=1.23)]
for p in primitives:
for i in range(100):
# check to make sure the analytic inertia tensors are relatively
# close to the meshed inertia tensor (order of magnitude and
# sign)
b = p.to_mesh()
comparison = g.np.abs(
p.moment_inertia - b.moment_inertia)
c_max = comparison.max() / g.np.abs(p.moment_inertia).max()
assert c_max < .1
if hasattr(p.primitive, 'transform'):
matrix = g.trimesh.transformations.random_rotation_matrix()
p.primitive.transform = matrix
elif hasattr(p.primitive, 'center'):
p.primitive.center = g.np.random.random(3)
def test_tetrahedron(self):
# Based on the 'numerical example' of the paper:
# Explicit Exact Formulas for the 3-D Tetrahedron Inertia Tensor
        # in Terms of its Vertex Coordinates, [Tonon, 2004]
# http://thescipub.com/pdf/jmssp.2005.8.11.pdf
# set up given vertices
vertices = g.np.float32([[8.3322, -11.86875, 0.93355],
[0.75523, 5., 16.37072],
[52.61236, 5., -5.3858],
[2., 5., 3.]])
# set up a simple trimesh tetrahedron
tris = tetToTris(g.np.int32([0, 1, 2, 3]))
tm_tet = g.trimesh.Trimesh(vertices, tris)
# 'ground truth' values from the paper
# however, there are some minor flaws
# a small deviation in the last decimal of the mass-centers' x:
CM_gt = g.np.float32([15.92492, 0.78281, 3.72962])
# moment of inertia values
a_mi, b_mi, c_mi = [43520.33257, 194711.28938, 191168.76173]
        # principal inertia values
a_pi, b_pi, c_pi = [4417.6615, -46343.16662, 11996.20119]
# NOTE: I guess there is a mistake in the paper
# b' (Eq. 9e) computes from x and z values
# c' (Eq. 9f) computes from x and y values
# therefore the given matrix E_Q (Eq. 2) is not correct
# b' and c' need to be swapped like this:
MI_gt = g.np.float32([[a_mi, -c_pi, -b_pi],
[-c_pi, b_mi, -a_pi],
[-b_pi, -a_pi, c_mi]])
# check center of mass
assert g.np.allclose(CM_gt, tm_tet.center_mass)
# check moment of inertia tensor
assert g.np.allclose(MI_gt, tm_tet.moment_inertia)
def test_cube_with_tetras(self):
# set up a unit cube, vertices in this order:
# 1-----2
# /| /|
# 0-+---3 |
# | 5---+-6
# |/ |/
# 4-----7
vertices = g.np.float32([[-1, -1, 1],
[-1, 1, 1],
[1, 1, 1],
[1, -1, 1],
[-1, -1, -1],
[-1, 1, -1],
[1, 1, -1],
[1, -1, -1]]) * 0.5
# 6 quad faces for the cube
quads = g.np.int32([[3, 2, 1, 0],
[0, 1, 5, 4],
[1, 2, 6, 5],
[2, 3, 7, 6],
[3, 0, 4, 7],
[4, 5, 6, 7]])
        # set up two different tetrahedralizations of a cube
# using 5 tets (1 in the middle and 4 around)
tetsA = g.np.int32([[0, 1, 3, 4],
[1, 2, 3, 6],
[1, 4, 5, 6],
[3, 4, 6, 7],
[1, 3, 4, 6]])
        # using 6 tets (around the cube diagonal)
tetsB = g.np.int32([[0, 1, 2, 6],
[0, 1, 6, 5],
[0, 4, 5, 6],
[0, 4, 6, 7],
[0, 3, 7, 6],
[0, 2, 3, 6]])
# create a trimesh cube from vertices and faces
tm_cube = g.trimesh.Trimesh(vertices, quads)
# https://en.wikipedia.org/wiki/List_of_moments_of_inertia
# ground truth for a unit cube with side length s = mass m = 1
# I = 1/6 * m * s^2
MI_gt = g.np.eye(3) / 6
assert g.np.allclose(MI_gt, tm_cube.moment_inertia)
# compare both tetrahedralizations
# should be equivalent to each other and to the cube
for tets in [tetsA, tetsB]:
# create trimesh tets from vertices and triangles
tm_tets = [g.trimesh.Trimesh(vertices, tetToTris(t)) for t in tets]
# get mass, center of mass, and moment of inertia for each tet
Ms = [tm_tet.mass for tm_tet in tm_tets]
CMs = [tm_tet.center_mass for tm_tet in tm_tets]
MIs = [tm_tet.moment_inertia for tm_tet in tm_tets]
# compute total mass and center of mass
mass = g.np.sum(Ms)
center_mass = g.np.dot(Ms, CMs)
# compare with unit cube
assert g.np.abs(tm_cube.mass - mass) < 1e-6
assert g.np.allclose(tm_cube.center_mass, center_mass)
# the moment of inertia tensors for the individual tetrahedra
# have to be re-assembled, using the parallel-axis-theorem
# https://en.wikipedia.org/wiki/Parallel_axis_theorem#Tensor_generalization
E = g.np.eye(3)
MI = g.np.zeros((3, 3), g.np.float32)
for i in range(len(tm_tets)):
R = CMs[i] - center_mass
MI += MIs[i] + Ms[i] * (g.np.dot(R, R) * E - g.np.outer(R, R))
assert g.np.allclose(MI_gt, MI)
class MassTests(g.unittest.TestCase):
def setUp(self):
# inertia numbers pulled from solidworks
self.truth = g.data['mass_properties']
self.meshes = dict()
for data in self.truth:
filename = data['filename']
self.meshes[filename] = g.get_mesh(filename)
def test_mass(self):
for truth in self.truth:
mesh = self.meshes[truth['filename']]
calc = g.trimesh.triangles.mass_properties(
triangles=mesh.triangles,
density=truth['density'],
skip_inertia=False)
for key, value in calc.items():
if key not in truth:
continue
if not g.np.allclose(calc[key], truth[key], atol=1e-2):
raise ValueError('{}({}):\n{}\n!=\n{}'.format(
truth['filename'],
key,
calc[key],
g.np.array(truth[key])))
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
```
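The parallel-axis-theorem reassembly used in `test_cube_with_tetras` generalizes to any decomposition. A standalone numpy sketch (the function name is mine, not part of trimesh) of combining per-piece inertia tensors, each taken about its own center of mass, into a single tensor about the combined center of mass:
```python
import numpy as np

def combine_inertia(masses, centers, tensors):
    """Combine per-piece inertia tensors (each about its own center of
    mass) into one tensor about the combined center of mass."""
    masses = np.asarray(masses, dtype=np.float64)
    centers = np.asarray(centers, dtype=np.float64)
    total_mass = masses.sum()
    center_mass = np.dot(masses, centers) / total_mass
    identity = np.eye(3)
    combined = np.zeros((3, 3))
    for m, c, tensor in zip(masses, centers, tensors):
        offset = c - center_mass
        combined += tensor + m * (np.dot(offset, offset) * identity -
                                  np.outer(offset, offset))
    return total_mass, center_mass, combined
```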
#### File: trimesh/tests/test_nsphere.py
```python
try:
from . import generic as g
except BaseException:
import generic as g
class NSphereTest(g.unittest.TestCase):
def test_minball(self):
# how close do we need to be
tol_fit = 1e-2
# get some assorted mesh geometries to test performance
# and a perfect sphere mesh to test the degenerate case
for m in g.np.append(list(g.get_meshes(5)),
g.trimesh.primitives.Sphere()):
s = m.bounding_sphere
R_check = ((m.vertices - s.primitive.center)
** 2).sum(axis=1).max() ** .5
assert len(s.primitive.center) == 3
assert s.primitive.radius > 0.0
assert abs(s.primitive.radius - R_check) < tol_fit
assert s.volume > (m.volume - tol_fit)
# check minimum n-sphere for points in 2, 3, 4 dimensions
for d in [2, 3, 4]:
for i in range(5):
points = g.np.random.random((100, d))
C, R = g.trimesh.nsphere.minimum_nsphere(points)
R_check = ((points - C)**2).sum(axis=1).max() ** .5
assert len(C) == d
assert R > 0.0
assert abs(R - R_check) < g.tol.merge
def test_isnsphere(self):
# make sure created spheres are uv sphere
m = g.trimesh.creation.uv_sphere()
# move the mesh around for funsies
m.apply_translation(g.np.random.random(3))
m.apply_transform(
g.trimesh.transformations.random_rotation_matrix())
# all vertices should be on nsphere
assert g.trimesh.nsphere.is_nsphere(m.vertices)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
```
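For quick experimentation with the same helpers the tests above exercise, a small sketch; the box extents and point count are arbitrary.
```python
import numpy as np
import trimesh

# bounding sphere of a mesh
mesh = trimesh.creation.box(extents=[1.0, 2.0, 3.0])
sphere = mesh.bounding_sphere
print(sphere.primitive.center, sphere.primitive.radius)

# minimum n-sphere of a random point cloud
points = np.random.random((100, 3))
center, radius = trimesh.nsphere.minimum_nsphere(points)
print(center, radius)
```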
#### File: trimesh/trimesh/creation.py
```python
from .base import Trimesh
from .constants import log, tol
from .geometry import faces_to_edges, align_vectors, plane_transform
from . import util
from . import grouping
from . import triangles
from . import transformations as tf
import numpy as np
import collections
try:
# shapely is a soft dependency
from shapely.geometry import Polygon
from shapely.wkb import loads as load_wkb
except BaseException as E:
# shapely will sometimes raise OSErrors
# on import rather than just ImportError
from . import exceptions
# re-raise the exception when someone tries
# to use the module that they don't have
Polygon = exceptions.closure(E)
load_wkb = exceptions.closure(E)
def revolve(linestring,
angle=None,
sections=None,
transform=None,
**kwargs):
"""
    Revolve a 2D line string around the 2D Y axis; in the result the
    2D Y axis points along the 3D Z axis.
    This function handles the indexing complexity and is used to create
    all radially symmetric primitives, eventually including cylinders,
    annular cylinders, capsules, cones, and UV spheres.
Note that if your linestring is closed, it needs to be counterclockwise
if you would like face winding and normals facing outwards.
Parameters
-------------
linestring : (n, 2) float
Lines in 2D which will be revolved
angle : None or float
Angle in radians to revolve curve by
sections : None or int
Number of sections result should have
If not specified default is 32 per revolution
transform : None or (4, 4) float
Transform to apply to mesh after construction
**kwargs : dict
Passed to Trimesh constructor
Returns
--------------
revolved : Trimesh
Mesh representing revolved result
"""
linestring = np.asanyarray(linestring, dtype=np.float64)
# linestring must be ordered 2D points
if len(linestring.shape) != 2 or linestring.shape[1] != 2:
raise ValueError('linestring must be 2D!')
if angle is None:
# default to closing the revolution
angle = np.pi * 2
closed = True
else:
# check passed angle value
closed = angle >= ((np.pi * 2) - 1e-8)
if sections is None:
# default to 32 sections for a full revolution
sections = int(angle / (np.pi * 2) * 32)
# change to face count
sections += 1
# create equally spaced angles
theta = np.linspace(0, angle, sections)
# 2D points around the revolution
points = np.column_stack((np.cos(theta), np.sin(theta)))
# how many points per slice
per = len(linestring)
# use the 2D X component as radius
radius = linestring[:, 0]
# use the 2D Y component as the height along revolution
height = linestring[:, 1]
# a lot of tiling to get our 3D vertices
vertices = np.column_stack((
np.tile(points, (1, per)).reshape((-1, 2)) *
np.tile(radius, len(points)).reshape((-1, 1)),
np.tile(height, len(points))))
if closed:
# should be a duplicate set of vertices
assert np.allclose(vertices[:per],
vertices[-per:])
# chop off duplicate vertices
vertices = vertices[:-per]
if transform is not None:
# apply transform to vertices
vertices = tf.transform_points(vertices, transform)
# how many slices of the pie
slices = len(theta) - 1
# start with a quad for every segment
# this is a superset which will then be reduced
quad = np.array([0, per, 1,
1, per, per + 1])
# stack the faces for a single slice of the revolution
single = np.tile(quad, per).reshape((-1, 3))
# `per` is basically the stride of the vertices
single += np.tile(np.arange(per), (2, 1)).T.reshape((-1, 1))
# remove any zero-area triangle
# this covers many cases without having to think too much
single = single[triangles.area(vertices[single]) > tol.merge]
# how much to offset each slice
# note arange multiplied by vertex stride
# but tiled by the number of faces we actually have
offset = np.tile(np.arange(slices) * per,
(len(single), 1)).T.reshape((-1, 1))
# stack a single slice into N slices
stacked = np.tile(single.ravel(), slices).reshape((-1, 3))
if tol.strict:
# make sure we didn't screw up stacking operation
assert np.allclose(stacked.reshape((-1, single.shape[0], 3)) - single, 0)
# offset stacked and wrap vertices
faces = (stacked + offset) % len(vertices)
# create the mesh from our vertices and faces
mesh = Trimesh(vertices=vertices, faces=faces,
**kwargs)
# strict checks run only in unit tests
if (tol.strict and
np.allclose(radius[[0, -1]], 0.0) or
np.allclose(linestring[0], linestring[-1])):
# if revolved curve starts and ends with zero radius
# it should really be a valid volume, unless the sign
# reversed on the input linestring
assert mesh.is_volume
return mesh
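# Illustrative use of revolve(); the profile below is made up. A linestring
# that starts and ends on the axis (radius 0) gives a watertight solid:
#   profile = [[0.0, 0.0], [1.0, 0.0], [0.8, 2.0], [0.0, 2.0]]
#   vase = revolve(profile, sections=64)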
def extrude_polygon(polygon,
height,
transform=None,
triangle_args=None,
**kwargs):
"""
Extrude a 2D shapely polygon into a 3D mesh
Parameters
----------
polygon : shapely.geometry.Polygon
2D geometry to extrude
height : float
Distance to extrude polygon along Z
triangle_args : str or None
Passed to triangle
**kwargs:
passed to Trimesh
Returns
----------
mesh : trimesh.Trimesh
Resulting extrusion as watertight body
"""
# create a triangulation from the polygon
vertices, faces = triangulate_polygon(
polygon, triangle_args=triangle_args, **kwargs)
# extrude that triangulation along Z
mesh = extrude_triangulation(vertices=vertices,
faces=faces,
height=height,
transform=transform,
**kwargs)
return mesh
def sweep_polygon(polygon,
path,
angles=None,
**kwargs):
"""
Extrude a 2D shapely polygon into a 3D mesh along an
arbitrary 3D path. Doesn't handle sharp curvature well.
Parameters
----------
polygon : shapely.geometry.Polygon
Profile to sweep along path
path : (n, 3) float
A path in 3D
angles : (n,) float
Optional rotation angle relative to prior vertex
at each vertex
Returns
-------
mesh : trimesh.Trimesh
Geometry of result
"""
path = np.asanyarray(path, dtype=np.float64)
if not util.is_shape(path, (-1, 3)):
raise ValueError('Path must be (n, 3)!')
# Extract 2D vertices and triangulation
verts_2d = np.array(polygon.exterior)[:-1]
base_verts_2d, faces_2d = triangulate_polygon(polygon, **kwargs)
n = len(verts_2d)
# Create basis for first planar polygon cap
x, y, z = util.generate_basis(path[0] - path[1])
tf_mat = np.ones((4, 4))
tf_mat[:3, :3] = np.c_[x, y, z]
tf_mat[:3, 3] = path[0]
# Compute 3D locations of those vertices
verts_3d = np.c_[verts_2d, np.zeros(n)]
verts_3d = tf.transform_points(verts_3d, tf_mat)
base_verts_3d = np.c_[base_verts_2d,
np.zeros(len(base_verts_2d))]
base_verts_3d = tf.transform_points(base_verts_3d,
tf_mat)
# keep matching sequence of vertices and 0- indexed faces
vertices = [base_verts_3d]
faces = [faces_2d]
# Compute plane normals for each turn --
# each turn induces a plane halfway between the two vectors
v1s = util.unitize(path[1:-1] - path[:-2])
v2s = util.unitize(path[1:-1] - path[2:])
norms = np.cross(np.cross(v1s, v2s), v1s + v2s)
norms[(norms == 0.0).all(1)] = v1s[(norms == 0.0).all(1)]
norms = util.unitize(norms)
final_v1 = util.unitize(path[-1] - path[-2])
norms = np.vstack((norms, final_v1))
v1s = np.vstack((v1s, final_v1))
# Create all side walls by projecting the 3d vertices into each plane
# in succession
for i in range(len(norms)):
verts_3d_prev = verts_3d
# Rotate if needed
if angles is not None:
tf_mat = tf.rotation_matrix(angles[i],
norms[i],
path[i])
verts_3d_prev = tf.transform_points(verts_3d_prev,
tf_mat)
# Project vertices onto plane in 3D
ds = np.einsum('ij,j->i', (path[i + 1] - verts_3d_prev), norms[i])
ds = ds / np.dot(v1s[i], norms[i])
verts_3d_new = np.einsum('i,j->ij', ds, v1s[i]) + verts_3d_prev
# Add to face and vertex lists
new_faces = [[i + n, (i + 1) % n, i] for i in range(n)]
new_faces.extend([[(i - 1) % n + n, i + n, i] for i in range(n)])
# save faces and vertices into a sequence
faces.append(np.array(new_faces))
vertices.append(np.vstack((verts_3d, verts_3d_new)))
verts_3d = verts_3d_new
# do the main stack operation from a sequence to (n,3) arrays
# doing one vstack provides a substantial speedup by
# avoiding a bunch of temporary allocations
vertices, faces = util.append_faces(vertices, faces)
# Create final cap
x, y, z = util.generate_basis(path[-1] - path[-2])
vecs = verts_3d - path[-1]
coords = np.c_[np.einsum('ij,j->i', vecs, x),
np.einsum('ij,j->i', vecs, y)]
base_verts_2d, faces_2d = triangulate_polygon(Polygon(coords))
base_verts_3d = (np.einsum('i,j->ij', base_verts_2d[:, 0], x) +
np.einsum('i,j->ij', base_verts_2d[:, 1], y)) + path[-1]
faces = np.vstack((faces, faces_2d + len(vertices)))
vertices = np.vstack((vertices, base_verts_3d))
return Trimesh(vertices, faces)
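# Illustrative use of sweep_polygon(); the profile and path are made up and
# shapely must be installed:
#   from shapely.geometry import Polygon
#   profile = Polygon([(0, 0), (0.5, 0), (0.5, 0.5), (0, 0.5)])
#   path = [[0, 0, 0], [0, 0, 1.0], [0, 1.0, 2.0]]
#   swept = sweep_polygon(profile, path)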
def extrude_triangulation(vertices,
faces,
height,
transform=None,
**kwargs):
"""
Extrude a 2D triangulation into a watertight mesh.
Parameters
----------
vertices : (n, 2) float
2D vertices
faces : (m, 3) int
Triangle indexes of vertices
height : float
Distance to extrude triangulation
**kwargs : dict
Passed to Trimesh constructor
Returns
---------
mesh : trimesh.Trimesh
Mesh created from extrusion
"""
vertices = np.asanyarray(vertices, dtype=np.float64)
height = float(height)
faces = np.asanyarray(faces, dtype=np.int64)
if not util.is_shape(vertices, (-1, 2)):
raise ValueError('Vertices must be (n,2)')
if not util.is_shape(faces, (-1, 3)):
raise ValueError('Faces must be (n,3)')
if np.abs(height) < tol.merge:
raise ValueError('Height must be nonzero!')
# make sure triangulation winding is pointing up
normal_test = triangles.normals(
[util.stack_3D(vertices[faces[0]])])[0]
normal_dot = np.dot(normal_test,
[0.0, 0.0, np.sign(height)])[0]
# make sure the triangulation is aligned with the sign of
# the height we've been passed
if normal_dot < 0.0:
faces = np.fliplr(faces)
# stack the (n,3) faces into (3*n, 2) edges
edges = faces_to_edges(faces)
edges_sorted = np.sort(edges, axis=1)
# edges which only occur once are on the boundary of the polygon
# since the triangulation may have subdivided the boundary of the
# shapely polygon, we need to find it again
edges_unique = grouping.group_rows(
edges_sorted, require_count=1)
# (n, 2, 2) set of line segments (positions, not references)
boundary = vertices[edges[edges_unique]]
# we are creating two vertical triangles for every 2D line segment
# on the boundary of the 2D triangulation
vertical = np.tile(boundary.reshape((-1, 2)), 2).reshape((-1, 2))
vertical = np.column_stack((vertical,
np.tile([0, height, 0, height],
len(boundary))))
vertical_faces = np.tile([3, 1, 2, 2, 1, 0],
(len(boundary), 1))
vertical_faces += np.arange(len(boundary)).reshape((-1, 1)) * 4
vertical_faces = vertical_faces.reshape((-1, 3))
# stack the (n,2) vertices with zeros to make them (n, 3)
vertices_3D = util.stack_3D(vertices)
# a sequence of zero- indexed faces, which will then be appended
# with offsets to create the final mesh
faces_seq = [faces[:, ::-1],
faces.copy(),
vertical_faces]
vertices_seq = [vertices_3D,
vertices_3D.copy() + [0.0, 0, height],
vertical]
# append sequences into flat nicely indexed arrays
vertices, faces = util.append_faces(vertices_seq, faces_seq)
if transform is not None:
# apply transform here to avoid later bookkeeping
vertices = tf.transform_points(
vertices, transform)
# if the transform flips the winding flip faces back
# so that the normals will be facing outwards
if tf.flips_winding(transform):
# fliplr makes arrays non-contiguous
faces = np.ascontiguousarray(np.fliplr(faces))
# create mesh object with passed keywords
mesh = Trimesh(vertices=vertices,
faces=faces,
**kwargs)
# only check in strict mode (unit tests)
if tol.strict:
assert mesh.volume > 0.0
return mesh
def triangulate_polygon(polygon,
triangle_args=None,
engine=None,
**kwargs):
"""
Given a shapely polygon create a triangulation using a
python interface to `triangle.c` or mapbox-earcut.
> pip install triangle
> pip install mapbox_earcut
Parameters
---------
polygon : Shapely.geometry.Polygon
Polygon object to be triangulated
triangle_args : str or None
Passed to triangle.triangulate i.e: 'p', 'pq30'
engine : None or str
Any value other than 'earcut' will use `triangle`
Returns
--------------
vertices : (n, 2) float
Points in space
faces : (n, 3) int
Index of vertices that make up triangles
"""
if engine == 'earcut':
from mapbox_earcut import triangulate_float64
# get vertices as sequence where exterior is the first value
vertices = [np.array(polygon.exterior)]
vertices.extend(np.array(i) for i in polygon.interiors)
# record the index from the length of each vertex array
rings = np.cumsum([len(v) for v in vertices])
# stack vertices into (n, 2) float array
vertices = np.vstack(vertices)
# run triangulation
faces = triangulate_float64(vertices, rings).reshape(
(-1, 3)).astype(np.int64).reshape((-1, 3))
return vertices, faces
# do the import here for soft requirement
from triangle import triangulate
# set default triangulation arguments if not specified
if triangle_args is None:
triangle_args = 'p'
# turn the polygon in to vertices, segments, and hole points
arg = _polygon_to_kwargs(polygon)
# run the triangulation
result = triangulate(arg, triangle_args)
return result['vertices'], result['triangles']
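# Illustrative use of triangulate_polygon(); the square is made up and either
# the `triangle` or the `mapbox_earcut` package must be installed:
#   from shapely.geometry import Polygon
#   square = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
#   vertices, faces = triangulate_polygon(square, engine='earcut')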
def _polygon_to_kwargs(polygon):
"""
Given a shapely polygon generate the data to pass to
the triangle mesh generator
Parameters
---------
polygon : Shapely.geometry.Polygon
Input geometry
Returns
--------
result : dict
Has keys: vertices, segments, holes
"""
if not polygon.is_valid:
raise ValueError('invalid shapely polygon passed!')
def round_trip(start, length):
"""
Given a start index and length, create a series of (n, 2) edges which
create a closed traversal.
Examples
---------
start, length = 0, 3
returns: [(0,1), (1,2), (2,0)]
"""
tiled = np.tile(np.arange(start, start + length).reshape((-1, 1)), 2)
tiled = tiled.reshape(-1)[1:-1].reshape((-1, 2))
tiled = np.vstack((tiled, [tiled[-1][-1], tiled[0][0]]))
return tiled
def add_boundary(boundary, start):
# coords is an (n, 2) ordered list of points on the polygon boundary
# the first and last points are the same, and there are no
# guarantees on points not being duplicated (which will
# later cause meshpy/triangle to shit a brick)
coords = np.array(boundary.coords)
# find indices points which occur only once, and sort them
# to maintain order
unique = np.sort(grouping.unique_rows(coords)[0])
cleaned = coords[unique]
vertices.append(cleaned)
facets.append(round_trip(start, len(cleaned)))
# holes require points inside the region of the hole, which we find
# by creating a polygon from the cleaned boundary region, and then
# using a representative point. You could do things like take the mean of
# the points, but this is more robust (to things like concavity), if
# slower.
test = Polygon(cleaned)
holes.append(np.array(test.representative_point().coords)[0])
return len(cleaned)
# sequence of (n,2) points in space
vertices = collections.deque()
# sequence of (n,2) indices of vertices
facets = collections.deque()
# list of (2) vertices in interior of hole regions
holes = collections.deque()
start = add_boundary(polygon.exterior, 0)
for interior in polygon.interiors:
try:
start += add_boundary(interior, start)
except BaseException:
log.warning('invalid interior, continuing')
continue
# create clean (n,2) float array of vertices
# and (m, 2) int array of facets
# by stacking the sequence of (p,2) arrays
vertices = np.vstack(vertices)
facets = np.vstack(facets).tolist()
# shapely polygons can include a Z component
# strip it out for the triangulation
if vertices.shape[1] == 3:
vertices = vertices[:, :2]
result = {'vertices': vertices,
'segments': facets}
# holes in meshpy lingo are a (h, 2) list of (x,y) points
# which are inside the region of the hole
# we added a hole for the exterior, which we slice away here
holes = np.array(holes)[1:]
if len(holes) > 0:
result['holes'] = holes
return result
def box(extents=None, transform=None, **kwargs):
"""
Return a cuboid.
Parameters
------------
extents : float, or (3,) float
Edge lengths
transform: (4, 4) float
Transformation matrix
**kwargs:
passed to Trimesh to create box
Returns
------------
geometry : trimesh.Trimesh
Mesh of a cuboid
"""
# vertices of the cube
vertices = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1,
1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1],
order='C',
dtype=np.float64).reshape((-1, 3))
vertices -= 0.5
# resize cube based on passed extents
if extents is not None:
extents = np.asanyarray(extents, dtype=np.float64)
if extents.shape != (3,):
raise ValueError('Extents must be (3,)!')
vertices *= extents
else:
extents = np.asarray((1.0, 1.0, 1.0), dtype=np.float64)
# hardcoded face indices
faces = [1, 3, 0, 4, 1, 0, 0, 3, 2, 2, 4, 0, 1, 7, 3, 5, 1, 4,
5, 7, 1, 3, 7, 2, 6, 4, 2, 2, 7, 6, 6, 5, 4, 7, 5, 6]
faces = np.array(faces, order='C', dtype=np.int64).reshape((-1, 3))
face_normals = [-1, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, -1, 0, 0, 1, 0, -1,
0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 1, 0, 1, 0, 0, 1, 0, 0]
face_normals = np.asanyarray(face_normals,
order='C',
dtype=np.float64).reshape(-1, 3)
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'box',
'extents': extents})
box = Trimesh(vertices=vertices,
faces=faces,
face_normals=face_normals,
process=False,
**kwargs)
# do the transform here to preserve face normals
if transform is not None:
box.apply_transform(transform)
return box
def icosahedron():
"""
Create an icosahedron, a 20 faced polyhedron.
Returns
-------------
ico : trimesh.Trimesh
        Icosahedron centered at the origin.
"""
t = (1.0 + 5.0**.5) / 2.0
vertices = [-1, t, 0, 1, t, 0, -1, -t, 0, 1, -t, 0, 0, -1, t, 0, 1, t,
0, -1, -t, 0, 1, -t, t, 0, -1, t, 0, 1, -t, 0, -1, -t, 0, 1]
faces = [0, 11, 5, 0, 5, 1, 0, 1, 7, 0, 7, 10, 0, 10, 11,
1, 5, 9, 5, 11, 4, 11, 10, 2, 10, 7, 6, 7, 1, 8,
3, 9, 4, 3, 4, 2, 3, 2, 6, 3, 6, 8, 3, 8, 9,
4, 9, 5, 2, 4, 11, 6, 2, 10, 8, 6, 7, 9, 8, 1]
# scale vertices so each vertex radius is 1.0
vertices = np.reshape(vertices, (-1, 3)) / np.sqrt(2.0 + t)
faces = np.reshape(faces, (-1, 3))
mesh = Trimesh(vertices=vertices,
faces=faces,
process=False)
return mesh
def icosphere(subdivisions=3, radius=1.0, color=None):
"""
    Create an icosphere centered at the origin.
Parameters
----------
subdivisions : int
How many times to subdivide the mesh.
Note that the number of faces will grow as function of
4 ** subdivisions, so you probably want to keep this under ~5
radius : float
Desired radius of sphere
color: (3,) float or uint8
Desired color of sphere
Returns
---------
ico : trimesh.Trimesh
Meshed sphere
"""
def refine_spherical():
vectors = ico.vertices
scalar = (vectors ** 2).sum(axis=1)**.5
unit = vectors / scalar.reshape((-1, 1))
offset = radius - scalar
ico.vertices += unit * offset.reshape((-1, 1))
ico = icosahedron()
ico._validate = False
for j in range(subdivisions):
ico = ico.subdivide()
refine_spherical()
ico._validate = True
if color is not None:
ico.visual.face_colors = color
ico.metadata.update({'shape': 'sphere',
'radius': radius})
return ico
def uv_sphere(radius=1.0,
count=[32, 32],
theta=None,
phi=None):
"""
Create a UV sphere (latitude + longitude) centered at the
origin. Roughly one order of magnitude faster than an
icosphere but slightly uglier.
Parameters
----------
radius : float
Radius of sphere
count : (2,) int
Number of latitude and longitude lines
theta : (n,) float
Optional theta angles in radians
phi : (n,) float
Optional phi angles in radians
Returns
----------
mesh : trimesh.Trimesh
Mesh of UV sphere with specified parameters
"""
count = np.array(count, dtype=np.int64)
count += np.mod(count, 2)
count[1] *= 2
# generate vertices on a sphere using spherical coordinates
if theta is None:
theta = np.linspace(0, np.pi, count[0])
if phi is None:
phi = np.linspace(0, np.pi * 2, count[1])[:-1]
spherical = np.dstack((np.tile(phi, (len(theta), 1)).T,
np.tile(theta, (len(phi), 1)))).reshape((-1, 2))
vertices = util.spherical_to_vector(spherical) * radius
# generate faces by creating a bunch of pie wedges
c = len(theta)
# a quad face as two triangles
pairs = np.array([[c, 0, 1],
[c + 1, c, 1]])
# increment both triangles in each quad face by the same offset
incrementor = np.tile(np.arange(c - 1), (2, 1)).T.reshape((-1, 1))
# create the faces for a single pie wedge of the sphere
strip = np.tile(pairs, (c - 1, 1))
strip += incrementor
# the first and last faces will be degenerate since the first
# and last vertex are identical in the two rows
strip = strip[1:-1]
# tile pie wedges into a sphere
faces = np.vstack([strip + (i * c) for i in range(len(phi))])
# poles are repeated in every strip, so a mask to merge them
mask = np.arange(len(vertices))
# the top pole are all the same vertex
mask[0::c] = 0
# the bottom pole are all the same vertex
mask[c - 1::c] = c - 1
# faces masked to remove the duplicated pole vertices
# and mod to wrap to fill in the last pie wedge
faces = mask[np.mod(faces, len(vertices))]
# we save a lot of time by not processing again
# since we did some bookkeeping mesh is watertight
mesh = Trimesh(vertices=vertices, faces=faces, process=False,
metadata={'shape': 'sphere',
'radius': radius})
return mesh
def capsule(height=1.0,
radius=1.0,
count=[32, 32]):
"""
Create a mesh of a capsule, or a cylinder with hemispheric ends.
Parameters
----------
height : float
Center to center distance of two spheres
radius : float
Radius of the cylinder and hemispheres
count : (2,) int
Number of sections on latitude and longitude
Returns
----------
capsule : trimesh.Trimesh
Capsule geometry with:
- cylinder axis is along Z
- one hemisphere is centered at the origin
- other hemisphere is centered along the Z axis at height
"""
height = float(height)
radius = float(radius)
count = np.array(count, dtype=np.int64)
count += np.mod(count, 2)
# create a theta where there is a double band around the equator
# so that we can offset the top and bottom of a sphere to
# get a nicely meshed capsule
theta = np.linspace(0, np.pi, count[0])
center = np.clip(np.arctan(tol.merge / radius),
tol.merge, np.inf)
offset = np.array([-center, center]) + (np.pi / 2)
theta = np.insert(theta,
int(len(theta) / 2),
offset)
capsule = uv_sphere(radius=radius,
count=count,
theta=theta)
top = capsule.vertices[:, 2] > tol.zero
capsule.vertices[top] += [0, 0, height]
capsule.metadata.update({'shape': 'capsule',
'height': height,
'radius': radius})
return capsule
def cone(radius,
height,
sections=None,
transform=None,
**kwargs):
"""
Create a mesh of a cone along Z centered at the origin.
Parameters
----------
radius : float
      The radius of the cone base
    height : float
      The height of the cone
sections : int or None
How many pie wedges per revolution
transform : (4, 4) float or None
Transform to apply after creation
**kwargs : dict
Passed to Trimesh constructor
Returns
----------
cone: trimesh.Trimesh
Resulting mesh of a cone
"""
# create the 2D outline of a cone
linestring = [[0, 0],
[radius, 0],
[0, height]]
# revolve the profile to create a cone
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'cone',
'radius': radius,
'height': height})
cone = revolve(linestring=linestring,
sections=sections,
transform=transform,
**kwargs)
return cone
def cylinder(radius,
height=None,
sections=None,
segment=None,
transform=None,
**kwargs):
"""
Create a mesh of a cylinder along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float or None
The height of the cylinder
sections : int or None
How many pie wedges should the cylinder have
segment : (2, 3) float
Endpoints of axis, overrides transform and height
transform : (4, 4) float
Transform to apply
**kwargs:
passed to Trimesh to create cylinder
Returns
----------
cylinder: trimesh.Trimesh
Resulting mesh of a cylinder
"""
if segment is not None:
# override transform and height with the segment
transform, height = _segment_to_cylinder(segment=segment)
if height is None:
raise ValueError('either `height` or `segment` must be passed!')
half = abs(float(height)) / 2.0
# create a profile to revolve
linestring = [[0, -half],
[radius, -half],
[radius, half],
[0, half]]
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'cylinder',
'height': height,
'radius': radius})
# generate cylinder through simple revolution
return revolve(linestring=linestring,
sections=sections,
transform=transform,
**kwargs)
def annulus(r_min,
r_max,
height=None,
sections=None,
transform=None,
segment=None,
**kwargs):
"""
Create a mesh of an annular cylinder along Z centered at the origin.
Parameters
----------
r_min : float
The inner radius of the annular cylinder
r_max : float
The outer radius of the annular cylinder
height : float
The height of the annular cylinder
sections : int or None
How many pie wedges should the annular cylinder have
transform : (4, 4) float or None
Transform to apply to move result from the origin
segment : None or (2, 3) float
Override transform and height with a line segment
**kwargs:
passed to Trimesh to create annulus
Returns
----------
annulus : trimesh.Trimesh
Mesh of annular cylinder
"""
if segment is not None:
# override transform and height with the segment if passed
transform, height = _segment_to_cylinder(segment=segment)
if height is None:
raise ValueError('either `height` or `segment` must be passed!')
r_min = abs(float(r_min))
# if center radius is zero this is a cylinder
if r_min < tol.merge:
return cylinder(radius=r_max,
height=height,
sections=sections,
transform=transform)
r_max = abs(float(r_max))
# we're going to center at XY plane so take half the height
half = abs(float(height)) / 2.0
# create counter-clockwise rectangle
linestring = [[r_min, -half],
[r_max, -half],
[r_max, half],
[r_min, half],
[r_min, -half]]
if 'metadata' not in kwargs:
kwargs['metadata'] = dict()
kwargs['metadata'].update(
{'shape': 'annulus',
'r_min': r_min,
'r_max': r_max,
'height': height})
# revolve the curve
annulus = revolve(linestring=linestring,
sections=sections,
transform=transform,
**kwargs)
return annulus
def _segment_to_cylinder(segment):
"""
Convert a line segment to a transform and height for a cylinder
or cylinder-like primitive.
Parameters
-----------
segment : (2, 3) float
3D line segment in space
Returns
-----------
transform : (4, 4) float
Matrix to move a Z-extruded origin cylinder to segment
height : float
The height of the cylinder needed
"""
segment = np.asanyarray(segment, dtype=np.float64)
if segment.shape != (2, 3):
raise ValueError('segment must be 2 3D points!')
vector = segment[1] - segment[0]
# override height with segment length
height = np.linalg.norm(vector)
# point in middle of line
midpoint = segment[0] + (vector * 0.5)
# align Z with our desired direction
rotation = align_vectors([0, 0, 1], vector)
# translate to midpoint of segment
translation = tf.translation_matrix(midpoint)
# compound the rotation and translation
transform = np.dot(translation, rotation)
return transform, height
def random_soup(face_count=100):
"""
Return random triangles as a Trimesh
Parameters
-----------
face_count : int
Number of faces desired in mesh
Returns
-----------
soup : trimesh.Trimesh
Geometry with face_count random faces
"""
vertices = np.random.random((face_count * 3, 3)) - 0.5
faces = np.arange(face_count * 3).reshape((-1, 3))
soup = Trimesh(vertices=vertices, faces=faces)
return soup
def axis(origin_size=0.04,
transform=None,
origin_color=None,
axis_radius=None,
axis_length=None):
"""
Return an XYZ axis marker as a Trimesh, which represents position
and orientation. If you set the origin size the other parameters
will be set relative to it.
Parameters
----------
transform : (4, 4) float
Transformation matrix
origin_size : float
Radius of sphere that represents the origin
origin_color : (3,) float or int, uint8 or float
Color of the origin
axis_radius : float
Radius of cylinder that represents x, y, z axis
axis_length: float
Length of cylinder that represents x, y, z axis
Returns
-------
marker : trimesh.Trimesh
Mesh geometry of axis indicators
"""
# the size of the ball representing the origin
origin_size = float(origin_size)
# set the transform and use origin-relative
# sized for other parameters if not specified
if transform is None:
transform = np.eye(4)
if origin_color is None:
origin_color = [255, 255, 255, 255]
if axis_radius is None:
axis_radius = origin_size / 5.0
if axis_length is None:
axis_length = origin_size * 10.0
# generate a ball for the origin
axis_origin = uv_sphere(radius=origin_size,
count=[10, 10])
axis_origin.apply_transform(transform)
# apply color to the origin ball
axis_origin.visual.face_colors = origin_color
# create the cylinder for the z-axis
translation = tf.translation_matrix(
[0, 0, axis_length / 2])
z_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(translation))
# XYZ->RGB, Z is blue
z_axis.visual.face_colors = [0, 0, 255]
# create the cylinder for the y-axis
translation = tf.translation_matrix(
[0, 0, axis_length / 2])
rotation = tf.rotation_matrix(np.radians(-90),
[1, 0, 0])
y_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, Y is green
y_axis.visual.face_colors = [0, 255, 0]
# create the cylinder for the x-axis
translation = tf.translation_matrix(
[0, 0, axis_length / 2])
rotation = tf.rotation_matrix(np.radians(90),
[0, 1, 0])
x_axis = cylinder(
radius=axis_radius,
height=axis_length,
transform=transform.dot(rotation).dot(translation))
# XYZ->RGB, X is red
x_axis.visual.face_colors = [255, 0, 0]
# append the sphere and three cylinders
marker = util.concatenate([axis_origin,
x_axis,
y_axis,
z_axis])
return marker
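# Hedged usage sketch (illustration only): place a small axis marker at a
# frame translated along +X; relies on the module-level `tf` import used above.
def _example_axis_marker():
    frame = tf.translation_matrix([1.0, 0.0, 0.0])
    marker = axis(origin_size=0.02, transform=frame)
    # the result is a single Trimesh combining the origin ball and 3 cylinders
    return marker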
def camera_marker(camera,
marker_height=0.4,
origin_size=None):
"""
Create a visual marker for a camera object, including an axis and FOV.
Parameters
---------------
camera : trimesh.scene.Camera
Camera object with FOV and transform defined
marker_height : float
How far along the camera Z should FOV indicators be
origin_size : float
Sphere radius of the origin (default: marker_height / 10.0)
Returns
------------
meshes : list
Contains Trimesh and Path3D objects which can be visualized
"""
# create sane origin size from marker height
if origin_size is None:
origin_size = marker_height / 10.0
# append the visualizations to an array
meshes = [axis(origin_size=origin_size)]
try:
# path is a soft dependency
from .path.exchange.load import load_path
except ImportError:
# they probably don't have shapely installed
log.warning('unable to create FOV visualization!',
exc_info=True)
return meshes
# calculate vertices from camera FOV angles
x = marker_height * np.tan(np.deg2rad(camera.fov[0]) / 2.0)
y = marker_height * np.tan(np.deg2rad(camera.fov[1]) / 2.0)
z = marker_height
# combine the points into the vertices of an FOV visualization
points = np.array(
[(0, 0, 0),
(-x, -y, z),
(x, -y, z),
(x, y, z),
(-x, y, z)],
dtype=float)
# create line segments for the FOV visualization
# a segment from the origin to each bound of the FOV
segments = np.column_stack(
(np.zeros_like(points), points)).reshape(
(-1, 3))
# add a loop for the outside of the FOV then reshape
# the whole thing into multiple line segments
segments = np.vstack((segments,
points[[1, 2,
2, 3,
3, 4,
4, 1]])).reshape((-1, 2, 3))
# add a single Path3D object for all line segments
meshes.append(load_path(segments))
return meshes
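# Hedged usage sketch (illustration only): any camera-like object with a
# `fov` attribute in degrees can be visualized; the axis marker is always
# the first element and a Path3D for the FOV pyramid follows only when the
# soft path dependencies import cleanly.
def _example_camera_marker(camera):
    geometry = camera_marker(camera, marker_height=0.3)
    assert len(geometry) >= 1
    return geometry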
def truncated_prisms(tris, origin=None, normal=None):
"""
Return a mesh consisting of multiple watertight prisms below
a list of triangles, truncated by a specified plane.
Parameters
-------------
    tris : (n, 3, 3) float
Triangles in space
origin : None or (3,) float
Origin of truncation plane
normal : None or (3,) float
Unit normal vector of truncation plane
Returns
-----------
mesh : trimesh.Trimesh
Triangular mesh
"""
if origin is None:
transform = np.eye(4)
else:
transform = plane_transform(origin=origin, normal=normal)
# transform the triangles to the specified plane
transformed = tf.transform_points(
tris.reshape((-1, 3)), transform).reshape((-1, 9))
# stack triangles such that every other one is repeated
vs = np.column_stack((transformed, transformed)).reshape((-1, 3, 3))
# set the Z value of the second triangle to zero
vs[1::2, :, 2] = 0
# reshape triangles to a flat array of points and transform back to original frame
vertices = tf.transform_points(
vs.reshape((-1, 3)), matrix=np.linalg.inv(transform))
# face indexes for a *single* truncated triangular prism
f = np.array([[2, 1, 0],
[3, 4, 5],
[0, 1, 4],
[1, 2, 5],
[2, 0, 3],
[4, 3, 0],
[5, 4, 1],
[3, 5, 2]])
# find the projection of each triangle with the normal vector
cross = np.dot([0, 0, 1], triangles.cross(transformed.reshape((-1, 3, 3))).T)
# stack faces into one prism per triangle
f_seq = np.tile(f, (len(transformed), 1)).reshape((-1, len(f), 3))
# if the normal of the triangle was positive flip the winding
f_seq[cross > 0] = np.fliplr(f)
# offset stacked faces to create correct indices
faces = (f_seq + (np.arange(len(f_seq)) * 6).reshape((-1, 1, 1))).reshape((-1, 3))
# create a mesh from the data
mesh = Trimesh(vertices=vertices, faces=faces, process=False)
return mesh
```
#### File: trimesh/path/traversal.py
```python
import copy
import numpy as np
from .util import is_ccw
from .. import util
from .. import grouping
from .. import constants
try:
import networkx as nx
except BaseException as E:
# create a dummy module which will raise the ImportError
# or other exception only when someone tries to use networkx
from ..exceptions import ExceptionModule
nx = ExceptionModule(E)
def vertex_graph(entities):
"""
Given a set of entity objects generate a networkx.Graph
that represents their vertex nodes.
Parameters
--------------
entities : list
Objects with 'closed' and 'nodes' attributes
Returns
-------------
graph : networkx.Graph
Graph where node indexes represent vertices
closed : (n,) int
Indexes of entities which are 'closed'
"""
graph = nx.Graph()
closed = []
for index, entity in enumerate(entities):
if entity.closed:
closed.append(index)
else:
graph.add_edges_from(entity.nodes,
entity_index=index)
return graph, np.array(closed)
def vertex_to_entity_path(vertex_path,
graph,
entities,
vertices=None):
"""
Convert a path of vertex indices to a path of entity indices.
Parameters
----------
vertex_path : (n,) int
Ordered list of vertex indices representing a path
graph : nx.Graph
Vertex connectivity
entities : (m,) list
Entity objects
vertices : (p, dimension) float
Vertex points in space
Returns
----------
entity_path : (q,) int
Entity indices which make up vertex_path
"""
def edge_direction(a, b):
"""
Given two edges, figure out if the first needs to be
reversed to keep the progression forward.
        [1,0] [1,2]  ->  -1,  1
        [1,0] [2,1]  ->  -1, -1
        [0,1] [1,2]  ->   1,  1
        [0,1] [2,1]  ->   1, -1
Parameters
------------
a : (2,) int
b : (2,) int
Returns
------------
a_direction : int
b_direction : int
"""
if a[0] == b[0]:
return -1, 1
elif a[0] == b[1]:
return -1, -1
elif a[1] == b[0]:
return 1, 1
elif a[1] == b[1]:
return 1, -1
else:
constants.log.debug(
'edges not connected!\n'
'vertex path %s\n'
'entity path: %s\n'
'entity[a]: %s\n'
'entity[b]: %s',
vertex_path,
entity_path,
entities[ea].points,
entities[eb].points)
return None, None
if vertices is None or vertices.shape[1] != 2:
ccw_direction = 1
else:
ccw_check = is_ccw(vertices[np.append(vertex_path,
vertex_path[0])])
ccw_direction = (ccw_check * 2) - 1
# make sure vertex path is correct type
vertex_path = np.asanyarray(vertex_path, dtype=np.int64)
# we will be saving entity indexes
entity_path = []
# loop through pairs of vertices
for i in np.arange(len(vertex_path) + 1):
# get two wrapped vertex positions
vertex_path_pos = np.mod(np.arange(2) + i, len(vertex_path))
vertex_index = vertex_path[vertex_path_pos]
entity_index = graph.get_edge_data(*vertex_index)['entity_index']
entity_path.append(entity_index)
# remove duplicate entities and order CCW
entity_path = grouping.unique_ordered(entity_path)[::ccw_direction]
# check to make sure there is more than one entity
if len(entity_path) == 1:
# apply CCW reverse in place if necessary
if ccw_direction < 0:
index = entity_path[0]
entities[index].reverse()
return entity_path
# traverse the entity path and reverse entities in place to
# align with this path ordering
round_trip = np.append(entity_path, entity_path[0])
round_trip = zip(round_trip[:-1], round_trip[1:])
for ea, eb in round_trip:
da, db = edge_direction(entities[ea].end_points,
entities[eb].end_points)
if da is not None:
entities[ea].reverse(direction=da)
entities[eb].reverse(direction=db)
entity_path = np.array(entity_path)
return entity_path
def closed_paths(entities, vertices):
"""
Paths are lists of entity indices.
We first generate vertex paths using graph cycle algorithms,
and then convert them to entity paths.
This will also change the ordering of entity.points in place
so a path may be traversed without having to reverse the entity.
Parameters
-------------
entities : (n,) entity objects
Entity objects
vertices : (m, dimension) float
Vertex points in space
Returns
-------------
entity_paths : sequence of (n,) int
Ordered traversals of entities
"""
# get a networkx graph of entities
graph, closed = vertex_graph(entities)
    # add entities that are closed as single-entity paths
entity_paths = np.reshape(closed, (-1, 1)).tolist()
# look for cycles in the graph, or closed loops
vertex_paths = nx.cycles.cycle_basis(graph)
# loop through every vertex cycle
for vertex_path in vertex_paths:
# a path has no length if it has fewer than 2 vertices
if len(vertex_path) < 2:
continue
# convert vertex indices to entity indices
entity_paths.append(
vertex_to_entity_path(vertex_path,
graph,
entities,
vertices))
return entity_paths
def discretize_path(entities, vertices, path, scale=1.0):
"""
Turn a list of entity indices into a path of connected points.
Parameters
-----------
entities : (j,) entity objects
Objects like 'Line', 'Arc', etc.
vertices: (n, dimension) float
Vertex points in space.
path : (m,) int
Indexes of entities
scale : float
Overall scale of drawing used for
numeric tolerances in certain cases
Returns
-----------
discrete : (p, dimension) float
Connected points in space that lie on the
path and can be connected with line segments.
"""
# make sure vertices are numpy array
vertices = np.asanyarray(vertices)
path_len = len(path)
if path_len == 0:
raise ValueError('Cannot discretize empty path!')
if path_len == 1:
# case where we only have one entity
discrete = np.asanyarray(entities[path[0]].discrete(
vertices,
scale=scale))
else:
# run through path appending each entity
discrete = []
for i, entity_id in enumerate(path):
# the current (n, dimension) discrete curve of an entity
current = entities[entity_id].discrete(vertices, scale=scale)
# check if we are on the final entity
if i >= (path_len - 1):
# if we are on the last entity include the last point
discrete.append(current)
else:
# slice off the last point so we don't get duplicate
# points from the end of one entity and the start of another
discrete.append(current[:-1])
# stack all curves to one nice (n, dimension) curve
discrete = np.vstack(discrete)
    # make sure 2D curves are counterclockwise
if vertices.shape[1] == 2 and not is_ccw(discrete):
        # reversing will make the array non C-contiguous
discrete = np.ascontiguousarray(discrete[::-1])
return discrete
class PathSample:
def __init__(self, points):
# make sure input array is numpy
self._points = np.array(points)
# find the direction of each segment
self._vectors = np.diff(self._points, axis=0)
# find the length of each segment
self._norms = util.row_norm(self._vectors)
# unit vectors for each segment
nonzero = self._norms > constants.tol_path.zero
self._unit_vec = self._vectors.copy()
self._unit_vec[nonzero] /= self._norms[nonzero].reshape((-1, 1))
# total distance in the path
self.length = self._norms.sum()
# cumulative sum of section length
# note that this is sorted
self._cum_norm = np.cumsum(self._norms)
def sample(self, distances):
# return the indices in cum_norm that each sample would
# need to be inserted at to maintain the sorted property
positions = np.searchsorted(self._cum_norm, distances)
positions = np.clip(positions, 0, len(self._unit_vec) - 1)
offsets = np.append(0, self._cum_norm)[positions]
# the distance past the reference vertex we need to travel
projection = distances - offsets
        # find out which direction we need to project
direction = self._unit_vec[positions]
# find out which vertex we're offset from
origin = self._points[positions]
# just the parametric equation for a line
resampled = origin + (direction * projection.reshape((-1, 1)))
return resampled
def truncate(self, distance):
"""
Return a truncated version of the path.
Only one vertex (at the endpoint) will be added.
"""
position = np.searchsorted(self._cum_norm, distance)
offset = distance - self._cum_norm[position - 1]
if offset < constants.tol_path.merge:
truncated = self._points[:position + 1]
else:
vector = util.unitize(np.diff(
self._points[np.arange(2) + position],
axis=0).reshape(-1))
vector *= offset
endpoint = self._points[position] + vector
truncated = np.vstack((self._points[:position + 1],
endpoint))
assert (util.row_norm(np.diff(
truncated, axis=0)).sum() -
distance) < constants.tol_path.merge
return truncated
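# Hedged usage sketch (illustration only): sample points along a simple
# L-shaped 2D path with the class above.
def _example_path_sample():
    path = PathSample([[0.0, 0.0],
                       [1.0, 0.0],
                       [1.0, 1.0]])
    # total length is two unit segments
    assert np.isclose(path.length, 2.0)
    # query three distances along the traversal
    hits = path.sample(np.array([0.5, 1.0, 1.5]))
    assert np.allclose(hits[0], [0.5, 0.0])
    assert np.allclose(hits[2], [1.0, 0.5])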
def resample_path(points,
count=None,
step=None,
step_round=True):
"""
Given a path along (n,d) points, resample them such that the
distance traversed along the path is constant in between each
of the resampled points. Note that this can produce clipping at
corners, as the original vertices are NOT guaranteed to be in the
new, resampled path.
ONLY ONE of count or step can be specified
Result can be uniformly distributed (np.linspace) by specifying count
Result can have a specific distance (np.arange) by specifying step
Parameters
----------
points: (n, d) float
Points in space
count : int,
Number of points to sample evenly (aka np.linspace)
step : float
Distance each step should take along the path (aka np.arange)
Returns
----------
resampled : (j,d) float
Points on the path
"""
points = np.array(points, dtype=np.float64)
# generate samples along the perimeter from kwarg count or step
if (count is not None) and (step is not None):
raise ValueError('Only step OR count can be specified')
if (count is None) and (step is None):
raise ValueError('Either step or count must be specified')
sampler = PathSample(points)
if step is not None and step_round:
if step >= sampler.length:
return points[[0, -1]]
count = int(np.ceil(sampler.length / step))
if count is not None:
samples = np.linspace(0, sampler.length, count)
elif step is not None:
samples = np.arange(0, sampler.length, step)
resampled = sampler.sample(samples)
check = util.row_norm(points[[0, -1]] - resampled[[0, -1]])
assert check[0] < constants.tol_path.merge
if count is not None:
assert check[1] < constants.tol_path.merge
return resampled
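# Hedged usage sketch (illustration only): resample the outline of a unit
# square into nine evenly spaced points along its perimeter.
def _example_resample_path():
    square = [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]
    resampled = resample_path(square, count=9)
    # endpoints are preserved and spacing along the perimeter is constant
    assert len(resampled) == 9
    assert np.allclose(resampled[0], square[0])
    assert np.allclose(resampled[-1], square[-1])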
def split(path):
"""
Split a Path2D into multiple Path2D objects where each
one has exactly one root curve.
Parameters
--------------
path : trimesh.path.Path2D
Input geometry
Returns
-------------
split : list of trimesh.path.Path2D
Original geometry as separate paths
"""
# avoid a circular import by referencing class of path
Path2D = type(path)
# save the results of the split to an array
split = []
# get objects from cache to avoid a bajillion
# cache checks inside the tight loop
paths = path.paths
discrete = path.discrete
polygons_closed = path.polygons_closed
enclosure_directed = path.enclosure_directed
for root_index, root in enumerate(path.root):
# get a list of the root curve's children
connected = list(enclosure_directed[root].keys())
# add the root node to the list
connected.append(root)
# store new paths and entities
new_paths = []
new_entities = []
for index in connected:
nodes = paths[index]
# add a path which is just sequential indexes
new_paths.append(np.arange(len(nodes)) +
len(new_entities))
# save the entity indexes
new_entities.extend(nodes)
# store the root index from the original drawing
metadata = copy.deepcopy(path.metadata)
metadata['split_2D'] = root_index
# we made the root path the last index of connected
new_root = np.array([len(new_paths) - 1])
# prevents the copying from nuking our cache
with path._cache:
# create the Path2D
split.append(Path2D(
entities=copy.deepcopy(path.entities[new_entities]),
vertices=copy.deepcopy(path.vertices),
metadata=metadata))
# add back expensive things to the cache
split[-1]._cache.update(
{'paths': new_paths,
'polygons_closed': polygons_closed[connected],
'discrete': [discrete[c] for c in connected],
'root': new_root})
# set the cache ID
split[-1]._cache.id_set()
return np.array(split)
```
#### File: trimesh/scene/transforms.py
```python
import uuid
import copy
import numpy as np
import collections
from .. import util
from .. import caching
from .. import transformations
class SceneGraph(object):
"""
Hold data about positions and instances of geometry
in a scene. This includes a forest (i.e. multi-root tree)
of transforms and information on which node is the base
frame, and which geometries are affiliated with which
nodes.
"""
def __init__(self, base_frame='world'):
"""
        Create a scene graph, holding homogeneous transformation
matrices and instance information about geometry.
Parameters
-----------
base_frame : any
The root node transforms will be positioned from.
"""
# a graph structure, subclass of networkx DiGraph
self.transforms = EnforcedForest()
# hashable, the base or root frame
self.base_frame = base_frame
# cache transformation matrices keyed with tuples
self._cache = caching.Cache(self.modified)
def update(self, frame_to, frame_from=None, **kwargs):
"""
Update a transform in the tree.
Parameters
------------
frame_from : hashable object
Usually a string (eg 'world').
If left as None it will be set to self.base_frame
frame_to : hashable object
Usually a string (eg 'mesh_0')
matrix : (4,4) float
Homogeneous transformation matrix
quaternion : (4,) float
Quaternion ordered [w, x, y, z]
axis : (3,) float
Axis of rotation
angle : float
Angle of rotation, in radians
translation : (3,) float
Distance to translate
geometry : hashable
Geometry object name, e.g. 'mesh_0'
extras: dictionary
Optional metadata attached to the new frame
(exports to glTF node 'extras').
"""
# if no frame specified, use base frame
if frame_from is None:
frame_from = self.base_frame
# pass through
attr = {k: v for k, v in kwargs.items()
if k in {'geometry', 'extras'}}
# convert various kwargs to a single matrix
attr['matrix'] = kwargs_to_matrix(**kwargs)
# add the edges for the transforms
# will return if it changed anything
if self.transforms.add_edge(
frame_from, frame_to, **attr):
# clear all cached matrices by setting
# modified hash to a random string
self._modified = str(uuid.uuid4())
# set the node attribute with the geometry information
if 'geometry' in kwargs:
self.transforms.node_data[
frame_to]['geometry'] = kwargs['geometry']
def get(self, frame_to, frame_from=None):
"""
Get the transform from one frame to another.
Parameters
------------
frame_to : hashable
Node name, usually a string (eg 'mesh_0')
frame_from : hashable
Node name, usually a string (eg 'world').
If None it will be set to self.base_frame
Returns
----------
transform : (4, 4) float
Homogeneous transformation matrix
Raises
-----------
ValueError
If the frames aren't connected.
"""
# use base frame if not specified
if frame_from is None:
frame_from = self.base_frame
# look up transform to see if we have it already
key = (frame_from, frame_to)
if key in self._cache:
return self._cache[key]
# get the geometry at the final node if any
geometry = self.transforms.node_data[
frame_to].get('geometry')
# get a local reference to edge data
edge_data = self.transforms.edge_data
if frame_from == frame_to:
# if we're going from ourself return identity
matrix = np.eye(4)
elif key in edge_data:
# if the path is just an edge return early
matrix = edge_data[key]['matrix']
else:
# we have a 3+ node path
# get the path from the forest always going from
# parent -> child -> child
path = self.transforms.shortest_path(
frame_from, frame_to)
            # collect a homogeneous transform for each edge
matrices = [edge_data[(u, v)]['matrix'] for u, v in
zip(path[:-1], path[1:])]
# multiply matrices into single transform
matrix = util.multi_dot(matrices)
# store the result
self._cache[key] = (matrix, geometry)
return matrix, geometry
def modified(self):
"""
Return the last time stamp data was modified.
"""
if hasattr(self, '_modified'):
return self._modified
return '0.0'
def copy(self):
"""
Return a copy of the current TransformForest.
Returns
------------
copied : TransformForest
Copy of current object.
"""
return copy.deepcopy(self)
def to_flattened(self):
"""
Export the current transform graph with all
transforms baked into world->instance.
Returns
---------
flat : dict
          Keyed {node : {transform, geometry}}
"""
flat = {}
base_frame = self.base_frame
for node in self.nodes:
if node == base_frame:
continue
# get the matrix and geometry name
matrix, geometry = self.get(
frame_to=node, frame_from=base_frame)
# store matrix as list rather than numpy array
flat[node] = {'transform': matrix.tolist(),
'geometry': geometry}
return flat
def to_gltf(self, scene, mesh_index=None):
"""
Export a transforms as the 'nodes' section of the
GLTF header dict.
Parameters
------------
scene : trimesh.Scene
Scene with geometry.
mesh_index : dict or None
Mapping { key in scene.geometry : int }
Returns
--------
gltf : dict
With 'nodes' referencing a list of dicts
"""
if mesh_index is None:
# geometry is an OrderedDict
# map mesh name to index: {geometry key : index}
mesh_index = {name: i for i, name
in enumerate(scene.geometry.keys())}
# get graph information into local scope before loop
graph = self.transforms
# get the stored node data
node_data = graph.node_data
edge_data = graph.edge_data
base_frame = self.base_frame
# list of dict, in gltf format
# start with base frame as first node index
result = [{'name': base_frame}]
# {node name : node index in gltf}
lookup = {base_frame: 0}
# collect the nodes in order
for node in node_data.keys():
if node == base_frame:
continue
# assign the index to the node-name lookup
lookup[node] = len(result)
# populate a result at the correct index
result.append({'name': node})
# get generated properties outside of loop
# does the scene have a defined camera to export
has_camera = scene.has_camera
children = graph.children
# then iterate through to collect data
for info in result:
# name of the scene node
node = info['name']
# get the original node names for children
childs = children.get(node, [])
if len(childs) > 0:
info['children'] = [lookup[k] for k in childs]
# if we have a mesh store by index
if 'geometry' in node_data[node]:
mesh_key = node_data[node]['geometry']
if mesh_key in mesh_index:
info['mesh'] = mesh_index[mesh_key]
# check to see if we have camera node
if has_camera and node == scene.camera.name:
info['camera'] = 0
if node != base_frame:
parent = graph.parents[node]
# get the matrix from this edge
matrix = edge_data[(parent, node)]['matrix']
                # only include if it's not an identity matrix
if np.abs(matrix - np.eye(4)).max() > 1e-5:
info['matrix'] = matrix.T.reshape(-1).tolist()
# if an extra was stored on this edge
extras = edge_data[(parent, node)].get('extras')
if extras:
# convert any numpy arrays to lists
extras.update(
{k: v.tolist() for k, v in extras.items()
if hasattr(v, 'tolist')})
info['extras'] = extras
return {'nodes': result}
def to_edgelist(self):
"""
Export the current transforms as a list of
edge tuples, with each tuple having the format:
(node_a, node_b, {metadata})
Returns
---------
edgelist : (n,) list
Of edge tuples
"""
# save cleaned edges
export = []
# loop through (node, node, edge attributes)
for edge, attr in self.transforms.edge_data.items():
# node indexes from edge
a, b = edge
# geometry is a node property but save it to the
# edge so we don't need two dictionaries
b_attr = self.transforms.node_data[b]
# make sure we're not stomping on original
attr_new = attr.copy()
# apply node geometry to edge attributes
if 'geometry' in b_attr:
attr_new['geometry'] = b_attr['geometry']
# convert any numpy arrays to regular lists
attr_new.update(
{k: v.tolist() for k, v in attr_new.items()
if hasattr(v, 'tolist')})
export.append((a, b, attr_new))
return export
def from_edgelist(self, edges, strict=True):
"""
Load transform data from an edge list into the current
scene graph.
Parameters
-------------
edgelist : (n,) tuples
Keyed (node_a, node_b, {key: value})
strict : bool
If True raise a ValueError when a
malformed edge is passed in a tuple.
"""
# loop through each edge
for edge in edges:
# edge contains attributes
if len(edge) == 3:
self.update(edge[1], edge[0], **edge[2])
# edge just contains nodes
elif len(edge) == 2:
self.update(edge[1], edge[0])
# edge is broken
elif strict:
                raise ValueError(
                    'edge incorrect shape: %s' % str(edge))
def load(self, edgelist):
"""
Load transform data from an edge list into the current
scene graph.
Parameters
-------------
edgelist : (n,) tuples
Structured (node_a, node_b, {key: value})
"""
self.from_edgelist(edgelist, strict=True)
@caching.cache_decorator
def nodes(self):
"""
A list of every node in the graph.
Returns
-------------
nodes : (n,) array
All node names.
"""
return self.transforms.nodes
@caching.cache_decorator
def nodes_geometry(self):
"""
The nodes in the scene graph with geometry attached.
Returns
------------
nodes_geometry : (m,) array
Node names which have geometry associated
"""
return [n for n, attr in
self.transforms.node_data.items()
if 'geometry' in attr]
@caching.cache_decorator
def geometry_nodes(self):
"""
Which nodes have this geometry? Inverse
of `nodes_geometry`.
Returns
------------
geometry_nodes : dict
          Keyed {geometry_name : [node_name, ...]}
"""
res = collections.defaultdict(list)
for node, attr in self.transforms.node_data.items():
if 'geometry' in attr:
res[attr['geometry']].append(node)
return res
def remove_geometries(self, geometries):
"""
Remove the reference for specified geometries
from nodes without deleting the node.
Parameters
------------
geometries : list or str
Name of scene.geometry to dereference.
"""
# make sure we have a set of geometries to remove
if util.is_string(geometries):
geometries = [geometries]
geometries = set(geometries)
# remove the geometry reference from the node without deleting nodes
# this lets us keep our cached paths, and will not screw up children
for node, attrib in self.transforms.node_data.items():
if 'geometry' in attrib and attrib['geometry'] in geometries:
attrib.pop('geometry')
# it would be safer to just run _cache.clear
# but the only property using the geometry should be
# nodes_geometry: if this becomes not true change this to clear!
self._cache.cache.pop('nodes_geometry', None)
def __contains__(self, key):
return key in self.transforms.node_data
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
value = np.asanyarray(value)
if value.shape != (4, 4):
raise ValueError('Matrix must be specified!')
return self.update(key, matrix=value)
def clear(self):
self.transforms = EnforcedForest()
self._cache.clear()
class EnforcedForest(object):
"""
A simple forest graph data structure: every node
is allowed to have exactly one parent. This makes
traversal and implementation much simpler than a
full graph data type; by storing only one parent
reference, it enforces the structure for "free."
"""
def __init__(self, **kwargs):
# since every node can have only one parent
# this data structure transparently enforces
# the forest data structure without checks
# a dict {child : parent}
self.parents = {}
# store data for a particular edge keyed by tuple
# {(u, v) : data }
self.edge_data = collections.defaultdict(dict)
# {u: data}
self.node_data = collections.defaultdict(dict)
# if multiple calls are made for the same path
# but the connectivity hasn't changed return cached
self._cache = {}
def add_edge(self, u, v, **kwargs):
"""
Add an edge to the forest cleanly.
Parameters
-----------
u : any
Hashable node key.
v : any
Hashable node key.
kwargs : dict
Stored as (u, v) edge data.
Returns
--------
changed : bool
Return if this operation changed anything.
"""
# topology has changed so clear cache
if (u, v) not in self.edge_data:
self._cache = {}
else:
# check to see if matrix and geometry are identical
edge = self.edge_data[(u, v)]
if (np.allclose(kwargs.get('matrix', np.eye(4)),
edge.get('matrix', np.eye(4)))
and (edge.get('geometry') ==
kwargs.get('geometry'))):
return False
# store a parent reference for traversal
self.parents[v] = u
# store kwargs for edge data keyed with tuple
self.edge_data[(u, v)] = kwargs
# set empty node data
self.node_data[u].update({})
if 'geometry' in kwargs:
self.node_data[v].update(
{'geometry': kwargs['geometry']})
else:
self.node_data[v].update({})
return True
def shortest_path(self, u, v):
"""
        Find the shortest path between `u` and `v`.
Note that it will *always* be ordered from
root direction to leaf direction, so `u` may
be either the first *or* last element.
Parameters
-----------
u : any
Hashable node key.
v : any
Hashable node key.
Returns
-----------
path : (n,)
Path between `u` and `v`
"""
# see if we've already computed this path
if u == v:
# the path between itself is an edge case
return []
elif (u, v) in self._cache:
# return the same path for either direction
return self._cache[(u, v)]
elif (v, u) in self._cache:
return self._cache[(v, u)]
# local reference to parent dict for performance
parents = self.parents
# store both forward and backwards traversal
forward = [u]
backward = [v]
# cap iteration to number of total nodes
for _ in range(len(parents) + 1):
# store the parent both forwards and backwards
forward.append(parents.get(forward[-1]))
backward.append(parents.get(backward[-1]))
if forward[-1] == v:
self._cache[(u, v)] = forward
return forward
elif backward[-1] == u:
# return reversed path
backward = backward[::-1]
self._cache[(u, v)] = backward
return backward
elif forward[-1] is None and backward[-1] is None:
raise ValueError('No path between nodes!')
raise ValueError('Iteration limit exceeded!')
@property
def nodes(self):
"""
Get a set of every node.
Returns
-----------
nodes : set
Every node currently stored.
"""
return set(self.node_data.keys())
@property
def children(self):
"""
Get the children of each node.
Returns
----------
children : dict
Keyed {node : [child, child, ...]}
"""
child = collections.defaultdict(list)
# append children to parent references
# skip self-references to avoid a node loop
[child[v].append(u) for u, v in
self.parents.items() if u != v]
# return as a vanilla dict
return dict(child)
def successors(self, node):
"""
Get all nodes that are successors to specified node,
including the specified node.
Parameters
-------------
node : any
Hashable key for a node.
Returns
------------
successors : set
Nodes that succeed specified node.
"""
# get mapping of {parent : child}
children = self.children
# if node doesn't exist return early
if node not in children:
return set([node])
# children we need to collect
queue = [node]
# start collecting values with children of source
collected = set(queue)
# cap maximum iterations
for _ in range(len(self.node_data) + 1):
if len(queue) == 0:
# no more nodes to visit so we're done
return collected
# add the children of this node to be processed
childs = children.get(queue.pop())
if childs is not None:
queue.extend(childs)
collected.update(childs)
return collected
def kwargs_to_matrix(
matrix=None,
quaternion=None,
translation=None,
axis=None,
angle=None,
**kwargs):
"""
Take multiple keyword arguments and parse them
    into a homogeneous transformation matrix.
Returns
---------
matrix : (4, 4) float
      Homogeneous transformation matrix.
"""
    if matrix is not None:
        # a matrix takes immediate precedence over other options
        result = np.array(matrix, dtype=np.float64)
    elif quaternion is not None:
        result = transformations.quaternion_matrix(quaternion)
    elif axis is not None and angle is not None:
        result = transformations.rotation_matrix(angle, axis)
    else:
        result = np.eye(4)
    if translation is not None:
        # translation can be used in conjunction with any
        # of the methods specifying transforms
        result[:3, 3] += translation
    return result
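# Hedged usage sketch (illustration only): combine an axis-angle rotation
# with a translation into one homogeneous matrix.
def _example_kwargs_to_matrix():
    combined = kwargs_to_matrix(axis=[0, 0, 1],
                                angle=np.pi / 2.0,
                                translation=[1.0, 2.0, 3.0])
    # the translation is applied on top of the rotation about the origin
    assert np.allclose(combined[:3, 3], [1.0, 2.0, 3.0])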
```
#### File: trimesh/trimesh/units.py
```python
from .constants import log
from . import resources
def unit_conversion(current, desired):
"""
Calculate the conversion from one set of units to another.
Parameters
---------
current : str
Unit system values are in now (eg 'millimeters')
desired : str
Unit system we'd like values in (eg 'inches')
Returns
---------
conversion : float
Number to multiply by to put values into desired units
"""
# scaling factors from various unit systems to inches
to_inch = resources.get(
'units_to_inches.json', decode_json=True)
current = str(current).strip().lower()
desired = str(desired).strip().lower()
conversion = to_inch[current] / to_inch[desired]
return conversion
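# Hedged usage sketch (illustration only): with the bundled unit table the
# millimeter-to-inch factor should match the standard 25.4 mm per inch, and
# a round trip through both directions multiplies back to one.
def _example_unit_conversion():
    factor = unit_conversion('millimeters', 'inches')
    assert abs(factor - 1.0 / 25.4) < 1e-3
    assert abs(factor * unit_conversion('inches', 'millimeters') - 1.0) < 1e-9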
def units_from_metadata(obj, guess=True):
"""
Try to extract hints from metadata and if that fails
guess based on the object scale.
Parameters
------------
obj: object
Has attributes 'metadata' (dict) and 'scale' (float)
guess : bool
If metadata doesn't indicate units, guess from scale
Returns
------------
units: str
A guess of what the units might be
"""
to_inch = resources.get(
'units_to_inches.json', decode_json=True)
# try to guess from metadata
for key in ['file_name', 'name']:
if key not in obj.metadata:
continue
# get the string which might contain unit hints
hints = obj.metadata[key].lower()
if 'unit' in hints:
# replace all delimiter options with white space
for delim in '_-.':
hints = hints.replace(delim, ' ')
# loop through each hint
for hint in hints.strip().split():
# key word is "unit" or "units"
if 'unit' not in hint:
continue
# get rid of keyword and whitespace
hint = hint.replace(
'units', '').replace(
'unit', '').strip()
# if the hint is a valid unit return it
if hint in to_inch:
return hint
if not guess:
raise ValueError('no units and not allowed to guess')
# we made it to the wild ass guess section
# if the scale is larger than 100 mystery units
# declare the model to be millimeters, otherwise inches
log.debug('no units: guessing from scale')
if float(obj.scale) > 100.0:
return 'millimeters'
else:
return 'inches'
def _convert_units(obj, desired, guess=False):
"""
Given an object with scale and units try to scale
to different units via the object's `apply_scale`.
Parameters
---------
obj : object
With apply_scale method (i.e. Trimesh, Path2D, etc)
desired : str
Units desired (eg 'inches')
guess: bool
Whether we are allowed to guess the units
if they are not specified.
"""
if obj.units is None:
# try to extract units from metadata
# if nothing specified in metadata and not allowed
# to guess will raise a ValueError
obj.units = units_from_metadata(obj, guess=guess)
log.debug('converting units from %s to %s', obj.units, desired)
# float, conversion factor
conversion = unit_conversion(obj.units, desired)
# apply scale uses transforms which preserve
# cached properties rather than just multiplying vertices
obj.apply_scale(conversion)
# units are now desired units
obj.units = desired
```
#### File: trimesh/visual/objects.py
```python
import numpy as np
from .material import from_color, pack
from .texture import TextureVisuals
from .color import ColorVisuals
def create_visual(**kwargs):
"""
Create Visuals object from keyword arguments.
Parameters
-----------
face_colors : (n, 3|4) uint8
Face colors
vertex_colors : (n, 3|4) uint8
Vertex colors
mesh : trimesh.Trimesh
Mesh object
Returns
----------
visuals : ColorVisuals
Visual object created from arguments
"""
return ColorVisuals(**kwargs)
def concatenate(visuals, *args):
"""
Concatenate multiple visual objects.
Parameters
----------
visuals : ColorVisuals or list
Visuals to concatenate
*args : ColorVisuals or list
More visuals to concatenate
Returns
----------
concat : Visuals
If all are color
"""
# get a flat list of Visuals objects
if len(args) > 0:
visuals = np.append(visuals, args)
else:
visuals = np.array(visuals)
# if there are any texture visuals convert all to texture
if any(v.kind == 'texture' for v in visuals):
# first collect materials and UV coordinates
mats = []
uvs = []
for v in visuals:
if v.kind == 'texture':
mats.append(v.material)
                if v.uv is None:
                    # if no UV coordinates exist fill with a 0.5 midpoint value
                    uvs.append(np.zeros((len(v.mesh.vertices), 2)) + 0.5)
                else:
                    # otherwise use the existing UV coordinates
                    uvs.append(v.uv)
else:
# create a material and UV coordinates from vertex colors
color_mat, color_uv = from_color(
vertex_colors=v.vertex_colors)
mats.append(color_mat)
uvs.append(color_uv)
# pack the materials and UV coordinates into one
new_mat, new_uv = pack(materials=mats, uvs=uvs)
return TextureVisuals(material=new_mat, uv=new_uv)
# convert all visuals to the kind of the first
kind = visuals[0].kind
if kind == 'face':
colors = np.vstack([
v.face_colors for v in visuals])
return ColorVisuals(face_colors=colors)
elif kind == 'vertex':
colors = np.vstack([
v.vertex_colors for v in visuals])
return ColorVisuals(vertex_colors=colors)
return ColorVisuals()
``` |
{
"source": "jpmattern/fda",
"score": 2
} |
#### File: jpmattern/fda/modelcomponents.py
```python
import numpy as np
#
# The parameters used in the functions below.
#
standard_parameters = {
# baseline irradiance parameter
'irr0':5.0,
# maximum rate in Michaelis Menten formulation
'Vmax':10.0,
# nutrient half saturation in Michaelis Menten formulation
'nuthalfsat':0.5,
# multiplicative grazing parameter
'grazphy':0.25,
# grazing parameter used in exponential functions
'grazlambda':0.5,
# maximum grazing rate
'grazmax':0.25,
# phytoplankton mortality rate
'mort_phy':0.2,
# zooplankton mortality rate
'mort_zoo':0.1,
}
#
# A selection of light response functions. Compare Table 1 in Franks (2002).
#
def lightresponse_linear(irr, parameters):
return irr/parameters['irr0']
def lightresponse_saturating(irr, parameters):
return irr/(parameters['irr0']+irr)
def lightresponse_exp(irr, parameters):
return 1.0 - np.exp(-irr/parameters['irr0'])
def lightresponse_tanh(irr, parameters):
    return np.tanh(irr/parameters['irr0'])
def lightresponse_inhibit(irr, parameters):
irr_norm = irr/parameters['irr0']
return irr_norm * np.exp(1.0-irr_norm)
#
# A selection of nutrient uptake functions. Compare Table 2 in Franks (2002).
#
def nutrientuptake_michaelismenten(nut, parameters):
    return parameters['Vmax']*nut/(parameters['nuthalfsat']+nut)
#
# A selection of zooplankton grazing functions. Compare Table 3 in Franks (2002).
#
def grazing_linear(phy, parameters):
return parameters['grazphy']*phy
def grazing_bilinear(phy, parameters):
    return np.minimum(parameters['grazphy']*phy, parameters['grazmax'])
def grazing_ivlev(phy, parameters):
return parameters['grazmax']*(1.0 - np.exp(-parameters['grazlambda']*phy))
#
# A selection of phytoplankton loss functions. Compare Table 4 in Franks (2002).
#
def phytoplanktonloss_linear(phy, parameters):
return parameters['mort_phy']
def phytoplanktonloss_quadratic(phy, parameters):
return parameters['mort_phy']*phy
#
# A selection of zooplankton loss functions. Compare Table 4 in Franks (2002).
#
def zooplanktonloss_linear(zoo, parameters):
return parameters['mort_zoo']
def zooplanktonloss_quadratic(zoo, parameters):
return parameters['mort_zoo']*zoo
#
# A generic function that can be used in place of any of the above in order to
# "switch off" a given segment. Using generic_nomod as the zooplankton grazing
# function, for example, will turn zooplankton grazing to zero.
#
def generic_nomod(*args, **kwargs):
return 0.0
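#
# Hedged usage sketch (illustration only, not part of the original module):
# the interchangeable functions above can be combined into a simple
# phytoplankton growth term, limited by both light and nutrients as in
# Franks (2002). Any component can be swapped out, or disabled by passing
# generic_nomod in its place.
#
def example_phytoplankton_growth(nut, phy, irr,
                                 parameters=standard_parameters,
                                 lightresponse=lightresponse_saturating,
                                 nutrientuptake=nutrientuptake_michaelismenten):
    return lightresponse(irr, parameters) * nutrientuptake(nut, parameters) * phy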
``` |
{
"source": "jpmcariry/kivy-delivey",
"score": 3
} |
#### File: kivy-delivey/App/main.py
```python
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.core.window import WindowBase
from kivy.uix.button import Button
from kivy.graphics import Canvas
from kivy.uix.scatter import Scatter
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import ObjectProperty, ReferenceListProperty, NumericProperty
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
import io
from kivy.core.image import Image as CoreImage
data = io.BytesIO(open("src\\img\\ifood-logo.png", "rb").read())
im = CoreImage(data, ext="png")
import sys
import os
class Programa(Screen, object):
x = NumericProperty(0)
y = NumericProperty(0)
size_btn_out = ReferenceListProperty(x, y)
def logar(self):
print(self.ids.login.text)
print(self.ids.senha.text)
if (self.ids.login.text == "test" and self.ids.senha.text == "test"):
{"login":self.ids.login.text,"senha":self.ids.senha.text}
self.manager.current = "crud"
else:
print("senha ou login errados")
class Crud(Screen, object):
def crud_print(self):
print("dentro da tela crud!!")
class Pesquisa(Screen, object):
def pesquisar(self):
print("dentro da tela !")
class WindowManager(ScreenManager, object):
pass
screen_manager = ScreenManager()
screen_manager.add_widget(Programa())
kv_file = Builder.load_file("main.kv")
class Editor(App):
def build(self):
return kv_file
if __name__ == '__main__':
Editor().run()
``` |
{
"source": "jpm-cbna/python-backend-tpl",
"score": 3
} |
#### File: api/descriptions/entities.py
```python
from datetime import datetime
from marshmallow import Schema, fields
from sqlalchemy import Column, String, Integer, Text, DateTime
from sqlalchemy.sql import func
from shared.entity import Base
from api import db
class Description(Base, db.Model):
__tablename__ = 'description'
#__table_args__ = {'schema': 'descriptions'}
id = Column(Integer, primary_key=True)
mnemonic = Column(String(250), nullable=False)
rank = Column(String(50), nullable=False)
raw_text = Column(Text, nullable=False)
order = Column(String(5))
sciname = Column(String(150))
relationships = Column(Text)
zoobank = Column(String(250))
type_locality = Column(Text)
material_examined = Column(Text)
diagnosis = Column(Text)
description = Column(Text)
subtaxa = Column(Text)
bionomics = Column(Text)
distribution = Column(Text)
etymology = Column(Text)
comments = Column(Text)
meta_user_id = Column(Integer, default=0)
meta_date = Column(DateTime, server_default=func.now())
meta_state = Column(String, default='A')
def __init__(self, mnemonic, rank, raw_text, created_by=0, id=''):
if id != '':
self.id = id
self.mnemonic = mnemonic
self.rank = rank
self.raw_text = raw_text
self.meta_user_id = created_by # 'Unknown'
self.meta_date = datetime.now()
self.meta_state = 'A' # Added
class DescriptionSchema(Schema):
id = fields.Integer()
mnemonic = fields.Str()
rank = fields.Str()
raw_text = fields.Str(data_key="rawText")
order = fields.Str()
sciname = fields.Str()
relationships = fields.Str()
zoobank = fields.Str()
type_locality = fields.Str(data_key="typeLocality")
material_examined = fields.Str(data_key="materialExamined")
diagnosis = fields.Str()
description = fields.Str()
subtaxa = fields.Str()
bionomics = fields.Str()
distribution = fields.Str()
etymology = fields.Str()
comments = fields.Str()
meta_user_id = fields.Integer(data_key="metaUserId")
meta_date = fields.Str(data_key="metaDate")
meta_state = fields.Str(data_key="metaState")
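# Hedged usage sketch (illustration only): serializing a Description with the
# schema above yields the camelCase keys declared via `data_key` (assumes
# marshmallow 3); the field values below are hypothetical.
def example_dump_description():
    description = Description(mnemonic='example-mnemonic',
                              rank='species',
                              raw_text='Original description text...')
    return DescriptionSchema().dump(description)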
```
#### File: api/versions/resources.py
```python
from flask import Blueprint, jsonify
# Versions infos
from flask import __version__ as flask_version
from marshmallow import __version__ as marshmallow_version
from sqlalchemy import __version__ as sqlalchemy_version
from sys import version as sys_version
resources = Blueprint('versions', __name__)
@resources.route('/versions', methods=['GET'])
def get_versions():
data = [
{
'name': 'Flask',
'version': flask_version,
'logoUrl': 'https://flask.palletsprojects.com/en/1.1.x/_images/flask-logo.png',
},
{
'name': 'Marshmallow',
'version': marshmallow_version,
'logoUrl': 'https://marshmallow.readthedocs.io/en/stable/_static/marshmallow-logo.png',
},
{
'name': 'Python',
'version': sys_version,
'logoUrl': 'https://www.python.org/static/community_logos/python-powered-h-140x182.png',
},
{
'name': 'Sqlalchemy',
'version': sqlalchemy_version,
'logoUrl': 'https://www.sqlalchemy.org/img/sqla_logo.png',
},
]
return jsonify(data)
``` |
{
"source": "jpmcb/RecoverME",
"score": 3
} |
#### File: jpmcb/RecoverME/sortingAndFilter.py
```python
from math import sqrt
import operator
class Point:
def __init__ (self, x, y, name):
self.x = x
self.y = y
self.name = name
#link class - url = URL of link, user_type = list of user types this link is for,
#key_words = relevant link keywords
class Link :
def __init__(self, url, user_type, key_words) :
self.url = url
self.user_type = user_type
self.key_words = key_words
#filter a list of links on the user type (string)
def filterByUserType(links, type) :
filtered = []
for i in links :
if type in i.user_type :
filtered.append(i)
return filtered
#quick and dirty filter by keywords
def filterByKeywords(links, keywords) :
filtered = []
for i in links :
for j in i.key_words :
if j in keywords :
filtered.append(i)
break
return filtered
#userType cannot be null else there's nothing to sort on. Keywords can be null.
#If keywords are not supplied then all links tailored to this user type are returned.
#If keywords are supplied then links tailored to the user and their preferences
#are returned.
def refineUserLinks(links, userType, keywords = "NULL") :
unrefinedLinks = filterByUserType(links, userType)
if keywords == "NULL" :
return unrefinedLinks
else:
return filterByKeywords(unrefinedLinks, keywords)
#clinic
#Parameters: location = 2 element list with x in position 0 and y in position 1,
#            facility_type is the kind of facility and should be of type string,
#            name is the name of the facility.
#Member variables: facility_type same as parameter, name same as parameter,
#                  location is a Point object created using the location list parameter.
class Clinic :
def __init__(self, name, location, facility_type) :
self.location = Point(location[0], location[1], name)
self.name = name
self.facility_type = facility_type
#filter facilities according to their primary mode of service
def filterByFacility(clinics, keywords) :
filteredList = {}
for i in clinics :
if i.facility_type in keywords :
filteredList[i.facility_type] = i
sortedTuple = sorted(filteredList.items(), key=operator.itemgetter(1))
return sortedTuple
def distance (myPoint, referencePoint):
return sqrt((myPoint.x - referencePoint.x)**2 + (myPoint.y - referencePoint.y)**2)
def sortByDistance(clinics, curLocation):
    sortedGPS = sorted(clinics, key=lambda c: distance(c.location, curLocation)) #sort function!
return sortedGPS
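#Hedged usage sketch (illustration only): build two hypothetical links and
#refine them for a "patient" user interested in "counseling"; the URLs and
#category names here are placeholders, not real resources.
def exampleRefineLinks():
    links = [Link("https://example.org/a", ["patient", "family"], ["counseling"]),
             Link("https://example.org/b", ["provider"], ["training"])]
    refined = refineUserLinks(links, "patient", keywords=["counseling"])
    return [link.url for link in refined]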
``` |
{
"source": "jpmchargue/whisk",
"score": 3
} |
#### File: jpmchargue/whisk/whisk.py
```python
import os
import time
import gentle
import multiprocessing
import shutil
import subprocess
import sys
import re
import pronouncing as pro
from pydub import AudioSegment
from pydub.playback import play
import PySimpleGUI as gui
def setUpProject(name):
createDirectory("mixes/" + name)
createDirectory("mixes/" + name + "/streams")
createDirectory("mixes/" + name + "/transcripts")
createDirectory("mixes/" + name + "/words")
createDirectory("mixes/" + name + "/phonemes")
createDirectory("mixes/" + name + "/outputs")
def createDirectory(path):
try:
os.mkdir(path)
except OSError as error:
pass
def parseStream(project, name):
print('Parsing "' + name + '" for ' + project + '...')
projectPath = "mixes/" + project
phonemes = []
words = []
# Alignment setup
resources = gentle.Resources()
# Read transcript
with open(projectPath + "/transcripts/" + name + ".txt", encoding="utf-8") as tx:
transcript = tx.read()
# Perform forced alignment
with gentle.resampled(projectPath + "/streams/" + name + ".wav") as wavfile:
aligner = gentle.ForcedAligner(resources, transcript, nthreads=multiprocessing.cpu_count(), disfluency=False, conservative=False)
result = aligner.transcribe(wavfile)
# Assemble word and phoneme timestamp lists
end = 0
for w in result.words:
if w.start is not None:
start = int(w.start * 1000)
if start > end:
phonemes.append("SIL:" + str(end) + ":" + str(start))
end = int(w.end * 1000)
words.append(w.word.upper() + ":" + str(start) + ":" + str(end))
for p in w.phones:
syllable = p.get('phone').split('_')[0].upper()
phoneStart = start
start = start + int(p.get('duration') * 1000)
phonemes.append(syllable + ":" + str(phoneStart) + ":" + str(start))
# save phoneme timestamp list
with open(projectPath + "/phonemes/" + name, "w") as phonemeFile:
for p in phonemes:
phonemeFile.write(p + "\r\n")
phonemeFile.close()
# save word timestamp list
with open(projectPath + "/words/" + name, "w") as wordFile:
for w in words:
wordFile.write(w + "\r\n")
wordFile.close()
def importStream(project, streamPath):
print("Importing " + streamPath + "...")
fileName = streamPath.split('/')[-1].split('.')[0]
fileType = streamPath.split('.')[-1]
if fileType == "wav":
shutil.copy(streamPath, 'mixes/' + project + '/streams/' + fileName + '.wav')
return 1
elif fileType == "mp3":
subprocess.call(['ffmpeg', '-i', streamPath, 'mixes/' + project + '/streams/' + fileName + '.wav'])
return 1
else:
print(streamPath + " cannot be imported-- Whisk can only import .wav and .mp3 files.")
return 0
# This function must be run after importTranscriptFolder().
def importStreamFolder(project, streamFolderPath, window):
print("Importing " + streamFolderPath + "...")
window.refresh()
for stream in os.listdir(streamFolderPath):
importStream(project, streamFolderPath + '/' + stream)
window.refresh()
parseStream(project, stream.split('.')[0])
def importTranscript(project, transcriptPath):
fileName = transcriptPath.split('/')[-1].split('.')[0]
fileType = transcriptPath.split('.')[-1]
if fileType == "txt":
shutil.copy(transcriptPath, 'mixes/' + project + '/transcripts/' + fileName + '.txt')
return 1
return 0
def importTranscriptFolder(project, transcriptFolderPath):
for transcript in os.listdir(transcriptFolderPath):
importTranscript(project, transcriptFolderPath + '/' + transcript)
def parseAllInFolder(project, ogStreamFolder):
for stream in os.listdir(ogStreamFolder):
name = stream.split('.')[0]
parseStream(project, name)
def createTranscript(project, streamName, text):
with open('mixes/' + project + '/transcripts/' + streamName + '.txt', 'w') as transcript:
transcript.write(text)
def findSubsequence(master, sub):
subLength = len(sub);
for n in range(0, 1 + (len(master) - subLength)):
#print("comparing " + str(master[n:n+subLength]) + " to " + str(sub))
if master[n:n+subLength] == sub:
return n
return -1
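# Hedged example (illustration only): the two-word sequence below starts at
# index 1 of the master list, and a sequence that never occurs returns -1.
def exampleFindSubsequence():
    master = ["HELLO", "HOW", "ARE", "YOU"]
    assert findSubsequence(master, ["HOW", "ARE"]) == 1
    assert findSubsequence(master, ["ARE", "HELLO"]) == -1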
def generateWordSequence(seq, window):
seqString = ' '.join(seq)
if seqString in wordLibrary:
return wordLibrary[seqString]
longest = (AudioSegment.silent(duration=1), 0)
secondLongest = (AudioSegment.silent(duration=1), 0)
shortest = (AudioSegment.silent(duration=1), sys.maxsize)
for wordDCFile in os.listdir(projectPath + '/words'):
inputWords = []
wordStarts = []
wordEnds = []
with open(projectPath + '/words/' + wordDCFile, "r") as wordDC:
for line in wordDC:
row = line.split(':')
inputWords.append(row[0])
wordStarts.append(int(row[1]))
wordEnds.append(int(row[2]))
loc = findSubsequence(inputWords, seq)
if loc > -1: # the entire sequence was found intact
length = wordEnds[loc + len(seq) - 1] - wordStarts[loc]
print('Found instance of "' + seqString + '" in ' + wordDCFile + ' of length ' + str(length) + '!')
window.refresh() # I hate that I have to do this, but it works
if length >= 100:
if length < shortest[1]:
sound = AudioSegment.from_wav(projectPath + "/streams/" + wordDCFile + ".wav")
shortest = (sound[wordStarts[loc]:wordEnds[loc + len(seq) - 1]], length)
if length > longest[1]:
sound = AudioSegment.from_wav(projectPath + "/streams/" + wordDCFile + ".wav")
secondLongest = longest
longest = (sound[wordStarts[loc]:wordEnds[loc + len(seq) - 1]], length)
elif length > secondLongest[1]:
sound = AudioSegment.from_wav(projectPath + "/streams/" + wordDCFile + ".wav")
secondLongest = (sound[wordStarts[loc]:wordEnds[loc + len(seq) - 1]], length)
# Return the second-longest instance of the word.
# Generally, longer instances of words are better for mixes, as they tend to be spoken more clearly.
# However, Gentle's alignment has a few bugs, and what appears to be the longest instance of a word
# is occasionally an incorrect outliar that 'stole' some of the following word.
# Using these buggy words produces low-quality results, since they introduce garbage syllables into the mix.
# So, to avoid this Whisk always attempts to use the second-longest instance of a word.
# If only one instance was found, it uses that.
if returnLongest is True:
if longest[1] > 0:
wordLibrary[seqString] = (longest[0], 1)
return (longest[0], 1)
else:
if secondLongest[1] > 0 and secondLongest[1] != shortest[1]:
wordLibrary[seqString] = (secondLongest[0], 1)
return (secondLongest[0], 1)
elif longest[1] > 0:
wordLibrary[seqString] = (longest[0], 1)
return (longest[0], 1)
# if the sequence is only one word and it was not found, construct it
sequenceLength = len(seq)
if sequenceLength == 1:
options = pro.phones_for_word(seq[0])
if len(options) == 0:
raise ValueError("No examples or pronunciations could be found for " + seq[0] + "! Don't use proper nouns- try replacing it with a similar-sounding word, or multiple shorter words.")
else:
phonemeString = ''.join([c for c in options[0] if not c.isdigit()])
phonemeSequence = phonemeString.split(' ')
result = generatePhonemeSequence(phonemeSequence, window)
return (result[0] + AudioSegment.silent(duration=75), result[1])
# multi-word sequence was not found in one piece- split and recurse
optimal = (AudioSegment.silent(duration=1), sys.maxsize)
for i in range(1, sequenceLength):
resultA = generateWordSequence(seq[0:i], window)
resultB = generateWordSequence(seq[i:sequenceLength], window)
if (resultA[1] + resultB[1]) < optimal[1]:
optimal = (resultA[0] + resultB[0], resultA[1] + resultB[1])
wordLibrary[seqString] = optimal
return optimal
def generatePhonemeSequence(seq, window):
seqString = ' '.join(seq)
if seqString in phoneLibrary:
return phoneLibrary[seqString]
longest = (AudioSegment.silent(duration=1), 0)
secondLongest = (AudioSegment.silent(duration=1), 0)
for phoneDCFile in os.listdir(projectPath + '/phonemes'):
inputPhones = []
phoneStarts = []
phoneEnds = []
with open(projectPath + "/phonemes/" + phoneDCFile, "r") as phoneDC:
for line in phoneDC:
row = line.split(':')
inputPhones.append(row[0])
phoneStarts.append(int(row[1]))
phoneEnds.append(int(row[2]))
loc = findSubsequence(inputPhones, seq)
if loc > -1:
length = phoneEnds[loc + len(seq) - 1] - phoneStarts[loc]
print('Found instance of (' + ' '.join(seq) + ') with length: ' + str(length))
window.refresh()
if length >= (70 * len(seq)):
if length > longest[1]:
sound = AudioSegment.from_wav(projectPath + "/streams/" + phoneDCFile + ".wav")
secondLongest = longest
longest = (sound[phoneStarts[loc]:phoneEnds[loc + len(seq) - 1]], length)
elif length > secondLongest[1]:
sound = AudioSegment.from_wav(projectPath + "/streams/" + phoneDCFile + ".wav")
secondLongest = (sound[phoneStarts[loc]:phoneEnds[loc + len(seq) - 1]], length)
if returnLongest is True:
if longest[1] > 0:
phoneLibrary[seqString] = (longest[0], 1)
return (longest[0], 1)
else:
if secondLongest[1] > 0:
phoneLibrary[seqString] = (secondLongest[0], 1)
return (secondLongest[0], 1)
elif longest[1] > 0:
phoneLibrary[seqString] = (longest[0], 1)
return (longest[0], 1)
sequenceLength = len(seq)
if sequenceLength <= 1:
raise ValueError("The phoneme (" + seq[0] + ") is needed for the desired output, but it is not present in any input streams!")
optimal = (AudioSegment.silent(duration=1), sys.maxsize)
for i in range(1, sequenceLength):
resultA = generatePhonemeSequence(seq[0:i], window)
resultB = generatePhonemeSequence(seq[i:sequenceLength], window)
if (resultA[1] + resultB[1]) < optimal[1]:
optimal = (resultA[0] + resultB[0], resultA[1] + resultB[1])
phoneLibrary[seqString] = optimal
return optimal
def assembleMix(projectName, targetString, rL, doExport, saveName, window):
global wordLibrary
global phoneLibrary
global projectPath
global returnLongest
returnLongest = rL
startTime = time.time()
projectPath = 'mixes/' + projectName
wordLibrary = {}
phoneLibrary = {}
if doExport is True and saveName == '':
print("Export failed-- file name cannot be blank.")
window.refresh()
return
# Clean up and/or mark punctuation
temp = targetString.upper()
temp = temp.replace('\n', ' ')
temp = temp.replace('*', ' ')
temp = temp.replace('-', ' ')
temp = temp.replace('. ', ' *LONGPAUSE* ')
temp = temp.replace('! ', ' *LONGPAUSE* ')
temp = temp.replace('? ', ' *LONGPAUSE* ')
temp = temp.replace(', ', ' *SHORTPAUSE* ')
temp = temp.replace(': ', ' *LONGPAUSE* ')
temp = temp.replace('; ', ' *LONGPAUSE* ')
temp = temp.replace('.', '')
acc = AudioSegment.silent(duration=1)
for sentence in temp.split(' *LONGPAUSE* '):
for clause in sentence.split(' *SHORTPAUSE* '):
print('Assembling clause "' + clause + '"')
window.refresh()
acc = acc + generateWordSequence([c for c in clause.split(' ') if c != ''], window)[0] + AudioSegment.silent(duration=200)
acc = acc + AudioSegment.silent(duration=500)
totalTime = time.time() - startTime
print("SUCCESS! Mix completed in " + str(totalTime) + " seconds.")
window.refresh()
if doExport is False:
play(acc)
else:
acc.export(projectPath + "/outputs/" + saveName + ".wav", format="wav")
print("The mix was exported successfully to " + projectPath + "/outputs/" + saveName + ".wav!")
window.refresh()
def checkForPhoneme(project, phoneme):
for phoneFile in os.listdir("mixes/" + project + "/phonemes"):
with open("mixes/" + project + "/phonemes/" + phoneFile, 'r') as phoneList:
for line in phoneList:
if line.split(':')[0] == phoneme:
return True
return False
def getAvailableWords(project):
words = set()
for wordFile in os.listdir("mixes/" + project + "/words"):
with open("mixes/" + project + "/words/" + wordFile, 'r') as wordList:
for line in wordList:
words.add(line.split(':')[0])
return words
``` |
{
"source": "jpmckinney/lib-cove-web",
"score": 2
} |
#### File: management/commands/expire_files.py
```python
from django.core.management.base import BaseCommand
from cove.input.models import SuppliedData
from django.conf import settings
from django.utils import timezone
from datetime import timedelta
import shutil
class Command(BaseCommand):
    help = 'Delete supplied data files older than DELETE_FILES_AFTER_DAYS days (default 7)'
def handle(self, *args, **options):
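        # Supplied data older than DELETE_FILES_AFTER_DAYS (default 7 days) is deleted along with its upload directory.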
old_data = SuppliedData.objects.filter(created__lt=timezone.now() - timedelta(days=getattr(settings, 'DELETE_FILES_AFTER_DAYS', 7)))
for supplied_data in old_data:
try:
shutil.rmtree(supplied_data.upload_dir())
except FileNotFoundError:
continue
``` |
{
"source": "jpmckinney/mdformat-myst",
"score": 2
} |
#### File: mdformat-myst/mdformat_myst/_directives.py
```python
from __future__ import annotations
from collections.abc import Mapping, MutableMapping, Sequence
import io
from markdown_it import MarkdownIt
from mdformat.renderer import LOGGER, RenderContext, RenderTreeNode
import ruamel.yaml
yaml = ruamel.yaml.YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
def longest_consecutive_sequence(seq: str, char: str) -> int:
"""Return length of the longest consecutive sequence of `char` characters
in string `seq`."""
assert len(char) == 1
longest = 0
current_streak = 0
for c in seq:
if c == char:
current_streak += 1
else:
current_streak = 0
if current_streak > longest:
longest = current_streak
return longest
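# Example: longest_consecutive_sequence("a~~b~~~c", "~") returns 3 (the run of three tildes).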
def fence(node: "RenderTreeNode", context: "RenderContext") -> str:
"""Render fences (and directives).
Copied from upstream `mdformat` core and should be kept up-to-date
if upstream introduces changes. Note that only two lines are added
to the upstream implementation, i.e. the condition that calls
`format_directive_content` function.
"""
info_str = node.info.strip()
lang = info_str.split(maxsplit=1)[0] if info_str else ""
code_block = node.content
    # Info strings of backtick code fences cannot contain backticks or tildes.
# If that is the case, we make a tilde code fence instead.
if "`" in info_str or "~" in info_str:
fence_char = "~"
else:
fence_char = "`"
# Format the code block using enabled codeformatter funcs
if lang in context.options.get("codeformatters", {}):
fmt_func = context.options["codeformatters"][lang]
try:
code_block = fmt_func(code_block, info_str)
except Exception:
# Swallow exceptions so that formatter errors (e.g. due to
# invalid code) do not crash mdformat.
assert node.map is not None, "A fence token must have `map` attribute set"
LOGGER.warning(
f"Failed formatting content of a {lang} code block "
f"(line {node.map[0] + 1} before formatting)"
)
# This "elif" is the *only* thing added to the upstream `fence` implementation!
elif lang.startswith("{") and lang.endswith("}"):
code_block = format_directive_content(code_block)
# The code block must not include as long or longer sequence of `fence_char`s
# as the fence string itself
fence_len = max(3, longest_consecutive_sequence(code_block, fence_char) + 1)
fence_str = fence_char * fence_len
return f"{fence_str}{info_str}\n{code_block}{fence_str}"
def format_directive_content(raw_content: str) -> str:
parse_result = parse_opts_and_content(raw_content)
if not parse_result:
return raw_content
unformatted_yaml, content = parse_result
dump_stream = io.StringIO()
try:
parsed = yaml.load(unformatted_yaml)
yaml.dump(parsed, stream=dump_stream)
except ruamel.yaml.YAMLError:
LOGGER.warning("Invalid YAML in MyST directive options.")
return raw_content
formatted_yaml = dump_stream.getvalue()
# Remove the YAML closing tag if added by `ruamel.yaml`
if formatted_yaml.endswith("\n...\n"):
formatted_yaml = formatted_yaml[:-4]
# Convert empty YAML to most concise form
if formatted_yaml == "null\n":
formatted_yaml = ""
formatted = "---\n" + formatted_yaml + "---\n"
if content:
formatted += content + "\n"
return formatted
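# Note: parse_opts_and_content (below) accepts both "---"-fenced YAML options and ":key: value" option lines (e.g. ":maxdepth: 2"); format_directive_content always re-emits them in the "---"-fenced form.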
def parse_opts_and_content(raw_content: str) -> tuple[str, str] | None:
lines = raw_content.splitlines()
line = lines.pop(0)
yaml_lines = []
if all(c == "-" for c in line) and len(line) >= 3:
while lines:
line = lines.pop(0)
if all(c == "-" for c in line) and len(line) >= 3:
break
yaml_lines.append(line)
elif line.lstrip().startswith(":"):
yaml_lines.append(line.lstrip()[1:])
while lines:
if not lines[0].lstrip().startswith(":"):
break
line = lines.pop(0).lstrip()[1:]
yaml_lines.append(line)
else:
return None
first_line_is_empty_but_second_line_isnt = (
len(lines) >= 2 and not lines[0].strip() and lines[1].strip()
)
exactly_one_empty_line = len(lines) == 1 and not lines[0].strip()
if first_line_is_empty_but_second_line_isnt or exactly_one_empty_line:
lines.pop(0)
unformatted_yaml = "\n".join(yaml_lines)
content = "\n".join(lines)
return unformatted_yaml, content
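# Stub renderer: fences produce no HTML output here, presumably because mdformat only needs the parsed token stream, not rendered HTML.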
def render_fence_html(
self: MarkdownIt, tokens: Sequence, idx: int, options: Mapping, env: MutableMapping
) -> str:
return ""
``` |
{
"source": "Jpmcrespo/Redes-de-Computadores",
"score": 3
} |
#### File: Redes-de-Computadores/frances/TRS.py
```python
import socket
import sys
import signal
import os
import time
import traceback
BUFFER_SIZE=1024
invalidArgs='Invalid arguments.\nusage: python3 TRS.py language [-p TRSport] [-n TCSname] [-e TCSport]'
portMsg="port must be an integer between 0-65535"
class ArgumentsError(Exception):
def __init__(self, message):
self.message=message
def sendMsg(sock, ipAddress, port, message):
sock.sendto(message.encode(), (ipAddress, port))
response=sock.recv(BUFFER_SIZE)
return (response.decode())
#---------------------------------------------------------------------------------
# Protocol Verification
#---------------------------------------------------------------------------------
def protocolSyntaxVerification(msg):
'''Protocol Verification for most messages'''
if " " in msg or msg[-1]!="\n" or msg[0]==" " or " \n" in msg:
return False
return True
def protocolSyntaxVerification2(msg):
'''Protocol Verification for file transfer case'''
if " " in msg or msg[0]==" " or " \n" in msg:
return False
return True
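# Example: "TRQ t 2 hello world\n" passes these checks, while a message with a leading space, repeated spaces, a space before the newline, or no trailing newline is rejected.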
#---------------------------------------------------------------------------------
# TCS Communication
#---------------------------------------------------------------------------------
#-------------------------------Registration--------------------------------------
def RegisterServer(TCS, language,port):
'''Informs TCS that this server is available for the translation of the provided language'''
try:
UDP_socket= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
UDP_socket.settimeout(5)
        # No need to bind this socket: the OS assigns a local port automatically when sending.
        # Only sockets that must receive before sending need an explicit bind, which is not the case here.
RegMsg="SRG "+ language+ " "+ socket.gethostname()+" "+ str(port)+"\n"
command=sendMsg(UDP_socket, TCS['ip'], TCS['port'], RegMsg).split()
UDP_socket.close()
if command[0]=="SRR":
if command[1]=="OK":
print ("Successfully registered Translation Server.")
elif command[1]=="NOK":
print ("Registration refused, exiting")
sys.exit(-1)
elif command[1]=="ERR":
print ("Registration Error, exiting")
sys.exit(-1)
except socket.timeout:
sys.exit("Request to register timed out.\nExiting...")
#-----------------------------UnRegistration--------------------------------------
def UnRegisterServer(TCS, language,port):
'''Informs TCS that this server is no longer available for the translation of the provided language'''
try:
UDP_socket= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
UDP_socket.settimeout(5)
        # No need to bind this socket: the OS assigns a local port automatically when sending.
        # Only sockets that must receive before sending need an explicit bind, which is not the case here.
RegMsg="SUN "+ language+ " "+ socket.gethostname()+" "+ str(port)+"\n"
command=sendMsg(UDP_socket, TCS['ip'], TCS['port'], RegMsg).split()
UDP_socket.close()
if command[0]=="SUR":
if command[1]=="OK":
print ("Successfully unregistered Translation Server.\nExiting...")
elif command[1]=="NOK":
print ("Unregistration refused.")
elif command[1]=="ERR":
print ("Unregistration Error.")
sys.exit()
except socket.timeout:
sys.exit("Request to unregister timed out.\nExiting...\n")
#---------------------------------------------------------------------------------
# Word Translation
#---------------------------------------------------------------------------------
def translateWordList(Client, language, wordlist):
'''Sends to the client the translated wordlist'''
print (Client['ip'] + " "+ str(Client['port'])+ ": "+ " ".join(wordlist))
result=""
langFile= open("text_translation.txt", 'r')
for word in wordlist:
result+=getTranslation(langFile, word)+" "
result=result.strip()
print(result+" ("+str(len(result.split()))+")")
if "NTA" in result:
message="TRR NTA\n"
else:
message="TRR t "+str(len(wordlist))+" "+result+"\n"
Client['socket'].send(message.encode())
def getTranslation(file, word):
'''finds and gets the translation of the specified word'''
file.seek(0)
for line in file:
trans=line.split()
if word==trans[0]:
return trans[1]
return "NTA"
#---------------------------------------------------------------------------------
# File Translation
#---------------------------------------------------------------------------------
def receiveFile(Client, size):
file=open("TRSreceived.png","wb")
buff=""
while(size>-1):
buff=Client['socket'].recv(BUFFER_SIZE)
file.write(buff)
size-=len(buff)
if buff[-1]!=10:
raise ValueError
file.seek(-1, os.SEEK_END)
file.truncate()
file.close()
print (str(os.path.getsize("TRSreceived.png")) + " Bytes received")
def translate(Client, language,port):
aux= Client['socket'].recv(BUFFER_SIZE)
aux=aux.decode()
received=aux.split()
if received[0]=="TRQ":
if received[1]=="t":
            # protocol checks: well-formed syntax and a word count that matches the header
if not protocolSyntaxVerification(aux):
raise ValueError
if int(received[2])!=len(received)-3:
raise ValueError
translateWordList(Client, language, received[3:])
elif received[1]=="f":
if not protocolSyntaxVerification2(aux):
raise ValueError
if len(received)!=4:
raise IndexError
print (Client['ip']+ " "+ str(Client['port'])+ " " +received[2])
receiveFile(Client, int(received[3]))
sendBack(Client, language, received[2])
else:
raise ValueError
def sendBack(Client, language, filename):
langFile= open("file_translation.txt", 'r')
filename=getTranslation(langFile, filename)
file=open(filename,"rb")
size=os.path.getsize(filename)
print (filename+ " ("+str(size)+" Bytes)")
message= "TRR f " + filename + " " + str(size) + " "
Client['socket'].send(message.encode())
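    # Short pause, presumably so the client reads the "TRR f ..." header in its first recv before the raw file bytes arrive on the same stream.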
time.sleep(0.005)
while(size>0):
buff=file.read(BUFFER_SIZE)
Client['socket'].send(buff)
size-=len(buff)
Client['socket'].send("\n".encode())
Client['socket'].shutdown(socket.SHUT_WR)
#---------------------------------------------------------------------------------
# Argument Validation
#---------------------------------------------------------------------------------
def validateArgs(TCS):
'''validates the arguments given to the program upon runtime'''
try:
arguments=sys.argv
port=59000
if len(arguments)%2!=0:
raise ArgumentsError(invalidArgs)
i=2
p,n,e=1,1,1
while i<len(arguments):
if arguments[i]=="-p" and p:
port= int(arguments[i+1])
if port not in range(65536):
raise ValueError
p=0
elif arguments[i]=="-n" and n:
TCS['name']=arguments[i+1]
n=0
elif arguments[i]=="-e" and e:
TCS['port']=int(arguments[i+1])
if TCS['port'] not in range(65536):
raise ValueError
e=0
else:
                raise ArgumentsError(invalidArgs)
i+=2
test=socket.gethostbyname(TCS['name'])
return port
except ValueError as e:
sys.exit("port must be an integer between 0-65535")
except ArgumentsError as error:
sys.exit(error)
except IndexError:
sys.exit(invalidArgs)
#---------------------------------------------------------------------------------
# Main
#---------------------------------------------------------------------------------
def main():
try:
language=sys.argv[1]
TCS={'name':socket.gethostname(),'port':58056}
port=validateArgs(TCS)
TCS['ip']=socket.gethostbyname(TCS['name'])
TCP_socket= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCP_socket.bind((socket.gethostbyname(socket.gethostname()),port))
TCP_socket.listen(1)
Client={}
RegisterServer(TCS, language,port)
while(1):
try:
Client['socket'] , (Client['ip'], Client['port'])=TCP_socket.accept()
translate(Client, language,port)
except KeyboardInterrupt:
UnRegisterServer(TCS,language,port)
sys.exit()
except FileNotFoundError:
Client['socket'].send('TRR NTA\n'.encode())
except:
traceback.print_exc()
Client['socket'].send('TRR ERR\n'.encode())
except socket.error as error:
sys.exit(error)
main()
```
#### File: Jpmcrespo/Redes-de-Computadores/TCS.py
```python
import socket
import sys
import traceback
import time
BUFFER_SIZE=1024
invalidArgs='\nInvalid arguments.\nusage: python3 TCS.py [-p TCSport]'
portMsg="port must be an integer between 0-65535"
class ArgumentsError(Exception):
def __init__(self, message):
self.message=message
#---------------------------------------------------------------------------------
# List operation
#---------------------------------------------------------------------------------
def sendList(sock, ipAddress, port, lst):
'''sends the list of available languages to the Client'''
print ("List request: "+socket.gethostbyaddr(ipAddress)[0]+ " "+ str(port))
print(" ".join(lst.keys()))
if len(lst)==0:
Msg='ULR EOF\n'
else:
Msg= "ULR "+str(len(lst))+ " "
Msg+=" ".join(lst)+ "\n"
sock.sendto(Msg.encode(), (ipAddress, port))
#---------------------------------------------------------------------------------
# TRServer Operations
#---------------------------------------------------------------------------------
def RegisterServer(language, name, port, LanguageList ):
'''registers a TRS in LanguageList to let TCS know that a new language is available for translation'''
Msg="SRR"
if language in LanguageList:
Msg+=" NOK\n"
else:
Msg+=" OK\n"
print("+"+language+" "+name+" "+port)
LanguageList[language]=[name,port]
return Msg
def UnRegisterServer(language, name, port, LanguageList ):
'''unregisters a TRS in LanguageList to let TCS know that the specified language is not available for translation anymore'''
Msg="SUR "
if language in LanguageList:
Msg+="OK\n"
print("-"+language+" "+name+" "+port)
del LanguageList[language]
else:
Msg+="NOK\n"
return Msg
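# LanguageList maps each registered language to [TRS hostname, TRS port]; duplicate registrations for a language are refused with NOK.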
#---------------------------------------------------------------------------------
# Argument Validation
#---------------------------------------------------------------------------------
def validateArgs():
'''validates the arguments given to the program upon runtime'''
try:
arguments=sys.argv
port=58056
if len(arguments)>3:
raise ArgumentsError(invalidArgs)
if len(arguments)>2:
if arguments[1]=="-p":
port=int(arguments[2])
if port not in range(65536):
raise ValueError
return port
raise ArgumentsError(invalidArgs)
else:
return port
except ValueError as e:
sys.exit(portMsg)
except IndexError:
sys.exit(invalidArgs)
except ArgumentsError as err:
sys.exit(err)
#---------------------------------------------------------------------------------
# Protocol Syntax Verification
#---------------------------------------------------------------------------------
def protocolSyntaxVerification(msg):
if " " in msg or msg[-1]!="\n" or msg[0]==" " or " \n" in msg:
return False
return True
def sendTRScred(sock, language, LanguageList, Host_Address,Host_Port):
if language not in LanguageList:
Msg= "UNR EOF\n"
else:
name, port= LanguageList[language][0], LanguageList[language][1]
Msg="UNR "+ socket.gethostbyname(name) + " " + port+"\n"
sock.sendto(Msg.encode(), (Host_Address, Host_Port))
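# Reply format: "UNR <TRS ip> <TRS port>\n" when the language is available, "UNR EOF\n" otherwise.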
#---------------------------------------------------------------------------------
# Main
#---------------------------------------------------------------------------------
def main():
port=validateArgs()
LanguageList={}
UDP_socket= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
UDP_socket.bind((socket.gethostbyname(socket.gethostname()), port))
while(True):
        command, (Host_Address, Host_Port) = UDP_socket.recvfrom(BUFFER_SIZE)  # recvfrom returns (data, (host, port))
command= command.decode()
if not protocolSyntaxVerification(command):
print("Invalid Request: Protocol mismatch")
continue
command=command.split()
if command[0]=="ULQ":
if len(command)==1:
sendList(UDP_socket, Host_Address, Host_Port, LanguageList)
else:
UDP_socket.sendto("ULR ERR\n".encode(), (Host_Address, Host_Port))
elif command[0]=="UNQ":
if len(command)==2:
Lang=command[1]
sendTRScred(UDP_socket, Lang, LanguageList, Host_Address,Host_Port)
else:
UDP_socket.sendto("UNR ERR\n".encode(), (Host_Address, Host_Port))
elif command[0]=="SRG":
try:
Lang, name, port=command[1], command[2], command[3]
Msg=RegisterServer(Lang, name, port, LanguageList)
except Exception:
Msg="SRR ERR\n"
finally:
UDP_socket.sendto(Msg.encode(), (Host_Address, Host_Port))
elif command[0]=="SUN":
try:
Lang, name, port=command[1], command[2], command[3]
Msg=UnRegisterServer(Lang, name, port, LanguageList)
except Exception:
Msg="SUR ERR\n"
finally:
UDP_socket.sendto(Msg.encode(), (Host_Address, Host_Port))
else:
print("Invalid Request: Protocol mismatch")
try:
main()
except socket.error as error:
sys.exit(error)
except KeyboardInterrupt:
sys.exit("Exiting...\n")
``` |
{
"source": "jpmec/loclasspy",
"score": 4
} |
#### File: loclasspy/tests/test_iterators.py
```python
from unittest import TestCase
class TestIterators(TestCase):
def test_cannot_iterate_integer(self):
a = 1
with self.assertRaises(TypeError):
iter(a)
def test_iterate_empty_list(self):
a = []
i = iter(a)
print(type(i))
print(i)
with self.assertRaises(StopIteration):
next(i)
def test_can_iterate_list(self):
a = [1, 2]
i = iter(a)
print(type(i))
print(i)
x = next(i)
print(type(x))
print(x)
x = next(i)
print(type(x))
print(x)
with self.assertRaises(StopIteration):
next(i)
def test_for_in_list(self):
a = [1, 2]
# these two loops are equivalent
for x in iter(a):
print(x)
for x in a:
print(x)
def test_iterator_unpacking(self):
a = [1, 2]
x, y = iter(a)
print(x)
print(y)
x, y = a
print(x)
print(y)
```
#### File: loclasspy/tests/test_tuples.py
```python
from unittest import TestCase
class TestTuples(TestCase):
def test_empty_tuple(self):
t = ()
print(type(t))
print(t)
self.assertEqual(0, len(t))
def test_one_element_tuple(self):
t = (0,)
print(type(t))
print(t)
self.assertEqual(1, len(t))
def test_two_element_tuple(self):
t = (0, 1)
print(type(t))
print(t)
print(t[0])
print(t[1])
self.assertEqual(2, len(t))
def test_two_element_tuple_with_commas(self):
t = 0, 1
print(type(t))
print(t)
self.assertEqual(2, len(t))
def test_tuple_modify(self):
t = 0, 1, 2
with self.assertRaises(TypeError):
t[1] = 42
def test_tuple_zip_two(self):
from loclasspy.fizzbuzz import return_fizzbuzz
a = range(1, 101)
b = map(return_fizzbuzz, a)
for t in zip(a, b):
print(t)
def test_tuple_zip_four(self):
from loclasspy.fizzbuzz import is_fizz, is_buzz, is_fizzbuzz
a = range(1, 101)
f = map(is_fizz, a)
b = map(is_buzz, a)
fb = map(is_fizzbuzz, a)
fizzbuzz = list(zip(fb, f, b, a))
print(fizzbuzz)
def test_fizzbuzz(self):
from loclasspy.fizzbuzz import fizz, buzz, fizzbuzz
a = range(1, 101)
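        # For each n, zip pairs up (fizz(n), buzz(n), fizzbuzz(n), n) and filter(None, ...) keeps the first truthy entry,
        # assuming the helpers return a label such as "Fizz" when they apply and something falsy otherwise.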
fb = list(
map(
lambda x: next(filter(None, x)),
zip(
map(fizz, a),
map(buzz, a),
map(fizzbuzz, a),
a
)
)
)
print(fb)
``` |
{
"source": "jpmec/shepherdpy",
"score": 3
} |
#### File: jpmec/shepherdpy/test_shepherd.py
```python
import shepherd
import unittest
import example
from multiprocessing import Pool
class TestShepherd(unittest.TestCase):
def setUp(self):
pass
def test_default_map_reduce(self):
expected = {0:'Hello World'}
result = shepherd.run(datasource = expected)
self.assertEqual(expected, result)
def test_array_input(self):
data = ['Hello World']
expected = {0:'Hello World'}
result = shepherd.run(datasource = data)
self.assertEqual(expected, result)
def test_array_input_2(self):
data = ['Hello', 'World']
expected = {0:'Hello', 1:'World'}
result = shepherd.run(datasource = data)
self.assertEqual(expected, result)
def test_example(self):
expected = {'a': 2, 'on': 1, 'great': 1, 'Humpty': 3, 'again': 1, 'wall': 1, 'Dumpty': 2, 'men': 1, 'had': 1, 'all': 1, 'together': 1, "King's": 2, 'horses': 1, 'All': 1, "Couldn't": 1, 'fall': 1, 'and': 1, 'the': 2, 'put': 1, 'sat': 1}
result = shepherd.run(
datasource = example.datasource,
mapfn = example.mapfn,
reducefn = example.reducefn
)
self.assertEqual(expected, result)
def test_word_counting_1(self):
data = ['one fish two fish red fish blue fish']
expected = {'one':1, 'two':1, 'red':1, 'blue':1, 'fish':4}
result = shepherd.run(
datasource = data,
mapfn = shepherd.map_word_count,
reducefn = shepherd.reduce_word_count,
)
self.assertEqual(expected, result)
def test_word_counting_2(self):
data = ['one fish', 'two fish', 'red fish', 'blue fish']
expected = {'one':1, 'two':1, 'red':1, 'blue':1, 'fish':4}
result = shepherd.run(
datasource = data,
mapfn = shepherd.map_word_count,
reducefn = shepherd.reduce_word_count,
)
self.assertEqual(expected, result)
def test_CustomServer(self):
data = ['one fish', 'two fish', 'red fish', 'blue fish']
expected = {0: 'one fish', 1: 'two fish', 2: 'red fish', 3: 'blue fish'}
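        # With the base shepherd.Server and no custom map/reduce functions, the datasource is presumably passed through unchanged, keyed by index.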
result = shepherd.run(
datasource = data,
server = shepherd.Server,
)
self.assertEqual(expected, result)
def test_WordCountServer(self):
data = ['one fish', 'two fish', 'red fish', 'blue fish']
expected = {'one':1, 'two':1, 'red':1, 'blue':1, 'fish':4}
result = shepherd.run(
datasource = data,
server = shepherd.WordCountServer,
)
self.assertEqual(expected, result)
if __name__ == '__main__':
unittest.main()
``` |