{
"source": "jm-begon/episim",
"score": 2
}
#### File: episim/episim/model.py
```python
import os
import datetime
from collections import defaultdict
import numpy as np
from scipy import sparse
from episim.ontology import Ontology
from episim.plot.modeling import System, Accumulator
from .data import State
class EulerSimulator(object):
"""
Explicit Euler method
"""
def __init__(self, *dx_dt, step_size=1.):
self.step_size = step_size
self.dx_dt = dx_dt
self.N = len(dx_dt)
def __call__(self, *x, dt=1):
dx = np.zeros(self.N)
h = self.step_size
x = np.array(x)
n_steps_per_dt = int(1. / self.step_size)
        for _ in range(int(dt)):
            for t in range(n_steps_per_dt):
                # Evaluate every derivative at the current state, then apply
                # the explicit update x <- x + h * dx.
                for i, dxi_dt in enumerate(self.dx_dt):
                    dx[i] = dxi_dt(*x)
                x = x + h * dx
yield x
class LinNonLinEulerSimulator(object):
"""
P : p
"""
def __init__(self, dx_dt_lin, dx_dt_dict, step_size=1.):
if hasattr(M, "tocsr"):
dx_dt_lin = dx_dt_lin.tocsr()
self.dx_dt_matrix = dx_dt_lin
self.dx_dt_dict = dx_dt_dict
self.N = len(dx_dt_lin)
self.step_size = step_size
def __call__(self, *x, dt=1):
dx = np.zeros(self.N)
x = np.array(x)
h = self.step_size
n_steps_per_dt = int(1. / self.step_size)
        for _ in range(int(dt)):
for t in range(n_steps_per_dt):
dx *= 0
# Linear part
dx[:] = self.dx_dt_matrix.dot(x)
# Non linear
for i, f in self.dx_dt_dict.items():
dx[i] += f(*x)
x = x + h * dx
yield x
class F(object):
def __init__(self, callable, label):
self.label = label
self.callable = callable
def __call__(self, *args, **kwargs):
return self.callable(*args, **kwargs)
def __str__(self):
return self.label
class Dynamic(object):
@classmethod
def from_nodes(cls, *node_and_time_deriv):
nodes = []
dx_dt = []
for node, dxi_dt in node_and_time_deriv:
nodes.append(node)
dx_dt.append(dxi_dt)
sorted_nodes = [x for x in nodes]
sorted_nodes.sort(key=lambda n: n.index)
names = [x.name for x in sorted_nodes]
dynamic = cls(*names)
for name, dxi_dt in zip(names, dx_dt):
dynamic[name] = dxi_dt
return dynamic
def __init__(self, *variable_names):
self.variable_names = variable_names
self.var2idx = {s: i for i, s in enumerate(variable_names)}
self.dx_dt = [F(lambda *x: 0, "0") for _ in range(len(variable_names))]
def _idx(self, key):
try:
idx = int(key)
except (TypeError, ValueError):
idx = self.var2idx[key]
return idx
def __setitem__(self, key, value):
self.dx_dt[self._idx(key)] = value
def __getitem__(self, item):
return self.dx_dt[self._idx(item)]
def long_repr(self):
s = ""
for idx, name in enumerate(self.variable_names):
s += "d{}/dt = {}{}".format(name, self.dx_dt[idx], os.linesep)
return s
def __iter__(self):
return iter(self.dx_dt)
class Model(object):
@classmethod
def compute_parameters(cls, virus, population):
return tuple()
@classmethod
def factory(cls, initial_state, virus, population, resolution=0.1):
t = cls.compute_parameters(virus, population)
model = cls(*t, resolution=resolution)
return model.set_state(initial_state)
def __init__(self, resolution=0.1):
self.current_state = None
self.resolution = resolution
self.ontology = Ontology.default_ontology()
def _compute_reproduction_number(self, n_susceptible, n_total):
return 0
def set_state(self, state):
queriable = self.ontology(state)
R = self._compute_reproduction_number(queriable.susceptible,
queriable.population)
state.reproduction_number = R
if state.n_infection is None:
state.n_infection = queriable.infected
self.current_state = state
return self
def _state2variables(self, state):
return tuple()
def _variables2state(self, date, *values):
return State(date)
def run(self, n_steps=1):
variables = self._state2variables(self.current_state)
date = self.current_state.date
plus_one = datetime.timedelta(days=1)
for variables in self.simulator(*variables, dt=n_steps):
date = date + plus_one
state = self._variables2state(date, *variables)
self.set_state(state)
yield state
class SEIRS(Model):
"""
beta: float
transmission coefficient: average number of contact per person per time,
multiplied by the probability of disease transmission at a contact
between a susceptible person and an infectious person
gamma: float
1/D, where D is the average time infectious time
ksi:
re-susceptibility rate (depends on the fraction of alive, recovered
people will not develop a lasting immunity and depends on the time
before the immunity drops)
"""
@classmethod
def compute_parameters(cls, virus, population):
beta = population.contact_frequency * virus.transmission_rate
kappa = 1. / virus.exposed_duration
gamma = 1. / virus.infectious_duration
ksi = virus.immunity_drop_rate
return beta, kappa, gamma, ksi
def __init__(self, beta=0, kappa=0, gamma=0, ksi=0, resolution=0.1):
        if resolution is None:
            resolution = 0.1
super().__init__(resolution=resolution)
self.beta = beta
self.kappa = kappa
self.gamma = gamma
self.ksi = ksi
self.current_state = None
S, E, I, R = System.new("S", "E", "I", "R")
N = S + E + I + R
N.override_name("N")
S2E = self.beta * S * I / N
S2E_acc = Accumulator(S2E, self.resolution)
E2I = self.kappa * E
I2R = self.gamma * I
R2S = self.ksi * R
dS_dt = -S2E + R2S
dE_dt = S2E_acc - E2I
dI_dt = E2I - I2R
dR_dt = I2R - R2S
self.dynamic = Dynamic.from_nodes((S, dS_dt), (E, dE_dt),
(I, dI_dt), (R, dR_dt))
self.acc_n_infect = S2E_acc
self.simulator = EulerSimulator(*iter(self.dynamic),
step_size=resolution)
def __repr__(self):
s = "{}(beta={}, kappa={}, gamma={}, ksi={}, resolution={})".format(
self.__class__.__name__,
repr(self.beta),
repr(self.kappa),
repr(self.gamma),
repr(self.ksi),
repr(self.resolution),
)
if self.current_state is None:
return s
return s + ".set_state({})".format(repr(self.current_state))
def __str__(self):
return "{}(beta={:.2e}, kappa={:.2e}, gamma={:.2e}, ksi={:.2e})" \
"".format(self.__class__.__name__,
self.beta, self.kappa,
self.gamma, self.ksi)
# def __str__(self):
# return self.dynamic.long_repr()
def _compute_reproduction_number(self, n_susceptible, n_total):
return self.beta / self.gamma * n_susceptible / float(n_total)
def _state2variables(self, state):
zero = lambda x: 0 if x is None else x
S = zero(state.susceptible)
E = zero(state.exposed)
I = zero(state.infectious)
R = zero(state.recovered)
return S, E, I, R
def _variables2state(self, date, *values):
S, E, I, R = values
n_infection = self.current_state.n_infection
n_infection += self.acc_n_infect.value
self.acc_n_infect.reset()
state = State(date)
state.susceptible = S
state.exposed = E
state.infectious = I
state.recovered = R
state.n_infection = n_infection
return state
class SIR(Model):
@classmethod
def compute_parameters(cls, virus, population):
beta = population.contact_frequency * virus.transmission_rate
gamma = 1. / (virus.exposed_duration + virus.infectious_duration)
return beta, gamma
def __init__(self, beta, gamma, resolution=0.1):
super().__init__(resolution)
self.beta = beta
self.gamma = gamma
S, I, R = System.new("S", "I", "R")
N = S + I + R
N.override_name("N")
S2I = self.beta * S * I / N
I2R = self.gamma * I
dS_dt = -S2I
dI_dt = S2I - I2R
dR_dt = I2R
self.dynamic = Dynamic.from_nodes((S, dS_dt), (I, dI_dt), (R, dR_dt))
        self.simulator = EulerSimulator(*iter(self.dynamic),
                                        step_size=resolution)
def __repr__(self):
s = "{}(beta={}, gamma={}, resolution={})".format(
self.__class__.__name__,
repr(self.beta),
repr(self.gamma),
repr(self.resolution),
)
if self.current_state is None:
return s
return s + ".set_state({})".format(repr(self.current_state))
def __str__(self):
return "{}(beta={:.2e}, gamma={:.2e})" \
"".format(self.__class__.__name__,
self.beta, self.gamma)
def _compute_reproduction_number(self, n_susceptible, n_total):
return self.beta / self.gamma * n_susceptible / float(n_total)
def _state2variables(self, state):
zero = lambda x: 0 if x is None else x
S = zero(state.susceptible)
I = zero(state.infectious)
R = zero(state.recovered)
return S, I, R
def _variables2state(self, date, *values):
S, I, R = values
n_infection = self.current_state.n_infection
n_infection += (self.current_state.susceptible - S)
state = State(date)
state.susceptible = S
state.infectious = I
state.recovered = R
state.n_infection = n_infection
return state
```
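`EulerSimulator` advances the state with the explicit scheme x(t + h) = x(t) + h·dx/dt(x(t)). A minimal usage sketch, assuming the class above is importable from `episim.model`; the decay equation, initial value and step size are illustrative choices, not taken from the repository:
```python
from episim.model import EulerSimulator

# Integrate dx/dt = -x; the state should decay roughly like exp(-t).
sim = EulerSimulator(lambda x: -x, step_size=0.01)
for day, state in enumerate(sim(1.0, dt=5), start=1):
    print(day, state[0])
```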
#### File: episim/plot/single_outcome.py
```python
import os
import numpy as np
from .plot import Plot, Dashboard, TwoAxesPlot
class StatePlot(Plot):
def plot_outcome(self, outcome, *_):
t = np.arange(len(outcome))
N = outcome.population_size
# rates
s = np.array([s.susceptible for s in outcome]) / N
e = np.array([s.exposed for s in outcome]) / N
i = np.array([s.infectious for s in outcome]) / N
r = np.array([s.recovered for s in outcome]) / N
self.axes.plot(t, s, color=self.convention.susceptible_color,
label="Susceptible")
if e.max() > 0:
self.axes.plot(t, e, color=self.convention.exposed_color,
label="Exposed")
self.axes.plot(t, i, color=self.convention.infectious_color,
label="Infectious")
self.axes.plot(t, r, color=self.convention.recovered_color,
label="Recovered")
self.axes.set_ylabel("Percentage of population")
self.set_dates(outcome.dates)
self.axes.grid(True)
self.axes.legend(loc="best")
self.axes.set_title("State distribution with respect to time")
return self
class CumulStatePlot(Plot):
def plot_outcome(self, outcome, *_):
alpha = .25
t = np.arange(len(outcome))
N = outcome.population_size
# rates
s = np.array([s.susceptible for s in outcome]) / N
e = np.array([s.exposed for s in outcome]) / N
i = np.array([s.infectious for s in outcome]) / N
r = np.array([s.recovered for s in outcome]) / N
lower = 0
higher = lower + s
self.axes.plot(t, higher, label="Susceptible",
color=self.convention.susceptible_color)
self.axes.fill_between(t, lower, higher,
color=self.convention.susceptible_color,
alpha=alpha)
lower = higher
higher = higher + e
if e.max() > 0:
self.axes.plot(t, higher, label="Exposed",
color=self.convention.exposed_color)
self.axes.fill_between(t, lower, higher,
color=self.convention.exposed_color,
alpha=alpha)
lower = higher
higher = higher + i
self.axes.plot(t, higher, label="Infectious",
color=self.convention.infectious_color)
self.axes.fill_between(t, lower, higher,
color=self.convention.infectious_color,
alpha=alpha)
lower = higher
higher = higher + r
self.axes.plot(t, higher, label="Recovered",
color=self.convention.recovered_color)
self.axes.fill_between(t, lower, higher,
color=self.convention.recovered_color,
alpha=alpha)
self.axes.set_ylabel("Percentage of population")
self.axes.grid(True)
self.axes.legend(loc="lower left")
self.axes.set_title("Cumulative distribution with respect to time")
return self
class InfectedPlot(Plot):
def plot_outcome(self, outcome, *_):
t = np.arange(len(outcome))
# rates
e = np.array([s.exposed for s in outcome])
i = np.array([s.infectious for s in outcome])
if e.max() > 0:
self.axes.plot(t, e, color=self.convention.exposed_color,
label="Exposed")
self.axes.plot(t, i, color=self.convention.infectious_color,
label="Infectious")
self.axes.set_ylabel("Number of infection")
self.set_dates(outcome.dates)
self.axes.grid(True)
self.axes.legend(loc="best")
self.axes.set_title("Number of infected people with respect to time")
return self
class ReproductionNumberPlot(Plot):
@property
def default_title(self):
return "Reproduction number (R)"
def plot_outcome(self, outcome, color="k", title=None):
t = np.arange(len(outcome))
R = np.array([s.reproduction_number for s in outcome])
self.axes.plot(t, R, color=color)
self.axes.set_ylabel("Reproduction number", color=color)
self.axes.tick_params(axis='y', labelcolor=color)
self.axes.grid(True, color=color, alpha=.25)
self.axes.axhline(1, 0, 1, color=color, linestyle="--", alpha=.5)
if title is None:
title = self.default_title
self.axes.set_title(title)
return self
class InfectionNumberPlot(Plot):
@property
def default_title(self):
return "Percentage of cumulattive infection"
def plot_outcome(self, outcome, color="k", title=None):
N = outcome.population_size
t = np.arange(len(outcome))
R = np.array([s.n_infection for s in outcome]) / N
self.axes.plot(t, R, color=color)
self.axes.set_ylabel("Perc. cumul. infection", color=color)
self.axes.tick_params(axis='y', labelcolor=color)
self.axes.axhline(1, 0, 1, color=color, linestyle="--", alpha=.5)
self.axes.grid(True, color=color, alpha=.25)
if title is None:
title = self.default_title
self.axes.set_title(title)
return self
class RiskyContactPlot(Plot):
"""
Let S be the number of succeptible, I be the number of infected, k
be the average number of daily contact between any two people and
N be the population size.
The number of risky contact is: RC = k S I/N
for any succeptible individual, it has a I/N chances to have a contact
with a infectious person and there are k of such contacts (assuming
independence and uniform distribution)
The total number of contact is: TC = k N
any person has k contact (well, on average)
So the proportion of risky contact is: RC/TC = S I / N^2
"""
def plot_outcome(self, outcome, color="k", title=None):
t = np.arange(len(outcome))
N = outcome.population_size
# N = 1
# rates
s = np.array([s.susceptible for s in outcome])
i = np.array([s.infectious for s in outcome])
y = s * i / N**2
self.axes.plot(t, y, color=color)
self.axes.set_ylabel("Risky contact rate (pop. level)", color=color)
self.axes.tick_params(axis='y', labelcolor=color)
self.axes.grid(True, color=color, alpha=.25)
if title is None:
title = "TODO"
self.axes.set_title(title)
class DescriptionPlot(Plot):
def set_dates(self, dates):
return self
def plot_outcome(self, outcome, color="k", title=None):
# TODO better cut of lines
limit = False
text = []
tab = " > "
contd = " "
for date, descr in outcome.date2descr.items():
text.append(date.strftime("%d/%m/%y"))
for line in descr.split(os.linesep):
suffix = tab
while len(line) > 85:
limit = True
text.append("{}{}".format(suffix, line[:85]))
suffix = contd
line = line[85:]
text.append("{}{}".format(suffix, line))
text = os.linesep.join(text)
self.axes.text(0, 0, text, verticalalignment="bottom", color=color) # TODO max size ~90
# TODO https://matplotlib.org/3.1.1/gallery/pyplots/text_layout.html#sphx-glr-gallery-pyplots-text-layout-py
if limit:
# self.axes.axvline(len(outcome.state_history), linestyle=":", alpha=.1)
pass
if title is None:
title = "Description"
self.axes.set_title(title, color=color)
self.axes.axis("off")
class StateDashboard(Dashboard):
def __call__(self, outcome):
ax1, ax2, ax3 = self.figure.subplots(3, 1, sharex=True)
StatePlot(ax1, self.convention)(outcome)
CumulStatePlot(ax2, self.convention)(outcome)
TwoAxesPlot(ax3,
ReproductionNumberPlot, "dodgerblue",
RiskyContactPlot, "orange")(outcome)
return self
class FullDashboard(Dashboard):
def __call__(self, outcome):
all_axes = self.figure.subplots(3, 2, sharex=True)
# First column
StatePlot(all_axes[0, 0], self.convention)(outcome)
CumulStatePlot(all_axes[1, 0], self.convention)(outcome)
TwoAxesPlot(all_axes[2, 0],
ReproductionNumberPlot, "royalblue",
RiskyContactPlot, "darkorange")(outcome)
# Second column
# all_axes[0, 1].axis("off")
# all_axes[0, 1].text(0.5, 0.5, "Population size: {}".format(outcome.population_size))
title = "Description"
if outcome.name:
title = "{} -- {}".format(title, outcome.name)
DescriptionPlot(all_axes[0, 1], self.convention)(outcome,
title=title)
InfectionNumberPlot(
all_axes[1, 1],
self.convention
)(outcome)
InfectedPlot(all_axes[2, 1], self.convention)(outcome)
self.figure.subplots_adjust(wspace=0.3)
return self
```
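As a quick numeric sanity check of the risky-contact derivation in the `RiskyContactPlot` docstring (the population figures below are made up for the example):
```python
# Proportion of risky contacts: RC / TC = S * I / N**2
S, I, N = 900_000, 1_000, 1_000_000
print(S * I / N ** 2)  # 0.0009, i.e. about 0.09% of all contacts are risky
```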
#### File: episim/mains/strategy_comparison.py
```python
import argparse, sys
import datetime
from episim.data import Outcome, State
from episim.ontology import Ontology
from episim.parameters import PopulationBehavior, Confine, \
TransmissionRateMultiplier
from episim.plot.multi_outcome import ComparatorDashboard
from episim.scenario import Scenario
from episim.plot import FullDashboard
from episim.model import SEIRS, SIR
from episim.virus import SARSCoV2Th
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser()
parser.add_argument("-N", "--population_size", default=int(7 * 1e6),
type=int)
parser.add_argument("-I", "--n_infectious", default=20, type=int)
parser.add_argument("--n_days_total", default=243, type=int)
parser.add_argument("--n_days_before_sanity_measures", default=30, type=int)
parser.add_argument("--n_days_before_confinement", default=30, type=int)
parser.add_argument("--n_days_confinement_duration", default=60, type=int)
parser.add_argument("--sanitary_measure_effect", default=.5, type=float,
help="Factor by which the transmission rate is "
"multiplied (0 < x < 1")
parser.add_argument("--confinement_effect", default=.1, type=float,
help="Factor by which the number of average daily"
"contact is multiplied (0 < x < 1)")
parser.add_argument("-r", "--solver_resolution", default=0.1, type=float)
parser.add_argument("--factory", choices=["SIR", "SEIRS"], default="SEIRS")
args = parser.parse_args(argv)
print(args)
if args.factory == "SIR":
factory = SIR.factory
else:
factory = SEIRS.factory
N = args.population_size
I = args.n_infectious
res = args.solver_resolution
T = args.n_days_total
nd_bs = args.n_days_before_sanity_measures
nd_as = T - nd_bs
nd_bc = args.n_days_before_confinement
nd_cd = args.n_days_confinement_duration
nd_adc = T - nd_bc - nd_cd
outcomes = []
for scenario in NoIntervention(T, N, I, res), \
SanityMeasure(args.sanitary_measure_effect,
nd_bs, nd_as, N, I, res), \
Confinement(args.confinement_effect,
nd_bc, nd_cd, nd_adc, N, I, res):
outcomes.append(scenario.run_model(factory))
ComparatorDashboard()(*outcomes).show()#.save("comparison.png")
for i, outcome in enumerate(outcomes):
FullDashboard()(outcome).show()#.save("dashboard_{}.png".format(i))
# ComparatorDashboard()(*outcomes[1:]).show()
class BaseScenario(Scenario):
def __init__(self, population_size, n_infectious, resolution,
virus=None, population=None, initial_date=None):
self.resolution = resolution
N = population_size
I = n_infectious
if initial_date is None:
initial_date = datetime.date(2020, 1, 1)
self.initial_state = State(initial_date, susceptible=N-I, infectious=I,
n_infection=I)
# self.initial_state = State(N-I, 0, I, 0, initial_date, n_infection=I)
if virus is None:
virus = SARSCoV2Th()
self.virus = virus
if population is None:
population = PopulationBehavior()
self.population = population
def get_model(self, factory, state=None, virus=None, population=None,
resolution=None):
if state is None:
state = self.initial_state
if virus is None:
virus = self.virus
if population is None:
population = self.population
if resolution is None:
resolution = self.resolution
return factory(state, virus, population, resolution)
def starting_description(self, model):
ontology = Ontology.default_ontology()
state = ontology(self.initial_state)
descr_ls = [
"Number of infectious {:d} / {:d} total population size"
"".format(state.infectious,
state.population),
"{}".format(self.virus),
"Pop.: {}".format(self.population),
"Model: {}".format(model)
]
return self.multiline(descr_ls)
class NoIntervention(BaseScenario):
def __init__(self, n_days, population_size, n_infectious, resolution):
super().__init__(population_size, n_infectious, resolution)
self.n_days = n_days
def run_model(self, model_factory):
model = self.get_model(model_factory)
outcome = Outcome.from_model(model, self.n_days,
self.starting_description(model))
outcome.name = "Do nothing"
return outcome
class SanityMeasure(BaseScenario):
def __init__(self, measure_effect, n_days_before_measures,
n_days_after_measures, population_size, n_infectious,
resolution):
super().__init__(population_size, n_infectious, resolution)
self.n_days_1 = n_days_before_measures
self.n_days_2 = n_days_after_measures
self.measure_effect = measure_effect
def run_model(self, model_factory):
model = self.get_model(model_factory)
outcome = Outcome.from_model(model, self.n_days_1,
self.starting_description(model))
        virus = TransmissionRateMultiplier(self.virus, self.measure_effect)
model = self.get_model(model_factory, virus=virus, state=outcome.last_state)
descr_ls = [
"Sanity measures: dividing transmission rate by "
"{:.2f}".format(1./self.measure_effect),
"New model: {}".format(model)
]
outcome = outcome.concat(
Outcome.from_model(model, self.n_days_2,
self.multiline(descr_ls))
)
outcome.name = "Sanity measure (lower transmission rate)"
return outcome
class Confinement(BaseScenario):
def __init__(self, confinement_effect, n_days_before_confinement,
n_days_confinement, n_days_after_confinement,
population_size, n_infectious, resolution):
super().__init__(population_size, n_infectious, resolution)
self.confinement_efficiency = 1 - confinement_effect
self.n_days_1 = n_days_before_confinement
self.n_days_2 = n_days_confinement
self.n_days_3 = n_days_after_confinement
def run_model(self, model_factory):
model = self.get_model(model_factory)
outcome = Outcome.from_model(model, self.n_days_1,
self.starting_description(model))
population = Confine(self.population, self.confinement_efficiency)
model = self.get_model(model_factory, state=outcome.last_state,
population=population)
descr_ls = [
"Confinement: {}".format(population),
"New model: {}".format(model)
]
outcome = outcome.concat(
Outcome.from_model(model, self.n_days_2,
self.multiline(descr_ls))
)
model = self.get_model(model_factory, state=outcome.last_state)
descr_ls = [
"Deconfinement: {}".format(str(population)),
"New model: {}".format(model)
]
outcome = outcome.concat(
Outcome.from_model(model, self.n_days_3,
self.multiline(descr_ls))
)
outcome.name = "Confine/deconfine"
return outcome
if __name__ == '__main__':
main()
# TODO save in pdf
```
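The comparison script can also be driven programmatically by handing `main` an argument list. A hedged sketch, assuming the package and its plotting dependencies are installed; the values are arbitrary:
```python
from episim.mains.strategy_comparison import main

# Compare the three scenarios with a SIR model on a shorter horizon.
main(["-N", "1000000", "-I", "50", "--n_days_total", "120", "--factory", "SIR"])
```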
{
"source": "jm-begon/fight_tracker",
"score": 3
}
#### File: fight_tracker/fight_tracker/arithmetic.py
```python
class Boolable:
def __bool__(self):
return False
class DescriptiveTrue(Boolable):
def __init__(self, description):
self.description = description
def __bool__(self):
return True
def __str__(self):
return f"{self.description}"
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.description)})"
class Intable:
def __int__(self):
return 0
def __add__(self, other):
return Addition(self, other)
def __repr__(self):
return f"{self.__class__.__name__}()"
class DescriptiveInt(Intable):
def __init__(self, value, description):
self.value = value
self.description = description
def __int__(self):
return self.value
def __str__(self):
return f"{self.value} ({self.description})"
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.value)}, " \
f"{repr(self.description)})"
class Addition(Intable):
def __init__(self, *intables):
self.intables = list(intables)
def __int__(self):
return sum(int(i) for i in self.intables)
def __repr__(self):
return f"{self.__class__.__name__}(*{repr(self.intables)})"
def __str__(self):
return " + ".join(str(x) for x in self.intables)
class Subtraction(Intable):
def __init__(self, left_operand, right_operand):
self.left = left_operand
self.right = right_operand
def __int__(self):
return int(self.left) - int(self.right)
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.left)}, " \
f"{repr(self.right)})"
def __str__(self):
return f"{self.left} - {self.right}"
```
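A usage sketch of the descriptive arithmetic above: intables keep both the numeric value and a human-readable provenance. The bonus names are made up for the example:
```python
from fight_tracker.arithmetic import DescriptiveInt

total = DescriptiveInt(3, "base bonus") + DescriptiveInt(2, "magic weapon")
print(int(total))  # 5
print(total)       # 3 (base bonus) + 2 (magic weapon)
```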
#### File: fight_tracker/mechanics/ability.py
```python
from enum import Enum
class Ability(Enum):
STR = "strength"
DEX = "dexterity"
CON = "constitution"
INT = "intelligence"
WIS = "wisdom"
CHA = "charisma"
@classmethod
def __getitem__(cls, item):
if item is None:
return None
if isinstance(item, cls):
return item
for ability in cls:
            if item == ability.value or item == ability.name:
return ability
return None
```
{
"source": "jm-begon/globally-induced-forest",
"score": 3
}
#### File: gif/datasets/full_dataset.py
```python
from abc import ABCMeta, abstractmethod
import os
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state
from gif.datasets.utils import data_folder
class FullDataset(object, metaclass=ABCMeta):
@classmethod
def get_default_lengths(cls):
return 0, 0
@classmethod
def get_default_folder_name(cls):
return cls.__name__.lower()
def __init__(self, folder=None):
if folder is None:
folder = data_folder(self.__class__.get_default_folder_name())
self.folder = folder
self.tr_X_y = None
self.ts_X_y = None
def __repr__(self):
return "{}()".format(self.__class__.__name__)
def __len__(self):
if self.ts_X_y is None:
return sum(self.__class__.get_default_lengths())
return len(self.tr_X_y[-1]) + len(self.ts_X_y[-1])
def load_(self):
pass
def load(self):
if self.tr_X_y is None:
self.load_()
def partition(self, train_size=None, shuffle=True, random_state=1217):
self.load()
if train_size is None:
# Use default train size
train_size = len(self.tr_X_y[-1])
X_tr, y_tr = self.tr_X_y
X_ts, y_ts = self.ts_X_y
X = np.vstack((X_tr, X_ts))
y = np.hstack((y_tr, y_ts))
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=train_size, shuffle=shuffle,
random_state=random_state
)
self.tr_X_y = X_train, y_train
self.ts_X_y = X_test, y_test
@property
def training_set(self):
if self.tr_X_y is None:
return np.array([]), np.array([])
return self.tr_X_y
@property
def test_set(self):
if self.ts_X_y is None:
return np.array([]), np.array([])
return self.ts_X_y
def is_artificial(self):
return hasattr(self, "random_state")
```
#### File: gif/datasets/__init__.py
```python
from .classification import Waveform, Twonorm, Ringnorm, Musk2, Vowel, \
BinaryVowel, Madelon, Hastie, Covertype, BinaryCovertype, Letters, \
BinaryLetters, MNIST, MNIST8vs9, BinaryMNIST
from .regression import CTSlice, Friedman1, Cadata, Abalone, OzoneLA, \
Diabetes, Hardware, BostonHousing, MPG
__DATASET__ = {
# From <NAME>., <NAME>., & <NAME>. (2017, July). Globally induced forest: A prepruning compression scheme. In International Conference on Machine Learning (pp. 420-428). PMLR.
"waveform": Waveform,
"twonorm": Twonorm,
"ringnorm": Ringnorm,
"musk2": Musk2,
"vowel": Vowel,
"binary_vowel": BinaryVowel,
"madelon": Madelon,
"hastie": Hastie,
"covertype": Covertype,
"binary_covertype": BinaryCovertype,
"letters": Letters,
"binary_letters": BinaryLetters,
"mnist": MNIST,
"mnist8vs9": MNIST8vs9,
"binary_mnist": BinaryMNIST,
"ct_slice": CTSlice,
"friedman1": Friedman1,
"cadata": Cadata,
"abalone": Abalone,
# To compare with <NAME>., <NAME>., <NAME>., & <NAME>. (2021, March). Interpretable random forests via rule extraction. In International Conference on Artificial Intelligence and Statistics (pp. 937-945). PMLR.
"ozone": OzoneLA,
"diabetes": Diabetes,
"hardware": Hardware,
"housing": BostonHousing,
"mpg": MPG
}
def is_regression(full_dataset):
from .regression import RegressionFullDataset
return isinstance(full_dataset, RegressionFullDataset)
def is_classification(full_dataset):
from .classification import ClassificationFullDataset
return isinstance(full_dataset, ClassificationFullDataset)
def is_binary_classification(full_dataset):
from .classification import ClassificationFullDataset
return isinstance(full_dataset, ClassificationFullDataset) and \
full_dataset.n_classes == 2
def download_all(folder=None, verbose=True):
for fullset_cls in __DATASET__.values():
fullset = fullset_cls(folder)
print("Downloading for {}".format(repr(fullset)))
try:
fullset.load()
if verbose:
X, y = fullset.training_set
print("\t> Training set shapes: {}, {}".format(X.shape, y.shape))
X, y = fullset.test_set
print("\t> Test set shapes: {}, {}".format(X.shape, y.shape))
except Exception as e:
if not verbose:
raise
print("Error while downloading '{}' ({}). "
"Skipping...".format(fullset.__class__.__name__, e))
__all__ = ["__DATASET__", "download_all", "is_regression", "is_classification",
"is_binary_classification"]
```
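A short sketch of how the registry above can be used, assuming the concrete dataset classes accept the default data folder and that the data can be fetched:
```python
from gif.datasets import __DATASET__, is_regression

abalone = __DATASET__["abalone"]()  # instantiate the Abalone full dataset
abalone.load()                      # fetch/load the default split
X_train, y_train = abalone.training_set
print(X_train.shape, is_regression(abalone))  # Abalone comes from the regression module
```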
#### File: gif/rules/display.py
```python
import os
from functools import partial
from .ruleset import AndCondition, LeqCondition, GrCondition
from .utils import fstr
class RulesetPrinter(object):
def __init__(self, output_name=None, *variable_names, float_format="{:.2f}", ):
self.output_name = output_name
self.variable_names = variable_names
self.float_format = float_format
self.str_ = partial(fstr, format=float_format)
    def __repr__(self):
        return "{}({}, *{}, float_format={})".format(self.__class__.__name__,
                                                     repr(self.output_name),
                                                     repr(self.variable_names),
                                                     repr(self.float_format))
def intercept_2_str(self, ruleset):
return "intercept: {}".format(self.str_(ruleset.intercept))
def and_cond_2_str(self, condition):
return " and ".join(self.cond_2_str(cond) for cond in condition)
def get_var_name(self, base_condition):
index = base_condition.variable_index
return self.variable_names[index] \
if index < len(self.variable_names) \
else "x_{}".format(index)
def leq_cond_2_str(self, condition):
val_str = self.float_format.format(condition.threshold)
return "{} <= {}".format(self.get_var_name(condition), val_str)
def gr_cond_2_str(self, condition):
val_str = self.float_format.format(condition.threshold)
return "{} > {}".format(self.get_var_name(condition), val_str)
def cond_2_str(self, condition):
if isinstance(condition, AndCondition):
return self.and_cond_2_str(condition)
if isinstance(condition, LeqCondition):
return self.leq_cond_2_str(condition)
if isinstance(condition, GrCondition):
return self.gr_cond_2_str(condition)
raise ValueError("Unknown condition class '{}' for '{}'"
"".format(condition.__class__.__name__,
repr(condition)))
def pred_2_str(self, prediction):
pred_str = self.str_(prediction)
if self.output_name is not None:
pred_str = "{} = {}".format(self.output_name, pred_str)
return pred_str
def rule_2_str(self, rule):
prop_str = self.float_format.format(rule.proportion)
return "IF {} THEN {} ({})".format(self.cond_2_str(rule.condition),
self.pred_2_str(rule.prediction),
prop_str)
def __call__(self, ruleset):
lines = [self.intercept_2_str(ruleset)]
for rule in ruleset:
lines.append(self.rule_2_str(rule))
return os.linesep.join(lines)
```
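A sketch of the printer in action on a tiny hand-built rule set (the condition and rule classes come from `gif/rules/ruleset.py`, reproduced further below; `fstr` from `.utils` is assumed to format scalars with the given float format):
```python
from gif.rules.ruleset import RuleSet, Rule, AndCondition, LeqCondition, GrCondition
from gif.rules.display import RulesetPrinter

condition = AndCondition(LeqCondition(0, 3.5), GrCondition(2, 0.1))
ruleset = RuleSet(intercept=1.25,
                  rules=[Rule(condition, prediction=0.8, proportion=0.4)])

printer = RulesetPrinter("risk", "age", "height", "weight")
print(printer(ruleset))
# Expected output, roughly:
#   intercept: 1.25
#   IF age <= 3.50 and weight > 0.10 THEN risk = 0.80 (0.40)
```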
#### File: gif/rules/operations.py
```python
import numpy as np
from .ruleset import AndCondition, LeqCondition, GrCondition
# To match <NAME>., <NAME>., <NAME>., & <NAME>. (2021, March). Interpretable random forests via rule extraction. In International Conference on Artificial Intelligence and Statistics (pp. 937-945). PMLR.
class AlaSirusIntersector(object):
def __init__(self, eps=1e-10):
self.eps = eps
def same_val(self, x1, x2):
return np.sum((x1-x2)**2) < self.eps
def __call__(self, rs1, rs2):
commons = []
for r1 in rs1:
for r2 in rs2:
if self.is_same(r1.condition, r2.condition):
commons.append(r1)
return commons
def is_same(self, cond1, cond2):
if isinstance(cond1, AndCondition) and isinstance(cond2, AndCondition):
if len(cond1) != len(cond2):
return False
for c1_i, c2_i in zip(cond1, cond2):
if not self.is_same(c1_i, c2_i):
return False
return True
if isinstance(cond1, LeqCondition) and isinstance(cond2, LeqCondition):
return cond1.variable_index == cond2.variable_index and \
self.same_val(cond1.threshold, cond2.threshold)
if isinstance(cond1, GrCondition) and isinstance(cond2, GrCondition):
return cond1.variable_index == cond2.variable_index and \
self.same_val(cond1.threshold, cond2.threshold)
return False
def stability_ala_sirus(rs1, rs2, eps=1e-10):
intersect = AlaSirusIntersector(eps)
return 2.*len(intersect(rs1, rs2)) / float(len(rs1) + len(rs2))
class SKTraversal(object):
"""
For scikit-learn trees
"""
def __init__(self, accumulator_factory, mode="prefix"):
self.accumulator_factory = accumulator_factory
self.mode = mode
def forest_traversal(self, accumulator, forest):
for t_idx, tree in enumerate(forest.estimators_):
self.tree_traversal(accumulator, tree, t_idx)
def tree_traversal(self, accumulator, tree, t_idx=0):
if hasattr(tree, "tree_"):
tree = tree.tree_
self.tree_traversal_rec(accumulator, tree, t_idx)
def tree_traversal_rec(self, accumulator, tree, t_idx=0, index=0, depth=0):
if index < 0:
return
r_index = tree.children_right[index]
l_index = tree.children_left[index]
if self.mode == "prefix":
accumulator(tree, t_idx, index, depth)
self.tree_traversal_rec(accumulator, tree, t_idx, l_index, depth + 1)
self.tree_traversal_rec(accumulator, tree, t_idx, r_index, depth + 1)
elif self.mode == "infix":
self.tree_traversal_rec(accumulator, tree, t_idx, l_index, depth + 1)
accumulator(tree, t_idx, index, depth)
self.tree_traversal_rec(accumulator, tree, t_idx, r_index, depth + 1)
else: # postfix
self.tree_traversal_rec(accumulator, tree, t_idx, l_index, depth + 1)
self.tree_traversal_rec(accumulator, tree, t_idx, r_index, depth + 1)
accumulator(tree, t_idx, index, depth)
def __call__(self, *somethings, **kwargs):
accumulator = self.accumulator_factory(**kwargs)
for something in somethings:
if hasattr(something, "estimators_"):
# sk forest
self.forest_traversal(accumulator, something)
elif hasattr(something, "tree_"):
# sk tree
self.tree_traversal(accumulator, something)
else:
self.tree_traversal_rec(accumulator, something)
return accumulator
class Accumulator(object):
def __call__(self, tree, t_idx, index, depth):
pass
def finalize(self):
pass
class RuleCount(Accumulator):
def __init__(self):
self.count = 0
def __call__(self, tree, t_idx, index, depth):
r_index = tree.children_right[index]
l_index = tree.children_left[index]
if index > 0 and (l_index < 0 or r_index < 0):
# Count only a (partial) leaf which is not a root
self.count += 1
def finalize(self):
return self.count
class TotalComplexity(Accumulator):
def __init__(self):
self.complexity = 0
def __call__(self, tree, t_idx, index, depth):
r_index = tree.children_right[index]
l_index = tree.children_left[index]
if index > 0 and (l_index < 0 or r_index < 0):
# Account only for a (partial) leaf which is not a root
self.complexity += depth
def finalize(self):
return self.complexity
class BranchingHistogram(Accumulator):
def __init__(self, max_depth=15):
self.hist = np.zeros(max_depth, dtype=int)
def __call__(self, tree, t_idx, index, depth):
r_index = tree.children_right[index]
l_index = tree.children_left[index]
if index > 0 and (l_index < 0 or r_index < 0):
# Account only for a (partial) leaf which is not a root
self.hist[depth] += 1
def rule_count(self):
return self.hist[1:].sum()
def complexity(self):
return np.sum(np.arange(len(self.hist)) * self.hist)
def finalize(self):
return self.hist.copy()
```
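A sketch of counting the rules of a fitted scikit-learn tree with the traversal machinery above; the toy data is arbitrary:
```python
from sklearn.tree import DecisionTreeClassifier
from gif.rules.operations import SKTraversal, RuleCount

X = [[0, 0], [1, 1], [0, 1], [1, 0]]
y = [0, 1, 1, 0]
tree = DecisionTreeClassifier(random_state=0).fit(X, y)

counter = SKTraversal(RuleCount)(tree)  # prefix traversal of tree.tree_
print(counter.finalize())               # number of non-root leaves
```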
#### File: gif/rules/ruleset.py
```python
import numpy as np
class Condition(object):
pass
class BaseCondition(Condition):
def __init__(self, variable_index, threshold):
self.variable_index = variable_index
self.threshold = threshold
def __repr__(self):
return "{}(variable_index={}, threshold={})" \
"".format(self.__class__.__name__,
repr(self.variable_index),
repr(self.threshold))
@property
def variable_str(self):
return "x_{}".format(self.variable_index)
class LeqCondition(BaseCondition):
def __str__(self):
return "{} <= {:.2f}".format(self.variable_str, self.threshold)
def invert(self):
return GrCondition(self.variable_index, self.threshold)
class GrCondition(BaseCondition):
def __str__(self):
return "{} > {:.2f}".format(self.variable_str, self.threshold)
def invert(self):
return LeqCondition(self.variable_index, self.threshold)
class AndCondition(Condition):
@classmethod
def flatten(cls, condition, accumulator=None):
if isinstance(condition, AndCondition):
for sub_cond in condition:
cls.flatten(sub_cond, accumulator)
else:
accumulator.append(condition)
@classmethod
def by_flatting(cls, condition):
if isinstance(condition, AndCondition):
accumulator = []
cls.flatten(condition, accumulator)
return cls(*accumulator)
return condition
def __init__(self, *conditions):
self.conditions = tuple(conditions)
def __repr__(self):
return "{}(*{})".format(self.__class__.__name__,
repr(self.conditions))
def __str__(self):
return " and ".join(str(cond) for cond in self.conditions)
def __iter__(self):
return iter(self.conditions)
def __len__(self):
return len(self.conditions)
class Rule(object):
def __init__(self, condition, prediction, proportion):
self.condition = condition
self.prediction = prediction
self.proportion = proportion
def __repr__(self):
return "{}({}, {}, {})".format(self.__class__.__name__,
repr(self.condition),
repr(self.prediction),
repr(self.proportion))
def __str__(self):
return "IF {} THEN {} ({})".format(self.condition, self.prediction,
self.proportion)
class RuleSet(object):
@classmethod
def tree_2_rules(cls, rules, tree, index=0, condition=None):
if index < 0:
return
# If leaf (at least partially) prediction associated
if tree.children_left[index] < 0 or tree.children_right[index] < 0:
v = tree.value[index]
if np.sum(v**2) > 1e-10:
# Node actually holds a value
proportion = tree.n_node_samples[index] / tree.n_node_samples[0]
rule = Rule(AndCondition.by_flatting(condition), v, proportion)
rules.append(rule)
# if internal node (at least partially) only part of condition
this_condition = LeqCondition(tree.feature[index], tree.threshold[index])
left_cond = this_condition
if condition is not None:
left_cond = AndCondition(condition, this_condition)
cls.tree_2_rules(rules, tree, tree.children_left[index], left_cond)
right_cond = this_condition.invert()
if condition is not None:
right_cond = AndCondition(condition, right_cond)
cls.tree_2_rules(rules, tree, tree.children_right[index], right_cond)
@classmethod
def from_gif(cls, gif):
rules = []
intercept = gif.bias
for tree in gif.estimators_:
cls.tree_2_rules(rules, tree)
return cls(intercept, rules)
def __init__(self, intercept, rules):
self.intercept_ = intercept
self.rules_ = tuple(rules)
def __repr__(self):
return "{}(intercept={}, rules={})" \
"".format(self.__class__.__name__,
repr(self.intercept_),
repr(self.rules_))
@property
def intercept(self):
return self.intercept_
def __iter__(self):
return iter(self.rules_)
def __len__(self):
return len(self.rules_)
```
#### File: tree/tests/test_export.py
```python
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 1], [-1, 1], [1, 2], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=2,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=2,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[3.0, 1.0, 0.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="samples = 3\\nvalue = [[3, 0, 0]\\n' \
'[3, 0, 0]]", fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n' \
'[0.0, 1.0, 0.5]]", fillcolor="#e5813986"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'3 [label="samples = 2\\nvalue = [[0, 1, 0]\\n' \
'[0, 1, 0]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 3 ;\n' \
'4 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'2 -> 4 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=2,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e5813980"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=2)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
    for finding in finditer(r"\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
```
{
"source": "jm-begon/locmoss",
"score": 3
}
#### File: locmoss/locmoss/parser.py
```python
import os
import pygments.token
import pygments.lexers
from locmoss.location import Location
class Token(object):
def __init__(self, symbol, location):
self.symbol = symbol
self.location = location
def __str__(self):
return str(self.symbol)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__,
repr(self.symbol),
repr(self.location))
class Parser(object):
def __init__(self, fpath, lexer=None, encoding="latin-1"):
self.fpath = fpath
self.lexer = lexer
self.encoding = encoding
def __repr__(self):
return "{}({}, {}, {})".format(self.__class__.__name__,
repr(self.fpath),
repr(self.lexer),
repr(self.encoding))
def __iter__(self):
with open(self.fpath, "r", encoding=self.encoding) as hdl:
text = hdl.read()
lexer = pygments.lexers.guess_lexer_for_filename(self.fpath, text) \
if self.lexer is None else self.lexer
# Adapted from https://github.com/agranya99/MOSS-winnowing-seqMatcher/blob/master/cleanUP.py
for j, line in enumerate(text.split(os.linesep)):
line_number = j + 1
column_number = 1
for token_type, original_symbol in lexer.get_tokens(line):
symbol = original_symbol
if token_type == pygments.token.Text or token_type in pygments.token.Comment:
symbol = None
elif token_type == pygments.token.Name:
symbol = "N" # all variable names as 'N'
elif token_type in pygments.token.Literal.String:
symbol = "S" # all strings as 'S'
elif token_type in pygments.token.Name.Function:
symbol = "F" # user defined function names as 'F'
if symbol is not None:
yield Token(symbol, Location(self.fpath, line_number,
column_number))
column_number += len(original_symbol)
```
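A sketch of iterating the normalised token stream of one submission file; the path is a placeholder:
```python
from locmoss.parser import Parser

for token in Parser("submissions/alice/main.py"):  # placeholder path
    print(token.symbol, token.location)
```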
#### File: locmoss/query/query.py
```python
import os
from datetime import datetime, timedelta
from locmoss.location import LocationIterator
from locmoss.query.report import Report, Anchor, Anchorable, Reference, \
SubSection, Section
from locmoss.query.similarity import Ranking, CountSimilarity
class Query(object):
def __init__(self, label=None):
self.label = self.__class__.__name__ if label is None else label
def header(self, report):
report.add_header(self.label)
def query_(self, report, invert_index):
pass
def __call__(self, invert_index):
report = Report()
self.header(report)
self.query_(report, invert_index)
return report
class MetaData(Query):
__HEADER__ = """ __ _ ___ __ __ __ _
/ / ___ ___ __ _| | /\/\ /___\/ _\/ _\ /__\ ___ _ __ ___ _ __| |_
/ / / _ \ / __/ _` | | / \ // //\ \ \ \ / \/// _ \ '_ \ / _ \| '__| __|
/ /__| (_) | (_| (_| | | / /\/\ \/ \_// _\ \_\ \ / _ \ __/ |_) | (_) | | | |_
\____/\___/ \___\__,_|_| \/ \/\___/ \__/\__/ \/ \_/\___| .__/ \___/|_| \__|
|_| """
def __init__(self, **context):
super().__init__()
self.dict = context
self.creation = datetime.now()
def duration_str(self, duration_in_sec):
s = str(duration_in_sec)
# remove milliseconds and stuff
return s.split(".")[0]
def header(self, report):
for line in self.__HEADER__.split(os.linesep):
report.add_raw(line)
def query_(self, report, invert_index):
now = datetime.now()
with report.add_list("Context") as ls:
for k,v in self.dict.items():
ls.append("{}: {}".format(k, v))
ls.append("Hash: {}".format(hash(invert_index)))
ls.append("Duration: {}".format(self.duration_str(now - self.creation)))
class SoftwareList(Query):
def query_(self, report, invert_index):
softwares = invert_index.get_softwares()
for sf in softwares:
with report.add_list(sf.name) as report_list:
report_list.extend(sf)
class CorpusStat(Query):
def query_(self, report, invert_index):
n_fp = 0
n_skipped = 0
n_collisions = 0
for fingerprint, softwares in invert_index.iter_raw():
n_fp += 1
if invert_index.is_skipped(fingerprint):
n_skipped += 1
else:
if len(softwares) > 1:
n_collisions += 1
n_softwares = len(invert_index.get_softwares())
with report.add_list() as report_list:
report_list.append("Number of softwares: {}".format(n_softwares))
report_list.append("Total number of fingerprints: {}".format(n_fp))
report_list.append("Number of active fingerprints: {}"
"".format(n_fp - n_skipped))
report_list.append("Number of collisions: {}".format(n_collisions))
class MostSimilar(Query):
class AllScores(object):
def __init__(self, software_1, software_2, scores):
self.software_1 = software_1
self.software_2 = software_2
self.scores = scores
def __iter__(self):
for score in self.scores:
yield score
def __init__(self, *rankings, label=None):
# Ordering follow the scorers[0]
super().__init__(label)
self.rankings = rankings
self.ranking = ()
def query_(self, report, invert_index):
if len(self.rankings) == 0:
ranking = Ranking.from_invert_index(CountSimilarity(), invert_index)
self.rankings = (ranking,)
header = ["Rank", "Software 1", "Software 2"] + [r.label for r in
self.rankings]
main_ranking, other_rankings = self.rankings[0], self.rankings[1:]
report.add_raw("First {} results".format(len(main_ranking)))
with report.add_table(len(header), header) as table:
for i, (main_score, software_1, software_2) in enumerate(main_ranking):
all_scores = [main_score] + [ranking[(software_1, software_2)]
for ranking in other_rankings]
s1_name = software_1.name
s2_name = software_2.name
anchor = Section.create_anchor(Reference.join(s1_name, s2_name))
row = [Reference(str(i + 1), anchor), s1_name, s2_name]
for ranking, score in zip(self.rankings, all_scores):
row.append(ranking.similarity.format_score(score))
table.append(*row)
class MatchingLocations(Query):
def __init__(self, ranking, label=None):
super().__init__(label)
self.ranking = ranking
def query_(self, report, invert_index):
matching_graph = invert_index.derive_matching_graph()
for _, soft_1, soft_2 in self.ranking:
s1_name, s2_name = soft_1.name, soft_2.name
anchor = Section.create_anchor(Reference.join(s1_name, s2_name))
shareprints = matching_graph[(soft_1, soft_2)]
report.add(Section("{} VS. {}".format(s1_name, s2_name), anchor))
report.add_raw("Matching fingerprints: {}".format(len(shareprints)))
report.add_newline()
for fingerprint in shareprints:
with report.add_list(str(fingerprint)) as report_list:
for software in (soft_1, soft_2):
locations = software[fingerprint]
for location in locations:
desc = "{}:{}:{}".format(location.source_file,
location.start_line,
location.start_column)
report_list.append(desc)
report.add_newline()
class MatchingSnippets(Query):
def __init__(self, ranking, pre_lines=5, post_lines=5,
label=None):
super().__init__(label)
self.ranking = ranking
self.loc_iter = LocationIterator(pre_lines, post_lines)
def query_(self, report, invert_index):
matching_graph = invert_index.derive_matching_graph()
anchors = {}
for _, soft_1, soft_2 in self.ranking:
s1_name, s2_name = soft_1.name, soft_2.name
anchor = Section.create_anchor(Reference.join(s1_name, s2_name))
report.add(Section("{} VS. {}".format(s1_name, s2_name), anchor))
shareprints = matching_graph[(soft_1, soft_2)]
with report.add_list("Matching fingerprints: {}".format(len(shareprints))) as li:
for fingerprint in shareprints:
ref_s = Reference.join(s1_name, s2_name, str(fingerprint))
anchor = SubSection.create_anchor(ref_s)
anchors[ref_s] = anchor
li.append(Reference(str(fingerprint), anchor))
report.add_newline()
for fingerprint in shareprints:
s_fp = str(fingerprint)
ref_s = Reference.join(s1_name, s2_name, s_fp)
report.add(SubSection(s_fp, anchors[ref_s]))
for software in (soft_1, soft_2):
locations = software[fingerprint]
for location in locations:
desc = "{}:{}:{}".format(location.source_file,
location.start_line,
location.start_column)
with report.add_snippet(desc) as snippet:
for ln, line in self.loc_iter(location):
snippet.append(ln, line)
```
#### File: locmoss/locmoss/software.py
```python
import glob
import os
from collections import OrderedDict
from collections import defaultdict
class Software(object):
@classmethod
def list_from_globs(cls, patterns, realpath=False):
tree = Tree.from_glob_pattern(patterns, realpath)
return list(tree.to_software())
def __init__(self, name, files=()):
self.name = name
self.source_files = tuple(files)
self.fingerprints = defaultdict(list)
def __iter__(self):
for source_file in self.source_files:
yield source_file
def add_fingerprint(self, fingerprint, location):
self.fingerprints[fingerprint].append(location)
def yield_fingerprints(self):
for fp in self.fingerprints.keys():
yield fp
def __getitem__(self, fingerprint):
return self.fingerprints[fingerprint]
def count_fingerprints(self):
return len(self.fingerprints)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__,
repr(self.name),
repr({x for x in self}))
class Tree(object):
@classmethod
def from_glob_pattern(cls, patterns, realpath=False):
tree = cls()
for pattern in patterns:
for file in glob.glob(pattern):
path = os.path.expanduser(file)
if realpath:
path = os.path.realpath(path)
splits = path.split(os.sep)
if len(splits[0]) == 0:
tup = (os.sep,) + tuple(splits[1:])
else:
tup = tuple(splits)
tree.insert(tup)
return tree
def __init__(self):
self.children = OrderedDict()
def insert(self, tup):
if len(tup) == 0:
return
head, tail = tup[0], tup[1:]
child = self.children.get(head)
if child is None:
child = Tree()
self.children[head] = child
child.insert(tail)
def to_software(self):
common = []
children = self.children
while len(children) < 2:
if len(children) == 0:
raise ValueError("No software found.")
label, subtree = list(children.items())[0]
common.append(label)
children = subtree.children
prefix = os.sep.join(common)
for software_name, subtree in children.items():
software_path = os.path.join(prefix, software_name)
files = []
for source_sub_path in subtree:
source_path = os.path.join(software_path, source_sub_path)
files.append(source_path)
software = Software(software_path, files)
yield software
def __iter__(self):
for label, child in self.children.items():
if len(child.children) == 0:
yield label
else:
for x in child:
yield os.path.join(label, x)
```
#### File: locmoss/test/test_struct.py
```python
from nose.tools import assert_equal, assert_not_equal, assert_false
from locmoss.kgram import KGrams
def get_kgrams():
kg1 = KGrams("abc")
kg1_p = KGrams("abc")
kg2 = KGrams("abg")
kg3 = KGrams("ab")
return kg1, kg1_p, kg2, kg3
def test_kgram_len():
kg1, kg1_p, kg2, kg3 = get_kgrams()
assert_equal(len(kg1), 3)
assert_equal(len(kg1_p), 3)
assert_equal(len(kg2), 3)
assert_equal(len(kg3), 2)
def test_kgram_hash_eq():
kg1, kg1_p, kg2, kg3 = get_kgrams()
assert_equal(hash(kg1), hash(kg1_p))
assert_equal(kg1, kg1_p)
assert_false(kg1 is kg1_p)
assert_not_equal(hash(kg1), hash(kg2))
assert_not_equal(hash(kg1), hash(kg3))
assert_not_equal(hash(kg2), hash(kg3))
assert_not_equal(kg1, kg2)
assert_not_equal(kg1, kg3)
assert_not_equal(kg2, kg3)
```
#### File: locmoss/locmoss/winnowing.py
```python
from .fingerprint import Fingerprinter
from .kgram import KGrams, Buffer
class Winnower(Fingerprinter):
def __init__(self, parser_factory, window_size, k):
super().__init__(parser_factory)
self.window_size = window_size
self.k = k
@property
def kgramifier(self):
# Can be overriden to change the default hash function
return KGrams.kgramify
def extract_fingerprints_(self, token_iterator):
window = Buffer(self.window_size)
selected_grams = []
min_gram = None
for location, kgram in self.kgramifier(token_iterator, self.k):
window.put(kgram)
if window.is_full():
# Note: using built-in `min` should be much faster than
# re-impl. it. Moreover, the window is expected to be small
# and the cost of deriving and inverting an array should be
# small.
# `min` keeps the leftmost minima:
# >> min([(1, 1), (1, 2)], key=lambda x:x[0])
# (1, 1)
window_min = min(list(window)[::-1], key=hash)
if window_min is not min_gram:
selected_grams.append(window_min)
min_gram = window_min
yield location, window_min
```
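The selection rule implemented by `Winnower` is standard winnowing: slide a window over the k-gram hashes and record each window's minimum, but only when it changes. A standalone sketch on made-up hash values (it glosses over the tie-breaking and object-identity details handled by the class above):
```python
hashes = [77, 74, 42, 17, 98, 50, 17, 98, 8, 88, 67, 39, 77, 74, 42, 17, 98]
window_size = 4
selected, last = [], None
for i in range(len(hashes) - window_size + 1):
    window_min = min(hashes[i:i + window_size])
    if window_min != last:
        selected.append(window_min)
        last = window_min
print(selected)  # [17, 8, 39, 17]
```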
{
"source": "jm-begon/ood_samplefree",
"score": 2
}
#### File: ood_samplefree/datasets/custom_transforms.py
```python
from copy import copy
from functools import partial
import torch
from torchvision import transforms
class Shuffle(object):
def __call__(self, tensor):
# TODO support gray images
n_channel, height, width = tensor.size()
perm = torch.randperm(height*width)
for c in range(n_channel):
tensor[c] = tensor[c].view(width*height)[perm].view(height, width)
return tensor
def make_shuffle_variant(full_dataset):
d2 = copy(full_dataset)
d2.ls_transform = transforms.Compose([d2.ls_transform, Shuffle()])
d2.vs_transform = transforms.Compose([d2.vs_transform, Shuffle()])
d2.ts_transform = transforms.Compose([d2.ts_transform, Shuffle()])
return d2
class ShuffleFactory(object):
@classmethod
def same_as(cls, full_dataset):
return make_shuffle_variant(full_dataset)
class InverseFactory(object):
@classmethod
def apply_invert(cls, full_dataset, make_copy=True):
try:
from PIL.ImageChops import invert
except ImportError:
from PIL.ImageOps import invert
d2 = copy(full_dataset) if make_copy else full_dataset
d2.ls_transform = transforms.Compose([invert, d2.ls_transform])
d2.vs_transform = transforms.Compose([invert, d2.vs_transform])
d2.ts_transform = transforms.Compose([invert, d2.ts_transform])
return d2
def __init__(self, full_dataset_factory):
self.fdf = full_dataset_factory
def __call__(self, *args, **kwargs):
full_dataset = self.fdf(*args, **kwargs)
return self.__class__.apply_invert(full_dataset, make_copy=False)
def same_as(self, ref_full_dataset):
fd = self.fdf.same_as(ref_full_dataset)
return self.__class__.apply_invert(fd, make_copy=False)
# ================================ DATA AUG. ================================= #
class DataAugmentation(object):
def get_transform(self):
        return transforms.Compose([])  # identity pipeline; Compose requires a list
def partial(self, full_dataset_factory, **kwargs):
return partial(full_dataset_factory,
ls_data_augmentation=self.get_transform(), **kwargs)
class CropAugmented(DataAugmentation):
def __init__(self, size=32, padding=4, padding_mode="reflect"):
self.kwargs = {"size": size, "padding":padding,
"padding_mode":padding_mode}
def get_transform(self):
return transforms.RandomCrop(**self.kwargs)
class CropHzFlipAugmented(DataAugmentation):
def __init__(self, size=32, padding=4, padding_mode="reflect"):
self.kwargs = {"size": size, "padding": padding,
"padding_mode": padding_mode}
def get_transform(self):
return transforms.Compose([
transforms.RandomCrop(**self.kwargs),
transforms.RandomHorizontalFlip(),
])
class CropHzVFlipAugmented(DataAugmentation):
def __init__(self, size=32, padding=4, padding_mode="reflect"):
self.kwargs = {"size": size, "padding": padding,
"padding_mode": padding_mode}
def get_transform(self):
return transforms.Compose([
transforms.RandomCrop(**self.kwargs),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
])
class FlipsAugmented(DataAugmentation):
def get_transform(self):
return transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
])
```
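The `DataAugmentation` subclasses only build torchvision transforms. A minimal sketch of what one of them produces, applied to a placeholder image (only torchvision and PIL are assumed; the dataset-factory plumbing is left out):
```python
# Hedged usage sketch: build the augmentation pipeline and apply it to a PIL image.
from PIL import Image
from torchvision import transforms

aug = CropHzFlipAugmented(size=32, padding=4)
pipeline = transforms.Compose([aug.get_transform(), transforms.ToTensor()])

img = Image.new("RGB", (32, 32))   # placeholder image
tensor = pipeline(img)             # random crop + horizontal flip + to tensor
```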
#### File: ood_samplefree/datasets/utils.py
```python
from torch.utils.data import DataLoader, ConcatDataset, Subset
def get_transform(dataset):
if isinstance(dataset, DataLoader):
return get_transform(dataset.dataset)
if isinstance(dataset, ConcatDataset):
return get_transform(dataset.datasets[0])
if isinstance(dataset, Subset):
return get_transform(dataset.dataset)
return dataset.transform
```
#### File: ood_samplefree/features/latent.py
```python
import os
import shutil
import warnings
import numpy as np
from .structures import Monitor, MultiHook
from .baselines import get_linear_layer
def norm_of(X, axis=-1):
"""
X: array [n_samples, n_features]
"""
return np.sqrt(np.sum(X**2, axis=axis))
def predicted_class(Z):
return Z.argmax(axis=1)
def latent_metrics(U, C, W):
W_norm = norm_of(W)
U_norm = norm_of(U)
M_k = U * W[C]
A = np.sum(M_k, axis=1)
Q = A / W_norm[C]
norm = -U_norm
proj = -Q
ang = 1 - (Q / U_norm)
act = -A
return norm, act, proj, ang
def compute_ang_p(ang, act_p, act):
rAct = act_p / act
return 1 - ((1 - ang) * rAct)
class LatentMonitor(Monitor):
def __init__(self, linear_layer_getter=None):
super().__init__()
if linear_layer_getter is None:
linear_layer_getter = get_linear_layer
self.linear_layer = linear_layer_getter
self.W = None
self.W_pos = None
self.W_pos_mask = None
def create_hook(self):
def hook(module, input, output):
latents = input[0].data.cpu().numpy()
logits = output.data.cpu().numpy()
C = predicted_class(logits)
norm, act, proj, ang = latent_metrics(latents, C, self.W)
self.cache.save("norm", norm)
self.cache.save("act", act)
self.cache.save("proj", proj)
self.cache.save("ang", ang)
latents *= self.W_pos_mask[C]
norm_p, act_p, proj_p, ang_pp = latent_metrics(latents, C, self.W_pos)
self.cache.save("norm+", norm_p)
self.cache.save("act+", act_p)
self.cache.save("proj+", proj_p)
self.cache.save("ang++", ang_pp)
self.cache.save("ang+", compute_ang_p(ang, act_p, act))
return hook
def watch(self, model):
linear_layer = self.linear_layer(model)
W = linear_layer.weight.data.cpu().numpy()
self.W = W
self.W_pos_mask = (W>0).astype(float)
self.W_pos = W * self.W_pos_mask
handle = linear_layer.register_forward_hook(self.create_hook())
self.register_handle(handle)
class LatentSaver(MultiHook):
# Hook
@classmethod
def load_latent_matrix(cls, folder, force=False):
files = []
with os.scandir(os.path.expanduser(folder)) as entries:
# Get npy files
files = [entry for entry in entries if entry.is_file() and
entry.name.endswith(".npy")]
if len(files) == 0:
return list()
files.sort(key=(lambda x: x.name))
# Verify timestamp ordering match
prev_time = files[0].stat().st_mtime_ns
for entry in files[1:]:
curr_time = entry.stat().st_mtime_ns
if curr_time < prev_time:
if force:
warnings.warn("Ordering mismatch")
else:
raise IOError("Ordering mismatch")
prev_time = curr_time
arrs = []
for entry in files:
arrs.append(np.load(entry.path))
return np.vstack(arrs)
@classmethod
def load_batches_save_whole(cls, folder, force=False):
folder = os.path.expanduser(folder)
arr = cls.load_latent_matrix(folder, force=force)
if len(arr) > 0:
np.save(folder, arr)
return len(arr) > 0
def __init__(self, folder, linear_layer_getter=None, max_n_batch_order=10,
auto_remove=False, fail_fast=True):
super().__init__()
self.folder = os.path.expanduser(folder)
if not os.path.exists(self.folder):
os.makedirs(self.folder)
if linear_layer_getter is None:
linear_layer_getter = get_linear_layer
self.linear_layer = linear_layer_getter
self.batch_number = 0
self.suffix_length = max_n_batch_order
self.auto_remove = auto_remove
self.fail_fast = fail_fast
def create_hook(self):
def hook(module, input, output):
latents = input[0].data.cpu().numpy()
fname = "batch_{}".format(str(self.batch_number).zfill(self.suffix_length))
fpath = os.path.join(self.folder, fname)
np.save(fpath, latents)
self.batch_number += 1
return hook
def __call__(self, model):
linear_layer = self.linear_layer(model)
handle = linear_layer.register_forward_hook(self.create_hook())
self.register_handle(handle)
return self
def concatenate_batches(self, remove_folder=False, force=False):
saved = self.__class__.load_batches_save_whole(self.folder, force=force)
if saved and remove_folder:
shutil.rmtree(self.folder)
def __exit__(self, exc_type, exc_val, exc_tb):
super().__exit__(exc_type, exc_val, exc_tb)
self.concatenate_batches(self.auto_remove, force=not self.fail_fast)
def create_latent_saver(folder, linear_layer_getter=None, max_n_batch_order=10,
fail_fast=True):
if folder is None:
return MultiHook()
return LatentSaver(folder, linear_layer_getter=linear_layer_getter,
max_n_batch_order=max_n_batch_order, fail_fast=fail_fast)
```
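To make the shapes concrete, here is an illustrative run of `latent_metrics` on random data, standing in for the latent vectors and the final linear layer's weights that `LatentMonitor` captures:
```python
# Illustrative sketch: U are latent vectors, W the last linear layer's weights,
# C the predicted classes; all values are random.
import numpy as np

rng = np.random.RandomState(0)
U = rng.randn(8, 16)            # 8 samples, 16 latent features
W = rng.randn(10, 16)           # 10 classes
Z = U @ W.T                     # logits, ignoring the bias for simplicity
C = predicted_class(Z)
norm, act, proj, ang = latent_metrics(U, C, W)
print(norm.shape, act.shape, proj.shape, ang.shape)   # all (8,)
```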
#### File: ood_samplefree/features/summary.py
```python
from sklearn.preprocessing import StandardScaler
class OneClassSum(object):
def fit(self, X, y=None, sample_weight=None):
self.scaler = StandardScaler()
X = self.scaler.fit_transform(X)
return self
def predict(self, X):
X = self.scaler.transform(X)
S = X.sum(axis=1)
return S
``` |
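A short usage sketch of `OneClassSum` on random data, purely illustrative: higher scores correspond to larger standardized feature sums.
```python
import numpy as np

X_train = np.random.randn(100, 5)
scorer = OneClassSum().fit(X_train)
scores = scorer.predict(np.random.randn(10, 5))
print(scores.shape)   # (10,)
```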
{
"source": "jm-begon/progressmonitor",
"score": 3
} |
#### File: progressmonitor/progressmonitor/callback.py
```python
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "3-clause BSD License"
__version__ = '1.0'
__date__ = "08 January 2015"
import sys
import os
from functools import partial
from logging import getLogger, INFO
from .util import call_with
def _writeln(stream, string, last_com=False):
"""
Writes and flushes the string on the stream
Parameters
----------
stream : :class:`StringIO`
        The stream to use
string : str
The string to write
last_com : bool (Default : False)
        Whether it is the last message or not
"""
stream.write(string)
stream.write(os.linesep)
stream.flush()
def stdout_callback_factory():
"""
:func:`callback_factory`
Return
------
callback : callable
a callback issuing on stdout
"""
return partial(_writeln, sys.stdout)
def stderr_callback_factory():
"""
:func:`callback_factory`
Return
------
callback : callable
a callback issuing on stderr
"""
return partial(_writeln, sys.stderr)
def overwrite_callback_factory(stream=sys.stdout):
"""
    A :func:`callback_factory` which outputs on a stream and overwrites
    the previous message (provided the message is a single line)
Parameters
----------
stream : :class:`StringIO` (Default : stdout)
        The stream to use
Return
------
:func:`overwrite_callback`
"""
string_length = [0]
def overwrite_callback(string, last_com=False):
"""
        A :func:`callback` which overwrites the previous message
Parameters
----------
string : str
The string to process
last_com : bool (Default : False)
            Whether it is the last message or not
"""
stream.write("\b"*string_length[0])
stream.write(string)
new_length = len(string)
length_diff = string_length[0] - new_length
if length_diff > 0:
stream.write(" "*length_diff)
stream.write("\b"*length_diff)
string_length[0] = new_length
if last_com:
stream.write("\n")
stream.flush()
return overwrite_callback
def logging_callback_factory(logger_name="", log_level=INFO):
"""
A :func:`callback_factory` which uses the logging facility
Parameters
----------
logger_name : str (Default : '')
The name of the logger to use
log_level : int (Default : INFO)
The logging level
Return
------
:func:`logging_callback`
"""
logger = getLogger(logger_name)
def logging_callback(string, last_com=False):
"""
A :func:`callback` write the string to a logger
Parameters
----------
string : str
The string to process
last_com : bool (Default : False)
            Whether it is the last message or not
"""
logger.log(log_level, string)
if last_com and hasattr(logger, "flush"):
logger.flush()
return logging_callback
def store_till_end_callback_factory(destination=lambda m: None):
"""
A :func:`callback_factory` which stores the messages in a list
and send them to the destination at the last message
Parameters
----------
destination : callable (Default : destination=lambda m: None)
A function which takes as input a list of string
Return
------
:func:`store_till_end_callback`
"""
messages = []
def store_till_end_callback(string, last_com=False):
"""
A :func:`callback` which stores the messages in a list
and send them to the destination at the last message
Parameters
----------
string : str
The string to process
last_com : bool (Default : False)
            Whether it is the last message or not
"""
messages.append(string)
if last_com:
destination(messages)
return store_till_end_callback
def multi_callback_factory(callback_factories, **kwargs):
"""
A :func:`callback_factory` which multiplexes the messages
Parameters
----------
callback_factories : iterable of :func:`callback_factory`
The callback to use to issue the message
Return
------
:func:`multi_callback`
"""
callbacks = []
for factory in callback_factories:
callbacks.append(call_with(factory, kwargs))
def multi_callback(string, last_com=False):
"""
        A :func:`callback` which multiplexes the messages
Parameters
----------
string : str
The string to process
last_com : bool (Default : False)
            Whether it is the last message or not
"""
for callback in callbacks:
callback(string, last_com)
return multi_callback
__callback_factories__ = {
"$stdout" : stdout_callback_factory,
"$stderr" : stderr_callback_factory,
"$overwrite" : overwrite_callback_factory,
"$log" : logging_callback_factory,
"$store_till_end" : store_till_end_callback_factory,
"$multi" : multi_callback_factory
}
```
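A minimal usage sketch of the factories above; only the functions defined in this module are assumed.
```python
callback = stdout_callback_factory()
callback("50% done")
callback("done", last_com=True)

# Rewrites the same console line instead of stacking messages
overwriting = overwrite_callback_factory()
for pct in (25, 50, 75, 100):
    overwriting("{}%".format(pct), last_com=(pct == 100))
```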
#### File: progressmonitor/progressmonitor/config.py
```python
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "3-clause BSD License"
__version__ = '1.0'
__date__ = "08 January 2015"
import re
from ast import literal_eval
import logging
from .factory import monitor_generator_factory
from .factory import (monitor_function_factory, formated_code_monitoring)
from .formatter import __formatter_factories__
from .rule import __rule_factories__
from .callback import __callback_factories__
from .util import IdProxy
# ============================ MANAGER ============================ #
class Manager(object):
UNKNOWN_MONITOR = 0
GENERATOR_MONITOR = 1
FUNCTION_MONITOR = 2
CODE_MONITOR = 3
_singleton = None
def __new__(cls, *args, **kwargs):
if cls._singleton is None:
cls._singleton = super(Manager, cls).__new__(cls, *args, **kwargs)
cls._singleton._meta = dict()
return cls._singleton
def add_config(self, monitor_name, conf, monitor_type):
self._meta[monitor_name] = (conf, monitor_type)
def _get_ancestors_conf(self, monitor_name):
unknown = Manager.UNKNOWN_MONITOR
monitor_type = unknown
conf = dict()
prefixes = [monitor_name[:m.start()]
for m in re.finditer('\.',monitor_name)]
prefixes.append(monitor_name)
for prefix in prefixes:
anc_conf, type_ = self._meta.get(prefix, (dict(), unknown))
if type_ != Manager.UNKNOWN_MONITOR:
monitor_type = type_
conf.update(anc_conf)
return conf, monitor_type
def get_config(self, monitor_name, **kwargs):
conf, monitor_type = self._get_ancestors_conf(monitor_name)
if len(kwargs) > 0:
conf.update(kwargs)
return conf, monitor_type
# ============================ DICT PARSING ============================ #
class Const(object):
VERSION = "version"
MONITORS = "generator_monitors"
FUNC_MONITORS = "function_monitors"
CODE_MONITORS = "code_monitors"
CALLBACK_SEC = "callbacks"
RULE_SEC = "rules"
FORMATTER_SEC = "formatters"
def _external_load(string):
mod_str, obj_str = string.rsplit(".", 1)
mod = __import__(mod_str, fromlist=[obj_str])
obj = getattr(mod, obj_str)
return obj
def _substitute(struct, substit_dict):
if hasattr(struct, "startswith"):
if struct.startswith("$"):
return substit_dict[struct]
elif hasattr(struct, "iteritems"):
# dict --> inspect
for k, v in struct.iteritems():
struct[k] = _substitute(v, substit_dict)
else:
try:
# List -> inspect
for i, elem in enumerate(struct):
struct[i] = _substitute(elem, substit_dict)
except TypeError:
pass
return struct
def _dict_config_v1(config_dict):
manager = Manager()
# ---- Predefined replacement rules ---- #
substit_dict = dict()
substit_dict.update(__rule_factories__)
substit_dict.update(__formatter_factories__)
substit_dict.update(__callback_factories__)
# ---- Adding the substitutions ---- #
# rules
if Const.RULE_SEC in config_dict:
for k, v in config_dict[Const.RULE_SEC].iteritems():
if k.startswith("$"):
loaded = _external_load(v)
substit_dict[k] = loaded
__rule_factories__[k] = loaded
# hook
if Const.FORMATTER_SEC in config_dict:
for k, v in config_dict[Const.FORMATTER_SEC].iteritems():
if k.startswith("$"):
loaded = _external_load(v)
substit_dict[k] = loaded
__formatter_factories__[k] = loaded
# callback
if Const.CALLBACK_SEC in config_dict:
for k, v in config_dict[Const.CALLBACK_SEC].iteritems():
if k.startswith("$"):
loaded = _external_load(v)
substit_dict[k] = loaded
__callback_factories__[k] = loaded
# ---- Performing the substitutions ---- #
config_dict = _substitute(config_dict, substit_dict)
# ---- Getting the monitors for generators---- #
if Const.MONITORS in config_dict:
for name, conf in config_dict[Const.MONITORS].iteritems():
# Adding to the manager
manager.add_config(name, conf, Manager.GENERATOR_MONITOR)
# ---- Getting the monitors for functions ---- #
if Const.FUNC_MONITORS in config_dict:
for name, conf in config_dict[Const.FUNC_MONITORS].iteritems():
# Adding to the manager
manager.add_config(name, conf, Manager.FUNCTION_MONITOR)
# ---- Getting the monitors for functions ---- #
if Const.CODE_MONITORS in config_dict:
for name, conf in config_dict[Const.CODE_MONITORS].iteritems():
# Adding to the manager
manager.add_config(name, conf, Manager.CODE_MONITOR)
# ============================ PUBLIC EXPOSURE ============================ #
def get_config(monitor_name, **kwargs):
conf, _ = Manager().get_config(monitor_name, **kwargs)
return conf
def get_monitor(monitor_name, **kwargs):
conf, monitor_type = Manager().get_config(monitor_name, **kwargs)
if monitor_type == Manager.GENERATOR_MONITOR:
return monitor_generator_factory(**conf)
elif monitor_type == Manager.FUNCTION_MONITOR:
return monitor_function_factory(**conf)
elif monitor_type == Manager.CODE_MONITOR:
return formated_code_monitoring(**conf)
else:
# If unknown, do not crash caller code
logger = logging.getLogger('progressmonitor.config')
msg = "Unknown monitor name '%s'. Skipping monitoring." % monitor_name
logger.warning(msg)
return IdProxy()
def get_generator_monitor(monitor_name, **kwargs):
conf = get_config(monitor_name, **kwargs)
return monitor_generator_factory(**conf)
def get_function_monitor(monitor_name, **kwargs):
conf = get_config(monitor_name, **kwargs)
return monitor_function_factory(**conf)
def get_code_monitor(monitor_name, **kwargs):
conf = get_config(monitor_name, **kwargs)
return formated_code_monitoring(**conf)
def parse_dict_config(config_dict):
version = config_dict.get(Const.VERSION, 1)
if version == 1:
_dict_config_v1(config_dict)
else:
raise AttributeError("Version "+str(version)+" is not supported")
def parse_file_config(config_file):
with open(config_file) as fp:
config_dict = literal_eval(fp.read())
parse_dict_config(config_dict)
```
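The `Manager` resolves a monitor's configuration hierarchically along dotted-name prefixes, each level overriding its parent. A sketch with illustrative keys (the `"rate"` keyword is only an example of what a monitor factory might accept):
```python
manager = Manager()
manager.add_config("app", {"rate": 0.1}, Manager.GENERATOR_MONITOR)
manager.add_config("app.training", {"rate": 0.01}, Manager.GENERATOR_MONITOR)

conf, kind = manager.get_config("app.training.epoch")
# conf == {"rate": 0.01}: "app" is applied first, then overridden by
# "app.training"; the unknown leaf "epoch" adds nothing.
```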
#### File: progressmonitor/test/testfallback.py
```python
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "3-clause BSD License"
__version__ = '1.0'
__date__ = "15 January 2015"
from nose.tools import assert_equal
import dis
from progressmonitor.formatter import (progressbar_formatter_factory,
nb_iterations_formatter_factory,
elapsed_time_formatter_factory,
remaining_time_formatter_factory)
from progressmonitor.rule import (periodic_rule_factory,
span_rule_factory, rate_rule_factory)
from progressmonitor.util import call_with
def test_fb_rate2span():
kwargs = {"rate": 0.1, "span":10}
r1 = call_with(rate_rule_factory, kwargs)
r2 = call_with(span_rule_factory, kwargs)
assert_equal(r1.__name__, r2.__name__)
def test_fb_span2period():
kwargs = {"period":1}
r1 = call_with(span_rule_factory, kwargs)
r2 = call_with(periodic_rule_factory, kwargs)
assert_equal(r1.__name__, r2.__name__)
def test_fb_pb2nbiter():
kwargs = {}
r1 = call_with(progressbar_formatter_factory, kwargs)
r2 = call_with(nb_iterations_formatter_factory, kwargs)
assert_equal(r1.__name__, r2.__name__)
def test_fb_remaining2elapsed():
kwargs = {}
r1 = call_with(remaining_time_formatter_factory, kwargs)
r2 = call_with(elapsed_time_formatter_factory, kwargs)
assert_equal(r1.__name__, r2.__name__)
``` |
{
"source": "jm-begon/pt_tools",
"score": 2
} |
#### File: pt_tools/datasets/compute_normalization.py
```python
from collections import defaultdict
import torch
from pt_inspector.stat import Stat
def compute_normalization(full_dataset, what="ls"):
collector = {}
ls, vs, ts = full_dataset.get_ls_vs_ts()
if what == "ls":
dataset = ls
elif what == "vs":
dataset = vs
elif what == "ts":
dataset = ts
else:
dataset = torch.utils.data.ConcatDataset([ls, vs, ts])
# dataset[0] is a pair (image tensor, label)
shape = dataset[0][0].size()
if len(shape) == 2:
# 1 channel
n_channels = 1
get_channel = (lambda x, _: x)
else:
n_channels = shape[0]
get_channel = (lambda x, i: x[i])
stats = [Stat() for _ in range(n_channels)]
hist_shape = defaultdict(int)
hist_cls = defaultdict(int)
n = 0
for n, t in enumerate(dataset):
x = t[0].numpy()
y = t[1]
if not isinstance(y, int):
try:
y = t[1].item()
except Exception:
y = int(t[1])
hist_cls[y] += 1
hist_shape[x.shape] += 1
for c in range(n_channels):
stats[c].add(get_channel(x, c))
collector["n_samples"] = n + 1
collector["hist_shape"] = hist_shape
collector["hist_cls"] = hist_cls
means = []
stds = []
for stat in stats:
m, s = stat.get_running()
means.append(m)
stds.append(s)
collector["means"] = tuple(means)
collector["stds"] = tuple(stds)
return collector
if __name__ == '__main__':
pass
```
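A self-contained sketch with a tiny fake dataset standing in for a real full dataset; only the `get_ls_vs_ts()` contract used above is reproduced, and the class name is made up for the example.
```python
import torch

class _FakeFullDataset:
    def get_ls_vs_ts(self):
        data = [(torch.rand(3, 8, 8), i % 2) for i in range(6)]
        return data, data, data

stats = compute_normalization(_FakeFullDataset(), what="ls")
print(stats["means"], stats["stds"])   # per-channel mean and std
```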
#### File: pt_tools/datasets/utils.py
```python
from torch.utils.data import DataLoader, ConcatDataset, Subset
def get_base_dataset(dataset):
if isinstance(dataset, DataLoader):
return get_base_dataset(dataset.dataset)
if isinstance(dataset, ConcatDataset):
return get_base_dataset(dataset.datasets[0])
if isinstance(dataset, Subset):
return get_base_dataset(dataset.dataset)
return dataset
def get_transform(dataset):
return get_base_dataset(dataset).transform
``` |
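A quick sketch showing how `get_base_dataset` unwraps nested loaders and subsets down to the underlying dataset; the `transform` attribute is set manually because `TensorDataset` does not have one.
```python
import torch
from torch.utils.data import DataLoader, Subset, TensorDataset

base = TensorDataset(torch.zeros(10, 3))
base.transform = None
wrapped = DataLoader(Subset(base, range(5)), batch_size=2)

assert get_base_dataset(wrapped) is base
assert get_transform(wrapped) is None
```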
{
"source": "jm-begon/randconv",
"score": 3
} |
#### File: randconv/randconv/classifier.py
```python
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "3-clause BSD License"
__date__ = "20 January 2015"
import numpy as np
import scipy.sparse as sps
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomTreesEmbedding
from progressmonitor import monitor_with
from ._compute_histogram import compute_histogram
class Classifier(object):
"""
==========
Classifier
==========
A :class:`Classifier` uses a :class:`Coordinator` to extract data from
an :class:`ImageBuffer` and feed it to a **scikit-learn base classifier**.
The :class:`Classifier` can take care of multiple feature vectors per
object.
"""
def __init__(self, coordinator, base_classifier):
"""
Construct a :class:`Classifier`
Parameters
----------
coordinator : :class:`Coordinator`
The coordinator responsible for the features extraction
base_classifier : scikit-learn classifier (:meth:`predict_proba`
required)
The learning algorithm which will classify the data
"""
self._classifier = base_classifier
self._coord = coordinator
self._classif2user_lut = []
self._user2classif_lut = {}
def _build_luts(self, y_user):
"""
Builds the lookup tables for converting user labels to/from
classifier label
Parameters
----------
y_user : list
the list of user labels
"""
user_labels = np.unique(y_user)
self._classif2user_lut = user_labels
self._user2classif_lut = {j: i for i, j in enumerate(user_labels)}
def _convert_labels(self, y_user):
"""
Convert labels from the user labels to the internal labels
Parameters
----------
y_user : list
list of user labels to convert into internal labels
Returns
-------
y_classif : list
the corresponding internal labels
"""
print self._user2classif_lut
return [self._user2classif_lut[x] for x in y_user]
def _convert_labels_back(self, y_classif):
"""
Convert labels back to the user labels
Parameters
----------
y_classif : list
list of internal labels to convert
Returns
-------
y_user : list
the corresponding user labels
"""
return [self._classif2user_lut[x] for x in y_classif]
@monitor_with("rc.func.RCClassifier", task_name="Learning the model")
def fit(self, image_buffer):
"""
Fits the data contained in the :class:`ImageBuffer` instance
Parameters
-----------
image_buffer : :class:`ImageBuffer`
The data to learn from
Return
-------
self : :class:`Classifier`
This instance
"""
#Updating the labels
y_user = image_buffer.get_labels()
self._build_luts(y_user)
#Extracting the features
X, y_user = self._coord.process(image_buffer, learning_phase=True)
#Converting the labels
y = self._convert_labels(y_user)
#Delegating the classification
self._classifier.fit(X, y)
#Cleaning up
self._coord.clean(X, y_user)
return self
def predict_predict_proba(self, image_buffer):
"""
Classify the data contained in the :class:`ImageBuffer` instance
Parameters
-----------
image_buffer : :class:`ImageBuffer`
The data to classify
Return
-------
pair : (y_proba, y_classif)
y_proba: list of list of float
each entry is the probability vector of the input of the same
index as computed by the base classifier
y_classif : a list of int
each entry is the classification label corresponding to the input
"""
y_prob = self.predict_proba(image_buffer)
y_classif = np.argmax(y_prob, axis=1)
return y_prob, self._convert_labels_back(y_classif)
@monitor_with("func.rc.Classifier", task_name="Classifying")
def predict(self, image_buffer):
"""
Classify the data contained in the :class:`ImageBuffer` instance
Parameters
-----------
image_buffer : :class:`ImageBuffer`
The data to classify
Return
-------
list : list of int
each entry is the classification label corresponding to the input
"""
_, y_classif = self.predict_predict_proba(image_buffer)
return y_classif
@monitor_with("func.rc.Classifier", task_name="Classifying")
def predict_proba(self, image_buffer):
"""
        Classify softly the data contained in the :class:`ImageBuffer`
        instance, i.e. yields a probability vector of belonging to each
        class
Parameters
-----------
image_buffer : :class:`ImageBuffer`
The data to classify
Return
-------
list : list of list of float
each entry is the probability vector of the input of the same
index as computed by the base classifier
"""
#Extracting the features
X_pred, y = self._coord.process(image_buffer, learning_phase=False)
#Cleaning up
self._coord.clean(y)
del y
y = self._predict_proba(X_pred, len(image_buffer))
#Cleaning up
self._coord.clean(X_pred)
del X_pred
return y
def _predict_proba(self, X_pred, nb_objects):
#Misc.
nb_factor = len(X_pred)/nb_objects
y = np.zeros((nb_objects, len(self._user2classif_lut)))
#Classifying the data
_y = self._classifier.predict_proba(X_pred)
for i in xrange(nb_objects):
y[i] = np.sum(_y[i * nb_factor:(i+1) * nb_factor], axis=0) / nb_factor
return y
def _predict(self, X_pred, nb_objects):
y_classif = np.argmax(self._predict_proba(X_pred, nb_objects), axis=1)
return self._convert_labels_back(y_classif)
def accuracy(self, y_pred, y_truth):
"""
        Compute the frequency of correspondence between the two vectors
Parameters
-----------
y_pred : list of int
The prediction by the model
y_truth : list of int
The ground truth
Return
-------
accuracy : float
the accuracy
"""
s = sum([1 for x, y in zip(y_pred, y_truth) if x == y])
return float(s)/len(y_truth)
def confusion_matrix(self, y_pred, y_truth):
"""
Compute the confusion matrix
Parameters
-----------
y_pred : list of int
The prediction by the model
y_truth : list of int
The ground truth
Return
-------
mat : 2D numpy array
The confusion matrix
"""
return confusion_matrix(y_truth, y_pred)
class UnsupervisedVisualBagClassifier(Classifier):
"""
===============================
UnsupervisedVisualBagClassifier
===============================
1. Unsupervised
2. Binary bag of words
3. Totally random trees
"""
def __init__(self, coordinator, base_classifier, n_estimators=10,
max_depth=5, min_samples_split=2, min_samples_leaf=1,
n_jobs=-1, random_state=None, verbose=0):
Classifier.__init__(self, coordinator, base_classifier)
self.histoSize = 0
self._visualBagger = RandomTreesEmbedding(n_estimators=n_estimators,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
@monitor_with("func.rc.UVBClassif", task_name="Extracting features")
def _preprocess(self, image_buffer, learning_phase):
X_pred, y = self._coord.process(image_buffer,
learning_phase=learning_phase)
y_user = self._convert_labels(y)
#Cleaning up
self._coord.clean(y)
del y
#Bag-of-word transformation
with monitor_with("code.rc.UVBClassif", taskname="Bag-of-word transformation"):
X2 = None
if learning_phase:
X2 = self._visualBagger.fit_transform(X_pred, y_user)
self.histoSize = X2.shape[1]
else:
X2 = self._visualBagger.transform(X_pred)
#Cleaning up
self._coord.clean(X_pred)
del X_pred
del y_user
nb_factor = X2.shape[0] // len(image_buffer)
if not sps.isspmatrix_csr(X2):
X2 = X2.tocsr()
if nb_factor == 1:
return X2
with monitor_with("code.rc.UVBClassif", taskname="Histogram"):
nbTrees = self._visualBagger.n_estimators
X3 = compute_histogram(len(image_buffer), nb_factor, nbTrees, X2)
#Cleaning up
del X2 # Should be useless
return X3
@monitor_with("func.rc.UVBClassif", task_name="Fitting histogram")
def fit_histogram(self, hist, y):
#Delegating the classification
self._classifier.fit(hist, y)
return self
def fit(self, image_buffer):
"""
Fits the data contained in the :class:`ImageBuffer` instance
Parameters
-----------
image_buffer : :class:`ImageBuffer`
The data to learn from
Return
-------
self : :class:`Classifier`
This instance
"""
#Updating the labels
y_user = image_buffer.get_labels()
self._build_luts(y_user)
y = self._convert_labels(y_user)
X = self._preprocess(image_buffer, learning_phase=True)
return self.fit_histogram(X, y)
def predict(self, image_buffer):
"""
Classify the data contained in the :class:`ImageBuffer` instance
Parameters
-----------
image_buffer : :class:`ImageBuffer`
The data to classify
Return
-------
list : list of int
each entry is the classification label corresponding to the input
"""
X = self._preprocess(image_buffer, learning_phase=False)
y_classif = self._classifier.predict(X)
return self._convert_labels_back(y_classif)
def predict_proba(self, image_buffer):
"""
        Classify softly the data contained in the :class:`ImageBuffer`
        instance, i.e. yields a probability vector of belonging to each
        class
Parameters
-----------
image_buffer : :class:`ImageBuffer`
The data to classify
Return
-------
list : list of list of float
each entry is the probability vector of the input of the same
index as computed by the base classifier
"""
if not hasattr(self._classifier, "predict_proba"):
#Early error
self._classifier.predict_proba(np.zeros((1, 1)))
X = self._preprocess(image_buffer, learning_phase=False)
return self._classifier.predict_proba(X)
```
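The label handling in `Classifier` boils down to two lookup structures built by `_build_luts`. This self-contained sketch reproduces the round trip between user labels and internal indices:
```python
import numpy as np

user_labels = ["cat", "dog", "cat", "bird"]
classif2user = np.unique(user_labels)                  # ['bird', 'cat', 'dog']
user2classif = {lbl: i for i, lbl in enumerate(classif2user)}

internal = [user2classif[lbl] for lbl in user_labels]   # [1, 2, 1, 0]
recovered = [classif2user[i] for i in internal]         # back to the user labels
assert recovered == user_labels
```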
#### File: randconv/randconv/coordinator_factory.py
```python
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "3-clause BSD License"
__date__ = "20 January 2015"
import math
from .image import *
from .util import (OddUniformGenerator, NumberGenerator,
CustomDiscreteNumberGenerator, GaussianNumberGenerator)
from .feature_extractor import ImageLinearizationExtractor, DepthCompressorILE
from .coordinator import (RandConvCoordinator, PyxitCoordinator)
class Const:
RND_RU = "RND_RU" # -1 (real uniform)
RND_SET = "RND_SET" # -2 (Discrete set with predifined probabilities)
RND_GAUSS = "RND_GAUSS" # (Gaussian distribution)
FGEN_ORDERED = "FGEN_ORDERED" # Ordered combination of others
FGEN_CUSTOM = "FGEN_CUSTOM" # Custom filters
FGEN_ZEROPERT = "FGEN_ZEROPERT" # Perturbation around origin
FGEN_IDPERT = "FGEN_IDPERT" # Perturbation around id filter
FGEN_IDDIST = "FGEN_IDDIST" # Maximum distance around id filter
FGEN_STRAT = "FGEN_STRAT" # Stratified scheme
POOLING_NONE = "POOLING_NONE" # 0
POOLING_AGGREG_MIN = "POOLING_AGGREG_MIN" # 1
POOLING_AGGREG_AVG = "POOLING_AGGREG_AVG" # 2
POOLING_AGGREG_MAX = "POOLING_AGGREG_MAX" # 3
POOLING_CONV_MIN = "POOLING_MW_MIN" # 4
POOLING_CONV_AVG = "POOLING_MW_AVG" # 5
POOLING_CONV_MAX = "POOLING_MW_MAX" # 6
POOLING_MORPH_OPENING = "POOLING_MORPH_OPENING" # 7
POOLING_MORPH_CLOSING = "POOLING_MORPH_CLOSING" # 8
FEATEXT_ALL = "FEATEXTRACT_ALL"
FEATEXT_SPASUB = "FEATEXTRACT_SPASUB"
def pyxit_factory(
nb_subwindows=10,
sw_min_size_ratio=0.5, sw_max_size_ratio=1.,
sw_target_width=16, sw_target_height=16,
fixed_size=False,
sw_interpolation=SubWindowExtractor.INTERPOLATION_BILINEAR,
n_jobs=-1, verbosity=10, temp_folder=None,
random=True):
"""
Factory method to create :class:`PyxitCoordinator`
Parameters
----------
nb_subwindows : int >= 0 (default : 10)
The number of subwindow to extract
sw_min_size_ratio : float > 0 (default : 0.5)
The minimum size of a subwindow expressed as the ratio of the size
of the original image
sw_max_size_ratio : float : sw_min_size_ratio
<= sw_max_size_ratio <= 1 (default : 1.)
        The maximum size of a subwindow expressed as the ratio of the size
of the original image
sw_target_width : int > 0 (default : 16)
The width of the subwindows after reinterpolation
sw_target_height : int > 0 (default : 16)
The height of the subwindows after reinterpolation
fixed_size : boolean (default : False)
        Whether to use fixed-size subwindows. If False, subwindow sizes are
        drawn randomly. If True, the target size is used as the subwindow size
        and only the position is drawn randomly
sw_interpolation : int (default :
SubWindowExtractor.INTERPOLATION_BILINEAR)
The subwindow reinterpolation algorithm. For more information, see
:class:`SubWindowExtractor`
n_jobs : int >0 or -1 (default : -1)
The number of process to spawn for parallelizing the computation.
If -1, the maximum number is selected. See also :mod:`Joblib`.
verbosity : int >= 0 (default : 10)
The verbosity level
temp_folder : string (directory path) (default : None)
The temporary folder used for memmap. If none, some default folder
will be use (see the :class:`ParallelCoordinator`)
random : bool (default : True)
Whether to use randomness or use a predefined seed
Return
------
coordinator : :class:`Coordinator`
The PyxitCoordinator (possibly decorated) corresponding to the set
of parameters
Notes
-----
- Subwindow random generator
The subwindow random generator is a :class:`NumberGenerator` base
        instance (generates real numbers uniformly).
- Feature extractor
Base instance of :class:`ImageLinearizationExtractor`
"""
swngSeed = 0
#Randomness
if random:
swngSeed = None
#SubWindowExtractor
swNumGenerator = NumberGenerator(seed=swngSeed)
if fixed_size:
sw_extractor = FixTargetSWExtractor(sw_target_width,
sw_target_height,
sw_interpolation,
swNumGenerator)
else:
sw_extractor = SubWindowExtractor(sw_min_size_ratio,
sw_max_size_ratio,
sw_target_width,
sw_target_height,
sw_interpolation,
swNumGenerator)
multi_sw_extractor = MultiSWExtractor(sw_extractor, nb_subwindows, True)
#FEATURE EXTRACTOR
feature_extractor = ImageLinearizationExtractor()
#LOGGER
autoFlush = verbosity >= 45
logger = ProgressLogger(StandardLogger(autoFlush=autoFlush,
verbosity=verbosity))
#COORDINATOR
coordinator = PyxitCoordinator(multi_sw_extractor, feature_extractor, logger,
verbosity)
if n_jobs != 1:
coordinator.parallelize(n_jobs, temp_folder)
return coordinator
def get_multi_poolers(poolings, finalHeight, finalWidth):
#Aggregator
poolers = []
for height, width, policy in poolings:
if policy is Const.POOLING_NONE:
poolers.append(IdentityPooler())
elif policy is Const.POOLING_AGGREG_AVG:
poolers.append(AverageAggregator(width, height,
finalWidth,
finalHeight))
elif policy is Const.POOLING_AGGREG_MAX:
poolers.append(MaximumAggregator(width, height,
finalWidth,
finalHeight))
elif policy is Const.POOLING_AGGREG_MIN:
poolers.append(MinimumAggregator(width, height,
finalWidth,
finalHeight))
elif policy is Const.POOLING_CONV_MIN:
poolers.append(FastMWMinPooler(height, width))
elif policy is Const.POOLING_CONV_AVG:
poolers.append(FastMWAvgPooler(height, width))
elif policy is Const.POOLING_CONV_MAX:
poolers.append(FastMWMaxPooler(height, width))
elif policy is Const.POOLING_MORPH_OPENING:
poolers.append(MorphOpeningPooler(height, width))
elif policy is Const.POOLING_MORPH_CLOSING:
poolers.append(MorphClosingPooler(height, width))
return MultiPooler(poolers)
def get_number_generator(genType, min_value, max_value, seed, **kwargs):
if genType is Const.RND_RU:
value_generatorerator = NumberGenerator(min_value, max_value, seed)
elif genType is Const.RND_SET:
probLaw = kwargs["probLaw"]
value_generatorerator = CustomDiscreteNumberGenerator(probLaw, seed)
elif genType is Const.RND_GAUSS:
if "outRange" in kwargs:
outRange = kwargs["outRange"]
value_generatorerator = GaussianNumberGenerator(min_value, max_value, seed,
outRange)
else:
value_generatorerator = GaussianNumberGenerator(min_value, max_value, seed)
return value_generatorerator
def get_filter_generator(policy, parameters, nb_filterss, random=False):
if policy == Const.FGEN_ORDERED:
#Parameters is a list of tuples (policy, parameters)
ls = []
subNbFilters = int(math.ceil(nb_filterss/len(parameters)))
for subPolicy, subParameters in parameters:
ls.append(get_filter_generator(subPolicy, subParameters,
subNbFilters, random))
return OrderedMFF(ls, nb_filterss)
if policy is Const.FGEN_CUSTOM:
print "Custom filters"
return custom_finite_3_same_filter()
#Parameters is a dictionary
valSeed = None
sizeSeed = None
shuffling_seed = None
perturbationSeed = None
cell_seed = None
sparseSeed = 5
if random:
valSeed = 1
sizeSeed = 2
shuffling_seed = 3
perturbationSeed = 4
cell_seed = 5
sparseSeed = 6
min_size = parameters["min_size"]
max_size = parameters["max_size"]
size_generatorerator = OddUniformGenerator(min_size, max_size, seed=sizeSeed)
min_val = parameters["min_val"]
max_val = parameters["max_val"]
value_generator = parameters["value_generator"]
value_generatorerator = get_number_generator(value_generator, min_val, max_val,
valSeed, **parameters)
normalization = None
if "normalization" in parameters:
normalization = parameters["normalization"]
if policy is Const.FGEN_ZEROPERT:
print "Zero perturbation filters"
baseFilterGenerator = FilterGenerator(value_generatorerator, size_generatorerator,
normalisation=normalization)
elif policy is Const.FGEN_IDPERT:
print "Id perturbation filters"
baseFilterGenerator = IdPerturbatedFG(value_generatorerator, size_generatorerator,
normalisation=normalization)
elif policy is Const.FGEN_IDDIST:
print "Id distance filters"
max_dist = parameters["max_dist"]
baseFilterGenerator = IdMaxL1DistPerturbFG(value_generatorerator, size_generatorerator,
max_dist,
normalisation=normalization,
shuffling_seed=shuffling_seed)
elif policy is Const.FGEN_STRAT:
print "Stratified filters"
nb_cells = parameters["strat_nb_cells"]
minPerturbation = 0
if "minPerturbation" in parameters:
minPerturbation = parameters["minPerturbation"]
maxPerturbation = 1
if "maxPerturbation" in parameters:
maxPerturbation = parameters["maxPerturbation"]
perturbationGenerator = get_number_generator(value_generator,
minPerturbation,
maxPerturbation,
perturbationSeed)
baseFilterGenerator = StratifiedFG(min_val, max_val, nb_cells,
perturbationGenerator,
size_generatorerator,
normalisation=normalization,
cell_seed=cell_seed)
if "sparse_proba" in parameters:
print "Adding sparcity"
sparse_proba = parameters["sparse_proba"]
baseFilterGenerator = SparsityDecoratorFG(baseFilterGenerator,
sparse_proba,
sparseSeed)
print "Returning filters"
return Finite3SameFilter(baseFilterGenerator, nb_filterss)
def get_feature_extractor(policy, **kwargs):
if policy is Const.FEATEXT_SPASUB:
nbCol = kwargs.get("nbCol", 2)
return DepthCompressorILE(nbCol)
else: # Suupose Const.FEATEXT_ALL
return ImageLinearizationExtractor()
#TODO : include in randconv : (Const.FEATEXT_ALL, {}), (Const.FEATEXT_SPASUB, {"nbCol":2})
def randconv_factory(
nb_filters=5,
filter_policy=(Const.FGEN_ZEROPERT,
{"min_size": 2, "max_size": 32, "min_val": -1, "max_val": 1,
"value_generator": Const.RND_RU,
"normalization": FilterGenerator.NORMALISATION_MEANVAR}),
poolings=[(3, 3, Const.POOLING_AGGREG_AVG)],
extractor=(Const.FEATEXT_ALL, {}),
nb_subwindows=10,
sw_min_size_ratio=0.5, sw_max_size_ratio=1.,
sw_target_width=16, sw_target_height=16,
sw_interpolation=SubWindowExtractor.INTERPOLATION_BILINEAR,
include_original_img=False,
n_jobs=-1, verbosity=10, temp_folder=None,
random=True):
"""
Factory method to create :class:`RandConvCoordinator` tuned for RGB images
Parameters
----------
    nb_filters : int >= 0 (default : 5)
        The number of filters
filter_policy : pair (policyType, parameters)
policyType : one of Const.FGEN_*
The type of filter generation policy to use
parameters : dict
            The parameter dictionary to forward to :func:`get_filter_generator`
poolings : iterable of triple (height, width, policy) (default :
[(3, 3, Const.POOLING_AGGREG_AVG)])
        A list of parameters to instantiate the corresponding :class:`Pooler`
height : int > 0
the height of the neighborhood window
width : int > 0
the width of the neighborhood window
policy : int in {Const.POOLING_NONE, Const.POOLING_AGGREG_MIN,
Const.POOLING_AGGREG_AVG, Const.POOLING_AGGREG_MAX,
Const.POOLING_CONV_MIN, Const.POOLING_CONV_AVG, Const.POOLING_CONV_MAX}
nb_subwindows : int >= 0 (default : 10)
The number of subwindow to extract
sw_min_size_ratio : float > 0 (default : 0.5)
The minimum size of a subwindow expressed as the ratio of the size
of the original image
sw_max_size_ratio : float : sw_min_size_ratio
<= sw_max_size_ratio <= 1 (default : 1.)
        The maximum size of a subwindow expressed as the ratio of the size
of the original image
sw_target_width : int > 0 (default : 16)
The width of the subwindows after reinterpolation
sw_target_height : int > 0 (default : 16)
The height of the subwindows after reinterpolation
sw_interpolation : int (default :
SubWindowExtractor.INTERPOLATION_BILINEAR)
The subwindow reinterpolation algorithm. For more information, see
:class:`SubWindowExtractor`
include_original_img : boolean (default : False)
Whether or not to include the original image in the subwindow
extraction process
n_jobs : int >0 or -1 (default : -1)
The number of process to spawn for parallelizing the computation.
If -1, the maximum number is selected. See also :mod:`Joblib`.
verbosity : int >= 0 (default : 10)
The verbosity level
temp_folder : string (directory path) (default : None)
The temporary folder used for memmap. If none, some default folder
will be use (see the :class:`ParallelCoordinator`)
random : bool (default : True)
Whether to use randomness or use a predefined seed
Return
------
coordinator : :class:`Coordinator`
The RandConvCoordinator corresponding to the
set of parameters
Notes
-----
- Filter generator
Base instance of :class:`Finite3SameFilter` with a base instance of
:class:`NumberGenerator` for the values and
:class:`OddUniformGenerator` for the sizes
- Filter size
The filter are square (same width as height)
- Convolver
Base instance of :class:`RGBConvolver`
- Subwindow random generator
The subwindow random generator is a :class:`NumberGenerator` base
        instance (generates real numbers uniformly).
- Feature extractor
Base instance of :class:`ImageLinearizationExtractor`
"""
#RANDOMNESS
swngSeed = None
if random is False:
swngSeed = 0
#CONVOLUTIONAL EXTRACTOR
#Filter generator
#Type/policy parameters, #filters, random
filter_policyType, filter_policyParam = filter_policy
filter_generator = get_filter_generator(filter_policyType, filter_policyParam,
nb_filters, random)
#Convolver
convolver = RGBConvolver()
#Aggregator
multi_pooler = get_multi_poolers(poolings, sw_target_height,
sw_target_width)
#SubWindowExtractor
swNumGenerator = NumberGenerator(seed=swngSeed)
sw_extractor = SubWindowExtractor(sw_min_size_ratio,
sw_max_size_ratio,
sw_target_width,
sw_target_height,
sw_interpolation, swNumGenerator)
multi_sw_extractor = MultiSWExtractor(sw_extractor, nb_subwindows, False)
#ConvolutionalExtractor
convolutional_extractor = ConvolutionalExtractor(filter_generator,
convolver,
multi_sw_extractor,
multi_pooler,
include_original_img)
#FEATURE EXTRACTOR
feature_extractor = get_feature_extractor(extractor[0], **extractor[1])
#COORDINATOR
coordinator = RandConvCoordinator(convolutional_extractor, feature_extractor)
if n_jobs != 1:
coordinator.parallelize(n_jobs, temp_folder)
return coordinator
```
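A hedged sketch of calling the factory: it only uses parameters visible in the signature above and simply builds a coordinator; feeding it an image buffer and a scikit-learn estimator, as done by `Classifier`, is left out.
```python
coordinator = randconv_factory(
    nb_filters=10,
    poolings=[(2, 2, Const.POOLING_AGGREG_AVG)],
    nb_subwindows=5,
    sw_target_width=16, sw_target_height=16,
    n_jobs=1,
    random=True)
```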
#### File: randconv/image/convolutional_extractor.py
```python
import numpy as np
try:
import Image
except ImportError:
from PIL import Image
from .numpy_pil_converter import NumpyPILConvertor
__all__ = ["ConvolutionalExtractor"]
class ConvolutionalExtractor:
"""
======================
ConvolutionalExtractor
======================
    A :class:`ConvolutionalExtractor` extracts features from images. It
    proceeds in 3 steps :
    1. Filtering
        It uses a :class:`FiniteFilter` to generate filters. Those filters
are then applied by a :class:`Convolver` to the given image thus
creating several new images (one per filter). Let us call them
*image2*.
2. Pooling
        Each new *image2* is aggregated by an :class:`Aggregator`, yielding one
        image (let us call it *image3*) per processed *image2*.
3. Subwindow extraction
On each *image3* the same subwindows are extracted giving the set
of *image4*. This set contains nb_filter*nb_subwindow images
Note
----
    - Compatibility
The :class:`FiniteFilter` and the :class:`Convolver` must be compatible
with the kind of image provided !
Example : the image is a RGB PIL image or RGB numpy array with a
:class:`RGBConvolver` and a :class:`Finite3Filter`
- Image representation
See :mod:`ImageBuffer` for more information
- It is also possible to include the original image in the process
"""
def __init__(self, finite_filter, convolver, multi_sw_extractor, multi_pooler,
include_original_image=False):
"""
Construct a :class:`ConvolutionalExtractor`
Parameters
----------
finite_filter : :class:`FiniteFilter`
The filter generator and holder
convolver : :class:`Convolver`
The convolver which will apply the filter. Must correspond with
the filter generator and the image type
        multi_sw_extractor : :class:`MultiSWExtractor`
            The subwindow extractor applied to each pooled image
        multi_pooler : :class:`MultiPooler`
            The :class:`MultiPooler` which will carry out the spatial poolings
            **Note** : the spatial poolings must produce outputs of the same
shape !
include_original_image : boolean (default : False)
Whether or not to include the original image for the subwindow
extraction part
"""
self._finite_filter = finite_filter
self._convolver = convolver
self._sw_extractor = multi_sw_extractor
self._multi_pooler = multi_pooler
self._include_image = include_original_image
def extract(self, image):
"""
Extract feature from the given image
Parameters
----------
image : :class:`PIL.Image` or preferably a numpy array
The image to process
Return
------
        all_subwindows : a list of lists of subwindows
            The element e[i][j] is a numpy array corresponding to the ith
            subwindow of the jth filter.
            If the original image is included, it corresponds to the first
(0th) filter.
"""
#Converting image in the right format
convertor = NumpyPILConvertor()
image = convertor.pil2numpy(image)
filtered = []
#Including the original image if desired
if self._include_image:
pooled_ls = self._multi_pooler.multipool(image)
for pooled in pooled_ls:
filtered.append(pooled)
#Applying the filters & Aggregating
for filt in self._finite_filter:
#Filtering
npTmp = self._convolver(image, filt)
#Aggregating
pooled_ls = self._multi_pooler.multipool(npTmp)
for pooled in pooled_ls:
filtered.append(pooled)
#Refreshing the boxes
shape = filtered[0].shape
self._sw_extractor.refresh(shape[1], shape[0]) # width, height
#Extracting the subwindows
nb_filterss = len(self._finite_filter)
nbSubWindow = len(self._sw_extractor)
nbPoolers = len(self._multi_pooler)
nbImageFactor = nb_filterss*nbPoolers
if self._include_image:
nbImageFactor += nbPoolers
all_subwindows = [[0] * nbImageFactor for i in xrange(nbSubWindow)]
for col, numpies in enumerate(filtered):
#converting image to the right format
img = convertor.numpy2pil(numpies)
#Extracting the subwindows s.s.
subwindows = self._sw_extractor.extract(img)
for row in xrange(nbSubWindow):
all_subwindows[row][col] = convertor.pil2numpy(subwindows[row])
return all_subwindows
def get_filters(self):
"""
Return the filters used to process the image
Return
------
filters : iterable of numpy arrays
The filters used to process the image, with the exclusion
of the identity filter if the raw image was included
"""
return self._finite_filter
def get_poolers(self):
"""
Return
------
multi_pooler : class:`MultiPooler`
The poolers
"""
return self._multi_pooler
def is_image_included(self):
"""
Whether the raw image was included
Return
------
isIncluded : boolean
True if the raw image was included
"""
return self._include_image
def get_nb_subwindows(self):
"""
Return the number of subwindows extracted
"""
return self._sw_extractor.nb_subwidows()
def get_final_size_per_subwindow(self):
return self._sw_extractor.get_final_size()
```
#### File: randconv/image/filter_holder.py
```python
import numpy as np
from math import sqrt
from randconv.image import Finite3SameFilter
__all__ = ["custom_filters", "custom_finite_3_same_filter"]
def shape2D(squareIterable, normalisation=None):
"""
Return the corresponding 2D filter
    Parameters
    ----------
    squareIterable : iterable of numbers whose length is a square integer
the 1D form of the filter
normalization : (default : None)
TODO XXX
Return
------
filt : 2D numpy array
The 2D filter
Example
-------
>>> sobelHz = [-1,0,1,-2,0,2,-1,0,1]
>>> shape2D(sobelHz) # doctest: +SKIP
array([[-1., 0., 1.],
[-2., 0., 2.],
[-1., 0., 1.]])
"""
size = int(sqrt(len(squareIterable)))
if size*size != len(squareIterable):
raise ValueError("The length of the iterable must be a square integer")
filt = np.zeros((size, size))
for i, val in enumerate(squareIterable):
x = i // size
y = i % size
filt[x][y] = val
return filt
def custom_filters():
filters = []
centralEmphasis = shape2D([0.075, 0.125, 0.075, 0.125, 0.2, 0.125,
0.075, 0.125, 0.075])
filters.append(centralEmphasis)
#discrete, two-dimensional gaussian 5x5 (which stdev ?)
gauss5x5 = shape2D([0, 1, 2, 1, 0, 1, 3, 5, 3, 1, 2, 5, 9, 5, 2, 1, 3,
5, 3, 1, 0, 1, 2, 1, 0])
filters.append(gauss5x5)
#Derivative
sobelHz = shape2D([-1, 0, 1, -2, 0, 2, -1, 0, 1])
filters.append(sobelHz)
sobelV = shape2D([1, 2, 1, 0, 0, 0, -1, -2, -1])
filters.append(sobelV)
laplaceIso = shape2D([0, 1, 0, 1, -4, 1, 0, 1, 0])
filters.append(laplaceIso)
laplaceFullIso = shape2D([1, 1, 1, 1, -8, 1, 1, 1, 1])
filters.append(laplaceFullIso)
bigLaplace = shape2D([0, 0, -1, 0, 0, 0, -1, -2, -1, 0, -1, -2, 16, -2, -1,
0, -1, -2, -1, 0, 0, 0, -1, 0, 0])
filters.append(bigLaplace)
bigLaplace2 = shape2D([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 24, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1])
filters.append(bigLaplace2)
#Inverted
laplaceIsoInv = shape2D([0, -1, 0, -1, 4, -1, 0, -1, 0])
filters.append(laplaceIsoInv)
    laplaceFullIsoInv = shape2D([-1, -1, -1, -1, 8, -1, -1, -1, -1])
filters.append(laplaceFullIsoInv)
#Prewitt
prewittHz = shape2D([-1, 0, 1, -1, 0, 1, -1, 0, 1])
filters.append(prewittHz)
prewittV = shape2D([-1, -1, -1, 0, 0, 0, 1, 1, 1])
filters.append(prewittV)
#Oriented edges
hzEdges = shape2D([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, -1, 2, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0])
filters.append(hzEdges)
vEdges = shape2D([0, 0, -1, 0, 0, 0, 0, -1, 0, 0, 0, 0, 4, 0, 0, 0, 0, -1,
0, 0, 0, 0, -1, 0, 0])
filters.append(vEdges)
plus45Edges = shape2D([-1, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 6, 0, 0, 0,
0, 0, -2, 0, 0, 0, 0, 0, -1])
filters.append(plus45Edges)
minus45Edges = shape2D([0, 0, 0, 0, -1, 0, 0, 0, -2, 0, 0, 0, 6, 0, 0, 0,
-2, 0, 0, 0, -1, 0, 0, 0, 0])
filters.append(minus45Edges)
plus45Edges3x3 = shape2D([1, 1, 1, -1, 0, 1, -1, -1, 0])
# TODO XXX non-symetric ?
filters.append(plus45Edges3x3)
minus45Edges3x3 = shape2D([1, 1, 1, 1, 0, -1, 1, -1, -1])
filters.append(minus45Edges3x3)
#Frequencies
lowPass = shape2D([0.25, 0.5, 0.25, 0.5, 1, 0.5, 0.25, 0.5, 0.25])
filters.append(lowPass)
highPass = shape2D([1, -2, 1, -2, 5, -2, 1, -2, 1])
filters.append(highPass)
highPassMean = shape2D([-1, -1, -1, -1, 9, -1, -1, -1, -1])
filters.append(highPassMean)
#Compass gradient masks
northCGM = shape2D([1, 1, 1, 1, -2, 1, -1, -1, -1])
filters.append(northCGM)
northeastCGM = shape2D([1, 1, 1, -1, -2, 1, -1, -1, 1])
filters.append(northeastCGM)
eastCGM = shape2D([-1, 1, 1, -1, -2, 1, -1, 1, 1])
filters.append(eastCGM)
southeastCGM = shape2D([-1, -1, 1, -1, -2, 1, 1, 1, 1])
filters.append(southeastCGM)
southCGM = shape2D([-1, -1, -1, 1, -2, 1, 1, 1, 1])
filters.append(southCGM)
southwestCGM = shape2D([-1, -1, -1, 1, -2, 1, 1, 1, 1])
filters.append(southwestCGM)
westCGM = shape2D([-1, 1, -1, 1, -2, -1, 1, 1, -1])
filters.append(westCGM)
northwestCGM = shape2D([1, 1, 1, 1, -2, -1, 1, -1, -1])
filters.append(northwestCGM)
#log
logM = shape2D([0, 0, 1, 0, 0, 0, 1, 2, 1, 0, 1, 2, -16, 2, 1, 0, 1, 2, 1,
0, 0, 0, 1, 0, 0])
filters.append(logM)
#application of log and laplacian
logLaplacianed = shape2D([0, 0, 1, 1, 1, 0, 0, 0, 1, 4, -4, 4, 1, 0, 1,
4, -18, -25, -18, 4, 1, 1, -4, -25, 140, -25,
-4, 1, 1, 4, -18, -25, -18, 4, 1, 0, 1, 4, -4,
4, 1, 0, 0, 0, 1, 1, 1, 0, 0])
filters.append(logLaplacianed)
#Misc.
#--jahne, pratt
misc1 = shape2D([1, -2, 1, -2, 4, -2, 1, -2, 1])
filters.append(misc1)
misc2 = shape2D([1, 1, 1, 1, -7, 1, 1, 1, 1])
filters.append(misc2)
misc3 = shape2D([0, -1, 0, -1, 5, -1, 0, -1, 0])
filters.append(misc3)
#--lines chittineni
misc4 = shape2D([-1, -1, -1, 2, 2, 2, -1, -1, -1])
filters.append(misc4)
misc5 = shape2D([-1, -1, 2, -1, 2, -1, 2, -1, -1])
filters.append(misc5)
misc6 = shape2D([-1, 2, -1, -1, 2, -1, -1, 2, -1])
filters.append(misc6)
misc7 = shape2D([2, -1, -1, -1, 2, -1, -1, -1, 2])
filters.append(misc7)
return filters
def custom_finite_3_same_filter():
return Finite3SameFilter(custom_filters())
if __name__ == "__main__":
cuFilt = custom_finite_3_same_filter()
print len(cuFilt)
```
#### File: randconv/image/image_converter.py
```python
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "3-clause BSD License"
__version__ = 'dev'
from abc import ABCMeta, abstractmethod
import numpy as np
try:
import Image
except ImportError:
from PIL import Image
import cv
def to_numpy(image):
if isinstance(image, np.ndarray):
return image
return np.asarray(image[:])
class OverflowManager:
"""
===============
OverflowManager
===============
    An :class:`OverflowManager` is responsible for managing values overflowing
    the range [0, 255]. This base class does nothing (and therefore leaves
the default mechanism unchanged, probably letting values wrap around).
"""
def manage(self, image):
"""
Enforces the overflow policy
Parameters
----------
image : numpy.ndarray
The image to process
Return
------
processed_image : numpy.ndarray
The processed image
"""
return image
def __call__(self, image):
"""
Enforces the overflow policy
Parameters
----------
image : numpy.ndarray
The image to process
Return
------
processed_image : numpy.ndarray
The processed image
"""
return self.manage(image)
class ClipOverflow(OverflowManager):
"""
=============
ClipOverflow
=============
    This class thresholds the exceeding values. That is, values below 0 are
forced to 0 and values greater than 255 are pushed down to 255.
"""
def manage(self, image):
return image.clip(0, 255)
class MaxoutOverflow(OverflowManager):
def __init__(self, dtype=np.uint8):
try:
info = np.iinfo(dtype)
except ValueError:
info = np.finfo(dtype)
self._min = info.min
self._max = info.max
def manage(self, array):
return array.clip(self._min, self._max)
class HistogramEqualizer(OverflowManager):
"""
==================
HistogramEqualizer
==================
    This class performs a histogram equalization so as to ensure the correct
[0,255] range for every color channel
"""
def __init__(self):
raise NotImplementedError("This class is not yet implemented")
def manage(self, image):
raise NotImplementedError("This class is not yet implemented")
class ImageConverter:
"""
==============
ImageConverter
==============
An :class:`ImageConverter` converts images to another internal image
representation.
If you need a default exchange format, numpy is probably a good choice.
"""
__metaclass__ = ABCMeta
@abstractmethod
def convert(self, image):
"""
Converts, if need be, the given images into another representation
specified by the class policy
Parameters
----------
image :
a supported image representation
"""
pass
class NumpyConverter(ImageConverter):
"""
==============
NumpyConverter
==============
An :class:`ImageConverter` which converts images to numpy 2D arrays.
The supported types are PIL images and openCV images.
Constructor parameters
----------------------
None.
"""
def convert(self, image):
return to_numpy(image)
class PILConverter(ImageConverter):
"""
============
PILConverter
============
An :class:`ImageConverter` which converts images to PIL images.
The supported types are numpy arrays and openCV images.
    Note : PIL images work on the range [0, 255] and not with real values.
A :class:`OverflowManager` is necessary to enforce a policy for the
overflowing values. The default policy is to clip exceeding values.
Constructor parameters
----------------------
overflow_manager : OverflowManager (default : :class:`ClipOverflow`)
the management policy for the overflow
"""
def __init__(self, overflow_manager=ClipOverflow()):
self.overflow_manager = overflow_manager
def convert(self, image):
if isinstance(image, Image.Image):
return image
np_image = to_numpy(image)
np_corrected = self.overflow_manager.manage(np_image)
return Image.fromarray(np.uint8(np_corrected))
class CVConverter(ImageConverter):
"""
    ===========
    CVConverter
    ===========
An :class:`ImageConverter` which converts images to openCV images.
The supported types are numpy arrays and PIL images.
Class constants
---------------
CV : int
a constant representing the cv version
CV2 : int
a constant representing the cv2 version
Constructor parameters
----------------------
version : int in {CV, CV2} (default : CV)
The version of openCV array to convert to
"""
CV = 1
CV2 = 2
def __init__(self, version=CV):
self.version = version
def convert(self, image):
if self.version == CVConverter.CV2:
return to_numpy(image)
if isinstance(image, cv.cvmat):
return image
return cv.fromarray(to_numpy(image))
``` |
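A minimal sketch of the converters above: a float numpy image with values well outside [0, 255] is clipped and turned back into a PIL image.
```python
import numpy as np

noisy = np.random.randn(8, 8, 3) * 200          # values well outside [0, 255]
pil_img = PILConverter(ClipOverflow()).convert(noisy)
print(pil_img.size, pil_img.mode)               # (8, 8) 'RGB'
```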
{
"source": "jm-begon/remote_control",
"score": 3
} |
#### File: remote_control/python/ser_com.py
```python
from functools import partial
from enum import Enum
import logging
import serial
class Protocol(Enum):
UNKNOWN = 0
NEC = 1
SONY = 2
NECX = 7
NUM_N_BYTES = 4
class Ack(object):
def __init__(self, label, nac_info=None):
self.label = label
self.nac_info = nac_info
def __bool__(self):
return self.nac_info is None
def __repr__(self):
return "{}({})".format(self.__class__.__name__,
repr(self.label),
repr(self.nac_info))
def __str__(self):
if self:
return "[ACK] {}".format(self.label)
return "[NAC] {} -- Reason: {}".format(self.label, self.nac_info)
class Message(object):
@property
def label(self):
return self.__class__.__name__
def _write(self, channel, *bytes):
channel.write(bytes)
def _n2b(self, number, length=NUM_N_BYTES):
bytes = number.to_bytes(length, "big")
check = (sum(bytes) % 256).to_bytes(length, "big")
return bytes, check
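# Worked example (illustrative numbers): _n2b(0xE0E040BF) yields the big-endian
# bytes (0xE0, 0xE0, 0x40, 0xBF) and a checksum of
# (0xE0 + 0xE0 + 0x40 + 0xBF) % 256 = 0xBF, itself encoded on `length` bytes.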
def _read_check(self, channel, *bytes, label=None):
ack = partial(Ack, label=self.label if label is None else label)
for i, check_byte in enumerate(bytes):
ans = channel.read(1)
if len(ans) == 0:
return ack(nac_info="Error with {}th byte. Timeout?".format(i))
if ans != check_byte:
return ack(nac_info="Error with {}th byte. Expecting {}, got {}"
"".format(i, check_byte, ans))
return ack()
def send_through(self, channel):
yield Ack("Abstract message")
class Handshake(Message):
def send_through(self, channel):
# Send 'syn'
# Receive 'synack'
# Send 'ack'
self._write(channel, *b'syn')
yield self._read_check(channel, *b'synack')
self._write(channel, *b'ack')
class Command(Message):
def __init__(self, code, protocol, size=None):
self.code = code
self.protocol = protocol
self.size = size
def send_through(self, channel):
# Send protocol
# Receive protocol
b_protocol, check = self._n2b(self.protocol, length=1)
self._write(channel, *b_protocol)
p_ack = self._read_check(channel, *check,
label="Communicating protocol")
yield p_ack
# Send size
# Receive size
b_size, check = self._n2b(self.size, length=1)
self._write(channel, *b_size)
s_ack = self._read_check(channel, *check,
label="Communicating size")
yield s_ack
# Send code
bytes, check = self._n2b(self.code)
self._write(channel, *bytes)
c_ack = self._read_check(channel, *check,
label="Giving command")
yield c_ack
# Confirming
if p_ack and s_ack and c_ack:
self._write(channel, *b'ok')
yield self._read_check(channel, *b'sent')
class RemoteController(object):
count = 0
@classmethod
def gen_name(cls):
c = cls.count
cls.count += 1
return "RC{}".format(c)
def __init__(self, port, baudrate=9600, timeout=1, name=None,
fail_fast=True):
# self.con_factory = partial(serial.Serial, port=port, baudrate=baudrate,
# timeout=timeout, writeTimeout=timeout)
self.con_factory = partial(PseudoChannel, port=port, baudrate=baudrate,
timeout=timeout, writeTimeout=timeout)
self.connection = None
name = self.__class__.gen_name() if name is None else name
self.logger = logging.getLogger(name)
self.fail_fast = fail_fast
def __enter__(self):
self.connection = self.con_factory()
self.connection.open()
self.send(Handshake(), fail_fast=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.connection is not None:
self.connection.close()
self.connection = None
def send(self, message, fail_fast=False):
fail_fast = self.fail_fast or fail_fast
if not self.connection or not self.connection.isOpen():
raise ConnectionError(repr(self.connection))
for ack in message.send_through(self.connection):
log_to = self.logger.debug if ack else self.logger.warning
log_to(str(ack))
if fail_fast and not ack:
raise IOError(str(ack))
class PseudoChannel(object):
def __init__(self, port, baudrate, timeout, writeTimeout):
self.opened = False
self.port = port
self.baudrate = baudrate
self.timeout = timeout
self.writeTimeout = writeTimeout
self.historic = []
self.read_buffer = []
self.logger = logging.getLogger("Pseudo channel")
def __repr__(self):
return "{}(port={}, baudrate={}, timeout={}, writeTimeout={}).{}" \
"".format(self.__class__.__name__,
repr(self.port),
repr(self.baudrate),
repr(self.timeout),
repr(self.writeTimeout),
"open()" if self.opened else "close()")
def open(self):
self.opened = True
def close(self):
self.opened = False
def isOpen(self):
return self.opened
def write(self, data):
self.historic.append(data)
print("[W]", data)
self.logger.info("[W] {}".format(data))
if data == b'syn':
self.logger.info("[W] SYN")
self.read_buffer.append(b'synack')
elif data == b'ok':
self.logger.info("[W] OK")
self.read_buffer.append(b'sent')
else:
self.logger.info("[W] byte")
num = int.from_bytes(data, "big")
q = num
check = 0
while q >= 256:
q, r = int(q / 256), q % 256
check += r
check += q
self.read_buffer.append(check)
def read(self, n):
head, tail = self.read_buffer[:n], self.read_buffer[n:]
self.read_buffer = tail
self.logger.info("[R] {}".format(head))
return head
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
with RemoteController('COM1') as controller:
controller.send(Command(3772793023, Protocol.NECX))
``` |
{
"source": "jmbhughes/TapeLanguage",
"score": 4
} |
#### File: jmbhughes/TapeLanguage/TapeLanguage.py
```python
import argparse
class TapeInterpreter:
''' Inspired by Williams College's Breph language, this is an
interpreter for a simple programming language with only a simple tape of one byte values, a data pointer
that can move left and right, an instruction pointer to move through input code, and 8 operations:
+ : increase the data value on the array
- : decrement the data value on the array
( : begin a loop if the data pointer is nonzero
) : end of a loop
l : move the data pointer to the left one position; if at the start of the data array, stay put
r : move the data pointer to the right one position, if at the end of the data stay put
I : take one character of input
p : print the character corresponding to the ascii value at the data pointer
'''
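# Illustrative example (not from the original source): the program "+++p"
# increments the first cell three times and then prints chr(3); "(" and ")"
# behave like Brainfuck's "[" and "]" loop brackets.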
def __init__(self, code, inputs, array_size=1024, max_iterations=1024, verbose=False):
self.verbose = verbose
self._max_iterations = max_iterations
self._instruction_pointer = 0
self._data_pointer = 0
self._iteration = 0
self._data = [0]*array_size
self.input = inputs
self.output = ""
self.code = code
if len(self.code) == 0:
raise Exception("Code is empty")
if not self._check_parentheses():
raise Exception("Code has unmatched parentheses")
self._paren_matching = self._find_parentheses()
if self.verbose:
print("Passed parentheses matching:")
print(self._paren_matching)
def interpret(self):
''' runs the code up to the maximum iterations
returns true if program terminated'''
if self.verbose:
print("Beginning run with maximum iterations as {}".format(self._max_iterations))
while self._iteration < self._max_iterations:
self._iteration += 1
if self._step():
if self.verbose:
print("Finished successfully")
return True
if self.verbose:
print("Did not finish in iterations")
return False # did not finish in iterations
def _step(self):
''' Execute the next command of the program
returns true if the program is finished running naturally'''
mapping = {'+':self._oper_plus,
'-':self._oper_minus,
'l':self._oper_left,
'r':self._oper_right,
'i':self._oper_input,
'p':self._oper_print,
"(":self._oper_lparen,
")":self._oper_rparen
}
current_instruction = self.code[self._instruction_pointer]
if self.verbose:
print("iter={}, Performing {}".format(self._iteration, current_instruction))
if current_instruction in mapping:
mapping[current_instruction]() # execute command
else:
self._oper_ignore()
return self._instruction_pointer >= len(self.code)
def _oper_ignore(self):
''' ignore whatever character is under the instruction pointer '''
self._instruction_pointer += 1
def _oper_lparen(self):
''' operation definition for ('''
if self._data[self._data_pointer] == 0:
self._instruction_pointer = self._paren_matching[self._instruction_pointer] + 1
else:
self._instruction_pointer += 1
def _oper_rparen(self):
''' operation definition for ) '''
if self._data[self._data_pointer] != 0:
self._instruction_pointer = self._paren_matching[self._instruction_pointer] + 1
else:
self._instruction_pointer += 1
def _oper_plus(self):
''' operation definition for + '''
self._data[self._data_pointer] += 1
self._instruction_pointer += 1
def _oper_minus(self):
''' operation definition for -'''
self._data[self._data_pointer] -= 1
self._instruction_pointer += 1
def _oper_left(self):
''' operation definition for l'''
self._data_pointer -= 1
if self._data_pointer < 0:
self._data_pointer = 0
self._instruction_pointer += 1
def _oper_right(self):
''' operation definition for r'''
self._data_pointer += 1
if self._data_pointer >= len(self._data):
self._data_pointer = len(self._data) - 1
self._instruction_pointer += 1
def _oper_print(self):
''' operation definition for p,
if number is outside ascii definition adds null to output'''
try:
c = chr(self._data[self._data_pointer])
except:
c = '\0'
self.output += c
self._instruction_pointer += 1
def _oper_input(self):
''' operation definition for I,
if no input remains places a null character '''
if len(self.input) > 0:
self._data[self._data_pointer] = self.input.pop(0)
else:
self._data[self._data_pointer] = '\0'
self._instruction_pointer += 1
def _check_parentheses(self):
""" Return True if the parentheses in code match, otherwise False.
Modified from https://scipython.com/blog/parenthesis-matching-in-python/
"""
j = 0
for c in self.code:
if c == ')':
j -= 1
if j < 0:
return False
elif c == '(':
j += 1
return j == 0
def _find_parentheses(self):
""" Find and return the location of the matching parentheses pairs in code.
Given code as a string return a dictionary of start: end pairs giving the
indexes of the matching parentheses. Suitable exceptions are
raised if code contains unbalanced parentheses.
Modified from https://scipython.com/blog/parenthesis-matching-in-python/
"""
# The indexes of the open parentheses are stored in a stack, implemented
# as a list
stack = []
parentheses_locs = {}
for i, c in enumerate(self.code):
if c == '(':
stack.append(i)
elif c == ')':
try:
parentheses_locs[stack.pop()] = i
except IndexError:
raise IndexError('Too many close parentheses at index {}'.format(i))
if stack:
raise IndexError('No matching close parenthesis to open parenthesis at index {}'.format(stack.pop()))
full_parentheses_locs = dict()
for i,j in parentheses_locs.items():
full_parentheses_locs[i] = j
full_parentheses_locs[j] = i
return full_parentheses_locs
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("program", help="path for program")
parser.add_argument("--verbose","-v", action='store_true', help="provides helpful feedback")
args = vars(parser.parse_args())
with open(args['program']) as f:
lines = f.readlines()
inputs = [ord(c) for c in list(lines[0][:-1]) + ['\0']]
code = "".join(lines[1:])
interpreter = TapeInterpreter(code, inputs, verbose=args['verbose'])
if interpreter.interpret():
print("output:",interpreter.output)
else:
print("***PROGRAM FAILED***")
if args['verbose']:
print("Full data array:")
print(interpreter._data)
print("Data pointer at {}".format(interpreter._data_pointer))
``` |
{
"source": "jmbjorndalen/aPyCSP_lockver",
"score": 2
} |
#### File: aPyCSP_lockver/test/net_commstime_client.py
```python
import common
import apycsp
import apycsp.net
import asyncio
from apycsp.plugNplay import *
from apycsp import run_CSP
import time
args = common.handle_common_args([
(("-s", "--serv"), dict(help="specify server as host:port (use multiple times for multiple servers)", action="append", default=[]))
])
if len(args.serv) < 1:
apycsp.net.setup_client()
else:
for sp in args.serv:
apycsp.net.setup_client(sp)
loop = asyncio.get_event_loop()
@process
async def consumer(cin):
"Commstime consumer process"
#N = 5000
N = 500
ts = time.time
t1 = ts()
await cin()
t1 = ts()
for i in range(N):
await cin()
t2 = ts()
dt = t2-t1
tchan = dt / (4 * N)
print("DT = %f.\nTime per ch : %f/(4*%d) = %f s = %f us" % \
(dt, dt, N, tchan, tchan * 1000000))
print("consumer done, poisoning channel")
await cin.poison()
return tchan
def CommsTimeBM():
# Get access to remote channels.
# TODO: we can only run this benchmark once before we need to restart the server side
# as we will need to re-create the channels between each benchmark run (the first run will poison them).
a = apycsp.net.get_channel_proxy_s("a")
b = apycsp.net.get_channel_proxy_s("b")
c = apycsp.net.get_channel_proxy_s("c")
d = apycsp.net.get_channel_proxy_s("d")
print("Running commstime test")
# Rather than pass the objects and get the channel ends wrong, or doing complex
# addons like in csp.net, i simply pass the write and read functions as channel ends.
# Note: c.read.im_self == c, also check im_func, im_class
rets = run_CSP(Prefix(c.read, a.write, prefixItem = 0), # initiator
Delta2(a.read, b.write, d.write), # forwarding to two
Successor(b.read, c.write), # feeding back to prefix
consumer(d.read)) # timing process
return rets[-1]
time = CommsTimeBM()
``` |
{
"source": "jmbjorndalen/pycsp_classic",
"score": 3
} |
#### File: pycsp_classic/test/net_t2.py
```python
from common import *
from pycsp import *
from pycsp.plugNplay import *
from pycsp.net import *
@process
def test1():
print("Test1")
waitForSignal()
c = getNamedChannel("foo1")
print("- Trying to write to channel")
print("-", c.write("I'm here"))
print("- Trying next write (should be poisoned)")
c.write("I'm here")
print("---poison failed !!!!!")
@process
def test2():
print("Test2")
waitForSignal()
c = getNamedChannel("foo2")
print("- Trying to write to channel")
c.write("I'm here")
time.sleep(2)
print("- poisoning channel method")
time.sleep(1)
poisonChannel(c.read)
@process
def test3():
print("Test3")
waitForSignal()
ca = getNamedChannel("foo3a")
cb = getNamedChannel("foo3b")
print("- Trying to write to channel")
ca.write("I'm here")
print("- Trying to use Alt on channel b")
alt = Alternative(cb.read)
ret = alt.select()
print("- returned from alt.select():", ret)
print("- reading :", ret())
print("- Done")
def waitForSignal():
"Waits until the other side has registered its channels"
global ctrl
ctrl.read()
ctrl = getNamedChannel("foo")
Sequence(test1())
Sequence(test2())
Sequence(test3())
ctrl.read()
print("all tests done")
time.sleep(1)
``` |
{
"source": "jmblake/michelin-api",
"score": 3
} |
#### File: michelin-api/api/utils.py
```python
def simplify_edition_name(edition_name):
"""
The first two words in the edition name are always "MICHELIN Guide".
Remove those, make all characters lower case, and replace spaces with
hyphens to simplify the URIs.
:param edition_name:
:return:
"""
simplified_name = '-'.join(edition_name.split()[2:]).lower()
return simplified_name.replace('ñ', 'n')
def complicate_edition_name(simplified_name):
"""
Reverse the simplification: rebuild the full "MICHELIN Guide ..." edition
name from its simplified, hyphenated form.
:param simplified_name:
:return:
"""
if simplified_name == 'espana':
simplified_name = 'españa'
short_edition_name = (simplified_name
.replace('-', ' ')
.title()
.replace(' Of ', ' of '))
return f"MICHELIN Guide {short_edition_name}"
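# Illustrative round trip (edition name chosen only for the example):
#   simplify_edition_name("MICHELIN Guide España") -> "espana"
#   complicate_edition_name("espana") -> "MICHELIN Guide España"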
``` |
{
"source": "jmblake/trade_summary",
"score": 3
} |
#### File: jmblake/trade_summary/test_summary.py
```python
import unittest
from summary import Summary
class FirstTestCase(unittest.TestCase):
def setUp(self):
# A simple test case set up on two symbols. The first four elements are
# valid test data, but trade_data[4] has a numeric symbol.
self.test = Summary("", "")
# Each list within the trade_data list has the form:
# [timeStamp, symbol, quantity, price]
self.test.trade_data = [['1', 'aaa', '1', '0'], ['2', 'aaa', '1', '1'],
['2', 'bbb', '2', '2'], ['5', 'aaa', '1', '3'],
['7', '123', '2', '3'], ['9', '', '1', '1'],
['10', 'ddd', 'large', '10'],
['11', 'ddd', '1.2', '2']]
class SecondTestCase(unittest.TestCase):
def setUp(self):
# A test with a very large input set.
self.test = Summary("", "")
self.test.trade_data = (10 ^ 9) * [['1', 'aaa', '1', '1']]
class SymbolTest(FirstTestCase):
def test_invalid_symbol(self):
# A KeyError shouldn't occur in operation unless something has
# gone very wrong (keys are written before being called).
with self.assertRaises(KeyError):
_ = self.test.trade_summary['ccc']
def test_numeric_symbol(self):
# This could occur in the case that symbols are numeric.
# I assume that numeric symbols are permitted, else a Raise statement
# should be added to the main code.
# Note that all data are initially read from csv as strings.
self.test.update_symbol_details(self.test.trade_data[4])
self.assertDictEqual(self.test.trade_summary['123'],
{'time_stamp': 7,
'max_gap': 0,
'volume': 2,
'max_price': 3,
'total_price': 6})
def test_empty_symbol(self):
# The program accepts any nonempty string as a key for the trade
# summary / trade_data dictionary.
with self.assertRaises(ValueError):
self.test.update_symbol_details(self.test.trade_data[5])
class MaxGapTest(FirstTestCase):
# It can be assumed that the time gap will never be negative, so this
# cuts down on the necessary tests in this class.
def test_row_by_row(self):
# Here, we add the "rows" of the test trade data in one-by-one, and
# ensure that the values written to each symbol's "max_gap" entry are
# as expected.
self.test.update_symbol_details(self.test.trade_data[0])
self.assertEqual(self.test.trade_summary['aaa']['max_gap'], 0)
self.test.update_symbol_details(self.test.trade_data[1])
self.assertEqual(self.test.trade_summary['aaa']['max_gap'], 1)
self.test.update_symbol_details(self.test.trade_data[2])
self.assertEqual(self.test.trade_summary['aaa']['max_gap'], 1)
self.assertEqual(self.test.trade_summary['bbb']['max_gap'], 0)
self.test.update_symbol_details(self.test.trade_data[3])
self.assertEqual(self.test.trade_summary['aaa']['max_gap'], 3)
self.assertEqual(self.test.trade_summary['bbb']['max_gap'], 0)
class QuantityTest(FirstTestCase):
def test_row_by_row(self):
# Checking that we receive the expected volumes.
self.test.update_symbol_details(self.test.trade_data[0])
self.assertEqual(self.test.trade_summary['aaa']['volume'], 1)
self.test.update_symbol_details(self.test.trade_data[1])
self.assertEqual(self.test.trade_summary['aaa']['volume'], 2)
self.test.update_symbol_details(self.test.trade_data[2])
self.assertEqual(self.test.trade_summary['aaa']['volume'], 2)
self.assertEqual(self.test.trade_summary['bbb']['volume'], 2)
self.test.update_symbol_details(self.test.trade_data[3])
self.assertEqual(self.test.trade_summary['aaa']['volume'], 3)
self.assertEqual(self.test.trade_summary['bbb']['volume'], 2)
def test_invalid_quantity(self):
# Ensuring that a ValueError is raised if the input cannot be passed
# to int.
with self.assertRaises(ValueError):
self.test.update_symbol_details(self.test.trade_data[6])
self.test.update_symbol_details(self.test.trade_data[7])
class MaxPriceTest(FirstTestCase):
def test_row_by_row(self):
# As before, testing that we receive the expected results.
self.test.update_symbol_details(self.test.trade_data[0])
self.assertEqual(self.test.trade_summary['aaa']['max_price'], 0)
self.test.update_symbol_details(self.test.trade_data[1])
self.assertEqual(self.test.trade_summary['aaa']['max_price'], 1)
self.test.update_symbol_details(self.test.trade_data[2])
self.assertEqual(self.test.trade_summary['aaa']['max_price'], 1)
self.assertEqual(self.test.trade_summary['bbb']['max_price'], 2)
self.test.update_symbol_details(self.test.trade_data[3])
self.assertEqual(self.test.trade_summary['aaa']['max_price'], 3)
self.assertEqual(self.test.trade_summary['bbb']['max_price'], 2)
class LargeInputTest(SecondTestCase):
def test_large_input(self):
# This verifies that the program can handle input files on the order of
# 1 billion rows. This should be sufficient.
for elem in self.test.trade_data:
self.test.update_symbol_details(elem)
self.assertDictEqual(self.test.trade_summary['aaa'],
{'time_stamp': 1,
'max_gap': 0,
'volume': 10 ^ 9,
'max_price': 1,
'total_price': 10 ^ 9})
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JMB-McFarlane/SLICE",
"score": 3
} |
#### File: materials/pyfiles/check_file_update.py
```python
import os
def check_file_status(filename):
test = str(os.stat(filename))
# print test.split("st_ctime=")[1].split(")")[0]
return test.split("st_ctime=")[1].split(")")[0]
# print type(test)
check_file_status("/storage/home/jmbm87/SLICE_dev/materials/pyfiles/test_inps/text.txt")
```
#### File: materials/pyfiles/prodock1.py
```python
import os
from operator import itemgetter
import math
import shutil
import timeit
import time
import subprocess
workingdir = str(os.getcwd())
charges = []
def pdb_reader():
for file in os.listdir(workingdir):
lines = []
x = []
y = []
z = []
a = []
b = []
c = []
d = []
e = []
f = []
g = []
h = []
o = []
j = []
if "OUT.pdb" in file:
inp = open(file,'r')
print file
for line in inp:
if "ATOM" in line:
if len(line.split()) >= 6:
# print line
x.append(line.split()[5])
y.append(line.split()[6])
z.append(line.split()[7])
pout = open("MBP_" + file.split(".pdb.")[1] + "host.pdbqt",'wr')
print pout
ref = open('MBP.receptor.pdbqt','r')
i=0
for line in ref:
if "ATOM" in line:
a.append(line.split()[0])
b.append(line.split()[1])
c.append(line.split()[2])
d.append(line.split()[3])
e.append(line.split()[4])
f.append(line.split()[8])
g.append(line.split()[9])
h.append(line.split()[10])
o.append(line.split()[11])
# j.append(line.split()[12])
print len(x)
print len(y)
print len(z)
print len(a)
print len(b)
print len(c)
print len(d)
print len(e)
print len(f)
print len(g)
for i in range(len(a)):
lines.append(a[i]+ ' '+ b[i]+ ' '+ c[i]+ ' '+ d[i]+ ' '+ e[i]+ ' ' + x[i]+ ' '+ y[i]+ ' '+ z[i]+ ' ' + f[i] + ' '+ g[i]+ ' '+ h[i]+ ' '+ o[i])
i = 0
ref.close()
ref = open('MBP.receptor.pdbqt','r')
for line in ref:
if "ATOM" in line:
pout.write('{:>4} {:>6} {:<4} {:>3} {:>5} {:>11} {:>7} {:>7} {:>5} {:>5} {:>9} {:<2} ' .format(*lines[i].split()))
pout.write('\n')
i = i + 1
if "ATOM" not in line:
pout.write(line)
# print len(a)
pdb_reader()
def docker(hostname,ligandname,poses):
config = open(hostname.split("host.")[0] + ".conf", 'w')
#ligandname = raw_input("ligand pdbqt:")
#hostname = raw_input("host pdbqt:")
#poses = raw_input("number of poses:")
config.write('receptor = '+ hostname + '\n')
config.write('ligand = ' + ligandname + '\n' + '\n')
# Box configuration
boxresidues = []
res_x = []
res_y = []
res_z = []
cyx = []
with open('box.in') as file:
for line in file:
boxresidues = str.split(line)
for res in boxresidues:
with open(hostname) as file:
for line in file:
if ' ' + res + ' ' in line:
if 'ATOM' in line:
clean = line.split()
res_x.append(float(clean[5]))
res_y.append(float(clean[6]))
res_z.append(float(clean[7]))
# Disulfide bridge finder
with open(hostname) as file:
for line in file:
if "SG" in line:
cyx.append(line.split())
connect_info = []
for i in range(len(cyx)):
for j in range(len(cyx)):
if j != i:
if math.sqrt((float(cyx[i][6]) - float(cyx[j][6]))**2) <= 4:
connect_info.append('CONECT ' + cyx[i][1] + ' ' + cyx[j][1])
Cx = str((float(max(res_x)) - float(min(res_x)))/2 +float(min(res_x)))
Cy = str((float(max(res_y)) - float(min(res_y)))/2 +float(min(res_y)))
Cz = str((float(max(res_z)) - float(min(res_z)))/2 +float(min(res_z)))
print Cx,Cy,Cz
Sx = str((float(max(res_x)) - float(min(res_x)) +8))
Sy = str((float(max(res_y)) - float(min(res_y)) +8))
Sz = str((float(max(res_z)) - float(min(res_z)) + 8))
print("Box size = ")
print Sx,Sy,Sz
# Configuration file
config.write('center_x = ' + Cx + '\n')
config.write('center_y = ' + Cy + '\n')
config.write('center_z = ' + Cz + '\n' + '\n')
config.write('size_x = ' + Sx + '\n')
config.write('size_y = ' + Sy + '\n')
config.write('size_z = ' + Sz + '\n' + '\n')
config.write('out = ' + hostname +'.out'+ '\n')
config.write('exhaustiveness = 7' + '\n')
config.write('num_modes = ' + poses + '\n')
config.write('cpu = 7')
config.close()
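# Worked example of the box geometry above (illustrative numbers): if the box
# residues span x in [10.0, 20.0], then Cx = (20.0 - 10.0)/2 + 10.0 = 15.0 and
# Sx = 20.0 - 10.0 + 8 = 18.0, i.e. the box is centred on the selection with a
# 4 Angstrom margin on each side.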
# print ('Submitting to queue...')
#os.popen('/Users/jmbm/Desktop/RA_docking/jdock/vina' + ' --config ' + workingdir + '/conf.txt')
def pbs_script():
scr = open("script.pbs",'r')
newscr = open(hostname.split('host.')[0] + ".pbs", "wr")
for line in scr:
newline = line.replace("CONFIG_FILE",hostname.split('host.')[0] + ".conf")
newscr.write(newline)
arg = str(workingdir+"/"+hostname.split('host.')[0] + ".pbs")
#subprocess.Popen("qsub -l nodes=1:ppn=4,mem=2gb,walltime=30:00:00 " + arg, shell =True).wait()
#p = subprocess.check_output(["qsub", "-l", "nodes=1:ppn=4,mem=2gb,walltime=0:30:00",arg])
#os.popen('/opt/torque-2.5.13/bin/qsub -q prometheus -l nodes=1:ppn=2,mem=2gb,walltime=1:00:00 ' + hostname.split('host.')[0] + ".pbs")
# p.wait()
print(arg)
print(hostname.split('host.')[0] + ".pbs")
pbs_script()
# print ('Writing pose pdbs...')
# reading outfile and creating list of models and lines
# outfile = open(hostname+'.out','r')
# lig_poses = [[]]
# i = 0
""" for line in outfile:
if "ATOM" in line:
lig_poses[i].append(line.split())
if "HETATM" in line:
lig_poses[i].append(line.split())
if "ENDMDL" in line:
lig_poses.append([])
i = i + 1
lig_poses.pop()
# generating poses for tleap
for n in range(len(lig_poses)):
rec_x_coords = []
num = str(n)
pdb = open('pose'+ num + "_" + hostname, 'w')
rec = open(hostname)
for line in rec:
if "ATOM" in line:
clean = line.split()
rec_x_coords.append(clean[5])
if len(rec_x_coords) > 4:
if (float(rec_x_coords[-1]) - float(rec_x_coords[-2])) >= 15:
pdb.write("TER \n")
if ("HG CYX") not in line:
pdb.write(line)
pdb.write("TER \n")
rec.close()
lig_poses[n].sort(key=lambda x: int(x[4])) # Rearrange line order for residue number
# Writing to pdb format
for i in range(len(lig_poses[n])):
pdb.write('%-6s' % str(lig_poses[n][i][0]))
pdb.write('%5s' % str(lig_poses[n][i][1]))
if str(lig_poses[n][i][2][0]).isalpha() == True:
pdb.write(' ' + '%-4s' % str(lig_poses[n][i][2]))
if str(lig_poses[n][i][2][0]).isalpha() == False:
pdb.write(' ' + '%-5s' % str(lig_poses[n][i][2]))
pdb.write('%3s' % str(lig_poses[n][i][3]))
pdb.write('%6s' % str(lig_poses[n][i][4]))
pdb.write('%12s' % str(lig_poses[n][i][5]))
pdb.write('%8s' % str(lig_poses[n][i][6]))
pdb.write('%8s' % str(lig_poses[n][i][7]))
pdb.write('%6s' % str(lig_poses[n][i][8]))
pdb.write('%6s' % str(lig_poses[n][i][9]))
pdb.write('%10s' % str(lig_poses[n][i][10]) + ' ')
pdb.write('%-3s' % str(lig_poses[n][i][11]))
pdb.write('\n')
for i in range(len(connect_info)):
pdb.write(connect_info[i] + '\n')
"""
# Building tleap script
"""
def leapscript_gen():
leap_script = open(hostname + '_' + ligandname + '_build.scr','w')
for n in range(len(lig_poses)):
filename = hostname + '_' + ligandname + '_pose' + str(n)
leap_script.write(filename + ' = loadpdb ' + 'pose' + str(n) + hostname +'\n')
leap_script.write('addions ' + filename + ' Na+ 0' +'\n')
leap_script.write('addions ' + filename + ' Cl- 0' +'\n')
leap_script.write('solvatebox ' + filename + ' TIP3PBOX 11' +'\n')
leap_script.write('saveamberparm ' + filename + ' ' + filename + '.top ' + filename +'.crd \n\n' )
# leapscript_gen()
end_time = timeit.timeit()
print('Total run time: ' + str(end_time -start_time) + ' seconds')
"""
#ligandname = raw_input("ligand pdbqt:")
for file in os.listdir(workingdir):
if "ligand.pdbqt" in file:
ligandname = str(file)
for file in os.listdir(workingdir):
if "host.pdbqt" in file:
start_time = timeit.timeit()
print(file)
docker(file,ligandname,"10")
"""
def script_writer():
script = open("leap_script.scr","wr")
script.write('source build.scr \n\n')
for file in os.listdir(workingdir):
if "pose" in file:
filename = file.split('.pdbqt')[0]
print filename
script.write(filename + ' = loadpdb ' + file + '\n')
script.write('addions ' + filename + ' Na+ 0' +'\n')
script.write('addions ' + filename + ' Cl- 0' +'\n')
script.write('solvatebox ' + filename + ' TIP3PBOX 14' +'\n')
script.write('saveamberparm ' + filename + ' ' + filename + '.top ' + filename +'.crd \n\n' )
#script_writer()
"""
``` |
{
"source": "jmb/NightLightPi",
"score": 3
} |
#### File: NightLightPi/tests/test_config.py
```python
from unittest import TestCase
from unittest.mock import patch
from nightlightpi import errorstrings
from nightlightpi.config import load_config
from nightlightpi.config import ENVCONFIGPATH
from nightlightpi.config import ETCPATH
class LoadConfigTestCase(TestCase):
@patch("nightlightpi.config.load_valid_yaml")
def test_loads_from_env_var_if_set(self, mock_load_yaml):
mock_load_yaml.return_value = self.test_config
with patch.dict("os.environ", {ENVCONFIGPATH: "some.yaml"}):
conf = load_config()
mock_load_yaml.assert_called_once_with("some.yaml")
@patch("nightlightpi.config.load_valid_yaml")
def test_falls_back_to_ETCPATH_when_env_var_not_set(self, mock_load_yaml):
mock_load_yaml.return_value = self.test_config
with patch.dict("os.environ", {}):
conf = load_config()
mock_load_yaml.assert_called_once_with(ETCPATH)
def setUp(self):
self.test_config = {'display_modes': [{'background': None,
'menu': 'images/menu_off.ppm',
'name': 'Off'},
{'background': 'images/temperature.ppm',
'menu': 'images/menu_temperature.ppm',
'name': 'Temperature'},
{'background': None,
'menu': 'images/menu_rainbow.ppm',
'name': 'Rainbow'}],
'inputs': {'buttons_display': 24, 'buttons_light': 23},
'led_strip': {'brightness': 6,
'length': 10,
'light': 10,
'max_brightness': 30},
'mqtt': {'brightness_topic': 'nightlight/brightness',
'display_topic': 'nightlight/display',
'enable': True,
'humidity_topic': 'nightlight/humidity',
'light_topic': 'nightlight/light',
'password': 'PASSWORD',
'port': 8883,
'server': 'SERVER',
'temperature_topic': 'nightlight/temperature',
'user': 'USERNAME'},
'temperature': {'sensor_colours': [{'b': 255, 'g': 0, 'r': 20},
{'b': 10, 'g': 200, 'r': 255},
{'b': 0, 'g': 128, 'r': 255},
{'b': 0, 'g': 0, 'r': 255}],
'sensor_ranges': [16, 20, 23.9]},
'timing': {'menu_button_pressed_time_in_seconds': 0,
'menu_display': 0,
'speed_in_seconds': 1}}
``` |
{
"source": "jmbo1190/python-click-cli-cookbook",
"score": 4
} |
#### File: jmbo1190/python-click-cli-cookbook/app.py
```python
import click
def change(amount):
# calculate the resultant change and store the result (res)
res = []
coins = [1, 5, 10, 25] # value of pennies, nickels, dimes, quarters
coin_lookup = {25: "quarters", 10: "dimes", 5: "nickels", 1: "pennies"}
# divide the amount*100 (the amount in cents) by a coin value
# record the number of coins that evenly divide and the remainder
coin = coins.pop()
num, rem = divmod(int(amount * 100), coin)
# append the coin type and number of coins that had no remainder
res.append({num: coin_lookup[coin]})
# while there is still some remainder, continue adding coins to the result
while rem > 0:
coin = coins.pop()
num, rem = divmod(rem, coin)
if num:
if coin in coin_lookup:
res.append({num: coin_lookup[coin]})
return res
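# Worked example (illustrative): change(1.34) operates on 134 cents and returns
# [{5: 'quarters'}, {1: 'nickels'}, {4: 'pennies'}] (125 + 5 + 4 = 134).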
@click.command()
@click.option(
"--amount",
prompt="Amount: ",
help="Creates change for dollar and cents value: i.e. 1.34",
)
def make_change(amount):
"""Gives Correct Change"""
result = change(float(amount))
# click.echo(click.style(f"Change for {amount}:", fg="red"))
click.echo("Change for " + click.style(amount, fg="red") + ":")
for correct_change in result:
for num, coin in correct_change.items():
click.echo(f"{coin}: " + click.style(f"{num}", bold=True, fg="green"))
if __name__ == "__main__":
# pylint: disable=no-value-for-parameter
make_change()
``` |
{
"source": "jmbo1190/serverless-cookbook",
"score": 2
} |
#### File: jmbo1190/serverless-cookbook/test_invoke.py
```python
from invoke import cli
from click.testing import CliRunner
def test_app():
runner = CliRunner()
result = runner.invoke(cli, ["--version"])
assert result.exit_code == 0
assert "1.0" in result.output
``` |
{
"source": "jmboettcher/fall2019_sentiment_in_alternative_words",
"score": 3
} |
#### File: jmboettcher/fall2019_sentiment_in_alternative_words/wordSenseByContext.py
```python
from collections import defaultdict
from nltk.tokenize import sent_tokenize
from nltk.corpus import wordnet as wn
from nltk.corpus import semcor as sc
from nltk.corpus import stopwords
import mywordtokenizer
class SenseContextWordDict:
def __init__(self):
self.dictionary = self._create_dictionary()
def _create_dictionary(self):
dictionary = defaultdict(lambda: defaultdict(int))
myStopWords = stopwords.words('english')
for sentence in sc.tagged_sents(tag='sem'):
plainWordSent = []
taggedWordSent = []
self._make_word_lists(plainWordSent, taggedWordSent, sentence)
for taggedItemTuple in taggedWordSent:
self._update_tagged_item_entry(myStopWords, dictionary, plainWordSent, taggedItemTuple[0],taggedItemTuple[1])
return dictionary
def _make_word_lists(self, plainWordSent, taggedWordSent, sentence):
for i in range(0,len(sentence)):
item = sentence[i]
if(type(item)) == list:
plainWordSent.append(item[0])
else:
if type(item.label()) == str:
plainWordSent.append(item.leaves()[0])
else:
plainWordSent.append(item.label().name())
taggedWordSent.append([item, i])
def _update_tagged_item_entry(self, myStopWords,dictionary,plainWordSent,taggedItem,taggedItemPosition):
for j in range(0,len(plainWordSent)):
word = plainWordSent[j]
if taggedItem.label().name() != word:
taggedSynset = taggedItem.label().synset()
splitUp = word.split("_")
for thisword in splitUp:
wordTokened = mywordtokenizer.simple(thisword)
if len(wordTokened) > 0:
word = wordTokened[0]
if word not in myStopWords:
dictionary[taggedSynset][word]+=1
dictionary[taggedSynset][".total."]+=1
dictionary[taggedSynset][".totalNoStops."]+=1
elif abs(j - taggedItemPosition) == 1:
dictionary[taggedSynset][word]+=1
dictionary[taggedSynset][".total."]+=1
def getMostLikelySynset(self, word, sentence):
"""Find the synset of a word best supported by its sentence context.
Parameters
----------
word : str
The string representing a given word.
sentence : list
Context words surrounding the given word.
Returns
-------
the most likely synset for the given word, or None if none is supported.
"""
myStopWords = stopwords.words('english')
highestCoverageSyn = self._synset_search(".totalNoStops.", myStopWords, word, sentence)
if highestCoverageSyn is None:
highestCoverageSyn = self._synset_search(".total.", [], word, sentence)
return highestCoverageSyn
def _synset_search(self, totalToUse, exclusionSet, word, sentence):
"""Search the word's synsets for the one with the highest coverage of
context words, normalizing by the count stored under totalToUse and
ignoring any context word in exclusionSet.
Parameters
----------
word : str
The string representing a given word.
sentence : list
Context words surrounding the given word.
Returns
-------
the synset with the highest context coverage, or None.
"""
myMap = self.dictionary
highestCoverage = 0
highestCoverageSyn = None
for syn in wn.synsets(word):
totalContextWordMatches = 0
totalSet = myMap[syn][totalToUse]
if totalSet > 0:
for contextWord in sentence:
if contextWord != word and contextWord not in exclusionSet:
totalContextWordMatches += myMap[syn][contextWord]
coverage = totalContextWordMatches / totalSet
if coverage > highestCoverage:
highestCoverage = coverage
highestCoverageSyn = syn
return highestCoverageSyn
def listAlternatives(self, word, sentence):
synonyms = set([])
mostLikelySynset = self.getMostLikelySynset(word, sentence)
if not mostLikelySynset is None:
for synonym in mostLikelySynset.lemmas():
synonyms.add(synonym.name())
return synonyms
def mostFrequentAlternative(self, word, sentence):
mostLikelySynset = self.getMostLikelySynset(word, sentence)
highestCount = 0
mostFrequentAlternative = None
if not mostLikelySynset is None:
for synonym in mostLikelySynset.lemmas():
count = synonym.count()
if count > highestCount:
mostFrequentAlternative = synonym.name()
highestCount = count
return mostFrequentAlternative
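# Illustrative usage sketch (requires the NLTK semcor, wordnet and stopwords
# corpora to be downloaded; the sentence is invented and results depend on
# corpus statistics):
#   scd = SenseContextWordDict()
#   scd.listAlternatives('bank', ['the', 'bank', 'approved', 'the', 'loan'])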
"""===================================================================
Place all function calls below the following conditional so that they
are called only if this module is called with
`python ling278_assign02.py`
No functions should execute if it is instead imported with
import ling278_assign02
in the interactive shell.
"""
if __name__ == '__main__':
pass
``` |
{
"source": "jmbohan/python",
"score": 3
} |
#### File: python/assignSix/HW_6.py
```python
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
grades = {
90: "A",
80: "B",
70: "C",
60: "D",
0: "F",
}
def grade_mapping(value):
for key, letter in grades.items():
if value >= key:
return letter
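# e.g. grade_mapping(85) -> "B" and grade_mapping(42) -> "F": the first
# threshold met wins, scanning the dict in its insertion order (90 down to 0).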
df = pd.read_csv('Projects.csv')
empty_frame = np.where(pd.isnull(df))
empty_frame = [df.iloc[i, j] for i, j in zip(*np.where(pd.isnull(df)))]
# drops rows that have a null value
# dropped_nan = df.dropna(axis='index', how='any')
dropped_nan = df.fillna(0)
print('\nProject one mean: {}'.format(np.mean(dropped_nan['project_one'])))
print('Project one Standard Deviation: {}'.format(np.std(dropped_nan['project_one'])))
print('\nProject two mean: {}'.format(np.mean(dropped_nan['project_two'])))
print('Project two Standard Deviation: {}'.format(np.std(dropped_nan['project_two'])))
print('\nProject three mean: {}'.format(np.mean(dropped_nan['project_three'])))
print('Project three Standard Deviation: {}'.format(np.std(dropped_nan['project_three'])))
letter_grades_one = dropped_nan['project_one'].map(grade_mapping)
letter_grades_two = dropped_nan['project_two'].map(grade_mapping)
letter_grades_three = dropped_nan['project_three'].map(grade_mapping)
dropped_nan['letter_grade_one'] = pd.Categorical(letter_grades_one, categories=grades.values(), )
dropped_nan['letter_grade_two'] = pd.Categorical(letter_grades_two, categories=grades.values(), )
dropped_nan['letter_grade_three'] = pd.Categorical(letter_grades_three, categories=grades.values(), )
# one_letter = dropped_nan.pop('letter_grade')
df = pd.DataFrame(dropped_nan,
columns=['first_name', 'last_name', 'age', 'project_one', 'letter_grade_one', 'project_two',
'letter_grade_two', 'project_three', 'letter_grade_three'])
df.to_csv('LetterGrades.csv')
project_grades = df.loc[:, ['letter_grade_one', 'letter_grade_two', 'letter_grade_three']]
first_test = pd.Series(project_grades['letter_grade_one'])
second_test = pd.Series(project_grades['letter_grade_two'])
third_test = pd.Series(project_grades['letter_grade_three'])
df = pd.concat([first_test, second_test, third_test], axis=1)
# rename
df = project_grades.reset_index().melt(id_vars=['index'])
sns.catplot(
x='value',
hue='variable',
data=df,
kind='count',
order=['A', 'B', 'C', 'D', 'F'],
palette=['blue', 'red', 'green'],
legend=False
)
plt.xlabel('Grades')
plt.ylabel('Counts')
plt.title('Test Grades')
plt.legend(['Test One', 'Test Two', 'Test Three'])
plt.show()
age_plot = pd.DataFrame(dropped_nan, columns=['age', 'project_one', 'project_two', 'project_three'])
# age_plot = age_plot.reset_index().melt(id_vars=['age'])
# age_plot = age_plot.groupby(['age'])
# age_plot = age_plot.unstack()
age_plot = age_plot.set_index(['age'])
age_plot = age_plot.sort_values(by=['age'])
# age_plot = age_plot.pivot_table(index=['age'])
# age_plot = age_plot
age_plot = age_plot.reset_index().melt(id_vars='age')
sns.scatterplot(
data=age_plot,
y='value',
x='age',
hue='variable',
legend=True
)
plt.show()
print(age_plot)
print(first_test.value_counts())
```
#### File: python/inClass/week10(MatPlotLIb).py
```python
'''
import matplotlib.pyplot as plt
days = [5,10,15,20]
celcius_values = [24, 16, 16, 23]
plt.plot(days, celcius_values, "ob")
plt.xlabel('Day')
plt.ylabel('Degrees Celsius')
plt.show()
plt.savefig('plot3.png')
'''
'''
import matplotlib.pyplot as plt
days = list(range(1,9))
celsius_min = [19.6, 24.1, 26.7, 28.3, 27.5, 30.5, 32.8, 33.1]
celsius_max = [24.8, 28.9, 31.3, 33.0, 34.9, 35.6, 38.4, 39.2]
plt.xlabel('Day')
plt.ylabel('Degrees Celsius')
plt.plot(days, celsius_min,
days, celsius_min, "oy",
days, celsius_max,
days, celsius_max, "or")
print("The current limits for the axes are:")
print(plt.axis())
print("We set the axes to the following values:")
xmin, xmax, ymin, ymax = 0, 10, 14, 45
print(xmin, xmax, ymin, ymax)
plt.axis([xmin, xmax, ymin, ymax])
plt.show()
plt.savefig('maxAndMin.png')
'''
'''
import numpy as np
import matplotlib.pyplot as plt
X = np.linspace(-2 * np.pi, 2 * np.pi, 50, endpoint=True)
F1 = 3 * np.sin(X)
F2 = np.sin(2*X)
F3 = 0.3 * np.sin(X)
startx, endx = -2 * np.pi - 0.1, 2*np.pi + 0.1
starty, endy = -3.1, 3.1
plt.axis([startx, endx, starty, endy])
plt.plot(X,F1)
plt.plot(X,F2)
plt.plot(X,F3)
plt.plot(X, F1, 'ro')
plt.plot(X, F2, 'bx')
plt.show()
plt.savefig("plot5.png")
'''
'''
import numpy as np
import matplotlib.pyplot as plt
X = np.linspace(0, 2 * np.pi, 50, endpoint=True)
F1 = 3 * np.sin(X)
F2 = np.sin(2*X)
F3 = 0.3 * np.sin(X)
F4 = np.cos(X)
plt.plot(X, F1, color="blue", linewidth=2.5, linestyle="-")
plt.fill_between(X,0,F1, color='blue', alpha=.1)
plt.plot(X, F2, color="red", linewidth=1.5, linestyle="--")
plt.plot(X, F3, color="green", linewidth=2, linestyle=":")
plt.plot(X, F4, color="grey", linewidth=2, linestyle="-.")
plt.show()
plt.savefig('plot6.png')
'''
# subplots
'''
import numpy as np
import matplotlib.pyplot as plt
for i in range(1,7):
plt.subplot(2,3,i)
plt.text(0.5, 0.5, str((2, 3, i)),
fontsize =18, ha= 'center')
plt.show()
plt.savefig('plot7.png')
'''
'''
import numpy as np
import matplotlib.pyplot as plt
numerical_data = np.array([1,2,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,5,4,6,7,8])
plt.hist(numerical_data)
plt.title("Numbergram")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.show()
plt.savefig('plot8.png')
'''
'''
import numpy as np
import matplotlib.pyplot as plt
num_data_2 = np.array([-1,-5,-2,-10,-11 ,0,0,0,0,0, 1,2,3,4,5,6,7,8,9])
plt.hist(num_data_2, bins=100,color="#875F9A")
plt.title("Number Bins")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.show()
plt.savefig('plot9.png')
'''
'''
import matplotlib.pyplot as plt
import numpy as np
mu, sigma = 0, 0.1 # mean and standard deviation
gaussian_numbers = np.random.normal(mu, sigma, 10000)
plt.hist(gaussian_numbers, bins=100)
plt.title("Gaussian Histogram")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.show()
plt.savefig("plot10.png")
'''
'''
import matplotlib.pyplot as plt
fig =plt.figure(figsize=(6,4))
fig.subplots_adjust(bottom=0.025, left=0.025, top = 0.975, right=0.975)
X = [ (3,3,(1,3)), (3,3,(4,5)), (3,3,(6,9)), (3,3,7),(3,3,8) ]
for nrows, ncols, plot_number in X:
sub = plt.subplot(nrows, ncols, plot_number)
sub.set_xticks([])
sub.set_yticks([])
plt.show()
plt.savefig('plot11.png')
'''
'''
import matplotlib.pyplot as plt
import numpy as np
X = np.linspace(-2 * np.pi, 2 * np.pi, 50, endpoint=True)
mylist1 = 3 * np.sin(X)
mylist2 = np.sin(2*X)
mylist3 = 0.3 * np.sin(X)
mylist4 = 3 * np.cos(X)
mylist5 = np.tan(2*X)
mylist6 = 0.3 * np.cos(X)
mylist=[mylist1, mylist2,mylist3,mylist4, mylist5,mylist6]
mycol=['r','b','y','k','c','m']
fig = plt.figure(figsize=(6, 4))
fig.subplots_adjust(bottom=0.025, left=0.025, top=0.975, right=0.975)
Y = [(3, 3, (1, 3)), (3, 3, (4, 5)), (3, 3, (6, 9)), (3, 3, 7), (3, 3, 8)]
i=0
for nrows, ncols, plot_number in Y:
sub = plt.subplot(nrows, ncols, plot_number)
sub.set_xticks([])
sub.set_yticks([])
plt.plot(mylist[i],mycol[i])
i+=1
plt.show()
plt.savefig('plot12.png')
'''
'''
names = ['group_a', 'group_b', 'group_c']
values = [1, 10, 100]
import matplotlib.pyplot as plt
import numpy as np
plt.figure(figsize=(9, 3))
plt.subplot(131)
plt.bar(names, values)
plt.subplot(132)
plt.scatter(names, values)
plt.subplot(133)
plt.plot(names, values)
plt.suptitle('Categorical Plotting')
plt.show()
plt.savefig('plot13.png')
'''
'''
import matplotlib.pyplot as plt
import numpy as np
def f(t):
return np.exp(-t) * np.cos(2*np.pi*t)
t1 = np.arange(0.0, 5.0, 0.1)
t2 = np.arange(0.0, 5.0, 0.02)
plt.figure() #optional
plt.subplot(211)
plt.plot(t1, f(t1), 'bo', t2, f(t2), 'k')
plt.subplot(212)
plt.plot(t2, np.cos(2*np.pi*t2), 'r--')
plt.show()
plt.savefig('plot14.png')
'''
'''
import matplotlib.pyplot as plt
import numpy as np
labels = ['G1', 'G2', 'G3', 'G4', 'G5']
USA_means = [20, 34, 30, 35, 27]
Russia_means = [25, 32, 34, 20, 25]
China_means = [25, 32, 34, 20, 25]
x = np.arange(len(labels)) # the label locations
width = 0.25 # the width of the bars
fig , ax = plt.subplots()
rects1 = ax.bar(x - width, USA_means, width, label='USA', color="blue",edgecolor="black")
rects2 = ax.bar(x , Russia_means, width, label='Russia',color="green",edgecolor="black")
rects2 = ax.bar(x + width, China_means, width, label='China',color="red",edgecolor="black")
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Scores')
ax.set_title('Scores by Countries')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
plt.show()
plt.savefig('plot15.png')
# '''
import matplotlib.pyplot as plt
import numpy as np
last_week_cups = (20, 35, 30, 35, 27)
this_week_cups = (25, 32, 34, 20, 25)
names = ['Mary', 'Paul', 'Billy', 'Franka', 'Stephan']
fig = plt.figure()
left, bottom, width, height = 0.1, 0.3, 0.8, 0.6
ax = fig.add_axes([left, bottom, width, height])
width = 0.35
ticks = np.arange(len(names))
ax.bar(ticks, last_week_cups, width, label='Last week')
ax.bar(ticks + width, this_week_cups, width, label='This week')
ax.set_ylabel('Cups of Coffee')
ax.set_title('Coffee Consummation')
ax.set_xticks(ticks + width/2)
ax.set_xticklabels(names)
ax.legend(loc='best')
plt.show()
plt.savefig('plot17.png')
```
#### File: python/mid/mid_3.py
```python
def palindrome(str):
end = len(str)
middle = end >> 1
for i in range(middle):
end -= 1
if(str[i] != str[end]):
return False
return True
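# e.g. palindrome("level") -> True, palindrome("done") -> False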
while True:
word = input('Enter word: ')
if word == 'done' : break
palindrome(word)
if palindrome(word) == True:
print('Palindrome')
else:
print('No Palindrome')
```
#### File: python/practice/boxplot.py
```python
import matplotlib.pylab as pyp
import matplotlib.patches as mpatches
import seaborn as sns
def custom_legend(colors,labels, legend_location = 'upper left', legend_boundary = (1,1)):
# Create custom legend for colors
recs = []
for i in range(0,len(colors)):
recs.append(mpatches.Rectangle((0,0),1,1,fc=colors[i]))
pyp.legend(recs,labels,loc=legend_location, bbox_to_anchor=legend_boundary)
# Color boxplots by organ
organ_list = sorted(df_unique(grouped_samples,'type'))
colors = sns.color_palette("Paired", len(organ_list))
color_dict = dict(zip(organ_list, colors))
organ_palette = grouped_samples.drop_duplicates('id')['type'].map(color_dict)
# Plot grouped boxplot
g = sns.factorplot("id","num_mutations",data=grouped_samples, order=id_list, kind="box", size=7, aspect=3, palette=organ_palette)
sns.despine(left=True)
plot_setup_pre()
pyp.yscale('log')
custom_legend(colors,organ_list)
```
#### File: python/projectThree/PR_3.py
```python
import pandas as pd
import seaborn as sns
import numpy as np
from matplotlib import pyplot as plt
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
# read data
df = pd.read_csv('mushrooms.csv')
# Label encoder for training data
def label_encode_fit(data, columns):
result = data.copy()
encoders = {}
for column in columns:
encoder = preprocessing.LabelEncoder()
result[column] = encoder.fit_transform(result[column])
encoders[column] = encoder
return result, encoders
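# Illustrative: for a column ['e', 'p', 'e'], LabelEncoder assigns codes in
# sorted order ({'e': 0, 'p': 1}), so the encoded column becomes [0, 1, 0].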
# normalize data
df_label, encoders1 = label_encode_fit(df, df.columns)
plt.figure(figsize=(15, 18))
# Heatmap to visualize correlation
sns.heatmap(df_label.corr(), fmt='.2f', annot=True, cmap="hot")
# save plot
plt.savefig('shroom_heatmap.png')
plt.show()
# Drop Target column
X = df_label.drop(['bruises'], axis=1)
# Target Column for training data
y = df_label['bruises']
X_train, X_test, y_train, y_test = train_test_split(X, y)
# initialize scaler
scaler = StandardScaler()
#
# # Fit only to the training data
scaler.fit(X_train)
StandardScaler(copy=True, with_mean=True, with_std=True)
# # Now apply the transformations to the data:
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Neural Network
mlp = MLPClassifier(hidden_layer_sizes=(16, 16), max_iter=2000)
mlp.fit(X_train, y_train)
predictions = mlp.predict(X_test)
y_test_results = np.array(y_test)
# confusion matrix to a numpy array
cm = np.array(confusion_matrix(y_true=y_test, y_pred=predictions))
# numpy array to pandas Dataframe
confusion = pd.DataFrame(cm, index=['poisonous', 'not_poisonous'],
columns=['predicted_poisonous', 'predicted_not_poisonous'])
disp = ConfusionMatrixDisplay.from_predictions(y_test_results, predictions, )
# Print Accuracy on top on confusion matrix
disp.ax_.set_title('Accuracy Score: {}'.format(accuracy_score(y_test, predictions)))
# plt.savefig('NNconfusionMatrix.png')
# plt.show()
print('Mushroom NN predicted poisonous: \n\n', classification_report(y_test, predictions, ), '\n', confusion)
# print(len(mlp.coefs_))
# print(len(mlp.coefs_[0]))
# print(len(mlp.intercepts_[0]))
##SVM
# read data
df = pd.read_csv('mushrooms.csv')
df_label, encoders1 = label_encode_fit(df, df.columns)
# pairplot to view data
# sns.pairplot(df_label, hue='class', vars=['cap-shape', 'bruises', 'gill-size', 'odor', 'gill-color'])
plt.savefig('shroom_pairplot.png')
plt.show()
# Drop Target
X = df_label.drop(['bruises'], axis=1)
# Target for training
y = df_label['bruises']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=20)
# Train an SVC classifier on the encoded features
svc_model = SVC()
svc_model.fit(X_train, y_train)
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma='auto',
kernel='rbf', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False)
y_predict = svc_model.predict(X_test)
# confusion matrix to numpy array
cm = np.array(confusion_matrix(y_test, y_predict, labels=[1, 0]))
# numpy array to pandas Dataframe
confusion = pd.DataFrame(cm, index=['poisonous', 'not_poisonous'],
columns=['predicted_poisonous', 'predicted_not_poisonous'])
print('Mushroom poisonous SVM prediction: \n\n', confusion, '\n')
print(classification_report(y_test, y_predict))
fig, ax = plt.subplots(figsize=(10, 7))
# Barchart
sns.countplot(data=df, x='bruises', hue='class', palette='Purples')
# save plot
plt.savefig('shroom_bar.png')
plt.show()
``` |
{
"source": "jmborr/ipdflex",
"score": 2
} |
#### File: ipdflex/idpflex/cluster.py
```python
import sys
import numpy as np
import pickle
import scipy
from scipy.spatial.distance import squareform
from scipy.stats import zscore
from scipy.cluster import hierarchy
from tqdm import tqdm
from collections import namedtuple
from idpflex.distances import (rmsd_matrix, extract_coordinates)
from idpflex.cnextend import Tree
from idpflex.properties import ScalarProperty, propagator_size_weighted_sum
class ClusterTrove(namedtuple('ClusterTrove', 'idx rmsd tree')):
r"""A namedtuple with a `keys()` method for easy access of
fields, which are described below under header `Parameters`
Parameters
----------
idx : :class:`list`
Frame indexes for the representative structures (indexes start at zero)
rmsd : :class:`~numpy:numpy.ndarray`
distance matrix between representative structures.
tree : :class:`~idpflex.cnextend.Tree`
Clustering of representative structures. Leaf nodes associated with
each centroid contain property `iframe`, which is the frame index
in the trajectory pointing to the atomic structure corresponding to
the centroid.
"""
def keys(self):
r"""Return the list of field names"""
return self._fields
def save(self, filename):
r"""Serialize the cluster trove and save to file
Parameters
----------
filename: str
File name
"""
with open(filename, 'wb') as outfile:
pickle.dump(self, outfile)
def trajectory_centroids(a_universe, selection='not name H*',
segment_length=1000, n_representatives=1000):
r"""Cluster a set of consecutive trajectory segments into a set
of representative structures via structural similarity (RMSD)
The simulated trajectory is divided into consecutive segments, and
hierarchical clustering is performed on each segment to yield a
limited number of representative structures (centroids) per segment.
Parameters
----------
a_universe : :class:`~MDAnalysis.core.universe.Universe`
Topology and trajectory.
selection : str
atoms for which to calculate RMSD. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
segment_length: int
divide trajectory into segments of this length
n_representatives : int
Desired total number of representative structures. The final number
may be close but not equal to the desired number.
Returns
-------
rep_ifr : list
Frame indexes of representative structures (centroids)
""" # noqa: E501
group = a_universe.select_atoms(selection)
# Fragmentation of the trajectory
n_frame = len(a_universe.trajectory)
n_segments = int(n_frame / segment_length)
nc = max(1, int(n_representatives / n_segments)) # clusters per segment
rep_ifr = list() # frame indexes of representative structures
info = """Clustering the trajectory:
Creating {} representatives by partitioning {} frames into {} segments
and retrieving {} representatives from each segment.
""".format(nc * n_segments, n_frame, n_segments, nc)
sys.stdout.write(info)
sys.stdout.flush()
# Hierarchical clustering on each trajectory fragment
for i_segment in tqdm(range(n_segments)):
indexes = range(i_segment * segment_length,
(i_segment + 1) * segment_length)
xyz = extract_coordinates(a_universe, group, indexes)
rmsd = rmsd_matrix(xyz, condensed=True)
z = hierarchy.linkage(rmsd, method='complete')
for node in Tree(z=z).nodes_at_depth(nc-1):
# Find the frame of each representative structure
i_frame = i_segment * segment_length + node.representative(rmsd).id
rep_ifr.append(i_frame)
rep_ifr.sort()
return rep_ifr
def cluster_with_properties(a_universe, pcls, p_names=None,
selection='not name H*', segment_length=1000,
n_representatives=1000):
r"""Cluster a set of representative structures by structural similarity
(RMSD) and by a set of properties
The simulated trajectory is divided into segments, and hierarchical
clustering is performed on each segment to yield a limited number of
representative structures (the centroids). Properties are calculated
for each centroid, thus each centroid is described by a property
vector. The dimensionality of the vector is related to the number of
properties and the dimensionality of each property.
The distances between any two centroids is calculated as the
Euclidean distance between their respective vector properties.
The distance matrix containing distances between all possible
centroid pairs is employed as the similarity measure to generate
the hierarchical tree of centroids.
The properties calculated for the centroids are stored in the
leaf nodes of the hierarchical tree. Properties are then propagated
up to the tree's root node.
Parameters
----------
a_universe : :class:`~MDAnalysis.core.universe.Universe`
Topology and trajectory.
pcls : list
Property classes, such as :class:`~idpflex.properties.Asphericity`
of :class:`~idpflex.properties.SaSa`
p_names : list
Property names. If None, then default property names are used
selection : str
atoms for which to calculate RMSD. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
segment_length: int
divide trajectory into segments of this length
n_representatives : int
Desired total number of representative structures. The final number
may be close but not equal to the desired number.
Returns
-------
:class:`~idpflex.cluster.ClusterTrove`
Hierarchical clustering tree of the centroids
""" # noqa: E501
rep_ifr = trajectory_centroids(a_universe, selection=selection,
segment_length=segment_length,
n_representatives=n_representatives)
n_centroids = len(rep_ifr) # can be different than n_representatives
# Create names if not passed
if p_names is None:
p_names = [Property.default_name for Property in pcls]
# Calculate properties for each centroid
l_prop = list()
for p_name, Pcl in zip(p_names, pcls):
l_prop.append([Pcl(name=p_name).from_universe(a_universe, index=i)
for i in tqdm(rep_ifr)])
# Calculate distances between pair of centroids
xyz = np.zeros((len(pcls), n_centroids))
for i_prop, prop in enumerate(l_prop):
xyz[i_prop] = [p.y for p in prop]
# zero mean and unity variance for each property
xyz = np.transpose(zscore(xyz, axis=1))
distance_matrix = squareform(scipy.spatial.distance_matrix(xyz, xyz))
# Cluster the representative structures
tree = Tree(z=hierarchy.linkage(distance_matrix, method='complete'))
for i_leaf, leaf in enumerate(tree.leafs):
prop = ScalarProperty(name='iframe', y=rep_ifr[i_leaf])
leaf[prop.name] = prop
# Propagate the properties up the tree
[propagator_size_weighted_sum(prop, tree) for prop in l_prop]
return ClusterTrove(rep_ifr, distance_matrix, tree)
def cluster_trajectory(a_universe, selection='not name H*',
segment_length=1000, n_representatives=1000):
r"""Cluster a set of representative structures by structural similarity
(RMSD)
The simulated trajectory is divided into segments, and hierarchical
clustering is performed on each segment to yield a limited number of
representative structures. These are then clustered into the final
hierarchical tree.
Parameters
----------
a_universe : :class:`~MDAnalysis.core.universe.Universe`
Topology and trajectory.
selection : str
atoms for which to calculate RMSD. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
segment_length: int
divide trajectory into segments of this length
n_representatives : int
Desired total number of representative structures. The final number
may be close but not equal to the desired number.
Returns
-------
:class:`~idpflex.cluster.ClusterTrove`
Clustering results for the representatives, bundling the representative
frame indexes, their mutual RMSD distance matrix, and the hierarchical tree
""" # noqa: E501
rep_ifr = trajectory_centroids(a_universe, selection=selection,
segment_length=segment_length,
n_representatives=n_representatives)
group = a_universe.select_atoms(selection)
xyz = extract_coordinates(a_universe, group, rep_ifr)
distance_matrix = rmsd_matrix(xyz, condensed=True)
# Cluster the representative structures
tree = Tree(z=hierarchy.linkage(distance_matrix, method='complete'))
for i_leaf, leaf in enumerate(tree.leafs):
prop = ScalarProperty(name='iframe', y=rep_ifr[i_leaf])
leaf[prop.name] = prop
return ClusterTrove(rep_ifr, distance_matrix, tree)
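# --- Illustrative usage sketch (not part of the library) ---
# RMSD-based variant described above: pick representative frames per segment,
# then cluster them by pairwise RMSD. File names below are hypothetical.
def _example_cluster_trajectory():
    import MDAnalysis as mda
    u = mda.Universe('topology.pdb', 'trajectory.dcd')
    return cluster_trajectory(u, selection='name CA',
                              segment_length=100, n_representatives=50)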
def load_cluster_trove(filename):
r"""Load a previously saved
:class:`~idpflex.cluster.ClusterTrove` instance
Parameters
----------
filename: str
File name containing the serialized
:class:`~idpflex.cluster.ClusterTrove`
Returns
-------
:class:`~idpflex.cluster.ClusterTrove`
Cluster trove instance stored in file
"""
with open(filename, 'rb') as infile:
t = pickle.load(infile)
return t
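# --- Illustrative sketch (not part of the library) ---
# load_cluster_trove() unpickles a saved trove; the dump below mirrors that
# with plain pickle. The actual ClusterTrove class may also provide its own
# save method, so treat this as an assumption.
def _example_save_and_reload(trove, filename='trove.pickle'):
    import pickle
    with open(filename, 'wb') as outfile:
        pickle.dump(trove, outfile)
    return load_cluster_trove(filename)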
```
#### File: ipdflex/idpflex/properties.py
```python
import os
import subprocess
import tempfile
import fnmatch
import functools
import numpy as np
import numbers
from collections import OrderedDict
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter, MaxNLocator, AutoMinorLocator
from matplotlib.colors import ListedColormap
import MDAnalysis as mda
import mdtraj
from MDAnalysis.analysis.distances import contact_matrix
from idpflex import utils as iutl
class PropertyDict(object):
r"""
A container of properties mimicking some of the behavior of
a standard python dictionary, plus methods
representing features of the properties when taken as a group.
Parameters
----------
properties: list
A list of properties to include
"""
def __init__(self, properties=None):
self._properties = dict()
if properties is not None:
self._properties.update({p.name: p for p in properties})
def __iter__(self):
return iter(self._properties.keys())
def __getitem__(self, name):
r"""
Fetch a property from `_properties` dictionary.
Parameters
----------
name: str
name of the property
Returns
-------
property object, or `None` if no property is found with *name*
"""
return self._properties.get(name, None)
def __setitem__(self, name, value):
r"""
Include a property in the `_properties` dictionary.
Parameters
----------
name: str
name of the property
value: Property
"""
self._properties[name] = value
def get(self, name, default=None):
r"""
Mimic get method of a dictionary
Parameters
----------
name: str
name of the property
default: object
default value if `name` is not one of the properties stored
Returns
-------
Property or default object
"""
return self._properties.get(name, default)
def keys(self):
r"""
Mimic keys method of a dictionary
Returns
-------
dict_keys of `_properties`
"""
return self._properties.keys()
def items(self):
r"""
Mimic items method of a dictionary
Returns
-------
dict_items of `_properties`
"""
return self._properties.items()
def values(self):
r"""
Mimic values method of a dictionary
Returns
-------
dict_values of `_properties`
"""
return self._properties.values()
def feature_vector(self, names=None):
r"""
Feature vector for the specified sequence of names.
The feature vector is a concatenation of the feature vectors for
each of the properties and the concatenation follows the order of
names.
If names is None, return all features in the property dict in the
order of insertion.
Parameters
----------
names: list
List of property names
Returns
-------
numpy.ndarray
"""
if names is None:
return np.concatenate([prop.feature_vector
for prop in self.values()])
return np.concatenate([self._properties[n].feature_vector
for n in names])
def feature_weights(self, names=None):
r"""
Feature vector weights for the specified sequence of names.
The feature vector weights is a concatenation of the feature vectors
weights for each of the properties and the concatenation follows the
order of names.
If names is None, return all features in the property dict in the
order of insertion.
Parameters
----------
names: list
List of property names
Returns
-------
numpy.ndarray
"""
if names is None:
return np.concatenate([prop.feature_weights
for prop in self.values()])
return np.concatenate([self._properties[n].feature_weights
for n in names])
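# --- Illustrative sketch (not part of the library) ---
# feature_vector()/feature_weights() concatenate per-property features in the
# order requested. ScalarProperty (defined later in this module) contributes a
# single value and a unit weight.
def _example_property_dict_features():
    group = PropertyDict([ScalarProperty(name='rg', y=12.3),
                          ScalarProperty(name='sasa', y=2500.0)])
    fv = group.feature_vector(names=['sasa', 'rg'])   # array([2500. , 12.3])
    w = group.feature_weights(names=['sasa', 'rg'])   # array([1, 1])
    return fv, w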
def register_as_node_property(cls, nxye):
r"""Endows a class with the node property protocol.
| The node property assumes the existence of these attributes
| - *name* name of the property
| - *x* property domain
| - *y* property values
| - *e* errors of the property values
This function will endow class *cls* with these attributes, implemented
through the python property pattern.
Names for the corresponding storage attributes must be supplied when
registering the class.
Parameters
----------
cls : class type
The class type
nxye : tuple (len==4)
nxye is a four element tuple. Its elements are in this order:
(property name, 'stores the name of the property'),
(domain_storage_attribute_name, description of the domain),
(values_storage_attribute_name, description of the values),
(errors_storage_attribute_name, description of the errors)
Example:
(('name', 'stores the name of the property'),
('qvalues', 'momentum transfer values'),
('profile', 'profile intensities'),
('errors', 'intensity errors'))
"""
def property_item(attr_name, docstring):
r"""Factory of the node property items *name*, *x*, *y*, and *e*
Parameters
----------
attr_name : str
name of the storage attribute holding the info for the
respective node property item.
docstring : str
description of the storage attribute
Returns
-------
:py:class:`property`
A node-property item
"""
def getter(instance):
return instance.__dict__[attr_name]
def setter(instance, value):
instance.__dict__[attr_name] = value
return property(fget=getter,
fset=setter,
doc='property *{}* : {}'.format(attr_name, docstring))
# Endow the class with properties name, x, y, and e
for (prop, storage) in zip(('name', 'x', 'y', 'e'), nxye):
setattr(cls, prop, property_item(*storage))
return cls
def decorate_as_node_property(nxye):
r"""Decorator that endows a class with the node property protocol
For details, see :func:`~idpflex.properties.register_as_node_property`
Parameters
----------
nxye : list
list of (name, description) pairs denoting the property components
"""
def decorate(cls):
return register_as_node_property(cls, nxye)
return decorate
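# --- Illustrative sketch (not part of the library) ---
# Endow a toy class with the name/x/y/e protocol described above; the aliases
# simply read and write the named storage attributes. Class and attribute
# names below are hypothetical.
def _example_node_property_protocol():
    @decorate_as_node_property((('id', 'name of the property'),
                                ('domain', 'property domain'),
                                ('values', 'property values'),
                                ('errs', 'value errors')))
    class Toy(object):
        def __init__(self):
            self.__dict__.update(id='toy', domain=1.0, values=2.0, errs=0.1)
    t = Toy()
    return t.name, t.x, t.y, t.e  # ('toy', 1.0, 2.0, 0.1)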
class ScalarProperty(object):
r"""Implementation of a node property for a number plus an error.
Instances have *name*, *x*, *y*, and *e* attributes, so they will
follow the property node protocol.
Parameters
----------
name : str
Name associated to this type of property
x : float
Domain of the property
y : float
value of the property
e: float
error of the property's value
"""
def __init__(self, name=None, x=0.0, y=0.0, e=0.0):
r"""
"""
self.name = name
self.x = x
self.e = e
self.y = y
self.node = None
def set_scalar(self, y):
if not isinstance(y, numbers.Real):
raise TypeError("y must be a non-complex number")
self.y = y
@property
def feature_vector(self):
return np.array([self.y, ])
@property
def feature_weights(self):
return np.array([1])
def histogram(self, bins=10, errors=False, **kwargs):
r"""Histogram of values for the leaf nodes
Parameters
----------
bins : int
number of histogram bins
errors : bool
estimate error from histogram counts
kwargs : dict
Additional arguments to underlying :func:`~numpy:numpy.histogram`
Returns
-------
:class:`~numpy:numpy.ndarray`
histogram bin edges
:class:`~numpy:numpy.ndarray`
histogram values
:class:`~numpy:numpy.ndarray`
Errors for histogram counts, if `errors=True`. Otherwise None.
"""
ys = [l[self.name].y for l in self.node.leafs]
h, edges = np.histogram(ys, bins=bins, **kwargs)
e = np.sqrt(h) if errors else None
return edges, h, e
def plot(self, kind='histogram', errors=False, **kwargs):
r"""
Parameters
----------
kind : str
'histogram': Gather Rg for the leafs under the node associated
to this property, then make a histogram.
errors : bool
Estimate error from histogram counts
kwargs : dict
Additional arguments to underlying
:meth:`~matplotlib.axes.Axes.hist`
Returns
-------
:class:`~matplotlib:matplotlib.axes.Axes`
Axes object holding the plot
"""
if kind == 'histogram':
ys = [l[self.name].y for l in self.node.leafs]
fig, ax = plt.subplots()
n, bins, patches = ax.hist(ys, **kwargs)
if errors:
h, edges = np.histogram(ys, bins=bins)
centers = 0.5 * (edges[1:] + edges[:-1])
ax.bar(centers, h, width=0.05, yerr=np.sqrt(h))
ax.set_xlabel(self.name, size=25)
ax.set_ylabel('Counts', size=25)
return ax
class AsphericityMixin(object):
r"""Mixin class providing a set of methods to calculate the asphericity
from the gyration radius tensor"""
def from_universe(self, a_universe, selection=None, index=0):
r"""Calculate asphericity from an MDAnalysis universe instance
:math:`\frac{(L_1-L_2)^2+(L_1-L_3)^2+(L_2-L_3)^2}{2(L_1+L_2+L_3)^2}`
where :math:`L_i` are the eigenvalues of the gyration tensor. Units
are the same as those of a_universe.
Does not apply periodic boundary conditions
Parameters
----------
a_universe: :class:`~MDAnalysis.core.universe.Universe`
Trajectory or single-conformation instance
selection: str
Atomic selection. All atoms considered if None is passed. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
Returns
-------
self: :class:`~idpflex.properties.Asphericity`
Instantiated Asphericity object
""" # noqa: E501
if selection is None:
self.selection = a_universe.atoms
else:
self.selection = a_universe.select_atoms(selection)
a_universe.trajectory[index] # jump to frame
r = self.selection.positions - self.selection.centroid()
gyr = np.einsum("ij,ik", r, r) / len(self.selection) # gyration tensor
eval, evec = np.linalg.eig(gyr) # diagonalize
self.y = np.sum(np.square(np.subtract.outer(eval, eval))) / \
np.square(np.sum(eval))
return self
def from_pdb(self, filename, selection=None):
r"""Calculate asphericity from a PDB file
:math:`\frac{(L_1-L_2)^2+(L_1-L_3)^2+(L_2-L_3)^2}{2(L_1+L_2+L_3)^2}`
where :math:`L_i` are the eigenvalues of the gyration tensor. Units
are the same as those of a_universe.
Does not apply periodic boundary conditions
Parameters
----------
filename: str
path to the PDB file
selection: str
Atomic selection. All atoms are considered if None is passed. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
Returns
-------
self: :class:`~idpflex.properties.Asphericity`
Instantiated Asphericity object
""" # noqa: E501
return self.from_universe(mda.Universe(filename), selection)
class Asphericity(ScalarProperty, AsphericityMixin):
r"""Implementation of a node property to store the asphericity from the
gyration radius tensor
:math:`\frac{(L_1-L_2)^2+(L_1-L_3)^2+(L_2-L_3)^2}{2(L_1+L_2+L_3)^2}`
where :math:`L_i` are the eigenvalues of the gyration tensor. Units
are the same as those of a_universe.
Reference: https://pubs.acs.org/doi/pdf/10.1021/ja206839u
Does not apply periodic boundary conditions
See :class:`~idpflex.properties.ScalarProperty` for initialization
"""
default_name = 'asphericity'
def __init__(self, *args, **kwargs):
ScalarProperty.__init__(self, *args, **kwargs)
if self.name is None:
self.name = Asphericity.default_name
@property
def asphericity(self):
r"""Property to read and set the asphericity"""
return self.y
@asphericity.setter
def asphericity(self, value):
self.y = value
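# --- Illustrative numeric sketch (not part of the library) ---
# The documented asphericity formula written out for one sample set of
# gyration-tensor eigenvalues (L1, L2, L3); the values are hypothetical.
def _example_asphericity_formula(L1=3.0, L2=2.0, L3=1.0):
    numerator = (L1 - L2)**2 + (L1 - L3)**2 + (L2 - L3)**2
    return numerator / (2.0 * (L1 + L2 + L3)**2)  # 6 / 72 = 0.0833...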
class SaSaMixin(object):
r"""Mixin class providing a set of methods to load and calculate the
solvent accessible surface area"""
def from_mdtraj(self, a_traj, probe_radius=1.4, **kwargs):
r"""Calculate solvent accessible surface for frames in a trajectory
SASA units are Angstroms squared
Parameters
----------
a_traj: :class:`~mdtraj.Trajectory`
mdtraj trajectory instance
probe_radius: float
The radius of the probe, in Angstroms
kwargs: dict
Optional arguments for the underlying mdtraj.shrake_rupley
algorithm doing the actual SaSa calculation
Returns
-------
self: :class:`~idpflex.properties.SaSa`
Instantiated SaSa property object
"""
self.y = 100 * mdtraj.shrake_rupley(a_traj,
probe_radius=probe_radius/10.0,
**kwargs).sum(axis=1)[0]
return self
def from_pdb(self, filename, selection=None, probe_radius=1.4, **kwargs):
r"""Calculate solvent accessible surface area (SASA) from a PDB file
If the PDB contains more than one structure, calculation is performed
only for the first one.
SASA units are Angstroms squared
Parameters
----------
filename: str
Path to the PDB file
selection: str
Atomic selection for calculating SASA. All atoms considered if
default None is passed. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
probe_radius: float
The radius of the probe, in Angstroms
kwargs: dict
Optional arguments for the underlying mdtraj.shrake_rupley
algorithm doing the actual SaSa calculation
Returns
-------
self: :class:`~idpflex.properties.SaSa`
Instantiated SaSa property object
""" # noqa: E501
self.selection = selection
a_traj = mdtraj.load_pdb(filename)
if selection is not None:
selection = a_traj.top.select(selection) # atomic indices
a_traj = mdtraj.load_pdb(filename, atom_indices=selection)
return self.from_mdtraj(a_traj, probe_radius=probe_radius, **kwargs)
def from_universe(self, a_universe, selection=None, probe_radius=1.4,
index=0, **kwargs):
r"""Calculate solvent accessible surface area (SASA) from
an MDAnalysis universe instance.
This method is a thin wrapper around method `from_pdb()`
Parameters
----------
a_universe: :class:`~MDAnalysis.core.universe.Universe`
Trajectory or single-conformation instance
selection: str
Atomic selection for calculating SASA. All atoms considered if
default None is passed. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
probe_radius: float
The radius of the probe, in Angstroms
kwargs: dict
Optional arguments for underlying mdtraj.shrake_rupley doing
the actual SASA calculation.
Returns
-------
self: :class:`~idpflex.properties.SaSa`
Instantiated SaSa property object
""" # noqa: E501
with iutl.temporary_file(suffix='.pdb') as filename:
a_universe.trajectory[index] # jump to frame
a_universe.atoms.write(filename)
sasa = self.from_pdb(filename, selection=selection,
probe_radius=probe_radius, **kwargs)
return sasa
class SaSa(ScalarProperty, SaSaMixin):
r"""Implementation of a node property to calculate the Solvent Accessible
Surface Area.
See :class:`~idpflex.properties.ScalarProperty` for initialization
"""
default_name = 'sasa'
def __init__(self, *args, **kwargs):
ScalarProperty.__init__(self, *args, **kwargs)
if self.name is None:
self.name = SaSa.default_name
@property
def sasa(self):
r"""Property to read and write the SASA value"""
return self.y
@sasa.setter
def sasa(self, value):
self.y = value
class EndToEndMixin(object):
r"""Mixin class providing a set of methods to load and calculate
the end-to-end distance for a protein"""
def from_universe(self, a_universe, selection='name CA', index=0):
r"""Calculate radius of gyration from an MDAnalysis Universe instance
Does not apply periodic boundary conditions
Parameters
----------
a_universe: :class:`~MDAnalysis.core.universe.Universe`
Trajectory or single-conformation instance
selection: str
Atomic selection. The first and last atoms of the selection are
considered for the calculation of the end-to-end distance. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
Returns
-------
self: :class:`~idpflex.properties.EndToEnd`
Instantiated EndToEnd object
""" # noqa: E501
selection = a_universe.select_atoms(selection)
self.pair = (selection[0], selection[-1])
a_universe.trajectory[index] # jump to frame
r = self.pair[0].position - self.pair[1].position
self.y = np.linalg.norm(r)
return self
def from_pdb(self, filename, selection='name CA'):
r"""Calculate end-to-end distance from a PDB file
Does not apply periodic boundary conditions
Parameters
----------
filename: str
path to the PDB file
selection: str
Atomic selection. The first and last atoms of the selection are
considered for the calculation of the end-to-end distance. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
Returns
-------
self: :class:`~idpflex.properties.EndToEnd`
Instantiated EndToEnd object
""" # noqa: E501
return self.from_universe(mda.Universe(filename), selection)
class EndToEnd(ScalarProperty, EndToEndMixin):
r"""Implementation of a node property to store the end-to-end distance
See :class:`~idpflex.properties.ScalarProperty` for initialization
"""
default_name = 'end_to_end'
def __init__(self, *args, **kwargs):
ScalarProperty.__init__(self, *args, **kwargs)
if self.name is None:
self.name = EndToEnd.default_name
@property
def end_to_end(self):
r"""Property to read and set the end-to-end distance"""
return self.y
@end_to_end.setter
def end_to_end(self, value):
self.y = value
class RadiusOfGyrationMixin(object):
r"""Mixin class providing a set of methods to load the Radius of Gyration
data into a Scalar property
"""
def from_universe(self, a_universe, selection=None, index=0):
r"""Calculate radius of gyration from an MDAnalysis Universe instance
Parameters
----------
a_universe: :class:`~MDAnalysis.core.universe.Universe`
Trajectory, or single-conformation instance.
selection: str
Atomic selection. All atoms considered if None is passed. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
Returns
-------
self: :class:`~idpflex.properties.RadiusOfGyration`
Instantiated RadiusOfGyration object
""" # noqa: E501
if selection is None:
self.selection = a_universe.atoms
else:
self.selection = a_universe.select_atoms(selection)
a_universe.trajectory[index] # jump to frame
self.y = self.selection.atoms.radius_of_gyration()
return self
def from_pdb(self, filename, selection=None):
r"""Calculate Rg from a PDB file
Parameters
----------
filename: str
path to the PDB file
selection: str
Atomic selection for calculating Rg. All atoms considered if
default None is passed. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
Returns
-------
self: :class:`~idpflex.properties.RadiusOfGyration`
Instantiated RadiusOfGyration property object
""" # noqa: E501
return self.from_universe(mda.Universe(filename), selection)
class RadiusOfGyration(ScalarProperty, RadiusOfGyrationMixin):
r"""Implementation of a node property to store the radius of gyration.
See :class:`~idpflex.properties.ScalarProperty` for initialization
"""
default_name = 'rg'
def __init__(self, *args, **kwargs):
ScalarProperty.__init__(self, *args, **kwargs)
if self.name is None:
self.name = RadiusOfGyration.default_name
@property
def rg(self):
r"""Property to read and write the radius of gyration value"""
return self.y
@rg.setter
def rg(self, value):
self.y = value
@decorate_as_node_property((('name', '(str) name of the contact map'),
('selection', '(:class:`~MDAnalysis.core.groups.AtomGroup`) atom selection'), # noqa: E501
('cmap', '(:class:`~numpy:numpy.ndarray`) contact map between residues'), # noqa: E501
('errors', '(:class:`~numpy:numpy.ndarray`) undeterminacies in the contact map'))) # noqa: E501
class ResidueContactMap(object):
r"""Contact map between residues of the conformation using different
definitions of contact.
Parameters
----------
name: str
Name of the contact map
selection: :class:`~MDAnalysis.core.groups.AtomGroup`
Atomic selection for calculation of the contact map, which is then
projected to a residue based map. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
cmap: :class:`~numpy:numpy.ndarray`
Contact map between residues of the atomic selection
errors: :class:`~numpy:numpy.ndarray`
Undeterminacies for every contact of cmap
cutoff: float
Cut-off distance defining a contact between two atoms
""" # noqa: E501
default_name = 'cm'
def __init__(self, name=None, selection=None, cmap=None, errors=None,
cutoff=None):
self.name = ResidueContactMap.default_name if name is None else name
self.selection = selection
self.cmap = cmap
self.errors = errors
self.cutoff = cutoff
def from_universe(self, a_universe, cutoff, selection=None, index=0):
r"""Calculate residue contact map from an MDAnalysis Universe instance
Parameters
----------
a_universe: :class:`~MDAnalysis.core.universe.Universe`
Trajectory or single-conformation instance
cutoff: float
Cut-off distance defining a contact between two atoms
selection: str
Atomic selection for calculating interatomic contacts. All atoms
are used if None is passed. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
Returns
-------
self: :class:`~idpflex.properties.ResidueContactMap`
Instantiated ResidueContactMap object
""" # noqa: E501
if selection is None:
self.selection = a_universe.atoms
else:
self.selection = a_universe.select_atoms(selection)
n_atoms = len(self.selection)
a_universe.trajectory[index] # jump to frame
cm = contact_matrix(self.selection.positions, cutoff=cutoff)
# Cast the atomic map into a residue based map
resids = self.selection.resids
unique_resids = list(set(resids))
n_res = len(unique_resids)
self.cmap = np.full((n_res, n_res), False)
for i in range(n_atoms - 1):
k = unique_resids.index(resids[i])
for j in range(i + 1, n_atoms):
ll = unique_resids.index(resids[j])
self.cmap[k][ll] = self.cmap[k][ll] or cm[i][j]
# self always in contact
for k in range(n_res):
self.cmap[k][k] = True
# symmetrize the contact map
for k in range(n_res - 1):
for ll in range(k + 1, n_res):
self.cmap[ll][k] = self.cmap[k][ll]
self.errors = np.zeros(self.cmap.shape)
return self
def from_pdb(self, filename, cutoff, selection=None):
r"""Calculate residue contact map from a PDB file
Parameters
----------
filename: str
Path to the file in PDB format
cutoff: float
Cut-off distance defining a contact between two atoms
selection: str
Atomic selection for calculating interatomic contacts. All atoms
are used if None is passed. See the
`selections page <https://www.mdanalysis.org/docs/documentation_pages/selections.html>`_
for atom selection syntax.
Returns
-------
self: :class:`~idpflex.properties.ResidueContactMap`
Instantiated ResidueContactMap object
""" # noqa: E501
return self.from_universe(mda.Universe(filename), cutoff, selection)
def plot(self):
r"""Plot the residue contact map of the node"""
resids = [str(i) for i in list(set(self.selection.resids))]
def format_fn(tick_val, tick_pos):
r"""Translates matrix index to residue number"""
if int(tick_val) < len(resids):
return resids[int(tick_val)]
else:
return ''
fig, ax = plt.subplots()
ax.set_xlabel('Residue Numbers', size=16)
ax.xaxis.set_major_formatter(FuncFormatter(format_fn))
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.set_ylabel('Residue Numbers', size=16)
ax.yaxis.set_major_formatter(FuncFormatter(format_fn))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.grid(color='r', which='major', linestyle='-', linewidth=1)
ax.grid(color='b', which='minor', linestyle=':', linewidth=1)
im = ax.imshow(self.cmap, interpolation='none', origin='lower',
aspect='auto', cmap='Greys')
fig.colorbar(im, ax=ax)
plt.tight_layout()
plt.show()
@decorate_as_node_property((('name', '(str) name of the profile'),
('aa', '(:py:class:`str`) amino-acid sequence'), # noqa: E501
('profile', '(:class:`~numpy:numpy.ndarray`) secondary structure assignment'), # noqa: E501
('errors', '(:class:`~numpy:numpy.ndarray`) assignment undeterminacy'))) # noqa: E501
class SecondaryStructureProperty(object):
r"""Node property for secondary structure determined by DSSP
Every residue is assigned a vector of length 8. Indexes correspond to
different secondary structure assignment:
| Index__||__DSSP code__||__ Color__||__Structure__||
| =======================================
| __0__||__H__||__yellow__||__Alpha helix (4-12)
| __1__||__B__||__pink__||__Isolated beta-bridge residue
| __2__||__E__||__red__||__Strand
| __3__||__G__||__orange__||__3-10 helix
| __4__||__I___||__green__||__Pi helix
| __5__||__T__||__magenta__||__Turn
| __6__||__S__||__cyan__||__Bend
| __7__||_____||__white__||__Unstructured (coil)
We follow here `Bio.PDB.DSSP ordering <http://biopython.org/DIST/docs/api/Bio.PDB.DSSP%27-module.html>`_
For a leaf node (single structure), the vector for any given residue will
be all zeroes except a value of one for the corresponding assigned
secondary structure. For all other nodes, the vector will correspond to
a probability distribution among the different DSSP codes.
Parameters
----------
name : str
Property name
aa : str
One-letter amino acid sequence encoded in a single string
profile : :class:`~numpy:numpy.ndarray`
N x 8 matrix with N number of residues and 8 types of secondary
structure
errors : :class:`~numpy:numpy.ndarray`
N x 8 matrix denoting undeterminacies for each type of assigned
secondary residue in every residue
""" # noqa: E501
#: Description of single-letter codes for secondary structure
elements = OrderedDict([('H', 'Alpha helix'),
('B', 'Isolated beta-bridge'),
('E', 'Strand'), ('G', '3-10 helix'),
('I', 'Pi helix'), ('T', 'Turn'),
('S', 'Bend'), (' ', 'Unstructured')])
#: list of single-letter codes for secondary structure. Last code is a
#: blank space denoting no secondary structure (Unstructured)
dssp_codes = ''.join(elements.keys())
#: number of distinctive elements of secondary structure
n_codes = len(dssp_codes)
#: associated colors to each element of secondary structure
colors = ('yellow', 'pink', 'red', 'orange', 'green', 'magenta', 'cyan',
'white')
@classmethod
def code2profile(cls, code):
r"""Generate a secondary structure profile vector for a
particular DSSP code
Parameters
----------
code : str
one-letter code denoting secondary structure assignment
Returns
-------
:class:`~numpy:numpy.ndarray`
profile vector
"""
if code not in cls.dssp_codes:
raise ValueError('{} is not a valid DSSP code'.format(code))
v = np.zeros(cls.n_codes)
v[cls.dssp_codes.find(code)] = 1.0
return v
default_name = 'ss'
def __init__(self, name=None, aa=None, profile=None, errors=None):
self.name = SecondaryStructureProperty.default_name \
if name is None else name
self.aa = aa
self.profile = profile
self.errors = errors
self.node = None
def from_dssp_sequence(self, codes):
r"""Load secondary structure profile from a single string of DSSP codes
Attributes *aa* and *errors* are not modified, only **profile**.
Parameters
----------
codes : str
Sequence of one-letter DSSP codes
Returns
-------
self : :class:`~idpflex.properties.SecondaryStructureProperty`
"""
if self.aa is not None and len(self.aa) != len(codes):
raise ValueError('length of {} different than that of the '
'amino acid sequence'.format(codes))
if self.errors is not None and len(self.errors) != len(codes):
raise ValueError('length of {} different than that of the '
'profile errors'.format(codes))
self.profile = np.asarray([self.code2profile(c) for c in codes])
return self
def from_dssp(self, file_name):
r"""Load secondary structure profile from a `dssp file <http://swift.cmbi.ru.nl/gv/dssp/>`_
Parameters
----------
file_name : str
File path
Returns
-------
self : :class:`~idpflex.properties.SecondaryStructureProperty`
""" # noqa: E501
aa = ''
profile = list()
start = False
with open(file_name) as handle:
for line in handle:
if '#' in line:
start = True
if start:
aa += line[13:14]
profile.append(self.code2profile(line[16:17]))
self.aa = aa
self.profile = np.asarray(profile)
self.errors = np.zeros(self.profile.shape)
return self
def from_dssp_pdb(self, file_name, command='mkdssp', silent=True):
r"""Calculate secondary structure with DSSP
Parameters
----------
file_name : str
Path to PDB file
command : str
Command to invoke dssp. You need to have DSSP installed in your
machine
silent : bool
Suppress DSSP standard output and error
Returns
-------
self : :class:`~idpflex.properties.SecondaryStructureProperty`
"""
# Generate a temporary DSSP file
curr_dir = os.getcwd()
temp_dir = tempfile.mkdtemp()
os.chdir(temp_dir)
call_stack = [command, '-i', file_name, '-o', 'pdb.dssp']
if silent:
FNULL = open(os.devnull, 'w')  # silence DSSP output
subprocess.call(call_stack, stdout=FNULL, stderr=subprocess.STDOUT)
else:
subprocess.call(call_stack)
# load the DSSP file
self.from_dssp('pdb.dssp')
# Delete the temporary directory
os.chdir(curr_dir)
subprocess.call('/bin/rm -rf {}'.format(temp_dir).split())
return self
@property
def fractions(self):
r"""Output fraction of each element of secondary structure.
Fractions are computed summing over all residues.
Returns
-------
dict
Elements of the form {single-letter-code: fraction}
"""
f = np.sum(self.profile, axis=0) / len(self.profile)
return dict(zip(self.dssp_codes, f))
@property
def collapsed(self):
r"""For every residue, collapse the secondary structure profile onto
the component with the highest probability
Returns
-------
:class:`~numpy:numpy.ndarray`
List of indexes corresponding to collapsed secondary structure
states
"""
return self.profile.argmax(axis=1)
def disparity(self, other):
r"""Secondary Structure disparity of other profile to self, akin to
:math:`\chi^2`
:math:`\frac{1}{N(n-1)} \sum_{i=1}^{N}\sum_{j=1}^{n} (\frac{p_{ij}-q_{ij}}{e})^2`
with :math:`N` number of residues and :math:`n` number of DSSP codes. Errors
:math:`e` are those of *self*, and are set to one if they have not been
initialized. We divide by :math:`n-1` because a normalized distribution of
secondary structure elements is implied for each residue.
Parameters
----------
other : :class:`~idpflex.properties.SecondaryStructureProperty`
Secondary structure property to compare to
Returns
-------
float
disparity measure
""" # noqa: E501
n = len(self.profile)
if n != len(other.profile):
raise ValueError('Profiles have different sizes')
dp = self.profile - other.profile
e = self.errors if self.errors is not None and np.all(self.errors)\
else np.ones((n, self.n_codes))
return np.sum(np.square(dp/e)) / (n * (self.n_codes - 1))
def plot(self, kind='percents'):
r"""Plot the secondary structure of the node holding the property
Parameters
----------
kind : str
'percents': bar chart with each bar denoting the percent of
a particular secondary structure in all the protein; ---
'node': gray plot of secondary structure element probabilities
for each residue; ---
'leafs': color plot of secondary structure for each leaf under the
node. Leafs are sorted by increasing disparity to the
secondary structure of the node.
"""
if kind == 'percents':
fig, ax = plt.subplots()
ind = np.arange(self.n_codes) # the x locations for the groups
width = 0.75 # the width of the bars
pcs = [100 * self.fractions[c] for c in self.dssp_codes]
rects = ax.bar(ind, pcs, width, color='b')
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%d' % int(height), ha='center', va='bottom')
ax.set_ylabel('Percents')
ax.set_xlabel('Secondary Structure')
ax.set_xticks(ind)
ax.set_xticklabels(list(self.dssp_codes))
elif kind == 'node':
fig, ax = plt.subplots()
ax.set_xlabel('Residue Index')
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))
ind = np.arange(self.n_codes) # the x locations for the groups
im = ax.imshow(self.profile.transpose(), interpolation='none',
origin='lower', aspect='auto', cmap='Greys',
extent=[0.5, self.profile.shape[0] + 0.5,
-0.5, self.profile.shape[1] - 0.5]
)
width = 0.5
ax.set_yticks(ind + width / 2 - 0.225)
ax.set_yticklabels(list(self.dssp_codes))
fig.colorbar(im, ax=ax)
elif kind == 'leafs':
fig, ax = plt.subplots()
ax.set_xlabel('leaf index sorted by increasing disparity to'
' average profile')
ax.set_ylabel('residue index')
leafs = self.node.leafs
if not leafs:
leafs = [self.node]
sss = [l[self.name] for l in leafs] # Sec Str props of the leafs
sss.sort(key=lambda ss: self.disparity(ss))
collapsed = np.asarray([ss.collapsed for ss in sss])
cm = ListedColormap(self.colors)
im = ax.imshow(collapsed.transpose(), interpolation='none',
norm=mpl.colors.Normalize(vmin=0,
vmax=self.n_codes - 1),
origin='lower', aspect='auto', cmap=cm,
extent=[0.5, collapsed.shape[0] + 0.5,
0.5, collapsed.shape[1] + 0.5])
# Force integer values in tick labels
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))
ax.yaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))
# Color bar
tick_positions = 0.5 + np.arange(self.n_codes) *\
(self.n_codes - 1) / self.n_codes
cbar = fig.colorbar(im, ticks=tick_positions, ax=ax)
tick_labels = ['{}: {}'.format(k, v)
               for k, v in self.elements.items()]
cbar.ax.set_yticklabels(tick_labels)
plt.grid()
plt.tight_layout()
plt.show()
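# --- Illustrative sketch (not part of the library) ---
# Encode a short DSSP string and inspect the quantities described in the
# class docstring: one-hot rows, per-code fractions, collapsed assignments.
def _example_secondary_structure_profile():
    ssp = SecondaryStructureProperty().from_dssp_sequence('HHEE ')
    fractions = ssp.fractions  # e.g. fractions['H'] == 0.4 (2 of 5 residues)
    collapsed = ssp.collapsed  # array([0, 0, 2, 2, 7]) for codes 'HBEGITS '
    return fractions, collapsed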
@decorate_as_node_property((('name', '(str) name of the profile'),
('qvalues', '(:class:`~numpy:numpy.ndarray`) momentum transfer values'), # noqa: E501
('profile', '(:class:`~numpy:numpy.ndarray`) profile intensities'), # noqa: E501
('errors', '(:class:`~numpy:numpy.ndarray`) intensity errors'))) # noqa: E501
class ProfileProperty(object):
r"""Implementation of a node property valid for SANS or X-Ray data.
Parameters
----------
name : str
Property name.
qvalues : :class:`~numpy:numpy.ndarray`
Momentum transfer domain
profile : :class:`~numpy:numpy.ndarray`
Intensity values
errors : :class:`~numpy:numpy.ndarray`
Errors in the intensity values
"""
default_name = 'profile'
def __init__(self, name=None, qvalues=None, profile=None, errors=None):
self.name = name
self.qvalues = qvalues
self.profile = profile
self.errors = errors
self.node = None
@property
def feature_vector(self):
r"""
Each `qvalue` is interpreted as an independent feature,
and the related value in `profile` is a particular
"measured" value of that feature.
Returns
-------
numpy.ndarray
"""
return self.profile
@property
def feature_weights(self):
r"""
Weights to be used when calculating the square of the euclidean
distance between two feature vectors
Returns
-------
numpy.ndarray
"""
return np.ones(len(self.profile)) / np.sqrt(len(self.profile))
class SansLoaderMixin(object):
r"""Mixin class providing a set of methods to load SANS data into a
profile property
"""
def from_sassena(self, handle, profile_key='fqt', index=0):
"""Load SANS profile from sassena output.
It is assumed that Q-values are stored under item *qvectors* and
listed under the *X* column.
Parameters
----------
handle : h5py.File
h5py reading handle to HDF5 file
profile_key : str
item key where profiles are stored in the HDF5 file
param index : int
profile index, if data contains more than one profile
Returns
-------
self : :class:`~idpflex.properties.SansProperty`
"""
q = handle['qvectors'][:, 0] # q values listed in the X component
i = handle[profile_key][:, index][:, 0] # profile
# q values may be unordered
sorting_order = np.argsort(q)
q = q[sorting_order]
i = i[sorting_order]
self.qvalues = np.array(q, dtype=float)
self.profile = np.array(i, dtype=float)
self.errors = np.zeros(len(q), dtype=float)
return self
def from_cryson_int(self, file_name):
r"""Load profile from a `cryson \*.int <https://www.embl-hamburg.de/biosans/manuals/cryson.html#output>`_ file
Parameters
----------
file_name : str
File path
Returns
-------
self : :class:`~idpflex.properties.SansProperty`
""" # noqa: E501
contents = np.loadtxt(file_name, skiprows=1, usecols=(0, 1))
self.qvalues = contents[:, 0]
self.profile = contents[:, 1]
self.errors = np.zeros(len(self.qvalues), dtype=float)
return self
def from_cryson_fit(self, file_name):
r"""Load profile from a `cryson \*.fit <https://www.embl-hamburg.de/biosans/manuals/cryson.html#output>`_ file.
Parameters
----------
file_name : str
File path
Returns
-------
self : :class:`~idpflex.properties.SansProperty`
""" # noqa: E501
contents = np.loadtxt(file_name, skiprows=1, usecols=(0, 3))
self.qvalues = contents[:, 0]
self.profile = contents[:, 1]
self.errors = np.zeros(len(self.qvalues), dtype=float)
return self
def from_cryson_pdb(self, file_name, command='cryson',
args='-lm 20 -sm 0.6 -ns 500 -un 1 -eh -dro 0.075',
silent=True):
r"""Calculate profile with cryson from a PDB file
Parameters
----------
file_name : str
Path to PDB file
command : str
Command to invoke cryson
args : str
Arguments to pass to cryson
silent : bool
Suppress cryson standard output and standard error
Returns
-------
self : :class:`~idpflex.properties.SansProperty`
"""
# Write cryson file within a temporary directory
curr_dir = os.getcwd()
temp_dir = tempfile.mkdtemp()
os.chdir(temp_dir)
call_stack = [command] + args.split() + [file_name]
if silent:
FNULL = open(os.devnull, 'w') # silence cryson output
subprocess.call(call_stack, stdout=FNULL, stderr=subprocess.STDOUT)
else:
subprocess.call(call_stack)
# Load the cryson file
ext_2_load = dict(int=self.from_cryson_int, fit=self.from_cryson_fit)
stop_search = False
for name in os.listdir(temp_dir):
for ext in ext_2_load:
if fnmatch.fnmatch(name, '*.{}'.format(ext)):
ext_2_load[ext](name)
stop_search = True
break
if stop_search:
break
# Delete the temporary directory
os.chdir(curr_dir)
subprocess.call('/bin/rm -rf {}'.format(temp_dir).split())
return self
def from_ascii(self, file_name):
r"""Load profile from an ascii file.
| Expected file format:
| Rows have three items separated by a blank space:
| - *col1* momentum transfer
| - *col2* profile
| - *col3* errors of the profile
Parameters
----------
file_name : str
File path
Returns
-------
self : :class:`~idpflex.properties.SansProperty`
"""
contents = np.loadtxt(file_name, skiprows=0, usecols=(0, 1, 2))
self.qvalues = contents[:, 0]
self.profile = contents[:, 1]
self.errors = contents[:, 2]
return self
def to_ascii(self, file_name):
r"""Save profile as a three-column ascii file.
| Rows have three items separated by a blank space
| - *col1* momentum transfer
| - *col2* profile
| - *col3* errors of the profile
"""
dir_name = os.path.dirname(file_name)
if dir_name and not os.path.isdir(dir_name):
os.makedirs(dir_name)
xye = np.array([list(self.x), list(self.y), list(self.e)])
np.savetxt(file_name, xye.transpose(),
header='Momentum-transfer Profile Profile-errors')
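# --- Illustrative sketch (not part of the library) ---
# Round-trip of the three-column ascii format described above; the file name
# is hypothetical and ``prop`` is expected to be a SansProperty instance.
def _example_profile_ascii_roundtrip(prop, filename='profile.dat'):
    prop.to_ascii(filename)  # columns: momentum transfer, profile, errors
    return SansProperty().from_ascii(filename)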
class SansProperty(ProfileProperty, SansLoaderMixin):
r"""Implementation of a node property for SANS data
"""
default_name = 'sans'
def __init__(self, *args, **kwargs):
ProfileProperty.__init__(self, *args, **kwargs)
if self.name is None:
self.name = SansProperty.default_name
class SaxsLoaderMixin(object):
r"""Mixin class providing a set of methods to load X-ray data into a
profile property
"""
def from_crysol_int(self, file_name):
r"""Load profile from a `crysol \*.int <https://www.embl-hamburg.de/biosaxs/manuals/crysol.html#output>`_ file
Parameters
----------
file_name : str
File path
Returns
-------
self : :class:`~idpflex.properties.SaxsProperty`
""" # noqa: E501
contents = np.loadtxt(file_name, skiprows=1, usecols=(0, 1))
self.qvalues = contents[:, 0]
self.profile = contents[:, 1]
self.errors = np.zeros(len(self.qvalues), dtype=float)
return self
def from_crysol_fit(self, file_name):
r"""Load profile from a `crysol \*.fit <https://www.embl-hamburg.de/biosaxs/manuals/crysol.html#output>`_ file.
Parameters
----------
file_name : str
File path
Returns
-------
self : :class:`~idpflex.properties.SaxsProperty`
""" # noqa: E501
contents = np.loadtxt(file_name, skiprows=1, usecols=(0, 3))
self.qvalues = contents[:, 0]
self.profile = contents[:, 1]
self.errors = np.zeros(len(self.qvalues), dtype=float)
return self
def from_crysol_pdb(self, file_name, command='crysol',
args='-lm 20 -sm 0.6 -ns 500 -un 1 -eh -dro 0.075',
silent=True):
r"""Calculate profile with crysol from a PDB file
Parameters
----------
file_name : str
Path to PDB file
command : str
Command to invoke crysol
args : str
Arguments to pass to crysol
silent : bool
Suppress crysol standard output and standard error
Returns
-------
self : :class:`~idpflex.properties.SaxsProperty`
"""
# Write crysol file within a temporary directory
curr_dir = os.getcwd()
temp_dir = tempfile.mkdtemp()
os.chdir(temp_dir)
call_stack = [command] + args.split() + [file_name]
if silent:
FNULL = open(os.devnull, 'w') # silence crysol output
subprocess.call(call_stack, stdout=FNULL, stderr=subprocess.STDOUT)
else:
subprocess.call(call_stack)
# Load the crysol file
ext_2_load = dict(int=self.from_crysol_int, fit=self.from_crysol_fit)
stop_search = False
for name in os.listdir(temp_dir):
for ext in ext_2_load:
if fnmatch.fnmatch(name, '*.{}'.format(ext)):
ext_2_load[ext](name)
stop_search = True
break
if stop_search:
break
# Delete the temporary directory
os.chdir(curr_dir)
subprocess.call('/bin/rm -rf {}'.format(temp_dir).split())
return self
def from_ascii(self, file_name):
r"""Load profile from an ascii file.
| Expected file format:
| Rows have three items separated by a blank space:
| - *col1* momentum transfer
| - *col2* profile
| - *col3* errors of the profile
Parameters
----------
file_name : str
File path
Returns
-------
self : :class:`~idpflex.properties.SaxsProperty`
"""
contents = np.loadtxt(file_name, skiprows=0, usecols=(0, 1, 2))
self.qvalues = contents[:, 0]
self.profile = contents[:, 1]
self.errors = contents[:, 2]
return self
def to_ascii(self, file_name):
r"""Save profile as a three-column ascii file.
| Rows have three items separated by a blank space
| - *col1* momentum transfer
| - *col2* profile
| - *col3* errors of the profile
"""
dir_name = os.path.dirname(file_name)
if dir_name and not os.path.isdir(dir_name):
os.makedirs(dir_name)
xye = np.array([list(self.x), list(self.y), list(self.e)])
np.savetxt(file_name, xye.transpose(),
header='Momentum-transfer Profile Profile-errors')
class SaxsProperty(ProfileProperty, SaxsLoaderMixin):
r"""Implementation of a node property for SAXS data
"""
default_name = 'saxs'
def __init__(self, *args, **kwargs):
ProfileProperty.__init__(self, *args, **kwargs)
if self.name is None:
self.name = SaxsProperty.default_name
def propagator_weighted_sum(values, tree,
weights=lambda left_node, right_node: (1.0, 1.0)):
r"""Calculate the property of a node as the sum of its two siblings'
property values. Propagation applies only to non-leaf nodes.
Parameters
----------
values: list
List of property values (of same type), one item for each leaf node.
tree: :class:`~idpflex.cnextend.Tree`
Tree of :class:`~idpflex.cnextend.ClusterNodeX` nodes
weights: tuple
Callable of two arguments (left-node and right-node) returning
a tuple of left and right weights. Default callable returns (1.0, 1.0)
always.
"""
# Insert a property for each leaf
if len(values) != tree.nleafs:
msg = "len(values)={} but there are {} leafs".format(len(values),
tree.nleafs)
raise ValueError(msg)
for i, leaf in enumerate(tree.leafs):
leaf[values[i].name] = values[i]
property_class = values[0].__class__ # type of the property
name = values[0].name # name of the property
# Propagate up the tree nodes
for node in tree._nodes[tree.nleafs:]:
prop = property_class()
prop.name = name
left_prop = node.left[name]
right_prop = node.right[name]
w = weights(node.left, node.right)
prop.x = left_prop.x
prop.y = w[0] * left_prop.y + w[1] * right_prop.y
if left_prop.e is not None and right_prop.e is not None:
prop.e = np.sqrt(w[0] * left_prop.e**2 + w[1] * right_prop.e**2)
else:
prop.e = None
node[prop.name] = prop
def weights_by_size(left_node, right_node):
r"""Calculate the relative size of two nodes
Parameters
----------
left_node : :class:`~idpflex.cnextend.ClusterNodeX`
One of the two sibling nodes
right_node : :class:`~idpflex.cnextend.ClusterNodeX`
One of the two sibling nodes
Returns
-------
tuple
Weights representing the relative populations of two nodes
"""
w = float(left_node.count) / (left_node.count + right_node.count)
return w, 1-w
#: Calculate a property of the node as the sum of its children's property
#: values, weighted by the relative cluster sizes of the two children.
#:
#: Parameters
#: ----------
#: values : list
#: List of property values (of same type), one item for each leaf node.
#: node_tree : :class:`~idpflex.cnextend.Tree`
#: Tree of :class:`~idpflex.cnextend.ClusterNodeX` nodes
propagator_size_weighted_sum = functools.partial(propagator_weighted_sum,
weights=weights_by_size)
propagator_size_weighted_sum.__name__ = 'propagator_size_weighted_sum'
propagator_size_weighted_sum.__doc__ = r"""Calculate a property of the node
as the sum of its children's property values, weighted by the relative cluster
sizes of the two children.
Parameters
----------
values : list
List of property values (of same type), one item for each leaf node.
node_tree : :class:`~idpflex.cnextend.Tree`
Tree of :class:`~idpflex.cnextend.ClusterNodeX` nodes
"""
```
#### File: ipdflex/tests/test_cnextend.py
```python
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from scipy.cluster import hierarchy
from idpflex import cnextend as cnx
from idpflex.properties import ScalarProperty
class TestClusterNodeX(object):
def test_property(self):
n = cnx.ClusterNodeX(0)
n.property_group['prop'] = True
assert n['prop'] is True
assert n['not_a_key'] is None
prop = ScalarProperty(name='some_prop', y=np.array([1, 2, 3]))
n[prop.name] = prop
assert_array_equal(n[prop.name].y, prop.y)
assert prop.node is n
with pytest.raises(AttributeError):
n['not_a_property'] = 'not a property class'
def test_property_group_features(self):
n = cnx.ClusterNodeX(0)
prop = ScalarProperty(name='some_prop', y=4)
n[prop.name] = prop
prop2 = ScalarProperty(name='some_prop2', y=2)
n[prop2.name] = prop2
fv = n.property_group.feature_vector()
assert_array_equal(fv, np.array([4, 2]))
ws = n.property_group.feature_weights()
assert_array_equal(ws, np.array([1, 1]))
def test_leafs(self, benchmark):
t = benchmark['tree']
cluster = t[benchmark['nleafs']]  # first cluster that is not a leaf
assert [n.id for n in cluster.leafs] == [19167, 19168]
cluster = t.root
assert cluster.leafs == t.leafs
def test_distance_submatrix(self, small_tree):
t = small_tree['tree']
a_cluster = t[-4] # leafs have indexes 6, 7, 8
dist_submat = a_cluster.distance_submatrix(small_tree['dist_mat'])
reference = np.array([1, 4, 1])
assert_array_equal(dist_submat, reference)
def test_representative(self, small_tree):
t = small_tree['tree']
a_cluster = t[-4]
r = a_cluster.representative(small_tree['dist_mat'])
assert r.id == 7
class TestTree(object):
def test_from_linkage_matrix(self, benchmark):
t = cnx.Tree()
t.from_linkage_matrix(benchmark['z'], node_class=hierarchy.ClusterNode)
r = t.root
assert hasattr(r, 'parent') is False
t.from_linkage_matrix(benchmark['z'], node_class=cnx.ClusterNodeX)
r = t.root
assert r.parent is None
assert len(t) == benchmark['nnodes']
def test_leafs(self, benchmark):
t = benchmark['tree']
assert len(t.leafs) == benchmark['nleafs']
def test_iter(self, benchmark):
t = benchmark['tree']
ids = sorted(range(benchmark['nnodes']), reverse=True)
assert ids == list(node.id for node in t)
def test_getitem(self, benchmark):
t = benchmark['tree']
assert t[-1] is t.root
assert list(n.id for n in t[:3]) == list(range(3))
def test_clusters_above_depth(self, benchmark):
t = benchmark['tree']
ids = [n.id for n in t.nodes_above_depth(depth=3)]
assert ids == [44732, 44748, 44752, 44753, 44754, 44755, 44756]
def test_clusters_at_depth(self, benchmark):
t = benchmark['tree']
ids = [n.id for n in t.nodes_at_depth(depth=3)]
assert ids == [44732, 44748, 44752, 44753]
def test_random_distance_tree():
out = cnx.random_distance_tree(9)
dm = out.distance_matrix
# Indexes of the two leaves with the biggest mutual distance
idx = set(np.unravel_index(np.argmax(dm), dm.shape))
# the first partition of the root node cannot contain both indexes of
# the two leaves with the biggest mutual distance
assert not idx.issubset(set(out.tree[-2].leaf_ids))
if __name__ == '__main__':
pytest.main()
```
#### File: ipdflex/tests/test_properties.py
```python
import random
import numpy as np
import pytest
import tempfile
import shutil
from idpflex import properties as ps
from idpflex.properties import SecondaryStructureProperty as SSP
class TestRegisterDecorateProperties(object):
def test_register_as_node_property(self):
class SomeProperty(object):
def __init__(self):
attrs = dict(id='foo', a='ax', b='by', c='ce')
self.__dict__.update(attrs)
associations = (('id', 'name of the property'),
('a', 'this is x'),
('b', 'this is y'),
('c', 'this is e'))
ps.register_as_node_property(SomeProperty, associations)
# Test for class attributes
assert isinstance(ps.ProfileProperty.name, property)
assert isinstance(ps.ProfileProperty.x, property)
assert isinstance(ps.ProfileProperty.y, property)
assert isinstance(ps.ProfileProperty.e, property)
# Test for managed attributes
some_prop = SomeProperty()
assert some_prop.name == 'foo'
assert some_prop.x == 'ax'
assert some_prop.y == 'by'
assert some_prop.e == 'ce'
def test_decorate_as_node_property(self):
associations = (('id', 'name of the property'),
('a', 'this is x'),
('b', 'this is y'),
('c', 'this is e'))
@ps.decorate_as_node_property(associations)
class SomeProperty(object):
def __init__(self):
attrs = dict(id='foo', a='ax', b='by', c='ce')
self.__dict__.update(attrs)
# Test for class attributes
assert isinstance(ps.ProfileProperty.name, property)
assert isinstance(ps.ProfileProperty.x, property)
assert isinstance(ps.ProfileProperty.y, property)
assert isinstance(ps.ProfileProperty.e, property)
# Test for managed attributes
some_prop = SomeProperty()
assert some_prop.name == 'foo'
assert some_prop.x == 'ax'
assert some_prop.y == 'by'
assert some_prop.e == 'ce'
class TestScalarProperty(object):
def test_histogram(self, benchmark):
root_prop = benchmark['tree'].root['sc']
edges, h, e = root_prop.histogram(bins=1, errors=True)
assert h[0] == benchmark['nleafs']
assert e[0] == np.sqrt(h[0])
def test_plot_histogram(self, benchmark):
root_prop = benchmark['tree'].root['sc']
ax = root_prop.plot(kind='histogram', errors=True, bins=1)
assert ax.patches[0]._height == benchmark['nleafs']
class TestAsphericity(object):
def test_from_pdb(self, ss_benchmark):
filename = ss_benchmark['pdb_file']
prop = ps.Asphericity().from_pdb(filename)
np.testing.assert_almost_equal(prop.asphericity, 0.71, decimal=2)
class TestEndToEnd(object):
def test_from_pdb(self, ss_benchmark):
filename = ss_benchmark['pdb_file']
prop = ps.EndToEnd().from_pdb(filename)
np.testing.assert_almost_equal(prop.end_to_end, 9.244, decimal=3)
class TestSaSa(object):
def test_from_pdb(self, ss_benchmark):
filename = ss_benchmark['pdb_file']
prop = ps.SaSa().from_pdb(filename)
np.testing.assert_allclose(prop.sasa, 2964, rtol=0.10)
prop = ps.SaSa().from_pdb(filename, n_sphere_points=3)
np.testing.assert_allclose(prop.sasa, 2989, rtol=0.10)
prop = ps.SaSa().from_pdb(filename, selection='resid 0 to 10')
np.testing.assert_allclose(prop.sasa, 1350, rtol=0.16)
class TestRadiusOfGyration(object):
def test_from_pdb(self, ss_benchmark):
filename = ss_benchmark['pdb_file']
prop = ps.RadiusOfGyration().from_pdb(filename, 'name CA')
np.testing.assert_almost_equal(prop.rg, 8.75, decimal=2)
class TestResidueContactMap(object):
def test_from_universe(self, trajectory_benchmark):
cm = ps.ResidueContactMap().from_universe(trajectory_benchmark,
8, 'name CA')
assert np.sum(cm.y) == 363
cm = ps.ResidueContactMap().from_universe(trajectory_benchmark, 4)
assert np.sum(cm.y) == 313
def test_from_pdb(self, ss_benchmark):
filename = ss_benchmark['pdb_file']
cm = ps.ResidueContactMap().from_pdb(filename, 8, 'name CA')
assert np.sum(cm.y) == 351
@pytest.mark.skip(reason="Plotting not enabled in the CI")
def test_plot(self, trajectory_benchmark):
cm = ps.ResidueContactMap().from_universe(trajectory_benchmark,
8, 'name CA')
cm.plot()
class TestSecondaryStructureProperty(object):
def test_class_decorated_as_node_property(self):
assert isinstance(SSP.name, property)
assert isinstance(SSP.x, property)
assert isinstance(SSP.y, property)
assert isinstance(SSP.e, property)
def test_instance_decorated_as_node_property(self):
ss = 'GTEL'
v = np.random.rand(len(ss), SSP.n_codes)
v /= np.sum(v, axis=1)[:, np.newaxis] # normalize rows
profile_prop = SSP(name='foo', aa=ss, profile=v, errors=0.1*v)
assert profile_prop.name == 'foo'
assert np.array_equal(profile_prop.x, ss)
assert np.array_equal(profile_prop.y, v)
assert np.array_equal(profile_prop.e, 0.1*v)
def test_default_name(self):
ss_prop = SSP()
assert ss_prop.name == 'ss'
def test_from_dssp_sequence(self):
seq = ''.join(random.sample(SSP.dssp_codes, SSP.n_codes))
ss_prop = SSP().from_dssp_sequence(seq)
np.testing.assert_array_equal(ss_prop.y[-1], SSP.code2profile(seq[-1]))
def test_from_dssp(self, ss_benchmark):
name = ss_benchmark['dssp_file']
ss_prop = SSP().from_dssp(name)
np.testing.assert_array_equal(ss_prop.y[-1], SSP.code2profile(' '))
@pytest.mark.skip(reason="DSSP may not be installed in the machine")
def test_from_dssp_pdb(self, ss_benchmark):
name = ss_benchmark['pdb_file']
ss_prop = SSP().from_dssp_pdb(name)
np.testing.assert_array_equal(ss_prop.y[-1], SSP.code2profile(' '))
def test_propagator_size_weighted_sum(self, small_tree):
r"""Create random secondary sequences by shufling all codes and
assign to the leafs of the tree. Then, propagate the profiles up
the tree hiearchy. Finally, compare the profile of the root with
expected profile.
"""
tree = small_tree['tree']
ss_props = list()
for i in range(tree.nleafs):
seq = ''.join(random.sample(SSP.dssp_codes, SSP.n_codes))
ss_props.append(SSP().from_dssp_sequence(seq))
ps.propagator_size_weighted_sum(ss_props, tree)
# Manually calculate the average profile for the last residue
y = np.asarray([ss_props[i].y for i in range(tree.nleafs)])
average_profile = np.mean(y, axis=0)
np.testing.assert_array_almost_equal(average_profile,
tree.root['ss'].y, decimal=12)
def test_fractions(self):
profile = np.random.rand(42, SSP.n_codes) # not normalized
prop = SSP(profile=profile)
f = prop.fractions
assert f['H'] == np.sum(profile, axis=0)[0] / 42
def test_collapse(self):
profile = np.random.rand(42, SSP.n_codes) # not normalized
prop = SSP(profile=profile)
c = prop.collapsed
assert c[0] == np.argmax(profile[0])
def test_disparity(self):
p = np.random.rand(42, SSP.n_codes) # not normalized
o = np.zeros((42, SSP.n_codes))
pr = SSP(profile=p)
assert pr.disparity(SSP(profile=-p)) == 4 * \
pr.disparity(SSP(profile=o))
@pytest.mark.skip(reason="Plotting not enabled in the CI")
def test_plot_percents(self):
profile = np.random.rand(42, SSP.n_codes) # not normalized
profile /= np.sum(profile, axis=1)[:, np.newaxis] # normalized
prop = SSP(profile=profile)
prop.plot('percents')
@pytest.mark.skip(reason="Plotting not enabled in the CI")
def test_plot_node(self):
profile = np.random.rand(42, SSP.n_codes) # not normalized
profile /= np.sum(profile, axis=1)[:, np.newaxis] # normalized
prop = SSP(profile=profile)
prop.plot('node')
@pytest.mark.skip(reason="Plotting not enabled in the CI")
def test_plot_leafs(self, small_tree):
tree = small_tree['tree']
ss_props = list()
for i in range(tree.nleafs):
seq = ''.join(random.sample(1000*SSP.dssp_codes, 42))
ss_props.append(SSP().from_dssp_sequence(seq))
ps.propagator_size_weighted_sum(ss_props, tree)
tree.root['ss'].plot('leafs')
class TestProfileProperty(object):
def test_class_decorated_as_node_property(self):
assert isinstance(ps.ProfileProperty.name, property)
assert isinstance(ps.ProfileProperty.x, property)
assert isinstance(ps.ProfileProperty.y, property)
assert isinstance(ps.ProfileProperty.e, property)
def test_instance_decorated_as_node_property(self):
v = np.arange(9)
profile_prop = ps.ProfileProperty(name='foo', qvalues=v, profile=10*v,
errors=0.1*v)
assert profile_prop.name == 'foo'
assert np.array_equal(profile_prop.x, v)
assert np.array_equal(profile_prop.y, 10*v)
assert np.array_equal(profile_prop.e, 0.1*v)
class TestSansProperty(object):
def test_registered_as_node_property(self):
assert isinstance(ps.SansProperty.name, property)
assert isinstance(ps.SansProperty.x, property)
assert isinstance(ps.SansProperty.y, property)
assert isinstance(ps.SansProperty.e, property)
def test_default_name(self):
sans_prop = ps.SansProperty()
assert sans_prop.name == 'sans'
def test_from_sassena(self, sans_benchmark):
sans_prop = ps.SansProperty()
sans_prop.from_sassena(sans_benchmark['profiles'], index=666)
assert abs(sans_prop.qvalues[13].item() - 0.0656565651298) < 1e-9
assert abs(sans_prop.profile[13].item() - 741970.84461578) < 1e-6
def test_from_cryson_int(self, sans_benchmark):
sans_prop = ps.SansProperty()
sans_prop.from_cryson_int(sans_benchmark['cryson_int'])
assert sans_prop.qvalues[8] == 0.08
assert sans_prop.profile[8] == 0.229457E+06
assert sans_prop.errors[8] == 0.0
@pytest.mark.skipif(shutil.which('cryson') is None, reason='Needs cryson')
def test_from_cryson_pdb(self, sans_benchmark):
sans_prop = ps.SansProperty()
sans_prop.from_cryson_pdb(sans_benchmark['cryson_pdb'], args='')
sans_prop_ref = ps.SansProperty()
sans_prop_ref.from_cryson_int(sans_benchmark['cryson_int'])
np.testing.assert_array_almost_equal(
sans_prop.qvalues, sans_prop_ref.qvalues)
np.testing.assert_array_almost_equal(
sans_prop.profile, sans_prop_ref.profile)
def test_to_and_from_ascii(self, sans_benchmark):
sans_prop_ref = ps.SansProperty()
sans_prop_ref.from_cryson_int(sans_benchmark['cryson_int'])
sans_prop = ps.SansProperty()
with tempfile.NamedTemporaryFile() as f:
sans_prop_ref.to_ascii(f.name)
sans_prop.from_ascii(f.name)
np.testing.assert_array_almost_equal(
sans_prop.qvalues, sans_prop_ref.qvalues)
class TestSaxsProperty(object):
def test_registered_as_node_property(self):
assert isinstance(ps.SaxsProperty.name, property)
assert isinstance(ps.SaxsProperty.x, property)
assert isinstance(ps.SaxsProperty.y, property)
assert isinstance(ps.SaxsProperty.e, property)
def test_default_name(self):
saxs_prop = ps.SaxsProperty()
assert saxs_prop.name == 'saxs'
def test_from_crysol_int(self, saxs_benchmark):
saxs_prop = ps.SaxsProperty()
saxs_prop.from_crysol_int(saxs_benchmark['crysol_file'])
assert saxs_prop.qvalues[8] == 0.008
assert saxs_prop.profile[8] == 1740900.0
assert saxs_prop.errors[8] == 0.0
@pytest.mark.skipif(shutil.which('crysol') is None, reason='Needs crysol')
def test_from_crysol_pdb(self, saxs_benchmark):
saxs_prop = ps.SaxsProperty()
saxs_prop.from_crysol_pdb(saxs_benchmark['crysol_pdb'], args='')
saxs_prop_ref = ps.SaxsProperty()
saxs_prop_ref.from_crysol_int(saxs_benchmark['crysol_int'])
np.testing.assert_array_almost_equal(
saxs_prop.qvalues, saxs_prop_ref.qvalues)
np.testing.assert_array_almost_equal(
saxs_prop.profile, saxs_prop_ref.profile)
def test_to_and_from_ascii(self, saxs_benchmark):
saxs_prop_ref = ps.SaxsProperty()
saxs_prop_ref.from_crysol_int(saxs_benchmark['crysol_int'])
saxs_prop = ps.SaxsProperty()
with tempfile.NamedTemporaryFile() as f:
saxs_prop_ref.to_ascii(f.name)
saxs_prop.from_ascii(f.name)
np.testing.assert_array_almost_equal(
saxs_prop.qvalues, saxs_prop_ref.qvalues)
class TestPropagators(object):
def test_propagator_weighted_sum(self, benchmark):
tree = benchmark['tree']
ps.propagator_weighted_sum(benchmark['simple_property'], tree)
lfs = benchmark['nleafs']
assert tree.root['foo'].bar == int(lfs * (lfs-1) / 2)
def test_propagator_size_weighted_sum(self, sans_benchmark):
tree = sans_benchmark['tree_with_no_property']
values = sans_benchmark['property_list']
ps.propagator_size_weighted_sum(values, tree)
# Test the propagation of the profiles for a node randomly picked
node_id = np.random.randint(tree.nleafs, len(tree)) # exclude leafs
node = tree[node_id]
ln = node.left
rn = node.right
w = float(ln.count) / (ln.count + rn.count)
lnp = ln['sans'] # profile of the "left" sibling node
rnp = rn['sans']
y = w * lnp.y + (1 - w) * rnp.y
assert np.array_equal(y, node['sans'].y)
if __name__ == '__main__':
pytest.main()
``` |
{
"source": "jmborr/LDRDSANS",
"score": 2
} |
#### File: idpflex/test/test_helper.py
```python
from __future__ import print_function, absolute_import
import h5py
import numpy as np
import os
import pytest
import sys
from copy import deepcopy
from distutils.version import LooseVersion
from scipy.cluster.hierarchy import linkage
from idpflex import cnextend as cnx, properties as ps
# Select the fixture decorator appropriate for the installed pytest version
if LooseVersion(pytest.__version__) < LooseVersion('2.10.0'):
pytest_yield_fixture = pytest.yield_fixture
else:
pytest_yield_fixture = pytest.fixture
# Resolve the path to the "external data"
this_module_path = sys.modules[__name__].__file__
data_dir = os.path.join(os.path.dirname(this_module_path), 'data')
@ps.decorate_as_node_property((('name', 'name of the property'),
('domain_bar', 'property domain'),
('bar', 'property_value'),
('error_bar', 'property error')))
class SimpleProperty(object):
"""
An integer property, only for testing purposes
"""
def __init__(self, value=0):
"""
:param value: integer value
"""
self.name = 'foo' # name of the simple property
self.domain_bar = 0.0
self.bar = int(value) # value of the property
self.error_bar = 0.0
@pytest.fixture(scope="session")
def benchmark():
Z = np.loadtxt(os.path.join(data_dir, 'linkage_matrix'))
return {'Z': Z,
'tree': cnx.Tree(Z),
'nnodes': 44757,
'nleafs': 22379,
'simple_property': [SimpleProperty(i) for i in range(22379)],
}
@pytest_yield_fixture(scope="session")
def saxs_benchmark():
r"""Crysol output for one structure
Yields
------
dict
'crysol_file': absolute path to file.
"""
crysol_file = os.path.join(data_dir, 'saxs', 'crysol.dat')
yield dict(crysol_file=crysol_file)
@pytest_yield_fixture(scope="session")
def sans_benchmark():
r"""Sassena output containing 1000 I(Q) profiles for the hiAPP centroids.
Yields
------
dict
'profiles' : HDF5 handle to the file containing the I(Q) profiles
'property_list' : list of SansProperty instances, one for each leaf
'tree_with_no_property' : cnextend.Tree with random distances among
leafs and without included properties.
"""
# setup or initialization
handle = h5py.File(os.path.join(data_dir, 'sans', 'profiles.h5'), 'r')
profiles = handle['fqt']
n_leafs = len(profiles)
# Create a node tree.
# m is a 1D compressed matrix of distances between leafs
m = np.random.random(int(n_leafs * (n_leafs - 1) / 2))
Z = linkage(m)
tree = cnx.Tree(Z)
# values is a list of SansProperty instances, one for each tree leaf
values = list()
for i in range(tree.nleafs):
sans_property = ps.SansProperty()
sans_property.from_sassena(handle, index=i)
values.append(sans_property)
#
yield {'profiles': handle,
'property_list': values,
'tree_with_no_property': tree
}
# teardown code after finishing the testing session
handle.close()
@pytest.fixture(scope="session")
def sans_fit(sans_benchmark):
r"""
Parameters
----------
sans_benchmark : pytest fixture
Returns
-------
dict
'tree': cnextend.Tree with random distances among leafs and endowed
with a property.
'experiment_property': SansProperty containing experimental profile
'property_name':
'depth': tree level giving the best fit to experiment
'coefficients': weight of each cluster at tree level 'depth' after
fitting.
"""
tree = deepcopy(sans_benchmark['tree_with_no_property'])
values = sans_benchmark['property_list']
name = values[0].name # property name
ps.propagator_size_weighted_sum(values, tree)
# create a SANS profile as a linear combination of the clusters at a
# particular depth
depth = 6
coeff = (0.45, 0.00, 0.00, 0.10, 0.25, 0.00, 0.20) # they must add to one
clusters = tree.clusters_at_depth(depth)
nclusters = 1 + depth # depth=0 corresponds to the root node (nclusters=1)
sans_property = clusters[0][name]
profile = coeff[0] * sans_property.profile # init with the first cluster
flat_background = 0
for i in range(1, nclusters):
sans_property = clusters[i][name]
profile += coeff[i] * sans_property.profile
flat_background += np.mean(sans_property.profile)
flat_background /= nclusters
profile += flat_background # add a flat background
experiment_property = ps.ProfileProperty(qvalues=sans_property.qvalues,
profile=profile,
errors=0.1*profile)
return {'tree': tree,
'property_name': name,
'depth': depth,
'coefficients': coeff,
'background': flat_background,
'experiment_property': experiment_property}
```
#### File: idpflex/test/test_properties.py
```python
from __future__ import print_function, absolute_import
import numpy as np
import pytest
from idpflex import properties as ps
from idpflex.test.test_helper import benchmark, sans_benchmark, saxs_benchmark
class TestRegisterDecorateProperties(object):
def test_register_as_node_property(self):
class SomeProperty(object):
def __init__(self):
attrs = dict(id='foo', a='ax', b='by', c='ce')
self.__dict__.update(attrs)
associations = (('id', 'name of the property'),
('a', 'this is x'),
('b', 'this is y'),
('c', 'this is e'))
ps.register_as_node_property(SomeProperty, associations)
# Test for class attributes
assert isinstance(ps.ProfileProperty.name, property)
assert isinstance(ps.ProfileProperty.x, property)
assert isinstance(ps.ProfileProperty.y, property)
assert isinstance(ps.ProfileProperty.e, property)
# Test for managed attributes
some_prop = SomeProperty()
assert some_prop.name == 'foo'
assert some_prop.x == 'ax'
assert some_prop.y == 'by'
assert some_prop.e == 'ce'
def test_decorate_as_node_property(self):
associations = (('id', 'name of the property'),
('a', 'this is x'),
('b', 'this is y'),
('c', 'this is e'))
@ps.decorate_as_node_property(associations)
class SomeProperty(object):
def __init__(self):
attrs = dict(id='foo', a='ax', b='by', c='ce')
self.__dict__.update(attrs)
# Test for class attributes
assert isinstance(ps.ProfileProperty.name, property)
assert isinstance(ps.ProfileProperty.x, property)
assert isinstance(ps.ProfileProperty.y, property)
assert isinstance(ps.ProfileProperty.e, property)
# Test for managed attributes
some_prop = SomeProperty()
assert some_prop.name == 'foo'
assert some_prop.x == 'ax'
assert some_prop.y == 'by'
assert some_prop.e == 'ce'
class TestProfileProperty(object):
def test_class_decorated_as_node_property(self):
assert isinstance(ps.ProfileProperty.name, property)
assert isinstance(ps.ProfileProperty.x, property)
assert isinstance(ps.ProfileProperty.y, property)
assert isinstance(ps.ProfileProperty.e, property)
def test_instance_decorated_as_node_property(self):
v = np.arange(9)
profile_prop = ps.ProfileProperty(name='foo', qvalues=v, profile=10*v,
errors=0.1*v)
assert profile_prop.name == 'foo'
assert np.array_equal(profile_prop.x, v)
assert np.array_equal(profile_prop.y, 10*v)
assert np.array_equal(profile_prop.e, 0.1*v)
class TestSansProperty(object):
def test_registered_as_node_property(self):
assert isinstance(ps.SansProperty.name, property)
assert isinstance(ps.SansProperty.x, property)
assert isinstance(ps.SansProperty.y, property)
assert isinstance(ps.SansProperty.e, property)
def test_default_name(self):
sans_prop = ps.SansProperty()
assert sans_prop.name == 'sans'
def test_from_sassena(self, sans_benchmark):
sans_prop = ps.SansProperty()
sans_prop.from_sassena(sans_benchmark['profiles'], index=666)
assert sans_prop.qvalues[13].item() - 0.0656565651298 < 0.000000001
assert sans_prop.profile[13].item() - 741970.84461578 < 0.000001
class TestSaxsProperty(object):
def test_registered_as_node_property(self):
assert isinstance(ps.SaxsProperty.name, property)
assert isinstance(ps.SaxsProperty.x, property)
assert isinstance(ps.SaxsProperty.y, property)
assert isinstance(ps.SaxsProperty.e, property)
def test_default_name(self):
saxs_prop = ps.SaxsProperty()
assert saxs_prop.name == 'saxs'
def test_from_crysol_int(self, saxs_benchmark):
saxs_prop = ps.SaxsProperty()
saxs_prop.from_crysol_int(saxs_benchmark['crysol_file'])
assert saxs_prop.qvalues[8] == 0.008
assert saxs_prop.profile[8] == 1740900.0
assert saxs_prop.errors[8] == 0.0
class TestPropagators(object):
def test_propagator_weighted_sum(self, benchmark):
tree = benchmark['tree']
ps.propagator_weighted_sum(benchmark['simple_property'], tree)
        nleafs = benchmark['nleafs']
        assert tree.root['foo'].bar == int(nleafs * (nleafs - 1) / 2)
def test_propagator_size_weighted_sum(self, sans_benchmark):
tree = sans_benchmark['tree_with_no_property']
values = sans_benchmark['property_list']
ps.propagator_size_weighted_sum(values, tree)
# Test the propagation of the profiles for a node randomly picked
node_id = np.random.randint(tree.nleafs, len(tree)) # exclude leafs
node = tree[node_id]
ln = node.left
rn = node.right
w = float(ln.count) / (ln.count + rn.count)
lnp = ln['sans'] # profile of the "left" sibling node
rnp = rn['sans']
y = w * lnp.y + (1 - w) * rnp.y
assert np.array_equal(y, node['sans'].y)
if __name__ == '__main__':
pytest.main()
```
#### File: hiAPP/centroids/iprofile.py
```python
import h5py
import gzip
import shutil
import tempfile
import numpy as np
import os
from collections import namedtuple
import matplotlib.pyplot as plt
qi = namedtuple('qi', 'q i')
def profile(gzipfile, log10=False):
"""
Return momentum transfer and intensities
:param gzipfile: gzipped sassena file for a single conformation
:param log10: return log10 of the intensities
:return: namedtuple('qi', 'q i') with 'q' for list of Q values
and 'i' for the scattering profile.
"""
col1 = np.s_[:,0] # first column
ft, ftname = tempfile.mkstemp(dir='/tmp')
ft2 = open(ftname, 'wb')
gt = gzip.open(gzipfile)
shutil.copyfileobj(gt, ft2)
gt.close()
ft2.close()
    with h5py.File(ftname, 'r') as f:  # the context manager closes the file
        i = f['fq'][col1]
        q = f['qvectors'][col1]
    reorder = np.argsort(q)
    q = q[reorder]
    i = i[reorder]
    if log10:
        i = np.log10(i)
    os.close(ft)
    os.remove(ftname)
return qi(q, i)
def similarity(profile1, profile2, weights=None):
    """
    A measure of similarity between two I(Q) profiles
    1/len(profile1) * Sum(weights * ((profile1-profile2)/(profile1+profile2))**2)
    :param profile1: first profile
    :param profile2: second profile
    :param weights: optional per-point weights, defaults to ones
    :return: similarity measure
    :except: profiles of different length
    """
    if len(profile1) != len(profile2):
        raise IndexError("profiles have different length")
    if weights is None:
        weights = np.ones(len(profile1))
    return np.sum(weights*((profile1-profile2)/(profile1+profile2))**2)/len(profile1)
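# Illustrative check of the similarity measure (added note, not part of the
# original module): identical profiles score 0 and the score grows as the
# profiles diverge.
#     p1 = np.array([1.0, 2.0, 4.0])
#     similarity(p1, p1)        # -> 0.0
#     similarity(p1, 2.0 * p1)  # -> mean of (1/3)**2 over 3 points = 1/9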
if __name__ == '__main__':
"""
For debugging, calculate average intensities
"""
nframe = 4628
p = list()
for iframe in range(1, 1+nframe):
p.append(profile('frame{}.h5.gz'.format(iframe)).i)
if iframe%100 == 0:
print(iframe)
p = np.array(p)
y = np.mean(p, axis=0)
e = np.std(p, axis=0)
x = profile('frame1.h5.gz').q
plt.errorbar(x, y, yerr=e)
plt.show()
``` |
{
"source": "jmborr/nsc",
"score": 3
} |
#### File: nsc/nscsim/utilities.py
```python
from __future__ import (absolute_import, division, print_function)
import logging
import functools
import numpy as np
from collections import namedtuple
try:
    # Mapping lives in collections.abc on python 3 (mandatory from 3.10 on)
    from collections.abc import Mapping
except ImportError:  # python 2 fallback
    from collections import Mapping
import pathos
import multiprocessing
import ctypes
glog = logging.getLogger('nscsim')
glog.addHandler(logging.StreamHandler())
glog.setLevel(logging.INFO)
def namedtuplefy(func):
r"""
Decorator to transform the return dictionary of a function into
a namedtuple
Parameters
----------
    func: Function
        Function to be decorated. The generated namedtuple class is named
        after the decorated function, with an '_nt' suffix.
Returns
-------
Function
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
res = func(*args, **kwargs)
if wrapper.nt is None:
            if not isinstance(res, Mapping):
                raise ValueError('Cannot namedtuplefy a non-dict')
wrapper.nt = namedtuple(func.__name__ + '_nt', res.keys())
return wrapper.nt(**res)
wrapper.nt = None
return wrapper
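# Illustrative usage of namedtuplefy (the 'stats' function below is made up
# for this example and is not part of the module):
#     @namedtuplefy
#     def stats(values):
#         return dict(mean=np.mean(values), spread=np.std(values))
#     s = stats([1.0, 2.0, 3.0])
#     s.mean    # -> 2.0
#     s.spread  # -> 0.816...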
def shared_array(from_array=None, shape=None, c_type='double'):
r"""
Read-only array shared by all CPU's
Parameters
----------
from_array: numpy.ndarray
Create a shared array by copying a non-shared array. will overwrite
any values passed through arguments `shape` and `c_type`
shape: list
desired shape of the array
c_type: str
One of 'double', 'longdouble, 'float',... See
https://docs.python.org/3/library/ctypes.html
Returns
-------
shared numpy.ndarray
"""
dtype_to_ctype = {'float64': 'double', 'float32': 'float'}
if from_array is not None:
shape = from_array.shape
dt = str(from_array.dtype)
c_type = dtype_to_ctype.get(dt, dt)
ctypes_class = getattr(ctypes, 'c_' + c_type)
_array_base = multiprocessing.Array(ctypes_class,
int(np.prod(shape)),
lock=False)
_array = np.ctypeslib.as_array(_array_base)
_array = _array.reshape(shape)
if from_array is not None:
_array[:] = from_array
return _array
def map_parallel(worker, iterator, ncpus, close_pool=True):
pool = pathos.pools.ProcessPool(ncpus=ncpus)
try:
work = pool.map(worker, iterator)
finally:
if close_pool is True:
pool.terminate()
return work
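# Minimal usage sketch with assumed values (not part of the original module):
# make a process-shared copy of an array and reduce its chunks in parallel.
if __name__ == '__main__':
    data = shared_array(from_array=np.linspace(0.0, 1.0, 1000))
    glog.info('shared copy created, first entries: %s', data[:3])
    partial_sums = map_parallel(np.sum, np.array_split(data, 4), ncpus=2)
    glog.info('partial sums: %s', partial_sums)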
``` |
{
"source": "jmborr/QeF",
"score": 2
} |
#### File: qef/models/deltadirac.py
```python
from __future__ import (absolute_import, division, print_function)
from distutils.version import LooseVersion as version
import numpy as np
import lmfit
from lmfit.models import (Model, index_of)
def delta_dirac(x, amplitude=1.0, center=0.0):
r"""function is zero except for the x-value closest to center.
At value-closest-to-center, the function evaluates to the amplitude
divided by the x-spacing.
Parameters
----------
x :class:`~numpy:numpy.ndarray`
domain of the function, energy
amplitude : float
Integrated intensity of the curve
center : float
position of the peak
"""
dx = (x[-1] - x[0]) / (len(x) - 1) # domain spacing
y = np.zeros(len(x))
y[np.abs(x - center).argmin()] = amplitude / dx
return y
class DeltaDiracModel(Model):
r"""A function that is zero everywhere except for the x-value closest
to the center parameter.
At value-closest-to-center, the model evaluates to the amplitude
parameter divided by the x-spacing. This last division is
    necessary to preserve normalization when integrating the function
    over the X-axis.
Fitting parameters:
- integrated intensity ``amplitude`` :math:`A`
- position of the peak ``center`` :math:`E_0`
"""
def __init__(self, independent_vars=['x'], prefix='', missing=None,
name=None, **kwargs):
kwargs.update({'prefix': prefix, 'missing': missing,
'independent_vars': independent_vars})
super(DeltaDiracModel, self).__init__(delta_dirac, **kwargs)
if version(lmfit.__version__) > version('0.9.5'):
__init__.__doc__ = lmfit.models.COMMON_INIT_DOC
def guess(self, y, x=None, **kwargs):
r"""Guess starting values for the parameters of a model.
Parameters
----------
y : :class:`~numpy:numpy.ndarray`
Intensities
x : :class:`~numpy:numpy.ndarray`
energy values
kwargs : dict
additional optional arguments, passed to model function.
Returns
-------
:class:`~lmfit.parameter.Parameters`
parameters with guessed values
"""
amplitude = max(y)
center = 0.0
if x is not None:
center = x[index_of(y, max(y))]
dx = (x[-1] - x[0]) / (len(x) - 1) # x-spacing
amplitude /= dx
return self.make_params(amplitude=amplitude, center=center)
```
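The division by the x-spacing in `delta_dirac` is what keeps the integral of the curve equal to `amplitude`. A minimal sketch of that behaviour, assuming an evenly spaced energy grid and illustrative numbers:
```python
import numpy as np
from qef.models.deltadirac import delta_dirac, DeltaDiracModel
x = np.linspace(-1.0, 1.0, 201)  # evenly spaced energies, spacing 0.01
y = delta_dirac(x, amplitude=2.0, center=0.3)
print(np.count_nonzero(y))  # 1, a single non-zero channel at the x closest to 0.3
print(np.trapz(y, x))       # ~2.0, the integral recovers the amplitude
# the model wrapper guesses starting values from a measured spectrum
params = DeltaDiracModel().guess(y, x=x)
print(params['center'].value)  # ~0.3
```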
#### File: tests/integration/test_water.py
```python
from __future__ import (absolute_import, division, print_function)
import pytest
import os
import numpy as np
from numpy.testing import assert_almost_equal
from lmfit.models import LinearModel, LorentzianModel, ConstantModel
import lmfit
from lmfit.model import Model
from qef.constants import hbar # units of meV x ps or ueV x ns
from qef.io.loaders import load_nexus
from qef.models.deltadirac import DeltaDiracModel
from qef.models.resolution import TabulatedResolutionModel
from qef.operators.convolve import Convolve
def test_water(io_fix):
# Load data
res = load_nexus(io_fix['irs_res_f'])
dat = load_nexus(io_fix['irs_red_f'])
q_vals = io_fix['q_values']
# Define the fitting range
e_min = -0.4
e_max = 0.4
# Find indexes of dat['x'] with values in (e_min, e_max)
mask = np.intersect1d(np.where(dat['x'] > e_min),
np.where(dat['x'] < e_max))
# Drop data outside the fitting range
fr = dict() # fitting range. Use in place of 'dat'
fr['x'] = dat['x'][mask]
fr['y'] = np.asarray([y[mask] for y in dat['y']])
fr['e'] = np.asarray([e[mask] for e in dat['e']])
# Create the model
def generate_model_and_params(spectrum_index=None):
r"""Produce an LMFIT model and related set of fitting parameters"""
sp = '' if spectrum_index is None else '{}_'.format(
spectrum_index) # prefix if spectrum_index passed
# Model components
intensity = ConstantModel(prefix='I_' + sp) # I_amplitude
elastic = DeltaDiracModel(prefix='e_' + sp) # e_amplitude, e_center
# l_amplitude, l_center, l_sigma (also l_fwhm, l_height)
inelastic = LorentzianModel(prefix='l_' + sp)
# r_amplitude, r_center (both fixed)
resolution = TabulatedResolutionModel(res['x'], res['y'],
prefix='r_' + sp)
background = LinearModel(prefix='b_' + sp) # b_slope, b_intercept
# Putting it all together
model = intensity * Convolve(resolution,
elastic + inelastic) + background
parameters = model.make_params() # model params are a separate entity
# Ties and constraints
parameters['e_' + sp + 'amplitude'].set(min=0.0, max=1.0)
parameters['l_' + sp + 'center'].set(
expr='e_' + sp + 'center') # centers tied
parameters['l_' + sp + 'amplitude'].set(
expr='1 - e_' + sp + 'amplitude')
# Some initial sensible values
init_vals = {'I_' + sp + 'c': 1.0, 'e_' + sp + 'amplitude': 0.5,
'l_' + sp + 'sigma': 0.01,
'b_' + sp + 'slope': 0, 'b_' + sp + 'intercept': 0}
for p, v in init_vals.items():
parameters[p].set(value=v)
return model, parameters
# Call the function
model, params = generate_model_and_params()
# Initial guess for first spectrum. Only set free parameters
for name, value in dict(I_c=4.0, e_center=0, e_amplitude=0.1,
l_sigma=0.03, b_slope=0, b_intercept=0).items():
params[name].set(value=value)
# Carry out the fit
fit = model.fit(fr['y'][0], x=fr['x'], params=params,
weights=1.0 / fr['e'][0])
assert_almost_equal(fit.redchi, 1.72, decimal=2)
# Carry out sequential fit
n_spectra = len(fr['y'])
fits = [None, ] * n_spectra # store fits for all the tried spectra
fits[0] = fit # store previous fit
for i in range(1, n_spectra):
y_exp = fr['y'][i]
e_exp = fr['e'][i]
fit = model.fit(y_exp, x=fr['x'], params=params, weights=1.0 / e_exp)
fits[i] = fit # store fit results
assert_almost_equal([f.redchi for f in fits],
[1.72, 1.15, 0.81, 0.73, 0.73, 0.75, 0.81, 0.86, 0.75,
0.91],
decimal=2)
# Fit HWHM(Q^2) with Teixeira model
hwhms = 0.5 * np.asarray([fit.params['l_fwhm'].value for fit in fits])
def teixeira(q2s, difcoef, tau):
dq2 = difcoef * q2s
return hbar * dq2 / (1 + dq2 * tau)
teixeira_model = Model(teixeira) # create LMFIT Model instance
teixeira_model.set_param_hint('difcoef', min=0)
teixeira_model.set_param_hint('tau', min=0)
# Carry out the fit from an initial guess
teixeira_params = teixeira_model.make_params(difcoef=1.0, tau=1.0)
teixeira_fit = teixeira_model.fit(hwhms, q2s=np.square(q_vals),
params=teixeira_params)
assert_almost_equal([teixeira_fit.best_values['difcoef'],
teixeira_fit.best_values['tau']],
[0.16, 1.11], decimal=2)
# Model for Simultaneous Fit of All Spectra with Teixeira Water Model
#
# create one model for each spectrum, but collect all parameters under
# a single instance of the Parameters class.
l_model = list()
g_params = lmfit.Parameters()
for i in range(n_spectra):
# model and parameters for one of the spectra
m, ps = generate_model_and_params(spectrum_index=i)
l_model.append(m)
[g_params.add(p) for p in ps.values()]
# Initialize parameter set with optimized parameters from sequential fit
for i in range(n_spectra):
optimized_params = fits[i].params # these are I_c, e_amplitude,...
for name in optimized_params:
            # for instance, 'e_amplitude' is split into 'e' and 'amplitude'
prefix, base = name.split('_')
# i_name is 'e_3_amplitude' for i=3
i_name = prefix + '_{}_'.format(i) + base
g_params[i_name].set(value=optimized_params[name].value)
# Introduce global parameters difcoef and tau.
# Use previous optimized values as initial guess
o_p = teixeira_fit.params
g_params.add('difcoef', value=o_p['difcoef'].value, min=0)
g_params.add('tau', value=o_p['tau'].value, min=0)
# Tie each lorentzian l_i_sigma to the teixeira expression
for i in range(n_spectra):
q2 = q_vals[i] * q_vals[i]
fmt = '{hbar}*difcoef*{q2}/(1+difcoef*{q2}*tau)'
teixeira_expression = fmt.format(hbar=hbar, q2=q2)
g_params['l_{}_sigma'.format(i)].set(expr=teixeira_expression)
# Carry out the Simultaneous Fit
def residuals(params):
l_residuals = list()
for i in range(n_spectra):
x = fr['x'] # fitting range of energies
y = fr['y'][i] # associated experimental intensities
e = fr['e'][i] # associated experimental errors
model_evaluation = l_model[i].eval(x=x, params=params)
l_residuals.append((model_evaluation - y) / e)
return np.concatenate(l_residuals)
# Minimizer object using the parameter set for all models and the
# function to calculate all the residuals.
minimizer = lmfit.Minimizer(residuals, g_params)
g_fit = minimizer.minimize()
assert_almost_equal(g_fit.redchi, 0.93, decimal=2)
if __name__ == '__main__':
pytest.main([os.path.abspath(__file__)])
```
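Both the sequential fit and the global fit above tie the Lorentzian widths to Teixeira's jump-diffusion relation HWHM(Q) = hbar*D*Q^2 / (1 + D*Q^2*tau). A small sketch of that relation, with diffusion parameters assumed for illustration only:
```python
import numpy as np
from qef.constants import hbar  # units of meV x ps (or ueV x ns)
def teixeira_hwhm(q, difcoef, tau):
    r"""Lorentzian half-width from Teixeira's jump-diffusion model"""
    dq2 = difcoef * q * q
    return hbar * dq2 / (1 + dq2 * tau)
# illustrative values close to the sequential-fit result (D ~ 0.16, tau ~ 1.1)
q_values = np.array([0.5, 1.0, 1.5])
print(teixeira_hwhm(q_values, difcoef=0.16, tau=1.1))
# widths grow with Q^2 at small Q and saturate towards hbar/tau at large Q
```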
#### File: tests/models/test_deltadirac.py
```python
import os
import pytest
import numpy as np
from numpy.testing import assert_allclose
from lmfit.models import LorentzianModel
from qef.models.deltadirac import DeltaDiracModel
from qef.operators.convolve import Convolve
def test_guess():
x = np.linspace(0, np.pi, 100) # Energies in meV
dx = (x[-1] - x[0]) / (len(x) - 1) # x-spacing
amplitude = 42.0
offset = np.pi/6.0
y = amplitude * np.sin(x + offset)
p = DeltaDiracModel().guess(y, x=x)
assert_allclose([amplitude / dx, np.pi/2 - offset],
[p['amplitude'], p['center']],
rtol=1e-3, atol=1e-5)
def test_convolution():
r"""Convolution of function with delta dirac should return the function"""
# Reference Lorentzian parameter values
amplitude = 42.0
sigma = 0.042
center = 0.0003
c1 = LorentzianModel(prefix='c1_')
p = c1.make_params(amplitude=amplitude, center=center, sigma=sigma)
c2 = DeltaDiracModel(prefix='c2_')
p.update(c2.make_params(amplitude=1.0, center=0.0))
e = 0.0004 * np.arange(-250, 1500) # energies in meV
# convolve Lorentzian with delta Dirac
y1 = Convolve(c1, c2).eval(params=p, x=e) # should be the lorentzian
# reverse order, convolve delta Dirac with Lorentzian
y2 = Convolve(c2, c1).eval(params=p, x=e) # should be the lorentzian
# We will fit a Lorentzian model against datasets y1 and y2
m = LorentzianModel()
all_params = 'amplitude sigma center'.split()
for y in (y1, y2):
params = m.guess(y, x=e)
# Set initial model Lorentzian parameters far from optimal solution
params['amplitude'].set(value=amplitude * 10)
params['sigma'].set(value=sigma * 4)
params['center'].set(value=center * 7)
# fit Lorentzian model against dataset y
r = m.fit(y, params, x=e)
# Compare the reference Lorentzian parameters against
# parameters of the fitted model
assert_allclose([amplitude, sigma, center],
[r.params[p].value for p in all_params],
rtol=0.01, atol=0.00001)
if __name__ == '__main__':
pytest.main([os.path.abspath(__file__)])
```
#### File: tests/models/test_teixeira.py
```python
from __future__ import (absolute_import, division, print_function)
import os
from numpy.testing import assert_almost_equal
import pytest
from qef.models.teixeira import TeixeiraWaterModel
from qef.constants import hbar
def test_init():
tx = TeixeiraWaterModel(prefix='tx_', q=0.3)
assert 'tx_dcf*0.09/(1+tx_tau*tx_dcf*0.09)'\
in tx.param_hints['fwhm']['expr']
def test_guess(ltz):
tx = TeixeiraWaterModel(q=0.3)
p = tx.guess(ltz['y'], x=ltz['x'])
assert_almost_equal(p['fwhm'], 2 * ltz['p']['sigma'], decimal=2)
assert_almost_equal(hbar / p['tau'], 2 * ltz['p']['sigma'], decimal=2)
def test_fit(ltz):
tx = TeixeiraWaterModel(q=0.3)
p = tx.guess(ltz['y'], x=ltz['x'])
    p['tau'].value *= 2.0  # move the starting value away from the optimum
fr = tx.fit(ltz['y'], p, x=ltz['x'])
assert_almost_equal(fr.params['fwhm'], 2 * ltz['p']['sigma'], decimal=6)
if __name__ == '__main__':
pytest.main([os.path.abspath(__file__)])
``` |
{
"source": "jmborr/sasmodels",
"score": 2
} |
#### File: sasmodels/explore/precision.py
```python
r"""
Show numerical precision of $2 J_1(x)/x$.
"""
from __future__ import division, print_function
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
from numpy import pi, inf
import scipy.special
try:
from mpmath import mp
except ImportError:
# CRUFT: mpmath split out into its own package
from sympy.mpmath import mp
#import matplotlib; matplotlib.use('TkAgg')
import pylab
from sasmodels import core, data, direct_model, modelinfo
class Comparator(object):
def __init__(self, name, mp_function, np_function, ocl_function, xaxis, limits):
self.name = name
self.mp_function = mp_function
self.np_function = np_function
self.ocl_function = ocl_function
self.xaxis = xaxis
self.limits = limits
def __repr__(self):
return "Comparator(%s)"%self.name
def call_mpmath(self, vec, bits=500):
"""
Direct calculation using mpmath extended precision library.
"""
with mp.workprec(bits):
return [self.mp_function(mp.mpf(x)) for x in vec]
def call_numpy(self, x, dtype):
"""
Direct calculation using numpy/scipy.
"""
x = np.asarray(x, dtype)
return self.np_function(x)
def call_ocl(self, x, dtype, platform='ocl'):
"""
Calculation using sasmodels ocl libraries.
"""
x = np.asarray(x, dtype)
model = core.build_model(self.ocl_function, dtype=dtype)
calculator = direct_model.DirectModel(data.empty_data1D(x), model)
return calculator(background=0)
def run(self, xrange="log", diff="relative"):
r"""
Compare accuracy of different methods for computing f.
*xrange* is::
log: [10^-3,10^5]
logq: [10^-4, 10^1]
linear: [1,1000]
zoom: [1000,1010]
neg: [-100,100]
*diff* is "relative", "absolute" or "none"
        *x_bits* is the precision with which the x values are specified. The
        default 23 should reproduce the equivalent of a single precision
        calculation.
"""
linear = not xrange.startswith("log")
if xrange == "zoom":
lin_min, lin_max, lin_steps = 1000, 1010, 2000
elif xrange == "neg":
lin_min, lin_max, lin_steps = -100.1, 100.1, 2000
elif xrange == "linear":
lin_min, lin_max, lin_steps = 1, 1000, 2000
elif xrange == "log":
log_min, log_max, log_steps = -3, 5, 400
elif xrange == "logq":
log_min, log_max, log_steps = -4, 1, 400
else:
raise ValueError("unknown range "+xrange)
with mp.workprec(500):
# Note: we make sure that we are comparing apples to apples...
# The x points are set using single precision so that we are
# examining the accuracy of the transformation from x to f(x)
# rather than x to f(nearest(x)) where nearest(x) is the nearest
# value to x in the given precision.
if linear:
lin_min = max(lin_min, self.limits[0])
lin_max = min(lin_max, self.limits[1])
qrf = np.linspace(lin_min, lin_max, lin_steps, dtype='single')
#qrf = np.linspace(lin_min, lin_max, lin_steps, dtype='double')
qr = [mp.mpf(float(v)) for v in qrf]
#qr = mp.linspace(lin_min, lin_max, lin_steps)
else:
log_min = np.log10(max(10**log_min, self.limits[0]))
log_max = np.log10(min(10**log_max, self.limits[1]))
qrf = np.logspace(log_min, log_max, log_steps, dtype='single')
#qrf = np.logspace(log_min, log_max, log_steps, dtype='double')
qr = [mp.mpf(float(v)) for v in qrf]
#qr = [10**v for v in mp.linspace(log_min, log_max, log_steps)]
target = self.call_mpmath(qr, bits=500)
pylab.subplot(121)
self.compare(qr, 'single', target, linear, diff)
pylab.legend(loc='best')
pylab.subplot(122)
self.compare(qr, 'double', target, linear, diff)
pylab.legend(loc='best')
pylab.suptitle(self.name + " compared to 500-bit mpmath")
def compare(self, x, precision, target, linear=False, diff="relative"):
r"""
Compare the different computation methods using the given precision.
"""
if precision == 'single':
#n=11; plotdiff(x, target, self.call_mpmath(x, n), 'mp %d bits'%n, diff=diff)
#n=23; plotdiff(x, target, self.call_mpmath(x, n), 'mp %d bits'%n, diff=diff)
pass
elif precision == 'double':
#n=53; plotdiff(x, target, self.call_mpmath(x, n), 'mp %d bits'%n, diff=diff)
#n=83; plotdiff(x, target, self.call_mpmath(x, n), 'mp %d bits'%n, diff=diff)
pass
plotdiff(x, target, self.call_numpy(x, precision), 'numpy '+precision, diff=diff)
plotdiff(x, target, self.call_ocl(x, precision, 0), 'OpenCL '+precision, diff=diff)
pylab.xlabel(self.xaxis)
if diff == "relative":
pylab.ylabel("relative error")
elif diff == "absolute":
pylab.ylabel("absolute error")
else:
pylab.ylabel(self.name)
pylab.semilogx(x, target, '-', label="true value")
if linear:
pylab.xscale('linear')
def plotdiff(x, target, actual, label, diff):
"""
Plot the computed value.
Use relative error if SHOW_DIFF, otherwise just plot the value directly.
"""
if diff == "relative":
err = np.array([abs((t-a)/t) for t, a in zip(target, actual)], 'd')
#err = np.clip(err, 0, 1)
pylab.loglog(x, err, '-', label=label)
elif diff == "absolute":
err = np.array([abs((t-a)) for t, a in zip(target, actual)], 'd')
pylab.loglog(x, err, '-', label=label)
else:
limits = np.min(target), np.max(target)
pylab.semilogx(x, np.clip(actual, *limits), '-', label=label)
def make_ocl(function, name, source=[]):
class Kernel(object):
pass
Kernel.__file__ = name+".py"
Kernel.name = name
Kernel.parameters = []
Kernel.source = source
Kernel.Iq = function
model_info = modelinfo.make_model_info(Kernel)
return model_info
# =============== FUNCTION DEFINITIONS ================
FUNCTIONS = {}
def add_function(name, mp_function, np_function, ocl_function,
shortname=None, xaxis="x", limits=(-inf, inf)):
if shortname is None:
shortname = name.replace('(x)', '').replace(' ', '')
FUNCTIONS[shortname] = Comparator(name, mp_function, np_function, ocl_function, xaxis, limits)
add_function(
name="J0(x)",
mp_function=mp.j0,
np_function=scipy.special.j0,
ocl_function=make_ocl("return sas_J0(q);", "sas_J0", ["lib/polevl.c", "lib/sas_J0.c"]),
)
add_function(
name="J1(x)",
mp_function=mp.j1,
np_function=scipy.special.j1,
ocl_function=make_ocl("return sas_J1(q);", "sas_J1", ["lib/polevl.c", "lib/sas_J1.c"]),
)
add_function(
name="JN(-3, x)",
mp_function=lambda x: mp.besselj(-3, x),
np_function=lambda x: scipy.special.jn(-3, x),
ocl_function=make_ocl("return sas_JN(-3, q);", "sas_JN",
["lib/polevl.c", "lib/sas_J0.c", "lib/sas_J1.c", "lib/sas_JN.c"]),
shortname="J-3",
)
add_function(
name="JN(3, x)",
mp_function=lambda x: mp.besselj(3, x),
np_function=lambda x: scipy.special.jn(3, x),
ocl_function=make_ocl("return sas_JN(3, q);", "sas_JN",
["lib/polevl.c", "lib/sas_J0.c", "lib/sas_J1.c", "lib/sas_JN.c"]),
shortname="J3",
)
add_function(
name="JN(2, x)",
mp_function=lambda x: mp.besselj(2, x),
np_function=lambda x: scipy.special.jn(2, x),
ocl_function=make_ocl("return sas_JN(2, q);", "sas_JN",
["lib/polevl.c", "lib/sas_J0.c", "lib/sas_J1.c", "lib/sas_JN.c"]),
shortname="J2",
)
add_function(
name="2 J1(x)/x",
mp_function=lambda x: 2*mp.j1(x)/x,
np_function=lambda x: 2*scipy.special.j1(x)/x,
ocl_function=make_ocl("return sas_2J1x_x(q);", "sas_2J1x_x", ["lib/polevl.c", "lib/sas_J1.c"]),
)
add_function(
name="J1(x)",
mp_function=mp.j1,
np_function=scipy.special.j1,
ocl_function=make_ocl("return sas_J1(q);", "sas_J1", ["lib/polevl.c", "lib/sas_J1.c"]),
)
add_function(
name="Si(x)",
mp_function=mp.si,
np_function=lambda x: scipy.special.sici(x)[0],
ocl_function=make_ocl("return sas_Si(q);", "sas_Si", ["lib/sas_Si.c"]),
)
#import fnlib
#add_function(
# name="fnlibJ1",
# mp_function=mp.j1,
# np_function=fnlib.J1,
# ocl_function=make_ocl("return sas_J1(q);", "sas_J1", ["lib/polevl.c", "lib/sas_J1.c"]),
#)
add_function(
name="sin(x)",
mp_function=mp.sin,
np_function=np.sin,
#ocl_function=make_ocl("double sn, cn; SINCOS(q,sn,cn); return sn;", "sas_sin"),
ocl_function=make_ocl("return sin(q);", "sas_sin"),
)
add_function(
name="sin(x)/x",
mp_function=lambda x: mp.sin(x)/x if x != 0 else 1,
## scipy sinc function is inaccurate and has an implied pi*x term
#np_function=lambda x: scipy.special.sinc(x/pi),
## numpy sin(x)/x needs to check for x=0
np_function=lambda x: np.sin(x)/x,
ocl_function=make_ocl("return sas_sinx_x(q);", "sas_sinc"),
)
add_function(
name="cos(x)",
mp_function=mp.cos,
np_function=np.cos,
#ocl_function=make_ocl("double sn, cn; SINCOS(q,sn,cn); return cn;", "sas_cos"),
ocl_function=make_ocl("return cos(q);", "sas_cos"),
)
add_function(
name="gamma(x)",
mp_function=mp.gamma,
np_function=scipy.special.gamma,
ocl_function=make_ocl("return sas_gamma(q);", "sas_gamma", ["lib/sas_gamma.c"]),
limits=(-3.1, 10),
)
add_function(
name="erf(x)",
mp_function=mp.erf,
np_function=scipy.special.erf,
ocl_function=make_ocl("return sas_erf(q);", "sas_erf", ["lib/polevl.c", "lib/sas_erf.c"]),
limits=(-5., 5.),
)
add_function(
name="erfc(x)",
mp_function=mp.erfc,
np_function=scipy.special.erfc,
ocl_function=make_ocl("return sas_erfc(q);", "sas_erfc", ["lib/polevl.c", "lib/sas_erf.c"]),
limits=(-5., 5.),
)
add_function(
name="arctan(x)",
mp_function=mp.atan,
np_function=np.arctan,
ocl_function=make_ocl("return atan(q);", "sas_arctan"),
)
add_function(
name="3 j1(x)/x",
mp_function=lambda x: 3*(mp.sin(x)/x - mp.cos(x))/(x*x),
# Note: no taylor expansion near 0
np_function=lambda x: 3*(np.sin(x)/x - np.cos(x))/(x*x),
ocl_function=make_ocl("return sas_3j1x_x(q);", "sas_j1c", ["lib/sas_3j1x_x.c"]),
)
add_function(
name="(1-cos(x))/x^2",
mp_function=lambda x: (1 - mp.cos(x))/(x*x),
np_function=lambda x: (1 - np.cos(x))/(x*x),
ocl_function=make_ocl("return (1-cos(q))/q/q;", "sas_1mcosx_x2"),
)
add_function(
name="(1-sin(x)/x)/x",
mp_function=lambda x: 1/x - mp.sin(x)/(x*x),
np_function=lambda x: 1/x - np.sin(x)/(x*x),
ocl_function=make_ocl("return (1-sas_sinx_x(q))/q;", "sas_1msinx_x_x"),
)
add_function(
name="(1/2+(1-cos(x))/x^2-sin(x)/x)/x",
mp_function=lambda x: (0.5 - mp.sin(x)/x + (1-mp.cos(x))/(x*x))/x,
np_function=lambda x: (0.5 - np.sin(x)/x + (1-np.cos(x))/(x*x))/x,
ocl_function=make_ocl("return (0.5-sin(q)/q + (1-cos(q))/q/q)/q;", "sas_T2"),
)
add_function(
name="fmod_2pi",
mp_function=lambda x: mp.fmod(x, 2*mp.pi),
np_function=lambda x: np.fmod(x, 2*np.pi),
ocl_function=make_ocl("return fmod(q, 2*M_PI);", "sas_fmod"),
)
RADIUS=3000
LENGTH=30
THETA=45
def mp_cyl(x):
f = mp.mpf
theta = f(THETA)*mp.pi/f(180)
qr = x * f(RADIUS)*mp.sin(theta)
qh = x * f(LENGTH)/f(2)*mp.cos(theta)
be = f(2)*mp.j1(qr)/qr
si = mp.sin(qh)/qh
background = f(0)
#background = f(1)/f(1000)
volume = mp.pi*f(RADIUS)**f(2)*f(LENGTH)
contrast = f(5)
units = f(1)/f(10000)
#return be
#return si
return units*(volume*contrast*be*si)**f(2)/volume + background
def np_cyl(x):
f = np.float64 if x.dtype == np.float64 else np.float32
theta = f(THETA)*f(np.pi)/f(180)
qr = x * f(RADIUS)*np.sin(theta)
qh = x * f(LENGTH)/f(2)*np.cos(theta)
be = f(2)*scipy.special.j1(qr)/qr
si = np.sin(qh)/qh
background = f(0)
#background = f(1)/f(1000)
volume = f(np.pi)*f(RADIUS)**2*f(LENGTH)
contrast = f(5)
units = f(1)/f(10000)
#return be
#return si
return units*(volume*contrast*be*si)**f(2)/volume + background
ocl_cyl = """\
double THETA = %(THETA).15e*M_PI_180;
double qr = q*%(RADIUS).15e*sin(THETA);
double qh = q*0.5*%(LENGTH).15e*cos(THETA);
double be = sas_2J1x_x(qr);
double si = sas_sinx_x(qh);
double background = 0;
//double background = 0.001;
double volume = M_PI*square(%(RADIUS).15e)*%(LENGTH).15e;
double contrast = 5.0;
double units = 1e-4;
//return be;
//return si;
return units*square(volume*contrast*be*si)/volume + background;
"""%{"LENGTH":LENGTH, "RADIUS": RADIUS, "THETA": THETA}
add_function(
name="cylinder(r=%g, l=%g, theta=%g)"%(RADIUS, LENGTH, THETA),
mp_function=mp_cyl,
np_function=np_cyl,
ocl_function=make_ocl(ocl_cyl, "ocl_cyl", ["lib/polevl.c", "lib/sas_J1.c"]),
shortname="cylinder",
xaxis="$q/A^{-1}$",
)
lanczos_gamma = """\
const double coeff[] = {
76.18009172947146, -86.50532032941677,
24.01409824083091, -1.231739572450155,
0.1208650973866179e-2,-0.5395239384953e-5
};
const double x = q;
double tmp = x + 5.5;
tmp -= (x + 0.5)*log(tmp);
double ser = 1.000000000190015;
for (int k=0; k < 6; k++) ser += coeff[k]/(x + k+1);
return -tmp + log(2.5066282746310005*ser/x);
"""
add_function(
name="log gamma(x)",
mp_function=mp.loggamma,
np_function=scipy.special.gammaln,
ocl_function=make_ocl(lanczos_gamma, "lgamma"),
)
# Alternate versions of 3 j1(x)/x, for posterity
def taylor_3j1x_x(x):
"""
Calculation using taylor series.
"""
# Generate coefficients using the precision of the target value.
n = 5
cinv = [3991680, -45360, 840, -30, 3]
three = x.dtype.type(3)
p = three/np.array(cinv, x.dtype)
return np.polyval(p[-n:], x*x)
add_function(
name="3 j1(x)/x: taylor",
mp_function=lambda x: 3*(mp.sin(x)/x - mp.cos(x))/(x*x),
np_function=taylor_3j1x_x,
ocl_function=make_ocl("return sas_3j1x_x(q);", "sas_j1c", ["lib/sas_3j1x_x.c"]),
)
def trig_3j1x_x(x):
r"""
Direct calculation using linear combination of sin/cos.
Use the following trig identity:
.. math::
a \sin(x) + b \cos(x) = c \sin(x + \phi)
    where $c = \sqrt{a^2+b^2}$ and $\phi = \tan^{-1}(b/a)$ to calculate the
numerator $\sin(x) - x\cos(x)$.
"""
one = x.dtype.type(1)
three = x.dtype.type(3)
c = np.sqrt(one + x*x)
phi = np.arctan2(-x, one)
return three*(c*np.sin(x+phi))/(x*x*x)
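# Why the identity works (explanatory note, not in the original source):
# c*sin(x + phi) = sin(x)*(c*cos(phi)) + cos(x)*(c*sin(phi)); choosing
# c = sqrt(1 + x^2) and phi = atan2(-x, 1) gives c*cos(phi) = 1 and
# c*sin(phi) = -x, so c*sin(x + phi) = sin(x) - x*cos(x), the numerator
# of 3 j1(x)/x.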
add_function(
name="3 j1(x)/x: trig",
mp_function=lambda x: 3*(mp.sin(x)/x - mp.cos(x))/(x*x),
np_function=trig_3j1x_x,
ocl_function=make_ocl("return sas_3j1x_x(q);", "sas_j1c", ["lib/sas_3j1x_x.c"]),
)
def np_2J1x_x(x):
"""
numpy implementation of 2J1(x)/x using single precision algorithm
"""
# pylint: disable=bad-continuation
f = x.dtype.type
ax = abs(x)
if ax < f(8.0):
y = x*x
ans1 = f(2)*(f(72362614232.0)
+ y*(f(-7895059235.0)
+ y*(f(242396853.1)
+ y*(f(-2972611.439)
+ y*(f(15704.48260)
+ y*(f(-30.16036606)))))))
ans2 = (f(144725228442.0)
+ y*(f(2300535178.0)
+ y*(f(18583304.74)
+ y*(f(99447.43394)
+ y*(f(376.9991397)
+ y)))))
return ans1/ans2
else:
y = f(64.0)/(ax*ax)
xx = ax - f(2.356194491)
ans1 = (f(1.0)
+ y*(f(0.183105e-2)
+ y*(f(-0.3516396496e-4)
+ y*(f(0.2457520174e-5)
+ y*f(-0.240337019e-6)))))
ans2 = (f(0.04687499995)
+ y*(f(-0.2002690873e-3)
+ y*(f(0.8449199096e-5)
+ y*(f(-0.88228987e-6)
+ y*f(0.105787412e-6)))))
sn, cn = np.sin(xx), np.cos(xx)
ans = np.sqrt(f(0.636619772)/ax) * (cn*ans1 - (f(8.0)/ax)*sn*ans2) * f(2)/x
return -ans if (x < f(0.0)) else ans
add_function(
name="2 J1(x)/x:alt",
mp_function=lambda x: 2*mp.j1(x)/x,
np_function=lambda x: np.asarray([np_2J1x_x(v) for v in x], x.dtype),
ocl_function=make_ocl("return sas_2J1x_x(q);", "sas_2J1x_x", ["lib/polevl.c", "lib/sas_J1.c"]),
)
ALL_FUNCTIONS = set(FUNCTIONS.keys())
ALL_FUNCTIONS.discard("loggamma") # OCL version not ready yet
ALL_FUNCTIONS.discard("3j1/x:taylor")
ALL_FUNCTIONS.discard("3j1/x:trig")
ALL_FUNCTIONS.discard("2J1/x:alt")
# =============== MAIN PROGRAM ================
def usage():
names = ", ".join(sorted(ALL_FUNCTIONS))
print("""\
usage: precision.py [-f/a/r] [-x<range>] name...
where
-f indicates that the function value should be plotted,
-a indicates that the absolute error should be plotted,
-r indicates that the relative error should be plotted (default),
-x<range> indicates the steps in x, where <range> is one of the following
log indicates log stepping in [10^-3, 10^5] (default)
logq indicates log stepping in [10^-4, 10^1]
linear indicates linear stepping in [1, 1000]
zoom indicates linear stepping in [1000, 1010]
neg indicates linear stepping in [-100.1, 100.1]
and name is "all [first]" or one of:
"""+names)
sys.exit(1)
def main():
import sys
diff = "relative"
xrange = "log"
options = [v for v in sys.argv[1:] if v.startswith('-')]
for opt in options:
if opt == '-f':
diff = "none"
elif opt == '-r':
diff = "relative"
elif opt == '-a':
diff = "absolute"
elif opt.startswith('-x'):
xrange = opt[2:]
else:
usage()
names = [v for v in sys.argv[1:] if not v.startswith('-')]
if not names:
usage()
if names[0] == "all":
cutoff = names[1] if len(names) > 1 else ""
names = list(sorted(ALL_FUNCTIONS))
names = [k for k in names if k >= cutoff]
if any(k not in FUNCTIONS for k in names):
usage()
multiple = len(names) > 1
pylab.interactive(multiple)
for k in names:
pylab.clf()
comparator = FUNCTIONS[k]
comparator.run(xrange=xrange, diff=diff)
        if multiple:
            # pause between plots; raw_input() only exists under python 2
            try:
                raw_input()
            except NameError:
                input()
if not multiple:
pylab.show()
if __name__ == "__main__":
main()
```
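Registering an extra comparator follows the same pattern as the entries above. Below is a hedged sketch of a snippet one could append to the script, adding a decaying exponential; the OpenCL snippet, short name and limits are assumptions chosen only to show the `add_function`/`make_ocl` call:
```python
add_function(
    name="exp(-x)",
    mp_function=lambda x: mp.exp(-x),
    np_function=lambda x: np.exp(-x),
    ocl_function=make_ocl("return exp(-q);", "sas_expmx"),
    shortname="expm",
    limits=(0, 700),  # keep exp(-x) representable in double precision
)
```
Once registered, the new entry is compared like any built-in one, e.g. `python explore/precision.py -r expm`.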
#### File: sasmodels/sasmodels/alignment.py
```python
import numpy as np # type: ignore
def align_empty(shape, dtype, alignment=128):
"""
Return an empty array aligned on the alignment boundary.
"""
size = np.prod(shape)
dtype = np.dtype(dtype)
# allocate array with extra space for alignment
extra = alignment//dtype.itemsize - 1
result = np.empty(size+extra, dtype)
# build a view into allocated array which starts on a boundary
offset = (result.ctypes.data%alignment)//dtype.itemsize
view = np.reshape(result[offset:offset+size], shape)
return view
def align_data(x, dtype, alignment=128):
"""
Return a copy of an array on the alignment boundary.
"""
    # TODO: if x is already contiguous, aligned and of the correct type,
    # the copy could be skipped and x returned unchanged
view = align_empty(x.shape, dtype, alignment=alignment)
view[:] = x
return view
```
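A quick way to check what the aligned view buys you is to look at the base address of the returned array. A minimal sketch with illustrative shapes and dtypes:
```python
import numpy as np
from sasmodels.alignment import align_empty, align_data
a = align_empty((1000, 3), dtype='f4', alignment=128)
print(a.ctypes.data % 128)  # expected 0: the view starts on a 128-byte boundary
b = align_data(np.arange(12.0), dtype='f8')
print(b.ctypes.data % 128, b[:4])  # aligned copy holding the same values
```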
#### File: sasmodels/sasmodels/direct_model.py
```python
from __future__ import print_function
import numpy as np # type: ignore
# TODO: fix sesans module
from . import sesans # type: ignore
from . import weights
from . import resolution
from . import resolution2d
from .details import make_kernel_args, dispersion_mesh
try:
from typing import Optional, Dict, Tuple
except ImportError:
pass
else:
from .data import Data
from .kernel import Kernel, KernelModel
from .modelinfo import Parameter, ParameterSet
def call_kernel(calculator, pars, cutoff=0., mono=False):
# type: (Kernel, ParameterSet, float, bool) -> np.ndarray
"""
Call *kernel* returned from *model.make_kernel* with parameters *pars*.
*cutoff* is the limiting value for the product of dispersion weights used
to perform the multidimensional dispersion calculation more quickly at a
slight cost to accuracy. The default value of *cutoff=0* integrates over
the entire dispersion cube. Using *cutoff=1e-5* can be 50% faster, but
with an error of about 1%, which is usually less than the measurement
uncertainty.
*mono* is True if polydispersity should be set to none on all parameters.
"""
parameters = calculator.info.parameters
if mono:
active = lambda name: False
elif calculator.dim == '1d':
active = lambda name: name in parameters.pd_1d
elif calculator.dim == '2d':
active = lambda name: name in parameters.pd_2d
else:
active = lambda name: True
#print("pars",[p.id for p in parameters.call_parameters])
vw_pairs = [(get_weights(p, pars) if active(p.name)
else ([pars.get(p.name, p.default)], [1.0]))
for p in parameters.call_parameters]
call_details, values, is_magnetic = make_kernel_args(calculator, vw_pairs)
#print("values:", values)
return calculator(call_details, values, cutoff, is_magnetic)
def call_ER(model_info, pars):
# type: (ModelInfo, ParameterSet) -> float
"""
Call the model ER function using *values*.
*model_info* is either *model.info* if you have a loaded model,
or *kernel.info* if you have a model kernel prepared for evaluation.
"""
if model_info.ER is None:
return 1.0
elif not model_info.parameters.form_volume_parameters:
# handle the case where ER is provided but model is not polydisperse
return model_info.ER()
else:
value, weight = _vol_pars(model_info, pars)
individual_radii = model_info.ER(*value)
return np.sum(weight*individual_radii) / np.sum(weight)
def call_VR(model_info, pars):
# type: (ModelInfo, ParameterSet) -> float
"""
Call the model VR function using *pars*.
*model_info* is either *model.info* if you have a loaded model,
or *kernel.info* if you have a model kernel prepared for evaluation.
"""
if model_info.VR is None:
return 1.0
elif not model_info.parameters.form_volume_parameters:
        # handle the case where VR is provided but model is not polydisperse
return model_info.VR()
else:
value, weight = _vol_pars(model_info, pars)
whole, part = model_info.VR(*value)
return np.sum(weight*part)/np.sum(weight*whole)
def call_profile(model_info, **pars):
# type: (ModelInfo, ...) -> Tuple[np.ndarray, np.ndarray, Tuple[str, str]]
"""
Returns the profile *x, y, (xlabel, ylabel)* representing the model.
"""
args = {}
for p in model_info.parameters.kernel_parameters:
if p.length > 1:
value = np.array([pars.get(p.id+str(j), p.default)
for j in range(1, p.length+1)])
else:
value = pars.get(p.id, p.default)
args[p.id] = value
x, y = model_info.profile(**args)
return x, y, model_info.profile_axes
def get_weights(parameter, values):
# type: (Parameter, Dict[str, float]) -> Tuple[np.ndarray, np.ndarray]
"""
Generate the distribution for parameter *name* given the parameter values
in *pars*.
Uses "name", "name_pd", "name_pd_type", "name_pd_n", "name_pd_sigma"
from the *pars* dictionary for parameter value and parameter dispersion.
"""
value = float(values.get(parameter.name, parameter.default))
relative = parameter.relative_pd
limits = parameter.limits
disperser = values.get(parameter.name+'_pd_type', 'gaussian')
npts = values.get(parameter.name+'_pd_n', 0)
width = values.get(parameter.name+'_pd', 0.0)
nsigma = values.get(parameter.name+'_pd_nsigma', 3.0)
if npts == 0 or width == 0:
return [value], [1.0]
value, weight = weights.get_weights(
disperser, npts, width, nsigma, value, limits, relative)
return value, weight / np.sum(weight)
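# Example of the naming convention consumed by get_weights (illustrative
# values, not part of the original module). A polydisperse radius would be
# passed in *pars* as:
#     {'radius': 60.0,            # nominal value
#      'radius_pd': 0.2,          # distribution width (relative for sizes)
#      'radius_pd_n': 35,         # number of points in the dispersion mesh
#      'radius_pd_nsigma': 3.0,   # extent of the mesh in standard deviations
#      'radius_pd_type': 'gaussian'}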
def _vol_pars(model_info, pars):
# type: (ModelInfo, ParameterSet) -> Tuple[np.ndarray, np.ndarray]
vol_pars = [get_weights(p, pars)
for p in model_info.parameters.call_parameters
if p.type == 'volume']
#import pylab; pylab.plot(vol_pars[0][0],vol_pars[0][1]); pylab.show()
value, weight = dispersion_mesh(model_info, vol_pars)
return value, weight
class DataMixin(object):
"""
DataMixin captures the common aspects of evaluating a SAS model for a
particular data set, including calculating Iq and evaluating the
resolution function. It is used in particular by :class:`DirectModel`,
which evaluates a SAS model parameters as key word arguments to the
calculator method, and by :class:`bumps_model.Experiment`, which wraps the
model and data for use with the Bumps fitting engine. It is not
currently used by :class:`sasview_model.SasviewModel` since this will
require a number of changes to SasView before we can do it.
:meth:`_interpret_data` initializes the data structures necessary
to manage the calculations. This sets attributes in the child class
such as *data_type* and *resolution*.
:meth:`_calc_theory` evaluates the model at the given control values.
:meth:`_set_data` sets the intensity data in the data object,
possibly with random noise added. This is useful for simulating a
dataset with the results from :meth:`_calc_theory`.
"""
def _interpret_data(self, data, model):
# type: (Data, KernelModel) -> None
# pylint: disable=attribute-defined-outside-init
self._data = data
self._model = model
# interpret data
if hasattr(data, 'isSesans') and data.isSesans:
self.data_type = 'sesans'
elif hasattr(data, 'qx_data'):
self.data_type = 'Iqxy'
elif getattr(data, 'oriented', False):
self.data_type = 'Iq-oriented'
else:
self.data_type = 'Iq'
if self.data_type == 'sesans':
q = sesans.make_q(data.sample.zacceptance, data.Rmax)
index = slice(None, None)
res = None
if data.y is not None:
Iq, dIq = data.y, data.dy
else:
Iq, dIq = None, None
#self._theory = np.zeros_like(q)
q_vectors = [q]
q_mono = sesans.make_all_q(data)
elif self.data_type == 'Iqxy':
#if not model.info.parameters.has_2d:
# raise ValueError("not 2D without orientation or magnetic parameters")
q = np.sqrt(data.qx_data**2 + data.qy_data**2)
qmin = getattr(data, 'qmin', 1e-16)
qmax = getattr(data, 'qmax', np.inf)
accuracy = getattr(data, 'accuracy', 'Low')
index = ~data.mask & (q >= qmin) & (q <= qmax)
if data.data is not None:
index &= ~np.isnan(data.data)
Iq = data.data[index]
dIq = data.err_data[index]
else:
Iq, dIq = None, None
res = resolution2d.Pinhole2D(data=data, index=index,
nsigma=3.0, accuracy=accuracy)
#self._theory = np.zeros_like(self.Iq)
q_vectors = res.q_calc
q_mono = []
elif self.data_type == 'Iq':
index = (data.x >= data.qmin) & (data.x <= data.qmax)
if data.y is not None:
index &= ~np.isnan(data.y)
Iq = data.y[index]
dIq = data.dy[index]
else:
Iq, dIq = None, None
if getattr(data, 'dx', None) is not None:
q, dq = data.x[index], data.dx[index]
if (dq > 0).any():
res = resolution.Pinhole1D(q, dq)
else:
res = resolution.Perfect1D(q)
elif (getattr(data, 'dxl', None) is not None
and getattr(data, 'dxw', None) is not None):
res = resolution.Slit1D(data.x[index],
qx_width=data.dxl[index],
qy_width=data.dxw[index])
else:
res = resolution.Perfect1D(data.x[index])
#self._theory = np.zeros_like(self.Iq)
q_vectors = [res.q_calc]
q_mono = []
elif self.data_type == 'Iq-oriented':
index = (data.x >= data.qmin) & (data.x <= data.qmax)
if data.y is not None:
index &= ~np.isnan(data.y)
Iq = data.y[index]
dIq = data.dy[index]
else:
Iq, dIq = None, None
if (getattr(data, 'dxl', None) is None
or getattr(data, 'dxw', None) is None):
raise ValueError("oriented sample with 1D data needs slit resolution")
res = resolution2d.Slit2D(data.x[index],
qx_width=data.dxw[index],
qy_width=data.dxl[index])
q_vectors = res.q_calc
q_mono = []
else:
raise ValueError("Unknown data type") # never gets here
# Remember function inputs so we can delay loading the function and
# so we can save/restore state
self._kernel_inputs = q_vectors
self._kernel_mono_inputs = q_mono
self._kernel = None
self.Iq, self.dIq, self.index = Iq, dIq, index
self.resolution = res
def _set_data(self, Iq, noise=None):
# type: (np.ndarray, Optional[float]) -> None
# pylint: disable=attribute-defined-outside-init
if noise is not None:
self.dIq = Iq*noise*0.01
dy = self.dIq
y = Iq + np.random.randn(*dy.shape) * dy
self.Iq = y
if self.data_type in ('Iq', 'Iq-oriented'):
self._data.dy[self.index] = dy
self._data.y[self.index] = y
elif self.data_type == 'Iqxy':
self._data.data[self.index] = y
elif self.data_type == 'sesans':
self._data.y[self.index] = y
else:
raise ValueError("Unknown model")
def _calc_theory(self, pars, cutoff=0.0):
# type: (ParameterSet, float) -> np.ndarray
if self._kernel is None:
self._kernel = self._model.make_kernel(self._kernel_inputs)
self._kernel_mono = (
self._model.make_kernel(self._kernel_mono_inputs)
if self._kernel_mono_inputs else None)
Iq_calc = call_kernel(self._kernel, pars, cutoff=cutoff)
# Storing the calculated Iq values so that they can be plotted.
# Only applies to oriented USANS data for now.
# TODO: extend plotting of calculate Iq to other measurement types
# TODO: refactor so we don't store the result in the model
self.Iq_calc = None
if self.data_type == 'sesans':
Iq_mono = (call_kernel(self._kernel_mono, pars, mono=True)
if self._kernel_mono_inputs else None)
result = sesans.transform(self._data,
self._kernel_inputs[0], Iq_calc,
self._kernel_mono_inputs, Iq_mono)
else:
result = self.resolution.apply(Iq_calc)
if hasattr(self.resolution, 'nx'):
self.Iq_calc = (
self.resolution.qx_calc, self.resolution.qy_calc,
np.reshape(Iq_calc, (self.resolution.ny, self.resolution.nx))
)
return result
class DirectModel(DataMixin):
"""
Create a calculator object for a model.
*data* is 1D SAS, 2D SAS or SESANS data
*model* is a model calculator return from :func:`generate.load_model`
*cutoff* is the polydispersity weight cutoff.
"""
def __init__(self, data, model, cutoff=1e-5):
# type: (Data, KernelModel, float) -> None
self.model = model
self.cutoff = cutoff
# Note: _interpret_data defines the model attributes
self._interpret_data(data, model)
def __call__(self, **pars):
# type: (**float) -> np.ndarray
return self._calc_theory(pars, cutoff=self.cutoff)
def simulate_data(self, noise=None, **pars):
# type: (Optional[float], **float) -> None
"""
Generate simulated data for the model.
"""
Iq = self.__call__(**pars)
self._set_data(Iq, noise=noise)
def profile(self, **pars):
# type: (**float) -> None
"""
Generate a plottable profile.
"""
return call_profile(self.model.info, **pars)
def main():
# type: () -> None
"""
Program to evaluate a particular model at a set of q values.
"""
import sys
from .data import empty_data1D, empty_data2D
from .core import load_model_info, build_model
if len(sys.argv) < 3:
print("usage: python -m sasmodels.direct_model modelname (q|qx,qy) par=val ...")
sys.exit(1)
model_name = sys.argv[1]
call = sys.argv[2].upper()
if call != "ER_VR":
try:
values = [float(v) for v in call.split(',')]
except Exception:
values = []
if len(values) == 1:
q, = values
data = empty_data1D([q])
elif len(values) == 2:
qx, qy = values
data = empty_data2D([qx], [qy])
else:
print("use q or qx,qy or ER or VR")
sys.exit(1)
else:
data = empty_data1D([0.001]) # Data not used in ER/VR
model_info = load_model_info(model_name)
model = build_model(model_info)
calculator = DirectModel(data, model)
pars = dict((k, (float(v) if not k.endswith("_pd_type") else v))
for pair in sys.argv[3:]
for k, v in [pair.split('=')])
if call == "ER_VR":
ER = call_ER(model_info, pars)
VR = call_VR(model_info, pars)
print(ER, VR)
else:
Iq = calculator(**pars)
print(Iq[0])
if __name__ == "__main__":
main()
```
#### File: sasmodels/sasmodels/__init__.py
```python
__version__ = "0.97"
def data_files():
"""
Return the data files to be installed with the package.
The format is a list of (directory, [files...]) pairs which can be
used directly in setup(...,data_files=...) for setup.py.
"""
from os.path import join as joinpath
import glob
from .generate import EXTERNAL_DIR, DATA_PATH
def _expand_patterns(path, patterns):
target_path = joinpath(EXTERNAL_DIR, *path)
source_path = joinpath(DATA_PATH, *path)
files = []
for p in patterns:
files.extend(glob.glob(joinpath(source_path, p)))
return target_path, files
# Place the source for the model tree in the distribution. Minimally we
# need the c and cl files for running on OpenCL. Need the py files so
# users can easily copy existing models. Need the img files so that we
# can build model docs on the fly, including images.
return_list = [
_expand_patterns([], ['*.c', '*.cl']),
_expand_patterns(['models'], ['*.py', '*.c']),
_expand_patterns(['models', 'lib'], ['*.c']),
_expand_patterns(['models', 'img'], ['*.*']),
]
return return_list
```
#### File: sasmodels/models/broad_peak.py
```python
r"""
Definition
----------
This model calculates an empirical functional form for SAS data characterized
by a broad scattering peak. Many SAS spectra are characterized by a broad peak
even though they are from amorphous soft materials. For example, soft systems
that show a SAS peak include copolymers, polyelectrolytes, multiphase systems,
layered structures, etc.
The d-spacing corresponding to the broad peak is a characteristic distance
between the scattering inhomogeneities (such as in lamellar, cylindrical, or
spherical morphologies, or for bicontinuous structures).
The scattering intensity $I(q)$ is calculated as
.. math:: I(q) = \frac{A}{q^n} + \frac{C}{1 + (|q - q_0|\xi)^m} + B
Here the peak position is related to the d-spacing as $q_0 = 2\pi / d_0$.
$A$ is the Porod law scale factor, $n$ the Porod exponent, $C$ is the
Lorentzian scale factor, $m$ the exponent of $q$, $\xi$ the screening length,
and $B$ the flat background.
For 2D data the scattering intensity is calculated in the same way as 1D,
where the $q$ vector is defined as
.. math:: q = \sqrt{q_x^2 + q_y^2}
References
----------
None.
Authorship and Verification
----------------------------
* **Author:** <NAME> **Date:** pre 2010
* **Last Modified by:** <NAME> **Date:** July 24, 2016
* **Last Reviewed by:** <NAME> **Date:** March 21, 2016
"""
from numpy import inf, errstate
name = "broad_peak"
title = "Broad Lorentzian type peak on top of a power law decay"
description = """\
I(q) = scale_p/pow(q,exponent)+scale_l/
(1.0 + pow((fabs(q-q_peak)*length_l),exponent_l) )+ background
List of default parameters:
porod_scale = Porod term scaling
porod_exp = Porod exponent
lorentz_scale = Lorentzian term scaling
lorentz_length = Lorentzian screening length [A]
peak_pos = peak location [1/A]
lorentz_exp = Lorentzian exponent
background = Incoherent background"""
category = "shape-independent"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type", "description"],
parameters = [["porod_scale", "", 1.0e-05, [-inf, inf], "", "Power law scale factor"],
["porod_exp", "", 3.0, [-inf, inf], "", "Exponent of power law"],
["lorentz_scale", "", 10.0, [-inf, inf], "", "Scale factor for broad Lorentzian peak"],
["lorentz_length", "Ang", 50.0, [-inf, inf], "", "Lorentzian screening length"],
["peak_pos", "1/Ang", 0.1, [-inf, inf], "", "Peak position in q"],
["lorentz_exp", "", 2.0, [-inf, inf], "", "Exponent of Lorentz function"],
]
# pylint: enable=bad-whitespace, line-too-long
def Iq(q,
porod_scale=1.0e-5,
porod_exp=3.0,
lorentz_scale=10.0,
lorentz_length=50.0,
peak_pos=0.1,
lorentz_exp=2.0):
"""
:param q: Input q-value
:param porod_scale: Power law scale factor
:param porod_exp: Exponent of power law
:param lorentz_scale: Scale factor for broad Lorentzian peak
:param lorentz_length: Lorentzian screening length
:param peak_pos: Peak position in q
:param lorentz_exp: Exponent of Lorentz function
:return: Calculated intensity
"""
z = abs(q - peak_pos) * lorentz_length
with errstate(divide='ignore'):
inten = (porod_scale / q ** porod_exp
+ lorentz_scale / (1 + z ** lorentz_exp))
return inten
Iq.vectorized = True # Iq accepts an array of q values
demo = dict(scale=1, background=0,
porod_scale=1.0e-05, porod_exp=3,
lorentz_scale=10, lorentz_length=50, peak_pos=0.1, lorentz_exp=2)
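# Illustrative usage sketch (not part of the original model file): evaluates the
# broad_peak Iq() defined above on an assumed q grid with the default parameter
# values. The q range is a made-up example, not a sasmodels convention.
if __name__ == "__main__":
    import numpy as np
    q = np.logspace(-3, 0, 5)  # example q values in 1/Ang
    print(Iq(q, porod_scale=1.0e-5, porod_exp=3.0,
             lorentz_scale=10.0, lorentz_length=50.0,
             peak_pos=0.1, lorentz_exp=2.0))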
```
#### File: sasmodels/models/mono_gauss_coil.py
```python
r"""
This Debye Gaussian coil model strictly describes the scattering from
*monodisperse* polymer chains in theta solvents or polymer melts, conditions
under which the distances between segments follow a Gaussian distribution.
Provided the number of segments is large (ie, high molecular weight polymers)
the single-chain form factor P(Q) is that described by Debye (1947).
To describe the scattering from *polydisperse* polymer chains see the
:ref:`poly-gauss-coil` model.
Definition
----------
.. math::
I(q) = \text{scale} \cdot I_0 \cdot P(q) + \text{background}
where
.. math::
I_0 &= \phi_\text{poly} \cdot V
\cdot (\rho_\text{poly} - \rho_\text{solv})^2
P(q) &= 2 [\exp(-Z) + Z - 1] / Z^2
Z &= (q R_g)^2
V &= M / (N_A \delta)
Here, $\phi_\text{poly}$ is the volume fraction of polymer, $V$ is the
volume of a polymer coil, *M* is the molecular weight of the polymer,
$N_A$ is Avogadro's Number, $\delta$ is the bulk density of the polymer,
$\rho_\text{poly}$ is the sld of the polymer, $\rho_\text{solv}$ is the
sld of the solvent, and $R_g$ is the radius of gyration of the polymer coil.
The 2D scattering intensity is calculated in the same way as the 1D,
but where the *q* vector is redefined as
.. math::
q = \sqrt{q_x^2 + q_y^2}
References
----------
<NAME>, *J. Phys. Colloid. Chem.*, 51 (1947) 18.
<NAME>, *Methods of X-Ray and Neutron Scattering in Polymer Science*,
Oxford University Press, New York (2000).
http://www.ncnr.nist.gov/staff/hammouda/distance_learning/chapter_28.pdf
"""
from numpy import inf, exp, errstate
name = "mono_gauss_coil"
title = "Scattering from monodisperse polymer coils"
description = """
Evaluates the scattering from
monodisperse polymer chains.
"""
category = "shape-independent"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type", "description"],
parameters = [
["i_zero", "1/cm", 70.0, [0.0, inf], "", "Intensity at q=0"],
["rg", "Ang", 75.0, [0.0, inf], "", "Radius of gyration"],
]
# pylint: enable=bad-whitespace, line-too-long
# NB: Scale and Background are implicit parameters on every model
def Iq(q, i_zero, rg):
# pylint: disable = missing-docstring
z = (q * rg)**2
with errstate(invalid='ignore'):
inten = (i_zero * 2.0) * (exp(-z) + z - 1.0)/z**2
inten[q == 0] = i_zero
return inten
Iq.vectorized = True # Iq accepts an array of q values
demo = dict(scale=1.0, i_zero=70.0, rg=75.0, background=0.0)
# these unit test values taken from SasView 3.1.2
tests = [
[{'scale': 1.0, 'i_zero': 70.0, 'rg': 75.0, 'background': 0.0},
[0.0106939, 0.469418], [57.1241, 0.112859]],
]
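# Illustrative usage sketch (added here, not part of the original module):
# evaluates the Debye coil Iq() above on an assumed q grid. The q == 0 point is
# included to show the i_zero limit handled explicitly inside Iq().
if __name__ == "__main__":
    import numpy as np
    q = np.array([0.0, 0.0106939, 0.1, 0.469418])  # example q values in 1/Ang
    print(Iq(q, i_zero=70.0, rg=75.0))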
```
#### File: sasmodels/models/power_law.py
```python
r"""
This model calculates a simple power law with a flat background.
Definition
----------
.. math::
I(q) = \text{scale} \cdot q^{-\text{power}} + \text{background}
Note the minus sign in front of the exponent. The exponent *power*
should therefore be entered as a **positive** number for fitting.
Also note that unlike many other models, *scale* in this model
is NOT explicitly related to a volume fraction. Be careful if
combining this model with other models.
References
----------
None.
"""
from numpy import inf, errstate
name = "power_law"
title = "Simple power law with a flat background"
description = """
Evaluates the function
I(q) = scale * q^(-power) + background
NB: enter power as a positive number!
"""
category = "shape-independent"
# ["name", "units", default, [lower, upper], "type", "description"],
parameters = [["power", "", 4.0, [-inf, inf], "", "Power law exponent"]]
# NB: Scale and Background are implicit parameters on every model
def Iq(q, power):
# pylint: disable=missing-docstring
with errstate(divide='ignore'):
result = q**-power
return result
Iq.vectorized = True # Iq accepts an array of q values
demo = dict(scale=1.0, power=4.0, background=0.0)
tests = [
[{'scale': 1.0, 'power': 4.0, 'background' : 0.0},
[0.0106939, 0.469418], [7.64644e+07, 20.5949]],
]
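# Illustrative usage sketch (assumption: the module is run directly rather than
# through the sasmodels kernel machinery). Only the bare Iq() is exercised;
# scale and background are implicit parameters applied by the framework, so they
# do not appear here.
if __name__ == "__main__":
    import numpy as np
    q = np.array([0.0106939, 0.1, 0.469418])  # example q values in 1/Ang
    print(Iq(q, power=4.0))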
```
#### File: sasmodels/models/unified_power_Rg.py
```python
r"""
Definition
----------
This model employs the empirical multiple level unified Exponential/Power-law
fit method developed by Beaucage. Four functions are included so that 1, 2, 3,
or 4 levels can be used. In addition a 0 level has been added which simply
calculates
.. math::
I(q) = \text{scale} / q + \text{background}
The Beaucage method is able to reasonably approximate the scattering from
many different types of particles, including fractal clusters, random coils
(Debye equation), ellipsoidal particles, etc.
The model works best for mass fractal systems characterized by Porod exponents
between 5/3 and 3. It should not be used for surface fractal systems. Hammouda
(2010) has pointed out a deficiency in the way this model handles the
transitioning between the Guinier and Porod regimes and which can create
artefacts that appear as kinks in the fitted model function.
Also see the Guinier_Porod model.
The empirical fit function is:
.. math::
I(q) = \text{background}
+ \sum_{i=1}^N \Bigl[
G_i \exp\Bigl(-\frac{q^2R_{gi}^2}{3}\Bigr)
+ B_i \exp\Bigl(-\frac{q^2R_{g(i+1)}^2}{3}\Bigr)
\Bigl(\frac{1}{q_i^*}\Bigr)^{P_i} \Bigr]
where
.. math::
q_i^* = q \left[\operatorname{erf}
\left(\frac{q R_{gi}}{\sqrt{6}}\right)
\right]^{-3}
For each level, the four parameters $G_i$, $R_{gi}$, $B_i$ and $P_i$ must
be chosen. Beaucage has an additional factor $k$ in the definition of
$q_i^*$ which is ignored here.
For example, to approximate the scattering from random coils (Debye equation),
set $R_{gi}$ as the Guinier radius, $P_i = 2$, and $B_i = 2 G_i / R_{gi}$
See the references for further information on choosing the parameters.
For 2D data: The 2D scattering intensity is calculated in the same way as 1D,
where the $q$ vector is defined as
.. math::
q = \sqrt{q_x^2 + q_y^2}
References
----------
<NAME>, *J. Appl. Cryst.*, 28 (1995) 717-728
<NAME>, *J. Appl. Cryst.*, 29 (1996) 134-146
<NAME>, *Analysis of the Beaucage model, J. Appl. Cryst.*, (2010), 43, 1474-1478
"""
from __future__ import division
import numpy as np
from numpy import inf, exp, sqrt, errstate
from scipy.special import erf
category = "shape-independent"
name = "unified_power_Rg"
title = "Unified Power Rg"
description = """
The Beaucage model employs the empirical multiple level unified
Exponential/Power-law fit method developed by <NAME>. Four functions
are included so that 1, 2, 3, or 4 levels can be used.
"""
# pylint: disable=bad-whitespace, line-too-long
parameters = [
["level", "", 1, [0, 6], "", "Level number"],
["rg[level]", "Ang", 15.8, [0, inf], "", "Radius of gyration"],
["power[level]", "", 4, [-inf, inf], "", "Power"],
["B[level]", "1/cm", 4.5e-6, [-inf, inf], "", ""],
["G[level]", "1/cm", 400, [0, inf], "", ""],
]
# pylint: enable=bad-whitespace, line-too-long
def Iq(q, level, rg, power, B, G):
level = int(level + 0.5)
if level == 0:
with errstate(divide='ignore'):
return 1./q
with errstate(divide='ignore', invalid='ignore'):
result = np.zeros(q.shape, 'd')
for i in range(level):
exp_now = exp(-(q*rg[i])**2/3.)
pow_now = (erf(q*rg[i]/sqrt(6.))**3/q)**power[i]
if i < level-1:
exp_next = exp(-(q*rg[i+1])**2/3.)
else:
exp_next = 1
result += G[i]*exp_now + B[i]*exp_next*pow_now
result[q == 0] = np.sum(G[:level])
return result
Iq.vectorized = True
demo = dict(
level=2,
rg=[15.8, 21],
power=[4, 2],
B=[4.5e-6, 0.0006],
G=[400, 3],
scale=1.,
background=0.,
)
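# Illustrative usage sketch (not in the original file): evaluates a single
# Beaucage level using the default-like values from the parameter table above.
# rg, power, B and G are per-level sequences, so even for level=1 they are
# passed as arrays.
if __name__ == "__main__":
    q = np.logspace(-3, 0, 5)  # example q grid in 1/Ang
    print(Iq(q, level=1,
             rg=np.array([15.8]), power=np.array([4.0]),
             B=np.array([4.5e-6]), G=np.array([400.0])))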
```
#### File: jmborr/sasmodels/sesansdemo.py
```python
from __future__ import division
from pylab import *
from scipy.special import jv as besselj
# q-range parameters
q = arange(0.0003, 1.0, 0.0003); # [nm^-1] range wide enough for Hankel transform
dq=(q[1]-q[0])*1e9; # [m^-1] step size in q, needed for integration
nq=len(q);
Lambda=2e-10; # [m] wavelength
# sample parameters
phi=0.1; # volume fraction
R=100; # [nm] radius particles
DeltaRho=6e14; # [m^-2]
V=4/3*pi*R**3 * 1e-27; # [m^3]
th=0.002; # [m] thickness sample
#2 PHASE SYSTEM
st= 1.5*Lambda**2*DeltaRho**2*th*phi*(1-phi)*R*1e-9 # scattering power in sesans formalism
# Form factor solid sphere
qr=q*R;
P=(3.*(sin(qr)-qr*cos(qr)) / qr**3)**2;
# Structure factor dilute
S=1.;
#2 PHASE SYSTEM
# scattered intensity [m^-1] in absolute units according to SANS
I=phi*(1-phi)*V*(DeltaRho**2)*P*S;
clf()
subplot(211) # plot the SANS calculation
plot(q,I,'k')
loglog(q,I)
xlim([0.01, 1])
ylim([1, 1e9])
xlabel(r'$Q [nm^{-1}]$')
ylabel(r'$d\Sigma/d\Omega [m^{-1}]$')
# Hankel transform to nice range for plot
nz=61;
zz=linspace(0,240,nz); # [nm], should be less than reciprocal from q
G=zeros(nz);
for i in range(len(zz)):
integr=besselj(0,q*zz[i])*I*q;
G[i]=sum(integr);
G=G*dq*1e9*2*pi; # integration step, convert q into [m**-1] and 2 pi for the circular integration
# plot(zz,G);
stt= th*Lambda**2/4/pi/pi*G[0] # scattering power according to SANS formalism
PP=exp(th*Lambda**2/4/pi/pi*(G-G[0]));
subplot(212)
plot(zz,PP,'k',label="Hankel transform") # Hankel transform 1D
xlabel('spin-echo length [nm]')
ylabel('polarisation normalised')
hold(True)
# Cosine transformation of the 2D scattering pattern
if False:
qy,qz = meshgrid(q,q)
qr=R*sqrt(qy**2 + qz**2); # reuse variable names Hankel transform, but now 2D
P=(3.*(sin(qr)-qr*cos(qr)) / qr**3)**2;
# Structure factor dilute
S=1.;
# scattered intensity [m^-1] in absolute units according to SANS
I=phi*V*(DeltaRho**2)*P*S;
GG=zeros(nz);
for i in range(len(zz)):
integr=cos(qz*zz[i])*I;
GG[i]=sum(sum(integr));
    GG=4*GG* dq**2; # account for the integration step and the 4 quadrants
# plot(zz,GG);
sstt= th*Lambda**2/4/pi/pi*GG[0] # scattering power according to SANS formalism
PPP=exp(th*Lambda**2/4/pi/pi*(GG-GG[0]));
plot(zz,PPP,label="cosine transform") # cosine transform 2D
# For comparison calculation in SESANS formalism, which overlaps perfectly
def gsphere(z,r):
"""
Calculate SESANS-correlation function for a solid sphere.
<NAME> after formulae <NAME> J.Appl.Cryst. 2003 article
"""
d = z/r
g = zeros_like(z)
g[d==0] = 1.
low = ((d > 0) & (d < 2))
dlow = d[low]
dlow2 = dlow**2
    print(dlow.shape, dlow2.shape)
g[low] = sqrt(1-dlow2/4.)*(1+dlow2/8.) + dlow2/2.*(1-dlow2/16.)*log(dlow/(2.+sqrt(4.-dlow2)))
return g
if True:
plot(zz,exp(st*(gsphere(zz,R)-1)),'r', label="analytical")
legend()
show()
```
#### File: jmborr/sasmodels/setup_py2exe.py
```python
import os
import sys
#sys.dont_write_bytecode = True
# Force build before continuing
os.system('"%s" setup.py build' % sys.executable)
# Remove the current directory from the python path
here = os.path.abspath(os.path.dirname(__file__))
sys.path = [p for p in sys.path if os.path.abspath(p) != here]
import glob
from distutils.core import setup
from distutils.util import get_platform
# Augment the setup interface with the py2exe command and make sure the py2exe
# option is passed to setup.
import py2exe
if len(sys.argv) == 1:
sys.argv.append('py2exe')
# Put the build lib on the start of the path.
# For packages with binary extensions, need platform. If it is a pure
# script library, use an empty platform string.
platform = '.%s-%s' % (get_platform(), sys.version[:3])
#platform = ''
build_lib = os.path.abspath('build/lib' + platform)
sys.path.insert(0, build_lib)
# print "\n".join(sys.path)
import wx
import matplotlib
matplotlib.use('WXAgg')
import periodictable
# Retrieve the application version string.
import bumps
version = bumps.__version__
from bumps.gui.resources import resources as gui_resources
# A manifest is required to be included in a py2exe image (or accessible as a
# file in the image directory) when wxPython is included so that the Windows XP
# theme is used when rendering wx widgets. The manifest must be matched to the
# version of Python that is being used.
#
# Create a manifest for use with Python 2.5 on Windows XP or Vista. It is
# adapted from the Python manifest file (C:\Python25\pythonw.exe.manifest).
manifest_for_python25 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="1.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="<KEY>"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
"""
# Create a manifest for use with Python 2.6 or 2.7 on Windows XP or Vista.
manifest_for_python26 = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32">
</assemblyIdentity>
<description>%(prog)s</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="asInvoker"
uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.VC90.CRT"
version="9.0.21022.8"
processorArchitecture="x86"
publicKeyToken="<KEY>">
</assemblyIdentity>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="x86"
publicKeyToken="<KEY>"
language="*">
</assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>
"""
# Select the appropriate manifest to use.
if sys.version_info >= (3, 0) or sys.version_info < (2, 5):
print("*** This script only works with Python 2.5, 2.6, or 2.7.")
sys.exit()
elif sys.version_info >= (2, 6):
manifest = manifest_for_python26
elif sys.version_info >= (2, 5):
manifest = manifest_for_python25
# Create a list of all files to include along side the executable being built
# in the dist directory tree. Each element of the data_files list is a tuple
# consisting of a path (relative to dist\) and a list of files in that path.
data_files = []
# Add resource files that need to reside in the same directory as the image.
data_files.append(('.', [os.path.join('.', 'LICENSE.txt')]))
data_files.append(('.', [os.path.join('.', 'README.txt')]))
data_files.append(('.', [os.path.join('.', 'bin', 'bumps_launch.bat')]))
# Add application specific data files from the bumps\bumps-data folder.
data_files += gui_resources.data_files()
# Add data files from the matplotlib\mpl-data folder and its subfolders.
# For matploblib prior to version 0.99 see the examples at the end of the file.
data_files += matplotlib.get_py2exe_datafiles()
# Add data files from the periodictable\xsf folder.
data_files += periodictable.data_files()
# Add example directories and their files. An empty directory is ignored.
# Note that Inno Setup will determine where these files will be placed such as
# C:\My Documents\... instead of the installation folder.
for path in glob.glob(os.path.join('examples', '*')):
if os.path.isdir(path):
for file in glob.glob(os.path.join(path, '*.*')):
data_files.append((path, [file]))
else:
data_files.append(('examples', [path]))
for path in glob.glob(os.path.join('doc', 'examples', '*')):
if os.path.isdir(path):
for file in glob.glob(os.path.join(path, '*.*')):
data_files.append((path, [file]))
else:
data_files.append(('doc', [path]))
# Add PDF documentation to the dist staging directory.
pdf = os.path.join('doc', 'Bumps.pdf')
if os.path.isfile(pdf):
data_files.append(('doc', [pdf]))
else:
print("*** %s not found - building frozen image without it ***" % pdf)
# Add the Microsoft Visual C++ 2008 redistributable kit if we are building with
# Python 2.6 or 2.7. This kit will be installed on the target system as part
# of the installation process for the frozen image. Note that the Python 2.5
# interpreter requires msvcr71.dll which is included in the Python25 package,
# however, Python 2.6 and 2.7 require the msvcr90.dll but they do not bundle it
# with the Python26 or Python27 package. Thus, for Python 2.6 and later, the
# appropriate dll must be present on the target system at runtime.
if sys.version_info >= (2, 6):
pypath = os.path.dirname(sys.executable)
data_files.append(('.', [os.path.join(pypath, 'vcredist_x86.exe')]))
# Specify required packages to bundle in the executable image.
packages = ['numpy', 'scipy', 'matplotlib', 'pytz', 'pyparsing',
'periodictable', 'bumps', 'sasmodels', 'pyopencl',
]
# Specify files to include in the executable image.
includes = []
# Specify files to exclude from the executable image.
# - We can safely exclude Tk/Tcl and Qt modules because our app uses wxPython.
# - We do not use ssl services so they are omitted.
# - We can safely exclude the TkAgg matplotlib backend because our app uses
# "matplotlib.use('WXAgg')" to override the default matplotlib configuration.
# - On the web it is widely recommended to exclude certain lib*.dll modules
# but this does not seem necessary any more (but adding them does not hurt).
# - Python25 requires mscvr71.dll, however, Win XP includes this file.
# - Since we do not support Win 9x systems, w9xpopen.dll is not needed.
# - For some reason cygwin1.dll gets included by default, but it is not needed.
excludes = ['Tkinter', 'PyQt4', '_ssl', '_tkagg', 'numpy.distutils.test']
dll_excludes = ['libgdk_pixbuf-2.0-0.dll',
'libgobject-2.0-0.dll',
'libgdk-win32-2.0-0.dll',
'tcl84.dll',
'tk84.dll',
'QtGui4.dll',
'QtCore4.dll',
'msvcr71.dll',
'msvcp90.dll',
'w9xpopen.exe',
'cygwin1.dll']
class Target(object):
"""This class stores metadata about the distribution in a dictionary."""
def __init__(self, **kw):
self.__dict__.update(kw)
self.version = version
clientCLI = Target(
name='Bumps',
description='Bumps CLI application',
# module to run on application start
script=os.path.join('bin', 'bumps_cli.py'),
dest_base='bumps', # file name part of the exe file to create
# also need to specify in data_files
icon_resources=[
(1, os.path.join('bumps', 'gui', 'resources', 'bumps.ico'))],
bitmap_resources=[],
other_resources=[(24, 1, manifest % dict(prog='Bumps'))])
clientGUI = Target(
name='Bumps',
description='Bumps GUI application',
# module to run on application start
script=os.path.join('bin', 'bumps_gui.py'),
dest_base='bumps_gui', # file name part of the exe file to create
# also need to specify in data_files
icon_resources=[
(1, os.path.join('bumps', 'gui', 'resources', 'bumps.ico'))],
bitmap_resources=[],
other_resources=[(24, 1, manifest % dict(prog='Bumps'))])
# Now we do the work to create a standalone distribution using py2exe.
#
# When the application is run in console mode, a console window will be created
# to receive any logging or error messages and the application will then create
# a separate GUI application window.
#
# When the application is run in windows mode, it will create a GUI application
# window and no console window will be provided. Output to stderr will be
# written to <app-image-name>.log.
setup(
console=[clientCLI],
windows=[clientGUI],
options={'py2exe': {
'packages': packages,
'includes': includes,
'excludes': excludes,
'dll_excludes': dll_excludes,
'compressed': 1, # standard compression
'optimize': 0, # no byte-code optimization
'dist_dir': "dist", # where to put py2exe results
'xref': False, # display cross reference (as html doc)
'bundle_files': 1, # bundle python25.dll in library.zip
}
},
# Since we are building two exe's, do not put the shared library in each
# of them. Instead create a single, separate library.zip file.
# zipfile=None, # bundle library.zip in exe
data_files=data_files # list of files to copy to dist directory
)
``` |
{
"source": "jmborr/tinydb",
"score": 3
} |
#### File: tinydb/tinydb/utils.py
```python
from collections import OrderedDict, abc
from typing import List, Iterator, TypeVar, Generic, Union, Optional
K = TypeVar('K')
V = TypeVar('V')
D = TypeVar('D')
class LRUCache(abc.MutableMapping, Generic[K, V]):
def __init__(self, capacity=None):
self.capacity = capacity
self.cache = OrderedDict() # type: OrderedDict[K, V]
@property
def lru(self) -> List[K]:
return list(self.cache.keys())
@property
def length(self) -> int:
return len(self.cache)
def clear(self) -> None:
self.cache.clear()
def __len__(self) -> int:
return self.length
def __contains__(self, key: object) -> bool:
return key in self.cache
def __setitem__(self, key: K, value: V) -> None:
self.set(key, value)
def __delitem__(self, key: K) -> None:
del self.cache[key]
def __getitem__(self, key) -> V:
value = self.get(key)
if value is None:
raise KeyError(key)
return value
def __iter__(self) -> Iterator[K]:
return iter(self.cache)
def get(self, key: K, default: D = None) -> Optional[Union[V, D]]:
value = self.cache.get(key)
if value is not None:
# Put the key back to the front of the ordered dict by
            # re-inserting it
del self.cache[key]
self.cache[key] = value
return value
return default
def set(self, key: K, value: V):
if self.cache.get(key):
del self.cache[key]
self.cache[key] = value
else:
self.cache[key] = value
            # If the cache has a capacity limit and adding this item pushed us
            # over it, evict the least recently used entry (the front of the
            # ordered dict).
if self.capacity is not None and self.length > self.capacity:
self.cache.popitem(last=False)
class FrozenDict(dict):
def __hash__(self):
return hash(tuple(sorted(self.items())))
def _immutable(self, *args, **kws):
raise TypeError('object is immutable')
__setitem__ = _immutable
__delitem__ = _immutable
clear = _immutable
setdefault = _immutable
popitem = _immutable
def update(self, e=None, **f):
raise TypeError('object is immutable')
def pop(self, k, d=None):
raise TypeError('object is immutable')
def freeze(obj):
if isinstance(obj, dict):
return FrozenDict((k, freeze(v)) for k, v in obj.items())
elif isinstance(obj, list):
return tuple(freeze(el) for el in obj)
elif isinstance(obj, set):
return frozenset(obj)
else:
return obj
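# Illustrative usage sketch (added for this write-up, not part of tinydb):
# demonstrates the LRU eviction order of LRUCache and the immutability guarantee
# of freeze()/FrozenDict, using only the objects defined above.
if __name__ == "__main__":
    cache = LRUCache(capacity=2)
    cache["a"] = 1
    cache["b"] = 2
    cache.get("a")           # touch "a" so it becomes most recently used
    cache["c"] = 3           # evicts "b", the least recently used key
    assert "b" not in cache and cache.lru == ["a", "c"]

    frozen = freeze({"nested": [1, 2, {3}]})
    assert isinstance(frozen, FrozenDict)
    try:
        frozen["nested"] = ()
    except TypeError:
        pass                 # FrozenDict rejects mutation, as expected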
``` |
{
"source": "jmbowles/kaggle-ashrae-energy-predictor",
"score": 3
} |
#### File: kaggle-ashrae-energy-predictor/model/als_submit.py
```python
from __future__ import print_function
"""
0: electricity, 1: chilledwater, 2: steam, 3: hotwater
kaggle competitions submit -c ashrae-energy-prediction -f submittal_6.csv.gz -m "Submission 6. ALS by meter, month (users), and day (items), rating = meter_reading in log1p"
"""
from pyspark.ml import PipelineModel
import pyspark.sql.functions as F
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("ALS Submittal") \
.config("spark.dynamicAllocation.enabled", "true") \
.config("spark.shuffle.service.enabled", "true") \
.enableHiveSupport() \
.getOrCreate()
def get_building(df, building_id):
return df.where(F.expr("building_id = {0}".format(building_id)))
def get_meter(df, meter):
return df.where(F.expr("meter = {0}".format(meter)))
def get_meters(df):
return df.select("meter").distinct().orderBy("meter")
def load_model(building_id, meter):
model_path = "output/als_model_{0}_{1}".format(building_id, meter)
return PipelineModel.load(model_path)
def to_csv(submit_id, algo):
import os
file_name = "submittal_{0}.csv.gzip".format(submit_id)
outdir = "./output/submit"
if not os.path.exists(outdir):
os.mkdir(outdir)
path = os.path.join(outdir, file_name)
predictions = spark.table("submitted_predictions")
submittal = predictions.where(predictions.algo == algo)
    submittal.select("row_id", "meter_reading").coalesce(1).toPandas().to_csv(path, header=True, index=False, compression="gzip")
print("Total rows written to '{0}': {1}".format(file_name, submittal.count()))
print("Loading test data for prediction submittal")
test = spark.table("test")
test.cache()
submit_id = 6
algo = "als_log1p"
buildings = spark.read.load("../datasets/building_metadata.csv", format="csv", sep=",", inferSchema="true", header="true").select("building_id")
for row in buildings.toLocalIterator():
building_id = row.building_id
building = get_building(test, building_id)
meters = get_meters(building)
for row in meters.toLocalIterator():
meter_id = row.meter
building_meter = get_meter(building, meter_id)
print("Predicting meter readings for building {0} meter {1}".format(building_id, meter_id))
model = load_model(building_id, meter_id)
predictions = model.transform(building_meter)
predictions = predictions.withColumn("prediction", F.expm1(predictions.prediction))
print("Saving submission")
predictions = predictions.withColumn("submitted_ts", F.current_timestamp())
predictions = predictions.withColumn("submit_id", F.lit(submit_id))
predictions = predictions.withColumn("algo", F.lit(algo))
predictions = predictions.withColumnRenamed("prediction", "meter_reading").select("row_id", "building_id", "meter", "timestamp", "meter_reading", "submit_id", "submitted_ts", "algo")
predictions = predictions.withColumn("meter_reading", F.when(predictions.meter_reading < 0, F.lit(0.0)).otherwise(predictions.meter_reading))
predictions = predictions.fillna(0.0, "meter_reading")
predictions.coalesce(1).write.saveAsTable("submitted_predictions", format="parquet", mode="append")
to_csv(submit_id, algo)
```
#### File: kaggle-ashrae-energy-predictor/model/linear_submit.py
```python
from __future__ import print_function
"""
0: electricity, 1: chilledwater, 2: steam, 3: hotwater
"""
from pyspark.ml import PipelineModel
import pyspark.sql.functions as F
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("Linear Regression Submittal") \
.config("spark.dynamicAllocation.enabled", "true") \
.config("spark.shuffle.service.enabled", "true") \
.enableHiveSupport() \
.getOrCreate()
def get_building(df, building_id):
return df.where(F.expr("building_id = {0}".format(building_id)))
def get_season(df, months):
return df.where(F.expr("month in {0}".format(months)))
def load_model(building_id, season):
model_path = "output/linear_model_{0}_{1}".format(building_id, season)
return PipelineModel.load(model_path)
def to_csv(submit_id):
import os
file_name = "submittal_{0}.csv".format(submit_id)
outdir = "./output/submit"
if not os.path.exists(outdir):
os.mkdir(outdir)
path = os.path.join(outdir, file_name)
predictions = spark.table("submitted_predictions")
predictions.select("row_id", "meter_reading").coalesce(1).toPandas().to_csv(path, header=True, index=False)
print("Total rows written to '{0}': {1}".format(file_name, predictions.count()))
print("Loading test data for prediction submittal")
test = spark.table("test")
test.cache()
submit_id = 1
algo = "linear"
season_months = {"Winter": "(12,1,2)", "Spring": "(3,4,5)", "Summer": "(6,7,8)", "Fall": "(9,10,11)"}
buildings = spark.read.load("../datasets/building_metadata.csv", format="csv", sep=",", inferSchema="true", header="true").select("building_id")
for row in buildings.toLocalIterator():
building_id = row.building_id
for season, months in season_months.items():
print("Predicting meter readings for building {0} season {1}".format(building_id, season))
building = get_building(test, building_id)
building = get_season(building, months)
model = load_model(building_id, season)
predictions = model.transform(building)
print("Saving submission")
predictions = predictions.withColumn("submitted_ts", F.current_timestamp())
predictions = predictions.withColumn("submit_id", F.lit(submit_id))
predictions = predictions.withColumn("algo", F.lit(algo))
predictions = predictions.withColumnRenamed("prediction", "meter_reading").select("row_id", "building_id", "meter", "timestamp", "meter_reading", "submit_id", "submitted_ts", "algo")
predictions.coalesce(1).write.saveAsTable("submitted_predictions", format="parquet", mode="append")
to_csv(submit_id)
```
#### File: kaggle-ashrae-energy-predictor/model/weather.py
```python
from __future__ import print_function
"""
"""
import pyspark.sql.functions as F
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("TransformFeatures") \
.config("spark.dynamicAllocation.enabled", "true") \
.config("spark.shuffle.service.enabled", "true") \
.enableHiveSupport() \
.getOrCreate()
def to_gmt(df):
return df.withColumn("timestamp", F.to_utc_timestamp("timestamp", df.time_zone))
def split_timestamp(df):
df = df.withColumn("month", F.month(df.timestamp))
df = df.withColumn("day", F.dayofmonth(df.timestamp))
df = df.withColumn("hour", F.hour(df.timestamp))
return df
def create_table(df, table_name):
print("Dropping table '{0}'".format(table_name))
spark.sql("drop table if exists {0}".format(table_name))
print("Saving table '{0}'".format(table_name))
df.coalesce(4).write.saveAsTable(table_name, format="parquet", mode="overwrite")
print("Loading and caching data")
train = spark.read.load("../datasets/train.csv", format="csv", sep=",", inferSchema="true", header="true")
train = train.dropDuplicates(["building_id", "meter", "timestamp"])
train.cache()
print("Training dataset row count: {0}".format(train.count()))
test = spark.read.load("../datasets/test.csv", format="csv", sep=",", inferSchema="true", header="true")
test.cache()
print("Test dataset row count: {0}".format(test.count()))
meta = spark.read.load("../datasets/building_metadata_tz.csv", format="csv", sep=",", inferSchema="true", header="true")
meta = meta.withColumnRenamed("building_id", "building_id_meta")
meta = meta.withColumnRenamed("site_id", "site_id_meta")
print("Metadata row count: {0}".format(meta.count()))
weather = spark.read.load("../datasets/weather_train.csv", format="csv", sep=",", inferSchema="true", header="true")
weather = weather.dropDuplicates(["site_id", "timestamp"])
weather = weather.withColumnRenamed("timestamp", "timestamp_wx")
weather = weather.withColumnRenamed("site_id", "site_id_wx")
print("Weather row count: {0}".format(weather.count()))
#weather.withColumn("month_wx", F.month(weather.timestamp_wx)).withColumn("day_wx", F.dayofmonth(weather.timestamp_wx)).groupby("site_id_wx", "month_wx", "day_wx").agg(F.avg(weather.air_temperature).alias("avg_celsius")).withColumn("avg_fahrenheit", F.expr("avg_celsius * 1.8 + 32")).show()
#weather.where(F.expr("site_id_wx = 2 and month(timestamp_wx) = 1")).withColumn("month_wx", F.month(weather.timestamp_wx)).withColumn("day_wx", F.dayofmonth(weather.timestamp_wx)).groupby("site_id_wx", "month_wx", "day_wx").agg(F.avg(weather.air_temperature).alias("avg_celsius")).withColumn("avg_fahrenheit", F.expr("avg_celsius * 1.8 + 32")).orderBy("day_wx").show(50)
train = train.join(meta, [meta.building_id_meta == train.building_id])
train = to_gmt(train)
train = train.join(weather, [train.timestamp == weather.timestamp_wx, train.site_id_meta == weather.site_id_wx], "left_outer")
train = train.withColumnRenamed("site_id_meta", "site_id")
train = train.drop("building_id_meta", "site_id_wx", "timestamp_wx")
print("Training joined row count: {0}".format(train.count()))
weather_test = spark.read.load("../datasets/weather_test.csv", format="csv", sep=",", inferSchema="true", header="true")
weather_test = weather_test.dropDuplicates(["site_id", "timestamp"])
weather_test = weather_test.withColumnRenamed("timestamp", "timestamp_wx")
weather_test = weather_test.withColumnRenamed("site_id", "site_id_wx")
print("Weather test row count: {0}".format(weather_test.count()))
test = test.join(meta, [meta.building_id_meta == test.building_id])
test = to_gmt(test)
test = test.join(weather_test, [test.timestamp == weather_test.timestamp_wx, test.site_id_meta == weather_test.site_id_wx], "left_outer")
test = test.withColumnRenamed("site_id_meta", "site_id")
test = test.drop("building_id_meta", "site_id_wx", "timestamp_wx")
print("Test joined row count: {0}".format(test.count()))
print("Transforming datasets")
train = split_timestamp(train)
test = split_timestamp(test)
print("Creating tables")
create_table(train, "training")
create_table(test, "test")
``` |
{
"source": "jmbradshaw79/duckdb",
"score": 3
} |
#### File: fast/pandas/test_pandas_category.py
```python
import duckdb
import pandas as pd
import numpy
import pytest
def check_category_equal(category):
df_in = pd.DataFrame({
'x': pd.Categorical(category, ordered=True),
})
df_out = duckdb.query_df(df_in, "data", "SELECT * FROM data").df()
assert df_in.equals(df_out)
def check_create_table(category):
conn = duckdb.connect()
conn.execute ("PRAGMA enable_verification")
df_in = pd.DataFrame({
'x': pd.Categorical(category, ordered=True),
'y': pd.Categorical(category, ordered=True)
})
df_out = duckdb.query_df(df_in, "data", "SELECT * FROM data").df()
assert df_in.equals(df_out)
conn.execute("CREATE TABLE t1 AS SELECT * FROM df_in")
conn.execute("CREATE TABLE t2 AS SELECT * FROM df_in")
# Do a insert to trigger string -> cat
conn.execute("INSERT INTO t1 VALUES ('2','2')")
res = conn.execute("SELECT x FROM t1 where x = '1'").fetchall()
assert res == [('1',)]
res = conn.execute("SELECT t1.x FROM t1 inner join t2 on (t1.x = t2.x)").fetchall()
assert res == conn.execute("SELECT x FROM t1").fetchall()
# Can't compare different ENUMs
with pytest.raises(Exception):
conn.execute("SELECT * FROM t1 inner join t2 on (t1.x = t2.y)").fetchall()
assert res == conn.execute("SELECT x FROM t1").fetchall()
# Triggering the cast with ENUM as a src
conn.execute("ALTER TABLE t1 ALTER x SET DATA TYPE VARCHAR")
class TestCategory(object):
def test_category_simple(self, duckdb_cursor):
df_in = pd.DataFrame({
'float': [1.0, 2.0, 1.0],
'int': pd.Series([1, 2, 1], dtype="category")
})
df_out = duckdb.query_df(df_in, "data", "SELECT * FROM data").df()
print (duckdb.query_df(df_in, "data", "SELECT * FROM data").fetchall())
print (df_out['int'])
assert numpy.all(df_out['float'] == numpy.array([1.0, 2.0, 1.0]))
assert numpy.all(df_out['int'] == numpy.array([1, 2, 1]))
def test_category_nulls(self, duckdb_cursor):
df_in = pd.DataFrame({
'int': pd.Series([1, 2, None], dtype="category")
})
df_out = duckdb.query_df(df_in, "data", "SELECT * FROM data").df()
print (duckdb.query_df(df_in, "data", "SELECT * FROM data").fetchall())
assert df_out['int'][0] == 1
assert df_out['int'][1] == 2
assert numpy.isnan(df_out['int'][2])
def test_category_string(self, duckdb_cursor):
check_category_equal(['foo','bla','zoo', 'foo', 'foo', 'bla'])
def test_category_string_null(self, duckdb_cursor):
check_category_equal(['foo','bla',None,'zoo', 'foo', 'foo',None, 'bla'])
def test_categorical_fetchall(self, duckdb_cursor):
df_in = pd.DataFrame({
'x': pd.Categorical(['foo','bla',None,'zoo', 'foo', 'foo',None, 'bla'], ordered=True),
})
assert duckdb.query_df(df_in, "data", "SELECT * FROM data").fetchall() == [('foo',), ('bla',), (None,), ('zoo',), ('foo',), ('foo',), (None,), ('bla',)]
def test_category_string_uint8(self, duckdb_cursor):
category = []
for i in range (10):
category.append(str(i))
check_create_table(category)
def test_category_fetch_df_chunk(self, duckdb_cursor):
con = duckdb.connect()
categories = ['foo','bla',None,'zoo', 'foo', 'foo',None, 'bla']
result = categories*128
categories = result * 2
df_result = pd.DataFrame({
'x': pd.Categorical(result, ordered=True),
})
df_in = pd.DataFrame({
'x': pd.Categorical(categories, ordered=True),
})
con.register("data", df_in)
query = con.execute("SELECT * FROM data")
cur_chunk = query.fetch_df_chunk()
assert(cur_chunk.equals(df_result))
cur_chunk = query.fetch_df_chunk()
assert(cur_chunk.equals(df_result))
cur_chunk = query.fetch_df_chunk()
assert(cur_chunk.empty)
def test_category_mix(self, duckdb_cursor):
df_in = pd.DataFrame({
'float': [1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 0.0],
'x': pd.Categorical(['foo','bla',None,'zoo', 'foo', 'foo',None, 'bla'], ordered=True),
})
df_out = duckdb.query_df(df_in, "data", "SELECT * FROM data").df()
assert df_out.equals(df_in)
``` |
{
"source": "jmbreuer/strainer",
"score": 2
} |
#### File: strainer/sieve/__init__.py
```python
from sievelib import managesieve
from .connection import SieveConnectionQueue
from .parser import lex, parse, SieveScript
__all__ = ('lex', 'parse', 'SieveScript', 'SieveConnectionQueue')
# Monkeypatch sievelib#95
old_get_script = managesieve.Client.getscript
def getscript(self, name):
script = old_get_script.__get__(self)(name)
if script is not None:
script = script.replace('\n', '\r\n')
return script
managesieve.Client.getscript = getscript
```
#### File: sieve/semantics/tags.py
```python
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Dict, Sequence
class ValueTokens(Dict[bytes, Sequence[str]]):
def __init__(self, *args, _: Sequence[str] = (), **kwargs):
self._fallback = _
super().__init__(*args, **kwargs)
def __getitem__(self, key: Any):
try:
return super().__getitem__(key)
except KeyError:
return self._fallback
@dataclass
class Tag:
name: str
one_of: Sequence[bytes] = ()
value_tokens: ValueTokens = field(default_factory=ValueTokens)
required: bool = False
class Tags:
def __init__(self, *specs: Tag):
self._specs = specs
self._tags = {tag: spec for spec in specs for tag in spec.one_of}
def __iter__(self):
yield from self._specs
def __getitem__(self, key: bytes):
return self._tags[key]
def __contains__(self, key: bytes):
return key in self._tags
over_under = Tag('`:over` or `:under`', (b':over', b':under',), ValueTokens(_=('number',)), True)
comparator = Tag('comparator', (b':comparator',), ValueTokens(_=('string',)))
match_type = Tag('match type', (b':is', b':contains', b':matches',))
address_part = Tag('address part', (b':localpart', b':domain', b':all'))
body_transform = Tag('body transform', (b':raw', b':content', b':text'), ValueTokens({b':content': ('string_list',)}))
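# Illustrative usage sketch (not part of the original module): ValueTokens falls
# back to the `_` sequence for unknown keys, and Tags lets a spec be found by any
# of its one_of aliases. Only objects defined above are used.
if __name__ == '__main__':
    assert body_transform.value_tokens[b':content'] == ('string_list',)
    assert body_transform.value_tokens[b':raw'] == ()   # fallback `_` value
    tags = Tags(match_type, address_part)
    assert tags[b':contains'] is match_type
    assert b':domain' in tags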
```
#### File: widgets/tree/widget.py
```python
from PyQt5.QtCore import Qt, QSize
from PyQt5.QtGui import QContextMenuEvent
from PyQt5.QtWidgets import QTreeWidget, QHeaderView
from ...actions import EditAccount, OpenScript, ReloadAccount
from ...controls import AccountMenu, ScriptMenu
from .items import AccountItem
class Tree(QTreeWidget):
def __init__(self, parent):
super().__init__(parent)
self.setMinimumSize(QSize(100, 200))
self.setColumnCount(2)
self.header().setMinimumSectionSize(0)
self.header().setStretchLastSection(False)
self.header().setSectionResizeMode(0, QHeaderView.Stretch)
self.header().setSectionResizeMode(1, QHeaderView.ResizeToContents)
self.setHeaderHidden(True)
self.setExpandsOnDoubleClick(False)
self._accountMenu = AccountMenu(self.window())
self._scriptMenu = ScriptMenu(self.window())
for action in (*self._accountMenu.actions(), *self._scriptMenu.actions()):
try:
action.setDefaultArgs(lambda: (self.currentItem(),))
except AttributeError:
pass
self.currentItemChanged.connect(self.onCurrentItemChanged)
self.itemActivated.connect(self.onItemActivated)
self.itemChanged.connect(self.onItemChanged)
self._updateMenus()
def sizeHint(self):
return QSize(200, 600)
def blockSignals(self, value):
super().blockSignals(value)
self._updateMenus(None if value else self.currentItem())
def onCurrentItemChanged(self, next, previous):
self._updateMenus(next)
def _updateMenus(self, item=None):
self._accountMenu.update(item)
self._scriptMenu.update(item)
def contextMenuEvent(self, event: QContextMenuEvent):
item = self.itemAt(event.pos()) or self.currentItem()
if not item:
return
if item.parent():
self._scriptMenu.popup(event.globalPos())
else:
self._accountMenu.popup(event.globalPos())
def onItemActivated(self, item):
if isinstance(item, AccountItem):
self.window().action(EditAccount).trigger(item)
else:
self.window().action(OpenScript).trigger(item)
def onItemChanged(self, item):
self.onItemsChanged([item])
def onItemsChanged(self, items):
self.sortItems(0, Qt.AscendingOrder)
for item in items:
if item.parent() is None:
self.window().action(ReloadAccount).trigger(item)
def illegalChildNames(self):
return {'', *(self.topLevelItem(i).name for i in range(self.topLevelItemCount()))}
def addAccountItem(self, account):
return self.addAccountItems([account])[0]
def addAccountItems(self, accounts):
if not accounts:
return []
items = [AccountItem(account) for account in accounts]
super().addTopLevelItems(items)
self.onItemsChanged(items)
return items
```
#### File: strainer/windows/dialogs.py
```python
from PyQt5.QtWidgets import QFormLayout
from .base import AddOrChangeDialog, DialogTitle
from ..controls import StringField, IntegerField, PasswordField, CheckboxField, OptionsField
class AccountDialog(AddOrChangeDialog):
_addTitle = DialogTitle('mdi.account-plus', 'Add new account')
_changeTitle = DialogTitle('mdi.account-edit', 'Change account settings')
_defaultValue = ('New account', '', 4190, '', '', False, None)
def __init__(self, parent):
super().__init__(parent)
self._addField('Display name:', StringField()).textChanged.connect(self.onTextChanged)
self._addField('Server address:', StringField()).textChanged.connect(self.onTextChanged)
self._addField('Server port:', IntegerField())
self._addField('User name:', StringField())
self._addField('Password:', PasswordField())
self._addField('Use STARTTLS:', CheckboxField('enable'))
self._addField('Authentication:', OptionsField([
('Automatic', None),
('Digest MD5', 'DIGEST-MD5'),
('Plain', 'PLAIN'),
('Login', 'LOGIN'),
]))
def _setValue(self, values):
for i, value in enumerate(values):
self._getField(i).setValue(value)
self._getField(0).setFocus()
def _getValue(self):
return [self._getField(i).getValue() for i in range(self.layout().rowCount() - 1)]
def onTextChanged(self, _):
name = self._getField(0).getValue()
address = self._getField(1).getValue()
self.setInputValid(self.isNameLegal(name) and bool(address))
class ScriptNameDialog(AddOrChangeDialog):
_addTitle = DialogTitle('mdi.file-plus', 'Add new script')
_changeTitle = DialogTitle('mdi.file-edit', 'Rename script')
_defaultValue = ''
def __init__(self, parent):
super().__init__(parent)
field = self._addField('New script name:', StringField(128))
field.textChanged.connect(lambda name: self.setInputValid(self.isNameLegal(name)))
self._getValue = field.getValue
def _setValue(self, newValue):
field = self.layout().itemAt(0, QFormLayout.FieldRole).widget()
field.setValue(newValue)
field.setFocus()
field.textChanged.emit(newValue)
```
#### File: strainer/windows/mainWindow.py
```python
from PyQt5.QtCore import Qt, QSettings, QSize, QTimer
from PyQt5.QtWidgets import QFrame, QHBoxLayout, QMainWindow, QSplitter, QStyle
from ..controls import ManageMenu, EditMenu, NavigateMenu, ManageToolBar, EditToolBar, NavigateToolBar, StatusBar
from ..widgets import Tree, Editor, Reference
from .messages import ConfirmCloseMessage
class MainWindow(QMainWindow):
def __init__(self, all_actions):
super().__init__()
self._actions = all_actions
self._splitter = QSplitter()
self.setCentralWidget(QFrame(self))
QHBoxLayout(self.centralWidget()).addWidget(self._splitter)
self.menuBar().addMenu(ManageMenu(self))
self.menuBar().addMenu(EditMenu(self))
self.menuBar().addMenu(NavigateMenu(self))
self.addToolBar(ManageToolBar(self))
self.addToolBar(EditToolBar(self))
self.addToolBar(NavigateToolBar(self))
self.setStatusBar(StatusBar(self))
self._tree = Tree(self._splitter)
self._editor = Editor(self._splitter)
self._reference = Reference(self._splitter)
self._parseTimer = QTimer(interval=1000)
self.statusBar().gotoError.connect(self._editor.setCursorPosition)
self.statusBar().gotoError.connect(lambda *_: self._editor.setFocus(Qt.OtherFocusReason))
self.statusBar().errorChanged.connect(self._editor.setParseError)
self._editor.modificationChanged.connect(self.onModificationChanged)
self._editor.cursorPositionChanged.connect(self.statusBar().setCursorPosition)
self._editor.textChanged.connect(self._parseTimer.start)
self._openScript = None
self._confirmClose = ConfirmCloseMessage(self).exec
self._parseTimer.timeout.connect(self.onParseTimer)
self.onModificationChanged()
def show(self, desktop):
settings = QSettings()
try:
self.restoreGeometry(settings.value('windows/main/geometry'))
except TypeError:
geometry = desktop.availableGeometry()
size = geometry.size()
            size = QSize(int(size.width() * 0.75), int(size.height() * 0.75))
self.setGeometry(QStyle.alignedRect(Qt.LeftToRight, Qt.AlignCenter, size, geometry))
try:
self.restoreState(settings.value('windows/main/window-state'))
except TypeError:
pass
try:
self._splitter.restoreState(settings.value('windows/main/splitter-state'))
except TypeError:
pass
super().show()
def action(self, action_type):
return self._actions[action_type]
def tree(self):
return self._tree
def editor(self):
return self._editor
def reference(self):
return self._reference
def openScript(self):
return self._openScript
def setOpenScript(self, item, content='', *, force=False):
if self._openScript:
if not force and not self._confirmClose():
self._editor.setFocus(Qt.OtherFocusReason)
return False
self._openScript.open = False
self._openScript = None
self._editor.close()
if item:
self._editor.open(content)
self._openScript = item
self._openScript.open = True
self.statusBar().setScript(self._openScript)
self.onModificationChanged(False)
return True
def onModificationChanged(self, isModified=False):
title = 'Strainer'
if self._openScript:
title = f"{title} ({self._openScript.parent().name}: {self._openScript.name}{'*' if isModified else ''})"
self.setWindowTitle(title)
def onParseTimer(self):
self._parseTimer.stop()
self.statusBar().parseScript(bytes(self._editor.bytes(0, self._editor.length())[:-1]))
def closeEvent(self, event):
if self._confirmClose():
settings = QSettings()
settings.setValue('windows/main/splitter-state', self._splitter.saveState())
settings.setValue('windows/main/window-state', self.saveState())
settings.setValue('windows/main/geometry', self.saveGeometry())
event.accept()
else:
event.ignore()
``` |
{
"source": "jmbriody/bookofnumbers",
"score": 3
} |
#### File: bookofnumbers/tests/cdnf_tests.py
```python
import pytest
from collections import namedtuple
from cdnf import *
def test_quin():
# a = qmc(2078)
assert isinstance(canonical("ABC"), ValueError)
assert canonical(2077, True, True) == "f(2077) = AB'CD + A'BC'D' + A'B'CD' + A'B'CD + A'B'C'D'"
Term = namedtuple('Term', 'termset used ones source generation final')
assert quinemc(2078) == "B'CD + A'BC'D' + A'B'D + A'B'C"
assert quinemc(2077) == "B'CD + A'C'D' + A'B'D'"
assert quinemc(12309, True) == "ABC' + A'C'D' + A'B'D'"
assert quinemc(2003) == "B'C' + AB'D' + A'C'D' + A'BC"
assert quinemc(255) == "1"
assert quinemc(0) == "0"
canon_list = ["ABC'D", "A'B'CD'", "ABC'D'", "A'BC'D'", "A'B'C'D'"]
canon_string = "ABC'D + A'B'CD' + ABC'D' + A'BC'D' + A'B'C'D'"
canon_string_error = "ABCD + A'B'D' + ABC'D' + A'BC'D' + A'B'C'D'"
assert quinemc(canon_string) == "ABC' + A'C'D' + A'B'D'"
assert quinemc(canon_list) == "ABC' + A'C'D' + A'B'D'"
assert isinstance(quinemc({"ABC", "A'C"}), ValueError)
assert to_cdnf("B'CD + A'C'D' + A'B'D'") == "AB'CD + A'BC'D' + A'B'CD' + A'B'CD + A'B'C'D'"
assert to_cdnf(["B'CD", "A'C'D'", "A'B'D'"]) == "AB'CD + A'BC'D' + A'B'CD' + A'B'CD + A'B'C'D'"
assert to_cdnf("C + A") == "AC' + AC + A'C"
assert to_cdnf("C + A", 1) == "ABC' + ABC + AB'C' + AB'C + A'BC + A'B'C"
assert to_cdnf("ry + t") == "rty' + rty + rt'y + r'ty' + r'ty"
assert isinstance(to_cdnf(2077), ValueError)
assert isinstance(quinemc(canon_string_error), ValueError)
second_result = ["A'D", "AB'C'", "B'CD'"]
a, b, c = quinemc(2046, True, True)
c1 = c[1]
r = ["".join(sorted(ti.termset)) for ti in c1]
assert set(r) == set(second_result)
assert len(b) == 26
r, s, t = quinemc(743, 1, 1)
assert result_to_int(s) == 743
assert alternatives(s, t)[2] == "B'C'D + A'B'D' + A'BC + A'BD"
assert quinemc([743, [0, 1]]) == "B'C'D + A'CD' + A'BD"
``` |
{
"source": "jmbrooks/data_structures_and_algorithms",
"score": 4
} |
#### File: jmbrooks/data_structures_and_algorithms/coin_game.py
```python
from random import randint
class Coin():
"""Defines the two-sided fair coin."""
def __init__(self):
self.state = None
def __str__(self):
return self.state
def get_coin_state(self):
return self.state
    def flip_coin(self):
        # Record the result so get_coin_state() and __str__ reflect the last flip.
        state = randint(0, 1)
        self.state = 'heads' if state == 1 else 'tails'
        return self.state
class Player():
"""Defines a Player in the coin flip game."""
def __init__(self, name):
self.name = name
self._coin_choice = ''
def __str__(self):
return self.name
def choose_coin(self):
coin_choice = input("Hello, " + self.name + "! Choose heads or tails: ").lower().strip()
self._coin_choice = coin_choice
return coin_choice
def get_coin_choice(self):
return self._coin_choice
def set_coin_choice(self, coin_choice):
self._coin_choice = coin_choice
def has_won(self, winning_coin_choice):
return self._coin_choice == winning_coin_choice
class CoinGame():
"""Defines an iteration of the coin toss game."""
def __init__(self, players):
self.players = players
self.player_one = Player(self.players[0])
self.player_two = Player(self.players[1])
self.coin = Coin()
def choose_first_player(self):
return randint(0, 1)
def start_game(self):
if self.choose_first_player() == 0:
print(self.player_one.name + " will choose the coin side.")
first_player = self.player_one
second_player = self.player_two
else:
print(self.player_two.name + " will choose the coin side.")
first_player = self.player_two
second_player = self.player_one
random_coin = self.coin.flip_coin()
first_coin_choice = first_player.choose_coin()
print("The winner is {}.".format(random_coin))
if first_coin_choice == 'heads':
second_player_choice = second_player.set_coin_choice('tails')
else:
second_player_choice = second_player.set_coin_choice('heads')
print("Player one wins? " + str(first_player.has_won(random_coin)))
print("Player two wins? " + str(second_player.has_won(random_coin)))
if __name__ == '__main__':
game = CoinGame(['james', 'jeff'])
print(game.start_game())
``` |
{
"source": "jmbrooks/dbtea",
"score": 3
} |
#### File: dbtea/dbtea/exceptions.py
```python
from typing import Any, Dict, Optional
import requests
class DbteaException(Exception):
"""Exception related to dbtea configuration, processing or compatibility."""
exit_code = 100
def __init__(self, name: str, title: str, detail: str):
self.type: str = "/errors/" + name
self.title = title
self.detail = detail
def __repr__(self) -> str:
return self.title
def __str__(self) -> str:
return self.title + " " + self.detail
class GitException(DbteaException):
"""Exception related to Git or Git provider configuration or APIs."""
exit_code = 101
def __init__(
self,
name: str,
provider: str,
title: str,
status: int,
detail: str,
response: requests.Response,
):
request: requests.PreparedRequest = response.request
super().__init__("git-errors/" + provider + "/" + name, title, detail)
self.status = status
self.looker_api_response: Optional[dict] = _details_from_http_error(response)
self.request = {"url": request.url, "method": request.method}
def _details_from_http_error(response: requests.Response) -> Optional[Dict[str, Any]]:
""""""
try:
details = response.json()
# Requests raises a ValueError if the response is invalid JSON
except ValueError:
details = None
return details
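# Illustrative usage sketch (the name, title and detail below are made up for
# the example, not real dbtea error codes): DbteaException carries a problem-type
# path derived from its name plus a human-readable title and detail.
if __name__ == "__main__":
    err = DbteaException(
        name="missing-dbt-project",
        title="No dbt_project.yml found.",
        detail="Searched the current directory and its parents.",
    )
    print(err.type)   # -> /errors/missing-dbt-project
    print(err)        # -> title followed by detail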
```
#### File: dbtea/tests/test_utils.py
```python
import os
from pathlib import Path
from dbtea import utils
current_directory = Path(__file__).parent.absolute()
def test_assemble_basic_path():
simple_directory = "/Users/SampleUser/"
simple_file_name = "profiles.yml"
assembled_path = utils.assemble_path(simple_directory, ".dbt", simple_file_name)
assert assembled_path == "/Users/SampleUser/.dbt/profiles.yml"
def test_assemble_windows_path():
root_windows_directory = r"C:\Program Files (x86)\Python"
second_windows_directory = r"root\tests"
file_name = "sample.py"
assembled_path = utils.assemble_path(
root_windows_directory, second_windows_directory, "unit", file_name
)
assert assembled_path == "C:/Program Files (x86)/Python/root/tests/unit/sample.py"
def test_fetch_dbt_basic_project():
dbt_basic_project_path = os.path.join(
current_directory, "resources", "dbt_projects", "basic"
)
project_path = utils.fetch_dbt_project_directory(
custom_project_directory=dbt_basic_project_path
)
assert project_path.endswith("resources/dbt_projects/basic")
``` |
{
"source": "jmbrooks/SimpleProblems",
"score": 4
} |
#### File: jmbrooks/SimpleProblems/sum_pairs_to_value.py
```python
def has_sum_pair(input_list: list, target: int) -> list:
    """Given a list and a desired integer sum, determine whether any pair of elements sums to that value."""
    complement_set = set()
    for number in input_list:
        if (target - number) in complement_set:
            return [True, [number, target - number]]
        complement_set.add(number)
    return [False, []]
input_list = [1, 9, 3, 6, 7, 4, 4]
input_list_2 = [2, 5, 5, 1, 2, 6, 4]
input_list_3 = []
print(has_sum_pair(input_list, 22))
print(has_sum_pair(input_list_2, -5))
print(has_sum_pair(input_list_3, 2))
``` |
{
"source": "jmbrunskill/py-advent-of-code",
"score": 3
} |
#### File: py-advent-of-code/intcode/intcode.py
```python
class intcode():
def __init__(self):
self.instructionPointer = 0
self.memory = []
self.stopped = False
def loadMemory(self, memList):
self.memory = memList.copy()
def loadProgram(self, programString):
numbers = programString.split(",")
self.memory = list(map(int,numbers))
def isStopped(self):
return self.stopped
def add(self,p1_location,p2_location,output_location):
self.memory[output_location] = self.memory[p1_location] + self.memory[p2_location]
self.instructionPointer += 4
return
def mult(self, p1_location, p2_location, output_location):
self.memory[output_location] = self.memory[p1_location] * \
self.memory[p2_location]
self.instructionPointer += 4
return
def stop(self, p1_location, p2_location, output_location):
        # The stop instruction ignores its parameters; they are accepted only so
        # stop() matches the signature used by the dictionary-based dispatch in step()
self.stopped = True
return
def decode(self):
opcode = self.memory[self.instructionPointer]
p1_location = 0
p2_location = 0
p3_location = 0
if opcode != 99:
p1_location = self.memory[self.instructionPointer+1]
p2_location = self.memory[self.instructionPointer+2]
p3_location = self.memory[self.instructionPointer+3]
return opcode,p1_location,p2_location,p3_location
def step(self, opcode, p1_location, p2_location, p3_location):
funcs = {
1: self.add,
2: self.mult,
99: self.stop
}
funcs[opcode](p1_location, p2_location, p3_location)
def execute(self):
while not self.isStopped():
#Interpret the current instruction
#Find the parameters
opcode, p1_location, p2_location, p3_location = self.decode()
#Execute that instruction
self.step(opcode, p1_location, p2_location, p3_location)
#Update instruction Pointer - This happens automatically in each instruction
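# Hedged usage sketch: the classic Advent of Code 2019 day-2 sample program,
# which should leave 3500 in position 0 after execution.
if __name__ == "__main__":
    computer = intcode()
    computer.loadProgram("1,9,10,3,2,3,11,0,99,30,40,50")
    computer.execute()
    print(computer.memory[0])  # 3500 for this sample program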
``` |
{
"source": "JMBurley/pandas",
"score": 2
} |
#### File: tests/frame/test_timeseries.py
```python
from datetime import datetime, time
from itertools import product
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
date_range,
period_range,
to_datetime,
)
import pandas.util.testing as tm
import pandas.tseries.offsets as offsets
@pytest.fixture(params=product([True, False], [True, False]))
def close_open_fixture(request):
return request.param
class TestDataFrameTimeSeriesMethods:
def test_pct_change(self, datetime_frame):
rs = datetime_frame.pct_change(fill_method=None)
tm.assert_frame_equal(rs, datetime_frame / datetime_frame.shift(1) - 1)
rs = datetime_frame.pct_change(2)
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(rs, filled / filled.shift(2) - 1)
rs = datetime_frame.pct_change(fill_method="bfill", limit=1)
filled = datetime_frame.fillna(method="bfill", limit=1)
tm.assert_frame_equal(rs, filled / filled.shift(1) - 1)
rs = datetime_frame.pct_change(freq="5D")
filled = datetime_frame.fillna(method="pad")
tm.assert_frame_equal(
rs, (filled / filled.shift(freq="5D") - 1).reindex_like(filled)
)
def test_pct_change_shift_over_nas(self):
s = Series([1.0, 1.5, np.nan, 2.5, 3.0])
df = DataFrame({"a": s, "b": s})
chg = df.pct_change()
expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])
edf = DataFrame({"a": expected, "b": expected})
tm.assert_frame_equal(chg, edf)
@pytest.mark.parametrize(
"freq, periods, fill_method, limit",
[
("5B", 5, None, None),
("3B", 3, None, None),
("3B", 3, "bfill", None),
("7B", 7, "pad", 1),
("7B", 7, "bfill", 3),
("14B", 14, None, None),
],
)
def test_pct_change_periods_freq(
self, datetime_frame, freq, periods, fill_method, limit
):
# GH 7292
rs_freq = datetime_frame.pct_change(
freq=freq, fill_method=fill_method, limit=limit
)
rs_periods = datetime_frame.pct_change(
periods, fill_method=fill_method, limit=limit
)
tm.assert_frame_equal(rs_freq, rs_periods)
empty_ts = DataFrame(index=datetime_frame.index, columns=datetime_frame.columns)
rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)
rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)
tm.assert_frame_equal(rs_freq, rs_periods)
def test_frame_ctor_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
dates = np.asarray(rng)
df = DataFrame({"A": np.random.randn(len(rng)), "B": dates})
assert np.issubdtype(df["B"].dtype, np.dtype("M8[ns]"))
def test_frame_append_datetime64_column(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert np.issubdtype(df["A"].dtype, np.dtype("M8[ns]"))
def test_frame_datetime64_pre1900_repr(self):
df = DataFrame({"year": date_range("1/1/1700", periods=50, freq="A-DEC")})
# it works!
repr(df)
def test_frame_append_datetime64_col_other_units(self):
n = 100
units = ["h", "m", "s", "ms", "D", "M", "Y"]
ns_dtype = np.dtype("M8[ns]")
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert df[unit].dtype == ns_dtype
assert (df[unit].values == ex_vals).all()
# Test insertion into existing datetime64 column
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view(ns_dtype)
for unit in units:
dtype = np.dtype("M8[{unit}]".format(unit=unit))
vals = np.arange(n, dtype=np.int64).view(dtype)
tmp = df.copy()
tmp["dates"] = vals
ex_vals = to_datetime(vals.astype("O")).values
assert (tmp["dates"].values == ex_vals).all()
def test_asfreq(self, datetime_frame):
offset_monthly = datetime_frame.asfreq(offsets.BMonthEnd())
rule_monthly = datetime_frame.asfreq("BM")
tm.assert_almost_equal(offset_monthly["A"], rule_monthly["A"])
filled = rule_monthly.asfreq("B", method="pad") # noqa
# TODO: actually check that this worked.
# don't forget!
filled_dep = rule_monthly.asfreq("B", method="pad") # noqa
# test does not blow up on length-0 DataFrame
zero_length = datetime_frame.reindex([])
result = zero_length.asfreq("BM")
assert result is not zero_length
def test_asfreq_datetimeindex(self):
df = DataFrame(
{"A": [1, 2, 3]},
index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],
)
df = df.asfreq("B")
assert isinstance(df.index, DatetimeIndex)
ts = df["A"].asfreq("B")
assert isinstance(ts.index, DatetimeIndex)
def test_asfreq_fillvalue(self):
# test for fill value during upsampling, related to issue 3715
# setup
rng = pd.date_range("1/1/2016", periods=10, freq="2S")
ts = pd.Series(np.arange(len(rng)), index=rng)
df = pd.DataFrame({"one": ts})
# insert pre-existing missing value
df.loc["2016-01-01 00:00:08", "one"] = None
actual_df = df.asfreq(freq="1S", fill_value=9.0)
expected_df = df.asfreq(freq="1S").fillna(9.0)
expected_df.loc["2016-01-01 00:00:08", "one"] = None
tm.assert_frame_equal(expected_df, actual_df)
expected_series = ts.asfreq(freq="1S").fillna(9.0)
actual_series = ts.asfreq(freq="1S", fill_value=9.0)
tm.assert_series_equal(expected_series, actual_series)
@pytest.mark.parametrize(
"data,idx,expected_first,expected_last",
[
({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
],
)
def test_first_last_valid(
self, float_frame, data, idx, expected_first, expected_last
):
N = len(float_frame.index)
mat = np.random.randn(N)
mat[:5] = np.nan
mat[-5:] = np.nan
frame = DataFrame({"foo": mat}, index=float_frame.index)
index = frame.first_valid_index()
assert index == frame.index[5]
index = frame.last_valid_index()
assert index == frame.index[-6]
# GH12800
empty = DataFrame()
assert empty.last_valid_index() is None
assert empty.first_valid_index() is None
# GH17400: no valid entries
frame[:] = np.nan
assert frame.last_valid_index() is None
assert frame.first_valid_index() is None
# GH20499: it preserves freq with holes
frame.index = date_range("20110101", periods=N, freq="B")
frame.iloc[1] = 1
frame.iloc[-2] = 1
assert frame.first_valid_index() == frame.index[1]
assert frame.last_valid_index() == frame.index[-2]
assert frame.first_valid_index().freq == frame.index.freq
assert frame.last_valid_index().freq == frame.index.freq
# GH 21441
df = DataFrame(data, index=idx)
assert expected_first == df.first_valid_index()
assert expected_last == df.last_valid_index()
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_first_valid_index_all_nan(self, klass):
# GH#9752 Series/DataFrame should both return None, not raise
obj = klass([np.nan])
assert obj.first_valid_index() is None
assert obj.iloc[:0].first_valid_index() is None
def test_first_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.first("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(freq="D")
result = ts.first("10d")
assert len(result) == 10
result = ts.first("3M")
expected = ts[:"3/31/2000"]
tm.assert_frame_equal(result, expected)
result = ts.first("21D")
expected = ts[:21]
tm.assert_frame_equal(result, expected)
result = ts[:0].first("3M")
tm.assert_frame_equal(result, ts[:0])
def test_first_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.first("1D")
def test_last_subset(self):
ts = tm.makeTimeDataFrame(freq="12h")
result = ts.last("10d")
assert len(result) == 20
ts = tm.makeTimeDataFrame(nper=30, freq="D")
result = ts.last("10d")
assert len(result) == 10
result = ts.last("21D")
expected = ts["2000-01-10":]
tm.assert_frame_equal(result, expected)
result = ts.last("21D")
expected = ts[-21:]
tm.assert_frame_equal(result, expected)
result = ts[:0].last("3M")
tm.assert_frame_equal(result, ts[:0])
def test_last_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.last("1D")
def test_at_time(self):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_frame_equal(result, expected)
result = ts.loc[time(9, 30)]
expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts.at_time(time(0, 0))
tm.assert_frame_equal(result, ts)
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = DataFrame(np.random.randn(len(rng), 2), rng)
rs = ts.at_time("16:00")
assert len(rs) == 0
@pytest.mark.parametrize(
"hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
)
def test_at_time_errors(self, hour):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H")
df = pd.DataFrame(list(range(len(dti))), index=dti)
if getattr(hour, "tzinfo", None) is None:
result = df.at_time(hour)
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(ValueError, match="Index must be timezone"):
df.at_time(hour)
def test_at_time_tz(self):
# GH 24043
dti = pd.date_range("2018", periods=3, freq="H", tz="US/Pacific")
df = pd.DataFrame(list(range(len(dti))), index=dti)
result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
expected = df.iloc[1:2]
tm.assert_frame_equal(result, expected)
def test_at_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.at_time("00:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_at_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
ts.index, ts.columns = rng, rng
indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
if axis in ["index", 0]:
expected = ts.loc[indices, :]
elif axis in ["columns", 1]:
expected = ts.loc[:, indices]
result = ts.at_time("9:30", axis=axis)
tm.assert_frame_equal(result, expected)
def test_between_time(self, close_open_fixture):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(0, 0)
etime = time(1, 0)
inc_start, inc_end = close_open_fixture
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = 13 * 4 + 1
if not inc_start:
exp_len -= 5
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert t >= stime
else:
assert t > stime
if inc_end:
assert t <= etime
else:
assert t < etime
result = ts.between_time("00:00", "01:00")
expected = ts.between_time(stime, etime)
tm.assert_frame_equal(result, expected)
# across midnight
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = DataFrame(np.random.randn(len(rng), 2), index=rng)
stime = time(22, 0)
etime = time(9, 0)
filtered = ts.between_time(stime, etime, inc_start, inc_end)
exp_len = (12 * 11 + 1) * 4 + 1
if not inc_start:
exp_len -= 4
if not inc_end:
exp_len -= 4
assert len(filtered) == exp_len
for rs in filtered.index:
t = rs.time()
if inc_start:
assert (t >= stime) or (t <= etime)
else:
assert (t > stime) or (t <= etime)
if inc_end:
assert (t <= etime) or (t >= stime)
else:
assert (t < etime) or (t >= stime)
def test_between_time_raises(self):
# GH20725
df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])
with pytest.raises(TypeError): # index is not a DatetimeIndex
df.between_time(start_time="00:00", end_time="12:00")
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_between_time_axis(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
ts = DataFrame(np.random.randn(len(rng), len(rng)))
stime, etime = ("08:00:00", "09:00:00")
exp_len = 7
if axis in ["index", 0]:
ts.index = rng
assert len(ts.between_time(stime, etime)) == exp_len
assert len(ts.between_time(stime, etime, axis=0)) == exp_len
if axis in ["columns", 1]:
ts.columns = rng
selected = ts.between_time(stime, etime, axis=1).columns
assert len(selected) == exp_len
@pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
def test_between_time_axis_raises(self, axis):
# issue 8839
rng = date_range("1/1/2000", periods=100, freq="10min")
mask = np.arange(0, len(rng))
rand_data = np.random.randn(len(rng), len(rng))
ts = DataFrame(rand_data, index=rng, columns=rng)
stime, etime = ("08:00:00", "09:00:00")
msg = "Index must be DatetimeIndex"
if axis in ["columns", 1]:
ts.index = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime)
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=0)
if axis in ["index", 0]:
ts.columns = mask
with pytest.raises(TypeError, match=msg):
ts.between_time(stime, etime, axis=1)
def test_operation_on_NaT(self):
# Both NaT and Timestamp are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT, pd.Timestamp("2012-05-01")]})
res = df.min()
exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.Timestamp("2012-05-01")], index=["foo"])
tm.assert_series_equal(res, exp)
# GH12941, only NaTs are in DataFrame.
df = pd.DataFrame({"foo": [pd.NaT, pd.NaT]})
res = df.min()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
res = df.max()
exp = pd.Series([pd.NaT], index=["foo"])
tm.assert_series_equal(res, exp)
def test_datetime_assignment_with_NaT_and_diff_time_units(self):
# GH 7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = pd.Series(data_ns).to_frame()
result["new"] = data_ns
expected = pd.DataFrame(
{0: [1, None], "new": [1, None]}, dtype="datetime64[ns]"
)
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = pd.DataFrame(
{0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]"
)
tm.assert_frame_equal(result, expected)
def test_frame_to_period(self):
K = 5
dr = date_range("1/1/2000", "1/1/2001")
pr = period_range("1/1/2000", "1/1/2001")
df = DataFrame(np.random.randn(len(dr), K), index=dr)
df["mix"] = "a"
pts = df.to_period()
exp = df.copy()
exp.index = pr
tm.assert_frame_equal(pts, exp)
pts = df.to_period("M")
tm.assert_index_equal(pts.index, exp.index.asfreq("M"))
df = df.T
pts = df.to_period(axis=1)
exp = df.copy()
exp.columns = pr
tm.assert_frame_equal(pts, exp)
pts = df.to_period("M", axis=1)
tm.assert_index_equal(pts.columns, exp.columns.asfreq("M"))
msg = "No axis named 2 for object type <class 'pandas.core.frame.DataFrame'>"
with pytest.raises(ValueError, match=msg):
df.to_period(axis=2)
@pytest.mark.parametrize("fn", ["tz_localize", "tz_convert"])
def test_tz_convert_and_localize(self, fn):
l0 = date_range("20140701", periods=5, freq="D")
l1 = date_range("20140701", periods=5, freq="D")
int_idx = Index(range(5))
if fn == "tz_convert":
l0 = l0.tz_localize("UTC")
l1 = l1.tz_localize("UTC")
for idx in [l0, l1]:
l0_expected = getattr(idx, fn)("US/Pacific")
l1_expected = getattr(idx, fn)("US/Pacific")
df1 = DataFrame(np.ones(5), index=l0)
df1 = getattr(df1, fn)("US/Pacific")
tm.assert_index_equal(df1.index, l0_expected)
# MultiIndex
# GH7846
df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))
df3 = getattr(df2, fn)("US/Pacific", level=0)
assert not df3.index.levels[0].equals(l0)
tm.assert_index_equal(df3.index.levels[0], l0_expected)
tm.assert_index_equal(df3.index.levels[1], l1)
assert not df3.index.levels[1].equals(l1_expected)
df3 = getattr(df2, fn)("US/Pacific", level=1)
tm.assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
tm.assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))
# TODO: untested
df5 = getattr(df4, fn)("US/Pacific", level=1) # noqa
tm.assert_index_equal(df3.index.levels[0], l0)
assert not df3.index.levels[0].equals(l0_expected)
tm.assert_index_equal(df3.index.levels[1], l1_expected)
assert not df3.index.levels[1].equals(l1)
# Bad Inputs
# Not DatetimeIndex / PeriodIndex
with pytest.raises(TypeError, match="DatetimeIndex"):
df = DataFrame(index=int_idx)
df = getattr(df, fn)("US/Pacific")
# Not DatetimeIndex / PeriodIndex
with pytest.raises(TypeError, match="DatetimeIndex"):
df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))
df = getattr(df, fn)("US/Pacific", level=0)
# Invalid level
with pytest.raises(ValueError, match="not valid"):
df = DataFrame(index=l0)
df = getattr(df, fn)("US/Pacific", level=1)
``` |
{
"source": "jmbyun/urobot",
"score": 3
} |
#### File: jmbyun/urobot/helper.py
```python
from .urobot.direction import Direction
from .urobot.drawer import Drawer
from .urobot.piece import Piece
from .urobot.position import Position
from .urobot.robot import Robot as GeneralRobot
from .urobot.wall import Wall
from .urobot.world import World, load_world_from_save
import json
def create_world(**kwargs):
global __urobots__
__urobots__ = {}
__urobots__['world'] = World(**kwargs)
def load_world(file_path, drawer=None):
global __urobots__
with open(file_path, 'r') as world_file:
world_save = json.loads(world_file.read())
__urobots__ = {}
__urobots__['world'] = load_world_from_save(world_save)
class Robot(GeneralRobot):
def __init__(self, **kwargs):
global __urobots__
super().__init__(**kwargs)
__urobots__['world'].add_piece(self)
__all__ = [
'create_world',
'load_world',
'Robot'
]
```
#### File: jmbyun/urobot/test.py
```python
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import json
from urobot.helper import *
from urobot import Beeper, World, Wall, Position, JsonDrawer
def turn_right():
for i in range(3):
robot.turn_left()
world = World(walls=[Wall(Position(1, 0), Position(2, 0))], drawer=JsonDrawer())
beeper = Beeper()
world.add_piece(beeper)
f = open('temp_world.json', 'w')
f.write(json.dumps(world.to_save()))
f.close()
# create_world(walls=[Wall(Position(1, 0), Position(2, 0))])
load_world('temp_world.json')
robot = Robot(beepers=3)
robot.set_pause(0.5)
robot.set_trace('#222')
robot.move()
robot.drop_beeper()
robot.drop_beeper()
robot.turn_left()
robot.move()
robot.drop_beeper()
turn_right()
robot.move()
turn_right()
robot.move()
robot.turn_left()
for i in range(10):
robot.move()
```
#### File: urobot/urobot/position.py
```python
def load_position_from_save(position_save):
return Position(position_save['x'], position_save['y'])
class Position(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __ne__(self, other):
return not self == other
def __add__(self, other):
return Position(self.x + other.x, self.y + other.y)
def clone(self):
return Position(self.x, self.y)
def to_list(self):
return [self.x, self.y]
def to_save(self):
return {
'type': 'position',
'x': self.x,
'y': self.y
}
``` |
{
"source": "jmc0/Classcial_Cipher_Website",
"score": 2
} |
#### File: Classcial_Cipher_Website/backend/app.py
```python
from flask import Flask
from flask_cors import CORS
from routes import configure_routes
import config
def create_app(config=config):
app = Flask(__name__)
app.config['SECRET_KEY'] = config.SECRET_KEY
app.config['PERMANENT_SESSION_LIFETIME'] = config.PERMANENT_SESSION_LIFETIME
configure_routes(app)
return app
app = create_app()
CORS(app)
if __name__ == '__main__':
app.run()
``` |
{
"source": "JMC110/License-Plate-Recognition-YOLO-Project",
"score": 3
} |
#### File: LicensePlateDetector/dl_prediction/predictor.py
```python
import cv2 as cv
import tensorflow as tf
import numpy as np
class Predictor:
# Darknet and CNN Parameters
confidence_threshold = 0.1 # Confidence threshold
nms_threshold = 0.6 # Non-maximum suppression threshold
yolo_net_width = 416
yolo_net_height = 416
# Load all models and configs
plates_yolo_config = "dl_prediction/config/yolov3_plates.cfg"
plates_yolo_weights = "dl_prediction/models/yolov3_plates_final.weights"
plates_classes = ['Plate']
chars_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
'H', 'I', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
chars_yolo_config = "dl_prediction/config/yolov3_chars.cfg"
chars_yolo_weights = "dl_prediction/models/yolov3_chars_final.weights"
def __init__(self):
self.plates_yolo_net = self.get_yolo_net(self.plates_yolo_config, self.plates_yolo_weights)
self.chars_yolo_net = self.get_yolo_net(self.chars_yolo_config, self.chars_yolo_weights)
self.cnn_chars_model = tf.keras.models.load_model('dl_prediction/models/cnn_chars_recognition.h5')
@staticmethod
def get_yolo_net(config, weights):
yolo_net = cv.dnn.readNetFromDarknet(config, weights)
yolo_net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
yolo_net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
return yolo_net
@staticmethod
def process_license_plate(license_plate):
gray = cv.cvtColor(license_plate, cv.COLOR_BGR2GRAY)
ret, thresh = cv.threshold(gray, 127, 255, 0)
contours, _ = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
areas = [cv.contourArea(c) for c in contours]
if len(areas) != 0:
max_index = np.argmax(areas)
cnt = contours[max_index]
x, y, w, h = cv.boundingRect(cnt)
cv.rectangle(license_plate, (x, y), (x + w, y + h), (0, 255, 0), 2)
processed_license_plate = license_plate[y: y + h, x: x + w]
else:
processed_license_plate = license_plate
return processed_license_plate
@staticmethod
def draw_pred(frame, name, conf, left, top, right, bottom, color=(0, 255, 0)):
cv.rectangle(frame, (left, top), (right, bottom), color, 3)
# label = '{}:{}'.format(name, '%.2f' % conf)
label = name
label_size, base_line = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
top = max(top, label_size[1])
cv.rectangle(frame, (left, top - round(1.5 * label_size[1])),
(left + round(1.5 * label_size[0]), top + base_line), (0, 0, 255), cv.FILLED)
cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 1)
def predict_boxes(self, frame, yolo_outputs, is_license_plate=True):
classes = []
confidences = []
boxes = []
max_confidence = 0.0
for output in yolo_outputs:
for prediction in output:
scores = prediction[5:]
class_id = np.argmax(scores)
confidence = scores[class_id]
max_confidence = max(confidence, max_confidence)
if confidence > self.confidence_threshold:
center_x = int(prediction[0] * frame.shape[1])
center_y = int(prediction[1] * frame.shape[0])
width = int(prediction[2] * frame.shape[1])
height = int(prediction[3] * frame.shape[0])
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classes.append(class_id)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
indices = cv.dnn.NMSBoxes(boxes, confidences, self.confidence_threshold, self.nms_threshold)
positions = []
chars = []
for index in indices:
index = index[0]
box = boxes[index]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
positions.append(left)
if is_license_plate and max_confidence == confidences[index]:
# Draw prediction rectangle for License Plate
license_plate = frame[top: top + height, left: left + width]
cv.rectangle(frame, (left, top), (left + width, top + height), (0, 255, 0), 3)
# cv.imshow('License Plate', license_plate)
# cv.imwrite('./test_bha.jpg', license_plate.astype(np.uint8))
# Process Licence plate to cover to Gray and to enhance contours
processed_license_plate = self.process_license_plate(license_plate)
# cv.imshow('Processed License Plate', license_plate)
# cv.imwrite('./test_bha2.jpg', processed_license_plate.astype(np.uint8))
self.draw_pred(frame, self.plates_classes[0], confidences[index], left, top, left + width, top + height)
return "", processed_license_plate
else:
char = self.chars_classes[classes[index]]
chars.append(char)
self.draw_pred(frame, char, confidences[index], left, top, left + width, top + height, color=(90, 0, 255))
sorted_chars = [x for _, x in sorted(zip(positions, chars))]
return "".join(sorted_chars), frame
def cnn_char_recognition(self, image):
gray_char = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
gray_char = cv.resize(gray_char, (75, 100))
image = gray_char.reshape((1, 100, 75, 1))
image = image / 255.0
predictions = self.cnn_chars_model.predict(image)
max_confidence_index = np.argmax(predictions)
return self.chars_classes[max_confidence_index]
def canny(self, image, sigma=0.33):
lower = int(max(0, (1.0 - sigma) * np.median(image)))
upper = int(min(255, (1.0 + sigma) * np.median(image)))
edges = cv.Canny(image, lower, upper)
return edges
def cnn_recognize_plate(self, frame):
gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
thresh_inv = cv.adaptiveThreshold(gray, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY_INV, 39, 1)
edges = self.canny(thresh_inv)
contours, _ = cv.findContours(edges.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
sorted_ctrs = sorted(contours, key=lambda x: cv.boundingRect(x)[0])
area = frame.shape[0] * frame.shape[1]
chars = []
for i, ctr in enumerate(sorted_ctrs):
x, y, w, h = cv.boundingRect(ctr)
roi_area = w * h
non_max_sup = roi_area / area
if (non_max_sup >= 0.015) and (non_max_sup < 0.09):
if (h > 1.2 * w) and (3 * w >= h):
char = frame[y:y + h, x:x + w]
chars.append(self.cnn_char_recognition(char))
cv.rectangle(frame, (x, y), (x + w, y + h), (90, 0, 255), 2)
licensePlate = "".join(chars)
return licensePlate
@staticmethod
def resize_license_plate(license_plate):
scale_percent = 300 # percent of original size
width = int(license_plate.shape[1] * scale_percent / 100)
height = int(license_plate.shape[0] * scale_percent / 100)
return cv.resize(license_plate, (width, height), interpolation=cv.INTER_AREA)
def get_image_blob(self, image):
return cv.dnn.blobFromImage(image, 1 / 255, (self.yolo_net_width, self.yolo_net_height), [0, 0, 0], 1,
crop=False)
def predict(self, input_path, output_car_path, output_license_path_original, output_license_path, video_path=None, is_cnn=False, is_image=True):
vc = cv.VideoCapture(input_path)
FPS = 2
# Limit number of frames per sec
vc.set(cv.CAP_PROP_FPS, FPS)
if not is_image:
vid_writer = cv.VideoWriter(video_path, cv.VideoWriter_fourcc('H','2','6','4'), 30, (
round(vc.get(cv.CAP_PROP_FRAME_WIDTH)), round(vc.get(cv.CAP_PROP_FRAME_HEIGHT))))
# Read Frames from the file if video, else read first frame from image
while cv.waitKey(1) < 0:
exists, frame = vc.read()
if not exists:
cv.waitKey(2000)
print("End of frames")
if not is_image:  # the video writer only exists in video mode
vid_writer.release()
break
# DETECT LICENSE PLATE
car_image_blob = self.get_image_blob(frame)
# Feed the input image to the Yolo Network
self.plates_yolo_net.setInput(car_image_blob)
# Get All Unconnected Yolo layers
plates_yolo_layers = [self.plates_yolo_net.getLayerNames()[i[0] - 1] for i in
self.plates_yolo_net.getUnconnectedOutLayers()]
# Forward pass the input to yolov3 net and get outputs
plates_output = self.plates_yolo_net.forward(plates_yolo_layers)
# Remove the bounding boxes with low confidence and draw box for license plate
license_num, processed_license_plate = self.predict_boxes(frame, plates_output)
if is_image:
cv.imwrite(output_license_path_original, processed_license_plate.astype(np.uint8))
if not is_cnn:
# IDENTIFY LICENSE PLATE NUMBER USING YOLOV3
# Resize the license plate so we can feed it to the second trained yolo network
resized_license_plate = self.resize_license_plate(processed_license_plate)
license_plate_image_blob = self.get_image_blob(resized_license_plate)
# license_plate_image_blob = np.reshape(license_plate_image_blob, (1, 3, yolo_net_width,
# yolo_net_height))
# Feed the input image to the Yolo Network
self.chars_yolo_net.setInput(license_plate_image_blob)
# Get All Unconnected Yolo layers
chars_yolo_layers = [self.chars_yolo_net.getLayerNames()[i[0] - 1] for i in
self.chars_yolo_net.getUnconnectedOutLayers()]
# Forward pass the input to yolov3 net and get outputs
chars_output = self.chars_yolo_net.forward(chars_yolo_layers)
license_number, processed_license_plate = self.predict_boxes(processed_license_plate, chars_output, is_license_plate=False)
print(license_number)
elif is_cnn:
# IDENTIFY LICENSE PLATE NUMBER USING CNN
license_number = self.cnn_recognize_plate(processed_license_plate)
if is_image:
cv.imwrite(output_license_path, processed_license_plate.astype(np.uint8))
cv.imwrite(output_car_path, frame.astype(np.uint8))
return license_number
else:
vid_writer.write(frame.astype(np.uint8))
if __name__ == "__main__":
Predictor()
# license = Predictor().predict('/Users/moni/projects/LicensePlateDetector/m657_1.jpg', False)
# print(license)
```
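A hedged usage sketch for the detector above; the input and output paths are illustrative, and it assumes the YOLO config/weight files and the CNN model referenced in the class attributes exist on disk:
```python
predictor = Predictor()
plate_text = predictor.predict(
    input_path="samples/car.jpg",
    output_car_path="out/car_annotated.jpg",
    output_license_path_original="out/plate_raw.jpg",
    output_license_path="out/plate_annotated.jpg",
    is_cnn=False,   # True would use the CNN character recognizer instead of YOLO
    is_image=True,
)
print(plate_text)
```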
#### File: tfx_airflow/dags/license_plate_utils.py
```python
import absl
import tensorflow as tf
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow.keras import layers, models
from tensorflow.keras.optimizers import Adam
_IMAGE_KEY = 'img_raw'
_LABEL_KEY = 'label'
channel = 1
height = 100
width = 75
def _transformed_name(key):
return key + '_xf'
def _image_parser(image_str):
image = tf.image.decode_image(image_str, channels=3)
image = tf.image.convert_image_dtype(image, tf.float32)
image = tf.reshape(image, (28400, 100, 75, 1))
image = tf.cast(image, tf.float32) / 255.0
return image
def _label_parser(label_id):
label = tf.one_hot(label_id, 35, dtype=tf.int64)
return label
def preprocessing_fn(inputs):
outputs = {_transformed_name(_IMAGE_KEY): tf.compat.v2.map_fn(_image_parser, tf.squeeze(inputs[_IMAGE_KEY], axis=1),
dtype=tf.float32),
_transformed_name(_LABEL_KEY): tf.compat.v2.map_fn(_label_parser, tf.squeeze(inputs[_LABEL_KEY], axis=1),
dtype=tf.int64)
}
return outputs
# CNN model to predict characters
def _model_builder():
cnn_model = models.Sequential()
cnn_model.add(layers.Conv2D(32, (3, 3), padding = 'same', activation = 'relu', input_shape = (100, 75, 1)))
cnn_model.add(layers.MaxPooling2D((2, 2)))
cnn_model.add(layers.Conv2D(64, (3, 3), activation = 'relu'))
cnn_model.add(layers.MaxPooling2D((2, 2)))
cnn_model.add(layers.Conv2D(128, (3, 3), activation = 'relu'))
cnn_model.add(layers.MaxPooling2D((2, 2)))
cnn_model.add(layers.Dense(128, activation = 'relu'))
cnn_model.add(layers.Flatten())
# 0-9 and A-Z => 10 + 25 = 35 classes -- the letter O is ignored.
cnn_model.add(layers.Dense(35, activation = 'softmax'))
opt = Adam(lr=0.001)
cnn_model.compile(optimizer=opt, loss=tf.keras.losses.sparse_categorical_crossentropy, metrics=['accuracy'])
absl.logging.info(cnn_model.summary())
return cnn_model
def _serving_input_receiver_fn(tf_transform_output):
raw_feature_spec = tf_transform_output.raw_feature_spec()
raw_feature_spec.pop(_LABEL_KEY)
raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(raw_feature_spec,
default_batch_size=None)
serving_input_receiver = raw_input_fn()
transformed_features = tf_transform_output.transform_raw_features(serving_input_receiver.features)
transformed_features.pop(_transformed_name(_LABEL_KEY))
return tf.estimator.export.ServingInputReceiver(transformed_features, serving_input_receiver.receiver_tensors)
def _eval_input_receiver_fn(tf_transform_output):
raw_feature_spec = tf_transform_output.raw_feature_spec()
raw_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(raw_feature_spec,
default_batch_size=None)
serving_input_receiver = raw_input_fn()
transformed_features = tf_transform_output.transform_raw_features(serving_input_receiver.features)
transformed_labels = transformed_features.pop(_transformed_name(_LABEL_KEY))
return tfma.export.EvalInputReceiver(features=transformed_features, labels=transformed_labels,
receiver_tensors=serving_input_receiver.receiver_tensors)
def _input_fn(filenames, tf_transform_output, batch_size):
transformed_feature_spec = (tf_transform_output.transformed_feature_spec().copy())
dataset = tf.data.experimental.make_batched_features_dataset(filenames, batch_size, transformed_feature_spec)
return dataset.map(lambda features: (features, features.pop(_transformed_name(_LABEL_KEY))))
def trainer_fn(trainer_fn_args, schema): # pylint: disable=unused-argument
train_batch_size = 32
eval_batch_size = 32
tf_transform_output = tft.TFTransformOutput(trainer_fn_args.transform_output)
train_input_fn = lambda: _input_fn(trainer_fn_args.train_files, tf_transform_output, batch_size=train_batch_size)
eval_input_fn = lambda: _input_fn(trainer_fn_args.eval_files, tf_transform_output, batch_size=eval_batch_size)
train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=trainer_fn_args.train_steps)
serving_receiver_fn = lambda: _serving_input_receiver_fn(tf_transform_output)
exporter = tf.estimator.FinalExporter('license_plate', serving_receiver_fn)
eval_spec = tf.estimator.EvalSpec(eval_input_fn, steps=trainer_fn_args.eval_steps, exporters=[exporter],
name='license_plate')
run_config = tf.estimator.RunConfig(save_checkpoints_steps=999, keep_checkpoint_max=1)
run_config = run_config.replace(model_dir=trainer_fn_args.serving_model_dir)
estimator = tf.keras.estimator.model_to_estimator(keras_model=_model_builder(), config=run_config)
eval_receiver_fn = lambda: _eval_input_receiver_fn(tf_transform_output)
return {
'estimator': estimator,
'train_spec': train_spec,
'eval_spec': eval_spec,
'eval_input_receiver_fn': eval_receiver_fn
}
``` |
{
"source": "jmc1283/pwndbg",
"score": 3
} |
#### File: pwndbg/pwndbg/chain.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gdb
import pwndbg.abi
import pwndbg.color.chain as C
import pwndbg.color.memory as M
import pwndbg.color.theme as theme
import pwndbg.enhance
import pwndbg.memory
import pwndbg.symbol
import pwndbg.typeinfo
import pwndbg.vmmap
LIMIT = pwndbg.config.Parameter('dereference-limit', 5, 'max number of pointers to dereference in a chain')
def get(address, limit=LIMIT, offset=0, hard_stop=None, hard_end=0, include_start=True):
"""
Recursively dereferences an address. For bare metal, it will stop when the address is not in any of vmmap pages to avoid redundant dereference.
Arguments:
address(int): the first address to begin dereferencing
limit(int): number of valid pointers
offset(int): offset into the address to get the next pointer
hard_stop(int): address to stop at
hard_end: value to append when hard_stop is reached
include_start(bool): whether to include starting address or not
Returns:
A list representing pointers of each ```address``` and reference
"""
limit = int(limit)
result = [address] if include_start else []
for i in range(limit):
# Don't follow cycles, except to stop at the second occurrence.
if result.count(address) >= 2:
break
if hard_stop is not None and address == hard_stop:
result.append(hard_end)
break
try:
address = address + offset
# Avoid redundant dereferences in bare metal mode by checking
# if address is in any of vmmap pages
if not pwndbg.abi.linux and not pwndbg.vmmap.find(address):
break
address = int(pwndbg.memory.poi(pwndbg.typeinfo.ppvoid, address))
address &= pwndbg.arch.ptrmask
result.append(address)
except gdb.MemoryError:
break
return result
config_arrow_left = theme.Parameter('chain-arrow-left', '◂—', 'left arrow of chain formatting')
config_arrow_right = theme.Parameter('chain-arrow-right', '—▸', 'right arrow of chain formatting')
config_contiguous = theme.Parameter('chain-contiguous-marker', '...', 'contiguous marker of chain formatting')
def format(value, limit=LIMIT, code=True, offset=0, hard_stop=None, hard_end=0):
"""
Recursively dereferences an address into string representation, or convert the list representation
of address dereferences into string representation.
Arguments:
value(int|list): Either the starting address to be sent to get, or the result of get (a list)
limit(int): Number of valid pointers
code(bool): Hint that indicates the value may be an instruction
offset(int): Offset into the address to get the next pointer
hard_stop(int): Value to stop on
hard_end: Value to append when hard_stop is reached: null, value of hard stop, a string.
Returns:
A string representing pointers of each address and reference
Strings format: 0x0804a10 —▸ 0x08061000 ◂— 0x41414141
"""
limit = int(limit)
# Allow results from get function to be passed to format
if isinstance(value, list):
chain = value
else:
chain = get(value, limit, offset, hard_stop, hard_end)
arrow_left = C.arrow(' %s ' % config_arrow_left)
arrow_right = C.arrow(' %s ' % config_arrow_right)
# Colorize the chain
rest = []
for link in chain:
symbol = pwndbg.symbol.get(link) or None
if symbol:
symbol = '%#x (%s)' % (link, symbol)
rest.append(M.get(link, symbol))
# If the dereference limit is zero, skip any enhancements.
if limit == 0:
return rest[0]
# Otherwise replace last element with the enhanced information.
rest = rest[:-1]
# Enhance the last entry
# If there are no pointers (e.g. eax = 0x41414141), then enhance
# the only element there is.
if len(chain) == 1:
enhanced = pwndbg.enhance.enhance(chain[-1], code=code)
# Otherwise, the last element in the chain is the non-pointer value.
# We want to enhance the last pointer value. If an offset was used,
# the chain failed at that offset, so display that offset.
elif len(chain) < limit + 1:
enhanced = pwndbg.enhance.enhance(chain[-2] + offset, code=code)
else:
enhanced = C.contiguous('%s' % config_contiguous)
if len(chain) == 1:
return enhanced
return arrow_right.join(rest) + arrow_left + enhanced
```
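A hedged sketch of driving the two helpers above from a pwndbg-enabled GDB session with a live target; the address is purely illustrative:
```python
import pwndbg.chain

addr = 0x0804A010                        # hypothetical pointer into the inferior
links = pwndbg.chain.get(addr, limit=3)  # list of dereferenced addresses
print(pwndbg.chain.format(links))        # e.g. 0x804a010 —▸ 0x8061000 ◂— 0x41414141
```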
#### File: pwndbg/pwndbg/funcparser.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
from pycparser import CParser
from pycparser import c_ast
def extractTypeAndName(n, defaultName=None):
if isinstance(n, c_ast.EllipsisParam):
return ('int', 0, 'vararg')
t = n.type
d = 0
while isinstance(t, c_ast.PtrDecl) or isinstance(t, c_ast.ArrayDecl):
d += 1
children = dict(t.children())
t = children['type']
if isinstance(t, c_ast.FuncDecl):
return extractTypeAndName(t)
if isinstance(t.type, c_ast.Struct) \
or isinstance(t.type, c_ast.Union) \
or isinstance(t.type, c_ast.Enum):
typename = t.type.name
else:
typename = t.type.names[0]
if typename == 'void' and d == 0 and not t.declname:
return None
name = t.declname or defaultName or ''
return typename.lstrip('_'),d,name.lstrip('_')
Function = collections.namedtuple('Function', ('type', 'derefcnt', 'name', 'args'))
Argument = collections.namedtuple('Argument', ('type', 'derefcnt', 'name'))
def Stringify(X):
return '%s %s %s' % (X.type, X.derefcnt * '*', X.name)
def ExtractFuncDecl(node, verbose=False):
# The function name needs to be dereferenced.
ftype, fderef, fname = extractTypeAndName(node)
if not fname:
print("Skipping function without a name!")
print(node.show())
return
fargs = []
for i, (argName, arg) in enumerate(node.args.children()):
defname = 'arg%i' % i
argdata = extractTypeAndName(arg, defname)
if argdata is not None:
a = Argument(*argdata)
fargs.append(a)
Func = Function(ftype, fderef, fname, fargs)
if verbose:
print(Stringify(Func) + '(' + ','.join(Stringify(a) for a in Func.args) + ');')
return Func
def ExtractAllFuncDecls(ast, verbose=False):
Functions = {}
class FuncDefVisitor(c_ast.NodeVisitor):
def visit_FuncDecl(self, node, *a):
f = ExtractFuncDecl(node, verbose)
Functions[f.name] = f
FuncDefVisitor().visit(ast)
return Functions
def ExtractFuncDeclFromSource(source):
try:
p = CParser()
ast = p.parse(source + ';')
funcs = ExtractAllFuncDecls(ast)
for name, func in funcs.items():
return func
except Exception as e:
import traceback
traceback.print_exc()
# eat it
```
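A minimal, hedged example of what the parser above returns for a simple C prototype (it assumes pycparser is installed and that the helpers in this file are importable):
```python
decl = ExtractFuncDeclFromSource("int add(int a, int b)")
print(decl.name, decl.type, decl.derefcnt)    # add int 0
print([(a.type, a.name) for a in decl.args])  # [('int', 'a'), ('int', 'b')]
```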
#### File: pwndbg/gdbutils/functions.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import gdb
import pwndbg.proc
functions = []
def GdbFunction(only_when_running=False):
return functools.partial(_GdbFunction, only_when_running=only_when_running)
class _GdbFunction(gdb.Function):
def __init__(self, func, only_when_running):
self.name = func.__name__
self.func = func
self.only_when_running = only_when_running
functions.append(self)
super(_GdbFunction, self).__init__(self.name)
functools.update_wrapper(self, func)
self.__doc__ = func.__doc__
def invoke(self, *args):
if self.only_when_running and not pwndbg.proc.alive:
# Returning an empty string is a workaround for the fact that we can't stop e.g. `break *$rebase(offset)`
# Thanks to that, gdb will print 'evaluation of this expression requires the target program to be active'
return ''
return self.func(*args)
def __call__(self, *args):
return self.invoke(*args)
@GdbFunction(only_when_running=True)
def rebase(addr):
"""Return rebased address."""
base = pwndbg.elf.exe().address
return base + int(addr)
```
#### File: pwndbg/heap/heap.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pwndbg.events
import pwndbg.symbol
class BaseHeap(object):
"""Heap abstraction layer."""
def breakpoint(event):
"""Enables breakpoints on the specific event.
Arguments:
event(str): One of 'alloc','realloc','free'
Returns:
A gdb.Breakpoint object.
"""
raise NotImplementedError()
def summarize(address, **kwargs):
"""Returns a textual summary of the specified address.
Arguments:
address(int): Address of the heap block to summarize.
Returns:
A string.
"""
raise NotImplementedError()
def containing(address):
"""Returns the address of the allocation which contains 'address'.
Arguments:
address(int): Address to look up.
Returns:
An integer.
"""
raise NotImplementedError()
def is_initialized(self):
"""Returns whether the allocator is initialized or not.
Returns:
A boolean.
"""
raise NotImplementedError()
def libc_has_debug_syms(self):
"""Returns whether the libc has debug symbols or not.
Returns:
A boolean.
"""
raise NotImplementedError()
```
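A hedged sketch of how a concrete allocator might fill in the abstraction above; the method bodies are illustrative placeholders, not pwndbg's real glibc heap implementation:
```python
class ToyHeap(BaseHeap):
    def summarize(self, address, **kwargs):
        return "chunk @ %#x" % address

    def containing(self, address):
        return address & ~0xF  # pretend allocations are 16-byte aligned

    def is_initialized(self):
        return True

    def libc_has_debug_syms(self):
        return False
```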
#### File: tests/binaries/__init__.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from . import old_bash
path = os.path.dirname(__file__)
def get(x):
return os.path.join(path, x)
```
#### File: pwndbg/tests/test_go.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gdb
import tests
GOSAMPLE_X64 = tests.binaries.get('gosample.x64')
GOSAMPLE_X86 = tests.binaries.get('gosample.x86')
def test_typeinfo_go_x64(start_binary):
"""
Tests pwndbg's typeinfo knows about the Go x64 types.
Catches: Python Exception <class 'gdb.error'> No type named u8.:
Test catches the issue only if the binaries are not stripped.
"""
gdb.execute('file ' + GOSAMPLE_X64)
start = gdb.execute('start', to_string=True)
assert 'Python Exception' not in start
def test_typeinfo_go_x86(start_binary):
"""
Tests pwndbg's typeinfo knows about the Go x32 types
Catches: Python Exception <class 'gdb.error'> No type named u8.:
Test catches the issue only if the binaries are not stripped.
"""
gdb.execute('file ' + GOSAMPLE_X86)
start = gdb.execute('start', to_string=True)
assert 'Python Exception' not in start
``` |
{
"source": "jmc529/BentoML",
"score": 2
} |
#### File: _internal/adapters/tensorflow_tensor_output.py
```python
import json
from typing import Sequence
from ..adapters.base_output import regroup_return_value
from ..adapters.json_output import JsonOutput
from ..adapters.utils import TfTensorJsonEncoder
from ..types import InferenceError, InferenceResult, InferenceTask
from ..utils.lazy_loader import LazyLoader
np = LazyLoader("np", globals(), "numpy")
def tf_to_numpy(tensor):
"""
Tensor -> ndarray
List[Tensor] -> tuple[ndarray]
"""
import tensorflow as tf
if isinstance(tensor, (list, tuple)):
return tuple(tf_to_numpy(t) for t in tensor)
if tf.__version__.startswith("1."):
with tf.compat.v1.Session():
return tensor.numpy()
else:
return tensor.numpy()
class TfTensorOutput(JsonOutput):
"""
Output adapter that converts the return value of a user-defined API function into a specific output,
such as an HTTP response, command-line stdout or an AWS Lambda event object.
Args:
cors (str): DEPRECATED. Moved to the configuration file.
The value of the Access-Control-Allow-Origin header set in the
HTTP/AWS Lambda response object. If set to None, the header will not be set.
Default is None.
ensure_ascii(bool): Escape all non-ASCII characters. Default False.
"""
BATCH_MODE_SUPPORTED = True
@property
def pip_dependencies(self):
"""
:return: List of PyPI package names required by this OutputAdapter
"""
return ["tensorflow"]
def pack_user_func_return_value(
self, return_result, tasks: Sequence[InferenceTask],
) -> Sequence[InferenceResult[str]]:
rv = []
results = tf_to_numpy(return_result)
for result, _ in regroup_return_value(results, tasks):
try:
result_str = json.dumps(result, cls=TfTensorJsonEncoder)
rv.append(InferenceResult(data=result_str, http_status=200))
except Exception as e: # pylint: disable=broad-except
rv.append(InferenceError(err_msg=str(e), http_status=500))
return rv
```
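A hedged sketch of the `tf_to_numpy` contract described in its docstring, assuming TensorFlow 2.x in eager mode and that the helper above is importable:
```python
import tensorflow as tf

single = tf.constant([[1.0, 2.0]])
pair = [tf.constant([1, 2]), tf.constant([3, 4])]
print(type(tf_to_numpy(single)).__name__)  # ndarray
print(len(tf_to_numpy(pair)))              # 2, a tuple of ndarrays
```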
#### File: server/marshal/dispatcher.py
```python
import asyncio
import collections
import functools
import logging
import time
import traceback
from typing import Callable
import numpy as np
from bentoml.utils import cached_property
from bentoml.utils.alg import TokenBucket
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
class NonBlockSema:
def __init__(self, count):
self.sema = count
def acquire(self):
if self.sema < 1:
return False
self.sema -= 1
return True
def is_locked(self):
return self.sema < 1
def release(self):
self.sema += 1
class Optimizer:
"""
Analyse historical data to optimize CorkDispatcher.
"""
N_KEPT_SAMPLE = 50 # amount of outbound info kept for inferring params
N_SKIPPED_SAMPLE = 2 # amount of outbound info skipped after init
INTERVAL_REFRESH_PARAMS = 5 # seconds between each params refreshing
def __init__(self):
"""
assume the outbound duration follows duration = o_a * n + o_b
(all in seconds)
"""
self.o_stat = collections.deque(
maxlen=self.N_KEPT_SAMPLE
) # to store outbound stat data
self.o_a = 2
self.o_b = 1
self.wait = 0.01 # the avg wait time before outbound called
self._refresh_tb = TokenBucket(2) # to limit params refresh interval
self._outbound_counter = 0
def log_outbound(self, n, wait, duration):
if (
self._outbound_counter <= self.N_SKIPPED_SAMPLE
): # skip inaccurate info at beginning
self._outbound_counter += 1
return
self.o_stat.append((n, duration, wait))
if self._refresh_tb.consume(1, 1.0 / self.INTERVAL_REFRESH_PARAMS, 1):
self.trigger_refresh()
def trigger_refresh(self):
x = tuple((i, 1) for i, _, _ in self.o_stat)
y = tuple(i for _, i, _ in self.o_stat)
_o_a, _o_b = np.linalg.lstsq(x, y, rcond=None)[0]
_o_w = sum(w for _, _, w in self.o_stat) * 1.0 / len(self.o_stat)
self.o_a, self.o_b = max(0.000001, _o_a), max(0, _o_b)
self.wait = max(0, _o_w)
logger.info(
"optimizer params updated: o_a: %.6f, o_b: %.6f, wait: %.6f",
_o_a,
_o_b,
_o_w,
)
class CorkDispatcher:
"""
A decorator that:
* wrap batch function
* implement CORK algorithm to cork & release calling of wrapped function
The wrapped function should be an async function.
"""
def __init__(
self,
max_latency_in_ms: int,
max_batch_size: int,
shared_sema: NonBlockSema = None,
fallback: Callable = None,
):
"""
params:
* max_latency_in_ms: max latency for inbound tasks, in milliseconds
* max_batch_size: max batch size of inbound tasks
* shared_sema: semaphore to limit concurrent outbound tasks
* fallback: callable to return fallback result
raises:
* all possible exceptions the decorated function has
"""
self.max_latency_in_ms = max_latency_in_ms / 1000.0
self.callback = None
self.fallback = fallback
self.optimizer = Optimizer()
self.max_batch_size = int(max_batch_size)
self.tick_interval = 0.001
self._controller = None
self._queue = collections.deque() # TODO(hrmthw): maxlen
self._sema = shared_sema if shared_sema else NonBlockSema(1)
async def shutdown(self):
if self._controller is not None:
self._controller.cancel()
try:
while True:
_, _, fut = self._queue.pop()
fut.cancel()
except IndexError:
pass
@cached_property
def _loop(self):
return asyncio.get_event_loop()
@cached_property
def _wake_event(self):
return asyncio.Condition()
def __call__(self, callback):
self.callback = callback
@functools.wraps(callback)
async def _func(data):
if self._controller is None:
self._controller = self._loop.create_task(self.controller())
try:
r = await self.inbound_call(data)
except asyncio.CancelledError:
return None if self.fallback is None else self.fallback()
if isinstance(r, Exception):
raise r
return r
return _func
async def controller(self):
"""
A standalone coroutine to wait/dispatch calling.
"""
while True:
try:
async with self._wake_event: # block until there's any request in queue
await self._wake_event.wait_for(self._queue.__len__)
n = len(self._queue)
dt = self.tick_interval
decay = 0.95 # the decay rate of wait time
now = time.time()
w0 = now - self._queue[0][0]
wn = now - self._queue[-1][0]
a = self.optimizer.o_a
b = self.optimizer.o_b
if n > 1 and (w0 + a * n + b) >= self.max_latency_in_ms:
self._queue.popleft()[2].cancel()
continue
if self._sema.is_locked():
if n == 1 and w0 >= self.max_latency_in_ms:
self._queue.popleft()[2].cancel()
continue
await asyncio.sleep(self.tick_interval)
continue
if n * (wn + dt + (a or 0)) <= self.optimizer.wait * decay:
await asyncio.sleep(self.tick_interval)
continue
n_call_out = min(self.max_batch_size, n,)
# call
self._sema.acquire()
inputs_info = tuple(self._queue.pop() for _ in range(n_call_out))
self._loop.create_task(self.outbound_call(inputs_info))
except asyncio.CancelledError:
break
except Exception: # pylint: disable=broad-except
logger.error(traceback.format_exc())
async def inbound_call(self, data):
t = time.time()
future = self._loop.create_future()
input_info = (t, data, future)
self._queue.append(input_info)
async with self._wake_event:
self._wake_event.notify_all()
return await future
async def outbound_call(self, inputs_info):
_time_start = time.time()
_done = False
logger.info("outbound function called: %d", len(inputs_info))
try:
outputs = await self.callback(tuple(d for _, d, _ in inputs_info))
assert len(outputs) == len(inputs_info)
for (_, _, fut), out in zip(inputs_info, outputs):
if not fut.done():
fut.set_result(out)
_done = True
self.optimizer.log_outbound(
n=len(inputs_info),
wait=_time_start - inputs_info[-1][0],
duration=time.time() - _time_start,
)
except asyncio.CancelledError:
pass
except Exception as e: # pylint: disable=broad-except
for _, _, fut in inputs_info:
if not fut.done():
fut.set_result(e)
_done = True
finally:
if not _done:
for _, _, fut in inputs_info:
if not fut.done():
fut.cancel()
self._sema.release()
```
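A hedged usage sketch for `CorkDispatcher`: an async batch handler is decorated so that individually awaited calls are corked into batches, while the `Optimizer` fits its `duration ≈ o_a * n + o_b` model from observed outbound calls. The handler below is illustrative, not a real model call:
```python
import asyncio

dispatcher = CorkDispatcher(max_latency_in_ms=200, max_batch_size=8)

@dispatcher
async def predict_batch(inputs):
    # Receives a tuple of corked inputs and must return one result per input.
    return ["pred:%s" % i for i in inputs]

async def main():
    # Each call looks like a single-item request; the dispatcher batches them.
    results = await asyncio.gather(*(predict_batch(i) for i in range(4)))
    print(results)
    await dispatcher.shutdown()
    await asyncio.sleep(0)  # let the cancelled controller task unwind

if __name__ == "__main__":
    asyncio.run(main())
```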
#### File: _internal/utils/s3.py
```python
import logging
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
def is_s3_url(url):
"""
Check if url is an s3, s3n, or s3a url
"""
try:
return urlparse(url).scheme in ["s3", "s3n", "s3a"]
except ValueError:
return False
def create_s3_bucket_if_not_exists(bucket_name, region):
import boto3
from botocore.exceptions import ClientError
s3_client = boto3.client("s3", region)
try:
s3_client.get_bucket_acl(Bucket=bucket_name)
logger.debug("Found bucket %s in region %s already exist", bucket_name, region)
except ClientError as error:
if error.response and error.response["Error"]["Code"] == "NoSuchBucket":
logger.debug("Creating s3 bucket: %s in region %s", bucket_name, region)
# NOTE: boto3 will raise ClientError(InvalidLocationConstraint) if
# `LocationConstraint` is set to `us-east-1` region.
# https://github.com/boto/boto3/issues/125.
# This issue still show up in boto3 1.13.4(May 6th 2020)
try:
s3_client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={"LocationConstraint": region},
)
except ClientError as s3_error:
if (
s3_error.response
and s3_error.response["Error"]["Code"]
== "InvalidLocationConstraint"
):
logger.debug(
"Special s3 region: %s, will attempt create bucket without "
"`LocationConstraint`",
region,
)
s3_client.create_bucket(Bucket=bucket_name)
else:
raise s3_error
else:
raise error
```
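A hedged sketch of the helpers above; the bucket name and region are illustrative, and the commented bucket-creation call assumes boto3 credentials are configured:
```python
print(is_s3_url("s3://my-bento-artifacts/models"))    # True
print(is_s3_url("https://example.com/artifact.tar"))  # False
# create_s3_bucket_if_not_exists("my-bento-artifacts", "us-west-2")
```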
#### File: _internal/yatai_client/deployment_api.py
```python
import logging
import time
from bentoml.exceptions import BentoMLException, YataiDeploymentException
from bentoml.utils import status_pb_to_error_code_and_message
from bentoml.yatai.client.label_utils import generate_gprc_labels_selector
from bentoml.yatai.deployment import ALL_NAMESPACE_TAG
from bentoml.yatai.deployment_utils import (
deployment_dict_to_pb,
deployment_yaml_string_to_pb,
)
from bentoml.yatai.proto import status_pb2
from bentoml.yatai.proto.deployment_pb2 import (
ApplyDeploymentRequest,
DeleteDeploymentRequest,
Deployment,
DeploymentSpec,
DeploymentState,
DescribeDeploymentRequest,
GetDeploymentRequest,
ListDeploymentsRequest,
)
logger = logging.getLogger(__name__)
WAIT_TIMEOUT_LIMIT = 600
WAIT_TIME = 5
class DeploymentAPIClient:
def __init__(self, yatai_service):
self.yatai_service = yatai_service
def list(
self,
limit=None,
offset=None,
labels=None,
namespace=None,
is_all_namespaces=False,
operator=None,
order_by=None,
ascending_order=None,
):
if is_all_namespaces:
if namespace is not None:
logger.warning(
"Ignoring `namespace=%s` due to the --all-namespace flag presented",
namespace,
)
namespace = ALL_NAMESPACE_TAG
if isinstance(operator, str):
if operator == "sagemaker":
operator = DeploymentSpec.AWS_SAGEMAKER
elif operator == "lambda":
operator = DeploymentSpec.AWS_LAMBDA
elif operator == "azure-functions":
operator = DeploymentSpec.AZURE_FUNCTIONS
elif operator == "ec2":
operator = DeploymentSpec.AWS_EC2
else:
raise BentoMLException(f"Unrecognized operator {operator}")
list_deployment_request = ListDeploymentsRequest(
limit=limit,
offset=offset,
namespace=namespace,
operator=operator,
order_by=order_by,
ascending_order=ascending_order,
)
if labels is not None:
generate_gprc_labels_selector(
list_deployment_request.label_selectors, labels
)
return self.yatai_service.ListDeployments(list_deployment_request)
def get(self, namespace, name):
return self.yatai_service.GetDeployment(
GetDeploymentRequest(deployment_name=name, namespace=namespace)
)
def describe(self, namespace, name):
return self.yatai_service.DescribeDeployment(
DescribeDeploymentRequest(deployment_name=name, namespace=namespace)
)
def delete(self, deployment_name, namespace, force_delete=False):
return self.yatai_service.DeleteDeployment(
DeleteDeploymentRequest(
deployment_name=deployment_name,
namespace=namespace,
force_delete=force_delete,
)
)
def create(self, deployment_info, wait):
from bentoml.yatai.validator import validate_deployment_pb
if isinstance(deployment_info, dict):
deployment_pb = deployment_dict_to_pb(deployment_info)
elif isinstance(deployment_info, str):
deployment_pb = deployment_yaml_string_to_pb(deployment_info)
elif isinstance(deployment_info, Deployment):
deployment_pb = deployment_info
else:
raise YataiDeploymentException(
"Unexpected argument type, expect deployment info to be str in yaml "
"format or a dict or a deployment protobuf obj, instead got: {}".format(
str(type(deployment_info))
)
)
validation_errors = validate_deployment_pb(deployment_pb)
if validation_errors:
raise YataiDeploymentException(
f"Failed to validate deployment {deployment_pb.name}: "
f"{validation_errors}"
)
# Make sure there is no active deployment with the same deployment name
get_deployment_pb = self.yatai_service.GetDeployment(
GetDeploymentRequest(
deployment_name=deployment_pb.name, namespace=deployment_pb.namespace
)
)
if get_deployment_pb.status.status_code != status_pb2.Status.NOT_FOUND:
raise BentoMLException(
f'Deployment "{deployment_pb.name}" already existed, use Update or '
f"Apply for updating existing deployment, delete the deployment, "
f"or use a different deployment name"
)
apply_result = self.yatai_service.ApplyDeployment(
ApplyDeploymentRequest(deployment=deployment_pb)
)
if apply_result.status.status_code != status_pb2.Status.OK:
error_code, error_message = status_pb_to_error_code_and_message(
apply_result.status
)
raise YataiDeploymentException(f"{error_code}:{error_message}")
if wait:
self._wait_deployment_action_complete(
deployment_pb.name, deployment_pb.namespace
)
return self.get(namespace=deployment_pb.namespace, name=deployment_pb.name)
def apply(self, deployment_info, wait):
from bentoml.yatai.validator import validate_deployment_pb
if isinstance(deployment_info, dict):
deployment_pb = deployment_dict_to_pb(deployment_info)
elif isinstance(deployment_info, str):
deployment_pb = deployment_yaml_string_to_pb(deployment_info)
elif isinstance(deployment_info, Deployment):
deployment_pb = deployment_info
else:
raise YataiDeploymentException(
"Unexpected argument type, expect deployment info to be str in yaml "
"format or a dict or a deployment protobuf obj, instead got: {}".format(
str(type(deployment_info))
)
)
validation_errors = validate_deployment_pb(deployment_pb)
if validation_errors:
raise YataiDeploymentException(
f"Failed to validate deployment {deployment_pb.name}: "
f"{validation_errors}"
)
apply_result = self.yatai_service.ApplyDeployment(
ApplyDeploymentRequest(deployment=deployment_pb)
)
if apply_result.status.status_code != status_pb2.Status.OK:
error_code, error_message = status_pb_to_error_code_and_message(
apply_result.status
)
raise YataiDeploymentException(f"{error_code}:{error_message}")
if wait:
self._wait_deployment_action_complete(
deployment_pb.name, deployment_pb.namespace
)
return self.get(namespace=deployment_pb.namespace, name=deployment_pb.name)
def _wait_deployment_action_complete(self, name, namespace):
start_time = time.time()
while (time.time() - start_time) < WAIT_TIMEOUT_LIMIT:
result = self.describe(namespace=namespace, name=name)
if (
result.status.status_code == status_pb2.Status.OK
and result.state.state is DeploymentState.PENDING
):
time.sleep(WAIT_TIME)
continue
else:
break
return result
def create_sagemaker_deployment(
self,
name,
bento_name,
bento_version,
api_name,
instance_type,
instance_count,
timeout,
num_of_gunicorn_workers_per_instance=None,
region=None,
namespace=None,
labels=None,
annotations=None,
wait=None,
data_capture_s3_prefix=None,
data_capture_sample_percent=None,
):
"""Create SageMaker deployment
Args:
name:
bento_name:
bento_version:
api_name:
instance_type:
instance_count:
timeout:
num_of_gunicorn_workers_per_instance:
region:
namespace:
labels:
annotations:
wait:
data_capture_s3_prefix:
data_capture_sample_percent:
Returns:
ApplyDeploymentResponse
Raises:
BentoMLException
"""
deployment_pb = Deployment(
name=name, namespace=namespace, labels=labels, annotations=annotations
)
deployment_pb.spec.bento_name = bento_name
deployment_pb.spec.bento_version = bento_version
deployment_pb.spec.operator = DeploymentSpec.AWS_SAGEMAKER
deployment_pb.spec.sagemaker_operator_config.api_name = api_name
deployment_pb.spec.sagemaker_operator_config.instance_count = instance_count
deployment_pb.spec.sagemaker_operator_config.instance_type = instance_type
deployment_pb.spec.sagemaker_operator_config.timeout = timeout
if data_capture_s3_prefix:
deployment_pb.spec.sagemaker_operator_config.data_capture_s3_prefix = (
data_capture_s3_prefix
)
if data_capture_sample_percent:
deployment_pb.spec.sagemaker_operator_config.data_capture_sample_percent = (
data_capture_sample_percent
)
if region:
deployment_pb.spec.sagemaker_operator_config.region = region
if num_of_gunicorn_workers_per_instance:
deployment_pb.spec.sagemaker_operator_config.num_of_gunicorn_workers_per_instance = ( # noqa E501
num_of_gunicorn_workers_per_instance
)
return self.create(deployment_pb, wait)
def update_sagemaker_deployment(
self,
deployment_name,
namespace=None,
api_name=None,
instance_type=None,
instance_count=None,
timeout=None,
num_of_gunicorn_workers_per_instance=None,
bento_name=None,
bento_version=None,
wait=None,
data_capture_s3_prefix=None,
data_capture_sample_percent=None,
):
""" Update current sagemaker deployment
Args:
namespace:
deployment_name:
api_name:
instance_type:
instance_count:
timeout:
num_of_gunicorn_workers_per_instance:
bento_name:
bento_version:
wait:
data_capture_s3_prefix:
data_capture_sample_percent:
Returns:
Protobuf message
Raises:
BentoMLException
"""
get_deployment_result = self.get(namespace, deployment_name)
if get_deployment_result.status.status_code != status_pb2.Status.OK:
get_deployment_status = get_deployment_result.status
raise BentoMLException(
f"Failed to retrieve current deployment {deployment_name} in "
f"{namespace}. "
f"{status_pb2.Status.Code.Name(get_deployment_status.status_code)}"
f":{get_deployment_status.error_message}"
)
deployment_pb = get_deployment_result.deployment
if api_name:
deployment_pb.spec.sagemaker_operator_config.api_name = api_name
if instance_type:
deployment_pb.spec.sagemaker_operator_config.instance_type = instance_type
if instance_count:
deployment_pb.spec.sagemaker_operator_config.instance_count = instance_count
if num_of_gunicorn_workers_per_instance:
deployment_pb.spec.sagemaker_operator_config.num_of_gunicorn_workers_per_instance = ( # noqa E501
num_of_gunicorn_workers_per_instance
)
if timeout:
deployment_pb.spec.sagemaker_operator_config.timeout = timeout
if bento_name:
deployment_pb.spec.bento_name = bento_name
if bento_version:
deployment_pb.spec.bento_version = bento_version
if data_capture_s3_prefix:
deployment_pb.spec.sagemaker_operator_config.data_capture_s3_prefix = (
data_capture_s3_prefix
)
if data_capture_sample_percent:
deployment_pb.spec.sagemaker_operator_config.data_capture_sample_percent = (
data_capture_sample_percent
)
logger.debug(
"Updated configuration for sagemaker deployment %s", deployment_pb.name
)
return self.apply(deployment_pb, wait)
def list_sagemaker_deployments(
self,
limit=None,
offset=None,
labels=None,
namespace=None,
is_all_namespaces=False,
order_by=None,
ascending_order=None,
):
list_result = self.list(
limit=limit,
offset=offset,
labels=labels,
namespace=namespace,
is_all_namespaces=is_all_namespaces,
operator=DeploymentSpec.AWS_SAGEMAKER,
order_by=order_by,
ascending_order=ascending_order,
)
if list_result.status.status_code != status_pb2.Status.OK:
return list_result
sagemaker_deployments = [
deployment
for deployment in list_result.deployments
if deployment.spec.operator == DeploymentSpec.AWS_SAGEMAKER
]
del list_result.deployments[:]
list_result.deployments.extend(sagemaker_deployments)
return list_result
def create_ec2_deployment(
self,
name,
namespace,
bento_name,
bento_version,
region,
min_size,
desired_capacity,
max_size,
instance_type,
ami_id,
wait=None,
):
deployment_pb = Deployment(name=name, namespace=namespace)
deployment_pb.spec.bento_name = bento_name
deployment_pb.spec.bento_version = bento_version
if region:
deployment_pb.spec.aws_ec2_operator_config.region = region
deployment_pb.spec.operator = DeploymentSpec.AWS_EC2
deployment_pb.spec.aws_ec2_operator_config.autoscale_min_size = min_size
deployment_pb.spec.aws_ec2_operator_config.autoscale_desired_capacity = (
desired_capacity
)
deployment_pb.spec.aws_ec2_operator_config.autoscale_max_size = max_size
deployment_pb.spec.aws_ec2_operator_config.instance_type = instance_type
deployment_pb.spec.aws_ec2_operator_config.ami_id = ami_id
return self.create(deployment_pb, wait)
def update_ec2_deployment(
self,
deployment_name,
bento_name,
bento_version,
namespace,
min_size,
desired_capacity,
max_size,
instance_type,
ami_id,
wait,
):
get_deployment_result = self.get(namespace=namespace, name=deployment_name)
if get_deployment_result.status.status_code != status_pb2.Status.OK:
error_code = status_pb2.Status.Code.Name(
get_deployment_result.status.status_code
)
error_message = get_deployment_result.status.error_message
raise BentoMLException(
f"Failed to retrieve current deployment {deployment_name} "
f"in {namespace}. {error_code}:{error_message}"
)
        # new deployment info with updated configs
deployment_pb = get_deployment_result.deployment
if bento_name:
deployment_pb.spec.bento_name = bento_name
if bento_version:
deployment_pb.spec.bento_version = bento_version
deployment_pb.spec.aws_ec2_operator_config.autoscale_min_size = min_size
deployment_pb.spec.aws_ec2_operator_config.autoscale_desired_capacity = (
desired_capacity
)
deployment_pb.spec.aws_ec2_operator_config.autoscale_max_size = max_size
deployment_pb.spec.aws_ec2_operator_config.instance_type = instance_type
deployment_pb.spec.aws_ec2_operator_config.ami_id = ami_id
logger.debug("Updated configuration for Lambda deployment %s", deployment_name)
return self.apply(deployment_pb, wait)
def list_ec2_deployments(
self,
limit=None,
offset=None,
labels=None,
namespace=None,
order_by=None,
ascending_order=None,
is_all_namespaces=False,
):
return self.list(
limit=limit,
offset=offset,
labels=labels,
namespace=namespace,
is_all_namespaces=is_all_namespaces,
operator=DeploymentSpec.AWS_EC2,
order_by=order_by,
ascending_order=ascending_order,
)
def create_lambda_deployment(
self,
name,
bento_name,
bento_version,
memory_size,
timeout,
api_name=None,
region=None,
namespace=None,
labels=None,
annotations=None,
wait=None,
):
"""Create Lambda deployment
Args:
name:
bento_name:
bento_version:
memory_size:
timeout:
api_name:
region:
namespace:
labels:
annotations:
wait:
Returns:
ApplyDeploymentResponse: status, deployment
Raises:
BentoMLException
"""
deployment_pb = Deployment(
name=name, namespace=namespace, labels=labels, annotations=annotations
)
deployment_pb.spec.bento_name = bento_name
deployment_pb.spec.bento_version = bento_version
deployment_pb.spec.operator = DeploymentSpec.AWS_LAMBDA
deployment_pb.spec.aws_lambda_operator_config.memory_size = memory_size
deployment_pb.spec.aws_lambda_operator_config.timeout = timeout
if api_name:
deployment_pb.spec.aws_lambda_operator_config.api_name = api_name
if region:
deployment_pb.spec.aws_lambda_operator_config.region = region
return self.create(deployment_pb, wait)
def update_lambda_deployment(
self,
deployment_name,
namespace=None,
bento_name=None,
bento_version=None,
memory_size=None,
timeout=None,
wait=None,
):
get_deployment_result = self.get(namespace=namespace, name=deployment_name)
if get_deployment_result.status.status_code != status_pb2.Status.OK:
error_code = status_pb2.Status.Code.Name(
get_deployment_result.status.status_code
)
            error_message = get_deployment_result.status.error_message
raise BentoMLException(
f"Failed to retrieve current deployment {deployment_name} "
f"in {namespace}. {error_code}:{error_message}"
)
deployment_pb = get_deployment_result.deployment
if bento_name:
deployment_pb.spec.bento_name = bento_name
if bento_version:
deployment_pb.spec.bento_version = bento_version
if memory_size:
deployment_pb.spec.aws_lambda_operator_config.memory_size = memory_size
if timeout:
deployment_pb.spec.aws_lambda_operator_config.timeout = timeout
logger.debug("Updated configuration for Lambda deployment %s", deployment_name)
return self.apply(deployment_pb, wait)
def list_lambda_deployments(
self,
limit=None,
offset=None,
labels=None,
namespace=None,
is_all_namespaces=False,
order_by=None,
ascending_order=None,
):
return self.list(
limit=limit,
offset=offset,
labels=labels,
namespace=namespace,
is_all_namespaces=is_all_namespaces,
operator=DeploymentSpec.AWS_LAMBDA,
order_by=order_by,
ascending_order=ascending_order,
)
def create_azure_functions_deployment(
self,
name,
bento_name,
bento_version,
location,
premium_plan_sku=None,
min_instances=None,
max_burst=None,
function_auth_level=None,
namespace=None,
labels=None,
annotations=None,
wait=None,
):
deployment_pb = Deployment(
name=name, namespace=namespace, labels=labels, annotations=annotations
)
deployment_pb.spec.bento_name = bento_name
deployment_pb.spec.bento_version = bento_version
deployment_pb.spec.operator = DeploymentSpec.AZURE_FUNCTIONS
deployment_pb.spec.azure_functions_operator_config.location = location
deployment_pb.spec.azure_functions_operator_config.premium_plan_sku = (
premium_plan_sku
)
deployment_pb.spec.azure_functions_operator_config.min_instances = min_instances
deployment_pb.spec.azure_functions_operator_config.function_auth_level = (
function_auth_level
)
deployment_pb.spec.azure_functions_operator_config.max_burst = max_burst
return self.create(deployment_pb, wait)
def update_azure_functions_deployment(
self,
deployment_name,
bento_name=None,
bento_version=None,
max_burst=None,
min_instances=None,
premium_plan_sku=None,
namespace=None,
wait=None,
):
get_deployment_result = self.get(namespace=namespace, name=deployment_name)
if get_deployment_result.status.status_code != status_pb2.Status.OK:
error_code = status_pb2.Status.Code.Name(
get_deployment_result.status.status_code
)
            error_message = get_deployment_result.status.error_message
raise BentoMLException(
f"Failed to retrieve current deployment {deployment_name} in "
f"{namespace}. {error_code}:{error_message}"
)
deployment_pb = get_deployment_result.deployment
if bento_name:
deployment_pb.spec.bento_name = bento_name
if bento_version:
deployment_pb.spec.bento_version = bento_version
if max_burst:
deployment_pb.spec.azure_functions_operator_config.max_burst = max_burst
if min_instances:
deployment_pb.spec.azure_functions_operator_config.min_instances = (
min_instances
)
if premium_plan_sku:
deployment_pb.spec.azure_functions_operator_config.premium_plan_sku = (
premium_plan_sku
)
return self.apply(deployment_pb, wait)
def list_azure_functions_deployments(
self,
limit=None,
offset=None,
labels=None,
namespace=None,
is_all_namespaces=False,
order_by=None,
ascending_order=None,
):
return self.list(
limit=limit,
offset=offset,
labels=labels,
namespace=namespace,
is_all_namespaces=is_all_namespaces,
operator=DeploymentSpec.AZURE_FUNCTIONS,
order_by=order_by,
ascending_order=ascending_order,
)
```
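For orientation, here is a minimal usage sketch of the client above. It is illustrative only: `yatai_service` is assumed to be an already-connected Yatai gRPC stub (normally obtained through BentoML's Yatai service helper, as the commented-out `YataiClient` in the next file suggests), and the namespace and deployment name are made-up values.
```python
# Illustrative sketch only; `yatai_service` is an assumed, already-connected
# Yatai gRPC stub, and "dev" / "my-lambda-deployment" are assumed names.
client = DeploymentAPIClient(yatai_service)

# List AWS Lambda deployments in the assumed "dev" namespace
list_result = client.list_lambda_deployments(namespace="dev")
for deployment in list_result.deployments:
    print(deployment.name, deployment.spec.bento_name, deployment.spec.bento_version)

# Fetch and describe a single deployment by name
get_result = client.get(namespace="dev", name="my-lambda-deployment")
describe_result = client.describe(namespace="dev", name="my-lambda-deployment")
```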
#### File: _internal/yatai_client/__init__.py
```python
class YataiClient:
"""
Python Client for interacting with YataiService
"""
pass
# def __init__(self, yatai_service: Optional["YataiStub"] = None):
# self.yatai_service = yatai_service if yatai_service else get_yatai_service()
# self.bento_repository_api_client = None
# self.deployment_api_client = None
#
# @cached_property
# def repository(self) -> "BentoRepositoryAPIClient":
# return BentoRepositoryAPIClient(self.yatai_service)
# def get_yatai_client(yatai_url: str = None) -> "YataiClient":
# """
# Args:
# yatai_url (`str`):
# Yatai Service URL address.
#
# Returns:
# :obj:`~YataiClient`, a python client to interact with :obj:`Yatai` gRPC server.
#
# Example::
#
# from bentoml.yatai.client import get_yatai_client
#
# custom_url = 'https://remote.yatai:50050'
# yatai_client = get_yatai_client(custom_url)
# """ # noqa: E501
#
# yatai_service = get_yatai_service(channel_address=yatai_url)
# return YataiClient(yatai_service=yatai_service)
```
#### File: projects/fastai2/service.py
```python
import pathlib
import sys
import numpy as np
import bentoml
from bentoml.adapters import DataframeInput
from bentoml.fastai import FastaiModelArtifact
@bentoml.env(infer_pip_packages=True)
@bentoml.artifacts([FastaiModelArtifact("model")])
class FastaiClassifier(bentoml.BentoService):
@bentoml.api(input=DataframeInput(), batch=True)
def predict(self, df):
input_data = df.to_numpy().astype(np.float32)
_, _, output = self.artifacts.model.predict(input_data)
return output.squeeze().item()
if __name__ == "__main__":
artifacts_path = sys.argv[1]
bento_dist_path = sys.argv[2]
service = FastaiClassifier()
from model.model import Loss, Model # noqa # pylint: disable=unused-import
service.artifacts.load_all(artifacts_path)
pathlib.Path(bento_dist_path).mkdir(parents=True, exist_ok=True)
service.save_to_dir(bento_dist_path)
```
#### File: general/tests/test_artifact.py
```python
import pytest
@pytest.mark.asyncio
async def test_api_server_with_sklearn(host):
await pytest.assert_request(
"POST",
f"http://{host}/predict_with_sklearn",
headers=(("Content-Type", "application/json"),),
data="[2.0]",
assert_status=200,
assert_data=b"2.0",
)
```
#### File: projects/paddle/service.py
```python
import pathlib
import sys
import numpy as np
import bentoml
from bentoml.adapters import DataframeInput
from bentoml.paddle import PaddlePaddleModelArtifact
@bentoml.env(infer_pip_packages=True)
@bentoml.artifacts([PaddlePaddleModelArtifact("model")])
class PaddleService(bentoml.BentoService):
@bentoml.api(input=DataframeInput(), batch=True)
def predict(self, df):
input_data = df.to_numpy().astype(np.float32)
predictor = self.artifacts.model
input_names = predictor.get_input_names()
input_handle = predictor.get_input_handle(input_names[0])
input_handle.reshape(input_data.shape)
input_handle.copy_from_cpu(input_data)
predictor.run()
output_names = predictor.get_output_names()
output_handle = predictor.get_output_handle(output_names[0])
output_data = output_handle.copy_to_cpu()
return output_data
if __name__ == "__main__":
artifacts_path = sys.argv[1]
bento_dist_path = sys.argv[2]
service = PaddleService()
service.artifacts.load_all(artifacts_path)
pathlib.Path(bento_dist_path).mkdir(parents=True, exist_ok=True)
service.save_to_dir(bento_dist_path)
```
#### File: slo/tests/test_latency.py
```python
import sys
import time
import pytest
@pytest.mark.asyncio
@pytest.mark.skipif(sys.platform == "darwin", reason="Test being flaky on Mac OS")
async def test_SLO(host):
await pytest.assert_request(
"POST",
f"http://{host}/echo_with_delay_max3",
data='"0"',
headers=(("Content-Type", "application/json"),),
assert_status=200,
)
SLO = 3
accuracy = 0.01
time_start = time.time()
await pytest.assert_request(
"POST",
f"http://{host}/echo_with_delay_max3",
data='"2.9"',
timeout=SLO * 2,
headers=(("Content-Type", "application/json"),),
assert_status=200,
)
assert time.time() - time_start < SLO * (1 + accuracy)
time_start = time.time()
await pytest.assert_request(
"POST",
f"http://{host}/echo_with_delay_max3",
data='"3.5"',
timeout=SLO * 2,
headers=(("Content-Type", "application/json"),),
assert_status=408,
)
assert time.time() - time_start < SLO * (1 + accuracy)
```
#### File: tests/integration/test_fasttext_model_artifact.py
```python
import contextlib
import tempfile
import fasttext
import pytest
import bentoml
from bentoml.yatai.client import YataiClient
from tests import FasttextClassifier
@pytest.fixture()
def fasttext_classifier_class():
FasttextClassifier._bento_service_bundle_path = None
FasttextClassifier._bento_service_bundle_version = None
return FasttextClassifier
test_json = {"text": "foo"}
def test_fasttext_artifact_pack(fasttext_classifier_class):
@contextlib.contextmanager
def _temp_filename_with_contents(contents):
temporary_file = tempfile.NamedTemporaryFile(suffix=".txt", mode="w+")
temporary_file.write(contents)
# Set file pointer to beginning to ensure correct read
temporary_file.seek(0)
yield temporary_file.name
temporary_file.close()
with _temp_filename_with_contents("__label__bar foo") as filename:
model = fasttext.train_supervised(input=filename)
svc = fasttext_classifier_class()
svc.pack("model", model)
assert svc.predict(test_json)[0] == (
"__label__bar",
), "Run inference before saving the artifact"
saved_path = svc.save()
loaded_svc = bentoml.load(saved_path)
assert loaded_svc.predict(test_json)[0] == (
"__label__bar",
), "Run inference after saving the artifact"
# clean up saved bundle
yc = YataiClient()
yc.repository.delete(f"{svc.name}:{svc.version}")
```
#### File: tests/integration/test_keras_artifact.py
```python
import json
import keras
import numpy as np
import pytest
import tensorflow as tf
import bentoml
from tests import (
build_api_server_docker_image,
export_service_bundle,
run_api_server_docker_container,
)
TF2 = tf.__version__.startswith("2")
if TF2:
from tests import KerasClassifier
else:
from tests import KerasClassifier
test_data = [1, 2, 3, 4, 5]
@pytest.fixture(params=[tf.keras, keras], scope="session")
def keras_model(request):
ke = request.param
net = ke.Sequential(
(
ke.layers.Dense(
units=1,
input_shape=(5,),
use_bias=False,
kernel_initializer=ke.initializers.Ones(),
),
)
)
net.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
return net
@pytest.fixture(scope="session")
def svc(keras_model):
"""Return a TensorFlow2 BentoService."""
    # When the ExampleBentoService gets saved and loaded again in the test, the
    # two class attributes below get set to the loaded BentoService class.
    # Resetting them here so they do not affect other tests
KerasClassifier._bento_service_bundle_path = None
KerasClassifier._bento_service_bundle_version = None
svc = KerasClassifier()
keras_model.predict(np.array([test_data]))
svc.pack("model", keras_model)
svc.pack("model2", keras_model)
return svc
@pytest.fixture(scope="session")
def image(svc, clean_context):
with export_service_bundle(svc) as saved_path:
yield clean_context.enter_context(build_api_server_docker_image(saved_path))
@pytest.fixture(scope="module")
def host(image):
with run_api_server_docker_container(image, timeout=500) as host:
yield host
def test_keras_artifact(svc):
assert svc.predict([test_data]) == [
15.0
], "Inference on unsaved Keras artifact does not match expected"
assert svc.predict2([test_data]) == [
15.0
], "Inference on unsaved Keras artifact does not match expected"
def test_keras_artifact_loaded(svc):
with export_service_bundle(svc) as saved_path:
loaded = bentoml.load(saved_path)
assert (
loaded.predict([test_data]) == 15.0
), "Inference on saved and loaded Keras artifact does not match expected"
assert (
loaded.predict2([test_data]) == 15.0
), "Inference on saved and loaded Keras artifact does not match expected"
@pytest.mark.asyncio
async def test_keras_artifact_with_docker(host):
await pytest.assert_request(
"POST",
f"http://{host}/predict",
headers=(("Content-Type", "application/json"),),
data=json.dumps(test_data),
assert_status=200,
assert_data=b"[15.0]",
)
await pytest.assert_request(
"POST",
f"http://{host}/predict2",
headers=(("Content-Type", "application/json"),),
data=json.dumps(test_data),
assert_status=200,
assert_data=b"[15.0]",
)
```
#### File: tests/integration/test_locking.py
```python
import logging
import subprocess
import pytest
from tests import ExampleBentoService, ThreadWithResult, run_delayed_thread
logger = logging.getLogger("bentoml.test")
def cli(svc, cmd, *args):
bento_tag = f"{svc.name}:{svc.version}"
proc = subprocess.Popen(
["bentoml", cmd, bento_tag, *args],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return proc.stdout.read().decode("utf-8")
@pytest.fixture()
def packed_svc():
svc = ExampleBentoService()
svc.pack("model", [1, 2, 3])
svc.save()
return svc
def test_write_lock_on_read_lock(packed_svc):
containerize_thread = ThreadWithResult(
target=cli, args=(packed_svc, "containerize", "-t", "imagetag")
)
delete_thread = ThreadWithResult(target=cli, args=(packed_svc, "delete", "-y"))
run_delayed_thread(containerize_thread, delete_thread)
assert (
f"Build container image: imagetag:{packed_svc.version}"
in containerize_thread.result
)
assert (
"Failed to acquire write lock, another lock held. Retrying"
in delete_thread.result
)
assert f"Deleted {packed_svc.name}:{packed_svc.version}" in delete_thread.result
def test_read_lock_on_read_lock(packed_svc):
containerize_thread = ThreadWithResult(
target=cli, args=(packed_svc, "containerize", "-t", "imagetag")
)
get_thread = ThreadWithResult(target=cli, args=(packed_svc, "get"))
run_delayed_thread(containerize_thread, get_thread)
assert (
f"Build container image: imagetag:{packed_svc.version}"
in containerize_thread.result
)
assert f'"name": "{packed_svc.name}"' in get_thread.result
assert f'"version": "{packed_svc.version}"' in get_thread.result
```
#### File: tests/integration/test_pytorch_model_artifact.py
```python
import pandas
import pytest
import torch
from torch import nn
import bentoml
from bentoml.yatai.client import YataiClient
from tests import PytorchClassifier
class PytorchModel(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(5, 1, bias=False)
torch.nn.init.ones_(self.linear.weight)
def forward(self, x):
x = self.linear(x)
return x
@pytest.fixture()
def pytorch_classifier_class():
    # When the ExampleBentoService gets saved and loaded again in the test, the two class
    # attributes below get set to the loaded BentoService class. Resetting them here so
    # they do not affect other tests
PytorchClassifier._bento_service_bundle_path = None
PytorchClassifier._bento_service_bundle_version = None
return PytorchClassifier
test_df = pandas.DataFrame([[1, 1, 1, 1, 1]])
def test_pytorch_artifact_pack(pytorch_classifier_class):
svc = pytorch_classifier_class()
model = PytorchModel()
svc.pack("model", model)
assert svc.predict(test_df) == 5.0, "Run inference before save the artifact"
saved_path = svc.save()
loaded_svc = bentoml.load(saved_path)
assert loaded_svc.predict(test_df) == 5.0, "Run inference from saved artifact"
# clean up saved bundle
yc = YataiClient()
yc.repository.delete(f"{svc.name}:{svc.version}")
def test_pytorch_artifact_pack_with_traced_model(pytorch_classifier_class):
svc = pytorch_classifier_class()
input_for_tracing = torch.ones(5)
model = PytorchModel()
traced_model = torch.jit.trace(model, input_for_tracing)
svc.pack("model", traced_model)
assert svc.predict(test_df) == 5.0, "Run inference before save the artifact"
saved_path = svc.save()
loaded_svc = bentoml.load(saved_path)
assert loaded_svc.predict(test_df) == 5.0, "Run inference from saved artifact"
# clean up saved bundle
yc = YataiClient()
yc.repository.delete(f"{svc.name}:{svc.version}")
def test_pytorch_artifact_pack_with_scripted_model(pytorch_classifier_class):
svc = pytorch_classifier_class()
model = PytorchModel()
scripted_model = torch.jit.script(model)
svc.pack("model", scripted_model)
assert svc.predict(test_df) == 5.0, "Run inference before save the artifact"
saved_path = svc.save()
loaded_svc = bentoml.load(saved_path)
assert loaded_svc.predict(test_df) == 5.0, "Run inference from saved artifact"
# clean up saved bundle
yc = YataiClient()
yc.repository.delete(f"{svc.name}:{svc.version}")
```
#### File: integration/yatai_server/utils.py
```python
import contextlib
import logging
import os
import subprocess
import uuid
import docker
from bentoml.configuration import LAST_PYPI_RELEASE_VERSION
from bentoml.utils import reserve_free_port
from bentoml.utils.tempdir import TempDirectory
from tests import wait_until_container_ready
logger = logging.getLogger("bentoml.test")
def build_yatai_service_image():
docker_client = docker.from_env()
local_bentoml_repo_path = os.path.abspath(__file__ + "/../../../../")
yatai_docker_image_tag = f"bentoml/yatai-service:test-{uuid.uuid4().hex[:6]}"
# Note: When set both `custom_context` and `fileobj`, docker api will not use the
# `path` provide... docker/api/build.py L138. The solution is create an actual
# Dockerfile along with path, instead of fileobj and custom_context.
with TempDirectory() as temp_dir:
temp_docker_file_path = os.path.join(temp_dir, "Dockerfile")
with open(temp_docker_file_path, "w") as f:
f.write(
f"""\
FROM bentoml/yatai-service:{LAST_PYPI_RELEASE_VERSION}
ADD . /bentoml-local-repo
RUN pip install -U /bentoml-local-repo"""
)
logger.info(f"Building docker image {yatai_docker_image_tag}")
docker_client.images.build(
path=local_bentoml_repo_path,
dockerfile=temp_docker_file_path,
tag=yatai_docker_image_tag,
)
return yatai_docker_image_tag
# Cache the yatai docker image built for each test run session, since the source code
# of yatai will not be modified during a test run
_yatai_docker_image_tag = None
@contextlib.contextmanager
def yatai_service_container(db_url=None, repo_base_url=None):
global _yatai_docker_image_tag # pylint: disable=global-statement
if _yatai_docker_image_tag is None:
_yatai_docker_image_tag = build_yatai_service_image()
docker_client = docker.from_env()
container_name = f"yatai-test-{uuid.uuid4().hex[:6]}"
yatai_server_command = ["bentoml", "yatai-service-start", "--no-ui"]
if db_url:
yatai_server_command.extend(["--db-url", db_url])
if repo_base_url:
yatai_server_command.extend(["--repo-base-url", repo_base_url])
host = "127.0.0.1"
with reserve_free_port(host) as free_port:
# find free port on host
port = free_port
container = docker_client.containers.run(
image=_yatai_docker_image_tag,
remove=True,
environment=["BENTOML_HOME=/tmp"],
ports={"50051/tcp": (host, port)},
command=yatai_server_command,
name=container_name,
detach=True,
)
wait_until_container_ready(
container_name, "Starting BentoML YataiService gRPC Server"
)
yield f"{host}:{port}"
logger.info(f"Shutting down docker container: {container_name}")
container.kill()
@contextlib.contextmanager
def local_yatai_service_from_cli(db_url=None, repo_base_url=None, port=50051):
yatai_server_command = [
"bentoml",
"yatai-service-start",
"--no-ui",
"--grpc-port",
str(port),
]
if db_url:
yatai_server_command.extend(["--db-url", db_url])
if repo_base_url:
yatai_server_command.extend(["--repo-base-url", repo_base_url])
logger.info(f'Starting local YataiServer {" ".join(yatai_server_command)}')
proc = subprocess.Popen(
yatai_server_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
yatai_service_url = f"localhost:{port}"
logger.info(f"Setting config yatai_service.url to: {yatai_service_url}")
yield yatai_service_url
proc.kill()
```
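A short sketch of how the container fixture above might be used from a test; the database URL is an assumption and Docker must be available locally.
```python
# Illustrative sketch; the db_url value is an assumption.
def test_against_dockerized_yatai():
    with yatai_service_container(
        db_url="postgresql://postgres:postgres@localhost/bentoml"
    ) as grpc_address:
        # grpc_address looks like "127.0.0.1:<free_port>"; point a YataiClient
        # or the bentoml CLI at it for the duration of the test.
        assert ":" in grpc_address
```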
#### File: _internal/utils/threading.py
```python
import threading
import time
class ThreadWithResult(threading.Thread):
def __init__(
self, group=None, target=None, name=None, args=(), kwargs=None, *, daemon=None
):
if kwargs is None:
kwargs = {}
self.result = None
def function():
self.result = target(*args, **kwargs)
super().__init__(group=group, target=function, name=name, daemon=daemon)
def run_delayed_thread(t1, t2, delay=1):
t1.start()
time.sleep(delay)
t2.start()
t1.join()
t2.join()
```
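A small usage sketch of the two helpers above; the arithmetic lambdas are placeholders.
```python
# Illustrative sketch; the lambda targets are placeholders.
t1 = ThreadWithResult(target=lambda x: x * x, args=(3,))
t2 = ThreadWithResult(target=lambda x: x + 1, args=(3,))
run_delayed_thread(t1, t2, delay=0.1)  # start t1, wait briefly, start t2, join both
print(t1.result, t2.result)  # -> 9 4
```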
#### File: unit/adapters/test_string_input.py
```python
import json
from bentoml.types import HTTPRequest
def test_string_input(make_api):
from bentoml.adapters import JsonOutput, StringInput
api = make_api(
input_adapter=StringInput(), output_adapter=JsonOutput(), user_func=lambda i: i,
)
body = b'{"a": 1}'
request = HTTPRequest(body=body)
response = api.handle_request(request)
assert json.loads(response.body) == body.decode()
responses = api.handle_batch_request([request] * 3)
for response in responses:
assert json.loads(response.body) == body.decode()
```
#### File: yatai/configuration/__init__.py
```python
import os
def get_local_config_file():
if "YATAI_CONFIG" in os.environ:
# User local config file for customizing Yatai
return expand_env_var(os.environ.get("YATAI_CONFIG"))
return None
def inject_dependencies():
"""Inject dependencis and configuration for Yatai package"""
from yatai.yatai.configuration.containers import YataiConfiguration, YataiContainer
config_file = get_local_config_file()
if config_file and config_file.endswith('.yml'):
configuration = YataiConfiguration(override_config_file=config_file)
else:
configuration = YataiConfiguration()
YataiContainer.config.set(configuration.as_dict())
def expand_env_var(env_var):
"""Expands potentially nested env var by repeatedly applying `expandvars` and
`expanduser` until interpolation stops having any effect.
"""
if not env_var:
return env_var
while True:
interpolated = os.path.expanduser(os.path.expandvars(str(env_var)))
if interpolated == env_var:
return interpolated
else:
env_var = interpolated
```
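To illustrate the repeated expansion performed by `expand_env_var`, a small sketch follows; the environment variable values are assumptions.
```python
# Illustrative sketch; the environment variable values are assumptions.
import os

os.environ["YATAI_HOME"] = "~/yatai"
os.environ["YATAI_CONFIG"] = "$YATAI_HOME/config.yml"
print(expand_env_var(os.environ["YATAI_CONFIG"]))
# -> the user's home directory joined with "yatai/config.yml", because
#    expandvars and expanduser are applied until the value stops changing
```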
#### File: yatai/deployment/docker_utils.py
```python
import logging
from typing import Dict, Optional
from urllib.parse import urlparse
import docker
from bentoml._internal.exceptions import BentoMLException, MissingDependencyException
logger = logging.getLogger(__name__)
def ensure_docker_available_or_raise() -> None:
"""
Ensure docker is available.
Raises:
:class:`~MissingDependencyException`:
for :class:`~docker.errors.APIErrors`
or :class:`~docker.errors.DockerException`
"""
try:
client = docker.from_env()
client.ping()
except docker.errors.APIError as error:
raise MissingDependencyException(f"Docker server is not responsive. {error}")
except docker.errors.DockerException:
raise MissingDependencyException(
"Docker is required for this deployment. Please visit "
"www.docker.com for instructions"
)
def _strip_scheme(url: str) -> str:
"""
    Strip a url's scheme.
Examples:
http://some.url/path -> some.url/path
Args:
url (`str`)
Returns:
:obj:`str`
"""
parsed = urlparse(url)
scheme = "%s://" % parsed.scheme
return parsed.geturl().replace(scheme, "", 1)
def generate_docker_image_tag(
image_name: str, version: str = "latest", registry_url=None
):
image_tag = f"{image_name}:{version}".lower()
if registry_url is not None:
return _strip_scheme(f"{registry_url}/{image_tag}")
else:
return image_tag
def build_docker_image(
context_path: str,
image_tag: str,
dockerfile: Optional[str] = "Dockerfile",
additional_build_args: Optional[Dict[str, str]] = None,
):
docker_client = docker.from_env()
try:
docker_client.images.build(
path=context_path,
tag=image_tag,
dockerfile=dockerfile,
buildargs=additional_build_args,
)
except (docker.errors.APIError, docker.errors.BuildError) as error:
logger.error(f"Failed to build docker image {image_tag}: {error}")
raise BentoMLException(f"Failed to build docker image {image_tag}: {error}")
def push_docker_image_to_repository(
repository: str,
image_tag: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
):
docker_client = docker.from_env()
    docker_push_kwargs = {"repository": repository, "tag": image_tag}
    if username is not None and password is not None:
        docker_push_kwargs["auth_config"] = {"username": username, "password": password}
    try:
        docker_client.images.push(**docker_push_kwargs)
except docker.errors.APIError as error:
raise BentoMLException(f"Failed to push docker image {image_tag}: {error}")
``` |
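A minimal sketch of how the helpers above compose; the image name, version, registry URL and build context path are all assumptions.
```python
# Illustrative sketch; names, versions and paths are assumptions.
ensure_docker_available_or_raise()

image_tag = generate_docker_image_tag(
    "my-bento-service", version="1.0.0", registry_url="https://registry.example.com"
)
# image_tag == "registry.example.com/my-bento-service:1.0.0"

build_docker_image(context_path="./saved_bundle", image_tag=image_tag)
push_docker_image_to_repository(
    repository=image_tag.rsplit(":", 1)[0], image_tag="1.0.0"
)
```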
{
"source": "jmcabreira/Data_Science_Coursework",
"score": 3
} |
#### File: webapp/Basic_webapp/webapp1.py
```python
import streamlit as st
def main():
st.image('CabreiraLogo.png')
st.title('Codenation')
st.markdown('Button')
button = st.button('Button Name')
if button:
st.markdown('ON')
st.markdown('checkbox')
check = st.checkbox('Checkbox')
if check:
st.markdown('ON')
st.markdown('Radio')
radio = st.radio('Chose Options', ('Option 1', 'Option 2') )
if radio == 'Option 1':
st.markdown('Option 1')
if radio == 'Option 2':
st.markdown('Option 2')
st.markdown('SelectBox')
select = st.selectbox('Choose option', ('Option 1', 'Option 2'))
if select == 'Option 1':
st.markdown('Option 1')
if select == 'Option 2':
st.markdown('Option 2')
st.markdown('Multiselect')
    multi = st.multiselect('Choose:', ('Option 1', 'Option 2'))
    # st.multiselect returns a list of the selected options
    if 'Option 1' in multi:
        st.markdown('Option 1')
    if 'Option 2' in multi:
        st.markdown('Option 2')
st.markdown('File Uploader')
file = st.file_uploader('Upload File', type ='csv')
if file is not None:
st.markdown('Not empty')
if __name__ == '__main__':
main()
```
#### File: exercise_6/starter/run.py
```python
import argparse
import logging
import os
import tempfile
import pandas as pd
import wandb
from sklearn.model_selection import train_test_split
logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s")
logger = logging.getLogger()
def go(args):
run = wandb.init(project="exercise_6", job_type="split_data")
logger.info("Downloading and reading artifact")
artifact = run.use_artifact(args.input_artifact)
artifact_path = artifact.file()
df = pd.read_csv(artifact_path, low_memory=False)
# Split model_dev/test
logger.info("Splitting data into train and test")
splits = {}
splits["train"], splits["test"] = train_test_split(
df,
test_size=args.test_size,
random_state=args.random_state,
stratify = df[args.stratify] if args.stratify != 'null' else None # keep the same distribution in both dataframes
)
# Now we save the artifacts. We use a temporary directory so we do not leave
# any trace behind
with tempfile.TemporaryDirectory() as tmp_dir:
for split, df in splits.items():
# Make the artifact name from the provided root plus the name of the split
artifact_name = f"{args.artifact_root}_{split}.csv"
# Get the path on disk within the temp directory
temp_path = os.path.join(tmp_dir, artifact_name)
logger.info(f"Uploading the {split} dataset to {artifact_name}")
# Save then upload to W&B
df.to_csv(temp_path)
artifact = wandb.Artifact(
name=artifact_name,
type=args.artifact_type,
description=f"{split} split of dataset {args.input_artifact}",
)
artifact.add_file(temp_path)
logger.info("Logging artifact")
run.log_artifact(artifact)
# This waits for the artifact to be uploaded to W&B. If you
# do not add this, the temp directory might be removed before
# W&B had a chance to upload the datasets, and the upload
# might fail
artifact.wait()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Split a dataset into train and test",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--input_artifact",
type=str,
help="Fully-qualified name for the input artifact",
required=True,
)
parser.add_argument(
"--artifact_root",
type=str,
help="Root for the names of the produced artifacts. The script will produce 2 artifacts: "
"{root}_train.csv and {root}_test.csv",
required=True,
)
parser.add_argument(
"--artifact_type", type=str, help="Type for the produced artifacts", required=True
)
parser.add_argument(
"--test_size",
help="Fraction of dataset or number of items to include in the test split",
type=float,
required=True
)
parser.add_argument(
"--random_state",
help="An integer number to use to init the random number generator. It ensures repeatibility in the"
"splitting",
type=int,
required=False,
default=42
)
parser.add_argument(
"--stratify",
help="If set, it is the name of a column to use for stratified splitting",
type=str,
required=False,
        default='null'  # unfortunately mlflow does not handle optional parameters well
)
args = parser.parse_args()
go(args)
```
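Since the script is parameterised entirely through argparse, a direct call to `go` can be sketched as follows; the artifact names and split parameters are assumptions, and a configured Weights & Biases login is required for the W&B calls to succeed.
```python
# Illustrative sketch; artifact names and split parameters are assumptions,
# and wandb must be logged in for use_artifact/log_artifact to succeed.
import argparse

args = argparse.Namespace(
    input_artifact="exercise_6/raw_data.csv:latest",
    artifact_root="data",
    artifact_type="split_data",
    test_size=0.3,
    random_state=42,
    stratify="null",
)
go(args)
```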
#### File: exercise_1/starter/upload_artifact.py
```python
import argparse
import logging
import pathlib
import wandb
logging.basicConfig(level=logging.INFO, format="%(asctime)-15s %(message)s")
logger = logging.getLogger()
def go(args):
logger.info("Creating run exercise_1")
run = wandb.init(project='exercise_1', job_type = 'upload_file')
    logger.info('Creating an artifact')
artifact = wandb.Artifact(
name = args.artifact_name,
type = args.artifact_type,
description = args.artifact_description
)
logger.info('Adding file')
artifact.add_file(args.input_file)
logger.info('Logging artifact')
run.log_artifact(artifact)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Upload an artifact to W&B", fromfile_prefix_chars="@"
)
parser.add_argument(
"--input_file", type=pathlib.Path, help="Path to the input file", required=True
)
parser.add_argument(
"--artifact_name", type=str, help="Name for the artifact", required=True
)
parser.add_argument(
"--artifact_type", type=str, help="Type for the artifact", required=True
)
parser.add_argument(
"--artifact_description",
type=str,
help="Description for the artifact",
required=True,
)
args = parser.parse_args()
go(args)
``` |
{
"source": "jmcabreira/Dynamic-risk-assessment-system",
"score": 2
} |
#### File: jmcabreira/Dynamic-risk-assessment-system/deployment.py
```python
from flask import Flask, session, jsonify, request
import pandas as pd
import numpy as np
import pickle
import os
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import json
import shutil
##################Load config.json and correct path variable
with open('config.json','r') as f:
config = json.load(f)
ingested_files_path = os.path.join(config['output_folder_path'],'ingestedfiles.txt')
model_path = os.path.join(config['output_model_path'], 'trainedmodel.pkl')
score_path = os.path.join(config['output_model_path'], 'latestscore.txt')
prod_deployment_path = os.path.join(config['prod_deployment_path'])
files = [ingested_files_path, model_path, score_path]
if not os.path.exists(prod_deployment_path):
os.makedirs(prod_deployment_path)
####################function for deployment
def store_model_into_pickle():
'''
    Copy the latest pickle file, the latestscore.txt value,
    and the ingestedfiles.txt file into the deployment directory
    '''
    print('Copying the latest pickle file, the latestscore.txt value, and the ingestedfiles.txt file into the deployment directory')
for file in files:
print(f"Copying: {file}")
if os.path.isfile(file):
shutil.copy(src = file , dst = os.path.join(prod_deployment_path, file.split('/')[-1]))
else:
pass
if __name__ == '__main__':
store_model_into_pickle()
```
#### File: jmcabreira/Dynamic-risk-assessment-system/ingestion.py
```python
import pandas as pd
import numpy as np
import os
import json
from datetime import datetime
import glob
#############Load config.json and get input and output paths
with open('config.json','r') as f:
config = json.load(f)
input_folder_path = config['input_folder_path']
output_folder_path = config['output_folder_path']
if not os.path.exists(output_folder_path):
os.makedirs(output_folder_path)
#############Function for data ingestion
def merge_multiple_dataframe():
'''check for datasets, compile them together, and write to an output file '''
csv_files = glob.glob(input_folder_path + "/*.csv")
# Append files
data_list = []
for file in csv_files:
print(f'Ingesting file: {file}')
data_list.append(pd.read_csv(file, index_col = None))
df = pd.concat(data_list, axis = 0, ignore_index = True)
# Drop duplicates
df.drop_duplicates(inplace = True)
# Save file
print(f'Saving finaldata.csv and ingestedfiles.txt in : {output_folder_path} folder')
df.to_csv(os.path.join(output_folder_path,'finaldata.csv'), index = False)
#
ingested_files_pth = os.path.join(output_folder_path, 'ingestedfiles.txt')
with open(ingested_files_pth, 'w' ) as file:
file.write(json.dumps(csv_files))
if __name__ == '__main__':
merge_multiple_dataframe()
``` |
{
"source": "jmcabreira/Machine-Learning-Devops-Engineer",
"score": 3
} |
#### File: credit_card_customer_churn/src/churn_script_logging_and_tests.py
```python
import os
import logging
import churn_library as cls
logging.basicConfig(
filename='./logs/churn_library.log',
level = logging.INFO,
filemode='w',
format='%(name)s - %(levelname)s - %(message)s')
def test_import(import_data):
'''
test data import
'''
try:
df = import_data("./data/bank_data.csv")
logging.info("Testing import_data: SUCCESS")
except FileNotFoundError as err:
logging.error("Testing import_eda: The file wasn't found")
raise err
try:
assert df.shape[0] > 0
assert df.shape[1] > 0
except AssertionError as err:
logging.error("Testing import_data: The file doesn't appear to have rows and columns")
raise err
return df
def test_eda(perform_eda, df):
'''
test perform eda function
'''
try:
perform_eda(df)
path = "./images/eda"
except AssertionError as err:
logging.error("Error in perform_eda function")
raise err
# Checking if the list is empty or not
try:
# Getting the list of directories
dir_val = os.listdir(path)
assert len(dir_val) > 0
logging.info("Testing perform_eda: SUCCESS")
except AssertionError as err:
logging.warning("Testing perform_eda function: It seems that the image "
"has not been saved in the eda folder.")
raise err
def test_encoder_helper(encoder_helper,df):
'''
test encoder helper
'''
try:
cat_columns = ['Gender', 'Education_Level', 'Marital_Status',
'Income_Category', 'Card_Category']
df = encoder_helper(df, cat_columns, 'Churn')
except AssertionError as err:
logging.error("Error in encoder_helper function!")
raise err
try:
for col in cat_columns:
assert col in df.columns
logging.info("Testing encoder_helper: SUCCESS")
except AssertionError as err:
logging.error(
"Testing encoder_helper: The dataframe appears to be missing the "
"categorical columns transformation")
return err
return df
def test_perform_feature_engineering(perform_feature_engineering,df):
'''
test perform_feature_engineering
'''
try:
X_train, X_test, y_train, y_test = perform_feature_engineering(df, 'Churn')
except AssertionError as err:
logging.error("Error in perform_feature_engineering function")
raise err
try:
assert X_train.shape[0] > 0
assert X_test.shape[0] > 0
assert len(y_train) > 0
assert len(y_test) > 0
logging.info("Testing perform_feature_engineering function: SUCCESS")
except AssertionError as err:
logging.error("Testing perform_feature_engineering: "
"Missing objects that should be returned.")
raise err
return X_train, X_test, y_train, y_test
def test_train_models(train_models, X_train, X_test, y_train, y_test):
'''
test train_models
'''
try:
train_models(X_train, X_test, y_train, y_test)
path = "./images/results/"
    except Exception as err:
        logging.error("Error in train_models function!")
        raise err
try:
# Getting the list of directories
dir_val = os.listdir(path)
assert len(dir_val) > 0
except FileNotFoundError as err:
logging.error("Testing train_models function: Results image files not found")
raise err
path = "./models/"
try:
# Getting the list of directories
dir_val = os.listdir(path)
assert len(dir_val) > 0
logging.info("Testing train_models function: SUCCESS")
except FileNotFoundError as err:
logging.error("Testing train_models function: Model files not found")
raise err
if __name__ == "__main__":
DATA_FRAME = test_import(cls.import_data)
print(DATA_FRAME.shape)
test_eda(cls.perform_eda, DATA_FRAME)
DATA_FRAME = test_encoder_helper(cls.encoder_helper, DATA_FRAME)
X_TRAIN, X_TEST, Y_TRAIN, Y_TEST = test_perform_feature_engineering(
cls.perform_feature_engineering, DATA_FRAME)
test_train_models(cls.train_models, X_TRAIN, X_TEST, Y_TRAIN, Y_TEST)
``` |
{
"source": "jmcadden/rym-extractor",
"score": 3
} |
#### File: rym-extractor/rym/utility.py
```python
def ListToString(arr, sep=''):
return sep.join(arr).strip()
``` |
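A couple of illustrative calls to the helper above:
```python
# Illustrative sketch of ListToString.
print(ListToString(['a', 'b', 'c']))             # -> abc
print(ListToString(['rock', 'jazz'], sep=', '))  # -> rock, jazz
```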
{
"source": "jmcampanini/django-starter",
"score": 3
} |
#### File: django-starter/core/planeteer.py
```python
import os
def string_to_bool(s, default=False):
"""
Turns a string into a bool, giving a default value preference.
"""
if len(str(s).strip()) == 0:
return default
if default:
if s[0].upper() == "F":
return False
else:
return True
else:
if s[0].upper() == "T":
return True
else:
return False
def load_the_environment(env=None, verbose=False):
"""
Loads the environment files into the environment variables.
"""
if env is None:
env = "dev"
if env == "prod":
env_file = ".env"
else:
env_file = ".env.%s" % env
if os.path.exists(env_file):
if verbose:
print "Loading ENV File: `%s`" % env_file
with open(env_file, "r+") as f:
for curline in f:
clean_line = str(curline).strip()
if len(clean_line) > 0 and clean_line[0] != "#":
eqloc = curline.index("=")
var = str(curline[:eqloc]).strip()
val = str(curline[eqloc + 1:]).strip()
os.environ[var] = val
if verbose:
print "....Adding `%s` with value `%s`" % (var, val)
else:
if verbose:
print "Not Found - ENV File: `%s`" % env_file
``` |
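A small sketch of how `string_to_bool` resolves input against its default:
```python
# Illustrative sketch of string_to_bool.
assert string_to_bool("True") is True             # explicit "T..." wins over the default False
assert string_to_bool("false", default=True) is False
assert string_to_bool("", default=True) is True   # empty input falls back to the default
```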
{
"source": "JMCanning78/Transcrypt",
"score": 3
} |
#### File: demos/cyclejs_demo/cyclejs_http_demo.py
```python
dom, http = CycleDOM, CycleHTTPDriver
def render(user):
return dom.div('.users', [
dom.button('.get-random', 'Get random user'),
None if not user else dom.div('.user-details', [
dom.h1('.user-name' , user.name),
dom.h4('.user-email', user.email),
dom.a ('.user-website',
{'attrs': {'href': user.website}}, user.website)
])
])
def main(sources):
'''This **declares** your app. Evaluated only once(!)'''
def get_url_params():
rand = Math.round(Math.random() * 9) + 1
url = {
'url': 'http://jsonplaceholder.typicode.com/users/' + str(rand),
'category': 'users',
'method': 'GET'
}
console.log(url)
return url
get_random_user_s = sources.DOM \
.select('.get-random') \
.events('click') \
.map(get_url_params)
user_s = sources.HTTP.select('users') \
.flatten() \
.map(lambda res: res.body) \
.startWith(None)
vdom_s = user_s.map(render)
return { 'DOM' : vdom_s, 'HTTP': get_random_user_s}
Cycle.run(main, {
'DOM' : dom.makeDOMDriver('#app'),
'HTTP': http.makeHTTPDriver()
})
```
#### File: parcel_demo/example/main.py
```python
from testcontext import Test
def main():
'''Main function of the program (called from index.js)'''
# sibling module
with Test('import sibling') as test:
import sibling
test.result = sibling.sibling_func(test.random_num)
with Test('import sibling as alias_sibling') as test:
import sibling as alias_sibling
test.result = alias_sibling.sibling_func(test.random_num)
with Test('from sibling import sibling_func') as test:
from sibling import sibling_func
test.result = sibling_func(test.random_num)
with Test('from sibling import sibling_func as alias_sibling_func') as test:
from sibling import sibling_func as alias_sibling_func
test.result = alias_sibling_func(test.random_num)
# sibling2 module (using sibling2 because `import * from sibling` would overrride sibling_func above)
with Test('from sibling2 import *') as test:
from sibling2 import *
test.result = sibling2_func(test.random_num)
# siblingjs.js (Javascript file import)
with Test('import siblingjs') as test:
import siblingjs
test.result = siblingjs.siblingjs_func(test.random_num)
with Test('import siblingjs as alias_siblingjs') as test:
import siblingjs as alias_siblingjs
test.result = alias_siblingjs.siblingjs_func(test.random_num)
with Test('from siblingjs import siblingjs_func') as test:
from siblingjs import siblingjs_func
test.result = siblingjs_func(test.random_num)
with Test('from siblingjs import siblingjs_func as alias_siblingjs_func') as test:
from siblingjs import siblingjs_func as alias_siblingjs_func
test.result = alias_siblingjs_func(test.random_num)
# mymod package (__init__.py file)
with Test('import mymod') as test:
import mymod
test.result = mymod.mymod_func(test.random_num)
with Test('import mymod as alias_mymod') as test:
import mymod as alias_mymod
test.result = alias_mymod.mymod_func(test.random_num)
with Test('from mymod import mymod_func') as test:
from mymod import mymod_func
test.result = mymod_func(test.random_num)
with Test('from mymod import mymod_func as alias_mymod_func') as test:
from mymod import mymod_func as alias_mymod_func
test.result = alias_mymod_func(test.random_num)
# mymod.child (subdir module)
with Test('import mymod.child') as test:
import mymod.child
test.result = mymod.child.child_func(test.random_num)
with Test('alias_child.child_func') as test:
import mymod.child as alias_child
test.result = alias_child.child_func(test.random_num)
with Test('from mymod.child import child_func') as test:
from mymod.child import child_func
test.result = child_func(test.random_num)
with Test('from mymod.child import child_func as alias_child_func') as test:
from mymod.child import child_func as alias_child_func
test.result = alias_child_func(test.random_num)
with Test('import mymod.grandchildmod') as test:
import mymod.grandchildmod
test.result = mymod.grandchildmod.grandchildmod_func(test.random_num)
with Test('import mymod.grandchildmod as alias_grandchildmod') as test:
import mymod.grandchildmod as alias_grandchildmod
test.result = alias_grandchildmod.grandchildmod_func(test.random_num)
with Test('from mymod.grandchildmod.grandchild import grandchild_func') as test:
from mymod.grandchildmod.grandchild import grandchild_func
test.result = grandchild_func(test.random_num)
with Test('from mymod.grandchildmod.grandchild import grandchild_func as alias_grandchild_func') as test:
from mymod.grandchildmod.grandchild import grandchild_func as alias_grandchild_func
test.result = alias_grandchild_func(test.random_num)
```
#### File: mymod/grandchildmod/__init__.py
```python
from ...constants import ONE_HUNDRED
# imported package (__init__.py) two levels deep
def grandchildmod_func(val):
return val + ONE_HUNDRED
```
#### File: example/mymod/__init__.py
```python
from ..constants import ONE_HUNDRED
# imported package (__init__.py file) from subdir
def mymod_func(val):
return val + ONE_HUNDRED
```
#### File: demos/three_demo/three_demo.py
```python
from org.threejs import api
scene = api.Scene ();
camera = api.PerspectiveCamera (30, window.innerWidth/window.innerHeight, 0.1, 1000)
renderer = api.WebGLRenderer ()
renderer.setSize (window.innerWidth, window.innerHeight)
document.body.appendChild (renderer.domElement)
geometry = api.BoxGeometry (1, 1, 1 )
material = api.MeshLambertMaterial ({'color': 0xffffff})
cube = api.Mesh (geometry, material)
scene.add (cube)
ambientLight = api.AmbientLight (0x0000ff, 0.5)
scene.add (ambientLight )
directionalLight0 = api.DirectionalLight (0xff0000, 0.5)
scene.add (directionalLight0);
directionalLight1 = api.DirectionalLight (0x00ff00, 0.5)
directionalLight1.position.set (50, 50, 50)
scene.add (directionalLight1)
camera.position.z = 5
def render ():
requestAnimationFrame (render)
cube.rotation.x += 0.01
cube.rotation.y += 0.01
renderer.render (scene, camera)
render ()
# The above are just examples, all needed constructors can be added to org.threejs.api in this way
# THREE is just left in the global namespace, as three.js itself seems to need it
```
#### File: automated_tests/__future__/division.py
```python
from __future__ import division
def _check(x, test):
# Floats have different precision/representation in js and python
# Limit precision to 15 digits and convert to int if float is int
# See transcrypt/module_math for similar function
# 42.0 is 42
if x == int(x):
x = int(x)
# 15 first digits
if isinstance(x, float):
x = str(x)[:15]
test.check(x)
def run(test):
check = lambda x: _check(x, test)
for i in range(1, 10):
check(42 / i)
check(i / 42)
check(42 // i)
check(i // 42)
```
#### File: automated_tests/__future__/nested_scopes.py
```python
from __future__ import nested_scopes
def run(test):
def foo():
x = 42
def bar():
test.check(x)
bar()
foo()
```
#### File: automated_tests/__future__/unicode_literals.py
```python
from __future__ import unicode_literals
def run(test):
test.check('Hello, world!')
test.check(u'Hello, world!')
# TODO: byte strings are not supported?
# test.check(b'Hello, world!')
```
#### File: automated_tests/time/mult_time.py
```python
import time
def run (autoTester):
t = [2000, 1, 1, 1, 1, 1, 1, 1, 0]
def check(fmt):
s = time.mktime(tuple(t))
autoTester.check('gmtime' , tuple(time.gmtime(int(s))))
autoTester.check('localtime', tuple(time.localtime(int(s))))
autoTester.check('mktime' , int(s))
autoTester.check('ctime' , int(s))
for hour in (0, 1, 12, 14, 23):
t[3] = hour
for f in (
'%p %I.%d.%Y'
,'%b .%d.%y'
,'%b .%d.%Y'
,'%d%m%Y%H:%M:%S%p'
,'%b .%d.%Y'
,'M%m.%d.%Y'
,'%m.%d.%Y'
,'%m.%d.%Y'
,'%b .%d.%Y'
,'%m.%d.%Y'
,'%B %d.%Y'
,'%a %b %d %H:%M:%S %Y'
,'%d.%m.%Y %I:%M:%S%p'
,'%a%b %d %H:%M:%S %Y'
,'%a%b%d %H:%M:%S %Y'
,'%a%b%d%H:%Mx%S%Y'
,'%a%b%d%H:%Mxx%S%Y'
,'%a%b%d%H:%Mxx%S%Y +000'
,' %a%b%d%H:%Mxx%S%Y +000 '
): check(f)
autoTester.check('asctime', t)
```
#### File: transcrypt/async_await_UNDER_CONSTRUCTION/__init__.py
```python
from org.transcrypt.stubs.browser import __pragma__, __envir__
# Note that CPython will ignore all pragma's
# Provide waitAWhile for Transcrypt
__pragma__ ('js', '{}', '''
function waitAWhile (aTime) {
return new Promise (resolve => {
setTimeout (() => {
resolve (aTime);
}, 1000 * aTime);
});
}
''')
# Provide waitAWhile for CPython
__pragma__ ('skip') # Compile time, needed because import is done compile time
import asyncio
def waitAWhile (aTime):
return asyncio.sleep (aTime)
__pragma__ ('noskip')
# Actual code to be tested
async def run (autoTester):
counter = 0
async def f ():
autoTester.check ('f0')
await waitAWhile (2)
autoTester.check ('f1')
nonlocal counter
counter += 1
async def g ():
autoTester.check ('g0')
await waitAWhile (2)
autoTester.check ('g1')
nonlocal counter
counter += 1
autoTester.check ('BEGIN async/await test')
if __envir__.executor_name == __envir__.transpiler_name:
f ()
g ()
g ()
f ()
else:
eventLoop = asyncio.get_event_loop ()
tasks = [
eventLoop.create_task (f ()),
eventLoop.create_task (g ()),
eventLoop.create_task (g ()),
eventLoop.create_task (f ()),
]
waitingTasks = asyncio.wait (tasks)
eventLoop.run_until_complete (waitingTasks)
eventLoop.close ()
autoTester.check ('END async/await test')
```
#### File: issue559/mylib/mylib.py
```python
def mylibHello (autoTester):
autoTester.check ('Hello World Function')
class mylibClass:
def __init__ (self, autoTester):
self.autoTester = autoTester
self.autoTester.check ('Hello World Class')
def checkSymbols (self):
self.autoTester.check (sorted ([x for x in globals () if x.startswith ("my")])) # __: iconv
```
#### File: transcrypt/docstrings/__init__.py
```python
from org.transcrypt.stubs.browser import __pragma__
__pragma__ ('docat')
def run (autoTester):
def f (p):
'''Just a function
called f'''
'''Not visible'''
autoTester.check (p)
class C:
'''Just a class
called C'''
'''Not visible'''
def g (self, q):
'''Just a method
called g'''
'''Not visible'''
autoTester.check (q)
autoTester.check (__doc__)
autoTester.check ()
autoTester.check (f.__doc__)
autoTester.check ()
autoTester.check (C.__doc__)
autoTester.check ()
autoTester.check (C.g.__doc__)
autoTester.check ()
f ('Doc')
C () .g ('strings')
```
#### File: transcrypt/globals_function/sub.py
```python
xxa = 'subXxa'
xxb = 'subXxb'
xxp = None
xxq = None
xxr = None
xxs = None
for name in ('xxp', 'xxq'):
globals () [name] = 'sub{}'.format (name.capitalize ())
def f ():
for name in ('xxr', 'xxs'):
globals () [name] = 'sub{}'.format (name.capitalize ())
def run (autoTester):
f ()
autoTester.check ('Check sub 1', xxa, xxb)
autoTester.check ('Check sub 2', * [globals () [name] for name in ('xxa', 'xxb', 'xxp', 'xxq', 'xxr', 'xxs')])
autoTester.check ('Check sub 3', * sorted ([value for key, value in globals () .items () if key.startswith ('xx')]))
```
#### File: modules/mod2/mod21.py
```python
def f ():
return 'London is the town for me\n'
```
#### File: transcrypt/modules/mod3.py
```python
x = 'Toen wij uit Rotterdam vertrokken, vertrokken wij uit Rotterdam\n'
mod3Hundred = 100
def mod3GetTwoHundred ():
return 200
```
#### File: transcrypt/modules/mod4.py
```python
from modules.mod5 import mod5Add2
def mod4Add2FromMod5 (variable):
return mod5Add2 (variable)
def mod4Add1(variable):
return variable + 1
```
#### File: transcrypt/module_unicodedata/__init__.py
```python
from unicodedata import normalize
def run(autoTester):
autoTester.check('NFC:')
# test ç and C+◌̧
autoTester.check(normalize('NFC', 'c' + '\u0327'))
autoTester.check(normalize('NFC', '\u00e7'))
autoTester.check((normalize('NFC', 'c' + '\u0327') == normalize('NFC', '\u00e7')) == True)
# test q+◌̇+◌̣ and q+◌̣+◌̇
autoTester.check(normalize('NFC', 'q\u0307\u0323'))
autoTester.check(normalize('NFC', 'q\u0323\u0307'))
autoTester.check((normalize('NFC', 'q\u0323\u0307') == normalize('NFC', 'q\u0307\u0323')) == True)
# test 가 and ᄀ+ᅡ
autoTester.check(normalize('NFC', '가'))
autoTester.check(normalize('NFC', 'ᄀ' + 'ᅡ'))
autoTester.check((normalize('NFC', '가') == normalize('NFC', 'ᄀ' + 'ᅡ')) == True)
# test Ω
autoTester.check(normalize('NFC', 'Ω'))
autoTester.check('NFD:')
# test ç and C+◌̧
autoTester.check(normalize('NFD', 'c' + '\u0327'))
autoTester.check(normalize('NFD', '\u00e7'))
autoTester.check((normalize('NFD', 'c' + '\u0327') == normalize('NFD', '\u00e7')) == True)
# test q+◌̇+◌̣ and q+◌̣+◌̇
autoTester.check(normalize('NFD', 'q\u0307\u0323'))
autoTester.check(normalize('NFD', 'q\u0323\u0307'))
autoTester.check((normalize('NFD', 'q\u0323\u0307') == normalize('NFD', 'q\u0307\u0323')) == True)
# test 가 and ᄀ+ᅡ
autoTester.check(normalize('NFD', '가'))
autoTester.check(normalize('NFD', 'ᄀ' + 'ᅡ'))
autoTester.check((normalize('NFD', '가') == normalize('NFD', 'ᄀ' + 'ᅡ')) == True)
# test Ω
autoTester.check(normalize('NFD', 'Ω'))
```
#### File: transcrypt/set_comprehensions/__init__.py
```python
def run (autoTester):
even = {2 * i for i in [0, 9, 1, 7, 2, 8, 3, 6, 4, 5]}
autoTester.check (even)
odd = {2 * i + 1 for i in [5, 6, 7, 8, 9, 4, 3, 1, 2, 0]}
autoTester.check (odd)
even.add (12)
even.add (12)
autoTester.check (even)
even.discard (12)
even.discard (12)
autoTester.check (even)
uni = even.union (odd)
autoTester.check (uni)
autoTester.check (odd.isdisjoint (even))
autoTester.check (uni.isdisjoint (even))
autoTester.check (even.issuperset (uni))
autoTester.check (uni.issuperset (even))
autoTester.check (even.issubset (uni))
autoTester.check (uni.issubset (even))
first = {4, 1, 0, 5, 3, 2, 6}
autoTester.check (first)
second = {3, 5, 6, 9, 4, 7, 8}
autoTester.check (second)
inter = first.intersection (second)
autoTester.check (inter)
diff = first.difference (second)
autoTester.check (diff)
symDiff = first.symmetric_difference (second)
autoTester.check (symDiff)
aSet = {200, 4, 5, 100}
aSet.update (first, symDiff, second)
autoTester.check (aSet)
```
#### File: automated_tests/warnings/basic_tests.py
```python
from org.transcrypt.stubs.browser import __pragma__, __envir__
import warnings
import logging
class TestHandler(logging.Handler):
""" This handler is intended to make it easier to test the
logging module output without requiring the console or the
sys.stderr.
"""
def __init__(self, test, level):
"""
"""
logging.Handler.__init__(self, level)
self._test = test
def emit(self, record):
"""
"""
msg = self.format(record)
# @note - I'm using strip on the end of the record message
# because there are spaces and newlines after the message
# that are not substantive but do make the unit test
# more difficult
# Python includes the line content in the source file at
# the specified line which we can't really do in js right now
content = msg.split('\n')
if ( len(content) > 0 ):
checkMsg = content[0].rstrip()
self._test.check(checkMsg)
else:
self._test.check("Invalid Content in Warning message")
def run(test):
# This message should go to the console - has to be checked manually
warnings.warn_explicit(
"Console Test Message", UserWarning, "basic_tests.py", 37, "asdf", {}
)
# Setup logger so that we can capture test output
# and show in the logger.
logging.captureWarnings(True)
logger = logging.getLogger("py.warnings")
logger.setLevel(10)
hdlr = TestHandler(test, 10)
logger.addHandler(hdlr)
msgStr = "Test Message"
# The registry info we pass in replaces the
# warnings.__warningregistry__ dict so that we
# can more accurately test.
reg = {}
# @note - it is very difficult to compare warnings.warn against
# python - python just has more information available and so
# the content is much more diverse.
#warnings.warn("Invalid asdf asdf asdf")
warnings.warn_explicit(
msgStr, UserWarning, "basic_tests.py", 50, "asdf", reg
)
warnings.warn_explicit(
msgStr, UserWarning, "basic_tests.py", 53, "asdf", reg
)
warnings.warn_explicit(
msgStr, UserWarning, "basic_tests.py", 57, "asdf", reg
)
# @note - this message should not generate
warnings.warn_explicit(
msgStr, UserWarning, "basic_tests.py", 57, "asdf", reg
)
# This message should generate
warnings.warn_explicit(
msgStr + " blarg", UserWarning, "basic_tests.py", 57, "asdf", reg
)
# this message should not generate
warnings.warn_explicit(
msgStr + " blarg", UserWarning, "basic_tests.py", 57, "asdf", reg
)
reg = {}
class CustomWarning(Warning):
"""
"""
pass
if __envir__.executor_name == __envir__.transpiler_name:
warnings.addWarningCategory(CustomWarning)
# This sets the warnings module to generate an exception
# on a particular warning category.
warnings.filterwarnings("error", category=CustomWarning)
test.check( test.expectException( lambda: warnings.warn_explicit(
"This is a custom msg", CustomWarning,
"basic_tests.py", 91, "zxcv", reg
)))
warnings.filterwarnings("once", category=RuntimeWarning)
msg = "This is a once message - should not occur more than once"
warnings.warn_explicit(
msg, RuntimeWarning,
"basic_tests.py", 100, "trew", reg
)
# @note- this message should not be generated
for i in range(0,10):
warnings.warn_explicit(
msg, RuntimeWarning, "basic_tests.py", 102+i,
"qwerqwer" + str(i), reg
)
warnings.filterwarnings("always", message = "asdf", category=DeprecationWarning)
# these 3 messages should not generate
warnings.warn_explicit(
" no Message Here ", DeprecationWarning, "basic_tests.py", 112,
"itururue", reg
)
warnings.warn_explicit(
"Warning - asdf of qwer", DeprecationWarning, "basic_tests.py", 112,
"itururue", reg
)
warnings.warn_explicit(
"Warning - asdfqwer of qwer", DeprecationWarning, "basic_tests.py", 112,
"itururue", reg
)
# These messages should generate
warnings.warn_explicit(
"asdf of qwer", DeprecationWarning, "basic_tests.py", 112,
"itururue", reg
)
warnings.warn_explicit(
"asdf of qwer", UserWarning, "basic_tests.py", 112,
"itururue", reg
)
# Warning with object instead of string message
warnings.warn_explicit(
UserWarning("asdf"), None, "basic_tests.py", 1234, "qwerqwe", reg
)
```
#### File: static_types/mod2/__init__.py
```python
def test (i: str) -> str:
return 3
```
#### File: development/shipment/shipment_test.py
```python
import os
import os.path
import sys
import datetime
import webbrowser
import argparse
import time
import traceback
import selenium
import selenium.webdriver.chrome.options
import pathlib
# ======== Command args singleton
class CommandArgs:
def __init__ (self):
self.argParser = argparse.ArgumentParser ()
self.argParser.add_argument ('-de', '--dextex', help = "show extended exception reports", action = 'store_true')
self.argParser.add_argument ('-f', '--fcall', help = 'test fast calls', action = 'store_true')
self.argParser.add_argument ('-i', '--inst', help = 'installed version rather than new one', action = 'store_true')
self.argParser.add_argument ('-b', '--blind', help = 'don\'t start browser', action = 'store_true')
self.argParser.add_argument ('-u', '--unattended', help = 'unattended mode', action = 'store_true')
self.__dict__.update (self.argParser.parse_args () .__dict__)
commandArgs = CommandArgs ()
# ======== Browser controller singleton
class BrowserController:
def __init__ (self):
self.options = selenium.webdriver.chrome.options.Options ()
self.options.add_argument ('start-maximized')
if commandArgs.unattended:
self.options.add_argument ('--headless') # Runs Chrome in headless mode.
self.options.add_argument ('--no-sandbox') # Bypass OS security model
self.options.add_argument ('--disable-gpu') # Applicable to windows OS only
self.options.add_argument ('disable-infobars')
self.options.add_argument ('--disable-extensions')
self.webDriver = selenium.webdriver.Chrome (chrome_options = self.options)
self.nrOfTabs = 0
def waitForNewTab (self):
while len (self.webDriver.window_handles) <= self.nrOfTabs:
time.sleep (0.5)
self.nrOfTabs = len (self.webDriver.window_handles)
def open (self, url, run):
print (f'Browser controller is opening URL: {url}')
try:
if self.nrOfTabs > 0:
if commandArgs.unattended:
# ---- Show in existing tab
self.webDriver.execute_script (f'window.location.href = "{url}";')
else:
# ---- Open new tab
self.webDriver.execute_script (f'window.open ("{url}","_blank");') # !!! Avoid redundant open command
self.waitForNewTab ()
self.webDriver.switch_to.window (self.webDriver.window_handles [-1])
else:
# ---- Open browser and default tab
self.webDriver.get (url)
self.waitForNewTab ()
except:
self.webDriver.switch_to.alert.accept()
if run:
while (True):
self.message = self.webDriver.find_element_by_id ('message')
if 'failed' in self.message.text or 'succeeded' in self.message.text:
break
time.sleep (0.5)
print ()
print ('=========================================================================')
print (f'Back to back autotest, result: {self.message.text.upper ()}')
print ('=========================================================================')
print ()
if 'succeeded' in self.message.text:
return True
else:
return False
else:
print ()
print ('=========================================================================')
print ('No back to back autotest')
print ('=========================================================================')
print ()
return True
browserController = BrowserController ()
# ======== Preparations
relSourcePrepathsOfErrors = []
host = 'http://localhost:'
pythonServerPort = '8000'
parcelServerPort = '8001'
nodeServerPort = '8002'
pythonServerUrl = host + pythonServerPort
parcelServerUrl = host + parcelServerPort
nodeServerUrl = host + nodeServerPort
transpileCommand = 'transcrypt' if commandArgs.inst else 'run_transcrypt'
shipDir = os.path.dirname (os.path.abspath (__file__)) .replace ('\\', '/')
appRootDir = '/'.join (shipDir.split ('/')[ : -2])
print (f'\nApplication root directory: {appRootDir}\n')
def getAbsPath (relPath):
return '{}/{}'.format (appRootDir, relPath)
os.system ('cls' if os.name == 'nt' else 'clear')
# ---- Start an http server in the Transcryp/transcrypt directory
if not commandArgs.blind:
if commandArgs.unattended:
os.system (f'python -m http.server --directory {appRootDir} &')
else:
os.system (f'start python -m http.server --directory {appRootDir}')
# ---- Allow visual check of all command line options
os.system (f'{transpileCommand} -h')
# ======== Individual test function
def test (relSourcePrepath, run, extraSwitches, messagePrename = '', nodeJs = False, parcelJs = False, build = True, pause = 0, needsAttention = False):
if commandArgs.unattended and needsAttention:
return # This test shouldn't be done, since it can't run unattended
print (f'\n\n******** BEGIN TEST {relSourcePrepath} ********\n')
time.sleep (pause)
# ---- Compute some slugs
sourcePrepath = getAbsPath (relSourcePrepath)
sourcePrepathSplit = sourcePrepath.split ("/")
sourceDir = '/'.join (sourcePrepathSplit [:-1])
moduleName = sourcePrepathSplit [-1]
targetDir = f'{sourceDir}/__target__'
targetPrepath = f'{targetDir}/{moduleName}'
messagePrepath = f'{targetDir}/{messagePrename}'
# ---- If there are relevant console messages of the compilation process,
# like with the static typechecking tests, write them into a file that can be served for a visual check
if not os.path.exists (targetDir):
os.makedirs (targetDir) # Transcrypt will make targetDir too late, so it has to happen here
redirect = f' > {messagePrepath}.out' if messagePrename else ''
# ---- Default switches
defaultSwitches = '-da -sf -de -m -n '
if commandArgs.dextex:
defaultSwitches += '-de '
if build:
defaultSwitches += '-b '
# ---- Run with CPython to generate HTML file with back to back reference info
if run:
os.system (f'{transpileCommand} -r {defaultSwitches}{extraSwitches}{sourcePrepath}')
# ---- Compile with Transcrypt
if parcelJs:
origDir = os.getcwd ()
os.chdir (sourceDir)
os.system (f'start cmd /k node test {parcelServerPort}')
os.chdir (origDir)
else:
os.system (f'{transpileCommand} {defaultSwitches}{extraSwitches}{sourcePrepath}{redirect}')
# ---- If it has to run on node, apply rollup to obtain a monolith, since node doesn't support named imports and exports
if nodeJs:
os.system (f'rollup {targetPrepath}.js --o {targetPrepath}.bundle.js --f cjs')
# --- Compute appropriate URL and wait a while if needed
if not commandArgs.blind:
if parcelJs:
time.sleep (20)
url = parcelServerUrl
elif nodeJs:
os.system (f'start cmd /k node {targetPrepath}.bundle.js {nodeServerPort}')
time.sleep (5)
url = nodeServerUrl
else:
url = f'{pythonServerUrl}/{relSourcePrepath}.html'
success = browserController.open (url, run)
if commandArgs.unattended and not success:
relSourcePrepathsOfErrors.append (relSourcePrepath)
print (f'\n******** END TEST {relSourcePrepath} ********\n\n')
# ======== Perform individual tests
for switches in (('', '-f ') if commandArgs.fcall else ('',)):
test ('development/automated_tests/hello/autotest', True, switches)
test ('development/automated_tests/transcrypt/autotest', True, switches + '-c -xr -xg ')
test ('development/automated_tests/time/autotest', True, switches, needsAttention = True)
test ('development/automated_tests/re/autotest', True, switches)
test ('development/manual_tests/async_await/test', False, switches)
test ('development/manual_tests/import_export_aliases/test', False, switches + '-am ')
test ('development/manual_tests/module_random/module_random', False, switches)
test ('development/manual_tests/static_types/static_types', False, switches + '-ds -dc ', messagePrename = 'static_types')
test ('development/manual_tests/transcrypt_and_python_results_differ/results', False, switches)
test ('development/manual_tests/transcrypt_only/transcrypt_only', False, switches)
test ('demos/nodejs_demo/nodejs_demo', False, switches, nodeJs = True)
test ('demos/parcel_demo/test_shipment', False, switches, parcelJs = True)
test ('demos/terminal_demo/terminal_demo', False, switches, needsAttention = True)
test ('demos/hello/hello', False, switches, needsAttention = False)
test ('demos/jquery_demo/jquery_demo', False, switches)
test ('demos/d3js_demo/d3js_demo', False, switches)
test ('demos/ios_app/ios_app', False, switches)
test ('demos/react_demo/react_demo', False, switches)
test ('demos/riot_demo/riot_demo', False, switches)
test ('demos/plotly_demo/plotly_demo', False, switches)
test ('demos/three_demo/three_demo', False, switches)
test ('demos/pong/pong', False, switches)
test ('demos/pysteroids_demo/pysteroids', False, switches)
test ('demos/turtle_demos/star', False, switches, pause = 2)
test ('demos/turtle_demos/snowflake', False, switches, pause = 2)
test ('demos/turtle_demos/mondrian', False, switches, pause = 2)
test ('demos/turtle_demos/mandala', False, switches, pause = 2)
# test ('demos/cyclejs_demo/cyclejs_demo', False, switches)
test ('demos/cyclejs_demo/cyclejs_http_demo', False, switches)
test ('demos/cyclejs_demo/component_demos/isolated_bmi_slider/bmi', False, switches)
test ('demos/cyclejs_demo/component_demos/labeled_slider/labeled_slider', False, switches)
test ('tutorials/baseline/bl_010_hello_world/hello_world', False, switches)
test ('tutorials/baseline/bl_020_assign/assign', False, switches)
test ('tutorials/baseline/bl_030_if_else_prompt/if_else_prompt', False, switches, needsAttention = True)
test ('tutorials/baseline/bl_035_if_else_event/if_else_event', False, switches, needsAttention = True)
test ('tutorials/baseline/bl_040_for_simple/for_simple', False, switches)
test ('tutorials/baseline/bl_042_for_nested/for_nested', False, switches)
test ('tutorials/baseline/bl_045_while_simple/while_simple', False, switches, needsAttention = True)
test ('tutorials/static_typing/static_typing', False, switches + '-c -ds ', messagePrename = 'static_typing')
if relSourcePrepathsOfErrors:
print ('\n\n!!!!!!!!!!!!!!!!!!!!\n')
for relSourcePrepathOfError in relSourcePrepathsOfErrors:
print (f'SHIPMENT TEST ERROR: {relSourcePrepathOfError}')
print ('\n!!!!!!!!!!!!!!!!!!!!\n\n')
print ('\nSHIPMENT TEST FAILED\n')
sys.exit (1)
else:
# ---- Make docs, the resulting files are untracked
if not commandArgs.unattended:
origDir = os.getcwd ()
sphinxDir = '/'.join ([appRootDir, 'docs/sphinx'])
os.chdir (sphinxDir)
os.system ('touch *.rst')
os.system ('make html')
os.chdir (origDir)
# ---- Terminate
print ('\nSHIPMENT TEST SUCCEEDED\n')
sys.exit (0)
```
#### File: modules/math/__init__.py
```python
pi = Math.PI
e = Math.E
exp = Math.exp
def expm1 (x): # IE workaround
return Math.exp (x) - 1
def log (x, base):
return Math.log (x) if base is js_undefined else Math.log (x) / Math.log (base)
def log1p (x): # IE workaround
return Math.log (x + 1)
def log2 (x): # IE workaround
return Math.log (x) / Math.LN2
def log10 (x): # IE workaround
return Math.log (x) / Math.LN10
pow = Math.pow
sqrt = Math.sqrt
sin = Math.sin
cos = Math.cos
tan = Math.tan
asin = Math.asin
acos = Math.acos
atan = Math.atan
atan2 = Math.atan2
hypot = Math.hypot
def degrees (x):
return x * 180 / Math.PI
def radians (x):
return x * Math.PI / 180
sinh = Math.sinh
cosh = Math.cosh
tanh = Math.tanh
asinh = Math.asinh
acosh = Math.acosh
atanh = Math.atanh
floor = Math.floor
ceil = Math.ceil
trunc = Math.trunc
isnan = js_isNaN
inf = js_Infinity
nan = js_NaN
def modf(n):
sign = 1 if n >= 0 else -1
f, mod = divmod (abs(n), 1)
return mod * sign, f * sign
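# Illustrative check (added comment, not part of the original module):
# modf(2.5) == (0.5, 2.0) and modf(-2.5) == (-0.5, -2.0); the fractional part
# comes first and both parts carry the sign of n, mirroring CPython's math.modf.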
```
#### File: pyflakes/test/test_doctests.py
```python
import textwrap
from pyflakes import messages as m
from pyflakes.test.test_other import Test as TestOther
from pyflakes.test.test_imports import Test as TestImports
from pyflakes.test.test_undefined_names import Test as TestUndefinedNames
from pyflakes.test.harness import TestCase, skip
class _DoctestMixin(object):
withDoctest = True
def doctestify(self, input):
lines = []
for line in textwrap.dedent(input).splitlines():
if line.strip() == '':
pass
elif (line.startswith(' ') or
line.startswith('except:') or
line.startswith('except ') or
line.startswith('finally:') or
line.startswith('else:') or
line.startswith('elif ')):
line = "... %s" % line
else:
line = ">>> %s" % line
lines.append(line)
doctestificator = textwrap.dedent('''\
def doctest_something():
"""
%s
"""
''')
return doctestificator % "\n ".join(lines)
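# Added example (not part of pyflakes itself): doctestify("import foo\nfoo") returns
# def doctest_something():
#     """
#     >>> import foo
#     >>> foo
#     """
# so each inherited test case re-runs its snippet from inside a doctest.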
def flakes(self, input, *args, **kw):
return super(_DoctestMixin, self).flakes(self.doctestify(input), *args, **kw)
class Test(TestCase):
withDoctest = True
def test_importBeforeDoctest(self):
self.flakes("""
import foo
def doctest_stuff():
'''
>>> foo
'''
""")
@skip("todo")
def test_importBeforeAndInDoctest(self):
self.flakes('''
import foo
def doctest_stuff():
"""
>>> import foo
>>> foo
"""
foo
''', m.RedefinedWhileUnused)
def test_importInDoctestAndAfter(self):
self.flakes('''
def doctest_stuff():
"""
>>> import foo
>>> foo
"""
import foo
foo()
''')
def test_offsetInDoctests(self):
exc = self.flakes('''
def doctest_stuff():
"""
>>> x # line 5
"""
''', m.UndefinedName).messages[0]
self.assertEqual(exc.lineno, 5)
self.assertEqual(exc.col, 12)
def test_offsetInLambdasInDoctests(self):
exc = self.flakes('''
def doctest_stuff():
"""
>>> lambda: x # line 5
"""
''', m.UndefinedName).messages[0]
self.assertEqual(exc.lineno, 5)
self.assertEqual(exc.col, 20)
def test_offsetAfterDoctests(self):
exc = self.flakes('''
def doctest_stuff():
"""
>>> x = 5
"""
x
''', m.UndefinedName).messages[0]
self.assertEqual(exc.lineno, 8)
self.assertEqual(exc.col, 0)
def test_syntaxErrorInDoctest(self):
exceptions = self.flakes(
'''
def doctest_stuff():
"""
>>> from # line 4
>>> fortytwo = 42
>>> except Exception:
"""
''',
m.DoctestSyntaxError,
m.DoctestSyntaxError,
m.DoctestSyntaxError).messages
exc = exceptions[0]
self.assertEqual(exc.lineno, 4)
self.assertEqual(exc.col, 26)
exc = exceptions[1]
self.assertEqual(exc.lineno, 5)
self.assertEqual(exc.col, 16)
exc = exceptions[2]
self.assertEqual(exc.lineno, 6)
self.assertEqual(exc.col, 18)
def test_indentationErrorInDoctest(self):
exc = self.flakes('''
def doctest_stuff():
"""
>>> if True:
... pass
"""
''', m.DoctestSyntaxError).messages[0]
self.assertEqual(exc.lineno, 5)
self.assertEqual(exc.col, 16)
def test_offsetWithMultiLineArgs(self):
(exc1, exc2) = self.flakes(
'''
def doctest_stuff(arg1,
arg2,
arg3):
"""
>>> assert
>>> this
"""
''',
m.DoctestSyntaxError,
m.UndefinedName).messages
self.assertEqual(exc1.lineno, 6)
self.assertEqual(exc1.col, 19)
self.assertEqual(exc2.lineno, 7)
self.assertEqual(exc2.col, 12)
def test_doctestCanReferToFunction(self):
self.flakes("""
def foo():
'''
>>> foo
'''
""")
def test_doctestCanReferToClass(self):
self.flakes("""
class Foo():
'''
>>> Foo
'''
def bar(self):
'''
>>> Foo
'''
""")
def test_noOffsetSyntaxErrorInDoctest(self):
exceptions = self.flakes(
'''
def buildurl(base, *args, **kwargs):
"""
>>> buildurl('/blah.php', ('a', '&'), ('b', '=')
'/blah.php?a=%26&b=%3D'
>>> buildurl('/blah.php', a='&', 'b'='=')
'/blah.php?b=%3D&a=%26'
"""
pass
''',
m.DoctestSyntaxError,
m.DoctestSyntaxError).messages
exc = exceptions[0]
self.assertEqual(exc.lineno, 4)
exc = exceptions[1]
self.assertEqual(exc.lineno, 6)
def test_singleUnderscoreInDoctest(self):
self.flakes('''
def func():
"""A docstring
>>> func()
1
>>> _
1
"""
return 1
''')
class TestOther(_DoctestMixin, TestOther):
pass
class TestImports(_DoctestMixin, TestImports):
def test_futureImport(self):
"""XXX This test can't work in a doctest"""
def test_futureImportUsed(self):
"""XXX This test can't work in a doctest"""
class TestUndefinedNames(_DoctestMixin, TestUndefinedNames):
def test_doubleNestingReportsClosestName(self):
"""
Lines in doctest are a bit different so we can't use the test
from TestUndefinedNames
"""
exc = self.flakes('''
def a():
x = 1
def b():
x = 2 # line 7 in the file
def c():
x
x = 3
return x
return x
return x
''', m.UndefinedLocal).messages[0]
self.assertEqual(exc.message_args, ('x', 7))
```
#### File: transcrypt/type_check/__init__.py
```python
import sys
import os
import subprocess
import traceback
try:
from mypy import api
except:
print ('Could not find mypy')
from org.transcrypt import utils
def run (sourcePath):
utils.log (True, 'Performing static type validation on application: {}\n', sourcePath)
stdOutReport, stdErrReport = '', ''  # guard: keep the reports defined even if api.run raises
try:
stdOutReport, stdErrReport, exitStatus = api.run ([
sourcePath
])
except Exception as exception:
print (exception)
if stdOutReport:
utils.log (True, 'The following inconsistencies were found:\n')
for stdOutLine in stdOutReport.split ('\n'):
utils.log (True, '\t{}\n', stdOutLine)
if stdErrReport:
utils.log (True, 'Problems encountered during static type check\n')
for stdErrLine in stdErrReport.split ('\n'):
utils.log (True, '\t{}\n', stdErrLine)
utils.log (True, '\n')
``` |
{
"source": "jmcannon/alfred-tureng",
"score": 3
} |
#### File: jmcannon/alfred-tureng/lookup-turkish.py
```python
import sys
import urllib
from bs4 import BeautifulSoup
from workflow import Workflow3, web
BASE_URL = u'http://tureng.com/en/turkish-english/'
RESULTS_TABLE_CLASS = 'searchResultsTable'
ENGLISH_TERM_CLASS = 'en tm'
TURKISH_TERM_CLASS = 'tr ts'
CATEGORY_CLASS = 'hidden-xs' # Must match exactly
def main(wf):
query = wf.args[0]
raw_url = BASE_URL + query
url = urllib.quote(raw_url.encode('utf-8'), safe=":/")
r = web.get(url)
r.raise_for_status()
soup = BeautifulSoup(r.text, 'html.parser')
table = soup.find(class_=RESULTS_TABLE_CLASS)
if table:
categories = [x.get_text().strip() for x in table.find_all(lambda tag: tag.get('class') == [CATEGORY_CLASS])]
target_terms = [x.get_text().strip() for x in table.find_all(class_=[ENGLISH_TERM_CLASS, TURKISH_TERM_CLASS])]
category_index = 0
for term in target_terms[1::2]:
category = categories[category_index]
subtitle = category if category != 'General' else ''
wf.add_item(title=term, subtitle=subtitle, icon='icon.png', valid=True, arg=query)
category_index += 1
else:
wf.add_item(title='No Match', valid=True, arg=query)
wf.send_feedback()
if __name__ == '__main__':
wf = Workflow3()
sys.exit(wf.run(main))
``` |
{
"source": "jmcarbo/docker-compose",
"score": 2
} |
#### File: tests/unit/cli_test.py
```python
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import os
import tempfile
import shutil
from .. import unittest
import mock
from compose.cli import main
from compose.cli.main import TopLevelCommand
from compose.cli.errors import ComposeFileNotFound
from six import StringIO
class CLITestCase(unittest.TestCase):
def test_default_project_name(self):
cwd = os.getcwd()
try:
os.chdir('tests/fixtures/simple-composefile')
command = TopLevelCommand()
project_name = command.get_project_name(command.get_config_path())
self.assertEquals('simplecomposefile', project_name)
finally:
os.chdir(cwd)
def test_project_name_with_explicit_base_dir(self):
command = TopLevelCommand()
command.base_dir = 'tests/fixtures/simple-composefile'
project_name = command.get_project_name(command.get_config_path())
self.assertEquals('simplecomposefile', project_name)
def test_project_name_with_explicit_uppercase_base_dir(self):
command = TopLevelCommand()
command.base_dir = 'tests/fixtures/UpperCaseDir'
project_name = command.get_project_name(command.get_config_path())
self.assertEquals('uppercasedir', project_name)
def test_project_name_with_explicit_project_name(self):
command = TopLevelCommand()
name = 'explicit-project-name'
project_name = command.get_project_name(None, project_name=name)
self.assertEquals('explicitprojectname', project_name)
def test_project_name_from_environment_old_var(self):
command = TopLevelCommand()
name = 'namefromenv'
with mock.patch.dict(os.environ):
os.environ['FIG_PROJECT_NAME'] = name
project_name = command.get_project_name(None)
self.assertEquals(project_name, name)
def test_project_name_from_environment_new_var(self):
command = TopLevelCommand()
name = 'namefromenv'
with mock.patch.dict(os.environ):
os.environ['COMPOSE_PROJECT_NAME'] = name
project_name = command.get_project_name(None)
self.assertEquals(project_name, name)
def test_filename_check(self):
self.assertEqual('docker-compose.yml', get_config_filename_for_files([
'docker-compose.yml',
'docker-compose.yaml',
'fig.yml',
'fig.yaml',
]))
self.assertEqual('docker-compose.yaml', get_config_filename_for_files([
'docker-compose.yaml',
'fig.yml',
'fig.yaml',
]))
self.assertEqual('fig.yml', get_config_filename_for_files([
'fig.yml',
'fig.yaml',
]))
self.assertEqual('fig.yaml', get_config_filename_for_files([
'fig.yaml',
]))
self.assertRaises(ComposeFileNotFound, lambda: get_config_filename_for_files([]))
def test_get_project(self):
command = TopLevelCommand()
command.base_dir = 'tests/fixtures/longer-filename-composefile'
project = command.get_project(command.get_config_path())
self.assertEqual(project.name, 'longerfilenamecomposefile')
self.assertTrue(project.client)
self.assertTrue(project.services)
def test_help(self):
command = TopLevelCommand()
with self.assertRaises(SystemExit):
command.dispatch(['-h'], None)
def test_setup_logging(self):
main.setup_logging()
self.assertEqual(logging.getLogger().level, logging.DEBUG)
self.assertEqual(logging.getLogger('requests').propagate, False)
def get_config_filename_for_files(filenames):
project_dir = tempfile.mkdtemp()
try:
make_files(project_dir, filenames)
command = TopLevelCommand()
command.base_dir = project_dir
return os.path.basename(command.get_config_path())
finally:
shutil.rmtree(project_dir)
def make_files(dirname, filenames):
for fname in filenames:
with open(os.path.join(dirname, fname), 'w') as f:
f.write('')
``` |
{
"source": "jmcarcell/django-plotly-dash",
"score": 2
} |
#### File: django-plotly-dash/django_plotly_dash/dash_wrapper.py
```python
import inspect
import itertools
import json
import warnings
from typing import Dict, List, Callable
import dash
from dash import Dash, dependencies
from dash._utils import split_callback_id, inputs_to_dict
from django.urls import reverse
from django.utils.text import slugify
from flask import Flask
from plotly.utils import PlotlyJSONEncoder
from .app_name import app_name, main_view_label
from .middleware import EmbeddedHolder
from .util import serve_locally as serve_locally_setting
from .util import stateless_app_lookup_hook
from .util import static_asset_path
try:
from dataclasses import dataclass
@dataclass(frozen=True)
class CallbackContext:
inputs_list : List
inputs: Dict
states_list: List
states: Dict
outputs_list: List
outputs: Dict
triggered: List
except:
# Not got python 3.7 or dataclasses yet
class CallbackContext:
def __init__(self, **kwargs):
self._args = kwargs
@property
def inputs_list(self):
return self._args['inputs_list']
@property
def inputs(self):
return self._args['inputs']
@property
def states_list(self):
return self._args['states_list']
@property
def states(self):
return self._args['states']
@property
def outputs(self):
return self._args['outputs']
@property
def outputs_list(self):
return self._args['outputs_list']
@property
def triggered(self):
return self._args['triggered']
uid_counter = 0
usable_apps = {}
_stateless_app_lookup_func = None
def add_usable_app(name, app):
'Add app to local registry by name'
global usable_apps # pylint: disable=global-statement
usable_apps[name] = app
return name
def all_apps():
'Return a dictionary of all locally registered apps with the slug name as key'
return usable_apps
def get_local_stateless_list():
"""Return a list of all locally registered stateless apps
"""
return list(usable_apps)
def get_local_stateless_by_name(name):
'''
Locate a registered dash app by name, and return a DjangoDash instance encapsulating the app.
'''
sa = usable_apps.get(name, None)
if not sa:
global _stateless_app_lookup_func # pylint: disable=global-statement
if _stateless_app_lookup_func is None:
_stateless_app_lookup_func = stateless_app_lookup_hook()
sa = _stateless_app_lookup_func(name)
if not sa:
# TODO wrap this in raising a 404 if not found
raise KeyError("Unable to find stateless DjangoApp called %s"%name)
return sa
class Holder:
'Helper class for holding configuration options'
def __init__(self):
self.items = []
def append_css(self, stylesheet):
'Add extra css file name to component package'
self.items.append(stylesheet)
def append_script(self, script):
'Add extra script file name to component package'
self.items.append(script)
class DjangoDash:
'''
Wrapper class that provides Dash functionality in a form that can be served by Django
To use, construct an instance of DjangoDash() in place of a Dash() one.
'''
#pylint: disable=too-many-instance-attributes
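# Added usage sketch (comments only, not part of the original module); names such as
# 'SimpleExample' are placeholders:
# app = DjangoDash('SimpleExample')
# app.layout = html.Div([dcc.Slider(id='slider'), html.Div(id='out')])
# @app.callback(Output('out', 'children'), [Input('slider', 'value')])
# def display(value): return value
# The app is then embedded in a Django template, e.g. via the plotly_app template tag.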
def __init__(self, name=None, serve_locally=None,
add_bootstrap_links=False,
suppress_callback_exceptions=False,
external_stylesheets=None,
external_scripts=None,
**kwargs): # pylint: disable=unused-argument, too-many-arguments
# store arguments to pass them later to the WrappedDash instance
self.external_stylesheets = external_stylesheets or []
self.external_scripts = external_scripts or []
self._kwargs = kwargs
if kwargs:
warnings.warn("You are passing extra arguments {kwargs} that will be passed to Dash(...) "
"but may not be properly handled by django-plotly-dash.".format(kwargs=kwargs))
if name is None:
global uid_counter # pylint: disable=global-statement
uid_counter += 1
self._uid = "djdash_%i" % uid_counter
else:
self._uid = name
self.layout = None
self._callback_sets = []
self._clientside_callback_sets = []
self.css = Holder()
self.scripts = Holder()
add_usable_app(self._uid,
self)
if serve_locally is None:
self._serve_locally = serve_locally_setting()
else:
self._serve_locally = serve_locally
self._suppress_callback_exceptions = suppress_callback_exceptions
if add_bootstrap_links:
from bootstrap4.bootstrap import css_url
bootstrap_source = css_url()['href']
if self._serve_locally:
# Ensure package is loaded; if not present then pip install dpd-static-support
hard_coded_package_name = "dpd_static_support"
base_file_name = bootstrap_source.split('/')[-1]
self.css.append_script({'external_url': [bootstrap_source,],
'relative_package_path' : base_file_name,
'namespace': hard_coded_package_name,
})
else:
self.css.append_script({'external_url':[bootstrap_source,],})
# Remember some caller info for static files
caller_frame = inspect.stack()[1]
self.caller_module = inspect.getmodule(caller_frame[0])
try:
self.caller_module_location = inspect.getfile(self.caller_module)
except:
self.caller_module_location = None
self.assets_folder = "assets"
def get_asset_static_url(self, asset_path):
module_name = self.caller_module.__name__
return static_asset_path(module_name, asset_path)
def as_dash_instance(self, cache_id=None):
'''
Form a dash instance, for stateless use of this app
'''
return self.do_form_dash_instance(cache_id=cache_id)
def handle_current_state(self):
'Do nothing impl - only matters if state present'
pass
def update_current_state(self, wid, key, value):
'Do nothing impl - only matters if state present'
pass
def have_current_state_entry(self, wid, key):
'Do nothing impl - only matters if state present'
pass
def get_base_pathname(self, specific_identifier, cache_id):
'Base path name of this instance, taking into account any state or statelessness'
if not specific_identifier:
app_pathname = "%s:app-%s"% (app_name, main_view_label)
ndid = self._uid
else:
app_pathname = "%s:%s" % (app_name, main_view_label)
ndid = specific_identifier
kwargs = {'ident': ndid}
if cache_id:
kwargs['cache_id'] = cache_id
app_pathname = app_pathname + "--args"
full_url = reverse(app_pathname, kwargs=kwargs)
if full_url[-1] != '/':
full_url = full_url + '/'
return ndid, full_url
def do_form_dash_instance(self, replacements=None, specific_identifier=None, cache_id=None):
'Perform the act of constructing a Dash instance taking into account state'
ndid, base_pathname = self.get_base_pathname(specific_identifier, cache_id)
return self.form_dash_instance(replacements, ndid, base_pathname)
def form_dash_instance(self, replacements=None, ndid=None, base_pathname=None):
'Construct a Dash instance taking into account state'
if ndid is None:
ndid = self._uid
rd = WrappedDash(base_pathname=base_pathname,
replacements=replacements,
ndid=ndid,
serve_locally=self._serve_locally,
external_stylesheets=self.external_stylesheets,
external_scripts=self.external_scripts,
**self._kwargs)
rd.layout = self.layout
rd.config['suppress_callback_exceptions'] = self._suppress_callback_exceptions
for cb, func in self._callback_sets:
rd.callback(**cb)(func)
for cb in self._clientside_callback_sets:
rd.clientside_callback(**cb)
for s in self.css.items:
rd.css.append_css(s)
for s in self.scripts.items:
rd.scripts.append_script(s)
return rd
@staticmethod
def get_expanded_arguments(func, inputs, state):
"""Analyse a callback function signature to detect the expanded arguments to add when called.
It uses the inputs and the state information to identify what arguments are already coming from Dash.
It returns a list of the expanded parameters to inject (can be [] if nothing should be injected)
or None if all parameters should be injected."""
n_dash_parameters = len(inputs or []) + len(state or [])
parameter_types = {kind: [p.name for p in parameters] for kind, parameters in
itertools.groupby(inspect.signature(func).parameters.values(), lambda p: p.kind)}
if inspect.Parameter.VAR_KEYWORD in parameter_types:
# there is some **kwargs, inject all parameters
expanded = None
elif inspect.Parameter.VAR_POSITIONAL in parameter_types:
# there is a *args, assume all parameters afterwards (KEYWORD_ONLY) are to be injected
# some of these parameters may not be expanded arguments but that is ok
expanded = parameter_types.get(inspect.Parameter.KEYWORD_ONLY, [])
else:
# there is no **kwargs, filter argMap to take only the keyword arguments
expanded = parameter_types.get(inspect.Parameter.POSITIONAL_OR_KEYWORD, [])[
n_dash_parameters:] + parameter_types.get(inspect.Parameter.KEYWORD_ONLY, [])
return expanded
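# Added illustration (not in the original source): with two Inputs and one State,
# def cb(i1, i2, s1, session_state, **kwargs): ... has a **kwargs, so expanded is
# None and every extra django-plotly-dash argument is injected, whereas
# def cb(i1, i2, s1, session_state): ... yields expanded == ['session_state'],
# so only that named extra argument is passed to the callback.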
def callback(self, *_args, **_kwargs):
'''Form a callback function by wrapping, in the same way as the underlying Dash application would
but handling extra arguments provided by dpd.
It will inspect the signature of the function to ensure only relevant expanded arguments are passed to the callback.
If the function accepts a **kwargs => all expanded arguments are sent to the function in the kwargs.
If the function has a *args => expanded arguments matching parameters after the *args are injected.
Otherwise, take all arguments beyond the one provided by Dash (based on the Inputs/States provided).
'''
output, inputs, state, prevent_initial_call = dependencies.handle_callback_args(
_args, _kwargs
)
callback_set = {'output': output,
'inputs': inputs,
'state': state,
'prevent_initial_call': prevent_initial_call}
def wrap_func(func):
self._callback_sets.append((callback_set, func))
# add an expanded attribute to the function with the information to use in dispatch_with_args
# to inject properly only the expanded arguments the function can accept
# if .expanded is None => inject all
# if .expanded is a list => inject only
func.expanded = DjangoDash.get_expanded_arguments(func, inputs, state)
return func
return wrap_func
expanded_callback = callback
def clientside_callback(self, clientside_function, *_args, **_kwargs):
'Form a callback function by wrapping, in the same way as the underlying Dash application would'
output, inputs, state, prevent_initial_call = dependencies.handle_callback_args(
_args, _kwargs
)
callback_set = { 'clientside_function': clientside_function,
'output': output,
'inputs': inputs,
'state': state,
'prevent_initial_call': prevent_initial_call}
self._clientside_callback_sets.append(callback_set)
def get_asset_url(self, asset_name):
'''URL of an asset associated with this component
Use a placeholder and insert later
'''
return "assets/" + str(asset_name)
#return self.as_dash_instance().get_asset_url(asset_name)
class PseudoFlask(Flask):
'Dummy implementation of a Flask instance, providing stub functionality'
def __init__(self):
self.config = {}
self.endpoints = {}
self.name = "PseudoFlaskDummyName"
self.blueprints = {}
# pylint: disable=unused-argument, missing-docstring
def after_request(self, *args, **kwargs):
pass
def errorhandler(self, *args, **kwargs): # pylint: disable=no-self-use
def eh_func(f):
return args[0]
return eh_func
def add_url_rule(self, *args, **kwargs):
route = kwargs['endpoint']
self.endpoints[route] = kwargs
def before_first_request(self, *args, **kwargs):
pass
def run(self, *args, **kwargs):
pass
def register_blueprint(self, *args, **kwargs):
pass
def wid2str(wid):
"""Convert an python id (str or dict) into its Dash representation.
see https://github.com/plotly/dash/blob/c5ba38f0ae7b7f8c173bda10b4a8ddd035f1d867/dash-renderer/src/actions/dependencies.js#L114"""
if isinstance(wid, str):
return wid
data = ",".join(f"{json.dumps(k)}:{json.dumps(v)}" for k, v in sorted(wid.items()))
return f"{{{data}}}"
class WrappedDash(Dash):
'Wrapper around the Plotly Dash application instance'
# pylint: disable=too-many-arguments, too-many-instance-attributes
def __init__(self,
base_pathname=None, replacements=None, ndid=None, serve_locally=False,
**kwargs):
self._uid = ndid
self._flask_app = Flask(self._uid)
self._notflask = PseudoFlask()
self._base_pathname = base_pathname
kwargs['url_base_pathname'] = self._base_pathname
kwargs['server'] = self._notflask
super().__init__(__name__, **kwargs)
self.css.config.serve_locally = serve_locally
self.scripts.config.serve_locally = serve_locally
self._adjust_id = False
if replacements:
self._replacements = replacements
else:
self._replacements = dict()
self._use_dash_layout = len(self._replacements) < 1
self._return_embedded = False
def use_dash_dispatch(self):
"""Return True if underlying dash dispatching should be used.
This stub is present to allow older code to work. Following PR #304
(see https://github.com/GibbsConsulting/django-plotly-dash/pull/304/files for
details) this function is no longer needed and therefore should always
return False"""
return False
def use_dash_layout(self):
'''
Indicate if the underlying dash layout can be used.
If application state is in use, then the underlying dash layout functionality has to be
augmented with the state information and this function returns False
'''
return self._use_dash_layout
def augment_initial_layout(self, base_response, initial_arguments=None):
'Add application state to initial values'
if self.use_dash_layout() and not initial_arguments and False:
return base_response.data, base_response.mimetype
# Adjust the base layout response
baseDataInBytes = base_response.data
baseData = json.loads(baseDataInBytes.decode('utf-8'))
# Also add in any initial arguments
if not initial_arguments:
initial_arguments = {}
# Define overrides as self._replacements updated with initial_arguments
overrides = dict(self._replacements)
overrides.update(initial_arguments)
# Walk tree. If at any point we have an element whose id
# matches, then replace any named values at this level
reworked_data = self.walk_tree_and_replace(baseData, overrides)
response_data = json.dumps(reworked_data,
cls=PlotlyJSONEncoder)
return response_data, base_response.mimetype
def walk_tree_and_extract(self, data, target):
'Walk tree of properties and extract identifiers and associated values'
if isinstance(data, dict):
for key in ['children', 'props']:
self.walk_tree_and_extract(data.get(key, None), target)
ident = data.get('id', None)
if ident is not None:
ident = wid2str(ident)
idVals = target.get(ident, {})
for key, value in data.items():
if key not in ['props', 'options', 'children', 'id']:
idVals[key] = value
if idVals:
target[ident] = idVals
if isinstance(data, list):
for element in data:
self.walk_tree_and_extract(element, target)
def walk_tree_and_replace(self, data, overrides):
'''
Walk the tree. Rely on json decoding to insert instances of dict and list
i.e. we use a DNA test for anatine, rather than our eyes and ears...
'''
if isinstance(data, dict):
response = {}
replacements = {}
# look for id entry
thisID = data.get('id', None)
if isinstance(thisID, dict):
# handle case of thisID being a dict (pattern) => linear search in overrides dict
thisID = wid2str(thisID)
for k, v in overrides.items():
if thisID == k:
replacements = v
break
elif thisID is not None:
# handle standard case of string thisID => key lookup
replacements = overrides.get(thisID, {})
# walk all keys and replace if needed
for k, v in data.items():
r = replacements.get(k, None)
if r is None:
r = self.walk_tree_and_replace(v, overrides)
response[k] = r
return response
if isinstance(data, list):
# process each entry in turn and return
return [self.walk_tree_and_replace(x, overrides) for x in data]
return data
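# Added illustration (not in the original source): with overrides
# {'inp1': {'n_clicks': 5}}, a layout fragment {'id': 'inp1', 'n_clicks': 0, ...}
# comes back as {'id': 'inp1', 'n_clicks': 5, ...}; every other key, and every
# element without a matching id, is recursed into unchanged.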
def flask_app(self):
'Underlying flask application for stub implementation'
return self._flask_app
def base_url(self):
'Base url of this component'
return self._base_pathname
def app_context(self, *args, **kwargs):
'Extract application context from underlying flask application'
return self._flask_app.app_context(*args,
**kwargs)
def test_request_context(self, *args, **kwargs):
'Request context for testing from underlying flask application'
return self._flask_app.test_request_context(*args,
**kwargs)
def locate_endpoint_function(self, name=None):
'Locate endpoint function given name of view'
if name is not None:
ep = "%s_%s" %(self._base_pathname,
name)
else:
ep = self._base_pathname
return self._notflask.endpoints[ep]['view_func']
# pylint: disable=no-member
@Dash.layout.setter
def layout(self, value):
'Overloaded layout function to fix component names as needed'
if self._adjust_id:
self._fix_component_id(value)
return Dash.layout.fset(self, value)
def _fix_component_id(self, component):
'Fix the name of a component and all of its children'
theID = getattr(component, "id", None)
if theID is not None:
setattr(component, "id", self._fix_id(theID))
try:
for c in component.children:
self._fix_component_id(c)
except: #pylint: disable=bare-except
pass
def _fix_id(self, name):
'Adjust identifier to include component name'
if not self._adjust_id:
return name
return "%s_-_%s" %(self._uid,
name)
def _fix_callback_item(self, item):
'Update component identifier'
item.component_id = self._fix_id(item.component_id)
return item
def callback(self, output, inputs, state, prevent_initial_call):
'Invoke callback, adjusting variable names as needed'
if isinstance(output, (list, tuple)):
fixed_outputs = [self._fix_callback_item(x) for x in output]
else:
fixed_outputs = self._fix_callback_item(output)
return super().callback(fixed_outputs,
[self._fix_callback_item(x) for x in inputs],
[self._fix_callback_item(x) for x in state],
prevent_initial_call=prevent_initial_call)
def clientside_callback(self, clientside_function, output, inputs, state, prevent_initial_call): # pylint: disable=dangerous-default-value
'Invoke callback, adjusting variable names as needed'
if isinstance(output, (list, tuple)):
fixed_outputs = [self._fix_callback_item(x) for x in output]
else:
fixed_outputs = self._fix_callback_item(output)
return super().clientside_callback(clientside_function,
fixed_outputs,
[self._fix_callback_item(x) for x in inputs],
[self._fix_callback_item(x) for x in state],
prevent_initial_call=prevent_initial_call)
#pylint: disable=too-many-locals
def dispatch_with_args(self, body, argMap):
'Perform callback dispatching, with enhanced arguments and recording of response'
inputs = body.get('inputs', [])
input_values = inputs_to_dict(inputs)
states = body.get('state', [])
output = body['output']
outputs_list = body.get('outputs') or split_callback_id(output)
changed_props = body.get('changedPropIds', [])
triggered_inputs = [{"prop_id": x, "value": input_values.get(x)} for x in changed_props]
callback_context_info = {
'inputs_list': inputs,
'inputs': input_values,
'states_list': states,
'states': inputs_to_dict(states),
'outputs_list': outputs_list,
'outputs': outputs_list,
'triggered': triggered_inputs,
}
callback_context = CallbackContext(**callback_context_info)
# Overload dash global variable
dash.callback_context = callback_context
# Add context to arg map, if extended callbacks in use
if len(argMap) > 0:
argMap['callback_context'] = callback_context
single_case = not(output.startswith('..') and output.endswith('..'))
if single_case:
# single Output (not in a list)
outputs = [output]
else:
# multiple outputs in a list (the list could contain a single item)
outputs = output[2:-2].split('...')
da = argMap.get('dash_app', None)
callback_info = self.callback_map[output]
args = []
for c in inputs + states:
if isinstance(c, list): # ALL, ALLSMALLER
v = [ci.get("value") for ci in c]
if da:
for ci, vi in zip(c, v):
da.update_current_state(ci['id'], ci['property'], vi)
else:
v = c.get("value")
if da:
da.update_current_state(c['id'], c['property'], v)
args.append(v)
# Dash 1.11 introduces a set of outputs
outputs_list = body.get('outputs') or split_callback_id(output)
argMap['outputs_list'] = outputs_list
# Special: intercept case of insufficient arguments
# This happens when a property has been updated with a pipe component
# TODO see if this can be attacked from the client end
if len(args) < len(callback_info['inputs']):
return 'EDGECASEEXIT'
callback = callback_info["callback"]
# smart injection of parameters if .expanded is defined
if callback.expanded is not None:
parameters_to_inject = {*callback.expanded, 'outputs_list'}
res = callback(*args, **{k: v for k, v in argMap.items() if k in parameters_to_inject})
else:
res = callback(*args, **argMap)
if da:
class LazyJson:
"""A class to allow delayed the evaluation of a dict (returned by `func`)
till the first get(...) is called on the dict."""
def __init__(self, func):
self._root_value = func
def get(self, item, default):
if isinstance(self._root_value, Callable):
self._root_value = self._root_value()
return self._root_value.get(item, default)
# wraps the json parsing of the response into LazyJson to avoid unnecessary parsing
root_value = LazyJson(lambda: json.loads(res).get('response', {}))
for output_item in outputs:
if isinstance(output_item, str):
output_id, output_property = output_item.split('.')
if da.have_current_state_entry(output_id, output_property):
value = root_value.get(output_id,{}).get(output_property, None)
da.update_current_state(output_id, output_property, value)
else:
# todo: implement saving of state for pattern-matching outputs
raise NotImplementedError("Updating state for dict keys (pattern matching) is not yet implemented")
return res
def slugified_id(self):
'Return the app id in a slug-friendly form'
pre_slugified_id = self._uid
return slugify(pre_slugified_id)
def extra_html_properties(self, prefix=None, postfix=None, template_type=None):
'''
Return extra html properties to allow individual apps to be styled separately.
The content returned from this function is injected unescaped into templates.
'''
prefix = prefix if prefix else "django-plotly-dash"
post_part = "-%s" % postfix if postfix else ""
template_type = template_type if template_type else "iframe"
slugified_id = self.slugified_id()
return "%(prefix)s %(prefix)s-%(template_type)s %(prefix)s-app-%(slugified_id)s%(post_part)s" % {'slugified_id':slugified_id,
'post_part':post_part,
'template_type':template_type,
'prefix':prefix,
}
def index(self, *args, **kwargs): # pylint: disable=unused-argument
scripts = self._generate_scripts_html()
css = self._generate_css_dist_html()
config = self._generate_config_html()
metas = self._generate_meta_html()
renderer = self._generate_renderer()
title = getattr(self, 'title', 'Dash')
if self._favicon:
import flask
favicon = '<link rel="icon" type="image/x-icon" href="{}">'.format(
flask.url_for('assets.static', filename=self._favicon))
else:
favicon = ''
_app_entry = '''
<div id="react-entry-point">
<div class="_dash-loading">
Loading...
</div>
</div>
'''
index = self.interpolate_index(
metas=metas, title=title, css=css, config=config,
scripts=scripts, app_entry=_app_entry, favicon=favicon,
renderer=renderer)
return index
def interpolate_index(self, **kwargs): #pylint: disable=arguments-differ
if not self._return_embedded:
resp = super().interpolate_index(**kwargs)
return resp
self._return_embedded.add_css(kwargs['css'])
self._return_embedded.add_config(kwargs['config'])
self._return_embedded.add_scripts(kwargs['scripts'])
return kwargs['app_entry']
def set_embedded(self, embedded_holder=None):
'Set a handler for embedded references prior to evaluating a view function'
self._return_embedded = embedded_holder if embedded_holder else EmbeddedHolder()
def exit_embedded(self):
'Exit the embedded section after processing a view'
self._return_embedded = False
```
#### File: django-plotly-dash/django_plotly_dash/tests.py
```python
import json
from unittest.mock import patch
import pytest
# pylint: disable=bare-except
from dash.dependencies import Input, State, Output
from django.urls import reverse
from django_plotly_dash import DjangoDash
from django_plotly_dash.dash_wrapper import get_local_stateless_list, get_local_stateless_by_name
from django_plotly_dash.models import DashApp, find_stateless_by_name
from django_plotly_dash.tests_dash_contract import fill_in_test_app, dash_contract_data
def test_dash_app():
'Test the import and formation of the dash app orm wrappers'
from django_plotly_dash.models import StatelessApp
stateless_a = StatelessApp(app_name="Some name")
assert stateless_a
assert stateless_a.app_name
assert str(stateless_a) == stateless_a.app_name
@pytest.mark.django_db
def test_dash_stateful_app_client_contract(client):
'Test the state management of a DashApp as well as the contract between the client and the Dash app'
from django_plotly_dash.models import StatelessApp
# create a DjangoDash, StatelessApp and DashApp
ddash = DjangoDash(name="DDash")
fill_in_test_app(ddash, write=False)
stateless_a = StatelessApp(app_name="DDash")
stateless_a.save()
stateful_a = DashApp(stateless_app=stateless_a,
instance_name="<NAME>",
slug="my-app", save_on_change=True)
stateful_a.save()
# check app can be found back
assert "DDash" in get_local_stateless_list()
assert get_local_stateless_by_name("DDash") == ddash
assert find_stateless_by_name("DDash") == ddash
# check the current_state is empty
assert stateful_a.current_state() == {}
# set the initial expected state
expected_state = {'inp1': {'n_clicks': 0, 'n_clicks_timestamp': 1611733453854},
'inp1b': {'n_clicks': 5, 'n_clicks_timestamp': 1611733454354},
'inp2': {'n_clicks': 7, 'n_clicks_timestamp': 1611733454554},
'out1-0': {'n_clicks': 1, 'n_clicks_timestamp': 1611733453954},
'out1-1': {'n_clicks': 2, 'n_clicks_timestamp': 1611733454054},
'out1-2': {'n_clicks': 3, 'n_clicks_timestamp': 1611733454154},
'out1-3': {'n_clicks': 4, 'n_clicks_timestamp': 1611733454254},
'out1b': {'href': 'http://www.example.com/null',
'n_clicks': 6,
'n_clicks_timestamp': 1611733454454},
'out2-0': {'n_clicks': 8, 'n_clicks_timestamp': 1611733454654},
'out3': {'n_clicks': 12, 'n_clicks_timestamp': 1611733455054},
'out4': {'n_clicks': 16, 'n_clicks_timestamp': 1611733455454},
'out5': {'n_clicks': 20, 'n_clicks_timestamp': 1611733455854},
'{"_id":"inp-0","_type":"btn3"}': {'n_clicks': 9,
'n_clicks_timestamp': 1611733454754},
'{"_id":"inp-0","_type":"btn4"}': {'n_clicks': 13,
'n_clicks_timestamp': 1611733455154},
'{"_id":"inp-0","_type":"btn5"}': {'n_clicks': 17,
'n_clicks_timestamp': 1611733455554},
'{"_id":"inp-1","_type":"btn3"}': {'n_clicks': 10,
'n_clicks_timestamp': 1611733454854},
'{"_id":"inp-1","_type":"btn4"}': {'n_clicks': 14,
'n_clicks_timestamp': 1611733455254},
'{"_id":"inp-1","_type":"btn5"}': {'n_clicks': 18,
'n_clicks_timestamp': 1611733455654},
'{"_id":"inp-2","_type":"btn3"}': {'n_clicks': 11,
'n_clicks_timestamp': 1611733454954},
'{"_id":"inp-2","_type":"btn4"}': {'n_clicks': 15,
'n_clicks_timestamp': 1611733455354},
'{"_id":"inp-2","_type":"btn5"}': {'n_clicks': 19,
'n_clicks_timestamp': 1611733455754}}
########## test state management of the app and conversion of component ids
# search for state values in dash layout
stateful_a.populate_values()
assert stateful_a.current_state() == expected_state
assert stateful_a.have_current_state_entry("inp1", "n_clicks")
assert stateful_a.have_current_state_entry({"_type": "btn3", "_id": "inp-0"}, "n_clicks_timestamp")
assert stateful_a.have_current_state_entry('{"_id":"inp-0","_type":"btn3"}', "n_clicks_timestamp")
assert not stateful_a.have_current_state_entry("checklist", "other-prop")
# update a non existent state => no effect on current_state
stateful_a.update_current_state("foo", "value", "random")
assert stateful_a.current_state() == expected_state
# update an existent state => update current_state
stateful_a.update_current_state('{"_id":"inp-2","_type":"btn5"}', "n_clicks", 100)
expected_state['{"_id":"inp-2","_type":"btn5"}'] = {'n_clicks': 100, 'n_clicks_timestamp': 1611733455754}
assert stateful_a.current_state() == expected_state
assert DashApp.objects.get(instance_name="Some name").current_state() == {}
stateful_a.handle_current_state()
assert DashApp.objects.get(instance_name="<NAME>").current_state() == expected_state
# check initial layout serve has the correct values injected
dash_instance = stateful_a.as_dash_instance()
resp = dash_instance.serve_layout()
# initialise layout with app state
layout, mimetype = dash_instance.augment_initial_layout(resp, {})
assert '"n_clicks": 100' in layout
# initialise layout with initial arguments
layout, mimetype = dash_instance.augment_initial_layout(resp, {
'{"_id":"inp-2","_type":"btn5"}': {"n_clicks": 200}})
assert '"n_clicks": 100' not in layout
assert '"n_clicks": 200' in layout
########### test contract between client and app by replaying interactions recorded in tests_dash_contract.json
# get update component route
url = reverse('the_django_plotly_dash:update-component', kwargs={'ident': 'my-app'})
# for all interactions in the tests_dash_contract.json
for scenario in json.load(dash_contract_data.open("r")):
body = scenario["body"]
response = client.post(url, json.dumps(body), content_type="application/json")
assert response.status_code == 200
response = json.loads(response.content)
# compare first item in response with first result
result = scenario["result"]
if isinstance(result, list):
result = result[0]
content = response["response"].popitem()[1].popitem()[1]
assert content == result
# handle state
stateful_a.handle_current_state()
# check final state has been changed accordingly
final_state = {'inp1': {'n_clicks': 1, 'n_clicks_timestamp': 1615103027288},
'inp1b': {'n_clicks': 5, 'n_clicks_timestamp': 1615103033482},
'inp2': {'n_clicks': 8, 'n_clicks_timestamp': 1615103036591},
'out1-0': {'n_clicks': 1, 'n_clicks_timestamp': 1611733453954},
'out1-1': {'n_clicks': 2, 'n_clicks_timestamp': 1611733454054},
'out1-2': {'n_clicks': 3, 'n_clicks_timestamp': 1611733454154},
'out1-3': {'n_clicks': 4, 'n_clicks_timestamp': 1611733454254},
'out1b': {'href': 'http://www.example.com/1615103033482',
'n_clicks': 6,
'n_clicks_timestamp': 1611733454454},
'out2-0': {'n_clicks': 8, 'n_clicks_timestamp': 1611733454654},
'out3': {'n_clicks': 12, 'n_clicks_timestamp': 1611733455054},
'out4': {'n_clicks': 16, 'n_clicks_timestamp': 1611733455454},
'out5': {'n_clicks': 20, 'n_clicks_timestamp': 1611733455854},
'{"_id":"inp-0","_type":"btn3"}': {'n_clicks': 10,
'n_clicks_timestamp': 1615103039030},
'{"_id":"inp-0","_type":"btn4"}': {'n_clicks': 14,
'n_clicks_timestamp': 1611733455154},
'{"_id":"inp-0","_type":"btn5"}': {'n_clicks': 18,
'n_clicks_timestamp': 1611733455554},
'{"_id":"inp-1","_type":"btn3"}': {'n_clicks': 11,
'n_clicks_timestamp': 1615103039496},
'{"_id":"inp-1","_type":"btn4"}': {'n_clicks': 15,
'n_clicks_timestamp': 1611733455254},
'{"_id":"inp-1","_type":"btn5"}': {'n_clicks': 19,
'n_clicks_timestamp': 1611733455654},
'{"_id":"inp-2","_type":"btn3"}': {'n_clicks': 12,
'n_clicks_timestamp': 1615103040528},
'{"_id":"inp-2","_type":"btn4"}': {'n_clicks': 15,
'n_clicks_timestamp': 1611733455354},
'{"_id":"inp-2","_type":"btn5"}': {'n_clicks': 20,
'n_clicks_timestamp': 1611733455754}}
assert DashApp.objects.get(instance_name="Some name").current_state() == final_state
def test_dash_callback_arguments():
'Test the flexibility of the callback arguments order (handling of inputs/outputs/states)'
# create a DjangoDash
ddash = DjangoDash(name="DashCallbackArguments")
# add a callback with the new flexible order of dependencies
@ddash.callback(
Output("one", "foo"),
Output("two", "foo"),
Input("one", "baz"),
Input("two", "baz"),
Input("three", "baz"),
State("one", "bil"),
)
def new():
pass
# add a callback with the old/classical flexible order of dependencies
@ddash.callback(
[Output("one", "foo"),
Output("two", "foo")],
[Input("one", "baz"),
Input("two", "baz"),
Input("three", "baz")],
[State("one", "bil")]
)
def old():
pass
assert ddash._callback_sets == [({'inputs': [Input("one", "baz"),
Input("two", "baz"),
Input("three", "baz"), ],
'output': [Output("one", "foo"),
Output("two", "foo")],
'prevent_initial_call': None,
'state': [State("one", "bil"), ]},
new),
({'inputs': [Input("one", "baz"),
Input("two", "baz"),
Input("three", "baz"), ],
'output': [Output("one", "foo"),
Output("two", "foo")],
'prevent_initial_call': None,
'state': [State("one", "bil"), ]},
old)
]
def test_util_error_cases(settings):
'Test handling of missing settings'
settings.PLOTLY_DASH = None
from django_plotly_dash.util import pipe_ws_endpoint_name, dpd_http_endpoint_root, http_endpoint, insert_demo_migrations
assert pipe_ws_endpoint_name() == 'dpd/ws/channel'
assert dpd_http_endpoint_root() == "dpd/views"
assert http_endpoint("fred") == '^dpd/views/fred/$'
assert not insert_demo_migrations()
del settings.PLOTLY_DASH
assert pipe_ws_endpoint_name() == 'dpd/ws/channel'
assert dpd_http_endpoint_root() == "dpd/views"
assert http_endpoint("fred") == '^dpd/views/fred/$'
assert not insert_demo_migrations()
def test_demo_routing():
'Test configuration options for the demo'
from django_plotly_dash.util import pipe_ws_endpoint_name, insert_demo_migrations
assert pipe_ws_endpoint_name() == 'ws/channel'
assert insert_demo_migrations()
def test_local_serving(settings):
'Test local serve settings'
from django_plotly_dash.util import serve_locally, static_asset_root, full_asset_path
assert serve_locally() == settings.DEBUG
assert static_asset_root() == 'dpd/assets'
assert full_asset_path('fred.jim', 'harry') == 'dpd/assets/fred/jim/harry'
@pytest.mark.django_db
def test_direct_access(client):
'Check direct use of a stateless application using demo test data'
from django.urls import reverse
from .app_name import main_view_label
for route_name in ['layout', 'dependencies', main_view_label]:
for prefix, arg_map in [('app-', {'ident':'SimpleExample'}),
('', {'ident':'simpleexample-1'}),]:
url = reverse('the_django_plotly_dash:%s%s' % (prefix, route_name), kwargs=arg_map)
response = client.get(url)
assert response.content
assert response.status_code == 200
for route_name in ['routes',]:
for prefix, arg_map in [('app-', {'ident':'SimpleExample'}),
('', {'ident':'simpleexample-1'}),]:
url = reverse('the_django_plotly_dash:%s%s' % (prefix, route_name), kwargs=arg_map)
did_fail = False
try:
response = client.get(url)
except:
did_fail = True
assert did_fail
@pytest.mark.django_db
def test_updating(client):
'Check updating of an app using demo test data'
from django.urls import reverse
route_name = 'update-component'
for prefix, arg_map in [('app-', {'ident':'SimpleExample'}),
('', {'ident':'simpleexample-1'}),]:
url = reverse('the_django_plotly_dash:%s%s' % (prefix, route_name), kwargs=arg_map)
response = client.post(url, json.dumps({'output': 'output-size.children',
'inputs':[{'id':'dropdown-color',
'property':'value',
'value':'blue'},
{'id':'dropdown-size',
'property':'value',
'value':'medium'},
]}), content_type="application/json")
assert response.content == b'{"response": {"output-size": {"children": "The chosen T-shirt is a medium blue one."}}, "multi": true}'
assert response.status_code == 200
@pytest.mark.django_db
def test_injection_app_access(client):
'Check direct use of a stateless application using demo test data'
from django.urls import reverse
from .app_name import main_view_label
for route_name in ['layout', 'dependencies', main_view_label]:
for prefix, arg_map in [('app-', {'ident':'dash_example_1'}),
#('', {'ident':'simpleexample-1'}),
]:
url = reverse('the_django_plotly_dash:%s%s' % (prefix, route_name), kwargs=arg_map)
response = client.get(url)
assert response.content
assert response.status_code == 200
for route_name in ['routes',]:
for prefix, arg_map in [('app-', {'ident':'dash_example_1'}),]:
url = reverse('the_django_plotly_dash:%s%s' % (prefix, route_name), kwargs=arg_map)
did_fail = False
try:
response = client.get(url)
except:
did_fail = True
assert did_fail
@pytest.mark.django_db
def test_injection_updating_multiple_callbacks(client):
'Check updating of an app using demo test data for multiple callbacks'
from django.urls import reverse
route_name = 'update-component'
for prefix, arg_map in [('app-', {'ident':'multiple_callbacks'}),]:
url = reverse('the_django_plotly_dash:%s%s' % (prefix, route_name), kwargs=arg_map)
# output is now a single string joining each output's id and property ('..id.prop..' multi-output format)
response = client.post(url, json.dumps({'output':'..output-one.children...output-two.children...output-three.children..',
'inputs':[
{'id':'button',
'property':'n_clicks',
'value':'10'},
{'id':'dropdown-color',
'property':'value',
'value':'purple-ish yellow with a hint of greeny orange'},
]}), content_type="application/json")
assert response.status_code == 200
resp = json.loads(response.content.decode('utf-8'))
assert 'response' in resp
resp_detail = resp['response']
assert 'output-two' in resp_detail
assert 'children' in resp_detail['output-two']
assert resp_detail['output-two']['children'] == "Output 2: 10 purple-ish yellow with a hint of greeny orange []"
@pytest.mark.django_db
def test_flexible_expanded_callbacks(client):
'Check updating of an app using demo test data for flexible expanded callbacks'
from django.urls import reverse
route_name = 'update-component'
for prefix, arg_map in [('app-', {'ident':'flexible_expanded_callbacks'}),]:
url = reverse('the_django_plotly_dash:%s%s' % (prefix, route_name), kwargs=arg_map)
# output contains all arguments of the expanded_callback
response = client.post(url, json.dumps({'output':'output-one.children',
'inputs':[
{'id':'button',
'property':'n_clicks',
'value':'10'},
]}), content_type="application/json")
assert response.status_code == 200
resp = json.loads(response.content.decode('utf-8'))
for key in ["dash_app_id", "dash_app", "callback_context"]:
assert key in resp["response"]['output-one']['children']
# output contains all arguments of the expanded_callback
response = client.post(url, json.dumps({'output':'output-two.children',
'inputs':[
{'id':'button',
'property':'n_clicks',
'value':'10'},
]}), content_type="application/json")
assert response.status_code == 200
resp = json.loads(response.content.decode('utf-8'))
assert resp["response"]=={'output-two': {'children': 'ok'}}
# output contains all arguments of the expanded_callback
response = client.post(url, json.dumps({'output':'output-three.children',
'inputs':[
{'id':'button',
'property':'n_clicks',
'value':'10'},
]}), content_type="application/json")
assert response.status_code == 200
resp = json.loads(response.content.decode('utf-8'))
assert resp["response"]=={"output-three": {"children": "flexible_expanded_callbacks"}}
@pytest.mark.django_db
def test_injection_updating(client):
'Check updating of an app using demo test data'
from django.urls import reverse
route_name = 'update-component'
for prefix, arg_map in [('app-', {'ident':'dash_example_1'}),]:
url = reverse('the_django_plotly_dash:%s%s' % (prefix, route_name), kwargs=arg_map)
response = client.post(url, json.dumps({#'output':{'id':'test-output-div', 'property':'children'},
'output': "test-output-div.children",
'inputs':[{'id':'my-dropdown1',
'property':'value',
'value':'TestIt'},
]}), content_type="application/json")
rStart = b'{"response": {"test-output-div": {"children": [{"props": {"id": "line-area-graph2"'
assert response.content.startswith(rStart)
assert response.status_code == 200
# Single output callback, output=="component_id.component_prop"
response = client.post(url, json.dumps({'output':'test-output-div.children',
'inputs':[{'id':'my-dropdown1',
'property':'value',
'value':'TestIt'},
]}), content_type="application/json")
rStart = b'{"response": {"test-output-div": {"children": [{"props": {"id": "line-area-graph2"'
assert response.content.startswith(rStart)
assert response.status_code == 200
# Single output callback, fails if output=="..component_id.component_prop.."
with pytest.raises(KeyError, match="..test-output-div.children.."):
client.post(url, json.dumps({'output':'..test-output-div.children..',
'inputs':[{'id':'my-dropdown1',
'property':'value',
'value':'TestIt'},
]}), content_type="application/json")
# Multiple output callback, fails if output=="component_id.component_prop"
with pytest.raises(KeyError, match="test-output-div3.children"):
client.post(url, json.dumps({'output':'test-output-div3.children',
'inputs':[{'id':'my-dropdown1',
'property':'value',
'value':'TestIt'},
]}), content_type="application/json")
# Multiple output callback, output=="..component_id.component_prop.."
response = client.post(url, json.dumps({'output':'..test-output-div3.children..',
'inputs':[{'id':'my-dropdown1',
'property':'value',
'value':'TestIt'},
]}), content_type="application/json")
rStart = b'{"response": {"test-output-div3": {"children": [{"props": {"id": "line-area-graph2"'
assert response.content.startswith(rStart)
assert response.status_code == 200
with pytest.raises(KeyError, match="django_to_dash_context"):
client.post(url, json.dumps({'output': 'test-output-div2.children',
'inputs':[{'id':'my-dropdown2',
'property':'value',
'value':'TestIt'},
]}), content_type="application/json")
session = client.session
session['django_plotly_dash'] = {'django_to_dash_context': 'Test 789 content'}
session.save()
response = client.post(url, json.dumps({'output': 'test-output-div2.children',
'inputs':[{'id':'my-dropdown2',
'property':'value',
'value':'TestIt'},
]}), content_type="application/json")
rStart = b'{"response": {"test-output-div2": {"children": [{"props": {"children": ["You have '
assert response.content.startswith(rStart)
assert response.status_code == 200
assert response.content.find(b'Test 789 content') > 0
@pytest.mark.django_db
def test_argument_settings(settings, client):
'Test the setting that controls how initial arguments are propagated through to the dash app'
from django_plotly_dash.util import initial_argument_location, store_initial_arguments, get_initial_arguments
assert initial_argument_location()
settings.PLOTLY_DASH = {'cache_arguments': True}
assert initial_argument_location()
test_value = {"test":"first"}
cache_id = store_initial_arguments(None, test_value)
assert len(cache_id) > 10
fetched = get_initial_arguments(None, cache_id)
assert fetched == test_value
settings.PLOTLY_DASH = {'cache_arguments': False}
assert not initial_argument_location()
cache_id2 = store_initial_arguments(client, test_value)
assert len(cache_id2) > 10
assert cache_id != cache_id2
## For some reason, sessions are continually replaced, so the lookup here doesn't work
#fetched2 = get_initial_arguments(client, cache_id2)
#assert fetched2 == test_value
assert store_initial_arguments(None, None) is None
assert get_initial_arguments(None, None) is None
assert store_initial_arguments(client, None) is None
assert get_initial_arguments(client, None) is None
def test_stateless_lookup_noop():
'Test no-op stateless lookup'
from django_plotly_dash.util import stateless_app_lookup_hook
lh_hook = stateless_app_lookup_hook()
assert lh_hook is not None
with pytest.raises(ImportError):
lh_hook("not an app")
def test_middleware_artifacts():
'Import and vaguely exercise middleware objects'
from django_plotly_dash.middleware import EmbeddedHolder, ContentCollector
eh = EmbeddedHolder()
eh.add_css("some_css")
eh.add_config("some_config")
eh.add_scripts("some_scripts")
assert eh.config == 'some_config'
cc = ContentCollector()
assert cc._encode("fred") == b'fred'
def test_finders():
'Import and vaguely exercise staticfiles finders'
from django_plotly_dash.finders import DashComponentFinder, DashAppDirectoryFinder, DashAssetFinder
dcf = DashComponentFinder()
dadf = DashAppDirectoryFinder()
daf = DashAssetFinder()
assert dcf is not None
assert dadf is not None
assert daf is not None
@pytest.mark.django_db
def test_app_loading(client):
from django_plotly_dash.models import check_stateless_loaded
from django.urls import reverse
# Function should run without raising errors
check_stateless_loaded()
assert True
url = reverse('the_django_plotly_dash:add_stateless_apps')
response = client.post(url)
# This view redirects to the main admin
assert response.status_code == 302
@pytest.mark.django_db
def test_external_scripts_stylesheets(client):
'Check external_stylesheets and external_scripts ends up in index'
from demo.plotly_apps import external_scripts_stylesheets
dash = external_scripts_stylesheets.as_dash_instance()
with patch.object(dash, "interpolate_index") as mock:
dash.index()
_, kwargs = mock.call_args
assert "https://codepen.io/chriddyp/pen/bWLwgP.css" in kwargs["css"]
assert "https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" in kwargs["css"]
assert "https://www.google-analytics.com/analytics.js" in kwargs["scripts"]
assert "https://cdn.polyfill.io/v2/polyfill.min.js" in kwargs["scripts"]
assert "https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.10/lodash.core.js" in kwargs["scripts"]
def test_callback_decorator():
inputs = [Input("one", "value"),
Input("two", "value"),
]
states = [Input("three", "value"),
Input("four", "value"),
]
def callback_standard(one, two, three, four):
return
assert DjangoDash.get_expanded_arguments(callback_standard, inputs, states) == []
def callback_standard(one, two, three, four, extra_1):
return
assert DjangoDash.get_expanded_arguments(callback_standard, inputs, states) == ['extra_1']
def callback_args(one, *args):
return
assert DjangoDash.get_expanded_arguments(callback_args, inputs, states) == []
def callback_args_extra(one, *args, extra_1):
return
assert DjangoDash.get_expanded_arguments(callback_args_extra, inputs, states) == ['extra_1' ]
def callback_args_extra_star(one, *, extra_1):
return
assert DjangoDash.get_expanded_arguments(callback_args_extra_star, inputs, states) == ['extra_1' ]
def callback_kwargs(one, two, three, four, extra_1, **kwargs):
return
assert DjangoDash.get_expanded_arguments(callback_kwargs, inputs, states) is None
def callback_kwargs(one, two, three, four, *, extra_1, **kwargs, ):
return
assert DjangoDash.get_expanded_arguments(callback_kwargs, inputs, states) is None
``` |
{
"source": "jmcardle/quiet-editor",
"score": 3
} |
#### File: quiet-editor/backend/files.py
```python
import base64
import os
from settings import Settings
files = {}
def list():
return list_directory(Settings.file_directory)
def list_trash():
return list_directory(Settings.trash_directory)
def list_directory(directory):
return [from_safe_filename(file_name) for file_name in os.listdir(directory)
if os.path.isfile(os.path.join(directory, file_name))]
def get(file_name):
# Copy exists in memory.
if file_name in files:
return files[file_name]
# Copy exists on disk. Load it in memory.
elif file_exists(file_name):
text = read_file(file_name)
files[file_name] = text
return text
# No copy exists.
else:
return ""
def set(file_name, text):
# File doesn't exist or isn't in database.
if not file_exists(file_name) or file_name not in files:
write_file(file_name, text)
# Just text is appended.
elif text.startswith(files[file_name]):
append_file(file_name, text[len(files[file_name]):])
# Just text is removed.
elif files[file_name].startswith(text):
truncate_file(file_name, len(text))
# Content added elsewhere.
else:
write_file(file_name, text)
# Update memory.
files[file_name] = text
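# Illustrative behaviour of set() (descriptive note, not in the original source):
# if the stored text is "hello", set(name, "hello world") only appends " world",
# set(name, "hel") truncates the file to 3 bytes, and set(name, "bye") rewrites it in full.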
def trash(file_name):
return move_file(file_name, Settings.file_directory, Settings.trash_directory)
def delete(file_name):
try:
os.remove(to_safe_filename(file_name, Settings.trash_directory))
return True
except OSError:
return False
def restore(file_name):
return move_file(file_name, Settings.trash_directory, Settings.file_directory)
def move_file(file_name, from_directory, to_directory):
# Try moving the file.
try:
os.rename(to_safe_filename(file_name, from_directory), to_safe_filename(file_name, to_directory))
return True
# Indicate failure if the file could not be moved.
except OSError:
return False
def write_file(file_name, content, mode="w"):
with open(to_safe_filename(file_name), mode) as file:
file.write(content)
def append_file(file_name, content):
write_file(file_name, content, "a")
def read_file(file_name):
with open(to_safe_filename(file_name), "r") as file:
return file.read()
def truncate_file(file_name, size):
with open(to_safe_filename(file_name), "w") as file:
file.truncate(size)
def file_exists(file_name):
return os.path.isfile(to_safe_filename(file_name))
def to_safe_filename(file_name, directory=Settings.file_directory):
return os.path.join(directory, base64.urlsafe_b64encode(file_name.encode()).decode())
def from_safe_filename(file_name):
return base64.urlsafe_b64decode(os.path.basename(file_name)).decode()
``` |
{
"source": "jmcarlile/website-factory",
"score": 2
} |
#### File: backend/app_proj/utility.py
```python
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
UTILITY
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import django.db as DB
def ConvertFigureToJson(figure):
import json
from plotly.utils import PlotlyJSONEncoder
redata = json.loads(json.dumps(figure.data, cls=PlotlyJSONEncoder))
relayout = json.loads(json.dumps(figure.layout, cls=PlotlyJSONEncoder))
fig_json=json.dumps({'data': redata,'layout': relayout})
return fig_json
class BaseManager(DB.models.Manager):
def getOrNone(self, **kwargs):
try:
return self.get(**kwargs)
except DB.models.ObjectDoesNotExist:
return None
def GetTableCounts():
from django.apps import apps
customTables = []
for name, app in apps.app_configs.items():
if name in ['admin', 'auth', 'contenttypes', 'sessions']:
continue
modelLs = list(app.get_models())
for m in modelLs:
customTables.append({
'Module': name,
'Table': str(m).split('.')[-1].replace("'>", ""),
'Count': m.objects.count(),
})
return customTables
def InsertSingle(module, table, entryDx):
moduleObj = __import__(module)
folderObj = getattr(moduleObj, 'models')
classObj = getattr(folderObj, table)
try:
newModel = classObj(**entryDx)
newModel.save()
print('inserted')
except Exception as ex:
print(ex)
def InsertBulk(module, table, dataLs):
moduleObj = __import__(module)
folderObj = getattr(moduleObj, 'models')
classObj = getattr(folderObj, table)
for dx in dataLs:
for k, v in dx.items():
if str(v) in ['nan', 'NaT']:
dx[k] = None
try:
modelLs = [classObj(**d) for d in dataLs]
classObj.objects.bulk_create(modelLs, ignore_conflicts=True)
print('bulk inserted')
except Exception as ex:
print(ex)
def GetTableDictionary(module, table):
moduleObj = __import__(module)
folderObj = getattr(moduleObj, 'models')
classObj = getattr(folderObj, table)
selectLs = list(classObj.objects.values())
return selectLs
def GetRow(module, table, parameters):
moduleObj = __import__(module)
folderObj = getattr(moduleObj, 'models')
classObj = getattr(folderObj, table)
result = classObj.objects.getOrNone(**parameters)
resultDx = {}
if result: resultDx = result.__dict__
return resultDx
def DeleteTable(module, table):
moduleObj = __import__(module)
folderObj = getattr(moduleObj, 'models')
classObj = getattr(folderObj, table)
classObj.objects.all().delete()
print('table deleted')
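# Illustrative usage sketch (assumption, not part of the original module): the helpers
# above resolve "<module>.models.<Table>" dynamically, so the module and table names
# must match an installed Django app; 'business_module'/'LegoSet' are taken from the
# companion custom.py below.
# InsertSingle('business_module', 'LegoSet', {'ThemeGroup': 'Fantasy'})
# rows = GetTableDictionary('business_module', 'LegoSet')
# one = GetRow('business_module', 'LegoSet', {'ThemeGroup': 'Fantasy'})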
```
#### File: business_module/logic/custom.py
```python
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
CUSTOM LOGIC
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
import pandas as PD
import app_proj.utility as UT
def GetThemeGroups():
setsLs = UT.GetTableDictionary('business_module', 'LegoSet')
setsDf = PD.DataFrame(setsLs)
themeLs = list(setsDf['ThemeGroup'].unique())
return themeLs
``` |
{
"source": "jmcarp/filteralchemy",
"score": 3
} |
#### File: filteralchemy/filteralchemy/operators.py
```python
class Operator(object):
lookup = None
label = None
multiple = False
def __call__(self, query, model, attr, value):
column = getattr(model, attr)
condition = getattr(column, self.lookup)(value)
return query.filter(condition)
class Equal(Operator):
lookup = '__eq__'
label = 'eq'
class NotEqual(Operator):
lookup = '__ne__'
label = 'ne'
class GreaterThan(Operator):
lookup = '__gt__'
label = 'gt'
class GreaterEqual(Operator):
lookup = '__ge__'
label = 'ge'
class LessThan(Operator):
lookup = '__lt__'
label = 'lt'
class LessEqual(Operator):
lookup = '__le__'
label = 'le'
class Like(Operator):
lookup = 'like'
label = 'like'
class ILike(Operator):
lookup = 'ilike'
label = 'ilike'
class In(Operator):
lookup = 'in_'
label = 'in'
multiple = True
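# --- Illustrative usage sketch (assumption, not part of the original module) ---
# Shows how an Operator instance composes a SQLAlchemy query. The in-memory engine
# and the `User` model below are hypothetical and exist only for this demo.
if __name__ == '__main__':
    import sqlalchemy as sa
    from sqlalchemy.orm import declarative_base, Session

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = sa.Column(sa.Integer, primary_key=True)
        age = sa.Column(sa.Integer)

    engine = sa.create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add_all([User(age=15), User(age=30)])
        session.commit()
        # GreaterThan resolves to User.age.__gt__(18) and filters the query.
        adults = GreaterThan()(session.query(User), User, 'age', 18).all()
        print(len(adults))  # 1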
``` |
{
"source": "jmcarp/magic-modules",
"score": 3
} |
#### File: .ci/magic-modules/extract_from_pr_description_test.py
```python
import unittest
import extract_from_pr_description
class TestExtraction(unittest.TestCase):
def setUp(self):
self.tree = extract_from_pr_description.PrDescriptionTree("""
A summary of changes goes here!
-----------------------------------------------------------------
# all
Foo
Bar
Baz
## terraform
Bar
## puppet
Baz
### puppet-dns
Qux
### puppet-compute
## chef
""")
def testEmpty(self):
self.assertEqual("Foo\nBar\nBaz", self.tree['chef'])
def testThreeDeepEmpty(self):
self.assertEqual("Baz", self.tree['puppet-compute'])
def testThreeDeep(self):
self.assertEqual("Qux", self.tree['puppet-dns'])
def testTwoDeep(self):
self.assertEqual("Bar", self.tree['terraform'])
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmcarp/marshmallow",
"score": 2
} |
#### File: marshmallow/tests/test_legacy.py
```python
import pytest
from marshmallow import Serializer, Schema
from tests.base import UserSchema, User, UserMetaSchema
def test_serializer_alias():
assert Serializer is Schema
def test_serializing_through_contructor(user):
s = UserSchema(user)
assert s.data['name'] == user.name
def test_validate(recwarn):
valid = User("Joe", email="<EMAIL>")
invalid = User("John", email="john<EMAIL>")
assert UserSchema(valid).is_valid()
assert UserSchema(invalid).is_valid() is False
warning = recwarn.pop()
assert issubclass(warning.category, DeprecationWarning)
@pytest.mark.parametrize('SerializerClass',
[UserSchema, UserMetaSchema])
def test_validate_field(SerializerClass):
invalid = User("John", email="john<EMAIL>")
assert SerializerClass(invalid).is_valid(["name"]) is True
assert SerializerClass(invalid).is_valid(["email"]) is False
def test_validating_nonexistent_field_raises_error(user):
ser_user = UserSchema(user)
with pytest.raises(KeyError):
ser_user.is_valid(["foobar"])
``` |
{
"source": "jmcarp/python-readability",
"score": 2
} |
#### File: jmcarp/python-readability/setup.py
```python
from __future__ import print_function
import codecs
import os
import re
from setuptools import setup
import sys
lxml_requirement = "lxml"
if sys.platform == "darwin":
import platform
mac_ver = platform.mac_ver()[0]
mac_major, mac_minor = mac_ver.split('.')[:2]
if int(mac_major) == 10 and int(mac_minor) < 9:
print("Using lxml<2.4")
lxml_requirement = "lxml<2.4"
test_deps = [
# Test timeouts
"timeout_decorator",
]
extras = {
"test": test_deps,
}
# Adapted from https://github.com/pypa/pip/blob/master/setup.py
def find_version(*file_paths):
here = os.path.abspath(os.path.dirname(__file__))
# Intentionally *not* adding an encoding option to open, See:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
with codecs.open(os.path.join(here, *file_paths), "r") as fp:
version_file = fp.read()
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M,
)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name="readability-lxml",
version=find_version("readability", "__init__.py"),
author="<NAME>",
author_email="<EMAIL>",
description="fast html to text parser (article readability tool) with python 3 support",
test_suite="tests.test_article_only",
long_description=open("README.rst").read(),
long_description_content_type='text/x-rst',
license="Apache License 2.0",
url="http://github.com/buriy/python-readability",
packages=["readability", "readability.compat"],
install_requires=["chardet", lxml_requirement, "cssselect"],
tests_require=test_deps,
extras_require=extras,
classifiers=[
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Topic :: Text Processing :: Indexing",
"Topic :: Utilities",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
``` |
{
"source": "jmcarp/requests-middleware",
"score": 3
} |
#### File: requests-middleware/requests_middleware/middleware.py
```python
from requests import Response
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.response import HTTPResponse
from requests.packages.urllib3.poolmanager import PoolManager
class MiddlewareHTTPAdapter(HTTPAdapter):
"""An HTTPAdapter onto which :class:`BaseMiddleware <BaseMiddleware>`
can be registered. Middleware methods are called in the order of
registration. Note: middlewares that expose actions called during adapter
initialization must be passed to `__init__` rather than `register`, else
those actions will not take effect.
:param list middlewares: List of :class:`BaseMiddleware <BaseMiddleware>`
objects
"""
def __init__(self, middlewares=None, *args, **kwargs):
self.middlewares = middlewares or []
super(MiddlewareHTTPAdapter, self).__init__(*args, **kwargs)
def register(self, middleware):
"""Add a middleware to the middleware stack.
:param BaseMiddleware middleware: The middleware object
"""
self.middlewares.append(middleware)
def init_poolmanager(self, connections, maxsize, block=False):
"""Assemble keyword arguments to be passed to `PoolManager`.
Middlewares are called in reverse order, so if multiple middlewares
define conflicting arguments, the higher-priority middleware will take
precedence. Note: Arguments are passed directly to `PoolManager` and
not to the superclass `init_poolmanager` because the superclass method
does not currently accept **kwargs.
"""
kwargs = {}
for middleware in self.middlewares[::-1]:
value = middleware.before_init_poolmanager(
connections, maxsize, block
)
kwargs.update(value or {})
self._pool_connections = connections
self._pool_maxsize = maxsize
self._pool_block = block
self.poolmanager = PoolManager(
num_pools=connections, maxsize=maxsize, block=block,
**kwargs
)
def send(self, request, *args, **kwargs):
"""Send the request. If any middleware in the stack returns a `Response`
or `HTTPResponse` value from its `before_send` method, short-circuit;
else delegate to `HTTPAdapter::send`.
:param request: The :class:`PreparedRequest <PreparedRequest>`
being sent.
:returns: The :class:`Response <Response>` object.
"""
for middleware in self.middlewares:
value = middleware.before_send(request, **kwargs)
if isinstance(value, Response):
return value
if isinstance(value, HTTPResponse):
return self.build_response(request, value)
if value:
raise ValueError('Middleware "before_send" methods must return '
'`Response`, `HTTPResponse`, or `None`')
return super(MiddlewareHTTPAdapter, self).send(
request, *args, **kwargs
)
def build_response(self, req, resp):
"""Build the response. Call `HTTPAdapter::build_response`, then pass
the response object to the `after_build_response` method of each
middleware in the stack, in reverse order.
:param req: The :class:`PreparedRequest <PreparedRequest>` used to
generate the response.
:param resp: The urllib3 response object.
:returns: The :class:`Response <Response>` object.
"""
for middleware in reversed(self.middlewares):
req, resp = middleware.before_build_response(req, resp)
response = super(MiddlewareHTTPAdapter, self).build_response(req, resp)
for middleware in reversed(self.middlewares):
response = middleware.after_build_response(req, resp, response)
return response
class BaseMiddleware(object):
def before_init_poolmanager(self, connections, maxsize, block=False):
"""Called before `HTTPAdapter::init_poolmanager`. Optionally return a
dictionary of keyword arguments to `PoolManager`.
:returns: `dict` of keyword arguments or `None`
"""
pass
def before_send(self, request, *args, **kwargs):
"""Called before `HTTPAdapter::send`. If a truthy value is returned,
:class:`MiddlewareHTTPAdapter <MiddlewareHTTPAdapter>` will short-
circuit the remaining middlewares and `HTTPAdapter::send`, using the
returned value instead.
:param request: The `PreparedRequest` used to generate the response.
:returns: The `Response` object or `None`.
"""
pass
def before_build_response(self, req, resp):
"""Called before `HTTPAdapter::build_response`. Optionally modify the
returned `PreparedRequest` and `HTTPResponse` objects.
:param req: The `PreparedRequest` used to generate the response.
:param resp: The urllib3 response object.
:returns: Tuple of potentially modified (req, resp)
"""
return req, resp
def after_build_response(self, req, resp, response):
"""Called after `HTTPAdapter::build_response`. Optionally modify the
returned `Response` object.
:param req: The `PreparedRequest` used to generate the response.
:param resp: The urllib3 response object.
:param response: The `Response` object.
:returns: The potentially modified `Response` object.
"""
return response
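# Illustrative sketch (assumption, not part of the original module): a minimal
# middleware built on the hooks above; the class name and header are hypothetical.
class ExampleHeaderMiddleware(BaseMiddleware):
    def before_send(self, request, *args, **kwargs):
        # Mutate the outgoing PreparedRequest; returning None lets the adapter
        # fall through to the normal HTTPAdapter send path.
        request.headers.setdefault('X-Example', 'middleware')
        return None

    def after_build_response(self, req, resp, response):
        # Tag the finished Response so callers can see the middleware ran.
        response.example_middleware_ran = True
        return response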
```
#### File: requests-middleware/tests/test_source.py
```python
import pytest
import requests
from requests_middleware.middleware import MiddlewareHTTPAdapter
from requests_middleware.contrib import sourceware
@pytest.fixture
def session():
session = requests.Session()
source_middleware = sourceware.SourceMiddleware('localhost', 8080)
middlewares = [source_middleware]
adapter = MiddlewareHTTPAdapter(middlewares=middlewares)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
# Integration tests
@pytest.mark.httpretty
def test_source(session, page_fixture):
resp = session.get('http://test.com/page')
pool_kwargs = resp.connection.poolmanager.connection_pool_kw
assert pool_kwargs.get('source_address') == ('localhost', 8080)
```
#### File: requests-middleware/tests/test_throttle.py
```python
import pytest
import datetime
import requests
from dateutil.relativedelta import relativedelta
from requests_middleware.middleware import MiddlewareHTTPAdapter
from requests_middleware.contrib import throttleware
from . import utils
@pytest.fixture
def throttle_delay():
return throttleware.DelayThrottler(5)
@pytest.fixture
def throttle_per_hour():
return throttleware.RequestsPerHourThrottler(5)
def make_session_fixture(throttler):
@pytest.fixture
def fixture():
session = requests.Session()
adapter = MiddlewareHTTPAdapter()
throttle_middleware = throttleware.ThrottleMiddleware(throttler)
adapter.register(throttle_middleware)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
return fixture
throttle_delay_session = make_session_fixture(
throttleware.DelayThrottler(5)
)
throttle_per_hour_session = make_session_fixture(
throttleware.RequestsPerHourThrottler(5)
)
# Unit tests
def test_throttle_delay(throttle_delay, monkeypatch):
throttle_delay.check(None)
throttle_delay.store(None, None, None)
with pytest.raises(throttleware.ThrottleError):
throttle_delay.check(None)
now = datetime.datetime.utcnow() + relativedelta(seconds=6)
utils.mock_datetime(monkeypatch, utcnow=now)
throttle_delay.check(None)
def test_throttle_per_hour(throttle_per_hour, monkeypatch):
for _ in range(5):
throttle_per_hour.check(None)
throttle_per_hour.store(None, None, None)
with pytest.raises(throttleware.ThrottleError):
throttle_per_hour.check(None)
now = datetime.datetime.utcnow() + relativedelta(hours=2)
utils.mock_datetime(monkeypatch, utcnow=now)
throttle_per_hour.check(None)
# Integration tests
@pytest.mark.httpretty
def test_throttle_delay_integration(throttle_delay_session, page_fixture,
monkeypatch):
throttle_delay_session.get('http://test.com/page')
with pytest.raises(throttleware.ThrottleError):
throttle_delay_session.get('http://test.com/page')
now = datetime.datetime.utcnow() + relativedelta(seconds=6)
utils.mock_datetime(monkeypatch, utcnow=now)
throttle_delay_session.get('http://test.com/page')
@pytest.mark.httpretty
def test_throttle_per_hour_integration(throttle_per_hour_session, page_fixture,
monkeypatch):
for _ in range(5):
throttle_per_hour_session.get('http://test.com/page')
with pytest.raises(throttleware.ThrottleError):
throttle_per_hour_session.get('http://test.com/page')
now = datetime.datetime.utcnow() + relativedelta(hours=2)
utils.mock_datetime(monkeypatch, utcnow=now)
throttle_per_hour_session.get('http://test.com/page')
``` |
{
"source": "jmcarp/va-court-scraper",
"score": 2
} |
#### File: va-court-scraper/archived/court_search_web.py
```python
from courtreader import readers
from courtutils.database import Database
from courtutils.email import send_password_reset_email, verify_link
from courtutils.logger import get_logger
from courtutils.user import User
from flask import Flask, render_template, make_response, jsonify, redirect, request, url_for
from flask.ext.login import LoginManager, login_required, login_user, logout_user, current_user
import datetime
import os
app = Flask(__name__)
login_manager = LoginManager()
login_manager.init_app(app)
# configure logging
log = get_logger()
log.info('Web running')
@login_manager.user_loader
def load_user(user_id):
print 'loading user'
return User.get(user_id)
@app.route('/')
def index():
if current_user.is_authenticated:
return redirect(url_for('home'))
return render_template('index.html')
@app.route('/home')
@login_required
def home():
return render_template('home.html')
@app.route('/login')
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
return render_template('login.html')
@app.route('/login', methods=['POST'])
def do_login():
email = request.form['email']
password = request.form['password']
if not User.registered(email):
return 'Email address is not registered', 409
user = User.login(email, password)
if user is None:
return 'Wrong password', 409
login_user(user)
return url_for('home')
@app.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/courtmap')
@login_required
def court_map():
circuit_courts = list(Database.get_circuit_courts())
district_courts = list(Database.get_district_courts())
return render_template('courtmap.html',
circuit_courts=circuit_courts,
district_courts=district_courts)
@app.route('/court_list/<court_type>/<court_name>/<miles>')
@login_required
def court_list(court_type, court_name, miles):
courts = list(Database.find_courts(court_type, court_name, int(miles)))
return jsonify(courts=courts)
@app.route('/password')
def password():
email = request.args.get('email')
expiration = request.args.get('expires')
token = request.args.get('token')
valid_request = verify_link('password', email, expiration, token)
return render_template('password.html',
email=email, expiration=expiration, token=token,
valid_request=valid_request)
@app.route('/password', methods=['POST'])
def set_password():
email = request.form['email']
expiration = request.form['expires']
token = request.form['token']
print email, expiration, token
valid_request = verify_link('password', email, expiration, token)
if not valid_request:
return 'This password reset link has expired', 409
if not User.registered(email):
return 'Email address is not registered', 409
password = request.form['password']
User.update_password(email, password)
return email
@app.route('/register', methods=['POST'])
def register():
email = request.form['email']
if User.registered(email):
return 'Email address already registered', 409
User.create(email)
return email
@app.route('/reset-password', methods=['POST'])
def reset_password():
email = request.form['email']
if not User.registered(email):
return 'Email address is not registered', 409
send_password_reset_email(request.form['email'])
return email
@app.route('/search')
def search():
return render_template('search.html')
@app.route('/search/<name>')
def lookup_search_name(name):
return render_template('search_results.html')
@app.route('/search/<name>', methods=['POST'])
def add_search_name_tasks(name):
Database.insert_tasks('circuit', name.upper())
Database.insert_tasks('district', name.upper())
return ''
if __name__ == "__main__":
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
app.secret_key = 'doesnt-need-to-be-secret'
app.run(host='0.0.0.0', port=port, debug=True)
```
#### File: va-court-scraper/archived/data_collection_web.py
```python
import StringIO
import csv
import datetime
import os
import pymongo
from flask import Flask, Response, render_template, jsonify
app = Flask(__name__)
@app.route("/")
def index():
district_db_client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
district_db = district_db_client.va_district_court_cases
courts = list(district_db.courts.find())
ten_minutes_ago = datetime.datetime.utcnow() + datetime.timedelta(minutes=-10)
one_day_ago = datetime.datetime.utcnow() + datetime.timedelta(days=-1)
scrapers = list(district_db.scrapers.find({'last_update': {'$gt': one_day_ago}}))
print courts
return render_template('data_collection.html', courts=courts, scrapers=scrapers, ten_minutes_ago=ten_minutes_ago)
@app.route("/status/<fips_code>")
def status(fips_code):
district_db_client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
district_db = district_db_client.va_district_court_cases
court = {'fips_code': fips_code}
court['total_count'] = district_db.cases.count({
'FIPSCode': fips_code
})
court['collected_count'] = district_db.cases.count({
'FIPSCode': fips_code,
'date_collected': {'$exists': True}
})
return jsonify(**court)
@app.route('/export/<fips_code>/cases.csv')
def export_cases(fips_code):
district_db_client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
district_db = district_db_client.va_district_court_cases
cases = district_db.cases.find({
'FIPSCode': fips_code,
'date_collected': {'$exists': True}
}, projection = {
'_id': False,
'error': False,
'date_collected': False,
'Hearings': False,
'Services': False,
})
fieldnames = [
'FIPSCode', 'CaseNumber', 'Locality', 'CourtName', 'FiledDate',
'Name', 'AKA1', 'AKA2', 'DOB', 'Gender', 'Race', 'Address',
'OffenseDate', 'ArrestDate', 'Class', 'Status', 'Complainant',
'CaseType', 'Charge', 'CodeSection',
'AmendedCaseType', 'AmendedCharge', 'AmendedCode',
'FinalDisposition', 'DefenseAttorney',
'SentenceTime', 'SentenceSuspendedTime',
'ProbationType', 'ProbationTime', 'ProbationStarts',
'Fine', 'Costs', 'FineCostsDue', 'FineCostsPaid', 'FineCostsPaidDate',
'OperatorLicenseRestrictionCodes', 'OperatorLicenseSuspensionTime',
'RestrictionStartDate', 'RestrictionEndDate', 'VASAP'
]
output = StringIO.StringIO()
writer = csv.DictWriter(output, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(cases)
return Response(output.getvalue(), mimetype='text/csv')
if __name__ == "__main__":
# Bind to PORT if defined, otherwise default to 5000.
port = int(os.environ.get('PORT', 5000))
app.secret_key = 'doesnt-need-to-be-secret'
app.run(host='0.0.0.0', port=port, debug=True)
```
#### File: va-court-scraper/archived/examine_garys_data.py
```python
from collections import Counter
from courtreader import readers
from courtutils.database import Database
from courtutils.logger import get_logger
from datetime import datetime, timedelta
import boto3
import csv
import pymongo
import os
import sys
import time
import zipfile
fieldnames = [
'court_fips',
'Court',
'CaseNumber',
'Locality',
'Commencedby',
'Filed',
'Defendant',
'AKA',
'AKA2',
'DOB',
'Sex',
'Race',
'Address',
'OffenseDate',
'ArrestDate',
'Class',
'ConcludedBy',
'Charge',
'ChargeType',
'AmendedCharge',
'AmendedChargeType',
'CodeSection',
'AmendedCodeSection',
'DispositionCode',
'DispositionDate',
'LifeDeath',
'SentenceTime',
'SentenceSuspended',
'ConcurrentConsecutive',
'JailPenitentiary',
'ProbationTime',
'ProbationType',
'ProbationStarts',
'RestitutionAmount',
'RestitutionPaid',
'FineAmount',
'Costs',
'FinesCostPaid',
'TrafficFatality',
'DriverImprovementClinic',
'CourtDMVSurrender',
'OperatorLicenseSuspensionTime',
'DrivingRestrictions',
'RestrictionStartDate',
'RestrictionEndDate',
'VAAlcoholSafetyAction',
'ProgramType',
'Military'
]
excluded_fields = [
'_id', 'details', 'details_fetched', 'case_number',
'details_fetched_for_hearing_date', 'Hearings', 'defendant'
]
def get_db_connection():
return pymongo.MongoClient(os.environ['MONGO_DB'])['va_court_search']
db = get_db_connection()
courts = list(Database.get_circuit_courts())
courts_by_fips = {court['fips_code']:court for court in courts}
cases_by_court = {}
def write_cases_to_file(cases, filename, details, exclude_cases):
with open('./' + filename + '.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=sorted(fieldnames))
writer.writeheader()
for case in cases:
if case['case_number'] in exclude_cases:
continue
if 'error' in case['details']:
print 'Error getting case details', case['case_number']
continue
case['Court'] = courts_by_fips[case['court_fips']]['name']
if case['Court'] not in details:
details[case['Court']] = 0
details[case['Court']] += 1
for detail in case['details']:
new_key = detail.replace(' ', '')
case[new_key] = case['details'][detail]
for field in excluded_fields:
if field in case:
del case[field]
writer.writerow(case)
def export_data_by_year(year, court, exclude_cases):
start = datetime(year, 1, 1)
end = datetime(year + 1, 1, 1)
print 'From', start, 'to', end
cases = db.circuit_court_detailed_cases.find({
'details_fetched_for_hearing_date': {'$gte': start, '$lt': end},
'court_fips': court
})
filename = 'criminal_circuit_court_cases_' + court + '_' + str(year)
details = {}
write_cases_to_file(cases, filename, details, exclude_cases)
with open(sys.argv[1]) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
fips = row['CASE_NUM'][:3]
case_number = row['CASE_NUM'][3:-2] + '-' + row['CASE_NUM'][-2:]
if fips not in cases_by_court:
cases_by_court[fips] = {}
cases_by_court[fips][case_number] = row
for court in cases_by_court:
print ''
print '***', courts_by_fips[court]['name'], '***'
print 'Cases in Pilot File', len(cases_by_court[court])
all_cases = list(db.circuit_court_detailed_cases.find({
'court_fips': court
}, {
'case_number': True,
'details_fetched_for_hearing_date': True
}))
matched_cases = [case for case in all_cases if case['case_number'] in cases_by_court[court]]
print 'Cases in db and Pilot file', len(matched_cases)
all_case_numbers_in_db = [case['case_number'] for case in all_cases]
cases_not_in_db = list(set(cases_by_court[court].keys()) - set(all_case_numbers_in_db))
print 'Cases in Pilot file but not db', len(cases_not_in_db)
dates = [case['details_fetched_for_hearing_date'].year for case in matched_cases]
print 'Last hearing date for cases in VP file', Counter(dates)
cases_2012 = [case for case in all_cases if case['details_fetched_for_hearing_date'].year == 2012]
print '2012 cases in db', len(cases_2012)
unmatched_cases = [case for case in cases_2012 if case['case_number'] not in cases_by_court[court]]
print '2012 cases in db but not Pilot file', len(unmatched_cases)
export_data_by_year(2012, court, cases_by_court[court].keys())
```
#### File: va-court-scraper/courtreader/districtcourtparser.py
```python
import re
from datetime import datetime
def handle_parse_exception(soup):
print '\nException parsing HTML.', \
'Probably contained something unexpected.', \
'Check unexpected_output.html'
with open('unexpected_output.html', 'wb') as output:
output.write(soup.prettify().encode('UTF-8'))
def parse_court_names(soup):
try:
# Load list of courts and fips codes
fips = [tag['value'] for tag in soup.find_all('input',
{'name':'courtFips'})]
names = [tag['value'] for tag in soup.find_all('input',
{'name':'courtName'})]
court_names = {}
for f, c in zip(fips, names):
court_names[f] = c
return court_names
except:
handle_parse_exception(soup)
raise
def parse_name_search(soup):
try:
no_results = re.compile(r'No results found for the search criteria')
if soup.find('td', text=no_results) is not None:
return []
cases = []
rows = soup.find('table', {'class':'tableborder'}).find_all('tr')
for row in rows:
cells = row.find_all('td')
if cells[0]['class'][0] == 'gridheader':
continue
case_number = cells[1].a.text.strip()
defendant = cells[2].string.strip()
charge = cells[5].string.strip()
cases.append({
'case_number': case_number,
'defendant': defendant,
'charge': charge
})
return cases
except:
handle_parse_exception(soup)
raise
def next_names_button_found(soup):
try:
return soup.find('input', {'title': 'Next'}) is not None
except:
handle_parse_exception(soup)
raise
def parse_hearing_date_search(soup, case_type):
try:
no_results = re.compile(r'No results found for the search criteria')
if soup.find('td', text=no_results) is not None:
return []
cases = []
rows = soup.find('table', {'class':'tableborder'}).find_all('tr')
for row in rows:
cells = row.find_all('td')
if cells[0]['class'][0] == 'gridheader':
continue
details_url = cells[1].a['href']
case_number = list(cells[1].a.stripped_strings)[0]
defendant_cell_content = list(cells[2].stripped_strings)
defendant = defendant_cell_content[0] if len(defendant_cell_content) > 0 else ''
if case_type == 'civil':
plaintiff_cell_content = list(cells[3].stripped_strings)
plaintiff = plaintiff_cell_content[0] if len(plaintiff_cell_content) > 0 else ''
civil_case_type_content = list(cells[4].stripped_strings)
civil_case_type = civil_case_type_content[0] if len(civil_case_type_content) > 0 else ''
hearing_time_content = list(cells[5].stripped_strings)
hearing_time = hearing_time_content[0] if len(hearing_time_content) > 0 else ''
cases.append({
'case_number': case_number,
'details_url': details_url,
'defendant': defendant,
'plaintiff': plaintiff,
'civil_case_type': civil_case_type,
'hearing_time': hearing_time
})
else:
status_cell_content = list(cells[6].stripped_strings)
status = status_cell_content[0] if len(status_cell_content) > 0 else ''
cases.append({
'case_number': case_number,
'details_url': details_url,
'defendant': defendant,
'status': status
})
return cases
except:
handle_parse_exception(soup)
raise
def next_button_found(soup):
try:
return soup.find('input', {'name': 'caseInfoScrollForward'}) is not None
except:
handle_parse_exception(soup)
raise
DATES = [
'FiledDate',
'DOB',
'OffenseDate',
'ArrestDate',
'RestrictionEffectiveDate',
'RestrictionEndDate',
'FineCostsDue',
'FineCostsPaidDate',
'WritIssuedDate',
'DateSatisfactionFiled',
'AnswerDate',
'AppealDate',
'DateOrdered',
'DateDue',
'DateReceived',
'DateIssued',
'DateReturned'
]
TIME_SPANS = [
'SentenceTime',
'SentenceSuspendedTime',
'ProbationTime',
'OperatorLicenseSuspensionTime'
]
MONETARY = [
'Fine',
'Costs',
'AttorneyFees',
'PrincipalAmount',
'OtherAmount'
]
BOOL = [
'FineCostsPaid',
'VASAP',
'HomesteadExemptionWaived',
'IsJudgmentSatisfied'
]
def parse_case_details(soup, case_type):
case_details = {}
try:
#case_details['CourtName'] = soup.find(id='headerCourtName') \
# .string.strip()
# Parse grids
for label_cell in soup.find_all(class_=re.compile('labelgrid')):
value_cell = label_cell.next_sibling
while value_cell.name != 'td':
value_cell = value_cell.next_sibling
label = get_string_from_cell(label_cell, True)
value = get_string_from_cell(value_cell)
if value != '':
case_details[label] = value
# Parse tables
if case_type == 'civil':
# the table names really are backwards here
case_details['Plaintiffs'] = parse_table(soup, 'toggleDef')
case_details['Defendants'] = parse_table(soup, 'togglePlaintiff')
case_details['Reports'] = parse_table(soup, 'toggleReports')
case_details['Hearings'] = parse_table(soup, 'toggleHearing')
case_details['Services'] = parse_table(soup, 'toggleServices')
if 'CaseNumber' not in case_details:
raise ValueError('Missing Case Number')
if 'DOB' in case_details:
case_details['DOB'] = case_details['DOB'].replace('****', '1004')
if 'FinalDisposition' in case_details:
val = case_details['FinalDisposition']
if val.endswith('-'):
case_details['FinalDisposition'] = val[:-1].strip()
if 'NumberofChecksReceived' in case_details:
case_details['NumberofChecksReceived'] = int(case_details['NumberofChecksReceived'])
if 'FineCostsDue' in case_details:
case_details['FineCostsPastDue'] = 'PAST DUE' in case_details['FineCostsDue']
for key in DATES:
if key in case_details:
case_details[key] = case_details[key].replace('PAST DUE', '')
case_details[key] = datetime.strptime(case_details[key], '%m/%d/%Y')
for key in TIME_SPANS:
if key in case_details:
case_details[key] = simplify_time_str_to_days(case_details[key])
for key in MONETARY:
if key in case_details:
try:
case_details[key] = float(case_details[key]
.replace('$', '')
.replace(',', '')
.split(' ')[0])
except ValueError:
case_details[key] = -1.0
for key in BOOL:
if key in case_details:
case_details[key] = False if case_details[key].upper() == 'NO' else True
except:
handle_parse_exception(soup)
raise
return case_details
def get_string_from_cell(cell, is_label=False):
values = list(cell.stripped_strings)
if len(values) < 1:
return ''
value = values[0].encode('ascii', 'ignore') \
.replace('\t', '') \
.replace('\r', '') \
.replace('\n', '') \
.replace('\0', '') \
.strip()
if is_label:
value = value.replace(':', '') \
.replace('/', '') \
.replace(' ', '')
return value
def parse_table(soup, table_id):
table_contents = []
table_section = soup.find(id=table_id)
table_headers = [s.replace(' ', '').replace('/', '') for s in
table_section.find(class_='gridheader').stripped_strings]
for row in table_section.find_all(class_='gridrow'):
table_contents.append(parse_table_row(row, table_headers))
for row in table_section.find_all(class_='gridalternaterow'):
table_contents.append(parse_table_row(row, table_headers))
return table_contents
NO_ATTORNEY = [
'NONE',
'NOT EMPLOYED'
]
def parse_table_row(row, table_headers):
data_dict = {}
data_list = zip(
table_headers,
[cell.string.replace('\0', '').strip()
if cell.string is not None else ''
for cell in row.find_all('td')]
)
for item in data_list:
if item[1] == '':
continue
data_dict[item[0]] = item[1]
if item[0] in DATES:
data_dict[item[0]] = datetime.strptime(item[1], '%m/%d/%Y')
if 'Time' in data_dict:
full_dt = '{} {}'.format(data_dict['Date'], data_dict['Time'])
data_dict['Date'] = datetime.strptime(full_dt, '%m/%d/%Y %I:%M %p')
del data_dict['Time']
if 'Attorney' in data_dict and data_dict['Attorney'].upper() in NO_ATTORNEY:
del data_dict['Attorney']
return data_dict
def simplify_time_str_to_days(time_string):
time_string = time_string.replace(' Year(s)', 'Years ') \
.replace(' Month(s)', 'Months ') \
.replace(' Day(s)', 'Days ')
days = 0
string_parts = time_string.split(' ')
for string_part in string_parts:
if 'Years' in string_part:
days += int(string_part.replace('Years', '')) * 365
elif string_part == '12Months':
days += 365
elif 'Months' in string_part:
days += int(string_part.replace('Months', '')) * 30
elif 'Days' in string_part:
days += int(string_part.replace('Days', ''))
elif 'Hours' in string_part:
hours = int(string_part.replace('Hours', ''))
if hours > 0:
days += 1
return days
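# Illustrative conversions (descriptive note, not in the original source):
# simplify_time_str_to_days('2 Year(s) 6 Month(s)') -> 2*365 + 6*30 = 910
# simplify_time_str_to_days('12 Month(s)') -> 365 (special-cased)
# simplify_time_str_to_days('10 Day(s)') -> 10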
```
#### File: courtutils/databases/mongo.py
```python
import pymongo
import os
class MongoDatabase():
def __init__(self, name, court_type):
self.client = pymongo.MongoClient(os.environ['MONGO_DB'])[name]
self.court_type = court_type
def add_court(self, name, fips, location):
self.client[self.court_type + '_courts'].insert_one({
'name': name,
'fips_code': fips,
'location': {'type': 'Point', 'coordinates': [
location.longitude, location.latitude
]}
})
def add_court_location_index(self):
self.client[self.court_type + '_courts'].create_index( \
[('location', pymongo.GEOSPHERE)], background=True)
def drop_courts(self):
self.client[self.court_type + '_courts'].drop()
def get_courts(self):
return self.client[self.court_type + '_courts'].find(None, {'_id':0})
def add_date_tasks(self, tasks):
self.client[self.court_type + '_court_date_tasks'].insert_many(tasks)
def add_date_task(self, task):
self.client[self.court_type + '_court_date_tasks'].insert_one(task)
def get_and_delete_date_task(self):
return self.client[self.court_type + '_court_date_tasks'].find_one_and_delete({})
def add_date_search(self, search):
self.client[self.court_type + '_court_dates_searched'].insert_one(search)
def get_date_search(self, search):
return self.client[self.court_type + '_court_dates_searched'].find_one(search)
def get_more_recent_case_details(self, case, case_type, date):
return self.client[self.court_type + '_court_detailed_cases'].find_one({
'court_fips': case['court_fips'],
'case_number': case['case_number'],
'details_fetched_for_hearing_date': {'$gte': date}
})
def replace_case_details(self, case, case_type):
self.client[self.court_type + '_court_detailed_cases'].find_one_and_replace({
'court_fips': case['court_fips'],
'case_number': case['case_number']
}, case, upsert=True)
def get_cases_by_hearing_date(self, start, end):
return self.client[self.court_type + '_court_detailed_cases'].find({
'details_fetched_for_hearing_date': {'$gte': start, '$lt': end}
})
```
#### File: va-court-scraper/courtutils/email.py
```python
from datetime import timedelta, datetime
import hashlib
import os
import sendwithus
import urllib
def unix_time_millis(dt):
epoch = datetime.utcfromtimestamp(0)
return int((dt - epoch).total_seconds())
def generate_token(data):
hash = hashlib.sha256()
hash.update(os.environ['EMAIL_TOKEN_SALT'])
hash.update(data)
return hash.hexdigest()
def generate_uri(route, email_address, expiration):
uri = route
uri += '?email=' + email_address
uri += '&expires=' + expiration
return uri
def create_link(email_address, route):
expires = datetime.utcnow() + timedelta(days=1)
uri = generate_uri(route,
urllib.quote(email_address),
str(unix_time_millis(expires)))
uri += '&token=' + generate_token(uri)
return uri
def verify_link(route, email_address, expiration, token):
if datetime.fromtimestamp(float(expiration)) < datetime.utcnow():
return False
uri = generate_uri(route,
urllib.quote(email_address),
expiration)
return token == generate_token(uri)
def send_welcome_email(email_address):
api_key = os.environ['SEND_WITH_US']
set_password_link = create_link(email_address, 'password')
swu = sendwithus.api(api_key)
swu.send(
email_id='tem_58MQPDcuQvGKoXG3aVp4Zb',
recipient={'address': email_address},
email_data={'setPasswordLink': set_password_link})
def send_password_reset_email(email_address):
api_key = os.environ['SEND_WITH_US']
set_password_link = create_link(email_address, 'password')
swu = sendwithus.api(api_key)
swu.send(
email_id='tem_shSnhmqCSMAwdLbPhuwY4U',
recipient={'address': email_address},
email_data={'setPasswordLink': set_password_link})
``` |
{
"source": "jmcarp/webargs",
"score": 3
} |
#### File: webargs/examples/bottle_example.py
```python
import datetime as dt
import json
from dateutil import parser
from bottle import route, run, error, response
from webargs import Arg, ValidationError
from webargs.bottleparser import use_args, use_kwargs
hello_args = {
'name': Arg(str, default='Friend')
}
@route('/', method='GET')
@use_args(hello_args)
def index(args):
"""A welcome page.
"""
return {'message': 'Welcome, {}!'.format(args['name'])}
add_args = {
'x': Arg(float, required=True),
'y': Arg(float, required=True),
}
@route('/add', method='POST')
@use_kwargs(add_args)
def add(x, y):
"""An addition endpoint."""
return {'result': x + y}
def string_to_datetime(val):
return parser.parse(val)
def validate_unit(val):
if val not in ['minutes', 'days']:
raise ValidationError("Unit must be either 'minutes' or 'days'.")
dateadd_args = {
'value': Arg(default=dt.datetime.utcnow, use=string_to_datetime),
'addend': Arg(int, required=True, validate=lambda val: val >= 0),
'unit': Arg(str, validate=validate_unit)
}
@route('/dateadd', method='POST')
@use_kwargs(dateadd_args)
def dateadd(value, addend, unit):
"""A datetime adder endpoint."""
if unit == 'minutes':
delta = dt.timedelta(minutes=addend)
else:
delta = dt.timedelta(days=addend)
result = value + delta
return {'result': result.isoformat()}
# Return validation errors as JSON
@error(400)
def error400(err):
response.content_type = 'application/json'
return json.dumps({'message': str(err.body)})
if __name__ == '__main__':
run(port=5001, reloader=True, debug=True)
```
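With the Bottle app above running, the three endpoints can be exercised from a small client script. This is a sketch only, assuming the `requests` package and the default port 5001 passed to `run()`; the commented responses are what the handlers above would return:
```python
# Hypothetical client session against the Bottle example above.
import requests

print(requests.get('http://localhost:5001/', params={'name': 'Ada'}).json())
# {'message': 'Welcome, Ada!'}
print(requests.post('http://localhost:5001/add', data={'x': 2, 'y': 3}).json())
# {'result': 5.0}
print(requests.post('http://localhost:5001/dateadd',
                    data={'value': '2014-01-01', 'addend': 5, 'unit': 'days'}).json())
# {'result': '2014-01-06T00:00:00'}
```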
#### File: webargs/examples/tornado_example.py
```python
import datetime as dt
from dateutil import parser
import tornado.ioloop
from tornado.web import RequestHandler
from webargs import Arg, ValidationError
from webargs.tornadoparser import use_args, use_kwargs
class BaseRequestHandler(RequestHandler):
def write_error(self, status_code, **kwargs):
"""Write errors as JSON."""
self.set_header('Content-Type', 'application/json')
if 'exc_info' in kwargs:
etype, value, traceback = kwargs['exc_info']
msg = value.log_message or str(value)
self.write({'message': msg})
self.finish()
class HelloHandler(BaseRequestHandler):
"""A welcome page."""
hello_args = {
'name': Arg(str, default='Friend')
}
@use_args(hello_args)
def get(self, args):
response = {'message': 'Welcome, {}!'.format(args['name'])}
self.write(response)
class AdderHandler(BaseRequestHandler):
"""An addition endpoint."""
add_args = {
'x': Arg(float, required=True),
'y': Arg(float, required=True),
}
@use_kwargs(add_args)
def post(self, x, y):
self.write({'result': x + y})
def string_to_datetime(val):
return parser.parse(val)
def validate_unit(val):
if val not in ['minutes', 'days']:
raise ValidationError("Unit must be either 'minutes' or 'days'.")
class DateAddHandler(BaseRequestHandler):
"""A datetime adder endpoint."""
dateadd_args = {
'value': Arg(default=dt.datetime.utcnow, use=string_to_datetime),
'addend': Arg(int, required=True, validate=lambda val: val >= 0),
'unit': Arg(str, validate=validate_unit)
}
@use_kwargs(dateadd_args)
def post(self, value, addend, unit):
"""A datetime adder endpoint."""
if unit == 'minutes':
delta = dt.timedelta(minutes=addend)
else:
delta = dt.timedelta(days=addend)
result = value + delta
self.write({'result': result.isoformat()})
if __name__ == '__main__':
app = tornado.web.Application([
(r'/', HelloHandler),
(r'/add', AdderHandler),
(r'/dateadd', DateAddHandler),
], debug=True)
app.listen(5001)
tornado.ioloop.IOLoop.instance().start()
```
#### File: webargs/webargs/flaskparser.py
```python
import logging
from flask import request, abort as flask_abort
from werkzeug.exceptions import HTTPException
from webargs import core
from webargs.core import text_type
logger = logging.getLogger(__name__)
def abort(http_status_code, **kwargs):
"""Raise a HTTPException for the given http_status_code. Attach any keyword
arguments to the exception for later processing.
From Flask-Restful. See NOTICE file for license information.
"""
try:
flask_abort(http_status_code)
except HTTPException as err:
if len(kwargs):
err.data = kwargs
raise err
class FlaskParser(core.Parser):
"""Flask request argument parser."""
def parse_json(self, req, name, arg):
"""Pull a json value from the request."""
# Fail silently so that the webargs parser can handle the error
json_data = req.get_json(silent=True)
if json_data:
return core.get_value(json_data, name, arg.multiple)
else:
return core.Missing
def parse_querystring(self, req, name, arg):
"""Pull a querystring value from the request."""
return core.get_value(req.args, name, arg.multiple)
def parse_form(self, req, name, arg):
"""Pull a form value from the request."""
try:
return core.get_value(req.form, name, arg.multiple)
except AttributeError:
pass
return core.Missing
def parse_headers(self, req, name, arg):
"""Pull a value from the header data."""
return core.get_value(req.headers, name, arg.multiple)
def parse_cookies(self, req, name, arg):
"""Pull a value from the cookiejar."""
return core.get_value(req.cookies, name, arg.multiple)
def parse_files(self, req, name, arg):
"""Pull a file from the request."""
return core.get_value(req.files, name, arg.multiple)
def handle_error(self, error):
"""Handles errors during parsing. Aborts the current HTTP request and
responds with a 400 error.
"""
logger.error(error)
status_code = getattr(error, 'status_code', 400)
data = getattr(error, 'data', {})
abort(status_code, message=text_type(error), exc=error, **data)
def parse(self, argmap, req=None, *args, **kwargs):
"""Parses the request using the given arguments map.
Uses Flask's context-local request object if req=None.
"""
req_obj = req or request # Default to context-local request
return super(FlaskParser, self).parse(argmap, req_obj, *args, **kwargs)
parser = FlaskParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
```
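For comparison with the Bottle and Tornado examples earlier in this document, the parser above wires into a Flask view in the same way. A minimal sketch, assuming the same `Arg`-based API and a locally created app:
```python
# Hypothetical Flask app using the FlaskParser defined above.
from flask import Flask, jsonify
from webargs import Arg
from webargs.flaskparser import use_args

app = Flask(__name__)

hello_args = {'name': Arg(str, default='Friend')}

@app.route('/')
@use_args(hello_args)
def index(args):
    return jsonify(message='Welcome, {}!'.format(args['name']))

if __name__ == '__main__':
    app.run(port=5001)
```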
#### File: webargs/webargs/tornadoparser.py
```python
import json
import functools
import logging
import tornado.web
from webargs import core
logger = logging.getLogger(__name__)
def parse_json(s):
if isinstance(s, bytes):
s = s.decode('utf-8')
return json.loads(s)
def get_value(d, name, multiple):
"""Handle gets from 'multidicts' made of lists
It handles cases: ``{"key": [value]}`` and ``{"key": value}``
"""
value = d.get(name, core.Missing)
if multiple:
return [] if value is core.Missing else value
if value and isinstance(value, list):
return value[0]
return value
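# For example (hypothetical values): with d = {'name': [b'Ada']},
# get_value(d, 'name', multiple=False) returns b'Ada', while
# get_value(d, 'name', multiple=True) returns [b'Ada'].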
class TornadoParser(core.Parser):
"""Tornado request argument parser."""
def __init__(self, *args, **kwargs):
super(TornadoParser, self).__init__(*args, **kwargs)
self.json = None
def parse_json(self, req, name, arg):
"""Pull a json value from the request."""
return get_value(self.json, name, arg.multiple)
def parse_querystring(self, req, name, arg):
"""Pull a querystring value from the request."""
return get_value(req.query_arguments, name, arg.multiple)
def parse_form(self, req, name, arg):
"""Pull a form value from the request."""
return get_value(req.body_arguments, name, arg.multiple)
def parse_headers(self, req, name, arg):
"""Pull a value from the header data."""
return get_value(req.headers, name, arg.multiple)
def parse_cookies(self, req, name, arg):
"""Pull a value from the header data."""
cookie = req.cookies.get(name)
if cookie is not None:
return [cookie.value] if arg.multiple else cookie.value
else:
return [] if arg.multiple else None
def parse_files(self, req, name, arg):
"""Pull a file from the request."""
return get_value(req.files, name, arg.multiple)
def handle_error(self, error):
"""Handles errors during parsing. Raises a `tornado.web.HTTPError`
with a 400 error.
"""
logger.error(error)
status_code = getattr(error, 'status_code', 400)
data = getattr(error, 'data', {})
raise tornado.web.HTTPError(status_code, error.args[0], **data)
def _parse_json_body(self, req):
content_type = req.headers.get('Content-Type')
if content_type and 'application/json' in req.headers.get('Content-Type'):
try:
self.json = parse_json(req.body)
except (TypeError, ValueError):
self.json = {}
else:
self.json = {}
def parse(self, argmap, req, *args, **kwargs):
"""Parses the request using the given arguments map.
Initializes :attr:`json` attribute.
"""
self._parse_json_body(req)
return super(TornadoParser, self).parse(argmap, req, *args, **kwargs)
def use_args(self, argmap, req=None, targets=core.Parser.DEFAULT_TARGETS,
as_kwargs=False, validate=None):
"""Decorator that injects parsed arguments into a view function or method.
:param dict argmap: Dictionary of argument_name:Arg object pairs.
:param req: The request object to parse
:param tuple targets: Where on the request to search for values.
:param as_kwargs: Whether to pass arguments to the handler as kwargs
:param callable validate: Validation function that receives the dictionary
of parsed arguments. If the function returns ``False``, the parser
will raise a :exc:`ValidationError`.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(obj, *args, **kwargs):
parsed_args = self.parse(
argmap, req=obj.request, targets=targets, validate=validate)
if as_kwargs:
kwargs.update(parsed_args)
else:
args = (parsed_args,) + args
return func(obj, *args, **kwargs)
return wrapper
return decorator
parser = TornadoParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
``` |
{
"source": "jmcastagnetto/spanish-anagrams-list",
"score": 4
} |
#### File: jmcastagnetto/spanish-anagrams-list/anagrams.py
```python
from collections import defaultdict
# change to the path on your system
lines = open("/usr/share/dict/spanish", "r").readlines()
words = set()
for line in lines:
line = line.strip().lower()
words.add(line)
def sig(word):
return "".join(sorted(word))
d = defaultdict(set)
for w in words:
d[sig(w)].add(w)
# make a csv file containing the signature, length of set and its elements
sig_file = open("anagrams-signature-spanish.csv", "w")
print("signature,n,elements", file = sig_file)
for s, wrds in d.items():
if len(wrds) > 1:
print("{},{},{}".format(s, str(len(wrds)), "|".join(wrds)), file = sig_file)
sig_file.close()
# make the anagram dictionary file
dict_file = open("anagrams-spanish.txt", "w")
for w in sorted(words):
anas = sorted(d[sig(w)])
if len(anas) > 1:
anas.remove(w)
print("{}: {}".format(w, ", ".join(anas)), file = dict_file)
dict_file.close()
``` |
{
"source": "JMcB17/cpython",
"score": 2
} |
#### File: cpython/Modules/getpath.py
```python
platlibdir = config.get('platlibdir') or PLATLIBDIR
if os_name == 'posix' or os_name == 'darwin':
BUILDDIR_TXT = 'pybuilddir.txt'
BUILD_LANDMARK = 'Modules/Setup.local'
DEFAULT_PROGRAM_NAME = f'python{VERSION_MAJOR}'
STDLIB_SUBDIR = f'{platlibdir}/python{VERSION_MAJOR}.{VERSION_MINOR}'
STDLIB_LANDMARKS = [f'{STDLIB_SUBDIR}/os.py', f'{STDLIB_SUBDIR}/os.pyc']
PLATSTDLIB_LANDMARK = f'{platlibdir}/python{VERSION_MAJOR}.{VERSION_MINOR}/lib-dynload'
BUILDSTDLIB_LANDMARKS = ['Lib/os.py']
VENV_LANDMARK = 'pyvenv.cfg'
ZIP_LANDMARK = f'{platlibdir}/python{VERSION_MAJOR}{VERSION_MINOR}.zip'
DELIM = ':'
SEP = '/'
elif os_name == 'nt':
BUILDDIR_TXT = 'pybuilddir.txt'
BUILD_LANDMARK = r'..\..\Modules\Setup.local'
DEFAULT_PROGRAM_NAME = f'python'
STDLIB_SUBDIR = 'Lib'
STDLIB_LANDMARKS = [f'{STDLIB_SUBDIR}\\os.py', f'{STDLIB_SUBDIR}\\os.pyc']
PLATSTDLIB_LANDMARK = f'{platlibdir}'
BUILDSTDLIB_LANDMARKS = ['Lib\\os.py']
VENV_LANDMARK = 'pyvenv.cfg'
ZIP_LANDMARK = f'python{VERSION_MAJOR}{VERSION_MINOR}{PYDEBUGEXT or ""}.zip'
WINREG_KEY = f'SOFTWARE\\Python\\PythonCore\\{PYWINVER}\\PythonPath'
DELIM = ';'
SEP = '\\'
# ******************************************************************************
# HELPER FUNCTIONS (note that we prefer C functions for performance)
# ******************************************************************************
def search_up(prefix, *landmarks, test=isfile):
while prefix:
if any(test(joinpath(prefix, f)) for f in landmarks):
return prefix
prefix = dirname(prefix)
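# For illustration: search_up('/usr/local/bin', *STDLIB_LANDMARKS) looks for a
# landmark under '/usr/local/bin', then '/usr/local', then '/usr', and so on,
# returning the first prefix that contains one (or None if the walk runs out).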
# ******************************************************************************
# READ VARIABLES FROM config
# ******************************************************************************
program_name = config.get('program_name')
home = config.get('home')
executable = config.get('executable')
base_executable = config.get('base_executable')
prefix = config.get('prefix')
exec_prefix = config.get('exec_prefix')
base_prefix = config.get('base_prefix')
base_exec_prefix = config.get('base_exec_prefix')
ENV_PYTHONPATH = config['pythonpath_env']
use_environment = config.get('use_environment', 1)
pythonpath = config.get('module_search_paths')
real_executable_dir = None
stdlib_dir = None
platstdlib_dir = None
# ******************************************************************************
# CALCULATE program_name
# ******************************************************************************
program_name_was_set = bool(program_name)
if not program_name:
try:
program_name = config.get('orig_argv', [])[0]
except IndexError:
pass
if not program_name:
program_name = DEFAULT_PROGRAM_NAME
if EXE_SUFFIX and not hassuffix(program_name, EXE_SUFFIX) and isxfile(program_name + EXE_SUFFIX):
program_name = program_name + EXE_SUFFIX
# ******************************************************************************
# CALCULATE executable
# ******************************************************************************
if py_setpath:
# When Py_SetPath has been called, executable defaults to
# the real executable path.
if not executable:
executable = real_executable
if not executable and SEP in program_name:
# Resolve partial path program_name against current directory
executable = abspath(program_name)
if not executable:
# All platforms default to real_executable if known at this
# stage. POSIX does not set this value.
executable = real_executable
elif os_name == 'darwin':
# QUIRK: On macOS we may know the real executable path, but
# if our caller has lied to us about it (e.g. most of
# test_embed), we need to use their path in order to detect
# whether we are in a build tree. This is true even if the
# executable path was provided in the config.
real_executable = executable
if not executable and program_name:
# Resolve names against PATH.
# NOTE: The use_environment value is ignored for this lookup.
# To properly isolate, launch Python with a full path.
for p in ENV_PATH.split(DELIM):
p = joinpath(p, program_name)
if isxfile(p):
executable = p
break
if not executable:
executable = ''
# When we cannot calculate the executable, subsequent searches
# look in the current working directory. Here, we emulate that
# (the former getpath.c would do it apparently by accident).
executable_dir = abspath('.')
# Also need to set this fallback in case we are running from a
# build directory with an invalid argv0 (i.e. test_sys.test_executable)
real_executable_dir = executable_dir
if ENV_PYTHONEXECUTABLE or ENV___PYVENV_LAUNCHER__:
# If set, these variables imply that we should be using them as
# sys.executable and when searching for venvs. However, we should
# use the argv0 path for prefix calculation
base_executable = executable
if not real_executable:
real_executable = executable
executable = ENV_PYTHONEXECUTABLE or ENV___PYVENV_LAUNCHER__
executable_dir = dirname(executable)
# ******************************************************************************
# CALCULATE (default) home
# ******************************************************************************
# Used later to distinguish between Py_SetPythonHome and other
# ways that it may have been set
home_was_set = False
if home:
home_was_set = True
elif use_environment and ENV_PYTHONHOME and not py_setpath:
home = ENV_PYTHONHOME
# ******************************************************************************
# READ pyvenv.cfg
# ******************************************************************************
venv_prefix = None
# Calling Py_SetPythonHome(), Py_SetPath() or
# setting $PYTHONHOME will override venv detection.
if not home and not py_setpath:
try:
# prefix2 is just to avoid calculating dirname again later,
# as the path in venv_prefix is the more common case.
venv_prefix2 = executable_dir or dirname(executable)
venv_prefix = dirname(venv_prefix2)
try:
# Read pyvenv.cfg from one level above executable
pyvenvcfg = readlines(joinpath(venv_prefix, VENV_LANDMARK))
except FileNotFoundError:
# Try the same directory as executable
pyvenvcfg = readlines(joinpath(venv_prefix2, VENV_LANDMARK))
venv_prefix = venv_prefix2
except FileNotFoundError:
venv_prefix = None
pyvenvcfg = []
for line in pyvenvcfg:
key, had_equ, value = line.partition('=')
if had_equ and key.strip().lower() == 'home':
executable_dir = real_executable_dir = value.strip()
base_executable = joinpath(executable_dir, basename(executable))
break
else:
venv_prefix = None
# ******************************************************************************
# CALCULATE base_executable, real_executable AND executable_dir
# ******************************************************************************
if not base_executable:
base_executable = executable or real_executable or ''
if not real_executable:
real_executable = base_executable
try:
real_executable = realpath(real_executable)
except OSError as ex:
# Only warn if the file actually exists and was unresolvable
# Otherwise users who specify a fake executable may get spurious warnings.
if isfile(real_executable):
warn(f'Failed to find real location of {base_executable}')
if not executable_dir and os_name == 'darwin' and library:
# QUIRK: macOS checks adjacent to its library early
library_dir = dirname(library)
if any(isfile(joinpath(library_dir, p)) for p in STDLIB_LANDMARKS):
# Exceptions here should abort the whole process (to match
# previous behavior)
executable_dir = realpath(library_dir)
real_executable_dir = executable_dir
# If we do not have the executable's directory, we can calculate it.
# This is the directory used to find prefix/exec_prefix if necessary.
if not executable_dir:
executable_dir = real_executable_dir = dirname(real_executable)
# If we do not have the real executable's directory, we calculate it.
# This is the directory used to detect build layouts.
if not real_executable_dir:
real_executable_dir = dirname(real_executable)
# ******************************************************************************
# DETECT _pth FILE
# ******************************************************************************
# The contents of an optional ._pth file are used to totally override
# sys.path calculation. Its presence also implies isolated mode and
# no-site (unless explicitly requested)
pth = None
pth_dir = None
# Calling Py_SetPythonHome() or Py_SetPath() will override ._pth search,
# but environment variables and command-line options cannot.
if not py_setpath and not home_was_set:
# Check adjacent to the main DLL/dylib/so
if library:
try:
pth = readlines(library.rpartition('.')[0] + '._pth')
pth_dir = dirname(library)
except FileNotFoundError:
pass
# Check adjacent to the original executable, even if we
# redirected to actually launch Python. This may allow a
# venv to override the base_executable's ._pth file, but
# it cannot override the library's one.
if not pth_dir:
try:
pth = readlines(executable.rpartition('.')[0] + '._pth')
pth_dir = dirname(executable)
except FileNotFoundError:
pass
# If we found a ._pth file, disable environment and home
# detection now. Later, we will do the rest.
if pth_dir:
use_environment = 0
home = pth_dir
pythonpath = []
# ******************************************************************************
# CHECK FOR BUILD DIRECTORY
# ******************************************************************************
build_prefix = None
if not home_was_set and real_executable_dir and not py_setpath:
# Detect a build marker and use it to infer prefix, exec_prefix,
# stdlib_dir and the platstdlib_dir directories.
try:
platstdlib_dir = joinpath(
real_executable_dir,
readlines(joinpath(real_executable_dir, BUILDDIR_TXT))[0],
)
build_prefix = joinpath(real_executable_dir, VPATH)
except IndexError:
# File exists but is empty
platstdlib_dir = real_executable_dir
build_prefix = joinpath(real_executable_dir, VPATH)
except FileNotFoundError:
if isfile(joinpath(real_executable_dir, BUILD_LANDMARK)):
build_prefix = joinpath(real_executable_dir, VPATH)
if os_name == 'nt':
# QUIRK: Windows builds need platstdlib_dir to be the executable
# dir. Normally the builddir marker handles this, but in this
# case we need to correct manually.
platstdlib_dir = real_executable_dir
if build_prefix:
if os_name == 'nt':
# QUIRK: No searching for more landmarks on Windows
build_stdlib_prefix = build_prefix
else:
build_stdlib_prefix = search_up(build_prefix, *BUILDSTDLIB_LANDMARKS)
# Always use the build prefix for stdlib
if build_stdlib_prefix:
stdlib_dir = joinpath(build_stdlib_prefix, 'Lib')
else:
stdlib_dir = joinpath(build_prefix, 'Lib')
# Only use the build prefix for prefix if it hasn't already been set
if not prefix:
prefix = build_stdlib_prefix
# Do not warn, because 'prefix' never equals 'build_prefix' on POSIX
#elif not venv_prefix and prefix != build_prefix:
# warn('Detected development environment but prefix is already set')
if not exec_prefix:
exec_prefix = build_prefix
# Do not warn, because 'exec_prefix' never equals 'build_prefix' on POSIX
#elif not venv_prefix and exec_prefix != build_prefix:
# warn('Detected development environment but exec_prefix is already set')
config['_is_python_build'] = 1
# ******************************************************************************
# CALCULATE prefix AND exec_prefix
# ******************************************************************************
if py_setpath:
# As documented, calling Py_SetPath will force both prefix
# and exec_prefix to the empty string.
prefix = exec_prefix = ''
else:
# Read prefix and exec_prefix from explicitly set home
if home:
# When multiple paths are listed with ':' or ';' delimiters,
# split into prefix:exec_prefix
prefix, had_delim, exec_prefix = home.partition(DELIM)
if not had_delim:
exec_prefix = prefix
# Reset the standard library directory if it was already set
stdlib_dir = None
# First try to detect prefix by looking alongside our runtime library, if known
if library and not prefix:
library_dir = dirname(library)
if ZIP_LANDMARK:
if os_name == 'nt':
# QUIRK: Windows does not search up for ZIP file
if isfile(joinpath(library_dir, ZIP_LANDMARK)):
prefix = library_dir
else:
prefix = search_up(library_dir, ZIP_LANDMARK)
if STDLIB_SUBDIR and STDLIB_LANDMARKS and not prefix:
if any(isfile(joinpath(library_dir, f)) for f in STDLIB_LANDMARKS):
prefix = library_dir
stdlib_dir = joinpath(prefix, STDLIB_SUBDIR)
# Detect prefix by looking for zip file
if ZIP_LANDMARK and executable_dir and not prefix:
if os_name == 'nt':
# QUIRK: Windows does not search up for ZIP file
if isfile(joinpath(executable_dir, ZIP_LANDMARK)):
prefix = executable_dir
else:
prefix = search_up(executable_dir, ZIP_LANDMARK)
if prefix:
stdlib_dir = joinpath(prefix, STDLIB_SUBDIR)
if not isdir(stdlib_dir):
stdlib_dir = None
# Detect prefix by searching from our executable location for the stdlib_dir
if STDLIB_SUBDIR and STDLIB_LANDMARKS and executable_dir and not prefix:
prefix = search_up(executable_dir, *STDLIB_LANDMARKS)
if prefix:
stdlib_dir = joinpath(prefix, STDLIB_SUBDIR)
if PREFIX and not prefix:
prefix = PREFIX
if not any(isfile(joinpath(prefix, f)) for f in STDLIB_LANDMARKS):
warn('Could not find platform independent libraries <prefix>')
if not prefix:
prefix = abspath('')
warn('Could not find platform independent libraries <prefix>')
# Detect exec_prefix by searching from executable for the platstdlib_dir
if PLATSTDLIB_LANDMARK and not exec_prefix:
if executable_dir:
exec_prefix = search_up(executable_dir, PLATSTDLIB_LANDMARK, test=isdir)
if not exec_prefix:
if EXEC_PREFIX:
exec_prefix = EXEC_PREFIX
if not isdir(joinpath(exec_prefix, PLATSTDLIB_LANDMARK)):
warn('Could not find platform dependent libraries <exec_prefix>')
else:
warn('Could not find platform dependent libraries <exec_prefix>')
# Fallback: assume exec_prefix == prefix
if not exec_prefix:
exec_prefix = prefix
if not prefix or not exec_prefix:
warn('Consider setting $PYTHONHOME to <prefix>[:<exec_prefix>]')
# If we haven't set [plat]stdlib_dir already, set them now
if not stdlib_dir:
if prefix:
stdlib_dir = joinpath(prefix, STDLIB_SUBDIR)
else:
stdlib_dir = ''
if not platstdlib_dir:
if exec_prefix:
platstdlib_dir = joinpath(exec_prefix, PLATSTDLIB_LANDMARK)
else:
platstdlib_dir = ''
# For a venv, update the main prefix/exec_prefix but leave the base ones unchanged
# XXX: We currently do not update prefix here, but it happens in site.py
#if venv_prefix:
# base_prefix = prefix
# base_exec_prefix = exec_prefix
# prefix = exec_prefix = venv_prefix
# ******************************************************************************
# UPDATE pythonpath (sys.path)
# ******************************************************************************
if py_setpath:
# If Py_SetPath was called then it overrides any existing search path
config['module_search_paths'] = py_setpath.split(DELIM)
config['module_search_paths_set'] = 1
elif not pythonpath:
# If pythonpath was already set, we leave it alone.
# This won't matter in normal use, but if an embedded host is trying to
# recalculate paths while running then we do not want to change it.
pythonpath = []
# First add entries from the process environment
if use_environment and ENV_PYTHONPATH:
for p in ENV_PYTHONPATH.split(DELIM):
pythonpath.append(abspath(p))
# Then add the default zip file
if os_name == 'nt':
# QUIRK: Windows uses the library directory rather than the prefix
if library:
library_dir = dirname(library)
else:
library_dir = executable_dir
pythonpath.append(joinpath(library_dir, ZIP_LANDMARK))
elif build_prefix or venv_prefix:
# QUIRK: POSIX uses the default prefix when in the build directory
# or a venv
pythonpath.append(joinpath(PREFIX, ZIP_LANDMARK))
else:
pythonpath.append(joinpath(prefix, ZIP_LANDMARK))
if os_name == 'nt' and use_environment and winreg:
# QUIRK: Windows also lists paths in the registry. Paths are stored
# as the default value of each subkey of
# {HKCU,HKLM}\Software\Python\PythonCore\{winver}\PythonPath
# where winver is sys.winver (typically '3.x' or '3.x-32')
for hk in (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE):
try:
key = winreg.OpenKeyEx(hk, WINREG_KEY)
try:
i = 0
while True:
try:
keyname = winreg.EnumKey(key, i)
subkey = winreg.OpenKeyEx(key, keyname)
if not subkey:
continue
try:
v = winreg.QueryValue(subkey)
finally:
winreg.CloseKey(subkey)
if isinstance(v, str):
pythonpath.append(v)
i += 1
except OSError:
break
finally:
winreg.CloseKey(key)
except OSError:
pass
# Then add any entries compiled into the PYTHONPATH macro.
if PYTHONPATH:
for p in PYTHONPATH.split(DELIM):
pythonpath.append(joinpath(prefix, p))
# Then add stdlib_dir and platstdlib_dir
if stdlib_dir:
pythonpath.append(stdlib_dir)
if platstdlib_dir:
if os_name == 'nt' and venv_prefix:
# QUIRK: Windows appends executable_dir instead of platstdlib_dir
# when in a venv
pythonpath.append(executable_dir)
else:
pythonpath.append(platstdlib_dir)
config['module_search_paths'] = pythonpath
config['module_search_paths_set'] = 1
# ******************************************************************************
# POSIX prefix/exec_prefix QUIRKS
# ******************************************************************************
# QUIRK: Non-Windows replaces prefix/exec_prefix with defaults when running
# in build directory. This happens after pythonpath calculation.
if os_name != 'nt' and build_prefix:
prefix = config.get('prefix') or PREFIX
exec_prefix = config.get('exec_prefix') or EXEC_PREFIX or prefix
# ******************************************************************************
# SET pythonpath FROM _PTH FILE
# ******************************************************************************
if pth:
config['isolated'] = 1
config['use_environment'] = 0
config['site_import'] = 0
pythonpath = []
for line in pth:
line = line.partition('#')[0].strip()
if not line:
pass
elif line == 'import site':
config['site_import'] = 1
elif line.startswith('import '):
warn("unsupported 'import' line in ._pth file")
else:
pythonpath.append(joinpath(pth_dir, line))
config['module_search_paths'] = pythonpath
config['module_search_paths_set'] = 1
# ******************************************************************************
# UPDATE config FROM CALCULATED VALUES
# ******************************************************************************
config['program_name'] = program_name
config['home'] = home
config['executable'] = executable
config['base_executable'] = base_executable
config['prefix'] = prefix
config['exec_prefix'] = exec_prefix
config['base_prefix'] = base_prefix or prefix
config['base_exec_prefix'] = base_exec_prefix or exec_prefix
config['platlibdir'] = platlibdir
config['stdlib_dir'] = stdlib_dir
config['platstdlib_dir'] = platstdlib_dir
``` |
{
"source": "JMcB17/pixels",
"score": 2
} |
#### File: pixels/pixels/__init__.py
```python
import logging
from pixels.pixels import app # noqa: F401 Unused import
ENDPOINTS_TO_FILTER_OUT = [
"/set_pixel"
]
class EndpointFilter(logging.Filter):
"""Used to filter out unicorn endpoint logging."""
def filter(self, record: logging.LogRecord) -> bool:
"""Returns true for logs that don't contain anything we want to filter out."""
log = record.getMessage()
return all(endpoint not in log for endpoint in ENDPOINTS_TO_FILTER_OUT)
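# For example (hypothetical log lines): a record reading
# 'GET /set_pixel?x=1&y=2 HTTP/1.1' is dropped, while 'GET /get_pixels HTTP/1.1'
# passes through, because only the former contains an entry from
# ENDPOINTS_TO_FILTER_OUT.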
# Filter out all endpoints in `ENDPOINTS_TO_FILTER_OUT`
logging.getLogger("uvicorn.access").addFilter(EndpointFilter())
``` |
{
"source": "jmcb/jquizzyva",
"score": 4
} |
#### File: jquizzyva/test/test_z___.py
```python
import unittest
import util.search, util.db
class TestZ___Anagram (unittest.TestCase):
def setUp (self):
with open("test/z___.txt") as f:
self.acceptable = set([line.strip() for line in f])
self.search = util.search.AnagramMatch("Z[AEIOU][AEIOU]?")
self.db = util.db.csw()
def test_search (self):
results = self.db.search(self.search)
for result in results:
word = result["word"]
self.assertIn(word, self.acceptable)
class TestZ___SubAnagram (unittest.TestCase):
def setUp (self):
with open("test/sub_z___.txt") as f:
self.acceptable = set([line.strip() for line in f])
self.search = util.search.SubanagramMatch("Z[AEIOU][AEIOU]?")
self.db = util.db.csw()
def test_search (self):
results = self.db.search(self.search)
for result in results:
word = result["word"]
self.assertIn(word, self.acceptable)
```
#### File: jquizzyva/util/search.py
```python
import json
import sqlite3
import util.pattern
CALLBACK_FUNCTION = "callback"
class SearchError (Exception):
"""
This error is raised whenever a search fails.
"""
pass
def alphagram (string):
"""
This converts a string into an 'alphagram': an alphabetically sorted string.
"""
return "".join(sorted(list(string.upper())))
class SearchType (object):
"""
This is the basic search type object from which all of our search types
    derive. Primarily, they provide a way of automatically generating SQL
    WHERE clauses for searching; secondarily, they provide limiting and
    filtering of results.
"""
negated = False
def __init__ (self, negated=False, *args, **kwargs):
"""
Create a new search type. This provides access for negation of search
terms on a general basis, but otherwise all search terms created should
        be derivatives of this class; it should never be instantiated directly.
:param negated: If True, the search term will be negated. Default False.
"""
super(SearchType, self).__init__(*args, **kwargs)
self.negated = negated
def negate (self):
"""
Manually mark the search term as being negated.
"""
self.negated = True
def clause (self):
"""
Generate a clause or series of clauses to be appended to an SQL
statement. It returns False to denote that there is no clause for this
constraint.
"""
return False
    def limit (self, result):
"""
Limit returned results by filtering them according to some specific
pattern. It is expected that the limiting will be in-place, and that
the new "limited" result list will be returned by this function.
:param result: This consists of the list of results from the database;
note that this result list may have already been limited by a
previous query.
"""
return result
def __repr__ (self):
return str(self.__class__)
def asdict (self):
result = {"search_type": self.__class__.__name__, "negated": self.negated}
for key, value in self.__dict__.iteritems():
if key.startswith("search"):
result[key] = value
return result
def asjson (self):
return json.dumps(self.asdict())
@classmethod
def fromdict (cls, ddict):
ddict = dict(ddict)
st = ddict.pop("search_type")
        if st != cls.__name__:
cls = globals()[st]
return cls(**ddict)
@classmethod
def fromjson (cls, jsond):
return cls.fromdict(json.loads(jsond))
class StringSearch (SearchType):
"""
This is another base-type for string-based searches, and should not be
instantiated directly.
"""
search_string = None
def __init__ (self, search_string="", *args, **kwargs):
"""
Create a new string-based search.
:param search_string: This string is the parameter that is stored and
is then used when generating WHERE clauses.
"""
super(StringSearch, self).__init__(*args, **kwargs)
self.search_string = search_string
def __repr__ (self):
return "<%s search_string:%s>" % (self.__class__.__name__, self.search_string)
try:
from util._search import AnagramMatchBase, SubanagramMatchBase, PatternMatchBase
except ImportError:
class PatternMatch (StringSearch):
"""
A derivative of StringSearch, this search applies a pattern specifically to
the database.
"""
patternobj = None
regexpobj = None
column = "words.word"
def clause (self):
return CALLBACK_FUNCTION
def pattern (self):
if self.patternobj is None:
self.patternobj = util.pattern.Pattern.fromstring(self.search_string)
self.regexpobj = self.patternobj.as_regexp()
def search_function (word):
match = self.regexpobj.match(word)
if not match:
return (False, 0)
blanks = ''.join([x for x in match.groups() if x])
self.patternobj.blank_store.append(blanks)
return (True, blanks)
return search_function, self.patternobj
def bounds (self):
return self.patternobj.bounds()
class AnagramMatch (StringSearch):
"""
A derivative of StringSearch, this search looks for anagrams of the string
provided.
"""
patternobj = None
column = "words.alphagram"
def clause (self):
if "?" in self.search_string or "[" in self.search_string or "*" in self.search_string:
return CALLBACK_FUNCTION
ag = alphagram(self.search_string)
return ("words.alphagram=?", (ag, ))
def pattern (self):
if self.patternobj is None:
self.patternobj = util.pattern.AnagramPattern.fromstring(self.search_string)
def search_function (word):
return self.patternobj.try_word(word)
return search_function, self.patternobj
def bounds (self):
return self.patternobj.bounds()
class SubanagramMatch (AnagramMatch):
"""
A derivative of StringSearch, this search, like AnagramMatch, searches for
anagrams of the string provided. However, it will search for anagrams of
any length, of any combination of the contained string.
"""
def clause (self):
return CALLBACK_FUNCTION
def pattern (self):
if self.patternobj is None:
self.patternobj = util.pattern.SubAnagramPattern.fromstring(self.search_string)
def search_function (word):
return self.patternobj.try_word(word)
return search_function, self.patternobj
else:
class PatternMatch (PatternMatchBase, StringSearch):
column = "words.word"
def asdict (self):
return {"search_type": "PatternMatch", "search_string": self.search_string, "negated": self.negated}
@classmethod
def fromdict (cls, my_dict):
my_dict = dict(my_dict)
my_dict.pop("search_type")
return cls(**my_dict)
def asjson (self):
return json.dumps(self.asdict())
@classmethod
def fromjson (cls, data):
return cls.fromdict(json.loads(data))
class _AlphagramMatch (StringSearch):
column = "words.alphagram"
def clause (self):
ag = alphagram(self.search_string)
return ("words.alphagram=?", (ag, ))
def asdict (self):
return {"search_type": "AnagramMatch", "search_string": self.search_string, "negated": self.negated}
class _AnagramMatch (AnagramMatchBase, StringSearch):
column = "words.alphagram"
def asdict (self):
return {"search_type": "AnagramMatch", "search_string": self.search_string, "negated": self.negated}
@classmethod
def fromdict (cls, my_dict):
my_dict = dict(my_dict)
my_dict.pop("search_type")
return cls(**my_dict)
def asjson (self):
return json.dumps(self.asdict())
@classmethod
def fromjson (cls, data):
return cls.fromdict(json.loads(data))
def AnagramMatch (search_string, negated=False):
if "?" in search_string or "[" in search_string or "*" in search_string:
return _AnagramMatch(search_string=search_string, negated=negated)
else:
return _AlphagramMatch(search_string=search_string, negated=negated)
class SubanagramMatch (SubanagramMatchBase, StringSearch):
column = "words.alphagram"
def asdict (self):
return {"search_type": "SubanagramMatch", "search_string": self.search_string, "negated": self.negated}
@classmethod
def fromdict (cls, my_dict):
my_dict = dict(my_dict)
my_dict.pop("search_type")
return cls(**my_dict)
def asjson (self):
return json.dumps(self.asdict())
@classmethod
def fromjson (cls, data):
return cls.fromdict(json.loads(data))
class TakesPrefix (StringSearch):
"""
This is a limiting search that ensures that the words returned are only
words which take a specific prefix.
"""
column = "words.word"
def clause (self):
if len(self.search_string) == 1:
return ("words.front_hooks LIKE ?", ("%" + self.search_string + "%", ))
return ("?||words.word IN (SELECT word AS pos_word FROM words WHERE pos_word=?||MYWORD)", (self.search_string, self.search_string))
class TakesSuffix (StringSearch):
"""
    As per TakesPrefix, but applies only to words which take a specific suffix.
"""
column = "words.word"
def clause (self):
if len(self.search_string) == 1:
return ("words.back_hooks LIKE ?", ("%" + self.search_string + "%", ))
return ("words.word||? IN (SELECT word AS pos_word FROM words WHERE pos_word=MYWORD||?)", (self.search_string, self.search_string))
class RangeSearch (SearchType):
"""
This search specifies integer "start" and "stop" values, and limits results
to those whose specific column value falls between these two values; if the
"start" and "stop" parameters are the same, or the stop value is less than
the start value, only results whose column value matches the "start" value
will be returned.
"""
search_range_start = 0
search_range_stop = 0
def __init__ (self, search_range_start=0, search_range_stop=0, *args, **kwargs):
"""
Create a new RangeSearch.
:param search_range_start: The starting range limiter.
:param search_range_stop: The stopping range limiter.
"""
super(RangeSearch, self).__init__(*args, **kwargs)
try:
self.search_range_start = int(search_range_start)
except ValueError:
raise SearchError("%s is invalid start" % search_range_start)
try:
self.search_range_stop = int(search_range_stop)
except ValueError:
raise SearchError("%s is invalid stop" % search_range_stop)
if self.search_range_stop < self.search_range_start:
self.search_range_stop = self.search_range_start
def clause (self):
rt = self.search_range_start
rp = self.search_range_stop
if rt == rp or rp < rt:
return ("%s=?" % self.column, (rt, ))
else:
return ("%s >= ? AND %s <= ?" % (self.column, self.column), (rt, rp))
def __repr__ (self):
return "<%s search_range_start:%s, search_range_stop:%s>" % (self.__class__.__name__, self.search_range_start, self.search_range_stop)
class Length (RangeSearch):
"""
This search limits results to words of a specific length -- either within a
range or or a certain value.
"""
column = "words.length"
class NumberOfVowels (RangeSearch):
"""
This search limits results to words which contain either a specific number
of vowels, or a number of vowels that falls between a specified range.
"""
column = "words.num_vowels"
class NumberOfUniqueLetters (RangeSearch):
"""
    This search limits results to words which contain either a specific
number of unique letters, or whose number of unique letters falls between a
specified range.
"""
column = "words.num_unique_letters"
class PointValue (RangeSearch):
"""
    This search limits results to words which have either a specific 'point'
value, or whose 'point' value falls between a specified range.
"""
column = "words.point_value"
class NumberOfAnagrams (RangeSearch):
"""
    This search limits results to words which either have a specific number of
anagrams possible, or whose number of possible anagrams falls between a
specified range.
"""
column = "words.num_anagrams"
class ConsistsOf (StringSearch, RangeSearch):
def __repr__ (self):
return "<%s search_range_start:%s, search_range_stop:%s, search_string:%s>" % (self.__class__.__name__, self.search_range_start, self.search_range_stop, self.search_string)
class StringListSearch (SearchType):
search_string_list = None
def __init__ (self, search_string_list=None, search_string=None, *args, **kwargs):
super(StringListSearch, self).__init__(*args, **kwargs)
self.search_string_list = search_string_list
if self.search_string_list is None:
self.search_string_list = []
if search_string is not None:
self.search_string_list.extend(search_string)
def __repr__ (self):
return "<%s search_string_list:%s>" % (self.__class__.__name__, self.search_string_list)
class IncludesLetters (StringListSearch):
column = "words.word"
    def clause (self):
        args = []
        clauses = []
        if self.negated:
            like = "NOT LIKE"
        else:
            like = "LIKE"
        for character in self.search_string_list:
            clauses.append("%s %s ?" % (self.column, like))
            args.append("%"+character+"%")
        # Join the per-letter clauses so multiple letters still yield valid SQL.
        return (" AND ".join(clauses), tuple(args))
class InWordList (StringListSearch):
column = "words.word"
def clause (self):
args = []
query = ""
if self.negated:
query = "%s NOT IN (" % self.column
else:
query = "%s IN (" % self.column
for word in self.search_string_list:
query += "?, "
args.append(word)
query = query.rstrip(", ") + ")"
return (query, tuple(args))
class BelongsToGroup (SearchType):
column = "words.word"
def __init__ (self, search_term_string, *args, **kwargs):
super(BelongsToGroup, self).__init__(*args, **kwargs)
self.search_string_list = ["Hook Words", "Front Hooks", "Back Hooks",
"High Fives", "Type I Sevens", "Type II Sevens", "Type III Sevens",
"Type I Eights", "Type II Eights", "Type III Eights",
"Eights From Seven-Letter Stems"]
assert search_term_string in self.search_string_list
self.search_term_string = search_term_string
def clause (self):
args = []
query = ""
st = self.search_term_string
if "Hook" in st:
if self.negated:
m = "0"
else:
m = "1"
if "Front" in st:
query += "words.is_front_hook=?"
args.append(m)
elif "Back" in st:
query += "words.is_back_hook=?"
args.append(m)
elif "Words" in st:
query += "words.is_front_hook=? AND words.is_back_hook=?"
args.extend((m, m))
return (query, tuple(args))
else:
return False
class SearchList (object):
searches = None
def __init__ (self, *searches):
super(SearchList, self).__init__()
self.searches = []
if searches:
self.searches.extend(searches)
def append (self, item):
self.searches.append(item)
def query (self):
args = []
functions = {}
query = "SELECT front_hooks, word as MYWORD, back_hooks, definition FROM words WHERE "
maybe_query = None
for constraint in self.searches:
try:
squery, subargs = constraint.clause()
except ValueError:
ind = len(functions)+1
squery = "anagrammer%s(%s)" % (ind, constraint.column)
subargs = []
functions["anagrammer%s" % ind] = constraint.pattern()
bounds = constraint.bounds()
if (maybe_query and int(maybe_query[-1]) < int(bounds[-1])) or not maybe_query:
maybe_query = bounds
if not query.endswith("WHERE "):
query += " AND "
query += squery
args.extend(subargs)
if "words.length" not in query and maybe_query:
query += " AND " + maybe_query
query += " LIMIT 100"
return (query, args, functions)
def __repr__ (self):
return "<SearchList %s>" % self.searches
def asdicts (self):
return [item.asdict() for item in self.searches]
def asjson (self):
return json.dumps(self.asdicts())
@classmethod
def fromdicts (cls, items):
try:
return cls(*[SearchType.fromdict(item) for item in items])
        except Exception:
return cls(SearchType.fromdict(items))
@classmethod
def fromjson (cls, jsond):
return cls.fromdicts(json.loads(jsond))
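if __name__ == "__main__":
    # Hedged demonstration of the query builder above: the SQL produced for all
    # seven-letter words containing a Q (constraint values are illustrative).
    demo = SearchList(Length(7), IncludesLetters(["Q"]))
    demo_query, demo_args, demo_functions = demo.query()
    # demo_query == "SELECT front_hooks, word as MYWORD, back_hooks, definition "
    #               "FROM words WHERE words.length=? AND words.word LIKE ? LIMIT 100"
    # demo_args == [7, '%Q%']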
``` |
{
"source": "jmcb/murderrl",
"score": 2
} |
#### File: murderrl/builder/builder.py
```python
import random, copy, room
from library import shape, collection
from library.coord import *
from library.random_util import *
from library.feature import *
# Specific build styles:
BASE_SHAPE = "single-corridor"
L_LAYOUT = "L-corridors"
Z_LAYOUT = "Z-corridors"
N_LAYOUT = "N-corridors"
H_LAYOUT = "H-corridors"
O_LAYOUT = "O-corridors"
U_LAYOUT = "U-corridors"
class BuilderCollection (collection.ShapeCollection):
corridors = None
rooms = None
legs = None
main_corridor = None
def __init__ (self, c=[]):
if c != [] and isinstance(c, BuilderCollection):
self.legs = c.legs
collection.ShapeCollection.__init__(self, c)
self.rebuild()
def copy (self):
my_copy = BuilderCollection(copy.copy(self._shapes))
my_copy.legs = copy.deepcopy(self.legs)
return my_copy
def rebuild (self):
self.corridors = []
self.rooms = []
if not self.legs:
self.legs = []
for index, sh in enumerate(self):
if isinstance(sh.shape, MainCorridor):
self.main_corridor = index
if isinstance(sh.shape, Corridor):
self.corridors.append(index)
else:
self.rooms.append(index)
def corridor (self, index):
assert index in self.corridors
return self[index]
def get_corridors (self):
return self.corridors
def get_room (self, index):
assert index in self.rooms
return self[index]
def get_rooms (self):
if not self.rooms:
return None
return self.rooms
def mark_leg (self, leg):
self.legs.append(leg)
def count_legs (self):
return len(self.legs)
def leg_at (self, side, placement):
return (side, placement) in self.legs
def get_leg (self, side, placement):
for leg in self.legs:
if leg == (side, placement):
return leg
return None
def _rebuild_wrap (function):
def wrapper (self, *args, **kwargs):
function(self, *args, **kwargs)
self.rebuild()
wrapper.__name__ = function.__name__
wrapper.__doc__ = function.__doc__ + "\n\nCalling this function automatically rebuilds the BuilderCollection index."
return wrapper
__setitem__ = _rebuild_wrap(collection.ShapeCollection.__setitem__)
append = _rebuild_wrap(collection.ShapeCollection.append)
extend = _rebuild_wrap(collection.ShapeCollection.extend)
insert = _rebuild_wrap(collection.ShapeCollection.insert)
pop = _rebuild_wrap(collection.ShapeCollection.pop)
prioritise = _rebuild_wrap(collection.ShapeCollection.prioritise)
reverse = _rebuild_wrap(collection.ShapeCollection.reverse)
reversed = _rebuild_wrap(collection.ShapeCollection.reversed)
sort = _rebuild_wrap(collection.ShapeCollection.sort)
class Corridor (shape.Shape):
pass
class MainCorridor (Corridor):
pass
def join_row_rooms (row, left_corr=False, right_corr=False, check_offset=False):
"""
Given a list of rooms, joins them together as a ShapeCollection.
:``row``: A list of Room objects that should be placed in a row. *Required*.
:``left_corr``: If true, leaves a gap between the first and second rooms
to make space for a corridor. *Default False*.
:``right_corr``: If true, leaves a gap between the last and second-last rooms
to make space for a corridor. *Default False*.
:``check_offset``: If true, compares the room heights to see if they
need to be offset from the top. *Default False*.
"""
assert(len(row) > 2)
first_room = row[0].as_shape()
second_room = row[1].as_shape()
    # Offset both rooms when the first two rooms share the same height.
offset_both = False
if check_offset and first_room.height() == second_room.height():
offset_both = True
# Join the first two rooms.
top_offset = 0
if check_offset:
top_offset = 2
overlap = 1
if left_corr:
overlap = -1
row_collection = shape.adjoin(first_room, second_room, top_offset=top_offset, overlap=overlap, collect=True, offset_both=offset_both)
# Join the middle rooms.
for curr in row[2:-1]:
room_shape = curr.as_shape()
to = top_offset
if check_offset and (room_shape.height() == first_room.height() and not offset_both or room_shape.height() > first_room.height()):
to = 0
row_collection = shape.adjoin(row_collection, room_shape, top_offset=to, overlap=1, collect=True, offset_both=offset_both)
# Join the last room.
last_room = row[-1].as_shape()
if check_offset and (last_room.height() == first_room.height() and not offset_both or last_room.height() > first_room.height()):
top_offset = 0
overlap = 1
if right_corr:
overlap = -1
row_collection = shape.adjoin(row_collection, last_room, top_offset=top_offset, overlap=overlap, collect=True)
return row_collection
ROOM_WIDTH_LIST = [7, 8, 9, 10, 11, 12]
def random_room_height ():
"""
Returns a random value for the height of a room.
"""
height = 7
if coinflip():
height += 1
elif one_chance_in(3):
height -= 1
return height
def base_builder (min_rooms=0, top_left=None, top_right=None, bottom_left=None, bottom_right=None, tl_corr=False, tr_corr=False, bl_corr=False, br_corr=False,top_height=None, bottom_height=None):
"""
Attempts to build a basic rectangular manor. It returns ShapeCollection
and a list of Room objects.
    :``min_rooms``: The minimum number of rooms. *Default 0*.
:``top_left``: The width of the top left room. Random, if none. *Default None*.
:``top_right``: The width of the top right room. Random, if none. *Default None*.
:``bottom_left``: The width of the bottom left room. Random, if none. *Default None*.
:``bottom_right``: The width of the bottom right room. Random, if none. *Default None*.
:``tl_corr``: If true, leaves a gap for a corridor between the top-left two rooms. *Default False*.
:``tr_corr``: If true, leaves a gap for a corridor between the top-right two rooms. *Default False*.
:``bl_corr``: If true, leaves a gap for a corridor between the bottom-left two rooms. *Default False*.
:``br_corr``: If true, leaves a gap for a corridor between the bottom-right two rooms. *Default False*.
:``top_height``: The height of the top row rooms. Random, if none. *Default None*.
:``bottom_height``: The height of the bottom row rooms. Random, if none. *Default None*.
"""
if top_left == None:
top_left = random.choice(ROOM_WIDTH_LIST)
if top_right == None:
top_right = random.choice(ROOM_WIDTH_LIST)
if bottom_left == None:
bottom_left = random.choice(ROOM_WIDTH_LIST)
if bottom_right == None:
bottom_right = random.choice(ROOM_WIDTH_LIST)
# tl_corr = True
# tr_corr = True
# bl_corr = True
# br_corr = True
print "tl: %s, tr: %s, bl: %s, br: %s" % (top_left, top_right, bottom_left, bottom_right)
print "tl: %s, tr: %s, bl: %s, br: %s" % (tl_corr, tr_corr, bl_corr, br_corr)
# Top row of rooms
row1 = []
# Corridor, then bottom row of rooms
row2 = []
max_length = 6*12 # currently unused
# manor_width = random.randint(max_length/2, max_length)
# Decide the row heights.
if top_height == None:
top_height = random_room_height()
if bottom_height == None:
bottom_height = random_room_height()
print "top_height: %s, bottom_height: %s" % (top_height, bottom_height)
# first rooms on either row
height1 = top_height
height2 = bottom_height
check_overlap = False
if top_left < bottom_left or top_left == bottom_left and coinflip():
height1 += 2
else:
height2 += 2
check_overlap = True
first = room.Room(width=top_left, height=height1)
row1.append(first)
first = room.Room(width=bottom_left, height=height2)
row2.append(first)
# print "first rooms: height1=%s, height2=%s" % (height1, height2)
length1 = top_left + top_right - 2
if tl_corr:
length1 += 2
if tr_corr:
length1 += 2
length2 = bottom_left + bottom_right - 2
if bl_corr:
length2 += 2
if br_corr:
length2 += 2
print "Row 1:"
print "room 1: w=%s, length1: %s" % (top_left, length1)
while len(row1) <= 5:
# If we have four rooms, one in three chance of not adding any more
# rooms.
if len(row1) > 3 and one_chance_in(3):
break
new_room = room.Room(width=random.choice(ROOM_WIDTH_LIST), height=top_height)
row1.append(new_room)
length1 += new_room.width - 1
print "room %s: w=%s, length1: %s" % (len(row1), new_room.width, length1)
print "room %s: w=%s" % (len(row1)+1, top_right)
manor_width = length1
print "\nRow 2:"
print "room 1: w=%s, length2: %s" % (bottom_left, length2)
while length2 < manor_width:
dist_left = manor_width - length2 + 1
if dist_left < 14:
new_width = dist_left
else:
new_width = random.choice(ROOM_WIDTH_LIST)
next_width = dist_left - new_width
if next_width < 7:
new_width = random.choice((6,7,8))
new_room = room.Room(width=new_width, height=bottom_height)
row2.append(new_room)
length2 += new_width - 1
print "room %s: w=%s, length2: %s" % (len(row2), new_width, length2)
print "room %s: w=%s" % (len(row2)+1, bottom_right)
# last rooms on either row
height1 = top_height
height2 = bottom_height
if top_right < bottom_right or top_right == bottom_right and coinflip():
height1 += 2
check_overlap = False
else:
height2 += 2
# check_overlap = True
# print "last rooms: height1=%s, height2=%s" % (height1, height2)
last = room.Room(width=top_right, height=height1)
row1.append(last)
last = room.Room(width=bottom_right, height=height2)
row2.append(last)
print "\nrow1: %s rooms, row2: %s rooms, manor width: %s" % (len(row1), len(row2), manor_width)
# Try to get the minimum number of rooms.
if len(row1) + len(row2) < min_rooms:
return base_builder(min_rooms - 1)
# Now, start drawing it! YAY!
# First row
row1_collection = join_row_rooms(row1, tl_corr, tr_corr)
# second row
row2_collection = join_row_rooms(row2, bl_corr, br_corr, True)
# Finally, make a corridor!
overlap = 3
if check_overlap:
overlap = 1
my_collection = shape.underneath(row1_collection, row2_collection, overlap=overlap, collect=True)
m = BuilderCollection(my_collection)
noncorr_left = min(top_left, bottom_left)
noncorr_right = min(top_right, bottom_right)
corridor_length = my_collection.width() - noncorr_left - noncorr_right
# print "noncorr_left: %s, noncorr_right: %s, corridor_length: %s" % (noncorr_left, noncorr_right, corridor_length)
corridor = MainCorridor(shape.Row(width=corridor_length, fill="."))
m.append(collection.ShapeCoord(corridor, coord.Coord(noncorr_left, top_height)))
return m
class Placement (object):
def __init__ (self, side1, side2, this_side):
self.sides = [side1, side2]
self.this_side = this_side
def opposite (self):
return self.sides[self.this_side-1]
def __hash__ (self):
return hash(str(self))
def __str__ (self):
return self.sides[self.this_side]
def __repr__ (self):
return "<Placement %s>" % self
def __cmp__ (self, other):
return cmp(str(self), str(other))
SIDE_LEFT = Placement("left", "right", 0)
SIDE_RIGHT = Placement("left", "right", 1)
PLACE_TOP = Placement("top", "bottom", 0)
PLACE_BOTTOM = Placement("top", "bottom", 1)
class Leg (object):
"""
The representation of a manor leg (or "wing") that is attached to the
base manor.
"""
def __init__ (self, h_placement, v_placement, width=None, height=None, leg=None):
assert not (leg is None and width is None and height is None)
if leg is not None:
width, height = leg.size()
self.placement = (h_placement, v_placement)
self.width = width
self.height = height
def __repr__ (self):
return "<Leg h:%s w:%s %s>" % (self.height, self.width, self.placement)
def __cmp__ (self, other):
if isinstance(other, Leg):
return cmp(self.placement, other.placement)
elif isinstance(other, tuple):
return cmp(self.placement, other)
def attach_leg (base, leg, side=SIDE_LEFT, placement=PLACE_TOP, corr_offset = None, x_offset = None):
"""
Take a result of base_builder() and attach a leg.
:``base``: The base shape collection.
:``leg``: The leg shape collection.
:``side``: Which side the leg should be placed on. *Default ``SIDE_LEFT``*.
:``placement``: Whether the leg should be placed above or below. *Default ``PLACE_TOP``*.
:``corr_offset``: A number by which to vertically offset the corridor placement.
If none, uses the default room height. *Default None*.
:``x_offset``: A number by which to horizontally offset the corridor placement.
*Default None*.
"""
assert not base.leg_at(side, placement)
old_leg = leg.copy()
no_vert_offset = False
vert_offset = 0
if base.leg_at(side.opposite(), placement):
l = base.get_leg(side.opposite(), placement)
vert_offset = base.height() - l.height
no_vert_offset = True
else:
vert_offset = base.height() - 1
# Find the corridor
corridor, start = base.corridor(base.main_corridor)
assert corridor is not None
# Find the corridor's end point
stop = coord.Coord(start)
stop.x = corridor.width()
if side == SIDE_RIGHT:
offs = leg[0].width() - start.x
leg.offset(coord.Coord(stop.x-offs-1, 0))
if x_offset == None:
x_offset = stop.x + start.x
elif side == SIDE_LEFT and x_offset == None:
x_offset = start.x
print "vert_offset: %s, x_offset: %s, no_vert_offset: %s" % (vert_offset, x_offset, no_vert_offset)
if corr_offset == None:
corr_offset = room.Room().height
ncorr_height = leg.height() + corr_offset - 1
new_corridor = Corridor(shape.Column(height=ncorr_height, fill="."))
corridor_offset = None
if placement == PLACE_BOTTOM:
if no_vert_offset:
base.place_on(leg, offset=coord.Coord(0, vert_offset))
else:
left_offset = 0
if side == SIDE_RIGHT:
left_offset = base.width()-leg.width()
base = shape.underneath(base, leg, left_offset=left_offset, overlap=1, collect=True)
new_corridor[coord.Coord(0, new_corridor.height()-1)] = "#"
corridor_offset = coord.Coord(x_offset, vert_offset - corr_offset + 1)
base.append(new_corridor, corridor_offset)
elif placement == PLACE_TOP:
if no_vert_offset:
base.place_on(leg)
else:
left_offset = 0
if side == SIDE_RIGHT:
left_offset = leg.width()-base.width()
# print "leg width (%s) - base width (%s) = left_offset (%s)" % (leg.width(), base.width(), left_offset)
base = shape.underneath(leg, base, left_offset=left_offset, overlap=1, collect=True)
new_corridor[POS_ORIGIN] = "#"
corridor_offset = coord.Coord(x_offset, 0)
base.append(new_corridor, corridor_offset)
if placement == PLACE_TOP:
start = coord.Coord(corridor_offset.x - 1, leg.height() - 1)
elif placement == PLACE_BOTTOM:
start = coord.Coord(corridor_offset.x - 1, vert_offset - corr_offset + 1)
base = BuilderCollection(base)
base.mark_leg(Leg(side, placement, leg=old_leg))
return base
def build_leg (rooms_tall=2, rooms_wide=2, width_left=12, width_right=12, make_corridor=True, do_cleanup=True):
"""
Create and return a "leg" to be used with attach_leg.
:``rooms_tall``: How many rooms tall to make the leg. *Default 2*.
:``rooms_wide``: How many rooms wide to make the leg. *Max 2. Default 2*.
:``width_left``: The width of the leftmost rooms. *Default 12*.
:``width_right``: The width of the rightmost rooms. *Default 12*.
:``make_corridor``: Include a corridor when building. *Default True*.
:``do_cleanup``: Perform corridor, etc, clean-up when built. *Default True*.
"""
assert rooms_wide >= 1 and rooms_wide <= 2
assert rooms_tall >= 1
leg_rooms = collection.ShapeCollection()
if width_left == None:
width_left = random.choice(ROOM_WIDTH_LIST)
if width_right == None:
width_right = random.choice(ROOM_WIDTH_LIST)
heights = []
for r in xrange(rooms_tall):
heights.append(7)
for column in xrange(rooms_wide):
this_col = collection.ShapeCollection()
width = width_left
if column > 0:
width = width_right
height_list = heights[:]
if len(heights) > 1 and one_chance_in(5):
indices = range(len(height_list))
small = random.choice(indices)
indices.remove(small)
large = random.choice(indices)
height_list[small] -= 1
height_list[large] += 2
else:
large = random.choice(xrange(len(height_list)))
height_list[large] += 1
for row in xrange(rooms_tall):
new_room = room.Room(width=width,height=height_list[row]).as_shape()
# print "new_room height: %s, this_col height: %s" % (new_room.height(), this_col.height())
this_col = shape.underneath(new_room, this_col, offset_second=False, overlap=1, collect=True)
# print "leg_rooms width: %s, this_col width: %s" % (leg_rooms.width(), this_col.width())
leg_rooms = shape.adjoin(leg_rooms, this_col, overlap=-1, collect=True)
return leg_rooms
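# A minimal usage sketch (not part of the original flow; argument values are
# illustrative only): build a base manor, create a leg, then attach the leg
# to the top-left corner of the base.
#
#   base = base_builder(min_rooms=10)
#   leg = build_leg(rooms_tall=2, rooms_wide=2)
#   manor = attach_leg(base, leg, side=SIDE_LEFT, placement=PLACE_TOP)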
def build_L (base=None, min_rooms=0, rooms=2, rooms_wide=2):
"""
Modifies the results of base_builder() to result in an L shape in any
orientation.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
:``rooms``: How many rooms to build along the sides of the new axis. *Default 2*.
:``rooms_wide``: How many rooms wide to make the leg. *Max 2. Default 2*.
"""
side = random.choice([SIDE_LEFT, SIDE_RIGHT])
placement = random.choice([PLACE_TOP, PLACE_BOTTOM])
tlc = (side == SIDE_LEFT and placement == PLACE_TOP)
trc = (side == SIDE_RIGHT and placement == PLACE_TOP)
blc = (side == SIDE_LEFT and placement == PLACE_BOTTOM)
brc = (side == SIDE_RIGHT and placement == PLACE_BOTTOM)
if tlc or blc: # left side
tlw = random.choice(ROOM_WIDTH_LIST)
blw = random.choice(ROOM_WIDTH_LIST)
trw = None
brw = None
if tlc:
if blw < tlw:
blw = tlw
left = tlw
else:
if tlw < blw:
tlw = blw
left = blw
right = None
else: # right side
tlw = None
blw = None
trw = random.choice(ROOM_WIDTH_LIST)
brw = random.choice(ROOM_WIDTH_LIST)
if trc:
if brw < trw:
brw = trw
right = trw
else:
if trw < brw:
trw = brw
right = brw
left = None
tht = None
bht = None
corr_offset = random_room_height()
if placement == PLACE_TOP:
tht = corr_offset
else:
bht = corr_offset
if base is None:
base = base_builder(min_rooms=min_rooms-4, top_left=tlw, top_right=trw, bottom_left=blw, bottom_right=brw, tl_corr=tlc, tr_corr=trc, bl_corr=blc, br_corr=brc, top_height=tht, bottom_height=bht)
# Draw the new rooms.
new_rooms = build_leg(rooms, rooms_wide, width_left=left, width_right=right)
offset = None
if side == SIDE_RIGHT:
offset = base.width() - right - 1
base = attach_leg(base, new_rooms, side=side, placement=placement, corr_offset=corr_offset, x_offset=offset)
return base
def build_Z (base=None, min_rooms=0):
"""
Modifies the results of base_builder() to result in a Z shape in any
orientation. Not implemented.
Currently just returns the base builder results.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
"""
if base is None:
base = base_builder(min_rooms=min_rooms)
return base
def build_N (base=None, min_rooms=0):
"""
Modifies the results of base_builder() to result in an N shape in any
orientation. Not implemented.
Currently just returns the base builder results.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
"""
if base is None:
base = base_builder(min_rooms=min_rooms)
return base
def build_O (base=None, min_rooms=0):
"""
Modifies the results of base_builder() to result in an O shape in any
orientation. Not implemented.
Currently just returns the base builder results.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
"""
if base is None:
base = base_builder(min_rooms=min_rooms)
return base
def build_H (base=None, min_rooms=0):
"""
Modifies the results of base_builder() to result in an H-shaped layout.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
"""
outer = random.choice(ROOM_WIDTH_LIST) # outer leg
inner = random.choice(ROOM_WIDTH_LIST) # inner leg
tht = random_room_height()
bht = random_room_height()
if base is None:
base = base_builder(min_rooms=min_rooms-16, top_left=outer, top_right=outer, bottom_left=outer, bottom_right=outer,
tl_corr=True, tr_corr=True, bl_corr=True, br_corr=True, top_height=tht, bottom_height=bht)
base = build_U(base, min_rooms=min_rooms, placement=PLACE_TOP, outer=outer, inner=inner, room_height=tht)
base = build_U(base, min_rooms=min_rooms, placement=PLACE_BOTTOM, outer=outer, inner=inner, room_height=bht)
return base
def build_U (base=None, min_rooms=0, rooms=2, rooms_wide=2, placement=None, outer=None, inner=None, room_height=None):
"""
Modifies the results of base_builder() to result in an U-shaped layout.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
:``rooms``: How many rooms to build along the sides of the new axis. *Default 2*.
:``rooms_wide``: How many rooms wide to make the leg. *Max 2. Default 2*.
:``placement``: The vertical orientation of the manor legs. Random, if none. *Default None*.
:``inner``: The width of the inner manor legs' rooms. Random, if none. *Default None*.
:``outer``: The width of the outer manor legs' rooms. Random, if none. *Default None*.
:``room_height``: The height of the base manor rooms on the side facing the legs.
Random, if none. *Default None*.
"""
if placement is None:
placement = random.choice([PLACE_TOP, PLACE_BOTTOM])
if outer == None:
outer = random.choice(ROOM_WIDTH_LIST) # outer leg
if inner == None:
inner = random.choice(ROOM_WIDTH_LIST) # inner leg
tht = None
bht = None
if room_height == None:
room_height = random_room_height()
if placement == PLACE_TOP:
tht = room_height
else:
bht = room_height
if base is None:
tlc = (placement == PLACE_TOP)
trc = tlc
blc = not tlc
brc = blc
noleg = random.choice(ROOM_WIDTH_LIST) # opposite side
if noleg < outer:
noleg = outer
if tlc: # top
tlw = outer
trw = outer
blw = noleg
brw = noleg
else: # bottom
tlw = noleg
trw = noleg
blw = outer
brw = outer
base = base_builder(min_rooms=min_rooms-8, top_left=tlw, top_right=trw, bottom_left=blw, bottom_right=brw, tl_corr=tlc, tr_corr=trc, bl_corr=blc, br_corr=brc, top_height=tht, bottom_height=bht)
leg_width = outer + inner + 1
distance = base.width() - 2 * leg_width
print "base width=%s, outer=%s, inner=%s, leg width=%s, distance=%s" % (base.width(), outer, inner, leg_width, base.width() - 2*leg_width)
if distance < 5 and distance != -1:
if distance % 2 == 0 or base.width() % 2 == 0:
if distance < 0:
inner -= 2 + (-distance)
inner -= 2
else:
inner = base.width()/2 - outer
leg_width = outer + inner + 1
distance = base.width() - 2 * leg_width
print "base width=%s, outer=%s, inner=%s, leg width=%s, distance=%s" % (base.width(), outer, inner, leg_width, base.width() - 2*leg_width)
new_rooms_L = build_leg(rooms, rooms_wide, width_left=outer, width_right=inner)
new_rooms_R = build_leg(rooms, rooms_wide, width_left=inner, width_right=outer)
base = attach_leg(base, new_rooms_L, side=SIDE_LEFT, placement=placement, corr_offset=room_height)
base = attach_leg(base, new_rooms_R, side=SIDE_RIGHT, placement=placement, corr_offset=room_height, x_offset=base.width() - outer - 1)
return base
def builder_by_type (type = None, min_rooms=0):
"""
Creates and returns a manor of a given layout type.
:``type``: The layout type in a character representation. *Default None*.
``B``: base manor.
``L``: L-shaped layout.
``U``: U-shaped layout.
``H``: H-shaped layout.
``None``: random layout.
"""
if type == None:
return build_random(min_rooms=min_rooms)
if type == 'B':
return base_builder(min_rooms=min_rooms)
if type == 'L':
return build_L(min_rooms=min_rooms)
if type == 'U':
return build_U(min_rooms=min_rooms)
if type == 'H':
return build_H(min_rooms=min_rooms)
# The other types don't exist yet and fall back on the base_builder.
if type == 'O':
return build_O(min_rooms=min_rooms)
if type == 'N':
return build_N(min_rooms=min_rooms)
if type == 'Z':
return build_Z(min_rooms=min_rooms)
else:
return base_builder(min_rooms=min_rooms)
def build_random (base=None, min_rooms=0):
"""
Creates and returns a manor of a random layout type.
:``base``: The base shape collection. If None, a new base will be built from
base_builder. *Default None*.
"""
l_list = [Z_LAYOUT, N_LAYOUT, O_LAYOUT, L_LAYOUT, U_LAYOUT, H_LAYOUT]
layout = random.choice(l_list)
if min_rooms > 25:
layout = H_LAYOUT
elif min_rooms > 20:
layout = random.choice(l_list[-2:])
elif min_rooms > 15:
layout = random.choice(l_list[-3:])
if layout == L_LAYOUT:
return build_L(base, min_rooms=min_rooms)
elif layout == Z_LAYOUT:
return build_Z(base, min_rooms=min_rooms)
elif layout == N_LAYOUT:
return build_N(base, min_rooms=min_rooms)
elif layout == H_LAYOUT:
return build_H(base, min_rooms=min_rooms)
elif layout == O_LAYOUT:
return build_O(base, min_rooms=min_rooms)
elif layout == U_LAYOUT:
return build_U(base, min_rooms=min_rooms)
else:
return base_builder(min_rooms=min_rooms)
```
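A minimal sketch of how these entry points are meant to be combined, assuming the module is imported as `builder` (as `manor.py` below does); layout characters other than `B`, `L`, `U` and `H` fall back to the base builder:

```python
import builder

# Pick a layout at random, sized for at least 20 rooms.
random_manor = builder.build_random(min_rooms=20)

# Or request a specific layout character.
l_shaped = builder.builder_by_type('L', min_rooms=15)
```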
#### File: murderrl/builder/manor.py
```python
import random, builder, room
from interface.features import *
from library.coord import *
from library.random_util import *
from library.feature import *
from library import pathfind
class ManorCollection (builder.BuilderCollection):
def __init__ (self, c=[]):
builder.BuilderCollection.__init__(self, c)
def print_corridors (self):
"""
Debugging method. Iterates over all corridors and prints the location
and size of each corridor within the manor.
"""
for idx in self.corridors:
print "Corridor %s: %s" % (idx, self.corridor(idx))
def get_corridor_index (self, pos, single = True):
"""
Returns the index of the corridor a coordinate belongs to, or None
if it doesn't lie in any corridor.
If it's part of the overlap region, the first index is returned.
:``pos``: A coord. *Required*
:``single``: If true, returns the first index encountered.
Otherwise, a list containing all matching indices. *Default true*.
"""
list = []
for idx in self.corridors:
corr = self.corridor(idx)
c = corr.pos()
r = corr.size() - 1
if (pos.x >= c.x and pos.x <= c.x + r.x
and pos.y >= c.y and pos.y <= c.y + r.y):
if single:
return idx
list.append(idx)
if single:
return None
return list
def get_corridor_indices (self, pos):
"""
Returns a list of indices of all corridors a coordinate belongs to;
the list is empty if it doesn't lie in any corridor.
:``pos``: A coord. *Required*.
"""
return self.get_corridor_index(pos, False)
def print_rooms (self):
"""
Debugging method. Iterates over all rooms and prints the location
and size of each room within the manor.
"""
for idx in self.rooms:
print "Room %s: %s" % (idx, self.get_room(idx))
def get_room_index (self, pos, single = True):
"""
Returns the index of the room a coordinate belongs to, or None if
it's outside the manor.
If it's part of the overlap region, the first index is returned.
:``pos``: A coord. *Required*.
:``single``: If true, returns the first index encountered.
Otherwise, a list containing all matching indices. *Default true*.
"""
list = []
for idx in self.rooms:
curr = self.get_room(idx)
start = curr.pos()
end = start + curr.size() - 1
if (pos.x >= start.x and pos.x <= end.x
and pos.y >= start.y and pos.y <= end.y):
if single:
return idx
list.append(idx)
if single:
return None
return list
def get_room_indices (self, pos):
"""
Returns a list of indices of all rooms a coordinate belongs to;
the list is empty if it's outside the manor.
:``pos``: A coord. *Required*.
"""
return self.get_room_index(pos, False)
def get_room_corridor_indices (self, pos):
"""
Returns a list of indices of all rooms and corridors a coordinate belongs to;
the list is empty if it's outside the manor.
:``pos``: A coord. *Required*.
"""
rooms = self.get_room_index(pos, False)
corrs = self.get_corridor_index(pos, False)
for c in corrs:
rooms.append(c)
return rooms
def get_room_corridors (self):
"""
Get a combined list including both room and corridor indices.
"""
# I might be overly cautious here, but it's so easy to overwrite
# existing lists by setting references without meaning to. (jpeg)
room_corridors = []
for r in self.rooms:
room_corridors.append(r)
for c in self.corridors:
room_corridors.append(c)
room_corridors.sort()
return room_corridors
def get_corridor_name (self, idx):
assert(idx in self.corridors)
if idx == self.main_corridor:
return "main corridor"
corr = self.corridor(idx)
start = corr.pos()
stop = start + coord.Coord(corr.width(), corr.height())
m_end = self.size()
print "corridor %s" % idx
print "start=(%s), stop=(%s)" % (start, stop)
print "manor size=(%s), 1/4 -> (%s), 3/4 -> (%s)" % (m_end, coord.Coord(m_end.x/4, m_end.y/4), coord.Coord(3*m_end.x/4, 3*m_end.y/4))
dir_horizontal = ""
if start.y < max(5, m_end.y/4):
dir_horizontal = "north"
elif stop.y > min(3*m_end.y/4, m_end.y - 5):
dir_horizontal = "south"
else:
dir_horizontal = ""
if start.x < max(5, m_end.x/4):
dir_vertical = "west"
elif stop.x > min(3*m_end.x/4, m_end.x - 5):
dir_vertical = "east"
else:
dir_vertical = ""
# only one other corridor
if len(self.corridors) == 2:
if dir_horizontal != "" and dir_vertical != "":
if coinflip():
dir_horizontal = ""
else:
dir_vertical = ""
# two other corridors
elif len(self.corridors) == 3:
if corr.width() == 1: # vertical
dir_horizontal = ""
else:
dir_vertical = ""
# else just combine both values
if dir_horizontal != "" or dir_vertical != "":
return "%s%s corridor" % (dir_horizontal, dir_vertical)
# If none of these match, just return the number.
return "corridor %s" % idx
def init_room_properties (self):
"""
Initialises a list of RoomProp objects for each room and corridor
in the manor.
"""
self.room_props = []
for r in self.get_room_corridors():
if r in self.rooms:
curr = self.get_room(r)
start = curr.pos()
size = curr.size()
width = size.x
height = size.y
room_prop = room.RoomProps("room %s" % r, start, width, height)
else:
corr = self.corridor(r)
start = corr.pos()
width = corr.width()
height = corr.height()
name = self.get_corridor_name(r)
room_prop = room.RoomProps(name, start, width, height)
room_prop.mark_as_corridor()
self.room_props.append(room_prop)
def get_roomprop (self, idx):
"""
Returns a RoomProp object for a given room index.
:``idx``: A room or corridor index. *Required*.
"""
if not self.room_props:
return None
assert(idx < len(self.room_props))
return self.room_props[idx]
def add_features (self):
# Translate rooms and corridors into wall and floor features.
self.init_features()
# Add doors along corridors.
self.add_doors()
self.maybe_remove_bonus_doors()
# Add windows.
self.add_windows()
# Add doors to rooms still missing them.
self.add_missing_doors()
def init_features (self):
"""
Initialise the manor's feature grid, placing floor and walls as
defined by the rooms/corridor layout.
"""
self.init_room_properties()
self.features = FeatureGrid(self.size().x, self.size().y)
print "Manor size: %s" % self.size()
print "Feature size: %s" % self.features.size()
# Iterate over all rooms and corridors, and mark positions within
# them as floor, and their boundaries as walls.
for r in self.get_room_corridors():
is_corridor = False # The "room" is actually a corridor.
if r in self.rooms:
curr = self.get_room(r)
else:
is_corridor = True
curr = self.corridor(r)
start = curr.pos()
stop = curr.pos() + curr.size()
# Note: Currently, only the main corridor is ever horizontal
# but that might change in the future.
horizontal = False # If a corridor, it's a horizontal one.
# Debugging output, and setting horizontal.
if is_corridor:
if curr.height() == 1:
horizontal = True
direction = "horizontal"
else:
direction = "vertical"
# print "Corridor %s: start=%s, stop=%s (%s)" % (r, start, stop, direction)
# Iterate over all coordinates within the room.
for pos in coord.RectangleIterator(start, stop):
# If we've reached the manor boundary, this is a wall.
if (pos.x == 0 or pos.x == self.size().x -1
or pos.y == 0 or pos.y == self.size().y - 1):
self.set_feature(pos, WALL)
# Corridors overwrite walls previously set by rooms.
elif is_corridor:
self.set_feature(pos, FLOOR)
# print pos
# Depending on the corridor orientation, mark the
# adjacent non-corridor squares as walls.
adjacent = []
if horizontal:
adjacent = (DIR_NORTH, DIR_SOUTH)
else:
adjacent = (DIR_WEST, DIR_EAST)
for dir in adjacent:
pos2 = pos + dir
# self.set_feature(pos2, WALL)
if pos2 <= 0 or pos2 >= self.size():
continue
corridx = self.get_corridor_indices(pos2)
# print "pos2: %s -> corridors=%s" % (pos2, corridx),
if r in corridx:
corridx.remove(r)
# print corridx
# else:
# print
if len(corridx) == 0:
self.set_feature(pos2, WALL)
# The room boundary is always a wall.
elif (pos.x == start.x or pos.x == stop.x - 1
or pos.y == start.y or pos.y == stop.y - 1):
self.set_feature(pos, WALL)
# Otherwise, we are inside the room.
# Mark as floor but don't overwrite previously placed walls.
elif self.get_feature(pos) != WALL:
self.set_feature(pos, FLOOR)
def get_feature (self, pos):
"""
Returns the feature for the given position.
:``pos``: A coordinate within the manor. *Required*.
"""
if pos < DIR_NOWHERE or pos >= self.size():
print "Invalid coord %s in manor of size %s" % (pos, self.size())
return NOTHING
return self.features.__getitem__(pos)
def set_feature (self, pos, feat):
"""
Sets the feature at a given position of the feature grid.
:``pos``: A coordinate within the manor. *Required*.
:``feat``: The feature to set. *Required*.
"""
if pos < DIR_NOWHERE or pos >= self.size():
print "Invalid coord %s in manor of size %s" % (pos, self.size())
return NOTHING
return self.features.__setitem__(pos, feat)
def add_doors_along_corridor (self, start, stop, offset = DIR_NOWHERE):
"""
Walks along a corridor, and for each adjacent room picks a random
wall spot to turn into a door.
:``start``: The corridor's starting position. *Required*
:``stop``: The corridor's end position. *Required*.
:``offset``: A coordinate specifying how the door position needs to be shifted. *Default (0,0)*.
"""
# print "add_doors_along_corridors(start=(%s), stop=(%s), offset=(%s))" % (start, stop, offset)
assert stop > start
candidates = [] # All valid door spots for the current room.
old_room = -1 # The index of the most recent room seen.
for p in coord.RectangleIterator(start, stop + 1):
pos = p + offset
if (pos.x < 2 or pos.x >= self.size().x - 1
or pos.y < 2 or pos.y >= self.size().y - 1):
continue
if self.get_feature(pos) != WALL:
continue
# If a room is adjacent to both the main and a leg corridor,
# both of them may place doors. This is okay, but they should
# not be adjacent to each other.
has_adj_door = False
for adj in coord.AdjacencyIterator(pos):
if feature_is_door(self.get_feature(adj)):
has_adj_door = True
if has_adj_door:
continue
rooms = self.get_room_indices(pos)
corrs = self.get_corridor_indices(pos - offset)
# print "pos: (%s), rooms: %s, corrs: %s" % (pos, rooms, corrs)
# Make sure there's only exactly one room for this wall.
# There also may be no other corridor except this one.
if len(rooms) == 1 and len(corrs) == 1:
# print "(%s, %s) -> %s" % (pos.x, pos.y, rooms)
curr_room = rooms[0]
if old_room != curr_room:
# We've reached another room. Time to pick a door spot for the old room.
if len(candidates):
rand_coord = random.choice(candidates)
# print "==> pick %s" % rand_coord
self.set_feature(rand_coord, CLOSED_DOOR)
self.room_props[old_room].add_adjoining_room(corrs[0])
self.room_props[corrs[0]].add_adjoining_room(old_room)
self.doors.append(rand_coord)
candidates = []
old_room = curr_room
thisroom = self.get_room(curr_room)
startx = thisroom.pos().x
starty = thisroom.pos().y
stopx = startx + thisroom.size().x - 1
stopy = starty + thisroom.size().y - 1
if (pos.x == startx or pos.x == stopx) and (pos.y == starty or pos.y == stopy):
pass
# print "pos: (%s, %s), room start: (%s, %s), room end: (%s, %s)" % (pos.x, pos.y, startx, starty, stopx, stopy)
else:
candidates.append(pos)
# The corridor has reached an end. Pick a door spot for the last room seen.
if len(candidates):
rand_coord = random.choice(candidates)
# print "==> pick %s" % rand_coord
self.set_feature(rand_coord, CLOSED_DOOR)
self.doors.append(rand_coord)
corrs = self.get_corridor_indices(rand_coord - offset)
if len(corrs) > 0:
self.room_props[old_room].add_adjoining_room(corrs[0])
self.room_props[corrs[0]].add_adjoining_room(old_room)
else:
print "no corridor matching doorpos %s of room %s" % (rand_coord, old_room)
def add_doors (self):
"""
For each corridor, adds doors to adjacent rooms.
"""
# print "Adding doors..."
self.doors = []
corr = self.corridors
for c in corr:
candidates = []
pos = self.corridor(c).pos()
w = self.corridor(c).width()
h = self.corridor(c).height()
# print "Corridor %s: %s" % (c, self.corridor(c))
# Depending on the corridor's orientation, check the parallel runs
# to the left and right, or above and below the corridor.
# Walls to the left and top of a corridor position are not
# considered part of the corridor, so we need to use a shim
# to add doors on those sides as well.
if w > 1: # horizontal corridor
self.add_doors_along_corridor(coord.Coord(pos.x, pos.y), coord.Coord(pos.x + w, pos.y), DIR_NORTH)
self.add_doors_along_corridor(coord.Coord(pos.x, pos.y), coord.Coord(pos.x + w, pos.y + h), DIR_SOUTH)
else: # vertical corridor
self.add_doors_along_corridor(coord.Coord(pos.x, pos.y), coord.Coord(pos.x, pos.y + h), DIR_WEST)
self.add_doors_along_corridor(coord.Coord(pos.x, pos.y), coord.Coord(pos.x + w, pos.y + h), DIR_EAST)
def maybe_remove_bonus_doors (self):
"""
For some rooms with exits to more than one corridor, possibly remove
one of these exits.
"""
for r in self.rooms:
if coinflip():
continue
rp = self.room_props[r]
corrs = []
for c in rp.adj_rooms:
if c in self.corridors:
corrs.append(c)
if len(corrs) < 2:
continue
# Randomly pick a door adjacent to one of the corridors.
rm = self.get_room(r)
doors = []
for pos in room.RoomWallIterator(rm.pos(), rm.size()):
if feature_is_door(self.get_feature(pos)):
doors.append(pos)
door_pos = random.choice(doors)
dirs = [DIR_NORTH, DIR_SOUTH, DIR_EAST, DIR_WEST]
corr = None
for d in dirs:
corr = self.get_corridor_index(door_pos + d)
if corr != None:
break
if corr == None:
continue
print "Change door pos (%s) to a wall" % door_pos
self.features.__setitem__(door_pos, WALL)
# Update the adjoining rooms of both room and corridor.
rp.adj_rooms.remove(corr)
self.room_props[corr].adj_rooms.remove(r)
def pick_door_along_wall (self, start, stop, offset):
"""
Picks a door spot for a wall specified by two coordinates.
:``start``: The wall's starting position. *Required*
:``stop``: The wall's end position. *Required*.
:``offset``: A Coord offset to check for adjacent floor. *Required*.
"""
candidates = []
for pos in coord.RectangleIterator(start, stop + 1):
if (self.get_feature(pos) != WALL
or self.get_feature(pos + offset) != FLOOR):
continue
# Make sure this wall connects to another room.
rooms = self.get_room_indices(pos)
if len(rooms) > 1:
candidates.append(pos)
if len(candidates) == 0:
return None
door_pos = random.choice(candidates)
rooms = self.get_room_indices(door_pos)
print "door_pos (%s) of rooms %s" % (door_pos, rooms)
for i1 in xrange(len(rooms)):
r1 = rooms[i1]
for i2 in xrange(i1+1, len(rooms)):
r2 = rooms[i2]
rp1 = self.room_props[r1]
rp2 = self.room_props[r2]
print "connect rooms %s and %s" % (rp1.name, rp2.name)
rp1.add_adjoining_room(r2)
rp2.add_adjoining_room(r1)
return door_pos
def add_window (self, start, stop, offset_check = DIR_NOWHERE):
"""
Adds windows to the wall specified by two coordinates.
:``start``: The wall's starting position. *Required*
:``stop``: The wall's end position. *Required*.
:``offset_check``: A Coord offset to check for empty space. *Default (0,0)*.
"""
if start.x == stop.x:
window = WINDOW_V
elif start.y == stop.y:
window = WINDOW_H
else:
return
# If we got an offset passed in, we need to check whether adjacent
# positions are really empty, so the window doesn't look out on
# a wall or something.
# NOTE: Naturally, should we decide to fill the nothingness with
# a garden of some sort, the whole routine will have to be
# changed. (jpeg)
if offset_check != DIR_NOWHERE:
# print "offset: %s, start=%s, stop=%s" % (offset_check, start, stop)
seen_nothing = False
for pos in coord.RectangleIterator(start, stop + 1):
adj_pos = pos + offset_check
if self.get_feature(adj_pos) == NOTHING:
seen_nothing = True
else:
if seen_nothing: # start already handled
if window == WINDOW_H:
stop.x = pos.x - 1
else:
stop.y = pos.y - 1
break
else:
if window == WINDOW_H:
start.x = pos.x + 1
else:
start.y = pos.y + 1
# print "new start=%s, stop=%s" % (start, stop)
full_window = False
if start.x == stop.x:
length = stop.y - start.y + 1
elif start.y == stop.y:
length = stop.x - start.x + 1
# else:
# return
# print "draw window for wall of length %s at (%s, %s)" % (length, start, stop)
if length < 5 or (length < 7 and one_chance_in(3)):
full_window = True
elif length >= 6 and one_chance_in(3):
# For really large windows, make them a bit smaller and
# move them into the centre.
full_window = True
if window == WINDOW_V:
start.y += 1
stop.y -= 1
else:
start.x += 1
stop.x -= 1
else:
# Split larger windows into two smaller ones.
midpost = length/2
width = 1
if length == 5:
midpost += 1
width = 0
elif length%2 == 1:
# midpost -= 1
width = 2
# For full windows, there's a chance of making them smaller
# and placing them slightly off-center.
if full_window and one_chance_in(3):
shift = random.randint(1, max(1,length/3))
if window == WINDOW_H:
if coinflip():
start.x += shift
else:
stop.x -= shift
else:
if coinflip():
start.y += shift
else:
stop.y -= shift
count = 0
for pos in coord.RectangleIterator(start, stop + 1):
count += 1
if full_window or count < midpost or count > midpost + width:
self.set_feature(pos, window)
def add_windows (self):
"""
Adds windows to the outer walls of the manor.
"""
for r in self.rooms:
curr = self.get_room(r)
start = curr.pos()
stop = start + curr.size()
print "Room %s: %s" % (r, curr)
# left-side vertical windows
if start.x == 0:
self.add_window(coord.Coord(start.x, start.y + 2), coord.Coord(start.x, stop.y - 3))
self.room_props[r].add_window(DIR_WEST)
elif (self.get_feature(coord.Coord(start.x-1, start.y+1)) == NOTHING
or self.get_feature(coord.Coord(start.x-1, stop.y-1)) == NOTHING):
self.add_window(coord.Coord(start.x, start.y + 2), coord.Coord(start.x, stop.y - 3), DIR_WEST)
self.room_props[r].add_window(DIR_WEST)
# right-side vertical windows
if stop.x == self.size().x:
self.add_window(coord.Coord(stop.x - 1, start.y + 2), coord.Coord(stop.x - 1, stop.y - 3))
self.room_props[r].add_window(DIR_EAST)
elif (self.get_feature(coord.Coord(stop.x+1, start.y+1)) == NOTHING
or self.get_feature(coord.Coord(stop.x+1, stop.y-1)) == NOTHING):
self.add_window(coord.Coord(stop.x - 1, start.y + 2), coord.Coord(stop.x - 1, stop.y - 3), DIR_EAST)
self.room_props[r].add_window(DIR_EAST)
# top horizontal windows
if start.y == 0:
self.add_window(coord.Coord(start.x + 2, start.y), coord.Coord(stop.x - 3, start.y))
self.room_props[r].add_window(DIR_NORTH)
elif (self.get_feature(coord.Coord(start.x+1, start.y-1)) == NOTHING
or self.get_feature(coord.Coord(stop.x-1, start.y-1)) == NOTHING):
self.add_window(coord.Coord(start.x + 2, start.y), coord.Coord(stop.x - 3, start.y), DIR_NORTH)
self.room_props[r].add_window(DIR_NORTH)
# bottom horizontal windows
if stop.y == self.size().y:
self.add_window(coord.Coord(start.x + 2, stop.y - 1), coord.Coord(stop.x - 3, stop.y - 1))
self.room_props[r].add_window(DIR_SOUTH)
elif (self.get_feature(coord.Coord(start.x+1, stop.y+1)) == NOTHING
or self.get_feature(coord.Coord(stop.x-1, stop.y+1)) == NOTHING):
self.add_window(coord.Coord(start.x + 2, stop.y - 1), coord.Coord(stop.x - 3, stop.y - 1), DIR_SOUTH)
self.room_props[r].add_window(DIR_SOUTH)
def add_missing_doors (self):
"""
Add doors to rooms that still lack them.
"""
door_rooms = []
for d in self.doors:
rooms = self.get_room_index(d, False)
if len(rooms) > 0:
for r in rooms:
if r not in door_rooms:
door_rooms.append(r)
rooms = self.rooms[:]
random.shuffle(rooms)
for r in rooms:
if r in door_rooms:
continue
curr = self.get_room(r)
start = curr.pos()
stop = start + curr.size()
print "Room %s: %s" % (r, curr)
door_candidates = []
rp = self.room_props[r]
door_dirs = [DIR_NORTH, DIR_SOUTH, DIR_EAST, DIR_WEST]
for windir in rp.windows:
door_dirs.remove(windir)
for dd in door_dirs:
if dd == DIR_WEST:
dpos = self.pick_door_along_wall(coord.Coord(start.x, start.y + 1), coord.Coord(start.x, stop.y - 2), DIR_WEST)
elif dd == DIR_EAST:
dpos = self.pick_door_along_wall(coord.Coord(stop.x - 1, start.y + 1), coord.Coord(stop.x - 1, stop.y - 2), DIR_EAST)
elif dd == DIR_NORTH:
dpos = self.pick_door_along_wall(coord.Coord(start.x + 1, start.y), coord.Coord(stop.x - 2, start.y), DIR_NORTH)
elif dd == DIR_SOUTH:
dpos = self.pick_door_along_wall(coord.Coord(start.x + 1, stop.y - 1), coord.Coord(stop.x - 2, stop.y - 1), DIR_SOUTH)
if dpos != None:
door_candidates.append(dpos)
# Adding doors to all applicable walls guarantees that
# all rooms are fully connected, but does mean that some
# rooms get 2-3 doors.
for d in door_candidates:
print "==> add door at pos %s" % d
self.set_feature(d, OPEN_DOOR)
self.doors.append(d)
# Update door-less rooms.
other_rooms = self.get_room_indices(d)
for r in other_rooms:
if r not in door_rooms:
door_rooms.append(r)
def assign_adjacent_rooms (self, rid):
"""
Tries to fill the neighbour rooms of a given room with the same
section type (utility or domestic), so matching rooms are neatly
grouped together.
:``rid``: The room id of a room that's already got a type assigned. *Required*.
"""
rp = self.room_props[rid]
utility = (rp.section == "utility")
# print "Room %s of type %s" % (rp.name, rp.section)
for adj in rp.adj_rooms:
if adj in self.get_corridors():
# print "adjacent room %s is corridor" % adj
continue
arp = self.room_props[adj]
if arp.has_data:
# print "adjacent room %s already filled" % arp.name
continue
print "fill room %s from database" % adj
if arp.fill_from_database(utility):
self.assign_adjacent_rooms(adj)
def add_entrance_portal (self):
"""
In the entrance hall, replace one set of windows with entrance doors.
"""
rm = self.get_room(self.entrance_hall)
candidates = []
found_window = False
for pos in room.RoomWallIterator(rm.pos(), rm.size()):
if feature_is_window(self.get_feature(pos)):
if not found_window:
candidates.append(pos)
found_window = True
else:
found_window = False
if len(candidates) == 0:
return
door_pos = random.choice(candidates)
stop = rm.pos() + rm.size() - 1
if self.get_feature(door_pos) == WINDOW_V:
corner = coord.Coord(door_pos.x, stop.y)
else:
corner = coord.Coord(stop.x, door_pos.y)
for pos in coord.RectangleIterator(door_pos, corner + 1):
if feature_is_window(self.get_feature(pos)):
self.features.__setitem__(pos, PORTAL)
else:
break
def init_room_names (self, list = None):
"""
Sets room names for all rooms within the manor.
:``list``: List of people's names that need a bedroom. *Default None*.
If this list is non-empty and as long as there are enough
available rooms, tries to assign bedrooms, in order.
"""
owner_list = []
if list != None:
owner_list = list
# There should be at least 7 rooms available to the public.
# This is only really a problem for smallish layouts, if there
# are many suspects. (jpeg)
max_no_bedrooms = len(self.rooms) - 7
count_bedrooms = 0
print "-------\nallow for max. %s bedrooms" % max_no_bedrooms
corrs = self.corridors[:]
if len(corrs) > 1:
corrs.remove(self.main_corridor)
random.shuffle(corrs)
utility = True
for c in corrs:
if utility:
section = "utility"
elif len(owner_list) > 0:
section = "bedrooms"
else:
section = "domestic"
print "-------\nCorridor %s is marked as %s" % (c, section)
corrprop = self.room_props[c]
rooms = corrprop.adj_rooms[:]
random.shuffle(rooms)
for r in rooms:
if r in self.corridors:
continue
rp = self.room_props[r]
if rp.has_data:
continue
if (not utility and len(owner_list) > 0
and rp.is_good_bedroom()):
owner = owner_list[0]
owner_list.remove(owner)
rp.make_bedroom(owner)
count_bedrooms += 1
self.assign_adjacent_rooms(r)
continue
if rp.fill_from_database(utility):
self.assign_adjacent_rooms(r)
utility = False
# One of the rooms off the main corridor is the entrance hall.
c = self.main_corridor
corrprop = self.room_props[c]
e_hall_candidates = []
for r in corrprop.adj_rooms:
if r in self.corridors:
continue
rp = self.room_props[r]
if rp.has_data or len(rp.windows) == 0:
continue
e_hall_candidates.append(r)
if len(e_hall_candidates) == 0:
self.entrance_hall = 0
else:
self.entrance_hall = random.choice(e_hall_candidates)
self.add_entrance_portal()
rp = self.room_props[self.entrance_hall]
rp.name = "entrance hall"
rp.has_data = True
print "-------\nentrance hall: room %s" % self.entrance_hall
if len(owner_list) > 0:
print "-------\nremaining rooms - allow for max. %s bedrooms" % (max_no_bedrooms - count_bedrooms)
rooms = self.rooms[:]
random.shuffle(rooms)
for r in rooms:
rp = self.room_props[r]
if rp.has_data:
continue
if len(owner_list) > 0 and count_bedrooms < max_no_bedrooms:
max_size=70
if count_bedrooms < 2:
max_size=None
if rp.is_good_bedroom(max_size=max_size):
owner = owner_list[0]
owner_list.remove(owner)
rp.make_bedroom(owner)
count_bedrooms += 1
self.assign_adjacent_rooms(r)
print "-------\nassign remaining rooms"
rooms = self.rooms[:]
random.shuffle(rooms)
for r in rooms:
rp = self.room_props[r]
if rp.has_data:
continue
if len(owner_list) > 0 and count_bedrooms < max_no_bedrooms:
max_size=60
if count_bedrooms < 5:
max_size=70
if rp.is_good_bedroom(check_windows=False, max_size=max_size):
owner = owner_list[0]
owner_list.remove(owner)
rp.make_bedroom(owner)
count_bedrooms += 1
self.assign_adjacent_rooms(r)
continue
if rp.fill_from_database():
self.assign_adjacent_rooms(r)
self.update_adjoining_rooms()
# Now the room types have been assigned, add the furniture.
self.add_furniture()
def update_adjoining_rooms (self):
"""
For each room or corridor within the manor, traverses the list of
adjoining rooms/corridors and adds their names to the list of
adjoining room names.
"""
for r in self.get_room_corridors():
rp = self.room_props[r]
for adjr in rp.adj_rooms:
rp2 = self.room_props[adjr]
name = rp2.room_name(True)
rp.add_adjoining_room_name(name)
def add_furniture (self):
"""
Places furniture within the manor. Specialcases bedrooms and
otherwise uses the database definitions.
"""
for r in self.rooms:
rp = self.room_props[r]
bedcount = len(rp.owners)
if bedcount > 0:
self.add_bedroom_furniture(r, bedcount)
else:
furniture = rp.want_feats
if len(furniture) > 0:
self.add_room_furniture(r, furniture)
def get_pos_list_within_room (self, r):
"""
Returns a list of floor coordinates within a room that are not
directly adjacent to a door or window.
:``r``: The room id. *Required*.
"""
rm = self.get_room(r)
start = rm.pos() + 1
stop = rm.pos() + rm.size() - 1
candidates = []
for pos in coord.RectangleIterator(start, stop):
if self.get_feature(pos) != FLOOR:
continue
# Never block windows or doors with furniture.
allowed = True
for adj in coord.AdjacencyIterator(pos):
feat = self.get_feature(adj)
if feature_is_door(feat) or feature_is_window(feat):
allowed = False
break
if not allowed:
continue
# It's a valid position.
candidates.append(pos)
return candidates
def get_nearby_interesting_feature (self, curr):
"""
Returns a list of interesting features nearby a given position.
:``curr``: The current position in the manor. *Required*.
"""
curr_room = self.get_room_index(curr)
adj_features = []
features = []
for pos in coord.RectangleIterator(curr - 2, curr + 3):
if pos == curr:
continue
if pos.x < 0 or pos.x >= self.size().x or pos.y < 0 or pos.y >= self.size().y:
continue
nearby_feat = self.get_feature(pos)
# Skip boring features.
if nearby_feat == NOTHING or nearby_feat == WALL:
continue
# Make sure we stay within the same room.
if self.get_room_index(pos) != curr_room:
continue
if feature_is_door(nearby_feat) or not nearby_feat.traversable():
if (pos.x >= curr.x - 1 and pos.y >= curr.y - 1
and pos.x <= curr.x + 1 and pos.y <= curr.y + 1):
adj_features.append(nearby_feat)
else:
features.append(nearby_feat)
# If there are any directly adjacent features, return those.
if len(adj_features) > 0:
return adj_features
# ... otherwise, the list of nearby features.
return features
def add_bedroom_furniture (self, r, bedcount=1):
"""
Places additional furniture in bedrooms.
:``r``: The room id. *Required*.
:``bedcount``: The number of beds placed. *Default 1*.
"""
rp = self.room_props[r]
# First get a list of eligible positions within the room.
candidates = self.get_pos_list_within_room(r)
if len(candidates) == 0:
return
# First, place a (double) bed, then additional furniture.
other_furniture = []
for i in range(1, bedcount+1):
if coinflip():
other_furniture.append(WARDROBE)
if one_chance_in(3):
other_furniture.append(FIREPLACE)
if one_chance_in(8):
other_furniture.append(BOOKSHELF)
for i in range(1, bedcount+1):
if one_chance_in(6):
other_furniture.append(CHAIR)
tries = 20
while tries > 0:
tries -= 1
pos = random.choice(candidates)
if bedcount > 1: # need an adjacent second bed
free_adj = []
for adj in coord.AdjacencyIterator(pos):
if not adj in candidates:
continue
feat = self.get_feature(adj)
if not feature_is_floor(feat):
continue
free_adj.append(adj)
if len(free_adj) == 0:
continue
pos2 = random.choice(free_adj)
self.set_feature(pos2, BED)
candidates.remove(pos2)
rp.add_furniture_name("double bed")
self.set_feature(pos, BED)
candidates.remove(pos)
if len(rp.furniture) == 0:
rp.add_furniture_name("bed")
self.add_furniture_from_list(rp, other_furniture, candidates)
break
def add_room_furniture (self, r, furniture):
"""
Places furniture within a room.
:``r``: The room id. *Required*.
:``furniture``: A list of strings defining the furniture to be placed. *Required*.
"""
furniture_list = []
for f in furniture:
how_many = 1
if f[-1] == '?':
if coinflip():
continue
else:
f = f[:-1]
elif f[-1] == '+':
how_many = 2
f = f[:-1]
elif f[-1] == '!':
how_many = 3
f = f[:-1]
feat = get_furniture_by_name(f)
if feat == NOTHING:
continue
if feature_is_large_table(feat):
self.add_table_and_chairs(r, feat)
continue
furniture_list.append(feat)
if how_many > 1:
if how_many == 3:
for i in xrange(2):
furniture_list.append(feat)
# additional chances of placing more
for i in xrange(5):
if one_chance_in(3):
furniture_list.append(feat)
# Get a list of eligible positions within the room.
candidates = self.get_pos_list_within_room(r)
if len(candidates) == 0:
return
self.add_furniture_from_list(self.room_props[r], furniture_list, candidates)
def add_table_and_chairs (self, r, table_type):
"""
Places a table and, depending on the table type, possibly chairs
within a given room.
:``r``: The room id. *Required*.
:``table_type``: TextFeature representation of the table type. *Required*.
"""
rm = self.get_room(r)
rp = self.room_props[r]
start = rm.pos() + 2
stop = rm.pos() + rm.size() - 2
room_width = rm.size().x - 2
room_height = rm.size().y - 2
width = 3
height = 3
if table_type == BILLIARD_TABLE or room_height < 5:
height = 2
if rp.section == "utility":
if height > 2 and coinflip():
height -= 1
if one_chance_in(4):
width -= 1
else:
if height > 2 and (stop.y - start.y < 2 or coinflip()):
height -= 1
if stop.x - start.x > 2 and coinflip():
width += 1
if width != height and (room_height > room_width or room_height == room_width and coinflip()):
tmp = width
width = height
height = tmp
print "table width=%s, height=%s, room size: (%s), section: (%s)" % (width, height, rm.size(), stop - start)
if width > stop.x - start.x:
width = stop.x - start.x
if height > stop.y - start.y:
height = stop.y - start.y
startx = start.x
starty = start.y
if width < stop.x - startx:
startx = random.randint(start.x, stop.x - width)
if height < stop.y - starty:
starty = random.randint(start.y, stop.y - height)
# Only the dining table has chairs!
add_chairs = (table_type == DINING_TABLE)
tablestart = coord.Coord(startx, starty)
tablestop = coord.Coord(startx + width - 1, starty + height - 1)
chairstart = tablestart - 1
chairstop = tablestop + 1
for pos in coord.RectangleIterator(chairstart, chairstop + 1):
if self.features.__getitem__(pos) != FLOOR:
continue
feat = table_type
chairx = (pos.x == chairstart.x or pos.x == chairstop.x)
chairy = (pos.y == chairstart.y or pos.y == chairstop.y)
if chairx or chairy:
if add_chairs:
if chairx and chairy:
continue # skip corners
else:
feat = CHAIR
else:
continue
self.set_feature(pos, feat)
rp.add_furniture_name(table_type.name())
if add_chairs:
rp.add_furniture_name("some chairs", False)
def stays_in_room (self, pos):
"""
Returns whether the current position lies in the same room as a
previously defined room variable.
Used to restrict pathfinding within a single room.
:``pos``: A coordinate within the manor. *Required*.
"""
return self.get_room_index(pos) == self.curr_room
def pos_blocks_corridor (self, pos):
"""
Returns whether placing an intraversable feature at a given position
would block the path to any currently traversable place in a room.
:``pos``: A coordinate within the manor. *Required*.
"""
# Temporarily mark the position intraversable to check if this
# would block any paths, but remember the original feature.
old_feat = self.get_feature(pos)
self.features.__setitem__(pos, NOTHING)
# Store the current room in a helper variable.
self.curr_room = self.get_room_index(pos)
found = True
north = pos + DIR_NORTH
south = pos + DIR_SOUTH
east = pos + DIR_EAST
west = pos + DIR_WEST
north_trav = self.get_feature(north).traversable()
south_trav = self.get_feature(south).traversable()
if north_trav and south_trav:
found = pathfind.Pathfind(self.features, north, south, None, self.stays_in_room).path_exists()
if not found:
print "pos (%s) blocks N/S path" % pos
if found:
east_trav = self.get_feature(east).traversable()
west_trav = self.get_feature(west).traversable()
if east_trav and west_trav:
found = pathfind.Pathfind(self.features, east, west, None, self.stays_in_room).path_exists()
if not found:
print "pos (%s) blocks E/W path" % pos
if found and north_trav != south_trav and east_trav != west_trav:
if north_trav:
if east_trav:
found = pathfind.Pathfind(self.features, north, east, None, self.stays_in_room).path_exists()
else:
found = pathfind.Pathfind(self.features, north, west, None, self.stays_in_room).path_exists()
else:
if east_trav:
found = pathfind.Pathfind(self.features, south, east, None, self.stays_in_room).path_exists()
else:
found = pathfind.Pathfind(self.features, south, west, None, self.stays_in_room).path_exists()
if not found:
print "pos (%s) blocks diagonal path" % pos
# Reset to original feature.
self.features.__setitem__(pos, old_feat)
return (not found)
def add_furniture_from_list (self, rp, furniture, candidates):
"""
Places furniture from a list of features within a given room.
:``rp``: The RoomProps representation of the current room. *Required*.
:``furniture``: A list of TextFeatures representing the furniture to be placed. *Required*.
:``candidates``: A list of free coordinates within the room. *Required*.
"""
tries = 20
for feat in furniture:
if len(candidates) == 0:
break
print "Trying to place %s in %s" % (feat.name(), rp.name)
while tries > 0:
# For restrictions, only a chance of reducing the counter.
reduce_tries = True
pos = random.choice(candidates)
if feat.needs_wall():
# More tries with wall restriction.
if one_chance_in(4):
tries -= 1
reduce_tries = False
found_wall = False
for adj in coord.AdjacencyIterator(pos):
if self.get_feature(adj) == WALL:
found_wall = True
break
if not found_wall:
continue
if not feat.traversable():
if reduce_tries:
if one_chance_in(4):
tries -= 1
reduce_tries = False
if self.pos_blocks_corridor(pos):
continue
secondary_feat_name = None
if feat == DESK or feat == PIANO:
# need to place a chair
if reduce_tries:
if coinflip():
tries -= 1
reduce_tries = False
chair_candidates = []
for adj in coord.AdjacencyIterator(pos):
if adj not in candidates:
continue
if feature_is_floor(self.get_feature(adj)):
chair_candidates.append(adj)
if len(chair_candidates) == 0:
continue
chairpos = random.choice(chair_candidates)
if feat == DESK:
chair = CHAIR
else:
chair = STOOL
self.set_feature(chairpos, chair)
candidates.remove(chairpos)
secondary_feat_name = chair.name()
if reduce_tries:
tries -= 1
self.set_feature(pos, feat)
candidates.remove(pos)
rp.add_furniture_name("%s" % feat.name())
if secondary_feat_name != None:
rp.add_furniture_name(secondary_feat_name)
break
def get_bedroom_id (self, owner, rids = None, do_chance = True):
"""
Given a person id, returns their bedroom's room index.
:``owner``: A person from the suspect list. *Required*.
:``rids``: A list of available room indices. If none, all rooms are
considered available. *Default None*.
:``do_chance``: If true, there's a 75% chance of returning early
without checking for the bedroom index. *Default True*.
"""
if do_chance and not one_chance_in(4):
return None
if rids == None:
rids = self.rooms
rp = self.room_props
for r in rids:
if owner in rp[r].owners:
return r
return None
def pick_random_public_room (self, rids = None, force_adj_corr = False):
"""
Returns a random non-bedroom room index.
:``rids``: A list of available room indices. If none, all rooms are
considered available. *Default None*.
:``force_adj_corr``: If true, only consider rooms that have an
adjoining corridor. *Default False*.
"""
if rids == None:
rids = self.rooms
rp = self.room_props
candidates = []
for r in rids:
if force_adj_corr:
found_corr = False
for adj in rp[r].adj_rooms:
if rp[adj].is_corridor:
found_corr = True
break
if not found_corr:
continue
if len(rp[r].owners) == 0:
candidates.append(r)
if len(candidates) == 0:
return None
return random.choice(candidates)
def get_random_pos_in_room (self, rid):
"""
Returns a random traversable position in a given room.
:``rid``: Index of a manor's room. *Required*.
"""
room = self.get_room(rid)
start = room.pos() + 1
stop = room.pos() + room.size() - 2
while True:
pos = coord.Coord(random.randint(start.x, stop.x), random.randint(start.y, stop.y))
if self.get_feature(pos).traversable():
return pos
def pick_room_for_suspect (self, rids, idx1, idx2 = None, force_adj_corr = False):
"""
Given a suspect id or pair of suspect ids, returns the index of a
room for their current location.
:``rids``: A list of available room indices. *Required*.
:``idx1``: An index from the suspect list. *Required*.
:``idx2``: Another person's suspect index. *Default None*.
:``force_adj_corr``: If true, only consider rooms that have an
adjoining corridor. *Default False*.
"""
rp = self.room_props
r = self.get_bedroom_id(idx1, rids)
if r != None:
return r
if idx2 != None:
r = self.get_bedroom_id(idx2, rids)
if r != None:
return r
r = self.pick_random_public_room(rids, force_adj_corr)
if r != None:
return r
# Try for bedrooms again.
r = self.get_bedroom_id(idx1, rids)
if r != None:
return r
if idx2 != None:
r = self.get_bedroom_id(idx2, rids)
return r
```
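A rough usage sketch for the collection above (import paths and the call order are illustrative; the constructor simply forwards to `builder.BuilderCollection`, so a builder result can seed it):

```python
import builder, manor

base = builder.builder_by_type('U', min_rooms=20)
m = manor.ManorCollection(base)

m.add_features()      # carve walls, floors, doors and windows
m.init_room_names()   # assign room types, owners and furniture
m.print_rooms()       # debugging overview of the resulting layout
```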
#### File: murderrl/database/database.py
```python
import sys, os, random, collections, re
_dbobjects = []
PATH_DELIM = os.path.sep
class DatabaseError (Exception):
"""
An error class for any and all errors relating to databases.
"""
pass
class DatabaseFolder (object):
"""
A basic representation of a folder structure. This container does not
actually contain any database information; instead, it stores the Database
objects that are found in the specified folder.
"""
_databases = None
spec = None
def __init__ (self, folder, spec=None):
"""
Create a new database folder.
:``folder``: The folder path. Example: *names.db/*. This path should
contain databases within itself.
:``spec``: The default spec for this folder. *Default None*
"""
self.folder = folder
if spec is not None:
self.spec = spec
self._databases = []
def append (self, db):
"""
Add a database representation to the folder representation.
:``db``: The Database instance. This will be accessible by
``DatabaseFolder.database_name``.
"""
assert isinstance(db, Database)
assert not hasattr(self, db.name)
self._databases.append(db)
setattr(self, db.name, db)
def exists (self, db):
"""
Boolean function for determining if a database is contained within.
:``db``: The database name to search for.
"""
return hasattr(self, db) and isinstance(getattr(self, db), Database)
def extend (self, db_list):
"""
Append the contents of a list to the DatabaseFolder representation.
:``db_list``: The iterable list of databases to store.
"""
for db in db_list:
self.append(db)
def get (self, database, default=None):
"""
Fetch databases from the folder.
:``database``: The database name to search for.
:``default``: The default value to return if this folder does not
contain ``database``. *Default None*.
"""
if hasattr(self, database):
return getattr(self, database)
else:
return default
def __repr__ (self):
r = "<DatabaseFolder %s [" % self.folder
for d in self._databases:
r += d.name + ", "
return r.rstrip(", ") + "]>"
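# Sketch of the intended folder usage (names are illustrative): a folder
# groups several Database objects and exposes each one as an attribute.
#
#   folder = DatabaseFolder("names.db")
#   folder.append(Database("first", ["Alice", "Bob"]))
#   folder.exists("first")            # True
#   folder.get("last", default=None)  # None, nothing stored under that name
#   folder.first.random()             # attribute access works as well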
class Database (list):
"""
An extremely simplistic type that is nothing more than a wrapper on top of
the default list type.
"""
def __init__ (self, name, data):
"""
Initialises the database.
:``name``: The name of the Database. This is stored and used to describe
the database.
:``data``: The actual data of the Database. This should be a list of
items in any format.
"""
self.name = name
list.__init__(self, data)
def copy (self):
"""
Returns a copy of the database that allows for modification.
"""
return self.__class__(self.name, self[:])
def random (self, checkfn=None):
"""
Returns a random element from the Database.
:``checkfn``: A function to be applied to results. If this function
returns ``true``, the result is allowed; if it returns
``false``, another item is picked. *Default None*.
"""
if len(self) == 0:
return None
if checkfn is None:
return random.choice(self)
else:
item = random.choice(self)
tries = len(self) * 5
while not checkfn(item):
item = random.choice(self)
tries = tries - 1
if tries <= 0:
return None
return item
def random_pop (self, checkfn=None):
"""
Removes a random element from the Database and then returns it. This is
an in-place activity.
:``checkfn``: A function to be applied to results. If this function
returns ``true``, the result is allowed; if it returns
``false``, another item is picked. *Default None*.
"""
if len(self) == 0:
return None
item = random.randint(0, len(self)-1)
if checkfn is not None:
tries = len(self) * 5
while not checkfn(self[item]):
item = random.randint(0, len(self)-1)
tries = tries - 1
if tries <= 0:
return None
return self.pop(item)
def __repr__ (self):
return "Database%s" % (list.__repr__(self))
class WeightedString (str):
"""
A simple collation of a string and a weight.
The default weight of ``10`` means that the string has no higher or lesser
chance of being chosen from a WeightedDatabase than any other string. A
weight of ``20`` means that it has double the chance, a weight of ``5``
meaning that has half the chance, etc.
"""
def __new__ (cls, string, weight=10):
# str is immutable, so the extra weight argument has to be accepted (and
# dropped) in __new__ as well; otherwise constructing a WeightedString
# with an explicit weight raises a TypeError under Python 2.
return str.__new__(cls, string)
def __init__ (self, string, weight=10):
"""
Create a new weighted string.
:``string``: The actual string contents.
:``weight``: The weight of the string. *Default 10*.
"""
self.weight = weight
class WeightedDatabase (Database):
"""
A slightly more complicated collection of data stored by weight. The
"default" weight of the databse is ``10``. Random choices pick things by
weight as well as randomness, etc.
"""
def total_weight (self, checkfn=None):
"""
Return the total weight of the database.
:``checkfn``: A function to be applied to each item. If the function
returns ``false``, the weight of the item is ignored (and the
item is discarded). *Default None*.
"""
weight = 0
for item in self:
if checkfn is not None and not checkfn(item):
continue
assert hasattr(item, "weight")
weight += item.weight
return weight
def random_pick (self, checkfn=None):
"""
Randomly pick an item from the database based on its weight in
comparison to the total weight of the database. Returns a tuple of
(``index``, ``item``).
:``checkfn``: A function to be applied to the items in the database: if
it returns ``false``, the item is not considered. *Default
None*.
"""
tweight = self.total_weight(checkfn=checkfn)
if tweight == 0:
return None, None
n = random.uniform(0, tweight)
for num, item in enumerate(self):
if checkfn is not None and not checkfn(item):
continue
if item.weight >= n:
return num, item
n = n - item.weight
return None, None
def random (self, checkfn=None):
"""
Returns a random element from the Database, picked by weight.
:``checkfn``: A function to be applied to the items in the database: if
it returns ``false``, the item is not considered. *Default
None*.
"""
if len(self) == 0:
return None
return self.random_pick(checkfn=checkfn)[1]
def random_pop (self, checkfn=None):
"""
Removes a random element from the Database and then returns it. This is
an in-place activity.
:``checkfn``: A function to be applied to the items in the database: if
it returns ``false``, the item is not considered. *Default
None*.
"""
if len(self) == 0:
return None
index = self.random_pick(checkfn=checkfn)[0]
if index == None:
return None
return self.pop(index)
def __repr__ (self):
return "WeightedDatabase%s" % (list.__repr__(self))
def get_databases ():
"""
Returns a list of all Database objects stored.
"""
return _dbobjects[:]
def get_database (name, parent=None):
"""
Returns a specific Database object. If the Database doesn't exist, will
instead return ``None``.
:``name``: The name of the Database object being requested.
:``parent``: A possible DatabaseFolder instance or name to be searched
instead of the global scope. *Default None*
"""
if "." in name:
parent, name = name.split(".")
if parent is not None:
if not isinstance(parent, DatabaseFolder):
parent = globals().get(parent, None)
if parent is None or not isinstance(parent, DatabaseFolder):
return None
return parent.get(name, None)
return globals().get(name, None)
def database_exists (name, parent=None):
"""
    Checks for the existence of a specific database object.
:``name``: The name of the Database.
:``parent``: A possible DatabaseFolder instance or name to be searched
instead of the global scope. *Default None*.
"""
return get_database(name, parent) is not None
def num_databases ():
"""
Returns the total number of Databases available.
"""
return len(_dbobjects)
def split_escaped_delim (delimiter, string, count=0):
"""
Returns the result of splitting ``string`` with ``delimiter``. It is an
extension of ``string.split(delimiter, count)`` in that it ignores instances
of the delimiter being escaped or contained within a string.
:``delimiter``: The delimiter to split the string with. *Required*.
:``string``: The string to be split. *Required*.
:``count``: How many strings to limit the match to. *Default 0*.
"""
assert len(delimiter) == 1
split_expression = re.compile(r"""(?<!\\)%s""" % (delimiter))
result = split_expression.split(string, count)
return result
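# Illustrative example (not part of the original source): escaped delimiters
# are left alone, so splitting on "," behaves like this:
#   split_escaped_delim(",", r"dining room\, small,10")
#   -> ["dining room\\, small", "10"]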
def parse_spec (spec_file):
"""
Parses a specification into either a list or a namedtuple constructor.
**Example specifications**::
$0
*Would return a single-element list creator that could be applied to all
incoming data.*::
%delim ,
$0
$1
$2
*Would return a three-element list creator using "," as the delimiter.*::
$name
$weight 10
*Would return a two-element namedtuple called "(filename)_spec" with a name
and weight property. The weight would default to 10 if not supplied.*::
%id room_spec
$name
$weight
*Would return a two-element namedtuple called "room_spec" with a name and
weight property.*
**Example specification usage**::
(using the "room_spec" above)
%
name=dining room
%
name=kitchen
weight=20
In this instance, the order doesn't matter, as they are passed by
parameter::
(using the first unnamed list example)
%
dining room
%
kitchen
%
As there is just a single set of data, the block is parsed and stripped of
whitespace and then stored in a single element::
(using the second unnamed list example)
%
dining room,10,domestic
%
kitchen, 50, utility
%
    Here, the provided delimiter (a comma) is used to convert the incoming block
    into a three-element list.
"""
spec_object = None
spec_name = spec_file.replace(".", "_")
params = []
default_params = {}
int_conversion = []
namedtuple = False
delimiter = "\n"
spec_file = open(spec_file, "r")
spec = spec_file.readlines()
spec_file.close()
for line in spec:
line = line.strip()
param_name = None
default_param = None
if line.startswith("%id"):
spec_name = line.split(" ", 1)[1]
elif line.startswith("%delim"):
delimiter = line.split(" ", 1)[1].strip()
elif line.startswith("$"):
line = line.split(" ", 1)
if len(line) >= 1:
param_name = line[0].strip("$")
if len(line) == 2:
default_param = line[1].strip()
if param_name and not param_name.isdigit():
namedtuple = True
if default_param and param_name.isdigit():
assert param_name != "0"
params.append(param_name)
if default_param:
default_params[param_name]=default_param
elif line.startswith("%int"):
var = line.split(" ", 1)[1].strip()
int_conversion.append(var)
if namedtuple:
class parent (object):
def __init__ (self, *args, **kwargs):
self.__name__ = spec_name
if len(args) == len(params):
# arg for arg
for key, value in zip(params, args):
self.__dict__[key] = value
elif len(kwargs) == len(params):
for key, value in kwargs.iteritems():
self.__dict__[key] = value
else:
assert not "Didn't get the right number of arguments!"
def __repr__ (self):
values = ""
for key in params:
values += "%s=%s," % (key, repr(self.__dict__[key]))
return "<%s %s>" % (self.__name__, values.strip(", "))
else:
parent = list
class spec_object (parent):
def __init__ (self, block):
self.__name__ = spec_name
if isinstance(block, str):
block = split_escaped_delim(delimiter, block.strip())
assert len(block) + len(default_params) >= len(params)
if len(block) < len(params):
for key, default in default_params.iteritems():
if key.isdigit():
assert int(key) >= len(block)
block.insert(int(key), default)
else:
block.append("%s=%s" % (key, default))
if not namedtuple:
if int_conversion:
for conv in int_conversion:
block[conv] = int(block[conv])
parent.__init__(self, block)
else:
new_data = {}
for item in block:
new_item = split_escaped_delim("=", item, 1)
if len(new_item) == 1:
new_item = split_escaped_delim(":", item, 1)
if len(new_item) == 1:
raise DatabaseError, "Corrupted line? %s" % item
item = new_item
if int_conversion and item[0] in int_conversion:
item[1] = int(item[1])
assert len(item) == 2
# Don't overwrite real data with default values!
if item[0] not in new_data:
new_data[item[0]] = item[1]
parent.__init__(self, **new_data)
elif isinstance(block, list):
if not namedtuple:
parent.__init__(self, block)
else:
parent.__init__(self, *block)
elif isinstance(block, dict):
assert namedtuple
parent.__init__(self, **block)
def __repr__ (self):
if namedtuple:
return parent.__repr__(self)
else:
return "<%s %s>" % (self.__name__, parent.__repr__(self))
return spec_object
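# Hedged usage sketch (not part of the original source; the file name is
# hypothetical): given the "room_spec" specification shown in the docstring
# above, parse_spec returns a constructor for "%"-delimited blocks:
#   room_spec = parse_spec("rooms.spec")
#   room = room_spec("name=kitchen\nweight=20")
#   # -> <room_spec name='kitchen',weight='20'>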
def _do_build ():
"""
Convert the contents of the local directory, or a data directory relevant to
the local directory, into a series of Database objects.
"""
if os.path.exists("./database"):
data_path = "./database/"
elif os.path.exists("../database"):
data_path = "../database/"
elif os.path.exists("../../database"):
data_path = "../../database/"
else:
data_path = "."
dir_specs = {}
databases = []
# first pass over the databases to create complete tree:
for dirpath, dirnames, filenames in os.walk(data_path):
# all databases are stored
for name in filenames:
if name.endswith(".db"):
databases.append(os.path.join(dirpath, name).replace(data_path, ""))
# but we need to store specs here otherwise things could get a bit confusing
elif name.endswith(".spec"):
possible_dir = os.path.join(dirpath, name[:-5]+".db")
if os.path.exists(possible_dir) and os.path.isdir(possible_dir):
spec_name = possible_dir.replace(data_path, "")
dir_specs[spec_name] = parse_spec(os.path.join(dirpath, name))
# and we create DatabaseFolders for each subfolder
for name in dirnames:
if name.endswith(".db"):
# dump the extension here too
obj_name = name[:-3]
this_folder = DatabaseFolder(obj_name)
                if name in dir_specs:
this_folder.spec = dir_specs.pop(name)
if dirpath != data_path:
search = dirpath.replace(data_path, "").split(PATH_DELIM)
try:
top_folder = globals()[search[0]]
except KeyError:
raise DatabaseError, "Subdirectory of a db folder without a DatabaseFolder?"
for p in search[1:]:
if p == name:
break
try:
top_folder = getattr(top_folder, p)
except AttributeError:
raise DatabaseError, "Subdirectory of a db subfolder without a DatabaseFolder subfolder!"
top_folder.append(this_folder)
else:
globals()[obj_name] = this_folder
for database in databases:
build_from_file_name(database, data_path)
def build_from_file_name (database, data_path, folder=None, spec=None):
"""
Converts a database file via a specification into a Database instance and
    then inserts it into the global scope or a specific parent based on
provided information.
:``database``: The filename to be opened. If this is in a subfolder, the
subfolder name will be removed from the final name and the
database will be available globally, unless ``folder`` has
been specified, or ``folder`` is already a globally available
folder. *Required*.
:``data_path``: This will be appended to the beginning of all I/O operations
but will not be treated as a ``folder``. *Required*.
:``folder``: The folder this database will be appended to. If None and the
database contains a folder name, the folder will be looked for
globally and if found, the database will be appended to this;
if there is no folder available, the database will be inserted
into the global scope. *Default None*.
:``spec``: A specification object that matches the contents of this
database. If not provided, and a specification exists, this
specification will be used instead. If not provided and ``folder``
is not none, and the ``folder`` contains a specification, this
will be used instead. *Default None*.
"""
# chop the extension off
temp = database.split(PATH_DELIM)
name = database[:-3]
folder_name = None
store_point = None
if len(temp) != 1:
folder_name = PATH_DELIM.join(temp[:-1])
name = temp[-1][:-3]
if folder_name is not None and not folder:
search = folder_name
if PATH_DELIM in folder_name:
# we need to look recursively, but not yet
search = folder_name.split(PATH_DELIM)[0]
try:
store_point = globals()[search.replace(".db", "")]
except KeyError:
pass
if PATH_DELIM in folder_name:
# now recurse
searches = folder_name.split(PATH_DELIM)[1:]
for search in searches:
try:
store_point = getattr(store_point, search.replace(".db", ""))
except AttributeError:
break
elif folder:
store_point = folder
if spec is None:
if store_point is not None and store_point.spec is not None:
spec_obj = store_point.spec
else:
spec = name + ".spec"
if os.path.exists(os.path.join(data_path, spec)):
spec_obj = parse_spec(os.path.join(data_path, spec))
else:
spec_obj = str
else:
spec_obj = spec
dbfile = open(os.path.join(data_path, database), "r")
dbfile_contents = [item.strip() for item in dbfile.read().strip().strip("%").split("%")]
dbdata = [spec_obj(item) for item in dbfile_contents if not item.startswith("#")]
db = Database
if hasattr(dbdata[0], 'weight'):
db = WeightedDatabase
this_db = db(name, dbdata)
dbfile.close()
if store_point:
store_point.append(this_db)
else:
globals()[name] = this_db
_dbobjects.append(this_db)
_do_build()
```
#### File: murderrl/docs/conf.py
```python
import inspect
from sphinx.ext.autodoc import ClassDocumenter
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'murderrl'
copyright = u'2011, <NAME>, <NAME>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0a'
# The full version, including alpha/beta/rc tags.
release = '0.0a'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'murderrldoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'murderrl.tex', u'murderrl Documentation',
u'<NAME>, <NAME>', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
autodoc_member_order = "groupwise"
autoclass_content = "class"
def method_info (method):
if inspect.ismethod(method):
return "%s.%s" % (method.im_class.__name__, method.im_func.__name__)
return "%s" % (method.__name__)
def _resolve_doc (method):
my_docs = []
if hasattr(method, "extends"):
extends = method.extends
if isinstance(extends, tuple):
for exmeth in extends:
assert inspect.ismethod(exmeth)
doc = _resolve_doc(exmeth)
if doc:
my_docs.extend(doc)
else:
my_docs.extend(_resolve_doc(extends))
elif method.__doc__:
my_docs.append((method_info(method), inspect.getdoc(method)))
return my_docs
def resolve_doc (method):
lines = _resolve_doc(method)
result = []
done_methods = []
for method, line in lines:
if method in done_methods:
continue
result.append("*Documentation inherited from* :meth:`%s`:" % (method))
result.append("")
result.extend(line.split("\n"))
done_methods.append(method)
return result
def process_signature (app, what, name, obj, options, signature, return_annotation):
if hasattr(obj, "extends") and callable(obj):
extends = obj.extends
if isinstance(extends, tuple):
argspec = inspect.getargspec(extends[0])
else:
argspec = inspect.getargspec(extends)
if argspec[0] and argspec[0][0] in ('cls', 'self'):
del argspec[0][0]
signature = inspect.formatargspec(*argspec)
return (signature, return_annotation)
def process_docstring (app, what, name, obj, options, lines):
if hasattr(obj, "extends") and callable(obj):
lines.extend(resolve_doc(obj))
return lines
class NewClassDocumenter(ClassDocumenter):
def format_args(self):
# for classes, the relevant signature is the __init__ method's
initmeth = self.get_attr(self.object, '__init__', None)
# classes without __init__ method, default __init__ or
# __init__ written in C?
if initmeth is None or initmeth is object.__init__ or not \
(inspect.ismethod(initmeth) or inspect.isfunction(initmeth)):
return None
try:
argspec = inspect.getargspec(initmeth)
except TypeError:
# still not possible: happens e.g. for old-style classes
# with __init__ in C
return None
if argspec[0] and argspec[0][0] in ('cls', 'self'):
del argspec[0][0]
args = inspect.formatargspec(*argspec)
result = self.env.app.emit_firstresult(
'autodoc-process-signature', self.objtype, self.fullname,
initmeth, self.options, args, None)
if result:
return result[0]
else:
return args
def setup (app):
app.add_autodocumenter(NewClassDocumenter)
app.connect('autodoc-process-docstring', process_docstring)
app.connect('autodoc-process-signature', process_signature)
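# Illustrative sketch (hypothetical class names, not part of the original
# source): the hooks registered above look for an ``extends`` attribute on a
# documented callable, so a subclass can inherit the documentation and
# signature of the method(s) it overrides:
#
#   class Derived(Base):
#       def render(self, size, focus=False):
#           pass
#       render.extends = Base.render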
```
#### File: interface/console/__init__.py
```python
import curse, win32
__provides__ = curse, win32
def select (priority="curse"):
"""
Select a supported interface.
"""
if not curse.UNAVAILABLE and (priority == "curse" or win32.UNAVAILABLE):
return curse
if not win32.UNAVAILABLE and (priority == "win32" or curse.UNAVAILABLE):
return win32
return None
```
#### File: console/win32/_win32.py
```python
import library.coord as coord
import library.colour
import ctypes
from _subprocess import INFINITE, WAIT_OBJECT_0
OLD_SCREEN_SIZE = None
COMMON_LVB_UNDERSCORE = 32768
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
WAIT_TIMEOUT = 0x102
WAIT_ABANDONED = 0x80
class _COORD(ctypes.Structure):
_fields_ = [
("X", ctypes.c_short),
("Y", ctypes.c_short)]
class _SMALL_RECT(ctypes.Structure):
_fields_ = [
("Left", ctypes.c_short),
("Top", ctypes.c_short),
("Right", ctypes.c_short),
("Bottom", ctypes.c_short)]
class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [
("dwSize", _COORD),
("dwCursorPosition", _COORD),
("wAttributes", ctypes.c_ushort),
("srWindow", _SMALL_RECT),
("dwMaximumWindowSize", _COORD)]
class _CONSOLE_CURSOR_INFO (ctypes.Structure):
_fields_ = [
("dwSize", ctypes.c_ushort),
("bVisible", ctypes.c_bool)]
class _FOCUS_EVENT_RECORD (ctypes.Structure):
_fields_ = [
("bSetFocus", ctypes.c_bool)]
class _UCHAR (ctypes.Union):
_fields_ = [
("UnicodeChar", ctypes.c_char),
("AsciiChar", ctypes.c_wchar)]
class _KEY_EVENT_RECORD (ctypes.Structure):
_fields_ = [
("bKeyDown", ctypes.c_bool),
("wRepeatCount", ctypes.c_ushort),
("wVirtualKeyCode", ctypes.c_ushort),
("wVirtualScanCode", ctypes.c_ushort),
("uChar", _UCHAR),
("dwControlKeyState", ctypes.c_uint)]
class _MENU_EVENT_RECORD (ctypes.Structure):
_fields_ = [
("dwCommandId", ctypes.c_uint)]
class _MOUSE_EVENT_RECORD (ctypes.Structure):
_fields_ = [
("dwMousePosition", _COORD),
("dwButtonState", ctypes.c_uint),
("dwControlKeyState", ctypes.c_uint),
("dwEventFlags", ctypes.c_uint)]
class _WINDOW_BUFFER_SIZE_RECORD (ctypes.Structure):
_fields_ = [
("dwSize", _COORD)]
class _EVENT (ctypes.Union):
_fields_ = [
("KeyEvent", _KEY_EVENT_RECORD),
("MouseEvent", _MOUSE_EVENT_RECORD),
("WindowBufferSizeEvent", _WINDOW_BUFFER_SIZE_RECORD),
("MenuEvent", _MENU_EVENT_RECORD),
("FocusEvent", _FOCUS_EVENT_RECORD)]
class _INPUT_RECORD (ctypes.Structure):
_fields_ = [
("EventType", ctypes.c_ushort),
("Event", _EVENT)]
class DOSBaseColour (library.colour.BaseColour):
def __init__ (self, colour):
super(DOSBaseColour, self).__init__(colour._colour, colour._colour_id)
    def is_background (self):
        if self._colour_id == 0x0000:
            return True
        return self._colour_id & 0x0070
    def is_foreground (self):
        if self._colour_id == 0x0000:
            return True
        return self._colour_id & 0x0007
    def as_background (self):
        if self.is_background():
            return self._colour_id & 0x0070
        # Shift the foreground bits (0x0007) up into the background nibble.
        return (self._colour_id & 0x0007) << 4
    def as_foreground (self):
        if self.is_foreground():
            return self._colour_id & 0x0007
        # Shift the background bits (0x0070) down into the foreground nibble.
        return (self._colour_id & 0x0070) >> 4
class DOSColour (library.colour.Colour):
def __init__ (self, colour):
super(DOSColour, self).__init__(colour._foreground, colour._background, colour._style)
    def as_dos (self):
fg = DOSBaseColour(self._foreground).as_foreground()
bg = DOSBaseColour(self._background).as_background()
new_col = fg | bg
if self._style == "underline":
new_col |= COMMON_LVB_UNDERSCORE
return new_col
def _STDOUT ():
return _GetStdHandle(STD_OUTPUT_HANDLE)
def _STDIN ():
return _GetStdHandle(STD_INPUT_HANDLE)
def _STDERR ():
return _GetStdHandle(STD_ERROR_HANDLE)
def _GetStdHandle (handle):
return ctypes.windll.kernel32.GetStdHandle(handle)
def _GetColour ():
return _GetConsoleScreenBufferInfo().wAttributes
def _GetBG ():
return _GetConsoleScreenBufferInfo().wAttributes & 0x0070
def _GetFG ():
return _GetConsoleScreenBufferInfo().wAttributes & 0x0007
def _WaitForMultipleObjects (handles, wait_all=False, wtime=INFINITE):
return ctypes.windll.kernel32.WaitForMultipleObjects(len(handles), (ctypes.c_int*len(handles))(*handles), ctypes.c_bool(wait_all), wtime)
def _GetConsoleScreenBufferInfo ():
buffer_info = _CONSOLE_SCREEN_BUFFER_INFO()
ctypes.windll.kernel32.GetConsoleScreenBufferInfo(_STDOUT(), ctypes.byref(buffer_info))
return buffer_info
def _GetConsoleCursorInfo ():
cursor_info = _CONSOLE_CURSOR_INFO()
ctypes.windll.kernel32.GetConsoleCursorInfo(_STDOUT(), ctypes.byref(cursor_info))
return cursor_info
def _SetColour (colour, dos=False):
if not dos:
colour = DOSColour(colour).as_dos()
return ctypes.windll.kernel32.SetConsoleTextAttribute(_STDOUT(), colour)
def _SetConsoleCursorInfo (size=1, visible=True):
cursor_info = _CONSOLE_CURSOR_INFO()
cursor_info.dwSize = size
cursor_info.bVisible = ctypes.c_bool(visible)
return ctypes.windll.kernel32.SetConsoleCursorInfo(_STDOUT(), cursor_info)
def _SetConsoleSize (c=None):
if c is None:
c = _GetConsoleScreenBufferInfo().dwSize
c.Y = 25
if not isinstance(c, _COORD):
c = _COORD(c.x, c.y)
return ctypes.windll.kernel32.SetConsoleScreenBufferSize(_STDOUT(), c)
def _goto (c):
if not isinstance(c, _COORD):
c = _COORD(c.x, c.y)
return ctypes.windll.kernel32.SetConsoleCursorPosition(_STDOUT(), c)
def _getxy ():
return _GetConsoleScreenBufferInfo().dwCursorPosition
def put (char, c, colour=None):
_goto(c)
old_c = _getxy()
old_colour = _GetColour()
if colour is not None:
_SetColour(colour)
print char
_goto(old_c)
_SetColour(old_colour, True)
def get (err=False, block=False):
pass
def clear (char=None, colour=None):
termsize = size()
if char is None:
char = " "
for x in xrange(termsize.width):
for y in xrange(termsize.height):
put (char, coord.Coord(x, y), colour)
def init ():
global OLD_SCREEN_SIZE
OLD_SCREEN_SIZE = _GetConsoleScreenBufferInfo().dwSize
OLD_SCREEN_SIZE = coord.Coord(OLD_SCREEN_SIZE.X, OLD_SCREEN_SIZE.Y)
_SetConsoleSize()
def deinit ():
_SetConsoleSize(OLD_SCREEN_SIZE)
def size ():
info = _GetConsoleScreenBufferInfo().dwSize
size = coord.Size()
size.width = info.X
size.height = info.Y
return size
def wrapper (fn):
try:
fn()
except:
deinit()
raise
deinit()
```
#### File: murderrl/interface/screen.py
```python
import textwrap
from library import shape, coord, viewport
class Grid (object):
_grid = None
def __init__ (self, width=0, height=0):
self._grid = []
for row in xrange(height):
row = []
for column in xrange(width):
row.append(None)
self._grid.append(row)
def at (self, c, x=None):
if x is not None:
            c = coord.Coord(c, x)
try:
return self._grid[c.y][c.x]
except IndexError:
return None
    def set (self, c, x=None, grid=None):
        if grid is not None:
            # Called as set(x, y, value): build the coordinate from x and y.
            c = coord.Coord(c, x)
        else:
            # Called as set(coord, value): the value arrived in ``x``.
            grid = x
try:
self._grid[c.y][c.x] = grid
except IndexError:
return False
return True
class ScreenGrid (shape.Shape):
pass
class ColourGrid (Grid):
pass
class Screen (object):
_regions = None
_glyphs = None
_colours = None
_screen = None
def __init__ (self, size, phys_screen):
width, height = size
self._glyphs = ScreenGrid(width=width, height=height, fill=" ")
self._colours = ColourGrid(width=width, height=height)
self._regions = []
self._screen = phys_screen
def region (self, start, stop=None, name=None):
if stop is not None:
new_region = Region(start, stop, name)
else:
new_region = start
name = stop
self._regions.append(new_region)
def regions (self, index=None):
if index is not None:
try:
return self._regions[index]
except TypeError:
return self.region_by_name(index)
else:
return self._regions[:]
def region_by_name (self, name):
for region in self.regions():
if region.name == name:
return region
return None
def colours (self):
return self._colours
def glyphs (self):
return self._glyphs
def physical (self):
return self._screen
def blit (self):
"""
This blits the entire contents of self.glyphs onto the screen -- using
colours where appropriate.
"""
for c, glyph in self.glyphs():
colour = self.colours().at(c)
self.physical().put(glyph, c, colour)
```
#### File: murderrl/tests/features.py
```python
import curses
from builder import builder, manor
import library.viewport, library.coord
import interface.console
from library.feature import *
from interface.features import *
screen = interface.console.select()
def put_text (text, spot):
for ind, char in enumerate(text):
screen.put(char, library.coord.Coord(spot.x+ind, spot.y))
def main ():
screen.init()
# First, build the manor.
# base_manor = builder.manor.base_builder()
base_manor = manor.ManorCollection(builder.build_random())
# Translate rooms and corridors into wall and floor features.
base_manor.init_features()
# Add doors along corridors, and windows.
base_manor.add_doors()
base_manor.add_windows()
# Combine the room shapes into a canvas
mymanor = base_manor.combine()
# Debugging output
print "Doors:", base_manor.doors
print "Rooms:"
base_manor.print_rooms()
print "#Legs: ", base_manor.count_legs()
for i in base_manor.legs:
print i
print "Corridors:"
base_manor.print_corridors()
# Draw features on canvas.
for pos in library.coord.RectangleIterator(mymanor.size()):
feat = base_manor.get_feature(pos)
if feat != NOTHING and feat != WALL and feat != FLOOR:
            mymanor[pos] = feat.glyph()
# Initialise the view port.
vp = library.viewport.ViewPort(buffer=mymanor,
width =min(mymanor.size().width, 70),
height=min(mymanor.size().height, 20))
# Initialise a couple of variables.
ppos = library.coord.Coord(35, 10) # player (@) position in the viewport
last_move = library.coord.Coord(0, 0) # the last step taken by the player
placement = True # initial player placement
move_was_blocked = False # tried to leave the manor boundaries
did_move = True # actually took a step
print_features = False # draw manor via the feature grid
while True:
screen.clear(" ")
# The currently visible section of the viewport.
sect = vp.sect()
# The real player position in the manor.
real_pos = library.coord.Coord(vp._left + ppos.x + 1, vp._top + ppos.y + 1)
# Depending on the current toggle state (toggle key 't'), either draw
# the manor via the feature grid, or via the shape canvas.
if print_features:
for coord in library.coord.RectangleIterator(sect.size()):
if coord >= base_manor.features.size():
continue
real_coord = coord + library.coord.Coord(vp._left, vp._top)
                char = base_manor.features[real_coord].glyph()
if (coord == ppos):
if placement:
ppos.x += 2
elif char == " " or move_was_blocked:
ppos = ppos - last_move
char = "X"
screen.put(char, coord+1)
else:
for coord, char in sect:
                if char is None:
char = " "
# Don't place the player outside the manor.
# Initially place him elsewhere, later disallow such movements.
if (coord == ppos):
# FIXME: Choose a sensible starting position and get
# rid of this hack.
if placement:
ppos.x += 2
elif char == " " or move_was_blocked:
ppos = ppos - last_move
char = "X"
screen.put(char, coord+1)
placement = False
# Draw the player.
screen.put("@", ppos + 1)
# Debugging information.
put_text("Sect size : %s, Start coord: %s, Stop coord: %s" % (sect.size(), library.coord.Coord(vp._left, vp._top), library.coord.Coord(vp._left + vp._width, vp._top + vp._height)), library.coord.Coord(0, 23))
# Get the current room/corridor id.
id = base_manor.get_corridor_index(real_pos)
type = "corridor"
        if id is None:
id = base_manor.get_room_index(real_pos)
type = "room"
put_text("Manor size: %s, Player coord: %s, last_move: %s, %s id: %s" % (mymanor.size(), real_pos, last_move, type, id), library.coord.Coord(0, 24))
# Get a key.
ch = screen.get(block=True)
# Move the player (@) via the arrow keys.
# If we haven't reached the manor boundaries yet, scroll in that direction.
# Otherwise, take a step unless it would make us leave the manor.
# Reinitialise the relevant variables.
last_move = library.coord.Coord(0, 0)
move_was_blocked = False
did_move = True
if ch == curses.KEY_UP:
last_move.y = -1
if vp._top > 0:
vp.up(1)
elif real_pos.y > 2:
ppos.y -= 1
else:
move_was_blocked = True
elif ch == curses.KEY_DOWN:
last_move.y = 1
if vp._top + vp._height < mymanor.size().y:
vp.down(1)
elif real_pos.y < mymanor.size().y - 1:
ppos.y += 1
else:
move_was_blocked = True
elif ch == curses.KEY_LEFT:
last_move.x = -1
if vp._left > 0:
vp.left(1)
elif real_pos.x > 2:
ppos.x -= 1
else:
move_was_blocked = True
elif ch == curses.KEY_RIGHT:
last_move.x = 1
if vp._left + vp._width < mymanor.size().x:
vp.right(1)
elif real_pos.x < mymanor.size().x - 1:
ppos.x += 1
else:
move_was_blocked = True
elif (ch in range(256) and chr(ch) == 't'):
# Toggle between feature grid (true) and canvas view (false).
print_features = not print_features
did_move = False
else:
break
if move_was_blocked:
# Reset last_move.
last_move = library.coord.Coord(0, 0)
did_move = False
screen.deinit()
if __name__=="__main__":
screen.wrapper(main)
``` |
{
"source": "jmcb/urwidx",
"score": 2
} |
#### File: urwidx/ux/layout.py
```python
import urwid
# Taken from http://www.mail-archive.com/<EMAIL>/msg00515.html
class OffsetOverlay(urwid.Overlay):
def calculate_padding_filler(self, size, focus):
l, r, t, b = self.__super.calculate_padding_filler(size, focus)
return l+1, max(0, r-1), t+1, max(0, b-1)
# A flow widget version of urwid.Frame.
class SizedFrame (urwid.BoxAdapter):
def __init__ (self, height, body, header=None, footer=None, focus_part='body'):
urwid.BoxAdapter.__init__(self, urwid.Frame(body, header, footer, focus_part), height)
def get_cursor_coords (self, size):
return self.render(size, focus=True).cursor
# Walker that interfaces on a list of string values, providing editors for them.
# Support in-place editing by setting in_place=True.
class ListEditorWalker (urwid.SimpleListWalker):
def __init__ (self, contents, editor=urwid.Edit, in_place=True):
self.to_update = contents
self.editors = [None for i in contents]
self.editor = editor
self.focus = 0
urwid.MonitoredList.__init__(self, contents)
if in_place:
urwid.connect_signal(self, "modified", self.update_list)
def update_list (self):
self.to_update[:] = list(self)
def wipe_editors (self):
self.editors = [None for i in self.contents]
def _get_focus (self, pos):
if pos < 0: return None, None
if len(self.contents) <= pos: return None, None
return self._get_focus_editor(pos), pos
def _get_focus_editor (self, pos):
if self.editors[pos]:
return self.editors[pos]
def update_list (pos):
def f (editor, new_value):
if not self.editors[pos]:
self.editors[pos] = None
self.contents[pos] = new_value
self._modified()
return f
editor = self.editor(edit_text=self.contents[pos])
urwid.connect_signal(editor, 'change', update_list(pos))
self.editors[pos] = editor
return editor
def set_focus (self, focus):
assert isinstance(focus, int)
self.focus = focus
self._modified()
def get_next (self, pos):
return self._get_focus(pos+1)
def get_prev (self, pos):
return self._get_focus(pos-1)
def get_focus (self):
if len(self.contents) == 0: return None, None
return self._get_focus_editor(self.focus), self.focus
def new (self, end=True):
if end == True:
self.contents.append("")
self.editors.append(None)
self.focus = len(self.contents)-1
else:
self.contents.insert(self.focus, "")
self.editors.append(None)
self.wipe_editors()
def snip (self):
del self.contents[self.focus]
self.wipe_editors()
# An interface to ListEditorWalker.
class ListBoxEditor (urwid.ListBox):
def __init__ (self, to_edit, editor=urwid.Edit, walker=ListEditorWalker, meta_key="ctrl e", del_key="-", append_key="+", insert_key="insert"):
self.to_edit = to_edit
self.meta_key = meta_key
self.append_key = append_key
self.insert_key = insert_key
self.delete_key = del_key
self.looking_meta = False
self.walker = walker(self.to_edit, editor)
urwid.ListBox.__init__(self, self.walker)
def keypress (self, size, key):
if self.looking_meta:
self.looking_meta = False
if key == self.append_key or key == self.insert_key:
self.walker.new(key == self.append_key)
return
elif key == self.delete_key:
self.walker.snip()
return
self.looking_meta = (key == self.meta_key)
return urwid.ListBox.keypress(self, size, key)
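# Hedged usage sketch (not part of the original source): wrap a plain list of
# strings to edit it in place inside an urwid main loop.
#   lines = ["first", "second"]
#   editor = ListBoxEditor(lines)      # edits are written back into ``lines``
#   # urwid.MainLoop(editor).run()     # "ctrl e" then "+" appends a new row,
#   #                                  # "ctrl e" then "-" deletes the focused one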
``` |
{
"source": "jmccand/Student_change_web_app",
"score": 3
} |
#### File: jmccand/Student_change_web_app/updown.py
```python
import db
class User:
    def __init__(self, email, cookie_code, activity=None, votes=None, verified_email=False):
        # None sentinels avoid mutable default arguments, which would be
        # shared between every User instance.
        self.email = email
        self.cookie_code = cookie_code
        self.activity = activity if activity is not None else {}
        self.votes = votes if votes is not None else {}
        self.verified_email = verified_email
class Opinion:
def __init__(self, ID, text, activity, approved=None, scheduled=False, committee_jurisdiction=None):
self.ID = ID
self.text = text
self.activity = activity
self.approved = approved
self.scheduled = scheduled
self.committee_jurisdiction = committee_jurisdiction
def count_votes(self):
up_votes = 0
down_votes = 0
abstains = 0
for user in db.user_cookies.values():
if user.verified_email and str(self.ID) in user.votes:
this_vote = user.votes[str(self.ID)][-1][0]
#print(f'{user.email} has voted {this_vote}')
if this_vote == 'up':
up_votes += 1
elif this_vote == 'down':
down_votes += 1
elif this_vote == 'abstain':
abstains += 1
else:
raise ValueError(f'Found a vote other than up, down, or abstain: {this_vote}')
return up_votes, down_votes, abstains
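# Hedged usage sketch (not part of the original source): each verified user's
# vote history for an opinion is a list and only the newest entry counts, so
#   up, down, abstain = opinion.count_votes()
# reflects every verified user's latest "up"/"down"/"abstain" choice.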
```
#### File: jmccand/Student_change_web_app/wsgi_example.py
```python
from wsgiref.simple_server import make_server
def application(environ, start_response):
for key, item in environ.items():
print(f'{key} {item}')
path = environ.get('PATH_INFO')
if path == '/':
response_body = "Index"
else:
response_body = "Hello"
status = "200 OK"
response_headers = [("Content-Length", str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
httpd = make_server(
'10.17.4.17', 8051, application)
httpd.serve_forever()
``` |
{
"source": "jmccand/werewolf",
"score": 3
} |
#### File: jmccand/werewolf/main.py
```python
import random
import urllib.parse
import webbrowser
import os
import socket
from http.cookies import SimpleCookie
from http.server import HTTPServer, SimpleHTTPRequestHandler
import json
import uuid
from datetime import datetime
import _thread
import time
import math
class MyHandler(SimpleHTTPRequestHandler):
player_usernames = set()
def do_GET(self):
print('\npath = ' + self.path)
host = self.headers.get('Host')
print(host)
if False and host != 'werewolf.joelmccandless.com:8000':
self.send_response(302)
self.send_header('Location', 'http://werewolf.joelmccandless.com:8000')
self.end_headers()
return
else:
if self.path == '/change_username':
return self.change_username()
elif self.path.startswith('/set_username'):
return self.set_username()
elif self.path.startswith('/new_game'):
return self.new_game()
elif self.path.startswith('/join_game'):
return self.join_game()
elif self.path == '/':
return self.handleHomepage()
elif self.path.endswith('.jpg'):
return self.load_image()
elif self.path.startswith('/waiting_room'):
return self.waiting_room()
elif self.path.startswith('/game_state'):
return self.get_gamestate()
elif self.path.startswith('/pick_roles'):
return self.pick_roles()
elif self.path.startswith('/set_roles'):
return self.set_roles()
elif self.path.startswith('/view_roles'):
return self.view_roles()
elif self.path.startswith('/deal_cards'):
return self.deal_cards()
elif self.path.startswith('/show_cards'):
return self.show_cards()
elif self.path.startswith('/start_night'):
return self.start_night()
elif self.path.startswith('/add_selected'):
return self.add_selected()
elif self.path == '/favicon.ico':
self.path = '/Card Backside.ico'
return self.load_image()
elif self.path.startswith('/vote'):
return self.vote()
else:
raise RuntimeError(f'got a path from {self.path}')
def set_username(self):
print('set username')
arguments = urllib.parse.parse_qs(urllib.parse.urlparse(self.path).query)
if 'username' in arguments:
username = arguments['username'][0]
print(username)
else:
username = None
if 'username' not in arguments or username in MyHandler.player_usernames:
self.send_response(200)
self.end_headers()
self.wfile.write('<html><body>'.encode('utf8'))
# if username in MyHandler.player_usernames:
# self.wfile.write(f'Sorry, the username "{username}" was already taken. Please choose another username.'.encode('utf8'))
self.wfile.write('''
<form method = 'GET' action = '/set_username'>
Please enter your username:
<input type = 'text' name = 'username'/>
<input type = 'submit' value = 'submit'/>
</form>
</body>
</html>
'''.encode('utf8'))
        else:
            # Reserve the username so later sign-ins with the same name are
            # rejected by the duplicate check above.
            MyHandler.player_usernames.add(username)
            self.send_response(302)
            self.send_header('Set-Cookie', 'username="%s"' % username)
            self.send_header('Location', '/')
            self.end_headers()
def change_username(self):
print('change username called')
self.send_response(302)
print('sending expired cookie...')
self.send_header('Set-Cookie', 'username=None; expires=Mon, 14 Sep 2020 07:00:00 GMT')
print('expired cookie sent!')
self.send_header('Location', '/set_username')
self.end_headers()
cookies = SimpleCookie(self.headers.get('Cookie'))
username = cookies['username'].value
MyHandler.player_usernames.remove(username)
def get_gamestate(self):
myID = self.get_game_id()
myGame = Game.running_games[myID]
self.send_response(200)
self.end_headers()
print(f'gamestate: {myGame.gamestate}')
if myGame.gamestate == 'waiting_room':
game_state = {'mode': 'waiting_room', 'players': list(myGame.players)}
elif myGame.gamestate == 'pick_roles':
game_state = {'mode': 'pick_roles', 'roles': myGame.selected_roles}
elif myGame.gamestate == 'show_cards':
game_state = {'mode': 'show_cards', 'roles': myGame.position_username_role}
elif myGame.gamestate == 'night':
#new_selected = myGame.selected[:]
#for index, player in enumerate(myGame.selected):
#if len(player) > 0 and player[-1] == True:
#new_selected[index] = player[:-1]
game_state = {'mode': 'night', 'active_roles': myGame.active_roles, 'selected': myGame.selected}
print(f'active_roles: {myGame.active_roles}')
print(f'selected: {myGame.selected}')
#print(f'new selected: {new_selected}')
elif myGame.gamestate == 'day':
game_state = {'mode': 'day', 'time': math.floor(myGame.day_length - (datetime.now() - myGame.day_start).total_seconds())}
elif myGame.gamestate == 'conclusion':
game_state = {'mode': 'conclusion', 'winners': myGame.winners}
self.wfile.write(json.dumps(game_state).encode('utf8'))
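    # Illustrative payload (a guess, not from the original source): during the
    # night phase the JSON written above looks roughly like
    #   {"mode": "night", "active_roles": [...], "selected": [...]}
    # The browser-side scripts poll this endpoint about once a second and
    # redraw the page according to "mode".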
def pick_roles(self):
myID = self.get_game_id()
myGame = Game.running_games[myID]
myGame.gamestate = 'pick_roles'
print(str(myGame.uuid) + ' GAME: MODE SWITCHED TO PICK ROLES')
self.send_response(200)
self.end_headers()
self.wfile.write(str('''
<html>
<head>
<style>
div.fixed {
position : fixed;
top : 40%%;
left : 38;
width : 200px;
height : 100px;
border: 3px solid #FFFFFF;
z-index : 1;
            }
div.relative {
position : relative;
left : 110px;
width : 1091px;
height : 300px;
border : 0px solid #FFFFFF;
z-index : 0;
}
</style>
</head>
<body bgcolor = '#000033' align = 'center'>
<div class = 'fixed' align = 'center'>
<font id = 'total_role_number' color = '#FFFFFF' size = '32'>
0 / %s
</font>
<br />
<font color = '#FFFFFF' size = '6'>
roles selected
</font>
</div>
<h2>
<button id='start_game' onclick='document.location.href = "/deal_cards?id=%s"' type = 'button' style = 'position : fixed; top : 55%%; left : 50; z-index : 1' disabled='true'>
<font size = '6'>
Start Game
</font>
</button>
</h2>
<div class = 'relative' style = 'display : inline-block'>
<h1>
<font color = '#FFFFFF'> One Night Ultimate Werewolf </font>
</h1>
<img src = 'sentinel.jpg' id = 'sentinel' width = '215px' height = '300px' style = 'border : 0px solid white' onclick = 'selectCard(this)'/>
<img src = 'doppleganger.jpg' id = 'doppleganger' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'werewolf.jpg' id = 'werewolf1' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'werewolf.jpg' id = 'werewolf2' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'alpha wolf.jpg' id = 'alpha wolf' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<br />
<img src = 'mystic wolf.jpg' id = 'mystic wolf' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'minion.jpg' id = 'minion' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'mason.jpg' id = 'mason1' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'mason.jpg' id = 'mason2' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'seer.jpg' id = 'seer' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<br />
<img src = 'apprentice seer.jpg' id = 'apprentice seer' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'paranormal investigator.jpg' id = 'paranormal investigator' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'robber.jpg' id = 'robber' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'witch.jpg' id = 'witch' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'troublemaker.jpg' id = 'troublemaker' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<br />
<img src = 'village idiot.jpg' id = 'village idiot' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'drunk.jpg' id = 'drunk' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'insomniac.jpg' id = 'insomniac' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'revealer.jpg' id = 'revealer' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'curator.jpg' id = 'curator' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<br />
<img src = 'villager.jpg' id = 'villager1' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'villager.jpg' id = 'villager2' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'villager.jpg' id = 'villager3' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'hunter.jpg' id = 'hunter' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'tanner.jpg' id = 'tanner' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<br />
<img src = 'dream wolf.jpg' id = 'dream wolf' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
<img src = 'bodyguard.jpg' id = 'bodyguard' width = '215px' height = '300px' style = 'border:0px solid white' onclick = 'selectCard(this)'/>
</div>
<script>
var total_roles_selected = [];
var number_of_players = %s;
function selectCard(element) {
if (element.style.border == '6px solid white') {
element.style.border = '0px solid white';
element.width = '215';
element.height = '300';
total_roles_selected.splice(total_roles_selected.indexOf(element.id), 1);
}
else {
element.style.border = '6px solid white';
element.width = '203';
element.height = '288';
total_roles_selected.push(element.id);
}
updateRoles(total_roles_selected);
document.getElementById('total_role_number').innerHTML = total_roles_selected.length + ' / ' + number_of_players;
if (total_roles_selected.length == number_of_players) {
document.getElementById('start_game').disabled = false;
}
else {
document.getElementById('start_game').disabled = true;
}
}
function updateRoles(roleList) {
var xhttp = new XMLHttpRequest();
xhttp.open("GET", "/set_roles?id=%s&roles=" + roleList.join(), true);
xhttp.send();
}
</script>
</body>
</html>
''' % (len(myGame.players) + 3, myID, len(myGame.players) + 3, myID)).encode('utf8'))
def load_image(self):
if self.path.startswith('/werewolf'):
self.path = '/werewolf.jpg'
elif self.path.startswith('/villager'):
self.path = '/villager.jpg'
return SimpleHTTPRequestHandler.do_GET(self)
def handleHomepage(self, invalidId=False):
cookies = SimpleCookie(self.headers.get('Cookie'))
self.send_response(200)
self.end_headers()
if 'username' not in cookies:
self.wfile.write('''<html><body>We noticed you are not signed in. To create or join a game, please sign in below.<br />
<button onclick='document.location.href="/set_username"'>SIGN IN</button>
<br /><br /><br />
'''.encode('utf8'))
else:
self.wfile.write('<html><body>'.encode('utf8'))
if invalidId != False:
self.wfile.write(f'''Error, the ID you entered was not found.<br />
<button onclick='document.location.href="/new_game"'>Host new game</button><br />
<br />
OR
<br />
<br />
<form action='/join_game' method='GET'>
Game Pin:<br />
<input type='text' name='gamepin' value='{invalidId}'/>
<input type='submit' value='Join!'/>
</form>
'''.encode('utf8'))
else:
self.wfile.write('''
<button onclick='document.location.href="/new_game"'>Host new game</button><br />
<br />
OR
<br />
<br />
<form action='/join_game' method='GET'>
Game Pin:<br />
<input type='text' name='gamepin'/>
<input type='submit' value='Join!'/>
</form>
'''.encode('utf8'))
self.wfile.write('''<br />
Here is a list of public games that you can join:<br />
<ul>
'''.encode('utf8'))
for game in Game.running_games.values():
self.wfile.write(f'''<li><a href='/waiting_room?id={game.uuid}'>{len(game.players)} player game</a></li>'''.encode('utf8'))
self.wfile.write('</ul></body></html>'.encode('utf8'))
def getUsername(self):
cookies = SimpleCookie(self.headers.get('Cookie'))
if 'username' not in cookies:
self.send_response(400)
self.end_headers()
raise KeyError('username not in cookies; sent response 400')
else:
return cookies['username'].value
def waiting_room(self):
myID = self.get_game_id()
myGame = Game.running_games[self.get_game_id()]
username = self.getUsername()
self.send_response(200)
self.end_headers()
        # Only register the player once, even if the waiting room is reloaded.
        if username not in myGame.players:
            myGame.players.append(username)
self.wfile.write(f'''
<html>
<body>
Welcome {username}!
<br />
There are <span id='player_number'>{len(myGame.players)}</span> players.
<br />'''.encode('utf-8'))
self.wfile.write('Here are the current players:<ol id = "player_list">'.encode('utf-8'))
        # Use a separate loop variable so ``username`` keeps referring to the
        # current player (it is checked below to show the host-only button).
        for player in myGame.players:
            self.wfile.write(f'<li>{player}</li>'.encode('utf-8'))
self.wfile.write(('''
</ol>
<button type = 'button' onclick = 'document.location.href = "/change_username"'>
Change username
</button>
<script>
function refreshPage() {
var xhttp = new XMLHttpRequest();
xhttp.open("GET", "/game_state?id=%s", true);
xhttp.send();
xhttp.onreadystatechange = function() {
if (this.readyState == 4 && this.status == 200) {
var response = JSON.parse(this.responseText);
var updatedPlayers = response['players']
if (response['mode'] == 'waiting_room') {
var player_list = document.getElementById('player_list');
while (player_list.firstChild) {
player_list.removeChild(player_list.firstChild);
}
for (element = 0; element < updatedPlayers.length; element++) {
var player = document.createElement('li');
player.innerHTML = updatedPlayers[element]
player_list.appendChild(player);
}
document.getElementById('player_number').innerHTML = updatedPlayers.length;
}
else if (response['mode'] == 'pick_roles') {
document.location.href = '/view_roles?id=%s'
}
console.log('updatedPlayerList: ' + this.responseText);
}
};
setTimeout(refreshPage, 1000);
}
setTimeout(refreshPage, 1000);
</script>
''' % (myID, myID)).encode('utf8'))
if myGame.players[0] == username:
self.wfile.write(('''
<button type = 'button' onclick = 'document.location.href = "/pick_roles?id=%s"'>
Select Roles
</button>
''' % myID).encode('utf8'))
self.wfile.write('''
</body>
</html>
'''.encode('utf8'))
def set_roles(self):
myID = self.get_game_id()
myGame = Game.running_games[myID]
arguments = urllib.parse.parse_qs(urllib.parse.urlparse(self.path).query, keep_blank_values=True)
if 'roles' in arguments:
if arguments['roles'] == ['']:
myGame.selected_roles = []
else:
myGame.selected_roles = arguments['roles'][0].split(",")
def view_roles(self):
myID = self.get_game_id()
myGame = Game.running_games[myID]
self.send_response(200)
self.end_headers()
self.wfile.write(str('''
<html>
<head>
<style>
div.fixed {
position : fixed;
top : 40%%;
left : 38;
width : 200px;
height : 100px;
border: 3px solid #FFFFFF;
z-index : 1;
}
div.relative {
position : relative;
left : 110px;
width : 1091px;
height : 300px;
border : 0px solid #FFFFFF;
z-index : 0;
}
</style>
</head>
<body bgcolor = '#000033' align = 'center'>
<div class = 'fixed' align = 'center'>
<font id = 'total_role_number' color = '#FFFFFF' size = '32'>
0 / %s
</font>
<br />
<font color = '#FFFFFF' size = '6'>
roles selected
</font>
</div>
<div class = 'relative' style = 'display : inline-block'>
<h1>
<font color = '#FFFFFF'> One Night Ultimate Werewolf </font>
</h1>
<img src = 'sentinel.jpg' id = 'sentinel' width = '215px' height = '300px' style = 'border : 0px solid white'/>
<img src = 'doppleganger.jpg' id = 'doppleganger' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'werewolf.jpg' id = 'werewolf1' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'werewolf.jpg' id = 'werewolf2' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'alpha wolf.jpg' id = 'alpha wolf' width = '215px' height = '300px' style = 'border:0px solid white'/>
<br />
<img src = 'mystic wolf.jpg' id = 'mystic wolf' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'minion.jpg' id = 'minion' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'mason.jpg' id = 'mason1' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'mason.jpg' id = 'mason2' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'seer.jpg' id = 'seer' width = '215px' height = '300px' style = 'border:0px solid white'/>
<br />
<img src = 'apprentice seer.jpg' id = 'apprentice seer' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'paranormal investigator.jpg' id = 'paranormal investigator' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'robber.jpg' id = 'robber' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'witch.jpg' id = 'witch' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'troublemaker.jpg' id = 'troublemaker' width = '215px' height = '300px' style = 'border:0px solid white'/>
<br />
<img src = 'village idiot.jpg' id = 'village idiot' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'drunk.jpg' id = 'drunk' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'insomniac.jpg' id = 'insomniac' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'revealer.jpg' id = 'revealer' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'curator.jpg' id = 'curator' width = '215px' height = '300px' style = 'border:0px solid white'/>
<br />
<img src = 'villager.jpg' id = 'villager1' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'villager.jpg' id = 'villager2' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'villager.jpg' id = 'villager3' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'hunter.jpg' id = 'hunter' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'tanner.jpg' id = 'tanner' width = '215px' height = '300px' style = 'border:0px solid white'/>
<br />
<img src = 'dream wolf.jpg' id = 'dream wolf' width = '215px' height = '300px' style = 'border:0px solid white'/>
<img src = 'bodyguard.jpg' id = 'bodyguard' width = '215px' height = '300px' style = 'border:0px solid white'/>
</div>
<script>
var total_roles_selected = [];
var number_of_players = %s;
function refreshPage() {
var xhttp = new XMLHttpRequest();
xhttp.open("GET", "/game_state?id=%s", true);
xhttp.send();
xhttp.onreadystatechange = function() {
if (this.readyState == 4 && this.status == 200) {
var response = JSON.parse(this.responseText);
var updatedRoles = response['roles']
if (response['mode'] == 'pick_roles') {
console.log('updated roles: ' + updatedRoles);
for (var index = 0; index < total_roles_selected.length; index++) {
var role = total_roles_selected[index];
if (updatedRoles.indexOf(role) == -1) {
var element = document.getElementById(role);
element.style.border = '0px solid white';
element.width = '215';
element.height = '300';
}
}
for (var index = 0; index < updatedRoles.length; index++) {
var role = updatedRoles[index];
var element = document.getElementById(role);
element.style.border = '6px solid white';
element.width = '203';
element.height = '288';
}
total_roles_selected = updatedRoles;
document.getElementById('total_role_number').innerHTML = total_roles_selected.length + ' / ' + number_of_players;
}
else {
document.location.href = '/show_cards?id=%s';
}
}
}
setTimeout(refreshPage, 1000);
}
setTimeout(refreshPage, 1000);
</script>
</body>
</html>
''' % (len(myGame.players) + 3, len(myGame.players) + 3, myID, myID)).encode('utf8'))
def deal_cards(self):
myID = self.get_game_id()
myGame = Game.running_games[myID]
if len(myGame.players) + 3 != len(myGame.selected_roles):
print('there was an error: incorrect number of roles')
self.send_response(200)
self.end_headers()
self.wfile.write(f'''<html><body>I'm sorry, you must've pressed the start game too fast. The server sees {myGame.players} as players and {myGame.selected_roles} as roles.</body></html>'''.encode('utf8'))
else:
print('DEALING CARDS')
available_roles = myGame.selected_roles[:]
for player in myGame.players:
choice = random.choice(available_roles)
myGame.position_username_role.append((player, choice))
available_roles.remove(choice)
for center in range(1, 4):
choice = random.choice(available_roles)
myGame.position_username_role.append((f'Center{center}', choice))
available_roles.remove(choice)
myGame.gamestate = 'show_cards'
self.send_response(302)
self.send_header('Location', '/show_cards?id=%s' % myID)
self.end_headers()
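    # After dealing, position_username_role holds (name, role) pairs in seating
    # order, players first and then "Center1".."Center3", e.g. (illustrative
    # names, not from the original source):
    #   [("alice", "seer"), ("bob", "werewolf1"), ("Center1", "villager2")]
    # show_cards below locates the requesting player's index in this list to
    # orient the drawn table.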
def show_cards(self):
myID = self.get_game_id()
myGame = Game.running_games[myID]
self.send_response(200)
self.end_headers()
cookies = SimpleCookie(self.headers.get('Cookie'))
username = cookies['username'].value
my_index = None
for index, player_role in enumerate(myGame.position_username_role):
if player_role[0] == username:
my_index = index
print(f'{username}\'s index is {my_index}, which matches the role {player_role[1]}')
my_role = myGame.position_username_role[my_index][1]
self.wfile.write('''
<html>
<head>
<title>ONUW TABLE</title>
<style>
body {
background-image : url('Table.jpg');
}
#clock {
color: red;
font-size: 50px;
font-weight: bold;
}
</style>
</head>
<body>
<!--<div style = 'position : fixed; height : 1; width : 1; left : 700; top : 360; border : 3px solid #000000'>
</div>-->'''.encode('utf8'))
if my_index == 0:
self.wfile.write(f'''<!--<button id='start_night_button' type='button' onclick='document.location.href="/start_night?id={myID}"'>Start night</button>-->'''.encode('utf8'))
self.wfile.write(('''
<script>
var firstRefresh = true;
var player_role_list = %s;
var my_index = %s;
var my_role;
var alreadyRefreshedNight = false;
var mySelections = [];
var previouslyActive;
var turnDeployed = false;
var isDay = false;
var myVote;
function drawBoard() {
var total_player_number = player_role_list.length - 3;
for (var player = 0; player < total_player_number; player++) {
var angle = (360.0 / total_player_number) * (player - my_index) - 90;
var y = Math.sin((angle / 360.0) * (2 * Math.PI)) * 280;
var x = Math.cos((angle / 360.0) * (2 * Math.PI)) * 280;
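// Layout sketch: player cards sit on a circle of radius 280px around the table
// centre (roughly left=735, top=380). The angle is offset by my_index so the
// local player's own card always lands at the bottom of the screen, and each
// card is rotated to face the table centre.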
var image = document.createElement('img');
image.src = 'Card Backside.jpg';
image.id = player_role_list[player][0];
image.width = '71';
image.height = '100';
image.style.transform = 'rotate(' + (-(angle + 90)) + 'deg)';
image.style.position = 'fixed';
image.style.left = 735 + x - 35;
image.style.top = 380 - y - 50;
//really important onclick for selection:
image.setAttribute('onclick', 'select(this)');
document.body.appendChild(image);
y *= 13/10;
x *= 13/10;
var name = document.createElement('div');
name.innerHTML = player_role_list[player][0];
name.style.transform = 'rotate(' + (-(angle + 90)) + 'deg)';
name.style.position = 'fixed';
name.style.width = '300';
name.style.left = 735 + x - 150;
name.style.top = 372 - y;
name.style.textAlign = 'center';
name.style.fontWeight = 'bold';
name.style.color = 'white';
document.body.appendChild(name);
}
var centerRotation = my_index * 360 / total_player_number;
for (var player = 0; player < 2; player++) {
var angle = -(360.0 / 2) * player - centerRotation;
var card;
y = Math.sin((angle / 360.0) * (2 * Math.PI)) * 80;
x = Math.cos((angle / 360.0) * (2 * Math.PI)) * 80;
card = document.createElement('img');
card.src = 'Card Backside.jpg';
card.id = 'Center' + (player * 2 + 1);
card.width = '71';
card.height = '100';
card.style.transform = 'rotate(' + (-angle + 180 * player) + 'deg)';
card.style.position = 'fixed';
card.style.left = 735 + x - 35;
card.style.top = 380 - y - 50;
//really important onclick for selection:
card.setAttribute('onclick', 'select(this)');
document.body.appendChild(card);
}
var card = document.createElement('img');
card.src = 'Card Backside.jpg';
card.id = 'Center2';
card.width = '71';
card.height = '100';
card.style.transform = 'rotate(' + (centerRotation) + 'deg)';
card.style.position = 'fixed';
card.style.left = 735 - 35;
card.style.top = 380 - 50;
//really important onclick for selection:
card.setAttribute('onclick', 'select(this)');
document.body.appendChild(card);
}
function myTurn() {
my_role = player_role_list[my_index][1];
switch (my_role) {
case 'alpha wolf':
alpha_wolf();
case 'mystic wolf':
mystic_wolf();
case 'werewolf1':
case 'werewolf2':
werewolf();
break;
case 'apprentice seer':
apprentice_seer();
break;
case 'bodyguard':
bodyguard();
break;
case 'curator':
curator();
break;
case 'doppleganger':
doppleganger();
break;
case 'dream wolf':
dream_wolf();
break;
case 'drunk':
drunk();
break;
case 'hunter':
hunter();
break;
case 'insomniac':
insomniac();
break;
case 'mason1':
case 'mason2':
mason();
break;
case 'minion':
minion();
break;
case 'paranormal investigator':
paranormal_investigator();
break;
case 'revealer':
revealer();
break;
case 'robber':
robber();
break;
case 'seer':
seer();
break;
case 'sentinel':
sentinel();
break;
case 'tanner':
tanner();
break;
case 'troublemaker':
troublemaker();
break;
case 'village idiot':
village_idiot();
break;
case 'villager':
villager();
break;
case 'witch':
witch();
break;
}
turnDeployed = true;
}
function doDivTextbox(message) {
var div = document.createElement('div');
div.id = 'div_textbox';
div.innerHTML = 'Awaken ' + my_role + '!<br />' + message;
div.style = 'position: absolute; top: 20px; left: 40%%; background-color: white; border-style: solid; border-color: red; width: 20%%;';
div.align = 'center';
document.body.appendChild(div);
}
function werewolf() {
//console.log('werewolf function called!');
var partnerWolf = false;
for (var index = 0; index < player_role_list.length - 3; index++) {
if ((player_role_list[index][1].indexOf('wolf') != -1) && player_role_list[index][1] != my_role) {
partnerWolf = player_role_list[index][0];
}
}
if (partnerWolf != false) {
doDivTextbox('Your partner is ' + partnerWolf + '.');
}
else {
doDivTextbox('You are the lone wolf. Select a card in the center to view.');
}
}
function werewolfSelect(selected) {
//console.log('werewolf select function called!');
var partnerWolf = false;
for (var index = 0; index < player_role_list.length - 3; index++) {
if ((player_role_list[index][1].indexOf('wolf') != -1) && player_role_list[index][1] != my_role) {
partnerWolf = player_role_list[index][0];
}
}
if (partnerWolf == false) {
if ((selected.id == 'Center1' || selected.id == 'Center2' || selected.id == 'Center3') && mySelections.length < 1) {
reveal(selected);
for (var index = 0; index < player_role_list.length; index++) {
if (player_role_list[index][0] == selected.id) {
return true;
}
}
}
}
return false;
}
function minion() {
var wolves = [];
for (var index = 0; index < player_role_list.length - 3; index++) {
if (player_role_list[index][1].indexOf('wolf') != -1) {
wolves.push(player_role_list[index][0]);
}
}
if (wolves.length == 0) {
doDivTextbox('There are no werewolves in play.');
}
else {
var message;
if (wolves.length > 1) {
message = 'There are ' + wolves.length + ' werewolves in play. They are ' + wolves + '.';
}
else {
message = 'There is ' + wolves.length + ' werewolf in play. It is ' + wolves + '.';
}
doDivTextbox(message);
}
updateAction(-1);
}
function troublemaker() {
doDivTextbox('Choose two players to switch their roles. You do not get to see their roles.');
}
function troublemakerSelect(selected) {
if (!(selected.id == 'Center1' || selected.id == 'Center2' || selected.id == 'Center3')) {
if (mySelections.length < 2) {
for (var index = 0; index < player_role_list.length; index++) {
if (player_role_list[index][0] == selected.id && mySelections.indexOf(index) == -1) {
return true;
}
}
}
}
return false;
}
function witch() {
doDivTextbox('Choose a center card to view. Then, choose a player to give that card to.');
}
function witchSelect(selected) {
//console.log('my selections: ' + mySelections);
if (mySelections.length < 1) {
if (selected.id == 'Center1' || selected.id == 'Center2' || selected.id == 'Center3') {
for (var index = 0; index < player_role_list.length; index++) {
if (player_role_list[index][0] == selected.id && mySelections.indexOf(index) == -1) {
reveal(selected);
console.log('1st selection: pushed ' + index);
return true;
}
}
}
}
else if (mySelections.length < 2) {
if (!(selected.id == 'Center1' || selected.id == 'Center2' || selected.id == 'Center3')) {
for (var index = 0; index < player_role_list.length; index++) {
if (player_role_list[index][0] == selected.id && mySelections.indexOf(index) == -1) {
console.log('2nd selection pushed ' + index);
return true;
}
}
}
}
return false;
}
function select(element) {
var selected = element;
console.log('select got ' + selected);
if (isDay) {
vote(element);
}
else {
var update = false;
switch (my_role) {
case 'alpha wolf':
update = alpha_wolfSelect(selected);
case 'mystic wolf':
update = mystic_wolfSelect(selected);
case 'werewolf1':
case 'werewolf2':
update = werewolfSelect(selected);
break;
case 'apprentice seer':
update = apprentice_seerSelect(selected);
break;
case 'bodyguard':
update = bodyguardSelect(selected);
break;
case 'curator':
update = curatorSelect(selected);
break;
case 'doppleganger':
update = dopplegangerSelect(selected);
break;
case 'dream wolf':
update = dream_wolfSelect(selected);
break;
case 'drunk':
update = drunkSelect(selected);
break;
case 'hunter':
update = hunterSelect(selected);
break;
case 'paranormal investigator':
update = paranormal_investigatorSelect(selected);
break;
case 'revealer':
update = revealerSelect(selected);
break;
case 'robber':
update = robberSelect(selected);
break;
case 'seer':
update = seerSelect(selected);
break;
case 'sentinel':
update = sentinelSelect(selected);
break;
case 'troublemaker':
update = troublemakerSelect(selected);
break;
case 'village idiot':
update = village_idiotSelect(selected);
break;
case 'witch':
update = witchSelect(selected);
break;
}
if (update) {
for (var index = 0; index < player_role_list.length; index++) {
if (player_role_list[index][0] == selected.id) {
updateAction(index);
}
}
}
}
}
function reveal(element) {
console.log('revealing ' + element);
for (var index = 0; index < player_role_list.length; index++) {
if (player_role_list[index][0] == element.id) {
element.src = player_role_list[index][1] + '.jpg';
}
}
}
function endTurn(mySelected) {
for (var index = 0; index < mySelected.length; index++) {
document.getElementById(player_role_list[mySelected[index]][0]).src = 'Card Backside.jpg';
}
var textbox = document.getElementById('div_textbox');
textbox.parentNode.removeChild(textbox);
turnDeployed = false;
}
function updateAction(index) {
console.log('updating action on index ' + index);
mySelections.push(index);
var xhttp = new XMLHttpRequest();
xhttp.open("GET", "/add_selected?id=%s&selected=" + index, true);
xhttp.send();
}
function refreshPage() {
var xhttp = new XMLHttpRequest();
xhttp.open("GET", "/game_state?id=%s", true);
xhttp.send();
xhttp.onreadystatechange = function() {
if (this.readyState == 4 && this.status == 200) {
var response = JSON.parse(this.responseText);
if (firstRefresh) {
drawBoard();
firstRefresh = false;
}
if (response['mode'] == 'show_cards') {
document.getElementById(player_role_list[my_index][0]).src = player_role_list[my_index][1] + '.jpg';
if (my_index == 0 && document.getElementById('start_night_button') == null) {
var child = document.createElement('button');
child.id = 'start_night_button';
child.type = 'button';
child.setAttribute('onclick', 'document.location.href="/start_night?id=%s"');
child.innerHTML = 'Start Night';
document.body.appendChild(child);
}
}
else if (response['mode'] == 'night') {
//console.log('active roles: ' + response['active_roles'] + ' vs previously ' + previouslyActive + ' - ' + (previouslyActive == response['active_roles']));
if (previouslyActive == null || previouslyActive[0] != response['active_roles'][0]) {
previouslyActive = response['active_roles'];
if (response['active_roles'].indexOf(player_role_list[my_index][1]) != -1) {
//console.log('my turn! ' + '- ' + previouslyActive);
myTurn();
}
}
var mySelected = response['selected'][my_index]
if (mySelected[mySelected.length - 1] == true) {
mySelected.pop();
if (turnDeployed) {
//console.log('my selected: ' + mySelected);
setTimeout(function(){ if (turnDeployed) { endTurn(mySelected) } }, 5000);
}
}
if (alreadyRefreshedNight == false) {
alreadyRefreshedNight = true;
mySelections = mySelected;
var child = document.getElementById('start_night_button');
if (child != null) {
child.parentNode.removeChild(child);
}
document.getElementById(player_role_list[my_index][0]).src = 'Card Backside.jpg';
for (var item = 0; item < mySelected.length; item++) {
var selected = document.getElementById(player_role_list[mySelected[item]][0]);
switch (my_role) {
case 'alpha wolf':
alpha_wolfSelect(selected);
case 'mystic wolf':
mystic_wolfSelect(selected);
case 'werewolf1':
case 'werewolf2':
werewolfSelect(selected);
break;
case 'apprentice seer':
apprentice_seerSelect(selected);
break;
case 'bodyguard':
bodyguardSelect(selected);
break;
case 'curator':
curatorSelect(selected);
break;
case 'doppleganger':
dopplegangerSelect(selected);
break;
case 'dream wolf':
dream_wolfSelect(selected);
break;
case 'drunk':
drunkSelect(selected);
break;
case 'hunter':
hunterSelect(selected);
break;
case 'paranormal investigator':
paranormal_investigatorSelect(selected);
break;
case 'revealer':
revealerSelect(selected);
break;
case 'robber':
robberSelect(selected);
break;
case 'seer':
seerSelect(selected);
break;
case 'sentinel':
sentinelSelect(selected);
break;
case 'troublemaker':
troublemakerSelect(selected);
break;
case 'village idiot':
village_idiotSelect(selected);
break;
case 'witch':
witchSelect(selected);
break;
}
}
}
}
else if (response['mode'] == 'day') {
if (!isDay) {
isDay = true;
}
if (turnDeployed) {
endTurn(mySelections);
}
if (document.getElementById('clock') == null) {
var element = document.createElement('div');
element.id = 'clock';
document.body.appendChild(element);
}
document.getElementById('clock').innerHTML = response['time'];
}
else if (response['mode'] == 'conclusion') {
var winner = false;
for (var index = 0; index < response['winners'].length; index++) {
if (response['winners'][index] == my_role) {
console.log('you are a winner!');
winner = true;
}
}
}
}
}
setTimeout(refreshPage, 1000);
}
setTimeout(refreshPage, 1000);
function vote(element) {
if (!(element.id == 'Center1' || element.id == 'Center2' || element.id == 'Center3')) {
if (element == myVote) {
var xhttp = new XMLHttpRequest();
xhttp.open("GET", "/vote?id=%s&for=-1", true);
xhttp.send();
myVote.style.border = '0px solid red';
myVote = null;
}
else {
if (myVote != null) {
myVote.style.border = '0px solid red';
}
var xhttp = new XMLHttpRequest();
xhttp.open("GET", "/vote?id=%s&for=" + element.id, true);
xhttp.send();
element.style.border = '4px solid red';
myVote = element;
}
}
}
</script>
</body>
</html>
''' % ([list(tuplepair) for tuplepair in myGame.position_username_role], my_index, myID, myID, myID, myID, myID)).encode('utf8'))
def start_night(self):
myID = self.get_game_id()
myGame = Game.running_games[myID]
myGame.selected = []
for entry in myGame.position_username_role[:-3]:
myGame.selected.append([])
myGame.gamestate = 'night'
myGame.active_roles = []
myGame.progress_night()
myGame.check_completed_section()
self.send_response(302)
self.send_header('Location', f'/show_cards?id={myID}')
self.end_headers()
def get_game_id(self):
arguments = urllib.parse.parse_qs(urllib.parse.urlparse(self.path).query, keep_blank_values=True)
print('get game id arguments: ' + str(arguments))
if 'id' in arguments:
if arguments['id'][0] in Game.running_games:
return arguments['id'][0]
else:
self.send_response(400)
self.end_headers()
self.wfile.write('<html><body>This game is not currently running. Return to the homepage <a href="/">here</a>.'.encode('utf8'))
raise ValueError('no running games under pin; sent response 400')
else:
self.send_response(400)
self.end_headers()
raise KeyError(f'no ID given in url; sent response 400 (path was {self.path})')
def new_game(self):
gameID = uuid.uuid1().hex
Game.newGame(gameID)
self.send_response(302)
self.send_header('Location', '/join_game?id=%s' % gameID)
self.end_headers()
self.wfile.write('You should be redirected'.encode('utf8'))
def join_game(self):
myID = self.get_game_id()
if Game.canJoin(myID):
self.send_response(302)
self.send_header('Location', '/waiting_room?id=%s' % myID)
self.end_headers()
else:
return self.handleHomepage(myID)
def add_selected(self):
myID = self.get_game_id()
myGame = Game.running_games[myID]
cookies = SimpleCookie(self.headers.get('Cookie'))
username = cookies['username'].value
arguments = urllib.parse.parse_qs(urllib.parse.urlparse(self.path).query, keep_blank_values=True)
if 'selected' in arguments:
self.send_response(200)
self.end_headers()
added = int(arguments['selected'][0])
my_index = None
for index, player_role in enumerate(myGame.position_username_role):
if player_role[0] == username:
my_index = index
if len(myGame.selected[my_index]) > 0 and myGame.selected[my_index][-1] == True:
raise RuntimeError(f'{username} just tried to send extra roles. Uh oh!')
if added != -1:
myGame.selected[my_index].append(added)
if myGame.selection_is_done(my_index):
myGame.selected[my_index].append(True)
print(f'selection from {myGame.position_username_role[my_index][0]} AKA {myGame.position_username_role[my_index][1]} is complete.')
myGame.check_completed_section()
def vote(self):
myID = self.get_game_id()
myGame = Game.running_games[myID]
cookies = SimpleCookie(self.headers.get('Cookie'))
username = cookies['username'].value
arguments = urllib.parse.parse_qs(urllib.parse.urlparse(self.path).query, keep_blank_values=True)
if len(myGame.votes) != len(myGame.position_username_role) - 3:
myGame.votes = []
for player in myGame.position_username_role[:-3]:
myGame.votes.append(None)
if 'for' in arguments:
for index, values in enumerate(myGame.position_username_role[:-3]):
if values[0] == username:
if arguments['for'][0] == '-1':
print(f"resetting {username}'s vote to None")
myGame.votes[index] = None
else:
myGame.votes[index] = arguments['for'][0]
self.send_response(200)
self.end_headers()
for vote in myGame.votes:
if vote == None:
break
else:
print('VOTING IS FINISHED!!!')
myGame.gamestate = 'conclusion'
myGame.calculate_winners()
class Game:
running_games = {}
def __init__(self, uuid, gamestate, players=None, selected_roles=None, position_username_role=None, day_length=5*60, active_roles=None, selected=None, day_start=None, votes=None, winners=None):
self.uuid = uuid
self.gamestate = gamestate
# use fresh lists per game: mutable default arguments would be shared between games
self.players = players if players is not None else []
self.selected_roles = selected_roles if selected_roles is not None else []
self.position_username_role = position_username_role if position_username_role is not None else []
self.day_length = day_length
self.active_roles = active_roles
self.selected = selected
self.day_start = day_start
self.votes = votes if votes is not None else []
self.winners = winners if winners is not None else []
def newGame(uuid):
Game.running_games[uuid] = Game(uuid, 'waiting_room')
def canJoin(uuid):
if uuid in Game.running_games:
myGame = Game.running_games[uuid]
else:
return False
if myGame.gamestate == 'waiting_room':
return True
else:
return False
def seed_game(self):
choice = 0
if choice == 0:
self.gamestate = 'day'
self.players = ['Jmccand', 'Safari', 'Firefox']
self.selected_roles = ['werewolf1', 'troublemaker', 'witch', 'werewolf2', 'doppelganger', 'villager2']
self.position_username_role = [('Jmccand', 'witch'), ('Safari', 'troublemaker'), ('Firefox', 'werewolf1'), ('Center1', 'werewolf2'), ('Center2', 'doppelganger'), ('Center3', 'villager2')]
self.selected = [[8, 5, True], [5, 4, True], [7, True]]
self.day_start = datetime.now()
elif choice == 1:
self.gamestate = 'day'
self.players = ['Jmccand', 'Safari', 'DadMcDadDad', 'Firefox', 'rando1', 'rando2']
self.selected_roles = ['werewolf1', 'minion', 'werewolf2', 'doppelganger', 'villager1', 'villager2', 'villager3', 'troublemaker', 'witch']
self.position_username_role = [('Jmccand', 'witch'), ('Safari', 'troublemaker'), ('Firefox', 'werewolf1'), ('rando1', 'villager3'), ('rando2', 'minion'), ('DadMcDadDad', 'villager1'), ('Center1', 'werewolf2'), ('Center2', 'doppelganger'), ('Center3', 'villager2')]
self.selected = [[8, 5, True], [5, 4, True], [7, True], [], [True], []]
self.day_start = datetime.now()
else:
self.gamestate = 'night'
self.players = ['Jmccand', 'Safari', 'DadMcDadDad', 'Firefox', 'rando1', 'rando2']
self.selected_roles = ['werewolf1', 'minion', 'werewolf2', 'doppelganger', 'villager1', 'villager2', 'villager3', 'troublemaker', 'witch']
self.position_username_role = [('Jmccand', 'witch'), ('Safari', 'troublemaker'), ('Firefox', 'werewolf1'), ('rando1', 'villager3'), ('rando2', 'minion'), ('DadMcDadDad', 'villager1'), ('Center1', 'werewolf2'), ('Center2', 'doppelganger'), ('Center3', 'villager2')]
self.selected = []
for entry in self.position_username_role[:-3]:
self.selected.append([])
self.selected[4].append(True)
self.active_roles = []
self.progress_night()
def check_completed_section(self):
completed_section = True
for index, others in enumerate(self.position_username_role[:-3]):
print(f'evaluating index {index}, which is {others[0]}')
this_selection = self.selected[index]
print(f'this selection: {this_selection}')
if others[1] in self.active_roles and (len(this_selection) < 1 or this_selection[len(this_selection) - 1] != True):
completed_section = False
if completed_section:
print('progressing night to next stage')
self.progress_night()
def progress_night(self):
#this full night order (one at a time) won't be needed. Virtually, a lot of these roles can go concurrently
#night_order = ('sentinel', 'doppelganger', 'werewolf', 'alpha wolf', 'mystic wolf', 'minion', 'mason', 'seer', 'apprentice seer', 'paranormal investigator', 'robber', 'witch', 'troublemaker', 'village idiot', 'drunk', 'insomniac', 'revealer', 'curator')
group1 = set(['sentinel'])
group2 = set(['doppelganger', 'werewolf1', 'werewolf2', 'alpha wolf'])
group3 = set(['mystic wolf', 'minion', 'mason1', 'mason2', 'seer', 'apprentice seer', 'paranormal investigator', 'robber', 'witch', 'troublemaker', 'village idiot', 'drunk'])
group4 = set(['insomniac', 'revealer', 'curator'])
if self.active_roles == []:
self.active_roles = list(group1)
elif self.active_roles == list(group1):
self.active_roles = list(group2)
elif self.active_roles == list(group2):
self.active_roles = list(group3)
elif self.active_roles == list(group3):
self.active_roles = list(group4)
elif self.active_roles == list(group4):
self.active_roles = []
else:
raise ValueError(f'when trying to progress the night, the active roles ({self.active_roles}) was not recognized.')
if self.active_roles == []:
print('NIGHT IS FINISHED!!!!')
_thread.start_new_thread(self.change_to_day, ())  # pass the callable and args; do not call it here
else:
for index, others in enumerate(self.position_username_role[:-3]):
if self.position_username_role[index] in active_roles:
self.selected[index].append(True)
print(f'selection from {self.position_username_role[index][0]} AKA {self.position_username_role[index][1]} is complete.')
self.check_completed_section()
def selection_is_done(self, my_index=None):
total_roles = set(['sentinel', 'doppleganger', 'werewolf1', 'werewolf2', 'alpha wolf', 'mystic wolf', 'minion', 'mason1', 'mason2', 'seer', 'apprentice seer', 'paranormal investigator', 'robber', 'witch', 'troublemaker', 'village idiot', 'drunk', 'insomniac', 'revealer', 'curator', 'villager1', 'villager2', 'villager3', 'hunter', 'tanner', 'dream wolf', 'bodyguard'])
#select0 won't actually be used, it is only for reference
select0 = set(['minion', 'mason1', 'mason2', 'insomniac', 'villager1', 'villager2', 'villager3', 'hunter', 'tanner', 'dream wolf', 'bodyguard'])
select1 = set(['sentinel', 'apprentice seer', 'robber', 'village idiot'])
select2 = set(['witch', 'troublemaker'])
depending_roles = set(['doppleganger', 'werewolf1', 'werewolf2', 'alpha wolf', 'mystic wolf', 'seer', 'paranormal investigator'])
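# Example: the witch is a select2 role (she first views a center card, then gives
# it to a player), so her selection list is complete once it holds two indices.
# A werewolf with a partner needs no selection (select0), while a lone wolf peeks
# at one center card and therefore counts as select1.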
if my_index == None:
for index, player_role in enumerate(self.position_username_role):
if player_role[0] == username:
my_index = index
my_role = self.position_username_role[my_index][1]
number_selected = len(self.selected[my_index])
if my_role in depending_roles:
if 'wolf' in my_role:
partner = False
for player, role in self.position_username_role[:-3]:
if 'wolf' in role and role != my_role:
partner = True
if partner:
select0.add(my_role)
else:
select1.add(my_role)
#assert select0 + select1 + select2 == total_roles
assert select0.intersection(select1) == set()
assert select1.intersection(select2) == set()
assert select2.intersection(select0) == set()
if my_role in select0:
return number_selected == 0
elif my_role in select1:
return number_selected == 1
elif my_role in select2:
return number_selected == 2
else:
raise ValueError('the role that selected has not been placed in the select0, select1, or select2 set')
def change_to_day(self):
time.sleep(5)
self.gamestate = 'day'
self.day_start = datetime.now()
def calculate_winners(self):
total_tally = {}
for vote in self.votes:
if vote in total_tally:
total_tally[vote] += 1
else:
total_tally[vote] = 1
max_votes = 0
voted = []
for key, value in total_tally.items():
if value > max_votes:
max_votes = value
voted = [key]
elif value == max_votes:
voted.append(key)
# votes are recorded as usernames (the clicked card ids), so map them back to roles
username_to_role = dict(self.position_username_role[:-3])
voted_roles = [username_to_role.get(name, name) for name in voted]
#total_roles = set(['sentinel', 'doppleganger', 'werewolf1', 'werewolf2', 'alpha wolf', 'mystic wolf', 'minion', 'mason1', 'mason2', 'seer', 'apprentice seer', 'paranormal investigator', 'robber', 'witch', 'troublemaker', 'village idiot', 'drunk', 'insomniac', 'revealer', 'curator', 'villager1', 'villager2', 'villager3', 'hunter', 'tanner', 'dream wolf', 'bodyguard'])
werewolves = set(['werewolf1', 'werewolf2', 'mystic wolf', 'alpha wolf', 'dream wolf'])
villagers = set(['sentinel', 'mason1', 'mason2', 'seer', 'villager1', 'apprentice seer', 'paranormal investigator', 'robber', 'witch', 'troublemaker', 'village idiot', 'drunk', 'insomniac', 'revealer', 'curator', 'villager2', 'villager3', 'hunter', 'bodyguard'])
if 'tanner' in voted_roles:
self.winners = ['tanner']
print('tanner won!')
else:
for role in voted_roles:
if role in werewolves:
self.winners = list(villagers)
print('villagers won!')
break
if self.winners == []:
self.winners = list(werewolves) + ['minion']
print('werewolves won!')
class ReuseHTTPServer(HTTPServer):
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
class Cards:
def __init__(self, name, frontside, validation, ability = 1):
self.name = name
self.frontside = frontside
self.validation = validation
self.ability = ability
# center, edge, werewolves, oneself
def validate(self, selected_player, matched_roles):
center = selected_player in ("Center1", "Center2", "Center3")
werewolves = matched_roles[selected_player].is_a_werewolf()
oneself = (matched_roles[selected_player] != self)
edge = not center and not werewolves and oneself
for factor in range(0, 4):
if (center, edge, werewolves, oneself)[factor]:
if not self.validation[factor]:
return False
return True
def is_a_werewolf(self):
return self.name in ("alpha wolf", "dream wolf", "mystic wolf", "werewolf", "doppelganger-alpha wolf", "doppelganger-dream wolf", "doppelganger-mystic wolf", "doppelganger-werewolf")
def switch_cards(self, player1, player2, players):
original = players[player1]
players[player1] = players[player2]
players[player2] = original
return players
#def place_token(self, player, tokens):
# def see(self, player, players):
active_roles = {
Cards("alpha wolf", None, (False, True, False, False)) : False,
Cards("apprentice seer", None, (True, False, False, False)) : False,
Cards("bodyguard", None, None) : False,
Cards("curator", None, (False, True, True, True)) : False,
Cards("doppelganger", None, (False, True, True, False)) : False,
Cards("<NAME>", None, None) : False,
Cards("drunk", None, (True, False, False, False)) : False,
Cards("hunter", None, None) : False,
Cards("insomniac", None, None) : False,
Cards("mason", None, None) : False,
Cards("mason", None, None) : False,
Cards("minion", None, None) : False,
Cards("<NAME>", None, (False, True, True, False)) : False,
Cards("paran<NAME>", None, (False, True, True, False)) : False,
Cards("revealer", None, (False, True, True, False)) : False,
Cards("robber", None, (False, True, True, False)) : False,
Cards("seer", None, (True, True, True, False)) : False,
Cards("sentinel", None, (False, True, True, True)) : False,
Cards("tanner", None, None) : False,
Cards("troublemaker", None, (False, True, True, False)) : False,
Cards("village idiot", None, None) : False,
Cards("villager", None, None) : False,
Cards("villager", None, None) : False,
Cards("villager", None, None) : False,
Cards("werewolf", None, None) : False,
Cards("werewolf", None, None) : False,
#!!!!! FIX THE WITCH VALIDATIONS
Cards("witch", None, (True, True, True, True)) : False
}
matched_roles = {
}
night_order = (
Cards("sentinel", None, (False, True, True, True)),
Cards("doppelganger", None, (False, True, True, False)),
Cards("werewolf", None, None),
Cards("alpha wolf", None, (False, True, False, False)),
Cards("mystic wolf", None, (False, True, True, False)),
Cards("minion", None, None),
Cards("mason", None, None),
Cards("seer", None, (True, True, True, False)),
Cards("apprentice seer", None, (True, False, False, False)),
Cards("paranormal investigator", None, (False, True, True, False)),
Cards("robber", None, (False, True, True, False)),
Cards("witch", None, (True, True, True, True)),
Cards("troublemaker", None, (False, True, True, False)),
Cards("village idiot", None, None),
Cards("drunk", None, (True, False, False, False)),
#These three have the doppelganger after them:
Cards("insomniac", None, None),
Cards("revealer", None, (False, True, True, False)),
Cards("curator", None, (False, True, True, True))
)
def main():
print("One Night Werewolf Web")
Game.newGame('16e7691a8f5211eb80a4a683e7b3717c')
myGame = Game.running_games['16e7691a8f5211eb80a4a683e7b3717c']
myGame.seed_game()
httpd = ReuseHTTPServer(('0.0.0.0', 8000), MyHandler)
httpd.serve_forever()
if __name__ == '__main__':
main()
``` |
{
"source": "jmccann-REH/aqueduct30_data_download",
"score": 2
} |
#### File: aqueduct30_data_download/scripts/Y2019M07D11_RH_Aqueduct30_Data_Download.py
```python
SCRIPT_NAME = "Y2019M07D11_RH_Aqueduct30_Data_Download"
OUTPUT_VERSION = 1
S3_INPUT_PATH = {}
S3_INPUT_PATH["master_geom_simplified"] = "s3://wri-projects/Aqueduct30/processData/Y2019M07D09_RH_Simplified_Geometries_V01/output_V02"
S3_INPUT_PATH["annual"] = "s3://wri-projects/Aqueduct30/finalData/Y2019M01D14_RH_Aqueduct_Results_V01/output_V04/annual"
# We simplified the master_geom using various keep_percentages.
KEEP_PERCENT = 30
INPUT_FILENAME = {}
INPUT_FILENAME["master_geom_simplified"] = "mastergeom_mapshaper_visvalingam_keeppercent{:03.0f}_v01.shp".format(KEEP_PERCENT)
INPUT_FILENAME["annual"] = "annual_pivot.pkl"
ec2_input_path = "/volumes/data/{}/input_V{:02.0f}".format(SCRIPT_NAME,OUTPUT_VERSION)
ec2_output_path = "/volumes/data/{}/output_V{:02.0f}".format(SCRIPT_NAME,OUTPUT_VERSION)
s3_output_path = "s3://wri-projects/Aqueduct30/processData/{}/output_V{:02.0f}/".format(SCRIPT_NAME,OUTPUT_VERSION)
print(s3_output_path)
# In[2]:
import time, datetime, sys
dateString = time.strftime("Y%YM%mD%d")
timeString = time.strftime("UTC %H:%M")
start = datetime.datetime.now()
print(dateString,timeString)
sys.version
get_ipython().magic('matplotlib inline')
# In[3]:
get_ipython().system('rm -r {ec2_input_path} ')
get_ipython().system('rm -r {ec2_output_path} ')
get_ipython().system('mkdir -p {ec2_input_path} ')
get_ipython().system('mkdir -p {ec2_output_path} ')
# In[4]:
get_ipython().system('aws s3 cp {S3_INPUT_PATH["master_geom_simplified"]} {ec2_input_path} --recursive --quiet')
# In[5]:
get_ipython().system('aws s3 cp {S3_INPUT_PATH["annual"]} {ec2_input_path} --recursive')
# In[ ]:
# In[50]:
import pandas as pd
import geopandas as gpd
from tqdm import tqdm
from shapely.geometry import MultiPolygon, shape
# In[51]:
input_path_master_geom_simplified = "{}/{}".format(ec2_input_path,INPUT_FILENAME["master_geom_simplified"])
# In[78]:
gdf_in = gpd.read_file(filename=input_path_master_geom_simplified)
# In[79]:
gdf_in.head()
# In[85]:
gdf_in.shape
# In[80]:
def convert_row_to_multipolygon(row):
if row.type == "Polygon":
new_geom = MultiPolygon([row.geometry])
elif row.type == "MultiPolygon":
new_geom = row.geometry
else:
new_geom = -9999
return new_geom
def df_force_multipolygon(gdf):
"""
Force all geometries in a geodataframe to be
MultiPolygons. The GeoPackage format does not allow
mixing of polygons and multipolygons.
Args:
gdf(GeoDataFrame) : GeoDataFrame
Returns:
gdf_mp(GeoDataFrame): GeodataFrame with multipolygons
"""
gdf_temp = gdf.copy()
gdf_temp["type"] = gdf_temp["geometry"].geom_type
gdf["geometry"] = gdf_temp.apply(axis=1,func=convert_row_to_multipolygon)
return gdf
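# Minimal illustration of the wrapping step (assumes shapely is importable here;
# the example names are hypothetical and not part of the pipeline):
#   from shapely.geometry import Polygon, MultiPolygon
#   square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
#   MultiPolygon([square]).geom_type  # -> 'MultiPolygon'
# Rows whose geometry is neither Polygon nor MultiPolygon end up flagged as -9999.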
# In[91]:
gdf = df_force_multipolygon(gdf_in)
# In[92]:
# Change column order. See https://github.com/wri/aqueduct_analyze_locations/blob/master/data_download/instructions.md#annual-baseline
gdf = gdf[["string_id","aq30_id","pfaf_id","gid_1","aqid","geometry"]]
# ## Annual
# In[82]:
input_path_annual = "{}/{}".format(ec2_input_path,INPUT_FILENAME["annual"])
# In[132]:
df_annual = pd.read_pickle(path=input_path_annual)
# In[133]:
df_annual.head()
# In[134]:
df_annual.shape
# In[135]:
def annual_column_order():
"""
Create a list of the preferred column order.
See https://github.com/wri/aqueduct_analyze_locations/blob/master/data_download/instructions.md#annual-baseline
Args:
none
Returns:
columns(list): List of strings with column names
"""
# Indicator Columns
indicators = ["bws",
"bwd",
"iav",
"sev",
"gtd",
"rfr",
"cfr",
"drr",
"ucw",
"cep",
"udw",
"usa",
"rri"]
types = ["raw","score","cat","label"]
indicator_columns =[]
for indicator in indicators:
for one_type in types:
column = "{}_{}".format(indicator,one_type)
indicator_columns.append(column)
# Grouped Water Risk Columns
industries = ["def",
"agr",
"che",
"con",
"elp",
"fnb",
"min",
"ong",
"smc",
"tex"]
groups = ["qan",
"qal",
"rrr",
"tot"]
types_awr = ["raw","score","cat","label","weight_fraction"]
grouped_water_risk_columns = []
for industry in industries:
for group in groups:
for one_type_awr in types_awr:
column = "w_awr_{}_{}_{}".format(industry,group,one_type_awr)
grouped_water_risk_columns.append(column)
columns = indicator_columns + grouped_water_risk_columns
return columns
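# Illustration of the naming scheme built above (counts follow from the lists,
# not from external data): the 13 indicators expand to 13 * 4 = 52 columns such
# as "bws_raw", "bws_score", "bws_cat", "bws_label"; the grouped water risk block
# expands to 10 * 4 * 5 = 200 columns such as "w_awr_agr_tot_score" or
# "w_awr_tex_qal_weight_fraction", giving 252 result columns in total.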
# In[136]:
result_column_names = annual_column_order()
# In[137]:
extra_column_names = ["string_id","gid_0","name_0","name_1","area_km2"]
# In[138]:
annual_column_names = extra_column_names + result_column_names
# In[139]:
df_annual = df_annual[annual_column_names]
# In[140]:
gdf_annual = gdf.merge(df_annual,on="string_id",how="left")
# In[141]:
gdf_annual.shape
# In[143]:
gdf_annual.head()
# In[144]:
gdf_annual.sort_values(by="aq30_id",inplace=True)
# # Monthly
# In[ ]:
# In[ ]:
# In[ ]:
# # Export
# In[146]:
output_filename_annual = "y2019m07d11_aqueduct30_annual_v01"
# In[147]:
output_path_annual = "{}/{}".format(ec2_output_path,output_filename_annual)
# In[ ]:
gdf_annual.to_file(driver="GPKG",
filename=output_path_annual + ".gpkg",
encoding="UTF-8")
# In[ ]:
get_ipython().system('aws s3 cp {ec2_output_path} {s3_output_path} --recursive')
# In[ ]:
```
#### File: aqueduct30_data_download/scripts/Y2019M07D12_RH_Aqueduct30_Data_Download_Monthly_V01.py
```python
TESTING = 0
SCRIPT_NAME = "Y2019M07D12_RH_Aqueduct30_Data_Download_Monthly_V01"
OUTPUT_VERSION = 3
S3_INPUT_PATH = {}
S3_INPUT_PATH["hybas"] = "s3://wri-projects/Aqueduct30/processData/Y2017M08D02_RH_Merge_HydroBasins_V02/output_V04"
S3_INPUT_PATH["monthly"] = "s3://wri-projects/Aqueduct30/finalData/Y2019M01D14_RH_Aqueduct_Results_V01/output_V04/monthly"
INPUT_FILENAME = {}
INPUT_FILENAME["hybas"] = "hybas_lev06_v1c_merged_fiona_V04.shp"
ec2_input_path = "/volumes/data/{}/input_V{:02.0f}".format(SCRIPT_NAME,OUTPUT_VERSION)
ec2_output_path = "/volumes/data/{}/output_V{:02.0f}".format(SCRIPT_NAME,OUTPUT_VERSION)
s3_output_path = "s3://wri-projects/Aqueduct30/processData/{}/output_V{:02.0f}/".format(SCRIPT_NAME,OUTPUT_VERSION)
print(s3_output_path)
# In[2]:
import time, datetime, sys
dateString = time.strftime("Y%YM%mD%d")
timeString = time.strftime("UTC %H:%M")
start = datetime.datetime.now()
print(dateString,timeString)
sys.version
get_ipython().magic('matplotlib inline')
# In[3]:
get_ipython().system('rm -r {ec2_input_path} ')
get_ipython().system('rm -r {ec2_output_path} ')
get_ipython().system('mkdir -p {ec2_input_path} ')
get_ipython().system('mkdir -p {ec2_output_path} ')
# In[4]:
get_ipython().system('aws s3 cp {S3_INPUT_PATH["hybas"]} {ec2_input_path} --recursive --quiet')
# In[5]:
get_ipython().system('aws s3 cp {S3_INPUT_PATH["monthly"]} {ec2_input_path} --recursive --quiet')
# In[6]:
import pandas as pd
import geopandas as gpd
from tqdm import tqdm
from shapely.geometry import MultiPolygon, shape
# In[7]:
gpd.__version__
# In[8]:
input_path_geom = "{}/{}".format(ec2_input_path,INPUT_FILENAME["hybas"])
# In[9]:
def convert_row_to_multipolygon(row):
if row.type == "Polygon":
new_geom = MultiPolygon([row.geometry])
elif row.type == "MultiPolygon":
new_geom = row.geometry
else:
new_geom = -9999
return new_geom
def df_force_multipolygon(gdf):
"""
Force all geometries in a geodataframe to be
MultiPolygons. The GeoPackage format does not allow
mixing of polygons and multipolygons.
Args:
gdf(GeoDataFrame) : GeoDataFrame
Returns:
gdf_mp(GeoDataFrame): GeodataFrame with multipolygons
"""
gdf_temp = gdf.copy()
gdf_temp["type"] = gdf_temp["geometry"].geom_type
gdf["geometry"] = gdf_temp.apply(axis=1,func=convert_row_to_multipolygon)
return gdf
def process_df(df,indicator,month):
"""
Process monthly dataframe
Args:
df(dataframe): input dataframe
indicator(string): short name for the indicator, one of bws, bwd, iav
month(integer): month number (1-12)
Returns:
df_out(dataframe) : output dataframe. simplified, clean, beautiful!
"""
df_out = df[["pfaf_id","raw","score","cat","label"]]
df_out = df_out.rename(columns={"raw":"{}_{:02.0f}_raw".format(indicator,month),
"score":"{}_{:02.0f}_score".format(indicator,month),
"cat":"{}_{:02.0f}_cat".format(indicator,month),
"label":"{}_{:02.0f}_label".format(indicator,month)})
df_out.set_index("pfaf_id",inplace=True)
return df_out
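# Illustration (hypothetical indicator/month): calling process_df(df, "bws", 3)
# keeps pfaf_id as the index and renames the value columns to "bws_03_raw",
# "bws_03_score", "bws_03_cat" and "bws_03_label", so the twelve monthly frames
# for each indicator can be merged side by side onto the basin geometries below.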
def process_gdf(gdf):
"""
Process the hydrobasin level6 geodataframe
Dropping a weird polygon that crosses the -180 meridian and has a
non unique ID 353020
Args:
gdf(geodataframe): hydrobasin level 6 geodataframe
Returns:
gdf_out(geodataframe): simple, clean, beautiful
"""
gdf_out = gdf.loc[gdf["PFAF_ID"] != 353020]
drop_columns = ["HYBAS_ID",
"NEXT_DOWN",
"NEXT_SINK",
"MAIN_BAS",
"DIST_SINK",
"DIST_MAIN",
"SUB_AREA",
"UP_AREA",
"ENDO",
"COAST",
"ORDER",
"SORT"]
gdf_out = gdf_out.drop(drop_columns,axis=1)
gdf_out = gdf_out.rename(columns={"PFAF_ID":"pfaf_id"})
gdf_out = df_force_multipolygon(gdf_out)
return gdf_out
# In[10]:
gdf_in = gpd.read_file(filename=input_path_geom)
# In[11]:
gdf_in.shape
# In[12]:
gdf = process_gdf(gdf_in)
# In[13]:
gdf.head()
# # Add monthly tabular data, pivot
# In[14]:
indicators = ["bws",'bwd','iav']
# In[15]:
months = range(1,12+1)
# In[16]:
for indicator in indicators:
input_filename = "monthly_{}.pkl".format(indicator)
input_path = "{}/{}".format(ec2_input_path,input_filename)
df = pd.read_pickle(path=input_path)
for month in months:
df_month = df.loc[df["month"]==month]
df_month = process_df(df_month,indicator,month)
gdf = gdf.merge(right=df_month,
how="left",
left_on="pfaf_id",
right_index=True)
# In[17]:
output_filename= "{}".format(SCRIPT_NAME).lower()
# In[18]:
output_path = "{}/{}".format(ec2_output_path,output_filename)
# In[19]:
gdf.to_file(driver="GPKG",
filename=output_path + ".gpkg",
encoding="UTF-8")
# In[20]:
get_ipython().system('aws s3 cp {ec2_output_path} {s3_output_path} --recursive')
# In[21]:
end = datetime.datetime.now()
elapsed = end - start
print(elapsed)
# Previous runs:
# 0:02:06.463591
# 0:02:01.624344
#
#
# In[ ]:
``` |
{
"source": "jmccarrell/n100tickers",
"score": 3
} |
#### File: n100tickers/tests/test_n100_tickers.py
```python
import datetime
from nasdaq_100_ticker_history import tickers_as_of
def test_basics() -> None:
assert 'AMZN' in tickers_as_of(2020, 6, 1)
assert len(tickers_as_of(2020, 6, 1)) >= 100
def _test_one_swap(as_of_date: datetime.date,
removed_ticker: str,
added_ticker: str,
expected_number_of_tickers: int) -> None:
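"""Check a single index swap that takes effect on ``as_of_date``.
The day before ``as_of_date`` must contain ``removed_ticker`` but not
``added_ticker``; on ``as_of_date`` the two are swapped while the total
ticker count stays at ``expected_number_of_tickers``.
"""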
tickers_on_change_date = tickers_as_of(as_of_date.year,
as_of_date.month,
as_of_date.day)
assert len(tickers_on_change_date) == expected_number_of_tickers
before_change_date = as_of_date - datetime.timedelta(days=1)
tickers_before_change_date = tickers_as_of(before_change_date.year,
before_change_date.month,
before_change_date.day)
assert len(tickers_before_change_date) == expected_number_of_tickers
assert removed_ticker in tickers_before_change_date
assert added_ticker not in tickers_before_change_date
assert removed_ticker not in tickers_on_change_date
assert added_ticker in tickers_on_change_date
def _test_at_year_boundary(year: int) -> None:
"""prove the tickers at the beginning of the year match the set at the end of the
previous year.
"""
begin_of_current_year = datetime.date.fromisoformat(f"{year}-01-01")
end_of_previous_year = begin_of_current_year - datetime.timedelta(days=1)
current_tickers = tickers_as_of(begin_of_current_year.year,
begin_of_current_year.month,
begin_of_current_year.day)
previous_tickers = tickers_as_of(end_of_previous_year.year,
end_of_previous_year.month,
end_of_previous_year.day)
assert previous_tickers == current_tickers
def test_tickers_2022() -> None:
num_tickers_2022 = 101
# On Jan 24, Old Dominion replaces Peloton
_test_one_swap(datetime.date.fromisoformat('2022-01-24'), 'PTON', 'ODFL', num_tickers_2022)
# On Feb 2, Exelon EXC split off Constellation Energy CEG, which remained in the index
tickers_added_2022_02_02 = frozenset(('CEG',))
assert tickers_added_2022_02_02.isdisjoint(tickers_as_of(2022, 2, 1))
assert tickers_added_2022_02_02.issubset(tickers_as_of(2022, 2, 2))
assert len(tickers_as_of(2022, 2, 2)) == num_tickers_2022 + 1
num_tickers_2022 += 1
# AMD completed its acquisition of Xilinx XLNX on or about 14 Feb.
# So AstraZeneca AZN replaces XLNX as of 22 Feb 2022.
_test_one_swap(datetime.date.fromisoformat('2022-02-22'), 'XLNX', 'AZN', num_tickers_2022)
def test_year_boundary_2021_2022() -> None:
_test_at_year_boundary(2022)
def test_2021_annual_changes() -> None:
num_tickers_2021_end_of_year = 101
# Annual 2021 changes
# https://www.nasdaq.com/press-release/annual-changes-to-the-nasdaq-100-indexr-2021-12-10-0
#
# On December 10, 2021 Nasdaq announced that six new companies would join the index
# prior to the market open on December 20, 2021.
# They are Airbnb (ABNB), Datadog (DDOG), Fortinet (FTNT), Lucid Group (LCID),
# Palo Alto Networks (PANW), and Zscaler (ZS).
# They will replace CDW (CDW), Cerner (CERN), Check Point (CHKP), Fox Corporation (FOXA/FOX),
# Incyte (INCY), and Trip.com (TCOM).
# https://greenstocknews.com/news/nasdaq/lcid/annual-changes-to-the-nasdaq-100-index
# This removes 7 tickers while adding 6, so total number of tickers goes to 101
assert len(tickers_as_of(2021, 12, 17)) == num_tickers_2021_end_of_year + 1
tickers_removed_2021_12_20 = frozenset(('CDW', 'CERN', 'CHKP', 'FOX', 'FOXA', 'INCY', 'TCOM'))
assert tickers_removed_2021_12_20.issubset(tickers_as_of(2021, 12, 17))
tickers_added_2021_12_20 = frozenset(('ABNB', 'DDOG', 'FTNT', 'LCID', 'PANW', 'ZS'))
assert tickers_added_2021_12_20.isdisjoint(tickers_as_of(2021, 12, 17))
assert len(tickers_as_of(2021, 12, 20)) == num_tickers_2021_end_of_year
assert tickers_removed_2021_12_20.isdisjoint(tickers_as_of(2021, 12, 20))
assert tickers_added_2021_12_20.issubset(tickers_as_of(2021, 12, 20))
def test_tickers_2021() -> None:
num_tickers_2021 = 102
# On July 21, Honeywell replaces Alexion
_test_one_swap(datetime.date.fromisoformat('2021-07-21'), 'ALXN', 'HON', num_tickers_2021)
# On Aug 26, CrowdStrike replaced Maxim Integrated Products, which was being acquired by Analog Devices.
_test_one_swap(datetime.date.fromisoformat('2021-08-26'), 'MXIM', 'CRWD', num_tickers_2021)
def test_year_boundary_2020_2021() -> None:
_test_at_year_boundary(2021)
def test_tickers_2020() -> None:
num_tickers_2020: int = 103
_test_at_year_boundary(2020)
# On April 20, Dexcom replaced American Airlines Group in the index
_test_one_swap(datetime.date.fromisoformat('2020-04-20'), 'AAL', 'DXCM', num_tickers_2020)
# On April 30, Zoom Video Communications replaced Willis Towers Watson (WLTW)
_test_one_swap(datetime.date.fromisoformat('2020-04-30'), 'WLTW', 'ZM', num_tickers_2020)
# On June 22, DocuSign, Inc. (DOCU) will replace United Airlines Holdings, Inc. (Nasdaq: UAL)
_test_one_swap(datetime.date.fromisoformat('2020-06-22'), 'UAL', 'DOCU', num_tickers_2020)
# On Jul 20, Moderna MRNA replaces CoStar Group CSGP
# https://www.globenewswire.com/news-release/2020/07/13/2061339/0/en/Moderna-Inc-to-Join-the-NASDAQ-100-Index-Beginning-July-20-2020.html
_test_one_swap(datetime.date.fromisoformat('2020-07-20'), 'CSGP', 'MRNA', num_tickers_2020)
# On 24 Aug 2020, Pinduoduo, Inc. PDD replaced NetApp, Inc. NTAP in the NASDAQ-100 Index.
# https://www.globenewswire.com/news-release/2020/08/15/2078875/0/en/Pinduoduo-Inc-to-Join-the-NASDAQ-100-Index-Beginning-August-24-2020.html
_test_one_swap(datetime.date.fromisoformat('2020-08-24'), 'NTAP', 'PDD', 103)
# Western Digital Corp (WDC) is replaced by Keurig Dr Pepper Inc. (KDP) as of Oct 19, 2020.
# https://www.globenewswire.com/news-release/2020/10/10/2106521/0/en/Keurig-Dr-Pepper-Inc-to-Join-the-NASDAQ-100-Index-Beginning-October-19-2020.html
_test_one_swap(datetime.date.fromisoformat('2020-10-19'), 'WDC', 'KDP', 103)
def test_2020_annual_changes() -> None:
# Annual 2020 changes
# https://www.nasdaq.com/press-release/annual-changes-to-the-nasdaq-100-index-2020-12-11
#
# 6 companies added; 6 removed. However, Liberty Global PLC has 2 symbols: (Nasdaq: LBTYA/LBTYK)
# So total tickers change from 103 to 102.
# Effective date: 2020-12-21
assert len(tickers_as_of(2020, 12, 18)) == 103
tickers_removed_12_21 = frozenset(('BMRN', 'CTXS', 'EXPE', 'LBTYA', 'LBTYK', 'TTWO', 'ULTA'))
assert tickers_removed_12_21.issubset(tickers_as_of(2020, 12, 18))
tickers_added_12_21 = frozenset(('AEP', 'MRVL', 'MTCH', 'OKTA', 'PTON', 'TEAM'))
assert tickers_added_12_21.isdisjoint(tickers_as_of(2020, 12, 18))
assert len(tickers_as_of(2020, 12, 21)) == 102
assert tickers_removed_12_21.isdisjoint(tickers_as_of(2020, 12, 21))
assert tickers_added_12_21.issubset(tickers_as_of(2020, 12, 21))
def test_tickers_2019() -> None:
num_tickers_2019: int = 103
_test_at_year_boundary(2019)
# 6 tickers added and removed on 12/23/2019
# https://finance.yahoo.com/news/annual-changes-nasdaq-100-index-010510822.html
tickers_2019_dec_23 = tickers_as_of(2019, 12, 23)
assert len(tickers_2019_dec_23) == num_tickers_2019
dec_23_removals = frozenset(('HAS', 'HSIC', 'JBHT', 'MYL', 'NLOK', 'WYNN'))
assert tickers_2019_dec_23.isdisjoint(dec_23_removals)
dec_23_additions = frozenset(('ANSS', 'CDW', 'CPRT', 'CSGP', 'SGEN', 'SPLK'))
assert dec_23_additions.issubset(tickers_2019_dec_23)
tickers_2019_dec_20 = tickers_as_of(2019, 12, 20)
assert len(tickers_2019_dec_20) == num_tickers_2019
assert dec_23_removals.issubset(tickers_2019_dec_20)
assert tickers_2019_dec_20.isdisjoint(dec_23_additions)
# 1 swap Nov 19
# https://www.nasdaq.com/press-release/exelon-corporation-to-join-the-nasdaq-100-index-beginning-november-21-2019-2019-11-18
_test_one_swap(datetime.date.fromisoformat('2019-11-19'), 'CELG', 'EXC', num_tickers_2019)
# there was a record of 21st Century Fox changing to Fox Corp. But as near as I can tell, the ticker
# symbols were the same.
def test_tickers_2018() -> None:
num_tickers_2018: int = 103
_test_at_year_boundary(2018)
# 6 tickers added and removed on 12/24/2018
# https://www.nasdaq.com/about/press-center/annual-changes-nasdaq-100-index-0
tickers_2018_dec_23 = tickers_as_of(2018, 12, 23)
assert len(tickers_2018_dec_23) == num_tickers_2018
tickers_2018_dec_24 = tickers_as_of(2018, 12, 24)
assert len(tickers_2018_dec_24) == num_tickers_2018
dec_24_removals = frozenset(('ESRX', 'HOLX', 'QRTEA', 'SHPG', 'STX', 'VOD'))
assert dec_24_removals.issubset(tickers_2018_dec_23)
assert tickers_2018_dec_24.isdisjoint(dec_24_removals)
dec_24_additions = frozenset(('AMD', 'LULU', 'NTAP', 'UAL', 'VRSN', 'WLTW'))
assert dec_24_additions.issubset(tickers_2018_dec_24)
# 11/19/2018 XEL replaces XRAY
# https://www.nasdaq.com/about/press-center/xcel-energy-inc-join-nasdaq-100-index-beginning-november-19-2018
_test_one_swap(datetime.date.fromisoformat('2018-11-19'), 'XRAY', 'XEL', num_tickers_2018)
# 11/5/2018 NXPI replaces CA
# (link broken):
# https://business.nasdaq.com/mediacenter/pressreleases/1831989/nxp-semiconductors-nv-to-join-the-nasdaq-100-index-beginning-november-5-2018
_test_one_swap(datetime.date.fromisoformat('2018-11-05'), 'CA', 'NXPI', num_tickers_2018)
# 7/23/2018 PEP replaces DISH
_test_one_swap(datetime.date.fromisoformat('2018-07-23'), 'DISH', 'PEP', num_tickers_2018)
def test_tickers_2017() -> None:
num_tickers_2017: int = 104
# 2/7/2017 JBHT replaced NXPI
_test_one_swap(datetime.date.fromisoformat('2017-02-07'), 'NXPI', 'JBHT', num_tickers_2017)
# 3/20/2017 IDXX replaced SBAC
_test_one_swap(datetime.date.fromisoformat('2017-03-20'), 'SBAC', 'IDXX', num_tickers_2017)
# 4/24/2017 WYNN replaced TRIP
_test_one_swap(datetime.date.fromisoformat('2017-04-24'), 'TRIP', 'WYNN', num_tickers_2017)
# 6/19/2017 MELI replaced YHOO
_test_one_swap(datetime.date.fromisoformat('2017-06-19'), 'YHOO', 'MELI', num_tickers_2017)
# 10/23/2017 ALGN replaced MAT
_test_one_swap(datetime.date.fromisoformat('2017-10-23'), 'MAT', 'ALGN', num_tickers_2017)
# annual changes for 2017; effective Dec 18, 2017
# https://www.nasdaq.com/about/press-center/annual-changes-nasdaq-100-index-2
dec_18_removals = frozenset(('AKAM', 'DISCA', 'DISCK', 'NCLH', 'TSCO', 'VIAB'))
dec_18_additions = frozenset(('ASML', 'CDNS', 'SNPS', 'TTWO', 'WDAY'))
tickers_dec_17 = tickers_as_of(2017, 12, 17)
assert len(tickers_dec_17) == num_tickers_2017
assert dec_18_removals.issubset(tickers_dec_17)
assert tickers_dec_17.isdisjoint(dec_18_additions)
tickers_dec_18 = tickers_as_of(2017, 12, 18)
# this was a remove 6 and add 5 change due to two classes of Discovery Communications: DISCA and DISCK
assert len(tickers_dec_18) == num_tickers_2017 - 1
assert dec_18_additions.issubset(tickers_dec_18)
assert tickers_dec_18.isdisjoint(dec_18_removals)
def test_year_boundary_2016_2017() -> None:
_test_at_year_boundary(2017)
def test_2016_annual_changes() -> None:
# annual changes for 2016; effective Dec 19, 2016 announced Dec 9
# https://en.wikipedia.org/wiki/Nasdaq-100#Changes_in_2016
dec_18_tickers = tickers_as_of(2016, 12, 18)
dec_19_tickers = tickers_as_of(2016, 12, 19)
assert len(dec_18_tickers) == len(dec_19_tickers)
dec_19_removals = frozenset(('BBBY', 'NTAP', 'SRCL', 'WFM'))
assert dec_19_removals.issubset(dec_18_tickers)
assert dec_19_tickers.isdisjoint(dec_19_removals)
dec_19_additions = frozenset(('CTAS', 'HAS', 'HOLX', 'KLAC'))
assert dec_19_additions.isdisjoint(dec_18_tickers)
assert dec_19_additions.issubset(dec_19_tickers)
def test_tickers_2016() -> None:
num_tickers_2016_boy = 105 # num tickers at the start of 2016
num_tickers_2016_eoy = 104 # number of tickers at the end of 2016
assert len(tickers_as_of(2016, 1, 1)) == num_tickers_2016_boy
assert len(tickers_as_of(2016, 12, 31)) == num_tickers_2016_eoy
# https://ir.nasdaq.com/news-releases/news-release-details/csx-corporation-join-nasdaq-100-index-beginning-february-22-2016
_test_one_swap(datetime.date.fromisoformat('2016-02-22'), 'KLAC', 'CSX', num_tickers_2016_boy)
# https://www.nasdaq.com/about/press-center/netease-inc-join-nasdaq-100-index-beginning-march-16-2016
_test_one_swap(datetime.date.fromisoformat('2016-03-16'), 'SNDK', 'NTES', num_tickers_2016_boy)
# adds BATRA, BATRK as of Apr 18; no replacements
# https://en.wikipedia.org/wiki/Nasdaq-100#cite_note-37
apr_17_tickers = tickers_as_of(2016, 4, 17)
assert len(apr_17_tickers) == 105
apr_18_tickers = tickers_as_of(2016, 4, 18)
assert len(apr_18_tickers) == 107
apr_18_additions = frozenset(('BATRA', 'BATRK'))
assert apr_18_additions.isdisjoint(apr_17_tickers)
assert apr_18_additions.issubset(apr_18_tickers)
# https://en.wikipedia.org/wiki/Nasdaq-100#cite_note-38
# this is a 4 for one change as of June 10
jun_09_tickers = tickers_as_of(2016, 6, 9)
assert len(jun_09_tickers) == 107
jun_10_tickers = tickers_as_of(2016, 6, 10)
assert len(jun_10_tickers) == 104
jun_10_removals = frozenset(('LMCA', 'LMCK', 'BATRA', 'BATRK'))
assert jun_10_removals.issubset(jun_09_tickers)
assert jun_10_tickers.isdisjoint(jun_10_removals)
jun_10_additions = frozenset(('XRAY',))
assert jun_10_additions.isdisjoint(jun_09_tickers)
assert jun_10_additions.issubset(jun_10_tickers)
# https://en.wikipedia.org/wiki/Nasdaq-100#cite_note-39
_test_one_swap(datetime.date.fromisoformat('2016-07-18'), 'ENDP', 'MCHP', num_tickers_2016_eoy)
# https://en.wikipedia.org/wiki/Nasdaq-100#cite_note-40
_test_one_swap(datetime.date.fromisoformat('2016-10-19'), 'LLTC', 'SHPG', num_tickers_2016_eoy)
``` |
{
"source": "jm-cc/gcvb",
"score": 3
} |
#### File: gcvb/gcvb/model.py
```python
from enum import IntEnum
from . import db
from .loader import loader as loader
import datetime
class JobStatus(IntEnum):
unlinked = -4
pending = -3
ready = -2
running = -1
exit_success = 0
class AbsoluteMetric:
def __init__(self, reference, tolerance, unit = None):
self.type = "absolute"
self.reference = reference
self.tolerance = float(tolerance)
self.unit = unit
def distance(self, value):
return abs(value - self.reference)
def within_tolerance(self, value):
return self.distance(value) <= self.tolerance
class RelativeMetric:
def __init__(self, reference, tolerance):
self.type = "relative"
self.reference = reference
self.tolerance = tolerance
def distance(self, value):
return (abs(value - self.reference) / self.reference)
def within_tolerance(self, value):
return self.distance(value) <= self.tolerance
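# Illustrative usage of the metric classes (hypothetical numbers, not taken from
# any real test base):
#   AbsoluteMetric(reference=10.0, tolerance=0.5).within_tolerance(10.3)     # True, |10.3 - 10.0| = 0.3 <= 0.5
#   RelativeMetric(reference=100.0, tolerance=0.05).within_tolerance(103.0)  # True, 3% deviation
#   RelativeMetric(reference=100.0, tolerance=0.05).within_tolerance(110.0)  # False, 10% deviation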
class Validation:
default_type = "relative"
default_reference = None
def __init__(self, valid_dict, config, task=None):
self.raw_dict = valid_dict
self.status = JobStatus.unlinked
self.executable = valid_dict["executable"]
self.type = valid_dict["type"]
self.launch_command = valid_dict["launch_command"]
self.recorded_metrics = {}
self.init_metrics(config)
self.start_date = None
self.end_date = None
self.Task = task
def init_metrics(self, config):
self.expected_metrics = {}
for metric in self.raw_dict.get("Metrics", []):
t = metric.get("type", self.default_type)
if t not in ["relative", "absolute"]:
raise ValueError("'type' must be 'relative' or 'absolute'.")
#reference is either a dict or a number.
ref = metric.get("reference", self.default_reference)
if ref is None:
raise ValueError("'reference' must be provided.")
if isinstance(ref, dict):
if config in ref:
if t == "relative":
self.expected_metrics[metric["id"]] = RelativeMetric(ref[config], metric["tolerance"])
else:
self.expected_metrics[metric["id"]] = AbsoluteMetric(ref[config], metric["tolerance"])
else:
if t == "relative":
self.expected_metrics[metric["id"]] = RelativeMetric(ref, metric["tolerance"])
else:
self.expected_metrics[metric["id"]] = AbsoluteMetric(ref, metric["tolerance"])
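# Illustrative metric entries (hypothetical ids and values) accepted above:
#   {"id": "residual", "type": "absolute", "reference": 0.0, "tolerance": 1e-8}
#   {"id": "lift", "reference": {"config_a": 1.02, "config_b": 0.98}, "tolerance": 0.05}
# When "reference" is a dict, only the entry matching the current config is kept;
# when "type" is omitted it falls back to the class default ("relative" here).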
def get_missing_metrics(self):
e_m = set(self.expected_metrics.keys())
r_m = set(self.recorded_metrics.keys())
return e_m.difference(r_m)
def get_untracked_metrics(self):
e_m = set(self.expected_metrics.keys())
r_m = set(self.recorded_metrics.keys())
return {m : self.recorded_metrics[m] for m in r_m.difference(e_m)}
def get_out_of_tolerance_metrics(self):
res = []
for k,v in self.recorded_metrics.items():
if k in self.expected_metrics:
if not(self.expected_metrics[k].within_tolerance(v)):
res.append((k, self.expected_metrics[k], v))
return res
@property
def missing_metrics(self):
return bool(self.get_missing_metrics())
@property
def success(self):
if self.missing_metrics:
return False
for metric_id, metric in self.expected_metrics.items():
if not metric.within_tolerance(self.recorded_metrics[metric_id]):
return False
return True
@property
def elapsed(self):
return self.end_date-self.start_date
class FileComparisonValidation(Validation):
default_type = "absolute"
default_reference = 0
def __init__(self, valid_dict, config, task=None):
super().__init__(valid_dict, config, task)
self.base = self.raw_dict["base"]
self.ref_id = self.raw_dict["ref"]
@property
def data(self):
return self.Task.Test.data
@property
def filename(self):
ref = self.Task.Test.Run.references[self.data]
if self.base in ref:
return ref[self.base][self.ref_id]["file"]
return "" #The gcvb.db may be used alone, the filename information is lost in this case #FIXME
class Task():
def __init__(self, task_dict, config, test=None):
self.raw_dict = task_dict
self.status = JobStatus.unlinked
self.executable = task_dict["executable"]
self.options = task_dict.get("options", '')
self.launch_command = task_dict["launch_command"]
self.nprocs = task_dict["nprocs"]
self.nthreads = task_dict["nthreads"]
# Validations
self.Validations = []
for v in task_dict.get("Validations", []):
if v["type"] == "script":
self.Validations.append(Validation(v, config, self))
else:
self.Validations.append(FileComparisonValidation(v, config, self))
self.start_date = None
self.end_date = None
self.Test = test
@property
def completed(self):
return bool(self.end_date)
@property
def success(self):
if not self.completed:
return False
if self.status != JobStatus.exit_success:
return False
return all([v.success for v in self.Validations])
@property
def elapsed(self):
return self.end_date-self.start_date
def get_failures(self):
res = []
if self.completed:
if self.status > JobStatus.exit_success:
res.append(ExitFailure(self.executable, self.status))
# Missing metric is a failure only if the task is completed
for v in self.Validations:
missing = v.get_missing_metrics()
for mm in missing:
res.append(MissingMetric(mm))
for v in self.Validations:
oot = v.get_out_of_tolerance_metrics()
for m_id,m,v in oot:
res.append(OutOfTolerance(m_id,m,v))
return res
def hr_result(self):
# returns a string representing the first failure
f = self.get_failures()
if f:
return f[0].hr_result()
return "Success"
def hr_elapsed(self):
if self.completed:
return str(self.elapsed)
return "DNF" #Did not finish
class Test():
def __init__(self, test_dict, config, name=None, start_date=None, end_date=None, run=None):
self.raw_dict = test_dict
# Tasks
self.Tasks = []
for t in test_dict.get("Tasks"):
self.Tasks.append(Task(t, config, self))
# Steps
self.Steps = []
for t in self.Tasks:
self.Steps.append(t)
for v in t.Validations:
self.Steps.append(v)
# Infos
self.name = name
self.start_date = start_date
self.end_date = end_date
self.data = self.raw_dict["data"]
self.Run = run
@property
def completed(self):
return bool(self.end_date)
@property
def success(self):
return all([t.success for t in self.Tasks])
@property
def failed(self):
#failed if a completed task failed.
return any([bool(t.get_failures()) for t in self.Tasks if t.completed])
@property
def elapsed(self):
return self.end_date-self.start_date
def __repr__(self):
return f"{{id : {self.name}, status : TODO}}"
def get_failures(self):
return [t.get_failures() for t in self.Tasks]
def hr_result(self):
if not(self.completed):
return "Not completed yet"
failures = self.get_failures()
for k,f in enumerate(failures, 1):
if f:
return f"Step {k} : {f[0].hr_result()}"
return "Success"
def cpu_time(self):
ct = 0
for task in self.Tasks:
if task.completed:
ct += task.elapsed.total_seconds() * task.nthreads * task.nprocs
else:
return float('inf')
return ct
def _strtotimestamp(s):
# for backward compatibility with previous database format
if isinstance(s, str):
return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S")
else:
return s
class Run():
def __test_db_to_objects(self):
self.db_tests = db.get_tests(self.run_id)
self.base_id = db.get_base_from_run(self.run_id)
self.gcvb_base = loader.load_base(self.run_id)
self.references = loader.references
b = self.gcvb_base["Tests"]
self.Tests = {t["name"] : Test(b[t["name"]], self.config, t["name"], t["start_date"], t["end_date"], self) for t in self.db_tests}
# Fill infos for every step
recorded_metrics = db.load_report_n(self.run_id)
steps = db.get_steps(self.run_id)
for test_id, test in self.Tests.items():
for step, metrics in recorded_metrics[test_id].items():
test.Steps[step-1].recorded_metrics = metrics
for step, step_info in steps[test_id].items():
test.Steps[step-1].start_date = _strtotimestamp(step_info["start_date"])
test.Steps[step-1].end_date = _strtotimestamp(step_info["end_date"])
test.Steps[step-1].status = step_info["status"]
def __init__(self, run_id):
self.run_id = run_id
run_infos = db.get_run_infos(run_id)
self.start_date = run_infos["start_date"]
self.end_date = run_infos["end_date"]
self.config = run_infos["config_id"]
self.gcvb_id = run_infos["gcvb_id"]
self.__test_db_to_objects()
@property
def completed(self):
return bool(self.end_date)
@property
def success(self):
return all([test.success for test in self.Tests.values()])
@property
def failed(self):
return any([test.failed for test in self.Tests.values()])
@property
def elapsed(self):
return self.end_date-self.start_date
def get_running_tests(self):
return [k for k,v in self.Tests.items() if not v.completed]
def get_failures(self):
return {test_id : test.get_failures() for test_id, test in self.Tests.items() if any(test.get_failures())}
def str_status(self):
if self.failed:
return "Failed"
if self.completed and self.success:
return "Success"
else:
return "In progress"
class TaskFailure():
def __init__(self):
pass
def __repr__(self):
pass
def __str__(self):
pass
class ExitFailure(TaskFailure):
def __init__(self, executable, return_code):
self.executable = executable
self.return_code = return_code
def __repr__(self):
return f"<Exit Failure (return code : {self.return_code})>"
def __str__(self):
return self.__repr__()
def hr_result(self):
return f"{self.executable} exited with code {self.return_code}"
class MissingMetric(TaskFailure):
def __init__(self, metric_id):
self.metric_id = metric_id
def __repr__(self):
return f"<Missing Metric ({self.metric_id})>"
def __str__(self):
return self.__repr__()
def hr_result(self):
return f"Metric {self.metric_id} is missing."
class OutOfTolerance(TaskFailure):
def __init__(self, metric_id, metric, recorded):
self.metric_id = metric_id
self.metric = metric
self.recorded = recorded
def __repr__(self):
return f"<OutOfTolerance (metric : {self.metric_id})"
def __str__(self):
return self.__repr__()
def hr_result(self):
target = self.metric.tolerance #FIXME
value = self.recorded #FIXME
return f"{self.metric.type.capitalize()} metric {self.metric_id} is out of tolerance. (target : {target}, value : {value})"
``` |
{
"source": "jmcclare/spin",
"score": 3
} |
#### File: jmcclare/spin/spin.py
```python
import sys
from optparse import OptionParser
import re
usage_text = """\
spin version 1.0
spin takes a text file and outputs a randomly "spun" version of it. The text
file should contain phrases marked with alternatives using spintax.
Usage: spin filename
   or spin [options]
Options
-t, --test Skip file processing and run tests."""
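# A quick sketch of spintax syntax (hypothetical input, for illustration only):
# "{Hello|Hi} world" spins to either "Hello world" or "Hi world"; sets may also be
# nested, e.g. "{a {big|small} dog|a cat}".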
def print_usage():
print(usage_text)
class Spintax:
"""
Originally based on simple Spintax class by <NAME>
http://ronaldarichardson.com/2011/10/04/recursive-python-spintax-class/
For reference, also see this implementation in C#:
http://stackoverflow.com/questions/8004465/spintax-c-sharp-how-can-i-handle-this
"""
def __init__(self):
return None
def spin(self, str):
while self.incomplete(str):
str = self.regex(str)
return str
def regex(self, str):
from random import choice
match = self.preg_match("{[^{}]+?}", str)
attack = match.split("|")
new_str = self.preg_replace("[{}]", "", choice(attack))
str = str.replace(match, new_str)
return str
def incomplete(self, str):
complete = re.search("{[^{}]+?}", str)
return complete
def preg_match(self, pattern, subject):
match = re.search(pattern, subject)
return match.group()
def preg_match_all(self, pattern, subject):
matches = re.findall(pattern, subject)
return matches
def preg_replace(self, pattern, replacement, subject):
result = re.sub(pattern, replacement, subject)
return result
def run_tests():
spintax = Spintax()
print("Test Output:\n")
unspun = u"I recently {fried|baked|totaled} my 300GB Seagate {hard drive|hard disk} by accident. I tend to keep my drives unfastened inside my PC cases. My boot drive tipped over and hit the circuit board of my data drive. There was a small flash and a bit of smoke arose. The boot drive survived, but the 300GB drive had a nasty burn mark right on top of one of the chips on it's circuit board. It was done and all of my data was now trapped on the disk."
print("\n\nUnspun text:\n\n%s" % unspun)
print("\nSpun:\n\n%s" % spintax.spin(unspun))
unspun_nested = u"It turns out that {you can {replace|change} the circuit board on a Seagate hard drive|the circuit board on a Seagate hard drive can be {replaced|changed}} without even soldering. The board is held on with some screws and fasteners. According to some forum posts I read, if your drive's circuit board dies, you can get one from another drive of the same or similar model and it should work."
print("\n\nNested unspun text:\n\n%s" % unspun_nested)
print("\nSpun:\n\n%s" % spintax.spin(unspun_nested))
unspun_linebreaks = u"""I recently {fried|baked|totaled} my 300GB Seagate {hard
drive|hard disk} by accident. I tend to keep my drives unfastened inside my PC cases.
My boot drive tipped over and hit the circuit board of my data drive. There
was a small flash and a bit of smoke arose. The boot drive survived, but
the 300GB drive had a nasty burn mark right on top of one of the chips on
it's circuit board. It was done and all of my data was now trapped on the
disk."""
# This tests unspun text with linebreaks, including one right in the middle
# of a spin set.
print("\n\nUnspun text with linebreaks:\n\n%s" % unspun_linebreaks)
print("\nSpun:\n\n%s" % spintax.spin(unspun_linebreaks))
unspun_multi_p = u"""I recently {fried|baked|totaled} my 300GB Seagate {hard drive|hard disk} by accident. I tend to keep my drives unfastened inside my PC cases. My boot drive tipped over and hit the circuit board of my data drive. There was a small flash and a bit of smoke arose. The boot drive survived, but the 300GB drive had a nasty burn mark right on top of one of the chips on it's circuit board. It was done and all of my data was now trapped on the disk.
It turns out that {you can {replace|change} the circuit board on a Seagate hard drive|the circuit board on a Seagate hard drive can be {replaced|changed}} without even soldering. The board is held on with some screws and fasteners. According to some forum posts I read, if your drive's circuit board dies, you can get one from another drive of the same or similar model and it should work."""
print("\n\nMulti-paragraph unspun text:\n\n%s" % unspun_multi_p)
print("\nSpun:\n\n%s" % spintax.spin(unspun_multi_p))
sys.exit(0)
if __name__ == '__main__':
opt_parser = OptionParser(
usage = usage_text,
version = "%prog 1.0",
)
opt_parser.add_option('-t', '--test',
action="store_true", dest="test", default=False,
help = "Skip file processing and run tests."
)
(options, args) = opt_parser.parse_args()
if (options.test == True):
run_tests()
if (len(args) == 0):
opt_parser.error("Please specify a filename.")
if (len(args) > 1):
opt_parser.error("Wrong number of arguments.")
try:
f = open(args[0], 'r')
except:
print("Invalid filename.")
print("Could not open: %s" % args[0])
print_usage()
sys.exit(1)
unspun = f.read()
spintax = Spintax()
print(spintax.spin(unspun))
``` |
{
"source": "jmcclell/easyblog",
"score": 2
} |
#### File: easyblog/easyblog/managers.py
```python
from django.utils import timezone
from django.db import models
# Because of a circular dependency, we can't import models specifically
import easyblog
class PostStatusLiveManager(models.Manager):
def get_query_set(self):
"""Return PostStatus objects which are defined as "live" (eg: Published) """
return super(PostStatusLiveManager, self).get_query_set().filter(
is_live=True
)
class PostLiveManager(models.Manager):
def get_query_set(self):
"""Return posts that are live (ie: their PostStatus is a Live status and their publish date has past)"""
return super(PostLiveManager, self).get_query_set().filter(status__in=easyblog.models.PostStatus.live_statuses.all(),
publish_date__lte=timezone.now())
```
#### File: easyblog/views/authors.py
```python
from django.shortcuts import get_object_or_404
from django.views.generic.list_detail import object_list
from easyblog.models import Author
from easyblog import settings
def author_detail(request, username, page=None, **kwargs):
"""Display the entries of an author"""
extra_context = kwargs.pop('extra_context', {})
author = get_object_or_404(Author, username=username)
#if not kwargs.get('template_name'):
# kwargs['template_name'] = template_name_for_entry_queryset_filtered(
# 'author', author.username)
extra_context.update({'author': author})
kwargs['extra_context'] = extra_context
return object_list(request, queryset=author.live_posts,
paginate_by=settings.POSTS_PER_PAGE, page=page,
**kwargs)
```
#### File: easyblog/sample/dilla_spamlib.py
```python
from django.utils import timezone
from dilla import spam, spammers
import random
#@spam.strict_handler('easyblog.Post.title')
#def get_blog_post_title(record, field):
# return random.choice(string.ascii_letters)
@spam.global_handler('DateField')
@spam.global_handler('TimeField')
@spam.global_handler('DateTimeField')
def random_datetime_tz_aware(record, field):
"""
Calculate random datetime object between last and next month.
    Django interface is pretty tolerant at this point, so three
decorators instead of three handlers here.
"""
# 1 month ~= 30d ~= 720h ~= 43200min
random_minutes = random.randint(-43200, 43200)
return timezone.now() + timezone.timedelta(minutes=random_minutes)
@spam.strict_handler('easyblog.models.Post.status')
def get_default_post_status(record, field):
weighted_choices = [1, 1, 1, 2]
return random.choice(weighted_choices)
@spam.strict_handler('easyblog.models.Post.tags')
def get_post_tags(record, field):
return spammers.random_words(record, field)
``` |
{
"source": "jmccormac01/donuts_voyager",
"score": 3
} |
#### File: donuts_voyager/testing/test_receive_until.py
```python
import sys
import socket as s
import traceback
import time
import json
class Voyager():
"""
Voyager interaction class
"""
def __init__(self, config):
"""
Initialise the class
"""
self.socket = None
self.socket_ip = config['socket_ip']
self.socket_port = config['socket_port']
self.host = config['host']
self.inst = 1
self.message_overflow = []
# test the new receive_until method
self.establish_and_maintain_voyager_connection()
def establish_and_maintain_voyager_connection(self):
"""
Open a connection and maintain it with Voyager
"""
self.__open_socket()
while 1:
# keep it alive and listen for jobs
polling_str = self.__polling_str()
sent = self.__send(polling_str)
if sent:
print(f"SENT: {polling_str}")
# listen for a response
rec = self.__receive_until(delim=b'\r\n')
if rec:
print(f"RECEIVED: {rec}")
time.sleep(1)
def __open_socket(self):
"""
Open a connection to Voyager
"""
self.socket = s.socket(s.AF_INET, s.SOCK_STREAM)
self.socket.settimeout(1.0)
try:
self.socket.connect((self.socket_ip, self.socket_port))
except s.error:
print('Voyager socket connect failed!')
print('Check the application interface is running!')
traceback.print_exc()
sys.exit(1)
def __close_socket(self):
"""
Close the socket once finished
"""
self.socket.close()
def __send(self, message):
"""
Send a message to Voyager
"""
try:
self.socket.sendall(bytes(message, encoding='utf-8'))
sent = True
self.inst += 1
except:
print(f"Error sending message {message} to Voyager")
traceback.print_exc()
sent = False
return sent
def __receive(self, n_bytes=2048):
"""
Receive a message of n_bytes in length from Voyager
Parameters
----------
n_bytes : int, optional
Number of bytes to read from socket
default = 2048
Returns
-------
message : dict
json parsed response from Voyager
Raises
------
None
"""
# NOTE original code, we have JSON decoding errors, trying to figure it out
#try:
# message = json.loads(self.socket.recv(n_bytes))
#except s.timeout:
# message = {}
#return message
# load the raw string
try:
message_raw = self.socket.recv(n_bytes)
except s.timeout:
message_raw = ""
# unpack it into a json object
if message_raw != "":
# NOTE sometimes a message is truncated, try to stop it crashing...
try:
message = json.loads(message_raw)
except json.decoder.JSONDecodeError:
message = {}
else:
message = {}
return message
def __receive_until(self, delim=b'\r\n'):
"""
"""
message_buffer = []
n_bytes = 2048
# check if there is any overflow from last time
print(f"Message overflow {self.message_overflow}")
for msg in self.message_overflow:
print("HANDLING OVERFLOW!!!!!!!!!")
message_buffer.append(msg)
# reset the overflow
self.message_overflow = []
print(f"Message overflow {self.message_overflow}")
continue_reading = True
while continue_reading:
try:
message_raw = self.socket.recv(n_bytes)
except s.timeout:
message_raw = b''
print(f"Message raw {message_raw}")
if delim in message_raw:
print("DELIM FOUND...")
continue_reading = False
message_end, message_new_start = message_raw.split(b'\r\n')
print(f"Message parts {message_end} : {message_new_start}")
message_buffer.append(message_end)
print(f"Message buffer: {message_buffer}")
self.message_overflow.append(message_new_start)
print(f"Message overflow: {self.message_overflow}")
else:
print("DELIM NOT FOUND, CONTINUING READING...")
continue_reading = True
message_buffer.append(message_raw)
print("DONE READING...")
message_str = b''.join(message_buffer)
print(f"Final message string: {message_str}")
return json.loads(message_str)
def __polling_str(self):
"""
Create a polling string
"""
now = str(time.time())
return f"{{\"Event\":\"Polling\",\"Timestamp\":{now},\"Host\":\"{self.host}\",\"Inst\":{self.inst}}}\r\n"
def __guide_str(self):
"""
Create a guiding string
"""
return ""
if __name__ == "__main__":
config = {'socket_ip': '127.0.0.1',
'socket_port': 5950,
'host': 'DESKTOP-CNTF3JR'}
voyager = Voyager(config)
```
#### File: jmccormac01/donuts_voyager/voyager_utils.py
```python
import os
from datetime import (
date,
timedelta,
datetime)
import toml
def load_config(filename):
"""
Load the config file
Parameters
----------
filename : string
Name of the configuration file to load
Returns
-------
configuration : dict
Configuration information
Raises
------
None
"""
return toml.load(filename)
# get evening or morning
def get_am_or_pm():
"""
    Determine if it is morning or afternoon
This function uses now instead of utcnow because
it is the local time which determines if we are
starting or ending the curent night.
A local time > midday is the evening
A local time < midday is the morning of the day after
This is not true for UTC in all places
Parameters
----------
None
Returns
-------
token : int
0 if evening
1 if morning
Raises
------
None
"""
now = datetime.now()
if now.hour >= 12:
token = 0
else:
token = 1
return token
def get_tonight():
"""
Get tonight's date in YYYY-MM-DD format
"""
token = get_am_or_pm()
d = date.today()-timedelta(days=token)
night = "{:d}-{:02d}-{:02d}".format(d.year, d.month, d.day)
return night
# get tonights directory
def get_data_dir(root_dir, windows=True):
"""
Get tonight's data directory and night string
If directory doesn't exist make it
Parameters
----------
root_dir : string
the path to the data directory
    windows : bool, optional
        if True, build a Windows-style (backslash) path; otherwise use forward slashes
Returns
-------
data_loc : string
Path to tonight's data directory
Raises
------
None
"""
night = get_tonight()
if windows:
data_loc = f"{root_dir}\\{night}"
else:
data_loc = f"{root_dir}/{night}"
if not os.path.exists(data_loc):
os.mkdir(data_loc)
return data_loc
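# Usage sketch (hypothetical path): get_data_dir("C:\\data") returns something like
# "C:\\data\\2018-09-07" and creates that directory if it does not already exist.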
``` |
{
"source": "jmccormac01/SONG",
"score": 3
} |
#### File: jmccormac01/SONG/extract_SONG_CCFs.py
```python
import sys
import os
import argparse as ap
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
import glob as g
iSpec_location = '/Users/jmcc/iSpec_v20161118'
sys.path.insert(0, os.path.abspath(iSpec_location))
import ispec
strongLines = iSpec_location + '/input/regions/strong_lines/absorption_lines.txt'
line_lists_parent = iSpec_location + '/input/linelists/CCF/'
telluricLines = line_lists_parent + "Synth.Tellurics.500_1100nm/mask.lst"
atomicMaskLines = {'A0': line_lists_parent + 'HARPS_SOPHIE.A0.350_1095nm/mask.lst',
'F0': line_lists_parent + 'HARPS_SOPHIE.F0.360_698nm/mask.lst',
'G2': line_lists_parent + 'HARPS_SOPHIE.G2.375_679nm/mask.lst',
'K0': line_lists_parent + 'HARPS_SOPHIE.K0.378_679nm/mask.lst',
'K5': line_lists_parent + 'HARPS_SOPHIE.K5.378_680nm/mask.lst',
'M5': line_lists_parent + 'HARPS_SOPHIE.M5.400_687nm/mask.lst'}
INSTRUMENT = {'RESOLUTION': 90000}
def arg_parse():
"""
"""
p = ap.ArgumentParser()
p.add_argument('action', choices=['orders', 'ccfs'])
return p.parse_args()
def normaliseContinuum(spec):
"""
Based on example.py
normalize_whole_spectrum_strategy1_ignoring_prefixed_strong_lines function
"""
model = 'Splines'
degree = 2
nknots = None
from_resolution = INSTRUMENT['RESOLUTION']
# continuum fit
order = 'median+max'
median_wave_range = 0.01
max_wave_range = 1.0
strong_lines = ispec.read_line_regions(strongLines)
continuum_model = ispec.fit_continuum(spec, \
from_resolution=from_resolution, \
nknots=nknots, \
degree=degree, \
median_wave_range=median_wave_range, \
max_wave_range=max_wave_range, \
model=model, \
order=order, \
automatic_strong_line_detection=True, \
strong_line_probability=0.5, \
use_errors_for_fitting=True)
# continuum normalisation
spec_norm = ispec.normalize_spectrum(spec, \
continuum_model, \
consider_continuum_errors=False)
return spec_norm
def measureRadialVelocityWithMask(spec, ccf_mask):
"""
Radial velocity measurement using atomic line list
Based on example.py determine_radial_velocity_with_mask() function
"""
models, ccf = ispec.cross_correlate_with_mask(spec, \
ccf_mask, \
lower_velocity_limit=-200, \
upper_velocity_limit=200, \
velocity_step=0.50, \
mask_depth=0.01, \
fourier=False)
# Number of models represent the number of components
components = len(models)
# First component:
try:
rv = np.round(models[0].mu(), 2) # km/s
rv_err = np.round(models[0].emu(), 2) # km/s
except IndexError:
        print('\n\n\nPROBLEM RV WITH MASK, SKIPPING...\n\n\n')
return 0.0, 0.0, components, models, ccf
return rv, rv_err, components, models, ccf
if __name__ == "__main__":
args = arg_parse()
data_dir = "/Users/jmcc/Dropbox/data/SONG/20180907"
os.chdir(data_dir)
fitsfiles = sorted(g.glob("*.fits"))
if args.action == 'orders':
# loop over all fits files and extract the orders and 1D spectrum
for fitsfile in fitsfiles:
fits_dir = fitsfile.split('.')[0]
if not os.path.exists(fits_dir):
os.mkdir(fits_dir)
with fits.open(fitsfile) as ff:
fluxes = ff[0].data[0]
blazes = ff[0].data[2]
waves = ff[0].data[3]
wave_out, flux_out, error_out = [], [], []
fig, ax = plt.subplots(4, figsize=(20, 20), sharex=True)
o = 1
for wave, flux, blaze in zip(waves, fluxes, blazes):
# normalise the blaze
blazen = blaze / np.average(blaze)
error = np.zeros(len(wave))
flux_corr = flux/blazen
# keep this for the final spectrum
wave_out.append(wave)
flux_out.append(flux_corr)
error_out.append(error)
# save per order files for doing the CCF
order_file = "{}/{}_o{:02d}.txt".format(fits_dir, fitsfile.split('.')[0], o)
# do nan filtering on the output
wave_filt, flux_corr_filt, error_filt = [], [], []
for nw, nf, ne in zip(wave, flux_corr, error):
if nw == nw and nf == nf and ne == ne and nf != 0:
# also divide wave by 10 to get nm
wave_filt.append(nw/10.0)
flux_corr_filt.append(nf)
error_filt.append(ne)
wave_filt = np.array(wave_filt)
flux_corr_filt = np.array(flux_corr_filt)
error_filt = np.array(error_filt)
np.savetxt(order_file,
np.c_[wave_filt, flux_corr_filt, error_filt],
fmt='%.5f\t%.4f\t%.4f',
header='Wave_nm Flux Error')
# do the plots
_ = ax[0].plot(wave, flux, 'k-')
_ = ax[0].set_ylabel('Flux', fontsize=18)
_ = ax[1].plot(wave, blaze, 'r-')
_ = ax[1].set_ylabel('Blaze Flat', fontsize=18)
_ = ax[2].plot(wave, blazen, 'r-')
_ = ax[2].set_ylabel('Blaze Flat (norm_avg)', fontsize=18)
_ = ax[3].plot(wave, flux_corr, 'g-')
_ = ax[3].set_ylabel('Flux / Blaze Flat (norm_avg)', fontsize=18)
_ = ax[3].set_xlabel('Wavelength (Angstroms)', fontsize=18)
o += 1
fig.subplots_adjust(hspace=0.0)
fig.tight_layout()
fig.savefig('{}/{}.png'.format(fits_dir, fitsfile.split('.')[0]), dpi=400)
# stack and sort the orders
wave_out = np.hstack(wave_out)
flux_out = np.hstack(flux_out)
error_out = np.hstack(error_out)
n = np.where(wave_out > 4700)[0]
wave_out = wave_out[n]
flux_out = flux_out[n]
error_out = error_out[n]
temp = zip(wave_out, flux_out, error_out)
temp = sorted(temp)
wave_out_s, flux_out_s, error_out_s = zip(*temp)
# save the full 1D spectrum
np.savetxt("{}/{}_1D.txt".format(fits_dir, fitsfile.split('.')[0]),
np.c_[wave_out_s, flux_out_s, error_out_s],
fmt='%.5f\t%.4f\t%.4f',
header='Wave_Ang Flux Error')
else:
mask_type = 'G2'
ccf_mask = ispec.read_cross_correlation_mask(atomicMaskLines[mask_type])
for fitsfile in fitsfiles:
fits_dir = fitsfile.split('.')[0]
os.chdir(fits_dir)
orders = sorted(g.glob('*.txt'))[10:-4]
fig, ax = plt.subplots(len(orders), figsize=(5, 10), sharex=True)
fig_total, ax_total = plt.subplots(2, figsize=(5, 5), sharex=True)
for i, order in enumerate(orders):
# read in the order
spec = ispec.read_spectrum(order)
print('{} Loaded...'.format(order))
spec = normaliseContinuum(spec)
print('{} Continuum normalised...'.format(order))
# measure the radial velocity using a atomic mask line list
mask_rv, mask_rv_err, mask_components, mask_models, \
mask_ccf = measureRadialVelocityWithMask(spec, ccf_mask)
ax[i].plot(mask_ccf['x'], mask_ccf['y'], 'k-', lw=1)
if i==0:
total_ccf = np.zeros(len(mask_ccf['x']))
if mask_rv != 0.0 and mask_rv_err != 0.0:
total_ccf += mask_ccf['y']
ax[len(orders)-1].set_xlabel('RV (km/s)')
fig.tight_layout()
fig.subplots_adjust(hspace=0)
fig.savefig('{}_order_ccfs.png'.format(fits_dir), dpi=300)
# fit the CCF to normalise the baseline
n = np.where(((mask_ccf['x'] < -50) | (mask_ccf['x'] > 50)))
coeffs = np.polyfit(mask_ccf['x'][n], total_ccf[n], 1)
besty = np.polyval(coeffs, mask_ccf['x'])
total_ccf_norm = total_ccf / besty
# plot the combined CCF and the final normalised version
ax_total[0].plot(mask_ccf['x'], total_ccf, 'k-', lw=1)
ax_total[0].plot(mask_ccf['x'], besty, 'r--', lw=1)
ax_total[0].set_ylabel('CCF contrast')
ax_total[1].plot(mask_ccf['x'], total_ccf_norm, 'k-', lw=1)
ax_total[1].set_ylabel('CCF contrast norm')
ax_total[1].set_xlabel('RV (km/s)')
fig_total.tight_layout()
fig_total.subplots_adjust(hspace=0.0)
fig_total.savefig('{}_total_ccf.png'.format(fits_dir), dpi=300)
# output the final CCF
np.savetxt('{}.ccf'.format(fits_dir),
np.c_[mask_ccf['x'], total_ccf_norm],
fmt='%.3f %.5f',
header='RV_kms Contrast')
os.chdir('../')
``` |
{
"source": "jmccrae/irish_saffron",
"score": 3
} |
#### File: jmccrae/irish_saffron/make_tagged_corpus.py
```python
import gzip
import xml.etree.ElementTree as ET
import json
from nltk import word_tokenize
def init_variations(word):
if len(word) <= 1:
return [word]
elif word[0] == "b":
return [word, word[0] + "h" + word[1:], "m" + word]
elif word[0] == "c":
return [word, word[0] + "h" + word[1:], "g" + word]
elif word[0] == "d":
return [word, word[0] + "h" + word[1:], "n" + word]
elif word[0] == "f":
return [word, word[0] + "h" + word[1:], "bh" + word]
elif word[0] == "g":
return [word, word[0] + "h" + word[1:], "n" + word]
elif word[0] == "m":
return [word, word[0] + "h" + word[1:]]
elif word[0] == "p":
return [word, word[0] + "h" + word[1:], "b" + word]
elif word[0] == "s" and word[1] not in ["b","c","d","f","g","l","m","n","p","t"]:
return [word, word[0] + "h" + word[1:], "t" + word]
elif word[0] == "t":
return [word, word[0] + "h" + word[1:], "d" + word]
elif word[0] in ["a","e","i","o","u"]:
return [word, "h" + word, "n-" + word]
else:
return [word]
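# Illustrative (hypothetical) examples of the initial-mutation variants produced above:
#   init_variations("bord") -> ["bord", "bhord", "mbord"]       (lenition/eclipsis of b-)
#   init_variations("obair") -> ["obair", "hobair", "n-obair"]  (prefixes for a vowel-initial word)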
def read_morphology():
tree = ET.parse("morphology.xml")
morph = {}
for entry in tree.getroot().findall("entry"):
lemma = entry.findall("src")[0].findall("scope")[0].findall("ortho")[0].findall("token")[0].text.lower()
for subentry in entry.findall("subentries"):
variants = [lemma]
for entry2 in subentry.findall("entry"):
variants.append(entry2.findall("src")[0].findall("scope")[0].findall("ortho")[0].findall("token")[0].text.lower())
for v1 in variants:
if v1 not in morph:
morph[v1] = set([])
morph[v1].add(lemma)
return morph
morph = read_morphology()
morph2 = {k2: v for k, v in morph.items() for k2 in init_variations(k) }
morph.update(morph2)
terms = [l.strip().split("\t") for l in open("term_freq-sort.tsv").readlines()]
terms = set(t[1].strip() for t in terms if len(t) >= 2)
def build_trie(words):
if len(words) == 0:
return {"":""}
else:
return {words[0]: build_trie(words[1:])}
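# For illustration with a hypothetical term: build_trie(["machine", "learning"]) yields
# {"machine": {"learning": {"": ""}}}; the empty-string key marks the end of a term.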
def trie_merge(trie1, trie2):
if trie1 == "":
return trie2
if trie2 == "":
return trie1
m = trie1
for k in trie2.keys():
if k in trie1:
m[k] = trie_merge(trie1[k], trie2[k])
else:
m[k] = trie2[k]
return m
def term_trie():
trie = {}
for term in terms:
words = term.lower().split(" ")
trie = trie_merge(trie, build_trie(words))
return trie
trie = term_trie()
def get_tags(tokens):
tags = ["O"] * len(tokens)
part_matches = []
for i, token in enumerate(tokens):
pm_new = []
for pm in part_matches:
t = trie
for x in pm:
t = t[x]
z = [t2 for t2 in morph.get(token.lower(), [token.lower()]) if t2 in t]
for z2 in z:
if z2 in t:
pm_new.append(pm + [z2])
z = [t2 for t2 in morph.get(token.lower(), [token.lower()]) if t2 in trie]
for z2 in z:
if z2 in trie:
pm_new.append([z2])
part_matches = pm_new
for pm in part_matches:
t = trie
for x in pm:
t = t[x]
if "" in t:
if tags[i - len(pm) + 1] == "O":
tags[i-len(pm)+1] = "B"
for j in range(i - len(pm)+2, i+1):
tags[j] = "I"
if len(pm) != 1:
tags[i] = "I"
return tags
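# Sketch of the resulting BIO tagging, assuming a hypothetical term list containing
# "machine learning": get_tags(["I", "like", "machine", "learning"]) -> ["O", "O", "B", "I"]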
with gzip.open("gawiki-filt.gz", "rt") as input:
line = input.readline()
while line:
doc = line.strip().split(":")
title = doc[0]
contents = ":".join(doc[1:])
tokens = word_tokenize(contents)
tags = get_tags(tokens)
print(" ".join(["%s_%s" % (token, tags[i]) for i, token in enumerate(tokens)]))
line = input.readline()
```
#### File: jmccrae/irish_saffron/term_freq.py
```python
import gzip
import xml.etree.ElementTree as ET
import json
from nltk import word_tokenize
from collections import Counter
def read_morphology():
tree = ET.parse("morphology.xml")
morph = {}
for subentry in tree.getroot().iter():
if subentry.tag == "subentries":
variants = []
for entry in subentry.findall("entry"):
variants.append(entry.findall("src")[0].findall("scope")[0].findall("ortho")[0].findall("token")[0].text.lower())
for v1 in variants:
if v1 not in morph:
morph[v1] = set([])
for v2 in variants:
morph[v1].add(v2)
return morph
morph = read_morphology()
terms = [l.strip().split("\t") for l in open("19.03.01-tearma.ie-concepts.txt").readlines()]
terms = set(t[1] for t in terms if len(t) >= 2)
def build_trie(words):
if len(words) == 0:
return {"":""}
else:
return {words[0]: build_trie(words[1:])}
def trie_merge(trie1, trie2):
if trie1 == "":
return trie2
if trie2 == "":
return trie1
m = trie1
for k in trie2.keys():
if k in trie1:
m[k] = trie_merge(trie1[k], trie2[k])
else:
m[k] = trie2[k]
return m
def term_trie():
trie = {}
for term in terms:
words = term.lower().split(" ")
trie = trie_merge(trie, build_trie(words))
return trie
trie = term_trie()
termfreqs = Counter()
def count(tokens):
part_matches = []
for i, token in enumerate(tokens):
pm_new = []
for pm in part_matches:
t = trie
for x in pm:
t = t[x]
z = [t2 for t2 in morph.get(token.lower(), [token.lower()]) if t2 in t]
if z:
pm_new.append(pm + z[0:1])
z = [t2 for t2 in morph.get(token.lower(), [token.lower()]) if t2 in trie]
if z:
pm_new.append(z[0:1])
part_matches = pm_new
for pm in part_matches:
t = trie
for x in pm:
t = t[x]
if "" in t:
termfreqs[" ".join(pm)] += 1
with gzip.open("/home/jmccrae/data/wiki/gawiki-filt.gz", "rt") as input:
line = input.readline()
while line:
doc = line.strip().split(":")
title = doc[0]
contents = ":".join(doc[1:])
tokens = word_tokenize(contents)
count(tokens)
line = input.readline()
for term, freq in termfreqs.items():
print(freq, "\t", term)
``` |
{
"source": "jmccrae/saffron",
"score": 3
} |
#### File: jmccrae/saffron/taxonomy-to-dot.py
```python
import sys
import json
def gen_label(taxo, topic_wts):
if "root" in taxo:
if taxo["root"] in topic_wts:
print(" \"%s\" [ weight=%.4f ];" % (taxo["root"], topic_wts[taxo["root"]]))
else:
print(" \"%s\";" % (taxo["root"]))
if "children" in taxo:
for child in taxo["children"]:
gen_label(child, topic_wts)
def gen_link(taxo):
if "root" in taxo and "children" in taxo:
src = taxo["root"]
for child in taxo["children"]:
trg = child["root"]
print(" \"" + src + "\" -> \"" + trg + "\";")
gen_link(child)
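# Hypothetical illustration: a taxonomy {"root": "science", "children": [{"root": "physics"}]}
# with no topic weights produces DOT lines like
#   "science";
#   "physics";
#   "science" -> "physics";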
def main(args):
taxonomy = json.load(open(args[0]))
if len(args) >= 2:
topics = json.load(open(args[1]))
topic_wts = { x["topic_string"]: x["score"] for x in topics }
else:
topic_wts = {}
print("digraph G {")
gen_label(taxonomy, topic_wts)
gen_link(taxonomy)
print("}")
if __name__ == "__main__":
main(sys.argv[1:])
``` |
{
"source": "jmccrae/zrtifi",
"score": 2
} |
#### File: zrtifi/analyzers/gzip.py
```python
import gzip
import sys
from subprocess import Popen, PIPE
from os.path import exists
from uuid import uuid4
step_id = str(uuid4())
def format_err(lines):
for line in lines:
yield line.strip()
ZRTIFI_ONTOLOGY = "http://www.zrtifi.org/ontology#"
if __name__ == "__main__":
file = sys.argv[1]
if exists(file):
if file.endswith(".gz"):
p = Popen(["gunzip", file], stderr=PIPE)
p.wait()
print("<> <%sstep> <#gzip_step_%s> ." % (ZRTIFI_ONTOLOGY, step_id))
print("<#gzip_step_%s> <%sprocess> \"gzip\" ." % (step_id, ZRTIFI_ONTOLOGY))
if p.returncode == 0:
print("<#gzip_step_%s> <%sstatus> <http://www.zrtifi.org/ontology#success> ." % (step_id, ZRTIFI_ONTOLOGY))
print("<#file_%s> <http://www.zrtifi.org/internal#next> <sniff> ." % step_id)
print("<#file_%s> <http://www.zrtifi.org/internal#nextTarget> <%s> ." % (step_id, file[:-3]))
print("<> <%scontains> <#file_%s> ." % (ZRTIFI_ONTOLOGY, step_id))
print("<#file_%s> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www.w3.org/ns/dcat#Distribution> ." % (step_id))
else:
print("<#gzip_step_%s> <%sstatus> <http://www.zrtifi.org/ontology#error> ." % (step_id, ZRTIFI_ONTOLOGY))
print("<#gzip_step_%s> <%sstatus> \"%s\" ." % (step_id, ZRTIFI_ONTOLOGY,
"\\n".join(format_err(p.stderr.readlines()))))
else:
print("<#gzip_step_%s> <%serror> \"file does not end in .gz\"@en ." % (step_id, ZRTIFI_ONTOLOGY))
print("<#gzip_step_%s> <%sstatus> <http://www.zrtifi.org/ontology#failed> ." % (step_id, ZRTIFI_ONTOLOGY))
else:
print("<#gzip_step_%s> <%sstatus> <http://www.zrtifi.org/ontology#failed> ." % (step_id, ZRTIFI_ONTOLOGY))
print("<#gzip_step_%s> <%serror> \"file does not exist\"@en . " % (step_id, ZRTIFI_ONTOLOGY))
```
#### File: zrtifi/analyzers/xml.py
```python
from subprocess import Popen, PIPE
from cStringIO import StringIO
import sys
from uuid import uuid4
step_id = str(uuid4())
ZRTIFI_ONTOLOGY = "http://www.zrtifi.org/ontology#"
def format_err(lines):
for line in lines:
yield line.strip()
if __name__ == "__main__":
p = Popen(["xmllint","--noouti","--loaddtd ",sys.argv[1]], stdout=PIPE, stderr=PIPE)
p.wait()
print("<> <%sstep> <#xml_step_%s> ." % (ZRTIFI_ONTOLOGY, step_id))
print("<#xml_step_%s> <%sprocess> \"xml\" ." % (step_id, ZRTIFI_ONTOLOGY))
if p.returncode == 0:
print("<#xml_step_%s> <%sstatus> <%ssuccess> ." % (step_id, ZRTIFI_ONTOLOGY, ZRTIFI_ONTOLOGY))
elif p.returncode >= 1:
print("<#xml_step_%s> <%sstatus> <%serror> ." % (step_id, ZRTIFI_ONTOLOGY, ZRTIFI_ONTOLOGY))
errmsg = "\\n".join(format_err(p.stderr.readlines()))
print("<#xml_step_%s> <%serror> \"%s\" ." % (step_id, ZRTIFI_ONTOLOGY, errmsg))
if p.returncode == 1:
print("<#xml_step_%s> <%serror> \"Unclassified\" ." % (step_id, ZRTIFI_ONTOLOGY))
if p.returncode == 2:
print("<#xml_step_%s> <%serror> \"Error in DTD\" ." % (step_id, ZRTIFI_ONTOLOGY))
if p.returncode == 3:
print("<#xml_step_%s> <%serror> \"Validation error\" ." % (step_id, ZRTIFI_ONTOLOGY))
if p.returncode == 4:
print("<#xml_step_%s> <%serror> \"Validation error\" ." % (step_id, ZRTIFI_ONTOLOGY))
if p.returncode == 5:
print("<#xml_step_%s> <%serror> \"Error in schema compilation\" ." % (step_id, ZRTIFI_ONTOLOGY))
if p.returncode == 6:
print("<#xml_step_%s> <%serror> \"Error writing output\" ." % (step_id, ZRTIFI_ONTOLOGY))
if p.returncode == 7:
print("<#xml_step_%s> <%serror> \"Error in pattern (generated when --pattern option is used)\" ." % (step_id, ZRTIFI_ONTOLOGY))
if p.returncode == 8:
print("<#xml_step_%s> <%serror> \"Error in Reader registration (generated when --chkregister option is used)\" ." % (step_id, ZRTIFI_ONTOLOGY))
if p.returncode == 9:
print("<#xml_step_%s> <%serror> \"Out of memory error\" ." % (step_id, ZRTIFI_ONTOLOGY))
``` |
{
"source": "jmccreight/WrfHydroForcing",
"score": 2
} |
#### File: WrfHydroForcing/core/bias_correction.py
```python
import math
from math import tau as TWO_PI
import os
import random
import time
import ESMF
import numpy as np
from netCDF4 import Dataset
from core import err_handler
PARAM_NX = 384
PARAM_NY = 190
NumpyExceptions = (IndexError, ValueError, AttributeError, ArithmeticError)
# These come from the netCDF4 module, but they're not exported via __all__ so we put them here:
default_fillvals = {'S1': '\x00', 'i1': -127, 'u1': 255, 'i2': -32767, 'u2': 65535, 'i4': -2147483647,
'u4': 4294967295, 'i8': -9223372036854775806, 'u8': 18446744073709551614,
'f4': 9.969209968386869e+36, 'f8': 9.969209968386869e+36}
def run_bias_correction(input_forcings, config_options, geo_meta_wrf_hydro, mpi_config):
"""
Top level calling routine for initiating bias correction on
this particular input forcing. This is called prior to downscaling,
but after regridding.
:param mpi_config:
:param geo_meta_wrf_hydro:
:param input_forcings:
:param config_options:
:return:
"""
# Dictionary for mapping to temperature bias correction.
bias_correct_temperature = {
0: no_bias_correct,
1: cfsv2_nldas_nwm_bias_correct,
2: ncar_tbl_correction,
3: ncar_temp_gfs_bias_correct,
4: ncar_temp_hrrr_bias_correct
}
bias_correct_temperature[input_forcings.t2dBiasCorrectOpt](input_forcings, config_options, mpi_config, 0)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary for mapping to humidity bias correction.
bias_correct_humidity = {
0: no_bias_correct,
1: cfsv2_nldas_nwm_bias_correct,
2: ncar_tbl_correction
}
bias_correct_humidity[input_forcings.q2dBiasCorrectOpt](input_forcings, config_options, mpi_config, 1)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary for mapping to surface pressure bias correction.
bias_correct_pressure = {
0: no_bias_correct,
1: cfsv2_nldas_nwm_bias_correct
}
bias_correct_pressure[input_forcings.psfcBiasCorrectOpt](input_forcings, config_options, mpi_config, 7)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary for mapping to incoming shortwave radiation correction.
bias_correct_sw = {
0: no_bias_correct,
1: cfsv2_nldas_nwm_bias_correct,
2: ncar_sw_hrrr_bias_correct
}
if input_forcings.swBiasCorrectOpt != 2:
bias_correct_sw[input_forcings.swBiasCorrectOpt](input_forcings, config_options, mpi_config, 5)
else:
bias_correct_sw[input_forcings.swBiasCorrectOpt](input_forcings, geo_meta_wrf_hydro,
config_options, mpi_config, 5)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary for mapping to incoming longwave radiation correction.
bias_correct_lw = {
0: no_bias_correct,
1: cfsv2_nldas_nwm_bias_correct,
2: ncar_blanket_adjustment_lw,
3: ncar_lwdown_gfs_bias_correct
}
bias_correct_lw[input_forcings.lwBiasCorrectOpt](input_forcings, config_options, mpi_config, 6)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary for mapping to wind bias correction.
bias_correct_wind = {
0: no_bias_correct,
1: cfsv2_nldas_nwm_bias_correct,
2: ncar_tbl_correction,
3: ncar_wspd_gfs_bias_correct,
4: ncar_wspd_hrrr_bias_correct
}
# Run for U-Wind
bias_correct_wind[input_forcings.windBiasCorrectOpt](input_forcings, config_options, mpi_config, 2)
err_handler.check_program_status(config_options, mpi_config)
# Run for V-Wind
bias_correct_wind[input_forcings.windBiasCorrectOpt](input_forcings, config_options, mpi_config, 3)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary for mapping to precipitation bias correction.
bias_correct_precip = {
0: no_bias_correct,
1: cfsv2_nldas_nwm_bias_correct
}
bias_correct_precip[input_forcings.precipBiasCorrectOpt](input_forcings, config_options, mpi_config, 4)
err_handler.check_program_status(config_options, mpi_config)
# Assign the temperature/pressure grids to temporary placeholders here.
# these will be used if 2-meter specific humidity is downscaled.
if input_forcings.q2dDownscaleOpt != 0:
input_forcings.t2dTmp = input_forcings.final_forcings[4, :, :] * 1.0
input_forcings.psfcTmp = input_forcings.final_forcings[6, :, :] * 1.0
else:
input_forcings.t2dTmp = None
input_forcings.psfcTmp = None
def no_bias_correct(input_forcings, config_options, mpi_config, force_num):
"""
Generic routine to simply pass forcing states through without any
bias correction.
:param mpi_config:
:param input_forcings:
:param config_options:
:param force_num:
:return:
"""
try:
input_forcings.final_forcings[force_num, :, :] = input_forcings.final_forcings[force_num, :, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to set final forcings during bias correction routine: " + str(npe)
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
def ncar_tbl_correction(input_forcings, config_options, mpi_config, force_num):
"""
Generic NCAR bias correction for forcings based on the forecast hour. A lookup table
is used for each different forcing variable. NOTE!!!! - This is based on HRRRv3 analysis
and should be used with extreme caution.
:param input_forcings:
:param config_options:
:param mpi_config:
:param force_num:
:return:
"""
# Establish lookup tables for each forcing, for each forecast hour.
adj_tbl = {
0: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0],
1: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0],
2: [0.35, 0.18, 0.15, 0.13, 0.12, 0.11, 0.10, 0.08, 0.07, 0.06, 0.05, 0.03, 0.02, 0.01,
-0.01, -0.02, -0.03, -0.4, -0.05],
3: [0.35, 0.18, 0.15, 0.13, 0.12, 0.11, 0.10, 0.08, 0.07, 0.06, 0.05, 0.03, 0.02, 0.01,
-0.01, -0.02, -0.03, -0.4, -0.05]
}
if mpi_config.rank == 0:
config_options.statusMsg = "Performing table lookup bias correction for: " + \
input_forcings.netcdf_var_names[force_num] + " For input: " + \
input_forcings.productName
err_handler.log_msg(config_options, mpi_config)
# First check to make sure we are within the accepted forecast range per the above table. For now, this
# bias correction only applies to the first 18 forecast hours.
if int(input_forcings.fcst_hour2) > 18:
config_options.statusMsg = "Current forecast hour for: " + input_forcings.productName + \
" is greater than allowed forecast range of 18 for table lookup bias correction."
err_handler.log_warning(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Extract local array of values to perform adjustment on.
force_tmp = None
try:
force_tmp = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract: " + input_forcings.netcdf_var_names[force_num] + \
" from local forcing object for: " + input_forcings.productName + \
"(" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
ind_valid = None
try:
ind_valid = np.where(force_tmp != config_options.globalNdv)
except NumpyExceptions as npe:
config_options.errMsg = "Unable to perform valid search for: " + input_forcings.netcdf_var_names[force_num] + \
" from local forcing object for: " + input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Apply the bias correction adjustment based on the current forecast hour.
try:
force_tmp[ind_valid] = force_tmp[ind_valid] + adj_tbl[force_num][int(input_forcings.fcst_hour2)]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to apply table bias correction for: " + \
input_forcings.netcdf_var_names[force_num] + \
" from local forcing object for: " + input_forcings.productName + \
" (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = force_tmp[:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to place temporary LW array back into forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Reset temporary variables to keep low memory footprint.
del force_tmp
del ind_valid
def ncar_blanket_adjustment_lw(input_forcings, config_options, mpi_config, force_num):
"""
Generic NCAR bias correction for incoming longwave radiation fluxes. NOTE!!! - This is based
off HRRRv3 analysis and should be used with extreme caution.....
:param input_forcings:
:param config_options:
:param mpi_config:
:param force_num:
:return:
"""
if mpi_config.rank == 0:
config_options.statusMsg = "Performing blanket bias correction on incoming longwave " \
"radiation fluxes for input: " + \
input_forcings.productName
err_handler.log_msg(config_options, mpi_config)
# Establish blanket adjustment to apply across the board in W/m^2
adj_lw = 9.0
# Perform adjustment.
lw_tmp = None
try:
lw_tmp = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract incoming LW from forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
ind_valid = None
try:
ind_valid = np.where(lw_tmp != config_options.globalNdv)
except NumpyExceptions as npe:
config_options.errMsg = "Unable to calculate valid index in incoming LW for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
lw_tmp[ind_valid] = lw_tmp[ind_valid] + adj_lw
except NumpyExceptions as npe:
config_options.errMsg = "Unable to perform LW bias correction for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = lw_tmp[:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to place temporary LW array back into forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Reset temporary variables to keep low memory footprint.
del lw_tmp
del ind_valid
def ncar_sw_hrrr_bias_correct(input_forcings, geo_meta_wrf_hydro, config_options, mpi_config, force_num):
"""
Function to implement a bias correction to the forecast incoming shortwave radiation fluxes.
NOTE!!!! - This bias correction is based on in-situ analysis performed against HRRRv3
fields. It's high discouraged to use this for any other NWP products, or even with the HRRR
changes versions in the future.
:param input_forcings:
:param geo_meta_wrf_hydro:
:param config_options:
:param mpi_config:
:param force_num:
:return:
"""
if mpi_config.rank == 0:
config_options.statusMsg = "Performing NCAR bias correction on incoming shortwave " \
"radiation fluxes for input: " + \
input_forcings.productName
err_handler.log_msg(config_options, mpi_config)
# Establish constant parameters. NOTE!!! - These will change with future HRRR upgrades.
c1 = -0.159
c2 = -0.077
# Establish current datetime information, along wth solar constants.
f_hr = input_forcings.fcst_hour2
# For now, hard-coding the total number of forecast hours to be 18, since we
# are assuming this is HRRR
n_fcst_hr = 18
# Trig params
d2r = math.pi / 180.0
r2d = 180.0 / math.pi
date_current = config_options.current_output_date
hh = float(date_current.hour)
mm = float(date_current.minute)
ss = float(date_current.second)
doy = float(time.strptime(date_current.strftime('%Y.%m.%d'), '%Y.%m.%d').tm_yday)
frac_year = 2.0 * math.pi / 365.0 * (doy - 1.0 + (hh / 24.0) + (mm / 1440.0) + (ss / 86400.0))
    # eqtime is the equation of time: the difference in minutes between true solar time and mean solar time.
# This difference is due to Earth's elliptical orbit around the sun.
eqtime = 229.18 * (0.000075 + 0.001868 * math.cos(frac_year) - 0.032077 * math.sin(frac_year) -
0.014615 * math.cos(2.0 * frac_year) - 0.040849 * math.sin(2.0 * frac_year))
# decl is the solar declination angle in radians: how much the Earth is tilted toward or away from the sun
decl = 0.006918 - 0.399912 * math.cos(frac_year) + 0.070257 * math.sin(frac_year) - \
0.006758 * math.cos(2.0 * frac_year) + 0.000907 * math.sin(2.0 * frac_year) - \
0.002697 * math.cos(3.0 * frac_year) + 0.001480 * math.sin(3.0 * frac_year)
# Create temporary grids for calculating the solar zenith angle, which will be used in the bias correction.
# time offset in minutes from the prime meridian
time_offset = eqtime + 4.0 * geo_meta_wrf_hydro.longitude_grid
# tst is the true solar time: the number of minutes since solar midnight
tst = hh * 60.0 + mm + ss / 60.0 + time_offset
# solar hour angle in radians: the amount the sun is off from due south
ha = d2r * ((tst / 4.0) - 180.0)
# solar zenith angle is the angle between straight up and the center of the sun's disc
# the cosine of the sol_zen_ang is proportional to the solar intensity
# (not accounting for humidity or cloud cover)
sol_zen_ang = r2d * np.arccos(np.sin(geo_meta_wrf_hydro.latitude_grid * d2r) * math.sin(decl) +
np.cos(geo_meta_wrf_hydro.latitude_grid * d2r) * math.cos(decl) * np.cos(ha))
# Check for any values greater than 90 degrees.
sol_zen_ang[np.where(sol_zen_ang > 90.0)] = 90.0
# Extract the current incoming shortwave field from the forcing object and set it to
# a local grid. We will perform the bias correction on this grid, based on forecast
# hour and datetime information. Once a correction has taken place, we will place
# the corrected field back into the forcing object.
sw_tmp = None
try:
sw_tmp = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract incoming shortwave forcing from object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Calculate where we have valid values.
ind_valid = None
try:
ind_valid = np.where(sw_tmp != config_options.globalNdv)
except NumpyExceptions as npe:
config_options.errMsg = "Unable to run a search for valid SW values for: " + input_forcings.productName + \
" (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Perform the bias correction.
try:
# The second half of this calculation below is the actual calculation of the incoming SW bias, which is then
# added (or subtracted if negative) to the original values.
sw_tmp[ind_valid] = sw_tmp[ind_valid] + \
(c1 + (c2 * ((f_hr - 1) / (n_fcst_hr - 1)))) * np.cos(sol_zen_ang[ind_valid] * d2r) * \
sw_tmp[ind_valid]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to apply NCAR HRRR bias correction to incoming shortwave radiation: " + \
str(npe)
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Place updated states back into the forcing object.
try:
input_forcings.final_forcings[7, :, :] = sw_tmp[:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to place bias-corrected incoming SW radiation fluxes back into the forcing " \
"object: " + str(npe)
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Reset variables to keep memory footprints low.
del sw_tmp
del time_offset
del tst
del ha
del sol_zen_ang
del ind_valid
def ncar_temp_hrrr_bias_correct(input_forcings, config_options, mpi_config, force_num):
if mpi_config.rank == 0:
config_options.statusMsg = "Performing NCAR HRRR bias correction on incoming 2m temperature input: " + \
input_forcings.productName
err_handler.log_msg(config_options, mpi_config)
date_current = config_options.current_output_date
hh = float(date_current.hour)
MM= float(date_current.month)
# determine if we're in AnA or SR configuration
if config_options.ana_flag == 1:
net_bias_AA = 0.13
diurnal_ampl_AA = -0.18
diurnal_offs_AA = 2.2
monthly_ampl_AA = -0.15
monthly_offs_AA = -2.0
bias_corr = net_bias_AA + diurnal_ampl_AA * math.sin(diurnal_offs_AA + hh / 24 * 2 * math.pi) + \
monthly_ampl_AA * math.sin(monthly_offs_AA + MM / 12 * 2*math.pi)
else:
net_bias_SR = 0.060
diurnal_ampl_SR = -0.31
diurnal_offs_SR = 2.2
monthly_ampl_SR = -0.21
monthly_offs_SR = -2.4
fhr_mult_SR = -0.016
fhr = config_options.current_output_step
bias_corr = net_bias_SR + fhr * fhr_mult_SR + \
diurnal_ampl_SR * math.sin(diurnal_offs_SR + hh / 24 * 2*math.pi) + \
monthly_ampl_SR * math.sin(monthly_offs_SR + MM / 12 * 2*math.pi)
# if mpi_config.rank == 0:
# config_options.statusMsg = f"\tAnAFlag = {config_options.ana_flag} {bias_corr}"
# err_handler.log_msg(config_options, mpi_config)
temp_in = None
try:
temp_in = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract incoming temperature from forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
ind_valid = None
try:
ind_valid = np.where(temp_in != config_options.globalNdv)
except NumpyExceptions as npe:
config_options.errMsg = "Unable to calculate valid index in incoming temperature for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
temp_in[ind_valid] = temp_in[ind_valid] + bias_corr
except NumpyExceptions as npe:
config_options.errMsg = "Unable to perform temperature bias correction for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = temp_in[:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to place temporary temperature array back into forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
del temp_in
del ind_valid
def ncar_temp_gfs_bias_correct(input_forcings, config_options, mpi_config, force_num):
if mpi_config.rank == 0:
config_options.statusMsg = "Performing NCAR GFS bias correction on incoming 2m temperature input: " + \
input_forcings.productName
err_handler.log_msg(config_options, mpi_config)
date_current = config_options.current_output_date
hh = float(date_current.hour)
net_bias_mr = -0.18
fhr_mult_mr = 0.002
diurnal_ampl_mr = -1.4
diurnal_offs_mr = -2.1
fhr = config_options.current_output_step
bias_corr = net_bias_mr + fhr_mult_mr * fhr + diurnal_ampl_mr * math.sin(diurnal_offs_mr + hh / 24 * TWO_PI)
temp_in = None
try:
temp_in = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract incoming temperature from forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
ind_valid = None
try:
ind_valid = np.where(temp_in != config_options.globalNdv)
except NumpyExceptions as npe:
config_options.errMsg = "Unable to calculate valid index in incoming temperature for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
temp_in[ind_valid] = temp_in[ind_valid] + bias_corr
except NumpyExceptions as npe:
config_options.errMsg = "Unable to perform temperature bias correction for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = temp_in[:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to place temporary temperature array back into forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
del temp_in
del ind_valid
def ncar_lwdown_gfs_bias_correct(input_forcings, config_options, mpi_config, force_num):
if mpi_config.rank == 0:
config_options.statusMsg = "Performing NCAR bias correction on incoming longwave " \
"radiation fluxes for input: " + \
input_forcings.productName
err_handler.log_msg(config_options, mpi_config)
date_current = config_options.current_output_date
hh = float(date_current.hour)
fhr = config_options.current_output_step
lwdown_net_bias_mr = 9.9
lwdown_fhr_mult_mr = 0.00
lwdown_diurnal_ampl_mr = -1.5
lwdown_diurnal_offs_mr = 2.8
bias_corr = lwdown_net_bias_mr + lwdown_fhr_mult_mr * fhr + lwdown_diurnal_ampl_mr * \
math.sin(lwdown_diurnal_offs_mr + hh / 24 * TWO_PI)
lwdown_in = None
try:
lwdown_in = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract incoming longwave from forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
ind_valid = None
try:
ind_valid = np.where(lwdown_in != config_options.globalNdv)
except NumpyExceptions as npe:
config_options.errMsg = "Unable to calculate valid index in incoming longwave for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
lwdown_in[ind_valid] = lwdown_in[ind_valid] + bias_corr
except NumpyExceptions as npe:
config_options.errMsg = "Unable to perform longwave bias correction for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = lwdown_in[:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to place temporary longwave array back into forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
del lwdown_in
del ind_valid
def ncar_wspd_hrrr_bias_correct(input_forcings, config_options, mpi_config, force_num):
if mpi_config.rank == 0:
config_options.statusMsg = "Performing NCAR bias correction on incoming windspeed for input: " + \
input_forcings.productName + " at step " + str(config_options.current_output_step)
err_handler.log_msg(config_options, mpi_config)
date_current = config_options.current_output_date
hh = float(date_current.hour)
fhr = config_options.current_output_step
# need to get wind speed from U, V components
ugrd_idx = input_forcings.grib_vars.index('UGRD')
vgrd_idx = input_forcings.grib_vars.index('VGRD')
ugrid_in = input_forcings.final_forcings[input_forcings.input_map_output[ugrd_idx], :, :]
vgrid_in = input_forcings.final_forcings[input_forcings.input_map_output[vgrd_idx], :, :]
wdir = np.arctan2(vgrid_in, ugrid_in)
wspd = np.sqrt(np.square(ugrid_in) + np.square(vgrid_in))
if config_options.ana_flag:
wspd_bias_corr = 0.35 # fixed for AnA
else:
wspd_net_bias_sr = [0.18, 0.15, 0.13, 0.12, 0.11, 0.10, 0.08, 0.07, 0.06, 0.05,
0.03, 0.02, 0.01, -0.01, -0.02, -0.03, -0.04, -0.05]
wspd_bias_corr = wspd_net_bias_sr[config_options.current_output_step - 1]
wspd = wspd + wspd_bias_corr
wspd = np.where(wspd < 0, 0, wspd)
ugrid_out = wspd * np.cos(wdir)
vgrid_out = wspd * np.sin(wdir)
# TODO: cache the "other" value so we don't repeat this calculation unnecessarily
bias_corrected = ugrid_out if force_num == ugrd_idx else vgrid_out
wind_in = None
try:
wind_in = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract incoming windspeed from forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
ind_valid = None
try:
ind_valid = np.where(wind_in != config_options.globalNdv)
except NumpyExceptions as npe:
config_options.errMsg = "Unable to calculate valid index in incoming windspeed for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
wind_in[ind_valid] = bias_corrected[ind_valid]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to perform windspeed bias correction for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = wind_in[:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to place temporary windspeed array back into forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
del wind_in
del ind_valid
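# --- Editor's note: illustrative sketch only, not called by the forcing engine. ---
# The HRRR and GFS wind-speed corrections both decompose U/V into a speed and a direction,
# apply the additive bias to the speed only (floored at zero), and rebuild U and V so the
# direction is unchanged. A hypothetical restatement of that round trip:
def _example_wspd_bias_correct(ugrid, vgrid, bias):
    """Apply an additive speed bias to U/V wind arrays while preserving wind direction."""
    wdir = np.arctan2(vgrid, ugrid)                      # wind direction (radians)
    wspd = np.sqrt(np.square(ugrid) + np.square(vgrid))  # wind speed
    wspd = np.maximum(wspd + bias, 0.0)                  # corrected speed, clipped at zero
    return wspd * np.cos(wdir), wspd * np.sin(wdir)      # corrected (U, V)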
def ncar_wspd_gfs_bias_correct(input_forcings, config_options, mpi_config, force_num):
if mpi_config.rank == 0:
config_options.statusMsg = "Performing NCAR bias correction on incoming windspeed for input: " + \
input_forcings.productName
err_handler.log_msg(config_options, mpi_config)
date_current = config_options.current_output_date
hh = float(date_current.hour)
fhr = config_options.current_output_step
wspd_net_bias_mr = -0.20
wspd_fhr_mult_mr = 0.00
wspd_diurnal_ampl_mr = -0.32
wspd_diurnal_offs_mr = -1.1
# need to get wind speed from U, V components
ugrd_idx = input_forcings.grib_vars.index('UGRD')
vgrd_idx = input_forcings.grib_vars.index('VGRD')
ugrid_in = input_forcings.final_forcings[input_forcings.input_map_output[ugrd_idx], :, :]
vgrid_in = input_forcings.final_forcings[input_forcings.input_map_output[vgrd_idx], :, :]
wdir = np.arctan2(vgrid_in, ugrid_in)
wspd = np.sqrt(np.square(ugrid_in) + np.square(vgrid_in))
wspd_bias_corr = wspd_net_bias_mr + wspd_fhr_mult_mr * fhr + \
wspd_diurnal_ampl_mr * math.sin(wspd_diurnal_offs_mr + hh / 24 * TWO_PI)
wspd = wspd + wspd_bias_corr
wspd = np.where(wspd < 0, 0, wspd)
ugrid_out = wspd * np.cos(wdir)
vgrid_out = wspd * np.sin(wdir)
# TODO: cache the "other" value so we don't repeat this calculation unnecessarily
bias_corrected = ugrid_out if force_num == ugrd_idx else vgrid_out
wind_in = None
try:
wind_in = input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract incoming windspeed from forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
ind_valid = None
try:
ind_valid = np.where(wind_in != config_options.globalNdv)
except NumpyExceptions as npe:
config_options.errMsg = "Unable to calculate valid index in incoming windspeed for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
wind_in[ind_valid] = bias_corrected[ind_valid]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to perform windspeed bias correction for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = wind_in[:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to place temporary windspeed array back into forcing object for: " + \
input_forcings.productName + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
del wind_in
del ind_valid
def cfsv2_nldas_nwm_bias_correct(input_forcings, config_options, mpi_config, force_num):
"""
    Routine to run parametric CDF/PDF bias corrections
    SPECIFIC to the NWM long-range configuration.
:param mpi_config:
:param input_forcings:
:param config_options:
:param force_num:
:return:
"""
# TODO: move these into a (.py or .json) configuration file
# Create a dictionary that maps forcing numbers to the expected NetCDF variable names, etc.
nldas_param1_vars = {
2: 'UGRD10M_PARAM_1',
3: 'VGRD10M_PARAM_1',
6: 'LW_PARAM_1',
4: 'PRATE_PARAM_1',
0: 'T2M_PARAM_1',
1: 'Q2M_PARAM_1',
7: 'PSFC_PARAM_1',
5: 'SW_PARAM_1'
}
nldas_param2_vars = {
2: 'UGRD10M_PARAM_2',
3: 'VGRD10M_PARAM_2',
6: 'LW_PARAM_2',
4: 'PRATE_PARAM_2',
0: 'T2M_PARAM_2',
1: 'Q2M_PARAM_2',
7: 'PSFC_PARAM_2',
5: 'SW_PARAM_2'
}
cfs_param_path_vars = {
2: 'ugrd',
3: 'vgrd',
6: 'dlwsfc',
4: 'prate',
0: 'tmp2m',
1: 'q2m',
7: 'pressfc',
5: 'dswsfc'
}
# Specify the min/max ranges on CDF/PDF values for each variable
val_range1 = {
2: -50.0,
3: -50.0,
6: 1.0,
4: 0.01,
0: 200.0,
1: 0.01,
7: 50000.0,
5: 0.0
}
val_range2 = {
2: 50.0,
3: 50.0,
6: 800.0,
4: 100.0,
0: 330.0,
1: 40.0,
7: 1100000.0,
5: 0.0
}
val_bins = {
2: 1000,
3: 1000,
6: 4000,
4: 2000,
0: 1300,
1: 1000,
7: 3000,
5: 0
}
if mpi_config.rank == 0:
config_options.statusMsg = "Running NLDAS-CFSv2 CDF/PDF bias correction on variable: " + \
input_forcings.netcdf_var_names[force_num]
err_handler.log_msg(config_options, mpi_config)
# Check to ensure we are running with CFSv2 here....
if input_forcings.productName != "CFSv2_6Hr_Global_GRIB2":
config_options.errMsg = "Attempting to run CFSv2-NLDAS bias correction on: " + input_forcings.productName
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Open the necessary parameter grids, which are on the global CFSv2 grid, then scatter them out
# to the various processors.
id_nldas_param = nldas_param_file = None
if mpi_config.rank == 0:
nldas_param_file = input_forcings.paramDir + "/NLDAS_Climo/nldas2_" + \
config_options.current_output_date.strftime('%m%d%H') + \
"_dist_params.nc"
if not os.path.isfile(nldas_param_file):
config_options.errMsg = "Unable to locate necessary bias correction parameter file: " + \
nldas_param_file
err_handler.log_critical(config_options, mpi_config)
# Open the NetCDF file.
try:
id_nldas_param = Dataset(nldas_param_file, 'r')
except OSError as err:
config_options.errMsg = "Unable to open parameter file: " + nldas_param_file + " (" + str(err) + ")"
err_handler.log_critical(config_options, mpi_config)
raise err
# Ensure dimensions/variables are as expected.
if 'lat_0' not in id_nldas_param.dimensions.keys():
config_options.errMsg = "Expected to find lat_0 dimension in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if 'lon_0' not in id_nldas_param.dimensions.keys():
config_options.errMsg = "Expected to find lon_0 dimension in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if id_nldas_param.dimensions['lat_0'].size != PARAM_NY:
config_options.errMsg = "Expected lat_0 size is {} - found size of: ".format(PARAM_NY) + \
str(id_nldas_param.dimensions['lat_0'].size) + " in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if id_nldas_param.dimensions['lon_0'].size != PARAM_NX:
config_options.errMsg = "Expected lon_0 size is {} - found size of: ".format(PARAM_NX) + \
str(id_nldas_param.dimensions['lon_0'].size) + " in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if nldas_param1_vars[force_num] not in id_nldas_param.variables.keys():
config_options.errMsg = "Expected variable: " + nldas_param1_vars[force_num] + " not found " + \
"in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if nldas_param2_vars[force_num] not in id_nldas_param.variables.keys():
config_options.errMsg = "Expected variable: " + nldas_param2_vars[force_num] + " not found " + \
"in: " + nldas_param_file
err_handler.log_critical(config_options, mpi_config)
if force_num == 4:
if 'ZERO_PRECIP_PROB' not in id_nldas_param.variables.keys():
config_options.errMsg = "Expected variable: ZERO_PRECIP_PROB not found in: " + \
nldas_param_file
err_handler.log_critical(config_options, mpi_config)
nldas_param_1 = None
try:
nldas_param_1 = id_nldas_param.variables[nldas_param1_vars[force_num]][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract: " + nldas_param1_vars[force_num] + \
" from: " + nldas_param_file + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
nldas_param_2 = None
try:
nldas_param_2 = id_nldas_param.variables[nldas_param2_vars[force_num]][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract: " + nldas_param2_vars[force_num] + \
" from: " + nldas_param_file + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
if nldas_param_1.shape[0] != PARAM_NY or nldas_param_1.shape[1] != PARAM_NX:
config_options.errMsg = "Parameter variable: " + nldas_param1_vars[force_num] + " from: " + \
nldas_param_file + " not of shape [{},{}].".format(PARAM_NY, PARAM_NX)
err_handler.log_critical(config_options, mpi_config)
if nldas_param_2.shape[0] != PARAM_NY or nldas_param_2.shape[1] != PARAM_NX:
config_options.errMsg = "Parameter variable: " + nldas_param2_vars[force_num] + " from: " + \
nldas_param_file + " not of shape [{},{}].".format(PARAM_NY, PARAM_NX)
err_handler.log_critical(config_options, mpi_config)
# Extract the fill value
fill_tmp = None
try:
try:
fill_tmp = id_nldas_param.variables[nldas_param1_vars[force_num]].getncattr('_FillValue')
except AttributeError:
fill_tmp = default_fillvals[id_nldas_param.variables[nldas_param1_vars[force_num]].dtype.str[1:]]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract _FillValue from: " + nldas_param_file + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
# Read in the zero precip prob grids if we are bias correcting precipitation.
nldas_zero_pcp = None
if force_num == 4:
try:
nldas_zero_pcp = id_nldas_param.variables['ZERO_PRECIP_PROB'][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract ZERO_PRECIP_PROB from: " + nldas_param_file + \
" (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
if nldas_zero_pcp.shape[0] != PARAM_NY or nldas_zero_pcp.shape[1] != PARAM_NX:
config_options.errMsg = "Parameter variable: ZERO_PRECIP_PROB from: " + nldas_param_file + \
" not of shape [{},{}].".format(PARAM_NY, PARAM_NX)
err_handler.log_critical(config_options, mpi_config)
# Set missing values accordingly.
nldas_param_1[np.where(nldas_param_1 == fill_tmp)] = config_options.globalNdv
        nldas_param_2[np.where(nldas_param_2 == fill_tmp)] = config_options.globalNdv
if force_num == 4:
nldas_zero_pcp[np.where(nldas_zero_pcp == fill_tmp)] = config_options.globalNdv
# Params are y-mirrored compared to the way WGRIB2 produces output
nldas_param_1 = np.flip(nldas_param_1, axis=0)
nldas_param_2 = np.flip(nldas_param_2, axis=0)
if nldas_zero_pcp is not None:
nldas_zero_pcp = np.flip(nldas_zero_pcp, axis=0)
else:
nldas_param_1 = None
nldas_param_2 = None
nldas_zero_pcp = None
err_handler.check_program_status(config_options, mpi_config)
# Scatter NLDAS parameters
nldas_param_1_sub = mpi_config.scatter_array(input_forcings, nldas_param_1, config_options)
err_handler.check_program_status(config_options, mpi_config)
nldas_param_2_sub = mpi_config.scatter_array(input_forcings, nldas_param_2, config_options)
err_handler.check_program_status(config_options, mpi_config)
if force_num == 4:
nldas_zero_pcp_sub = mpi_config.scatter_array(input_forcings, nldas_zero_pcp, config_options)
err_handler.check_program_status(config_options, mpi_config)
else:
nldas_zero_pcp_sub = None
id_cfs_param1 = id_cfs_param2 = None
cfs_param_path1 = cfs_param_path2 = None
if mpi_config.rank == 0:
# Read in the CFSv2 parameter files, based on the previous CFSv2 dates
cfs_param_path1 = (input_forcings.paramDir + "/CFSv2_Climo/cfs_" +
cfs_param_path_vars[force_num] + "_" +
input_forcings.fcst_date1.strftime('%m%d') + "_" +
input_forcings.fcst_date1.strftime('%H') + '_dist_params.nc')
cfs_param_path2 = (input_forcings.paramDir + "/CFSv2_Climo/cfs_" +
cfs_param_path_vars[force_num] + "_" +
input_forcings.fcst_date2.strftime('%m%d') + "_" +
input_forcings.fcst_date2.strftime('%H') + '_dist_params.nc')
if not os.path.isfile(cfs_param_path1):
config_options.errMsg = "Unable to locate necessary parameter file: " + cfs_param_path1
err_handler.log_critical(config_options, mpi_config)
if not os.path.isfile(cfs_param_path2):
config_options.errMsg = "Unable to locate necessary parameter file: " + cfs_param_path2
err_handler.log_critical(config_options, mpi_config)
# Open the files and ensure they contain the correct information.
try:
id_cfs_param1 = Dataset(cfs_param_path1, 'r')
except OSError as err:
config_options.errMsg = "Unable to open parameter file: " + cfs_param_path1 + " (" + str(err) + ")"
err_handler.log_critical(config_options, mpi_config)
try:
id_cfs_param2 = Dataset(cfs_param_path2, 'r')
except OSError as err:
config_options.errMsg = "Unable to open parameter file: " + cfs_param_path2 + " (" + str(err) + ")"
err_handler.log_critical(config_options, mpi_config)
config_options.statusMsg = "Checking CFS parameter files."
err_handler.log_msg(config_options, mpi_config)
if 'DISTRIBUTION_PARAM_1' not in id_cfs_param1.variables.keys():
config_options.errMsg = "Expected DISTRIBUTION_PARAM_1 variable not found in: " + cfs_param_path1
err_handler.log_critical(config_options, mpi_config)
if 'DISTRIBUTION_PARAM_2' not in id_cfs_param1.variables.keys():
config_options.errMsg = "Expected DISTRIBUTION_PARAM_1 variable not found in: " + cfs_param_path1
err_handler.log_critical(config_options, mpi_config)
if 'DISTRIBUTION_PARAM_1' not in id_cfs_param2.variables.keys():
config_options.errMsg = "Expected DISTRIBUTION_PARAM_1 variable not found in: " + cfs_param_path2
err_handler.log_critical(config_options, mpi_config)
if 'DISTRIBUTION_PARAM_2' not in id_cfs_param2.variables.keys():
config_options.errMsg = "Expected DISTRIBUTION_PARAM_1 variable not found in: " + cfs_param_path2
err_handler.log_critical(config_options, mpi_config)
param_1 = param_2 = None
try:
param_1 = id_cfs_param2.variables['DISTRIBUTION_PARAM_1'][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract DISTRIBUTION_PARAM_1 from: " + cfs_param_path2 + \
" (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
try:
param_2 = id_cfs_param2.variables['DISTRIBUTION_PARAM_2'][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract DISTRIBUTION_PARAM_2 from: " + cfs_param_path2 + \
" (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
# try:
# lat_0 = id_cfs_param2.variables['lat_0'][:]
# except NumpyExceptions as npe:
# config_options.errMsg = "Unable to extract lat_0 from: " + cfs_param_path2 + " (" + str(npe) + ")"
# err_handler.log_critical(config_options, mpi_config)
# try:
# lon_0 = id_cfs_param2.variables['lon_0'][:]
# except NumpyExceptions as npe:
# config_options.errMsg = "Unable to extract lon_0 from: " + cfs_param_path2 + " (" + str(npe) + ")"
# err_handler.log_critical(config_options, mpi_config)
prev_param_1 = prev_param_2 = None
try:
prev_param_1 = id_cfs_param1.variables['DISTRIBUTION_PARAM_1'][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract DISTRIBUTION_PARAM_1 from: " + cfs_param_path1 + \
" (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
try:
prev_param_2 = id_cfs_param1.variables['DISTRIBUTION_PARAM_2'][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract DISTRIBUTION_PARAM_2 from: " + cfs_param_path1 + \
" (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
        if param_1.shape[0] != PARAM_NY or param_1.shape[1] != PARAM_NX:
config_options.errMsg = "Unexpected DISTRIBUTION_PARAM_1 found in: " + cfs_param_path2
err_handler.log_critical(config_options, mpi_config)
        if param_2.shape[0] != PARAM_NY or param_2.shape[1] != PARAM_NX:
config_options.errMsg = "Unexpected DISTRIBUTION_PARAM_2 found in: " + cfs_param_path2
err_handler.log_critical(config_options, mpi_config)
        if prev_param_1.shape[0] != PARAM_NY or prev_param_1.shape[1] != PARAM_NX:
config_options.errMsg = "Unexpected DISTRIBUTION_PARAM_1 found in: " + cfs_param_path1
err_handler.log_critical(config_options, mpi_config)
        if prev_param_2.shape[0] != PARAM_NY or prev_param_2.shape[1] != PARAM_NX:
config_options.errMsg = "Unexpected DISTRIBUTION_PARAM_2 found in: " + cfs_param_path1
err_handler.log_critical(config_options, mpi_config)
config_options.statusMsg = "Reading in zero precip probs."
err_handler.log_msg(config_options, mpi_config)
# Read in the zero precip prob grids if we are bias correcting precipitation.
zero_pcp = prev_zero_pcp = None
if force_num == 4:
try:
zero_pcp = id_cfs_param2.variables['ZERO_PRECIP_PROB'][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to locate ZERO_PRECIP_PROB in: " + cfs_param_path2 + \
" (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
try:
                prev_zero_pcp = id_cfs_param1.variables['ZERO_PRECIP_PROB'][:, :]
except NumpyExceptions as npe:
config_options.errMsg = "Unable to locate ZERO_PRECIP_PROB in: " + cfs_param_path1 + \
" (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
            if zero_pcp.shape[0] != PARAM_NY or zero_pcp.shape[1] != PARAM_NX:
config_options.errMsg = "Unexpected ZERO_PRECIP_PROB found in: " + cfs_param_path2
err_handler.log_critical(config_options, mpi_config)
            if prev_zero_pcp.shape[0] != PARAM_NY or prev_zero_pcp.shape[1] != PARAM_NX:
config_options.errMsg = "Unexpected ZERO_PRECIP_PROB found in: " + cfs_param_path1
err_handler.log_critical(config_options, mpi_config)
# Reset any missing values. Because the fill values for these files are all over the map, we
# will just do a gross check here. For the most part, there shouldn't be missing values.
param_1[np.where(param_1 > 500000.0)] = config_options.globalNdv
param_2[np.where(param_2 > 500000.0)] = config_options.globalNdv
prev_param_1[np.where(prev_param_1 > 500000.0)] = config_options.globalNdv
prev_param_2[np.where(prev_param_2 > 500000.0)] = config_options.globalNdv
if force_num == 4:
zero_pcp[np.where(zero_pcp > 500000.0)] = config_options.globalNdv
prev_zero_pcp[np.where(prev_zero_pcp > 500000.0)] = config_options.globalNdv
# Params are y-mirrored compared to the way WGRIB2 produces output
param_1 = np.flip(param_1, axis=0)
param_2 = np.flip(param_2, axis=0)
prev_param_1 = np.flip(prev_param_1, axis=0)
prev_param_2 = np.flip(prev_param_2, axis=0)
if force_num == 4:
zero_pcp = np.flip(zero_pcp, axis=0)
prev_zero_pcp = np.flip(prev_zero_pcp, axis=0)
else:
param_1 = None
param_2 = None
prev_param_1 = None
prev_param_2 = None
zero_pcp = None
prev_zero_pcp = None
err_handler.check_program_status(config_options, mpi_config)
if mpi_config.rank == 0:
config_options.statusMsg = "Scattering CFS parameter grids"
err_handler.log_msg(config_options, mpi_config)
# Scatter CFS parameters
cfs_param_1_sub = mpi_config.scatter_array(input_forcings, param_1, config_options)
err_handler.check_program_status(config_options, mpi_config)
cfs_param_2_sub = mpi_config.scatter_array(input_forcings, param_2, config_options)
err_handler.check_program_status(config_options, mpi_config)
cfs_prev_param_1_sub = mpi_config.scatter_array(input_forcings, prev_param_1, config_options)
err_handler.check_program_status(config_options, mpi_config)
cfs_prev_param_2_sub = mpi_config.scatter_array(input_forcings, prev_param_2, config_options)
err_handler.check_program_status(config_options, mpi_config)
if force_num == 4:
cfs_zero_pcp_sub = mpi_config.scatter_array(input_forcings, zero_pcp, config_options)
err_handler.check_program_status(config_options, mpi_config)
cfs_prev_zero_pcp_sub = mpi_config.scatter_array(input_forcings, prev_zero_pcp, config_options)
err_handler.check_program_status(config_options, mpi_config)
else:
cfs_prev_zero_pcp_sub = None
cfs_zero_pcp_sub = None
if mpi_config.rank == 0:
config_options.statusMsg = "Closing CFS bias correction parameter files."
err_handler.log_msg(config_options, mpi_config)
# Close the parameter files.
try:
id_nldas_param.close()
except OSError as err:
config_options.errMsg = "Unable to close parameter file: " + nldas_param_file + " (" + str(err) + ")"
err_handler.log_critical(config_options, mpi_config)
try:
id_cfs_param1.close()
except OSError as err:
config_options.errMsg = "Unable to close parameter file: " + cfs_param_path1 + " (" + str(err) + ")"
err_handler.log_critical(config_options, mpi_config)
try:
id_cfs_param2.close()
except OSError as err:
config_options.errMsg = "Unable to close parameter file: " + cfs_param_path2 + " (" + str(err) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Now.... Loop through the local CFSv2 grid cells and perform the following steps:
# 1.) Interpolate the six-hour values to the current output timestep.
# 2.) Calculate the CFSv2 cdf/pdf
# 3.) Calculate the NLDAS cdf/pdf
# 4.) Adjust CFSv2 values based on the method of pdf matching.
# 5.) Regrid the CFSv2 values to the WRF-Hydro domain using the pre-calculated ESMF
# regridding object.
# 6.) Place the data into the final output arrays for further processing (downscaling).
# 7.) Reset variables for memory efficiency and exit the routine.
if mpi_config.rank == 0:
config_options.statusMsg = "Creating local CFS CDF arrays."
err_handler.log_msg(config_options, mpi_config)
# Establish local arrays of data.
cfs_data = np.empty([input_forcings.ny_local, input_forcings.nx_local], np.float64)
# Establish parameters of the CDF matching.
vals = np.linspace(val_range1[force_num], val_range2[force_num], val_bins[force_num])
if mpi_config.rank == 0:
config_options.statusMsg = "Looping over local arrays to calculate bias corrections."
err_handler.log_msg(config_options, mpi_config)
# Process each of the pixel cells for this local processor on the CFS grid.
for x_local in range(0, input_forcings.nx_local):
for y_local in range(0, input_forcings.ny_local):
cfs_prev_tmp = input_forcings.coarse_input_forcings1[input_forcings.input_map_output[force_num],
y_local, x_local]
cfs_next_tmp = input_forcings.coarse_input_forcings2[input_forcings.input_map_output[force_num],
y_local, x_local]
# Check for any missing parameter values. If any missing values exist,
# set this flag to False. Further down, if it's False, we will simply
# set the local CFS adjusted value to the interpolated value.
correct_flag = True
if cfs_param_1_sub[y_local, x_local] == config_options.globalNdv:
correct_flag = False
if cfs_param_2_sub[y_local, x_local] == config_options.globalNdv:
correct_flag = False
if cfs_prev_param_1_sub[y_local, x_local] == config_options.globalNdv:
correct_flag = False
if cfs_prev_param_2_sub[y_local, x_local] == config_options.globalNdv:
correct_flag = False
if nldas_param_1_sub[y_local, x_local] == config_options.globalNdv:
correct_flag = False
if nldas_param_2_sub[y_local, x_local] == config_options.globalNdv:
correct_flag = False
if force_num == 4:
if cfs_prev_zero_pcp_sub[y_local, x_local] == config_options.globalNdv:
correct_flag = False
if cfs_zero_pcp_sub[y_local, x_local] == config_options.globalNdv:
correct_flag = False
if nldas_zero_pcp_sub[y_local, x_local] == config_options.globalNdv:
correct_flag = False
# Interpolate the two CFS values (and parameters) in time.
dt_from_previous = config_options.current_output_date - input_forcings.fcst_date1
hr_from_previous = dt_from_previous.total_seconds() / 3600.0
interp_factor1 = float(1 - (hr_from_previous / 6.0))
interp_factor2 = float(hr_from_previous / 6.0)
# Since this is only for CFSv2 6-hour data, we will assume 6-hour intervals.
# This is already checked at the beginning of this routine for the product name.
cfs_param_1_interp = (cfs_prev_param_1_sub[y_local, x_local] * interp_factor1 +
cfs_param_1_sub[y_local, x_local] * interp_factor2)
cfs_param_2_interp = (cfs_prev_param_2_sub[y_local, x_local] * interp_factor1 +
cfs_param_2_sub[y_local, x_local] * interp_factor2)
cfs_interp_fcst = cfs_prev_tmp * interp_factor1 + cfs_next_tmp * interp_factor2
nldas_nearest_1 = nldas_param_1_sub[y_local, x_local]
nldas_nearest_2 = nldas_param_2_sub[y_local, x_local]
if correct_flag:
if force_num != 4 and force_num != 5 and force_num != 1:
# Not incoming shortwave or precip or specific humidity
pts = (vals - cfs_param_1_interp) / cfs_param_2_interp
spacing = (vals[2] - vals[1]) / cfs_param_2_interp
cfs_pdf = (np.exp(-0.5 * (np.power(pts, 2))) / math.sqrt(2 * 3.141592)) * spacing
cfs_cdf = np.cumsum(cfs_pdf)
pts = (vals - nldas_nearest_1) / nldas_nearest_2
spacing = (vals[2] - vals[1]) / nldas_nearest_2
nldas_pdf = (np.exp(-0.5 * (np.power(pts, 2))) / math.sqrt(2 * 3.141592)) * spacing
nldas_cdf = np.cumsum(nldas_pdf)
# compute adjusted value now using the CFSv2 forecast value and the two CDFs
# find index in vals array
diff_tmp = np.absolute(vals - cfs_interp_fcst)
cfs_ind = np.where(diff_tmp == diff_tmp.min())[0][0]
cfs_cdf_val = cfs_cdf[cfs_ind]
                    # now what's the index of the closest cdf value in the nldas array?
diff_tmp = np.absolute(cfs_cdf_val - nldas_cdf)
cfs_nldas_ind = np.where(diff_tmp == diff_tmp.min())[0][0]
# Adjust the CFS data
cfs_data[y_local, x_local] = vals[cfs_nldas_ind]
if force_num == 5:
# Incoming shortwave radiation flux.
# find nearest nldas grid point and then calculate nldas cdf
nldas_nearest_1 = nldas_param_1_sub[y_local, x_local]
if cfs_interp_fcst > 2.0 and cfs_param_1_interp > 2.0:
factor = nldas_nearest_1 / cfs_param_1_interp
cfs_data[y_local, x_local] = cfs_interp_fcst * factor
else:
cfs_data[y_local, x_local] = 0.0
if force_num == 1:
# Specific humidity
# spacing = vals[2] - vals[1]
cfs_interp_fcst = cfs_interp_fcst * 1000.0 # units are now g/kg
cfs_cdf = 1 - np.exp(-(np.power((vals / cfs_param_1_interp), cfs_param_2_interp)))
nldas_cdf = 1 - np.exp(-(np.power((vals / nldas_nearest_1), nldas_nearest_2)))
# compute adjusted value now using the CFSv2 forecast value and the two CDFs
# find index in vals array
diff_tmp = np.absolute(vals - cfs_interp_fcst)
cfs_ind = np.argmin(diff_tmp)
cfs_cdf_val = cfs_cdf[cfs_ind]
                    # now what's the index of the closest cdf value in the nldas array?
diff_tmp = np.absolute(cfs_cdf_val - nldas_cdf)
cfs_nldas_ind = np.argmin(diff_tmp)
# Adjust the CFS data
cfs_data[y_local, x_local] = vals[cfs_nldas_ind] / 1000.0 # convert back to kg/kg
if cfs_data[y_local, x_local] == 0:
config_options.statusMsg = "Invalid Q2D bias correction parameter; using original value"
err_handler.log_msg(config_options, mpi_config)
cfs_data[y_local, x_local] = cfs_interp_fcst
if force_num == 4:
# Precipitation
# precipitation is estimated using a Weibull distribution
# valid values range from 3e-6 mm/s (0.01 mm/hr) up to 100 mm/hr
# spacing = vals[2] - vals[1]
cfs_zero_pcp_interp = cfs_prev_zero_pcp_sub[y_local, x_local] * interp_factor1 + \
cfs_zero_pcp_sub[y_local, x_local] * interp_factor2
cfs_cdf = 1 - np.exp(-(np.power((vals / cfs_param_1_interp), cfs_param_2_interp)))
# cfs_cdf_scaled = ((1 - cfs_zero_pcp_interp) + cfs_cdf) / \
# (cfs_cdf.max() + (1 - cfs_zero_pcp_interp))
nldas_nearest_zero_pcp = nldas_zero_pcp_sub[y_local, x_local]
if nldas_nearest_2 == 0.0:
# if second Weibull parameter is zero, the
# distribution has no width, no precipitation outside first bin
nldas_cdf = np.empty([2000], np.float64)
nldas_cdf[:] = 1.0
nldas_nearest_zero_pcp = 1.0
else:
# valid point, see if we need to adjust cfsv2 precip
nldas_cdf = 1 - np.exp(-(np.power((vals / nldas_nearest_1), nldas_nearest_2)))
if cfs_interp_fcst == 0.0 or nldas_nearest_zero_pcp == 1.0:
# if no rain in cfsv2, no rain in bias corrected field
cfs_data[y_local, x_local] = 0.0
else:
# else there is rain in cfs forecast, so adjust it in some manner
# compute adjusted value now using the CFSv2 forecast value and the two CDFs
# find index in vals array
try:
diff_tmp = np.absolute(vals - (cfs_interp_fcst * 3600.0))
cfs_ind = np.where(diff_tmp == diff_tmp.min())[0][0]
cfs_cdf_val = cfs_cdf[cfs_ind]
                            # now what's the index of the closest cdf value in the nldas array?
diff_tmp = np.absolute(cfs_cdf_val - nldas_cdf)
cfs_nldas_ind = np.where(diff_tmp == diff_tmp.min())[0][0]
except IndexError:
# something's wrong with the parameters, so log it and keep on keeping on...
config_options.statusMsg = "Invalid input data for bias correction, continuing..."
err_handler.log_msg(config_options, mpi_config)
continue
pcp_pop_diff = nldas_nearest_zero_pcp - cfs_zero_pcp_interp
if cfs_zero_pcp_interp <= nldas_nearest_zero_pcp:
# if cfsv2 zero precip probability is less than nldas,
# then do one adjustment
if cfs_cdf_val <= pcp_pop_diff:
# if cfsv2 precip cdf is still less than pop
# difference, set precip to zero
cfs_data[y_local, x_local] = 0.0
else:
# cfsv2 precip cdf > nldas zero precip probability,
# so adjust cfsv2 to nldas2 precip
cfs_data[y_local, x_local] = vals[cfs_nldas_ind] / 3600.0 # convert back to mm/s
# check for unreasonable corrections of cfs rainfall
# ad-hoc setting that cfsv2 precipitation should not be corrected by more than 3x
                                # if it is, this indicates the nldas2 distribution is unrealistic
# and default back to cfsv2 forecast value
if (cfs_data[y_local, x_local] / cfs_interp_fcst) >= 3.0:
cfs_data[y_local, x_local] = cfs_interp_fcst
else:
if cfs_cdf_val <= abs(pcp_pop_diff):
# if cfsv2 cdf value less than pop difference, need to randomly
# generate precip, since we're in the zero portion of the nldas
# zero precip prob still
randn = random.uniform(0.0, abs(pcp_pop_diff))
diff_tmp = np.absolute(randn - nldas_cdf)
new_nldas_ind = np.where(diff_tmp == diff_tmp.min())[0][0]
cfs_data[y_local, x_local] = vals[new_nldas_ind] / 3600.0
# ad-hoc setting that cfsv2 precipitation should not be corrected by more than 3x
                                # if it is, this indicates the nldas2 distribution is unrealistic
# and default back to cfsv2 forecast value
if (cfs_data[y_local, x_local] / cfs_interp_fcst) >= 3.0:
cfs_data[y_local, x_local] = cfs_interp_fcst
else:
cfs_data[y_local, x_local] = vals[cfs_nldas_ind] / 3600.0 # convert back to mm/s
# ad-hoc setting that cfsv2 precipitation should not be corrected by more than 3x
                                # if it is, this indicates the nldas2 distribution is unrealistic
# and default back to cfsv2 forecast value
if (cfs_data[y_local, x_local] / cfs_interp_fcst) >= 3.0:
cfs_data[y_local, x_local] = cfs_interp_fcst
else:
# No adjustment for this CFS pixel cell as we have missing parameter values.
cfs_data[y_local, x_local] = cfs_interp_fcst
# Regrid the local CFS slab to the output array
try:
input_forcings.esmf_field_in.data[...] = cfs_data
except NumpyExceptions as npe:
config_options.errMsg = "Unable to place CFSv2 forcing data into temporary ESMF field: " + str(npe)
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
input_forcings.esmf_field_out = input_forcings.regridObj(input_forcings.esmf_field_in,
input_forcings.esmf_field_out)
except ValueError as ve:
config_options.errMsg = "Unable to regrid CFSv2 variable: " + input_forcings.netcdf_var_names[force_num] + \
" (" + str(ve) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Set any pixel cells outside the input domain to the global missing value.
try:
input_forcings.esmf_field_out.data[np.where(input_forcings.regridded_mask == 0)] = \
config_options.globalNdv
except NumpyExceptions as npe:
config_options.errMsg = "Unable to run mask calculation on CFSv2 variable: " + \
input_forcings.netcdf_var_names[force_num] + " (" + str(npe) + ")"
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
try:
input_forcings.final_forcings[input_forcings.input_map_output[force_num], :, :] = \
input_forcings.esmf_field_out.data
except NumpyExceptions as npe:
config_options.errMsg = "Unable to extract ESMF field data for CFSv2: " + str(npe)
err_handler.log_critical(config_options, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
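# --- Editor's note: illustrative sketch only, not called by the forcing engine. ---
# The heart of cfsv2_nldas_nwm_bias_correct() is parametric quantile (CDF) matching:
# evaluate the CFSv2 CDF at the time-interpolated forecast value, then pick the NLDAS
# value with the same cumulative probability. The hypothetical helper below restates
# that lookup for the Gaussian branch (temperature, wind, pressure, longwave) on a
# discretized value range, mirroring the val_range/val_bins tables above.
def _example_gaussian_cdf_match(fcst_value, cfs_mean, cfs_sd, nldas_mean, nldas_sd,
                                val_min, val_max, n_bins):
    """Map fcst_value through the CFSv2 Gaussian CDF onto the NLDAS Gaussian CDF."""
    vals = np.linspace(val_min, val_max, n_bins)
    spacing = vals[2] - vals[1]
    cfs_cdf = np.cumsum(np.exp(-0.5 * ((vals - cfs_mean) / cfs_sd) ** 2) /
                        math.sqrt(2 * math.pi) * (spacing / cfs_sd))
    nldas_cdf = np.cumsum(np.exp(-0.5 * ((vals - nldas_mean) / nldas_sd) ** 2) /
                          math.sqrt(2 * math.pi) * (spacing / nldas_sd))
    cfs_cdf_val = cfs_cdf[np.argmin(np.abs(vals - fcst_value))]
    return vals[np.argmin(np.abs(nldas_cdf - cfs_cdf_val))]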
```
#### File: WrfHydroForcing/core/downscale.py
```python
import math
import os
import time
import numpy as np
from netCDF4 import Dataset
from core import err_handler
def run_downscaling(input_forcings, config_options, geo_meta_wrf_hydro, mpi_config):
"""
Top level module function that will downscale forcing variables
for this particular input forcing product.
:param geo_meta_wrf_hydro:
:param mpi_config:
:param input_forcings:
:param config_options:
:return:
"""
# Dictionary mapping to temperature downscaling.
downscale_temperature = {
0: no_downscale,
1: simple_lapse,
2: param_lapse
}
downscale_temperature[input_forcings.t2dDownscaleOpt](input_forcings, config_options,
geo_meta_wrf_hydro, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary mapping to pressure downscaling.
downscale_pressure = {
0: no_downscale,
1: pressure_down_classic
}
downscale_pressure[input_forcings.psfcDownscaleOpt](input_forcings, config_options,
geo_meta_wrf_hydro, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary mapping to shortwave radiation downscaling
downscale_sw = {
0: no_downscale,
1: ncar_topo_adj
}
downscale_sw[input_forcings.swDowscaleOpt](input_forcings, config_options, geo_meta_wrf_hydro, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary mapping to specific humidity downscaling
downscale_q2 = {
0: no_downscale,
1: q2_down_classic
}
downscale_q2[input_forcings.q2dDownscaleOpt](input_forcings, config_options, geo_meta_wrf_hydro, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary mapping to precipitation downscaling.
downscale_precip = {
0: no_downscale,
1: nwm_monthly_PRISM_downscale
#1: precip_mtn_mapper
}
downscale_precip[input_forcings.precipDownscaleOpt](input_forcings, config_options, geo_meta_wrf_hydro, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
def no_downscale(input_forcings, ConfigOptions, GeoMetaWrfHydro, MpiConfig):
"""
Generic function for passing states through without any
downscaling.
:param input_forcings:
:param ConfigOptions:
:return:
"""
input_forcings.final_forcings = input_forcings.final_forcings
def simple_lapse(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
Function that applies a single lapse rate adjustment to modeled
2-meter temperature by taking the difference of the native
    input elevation and the WRF-Hydro elevation.
    :param input_forcings:
:param ConfigOptions:
:param GeoMetaWrfHydro:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Applying simple lapse rate to temperature downscaling"
err_handler.log_msg(ConfigOptions, MpiConfig)
# Calculate the elevation difference.
elevDiff = input_forcings.height - GeoMetaWrfHydro.height
# Assign existing, un-downscaled temperatures to a temporary placeholder, which
# will be used for specific humidity downscaling.
if input_forcings.q2dDownscaleOpt > 0:
input_forcings.t2dTmp[:,:] = input_forcings.final_forcings[4,:,:]
# Apply single lapse rate value to the input 2-meter
# temperature values.
try:
indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform NDV search on input forcings"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
input_forcings.final_forcings[4,:,:] = input_forcings.final_forcings[4,:,:] + \
(6.49/1000.0)*elevDiff
except:
ConfigOptions.errMsg = "Unable to apply lapse rate to input 2-meter temperatures."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
input_forcings.final_forcings[indNdv] = ConfigOptions.globalNdv
# Reset for memory efficiency
indNdv = None
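# --- Editor's note: illustrative sketch only, not called by the forcing engine. ---
# simple_lapse() applies the standard fixed lapse-rate correction
#     T_out = T_in + (6.49 K/km / 1000) * (elev_input - elev_wrf_hydro),
# warming cells that sit lower on the WRF-Hydro grid than on the source grid and cooling
# cells that sit higher. A hypothetical standalone restatement:
def _example_simple_lapse(t2m, elev_in, elev_out, lapse_rate=6.49):
    """Adjust 2m temperature (K) by a single lapse rate (K/km) for an elevation change (m)."""
    return t2m + (lapse_rate / 1000.0) * (elev_in - elev_out)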
def param_lapse(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
    Function that applies an a priori lapse rate adjustment to modeled
    2-meter temperature by taking the difference of the native
    input elevation and the WRF-Hydro elevation. It's assumed this lapse
    rate grid has already been regridded to the final output WRF-Hydro
    grid.
    :param input_forcings:
:param ConfigOptions:
:param GeoMetaWrfHydro:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Applying aprior lapse rate grid to temperature downscaling"
err_handler.log_msg(ConfigOptions, MpiConfig)
# Calculate the elevation difference.
elevDiff = input_forcings.height - GeoMetaWrfHydro.height
if input_forcings.lapseGrid is None:
#if not np.any(input_forcings.lapseGrid):
# We have not read in our lapse rate file. Read it in, do extensive checks,
# scatter the lapse rate grid out to individual processors, then apply the
# lapse rate to the 2-meter temperature grid.
if MpiConfig.rank == 0:
while (True):
# First ensure we have a parameter directory
if input_forcings.paramDir == "NONE":
ConfigOptions.errMsg = "User has specified spatial temperature lapse rate " \
"downscaling while no downscaling parameter directory " \
"exists."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Compose the path to the lapse rate grid file.
lapsePath = input_forcings.paramDir + "/lapse_param.nc"
if not os.path.isfile(lapsePath):
ConfigOptions.errMsg = "Expected lapse rate parameter file: " + \
lapsePath + " does not exist."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Open the lapse rate file. Check for the expected variable, along with
# the dimension size to make sure everything matches up.
try:
idTmp = Dataset(lapsePath,'r')
except:
ConfigOptions.errMsg = "Unable to open parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if not 'lapse' in idTmp.variables.keys():
ConfigOptions.errMsg = "Expected 'lapse' variable not located in parameter " \
"file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
try:
lapseTmp = idTmp.variables['lapse'][:,:]
except:
ConfigOptions.errMsg = "Unable to extracte 'lapse' variable from parameter: " \
"file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Check dimensions to ensure they match up to the output grid.
if lapseTmp.shape[1] != GeoMetaWrfHydro.nx_global:
ConfigOptions.errMsg = "X-Dimension size mismatch between output grid and lapse " \
"rate from parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if lapseTmp.shape[0] != GeoMetaWrfHydro.ny_global:
ConfigOptions.errMsg = "Y-Dimension size mismatch between output grid and lapse " \
"rate from parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Perform a quick search to ensure we don't have radical values.
indTmp = np.where(lapseTmp < -10.0)
if len(indTmp[0]) > 0:
ConfigOptions.errMsg = "Found anomolous negative values in the lapse rate grid from " \
"parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
indTmp = np.where(lapseTmp > 100.0)
if len(indTmp[0]) > 0:
ConfigOptions.errMsg = "Found excessively high values in the lapse rate grid from " \
"parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Close the parameter lapse rate file.
try:
idTmp.close()
except:
ConfigOptions.errMsg = "Unable to close parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
break
else:
lapseTmp = None
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Scatter the lapse rate grid to the other processors.
input_forcings.lapseGrid = MpiConfig.scatter_array(GeoMetaWrfHydro,lapseTmp,ConfigOptions)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Apply the local lapse rate grid to our local slab of 2-meter temperature data.
temperature_grid_tmp = input_forcings.final_forcings[4, :, :]
try:
indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform NDV search on input " + \
input_forcings.productName + " regridded forcings."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
indValid = np.where(temperature_grid_tmp != ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform search for valid values on input " + \
input_forcings.productName + " regridded temperature forcings."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
temperature_grid_tmp[indValid] = temperature_grid_tmp[indValid] + \
((input_forcings.lapseGrid[indValid]/1000.0) * elevDiff[indValid])
except:
ConfigOptions.errMsg = "Unable to apply spatial lapse rate values to input " + \
input_forcings.productName + " regridded temperature forcings."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
input_forcings.final_forcings[4,:,:] = temperature_grid_tmp
input_forcings.final_forcings[indNdv] = ConfigOptions.globalNdv
# Reset for memory efficiency
indTmp = None
indNdv = None
indValid = None
elevDiff = None
temperature_grid_tmp = None
def pressure_down_classic(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
Generic function to downscale surface pressure to the WRF-Hydro domain.
:param input_forcings:
:param ConfigOptions:
:param GeoMetaWrfHydro:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Performing topographic adjustment to surface pressure."
err_handler.log_msg(ConfigOptions, MpiConfig)
# Calculate the elevation difference.
elevDiff = input_forcings.height - GeoMetaWrfHydro.height
# Assign existing, un-downscaled pressure values to a temporary placeholder, which
# will be used for specific humidity downscaling.
if input_forcings.q2dDownscaleOpt > 0:
input_forcings.psfcTmp[:, :] = input_forcings.final_forcings[6, :, :]
try:
indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform NDV search on input forcings"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
input_forcings.final_forcings[6,:,:] = input_forcings.final_forcings[6,:,:] +\
(input_forcings.final_forcings[6,:,:]*elevDiff*9.8)/\
(input_forcings.final_forcings[4,:,:]*287.05)
except:
ConfigOptions.errMsg = "Unable to downscale surface pressure to input forcings."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
input_forcings.final_forcings[indNdv] = ConfigOptions.globalNdv
# Reset for memory efficiency
indNdv = None
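# --- Editor's note: illustrative sketch only, not called by the forcing engine. ---
# pressure_down_classic() is a first-order hypsometric correction,
#     P_out = P_in * (1 + g * (elev_in - elev_out) / (R_d * T)),
# with g = 9.8 m s-2 and R_d = 287.05 J kg-1 K-1, so pressure rises where the WRF-Hydro
# cell lies below the source grid cell. A hypothetical standalone restatement:
def _example_pressure_downscale(psfc, t2m, elev_in, elev_out, g=9.8, r_d=287.05):
    """First-order hypsometric adjustment of surface pressure (Pa) for an elevation change (m)."""
    return psfc + (psfc * (elev_in - elev_out) * g) / (t2m * r_d)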
def q2_down_classic(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
NCAR function for downscaling 2-meter specific humidity using already downscaled
2-meter temperature, unadjusted surface pressure, and downscaled surface
pressure.
:param input_forcings:
:param ConfigOptions:
:param GeoMetaWrfHydro:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Performing topographic adjustment to specific humidity."
err_handler.log_msg(ConfigOptions, MpiConfig)
# Establish where we have missing values.
try:
indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform NDV search on input forcings"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
# First calculate relative humidity given original surface pressure and 2-meter
# temperature
try:
relHum = rel_hum(input_forcings,ConfigOptions)
except:
ConfigOptions.errMsg = "Unable to perform topographic downscaling of incoming " \
"specific humidity to relative humidity"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
# Downscale 2-meter specific humidity
try:
q2Tmp = mixhum_ptrh(input_forcings,relHum,2,ConfigOptions)
except:
ConfigOptions.errMsg = "Unable to perform topographic downscaling of " \
"incoming specific humidity"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
input_forcings.final_forcings[5,:,:] = q2Tmp
input_forcings.final_forcings[indNdv] = ConfigOptions.globalNdv
q2Tmp = None
indNdv = None
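# --- Editor's note: illustrative sketch only, not called by the forcing engine. ---
# q2_down_classic() holds relative humidity fixed: it converts the incoming specific
# humidity to RH at the original pressure/temperature (rel_hum) and back to specific
# humidity at the downscaled pressure/temperature (mixhum_ptrh). The hypothetical helper
# below sketches the same idea with the Bolton (1980) saturation vapor pressure formula;
# it is an independent illustration, not a reproduction of rel_hum/mixhum_ptrh.
def _example_q2_downscale(q2, t2m_orig, psfc_orig, t2m_down, psfc_down):
    """Rescale specific humidity (kg/kg) to downscaled T (K) and P (Pa) at constant RH."""
    def _qsat(t_k, p_pa):
        es = 611.2 * np.exp(17.67 * (t_k - 273.15) / (t_k - 273.15 + 243.5))  # Pa
        return 0.622 * es / (p_pa - 0.378 * es)
    rh = q2 / _qsat(t2m_orig, psfc_orig)
    return rh * _qsat(t2m_down, psfc_down)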
def nwm_monthly_PRISM_downscale(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
NCAR/OWP function for downscaling precipitation using monthly PRISM climatology in a
mountain-mapper like fashion.
:param input_forcings:
:param ConfigOptions:
:param GeoMetaWrfHydro:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Performing NWM Monthly PRISM Mountain Mapper " \
"Downscaling of Precipitation"
err_handler.log_msg(ConfigOptions, MpiConfig)
# Establish whether or not we need to read in new PRISM monthly climatology:
# 1.) This is the first output timestep, and no grids have been initialized.
# 2.) We have switched months from the last timestep. In this case, we need
# to re-initialize the grids for the current month.
initialize_flag = False
if input_forcings.nwmPRISM_denGrid is None and input_forcings.nwmPRISM_numGrid is None:
# We are on situation 1 - This is the first output step.
initialize_flag = True
# print('WE NEED TO READ IN PRISM GRIDS')
if ConfigOptions.current_output_date.month != ConfigOptions.prev_output_date.month:
# We are on situation #2 - The month has changed so we need to reinitialize the
# PRISM grids.
initialize_flag = True
# print('MONTH CHANGE.... NEED TO READ IN NEW PRISM GRIDS.')
if initialize_flag is True:
while (True):
# First reset the local PRISM grids to be safe.
input_forcings.nwmPRISM_numGrid = None
input_forcings.nwmPRISM_denGrid = None
# Compose paths to the expected files.
numeratorPath = input_forcings.paramDir + "/PRISM_Precip_Clim_" + \
ConfigOptions.current_output_date.strftime('%h') + '_NWM_Mtn_Mapper_Numer.nc'
denominatorPath = input_forcings.paramDir + "/PRISM_Precip_Clim_" + \
ConfigOptions.current_output_date.strftime('%h') + '_NWM_Mtn_Mapper_Denom.nc'
#print(numeratorPath)
#print(denominatorPath)
# Make sure files exist.
if not os.path.isfile(numeratorPath):
ConfigOptions.errMsg = "Expected parameter file: " + numeratorPath + \
" for mountain mapper downscaling of precipitation not found."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if not os.path.isfile(denominatorPath):
ConfigOptions.errMsg = "Expected parameter file: " + denominatorPath + \
" for mountain mapper downscaling of precipitation not found."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if MpiConfig.rank == 0:
# Open the NetCDF parameter files. Check to make sure expected dimension
# sizes are in place, along with variable names, etc.
try:
idNum = Dataset(numeratorPath,'r')
except:
ConfigOptions.errMsg = "Unable to open parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
try:
idDenom = Dataset(denominatorPath,'r')
except:
ConfigOptions.errMsg = "Unable to open parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Check to make sure expected names, dimension sizes are present.
if 'x' not in idNum.variables.keys():
ConfigOptions.errMsg = "Expected 'x' variable not found in parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if 'x' not in idDenom.variables.keys():
ConfigOptions.errMsg = "Expected 'x' variable not found in parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if 'y' not in idNum.variables.keys():
ConfigOptions.errMsg = "Expected 'y' variable not found in parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if 'y' not in idDenom.variables.keys():
ConfigOptions.errMsg = "Expected 'y' variable not found in parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if 'Data' not in idNum.variables.keys():
ConfigOptions.errMsg = "Expected 'Data' variable not found in parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if 'Data' not in idDenom.variables.keys():
ConfigOptions.errMsg = "Expected 'Data' variable not found in parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if idNum.variables['Data'].shape[0] != GeoMetaWrfHydro.ny_global:
ConfigOptions.errMsg = "Input Y dimension for: " + numeratorPath + \
" does not match the output WRF-Hydro Y dimension size."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if idDenom.variables['Data'].shape[0] != GeoMetaWrfHydro.ny_global:
ConfigOptions.errMsg = "Input Y dimension for: " + denominatorPath + \
" does not match the output WRF-Hydro Y dimension size."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if idNum.variables['Data'].shape[1] != GeoMetaWrfHydro.nx_global:
ConfigOptions.errMsg = "Input X dimension for: " + numeratorPath + \
" does not match the output WRF-Hydro X dimension size."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if idDenom.variables['Data'].shape[1] != GeoMetaWrfHydro.nx_global:
ConfigOptions.errMsg = "Input X dimension for: " + denominatorPath + \
" does not match the output WRF-Hydro X dimension size."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Read in the PRISM grid on the output grid. Then scatter the array out to the processors.
try:
numDataTmp = idNum.variables['Data'][:,:]
except:
ConfigOptions.errMsg = "Unable to extract 'Data' from parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
try:
denDataTmp = idDenom.variables['Data'][:,:]
except:
ConfigOptions.errMsg = "Unable to extract 'Data' from parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Close the parameter files.
try:
idNum.close()
except:
ConfigOptions.errMsg = "Unable to close parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
try:
idDenom.close()
except:
ConfigOptions.errMsg = "Unable to close parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
else:
numDataTmp = None
denDataTmp = None
break
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Scatter the array out to the local processors
input_forcings.nwmPRISM_numGrid = MpiConfig.scatter_array(GeoMetaWrfHydro, numDataTmp, ConfigOptions)
err_handler.check_program_status(ConfigOptions, MpiConfig)
input_forcings.nwmPRISM_denGrid = MpiConfig.scatter_array(GeoMetaWrfHydro, denDataTmp, ConfigOptions)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Create temporary grids from the local slabs of params/precip forcings.
localRainRate = input_forcings.final_forcings[3,:,:]
numLocal = input_forcings.nwmPRISM_numGrid[:,:]
denLocal = input_forcings.nwmPRISM_denGrid[:,:]
# Establish index of where we have valid data.
try:
indValid = np.where((localRainRate > 0.0) & (denLocal > 0.0) & (numLocal > 0.0))
except:
ConfigOptions.errMsg = "Unable to run numpy search for valid values on precip and " \
"param grid in mountain mapper downscaling"
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Convert precipitation rate, which is mm/s to mm, which is needed to run the PRISM downscaling.
try:
localRainRate[indValid] = localRainRate[indValid]*3600.0
except:
ConfigOptions.errMsg = "Unable to convert temporary precip rate from mm/s to mm."
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
try:
localRainRate[indValid] = localRainRate[indValid] * numLocal[indValid]
except:
ConfigOptions.errMsg = "Unable to multiply precip by numerator in mountain mapper downscaling"
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
try:
localRainRate[indValid] = localRainRate[indValid] / denLocal[indValid]
except:
ConfigOptions.errMsg = "Unable to divide precip by denominator in mountain mapper downscaling"
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Convert local precip back to a rate (mm/s)
try:
localRainRate[indValid] = localRainRate[indValid]/3600.0
except:
ConfigOptions.errMsg = "Unable to convert temporary precip rate from mm to mm/s."
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
input_forcings.final_forcings[3, :, :] = localRainRate
# Reset variables for memory efficiency
idDenom = None
idNum = None
localRainRate = None
numLocal = None
denLocal = None
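# --- Editor's note: illustrative sketch only, not called by the forcing engine. ---
# Mountain-mapper downscaling scales each precipitation value by the ratio of two PRISM
# climatology grids (numerator / denominator) already regridded to the WRF-Hydro domain.
# The engine converts mm/s to mm, multiplies by the numerator, divides by the denominator,
# and converts back; the 3600 factors cancel, so the net effect is a multiplicative
# adjustment wherever all three fields are positive. A hypothetical restatement:
def _example_mountain_mapper(rainrate, prism_num, prism_den):
    """Scale a precipitation-rate grid (mm/s) by the PRISM numerator/denominator ratio."""
    adjusted = np.copy(rainrate)
    valid = np.where((rainrate > 0.0) & (prism_num > 0.0) & (prism_den > 0.0))
    adjusted[valid] = rainrate[valid] * (prism_num[valid] / prism_den[valid])
    return adjusted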
def ncar_topo_adj(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
Topographic adjustment of incoming shortwave radiation fluxes,
given input parameters.
:param input_forcings:
:param ConfigOptions:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Performing topographic adjustment to incoming " \
"shortwave radiation flux."
err_handler.log_msg(ConfigOptions, MpiConfig)
# Establish where we have missing values.
try:
indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform NDV search on input forcings"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
# By the time this function has been called, necessary input static grids (height, slope, etc),
# should have been calculated for each local slab of data.
DEGRAD = math.pi/180.0
DPD = 360.0/365.0
try:
DECLIN, SOLCON = radconst(ConfigOptions)
except:
ConfigOptions.errMsg = "Unable to calculate solar constants based on datetime information."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
coszen_loc, hrang_loc = calc_coszen(ConfigOptions,DECLIN,GeoMetaWrfHydro)
except:
ConfigOptions.errMsg = "Unable to calculate COSZEN or HRANG variables for topographic adjustment " \
"of incoming shortwave radiation"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
TOPO_RAD_ADJ_DRVR(GeoMetaWrfHydro,input_forcings,coszen_loc,DECLIN,SOLCON,
hrang_loc)
except:
ConfigOptions.errMsg = "Unable to perform final topographic adjustment of incoming " \
"shortwave radiation fluxes."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
# Assign missing values based on our mask.
input_forcings.final_forcings[indNdv] = ConfigOptions.globalNdv
# Reset variables to free up memory
DECLIN = None
SOLCON = None
coszen_loc = None
hrang_loc = None
indNdv = None
def radconst(ConfigOptions):
"""
Function to calculate the current incoming solar constant.
:param ConfigOptions:
:return:
"""
dCurrent = ConfigOptions.current_output_date
DEGRAD = math.pi/180.0
DPD = 360.0/365.0
# For short wave radiation
DECLIN = 0.0
SOLCON = 0.0
# Calculate the current julian day.
JULIAN = time.strptime(dCurrent.strftime('%Y.%m.%d'), '%Y.%m.%d').tm_yday
# OBECL : OBLIQUITY = 23.5 DEGREE
OBECL = 23.5 * DEGRAD
SINOB = math.sin(OBECL)
# Calculate longitude of the sun from vernal equinox
if JULIAN >= 80:
SXLONG = DPD * (JULIAN - 80)
if JULIAN < 80:
SXLONG = DPD * (JULIAN + 285)
SXLONG = SXLONG*DEGRAD
ARG = SINOB * math.sin(SXLONG)
DECLIN = math.asin(ARG)
DECDEG = DECLIN / DEGRAD
# Solar constant eccentricity factor (Paltridge and Platt 1976)
DJUL = JULIAN * 360.0 / 365.0
RJUL = DJUL * DEGRAD
ECCFAC = 1.000110 + (0.034221 * math.cos(RJUL)) + (0.001280 * math.sin(RJUL)) + \
(0.000719 * math.cos(2 * RJUL)) + (0.000077 * math.sin(2 * RJUL))
SOLCON = 1370.0 * ECCFAC
return DECLIN, SOLCON
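
# --- Illustrative sketch (not part of the original module) ---
# radconst() only reads ConfigOptions.current_output_date, so it can be
# exercised with a minimal stand-in object. The class/function names below are
# hypothetical and exist purely for demonstration; radconst itself relies on
# this module's existing math/time imports.
def _radconst_demo():
    import datetime as _dt

    class _DemoConfig:
        current_output_date = _dt.datetime(2021, 6, 21)

    declin, solcon = radconst(_DemoConfig())
    # Near the June solstice the declination approaches +23.5 degrees
    # (about +0.41 rad) and SOLCON stays within a few percent of 1370 W/m^2.
    return declin, solcon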
def calc_coszen(ConfigOptions,declin,GeoMetaWrfHydro):
"""
Downscaling function to compute radiation terms based on current datetime
information and lat/lon grids.
:param ConfigOptions:
    :param declin:
    :param GeoMetaWrfHydro:
:return:
"""
degrad = math.pi / 180.0
gmt = 0
# Calculate the current julian day.
dCurrent = ConfigOptions.current_output_date
julian = time.strptime(dCurrent.strftime('%Y.%m.%d'), '%Y.%m.%d').tm_yday
da = 6.2831853071795862 * ((julian - 1) / 365.0)
eot = ((0.000075 + 0.001868 * math.cos(da)) - (0.032077 * math.sin(da)) - \
(0.014615 * math.cos(2 * da)) - (0.04089 * math.sin(2 * da))) * 229.18
xtime = dCurrent.hour * 60.0 # Minutes of day
xt24 = int(xtime) % 1440 + eot
tloctm = GeoMetaWrfHydro.longitude_grid/15.0 + gmt + xt24/60.0
hrang = ((tloctm - 12.0) * degrad) * 15.0
xxlat = GeoMetaWrfHydro.latitude_grid * degrad
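    # Standard solar-position relation applied per grid cell below:
    # cos(zenith) = sin(lat)*sin(declination) + cos(lat)*cos(declination)*cos(hour angle)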
coszen = np.sin(xxlat) * math.sin(declin) + np.cos(xxlat) * math.cos(declin) * np.cos(hrang)
# Reset temporary variables to free up memory.
tloctm = None
xxlat = None
return coszen, hrang
def TOPO_RAD_ADJ_DRVR(GeoMetaWrfHydro,input_forcings,COSZEN,declin,solcon,hrang2d):
"""
Downscaling driver for correcting incoming shortwave radiation fluxes from a low
    resolution to a higher resolution.
:param GeoMetaWrfHydro:
:param input_forcings:
:param COSZEN:
:param declin:
:param solcon:
:param hrang2d:
:return:
"""
degrad = math.pi / 180.0
ny = GeoMetaWrfHydro.ny_local
nx = GeoMetaWrfHydro.nx_local
xxlat = GeoMetaWrfHydro.latitude_grid*degrad
# Sanity checking on incoming shortwave grid.
SWDOWN = input_forcings.final_forcings[7,:,:]
SWDOWN[np.where(SWDOWN < 0.0)] = 0.0
SWDOWN[np.where(SWDOWN >= 1400.0)] = 1400.0
COSZEN[np.where(COSZEN < 1E-4)] = 1E-4
    # np.int was removed from recent NumPy releases; the builtin int is equivalent here.
    corr_frac = np.empty([ny, nx], int)
    # shadow_mask = np.empty([ny,nx],int)
    diffuse_frac = np.empty([ny, nx], int)
corr_frac[:, :] = 0
diffuse_frac[:, :] = 0
# shadow_mask[:,:] = 0
indTmp = np.where((GeoMetaWrfHydro.slope[:,:] == 0.0) &
(SWDOWN <= 10.0))
corr_frac[indTmp] = 1
term1 = np.sin(xxlat) * np.cos(hrang2d)
term2 = ((0 - np.cos(GeoMetaWrfHydro.slp_azi)) *
np.sin(GeoMetaWrfHydro.slope))
term3 = np.sin(hrang2d) * (np.sin(GeoMetaWrfHydro.slp_azi) *
np.sin(GeoMetaWrfHydro.slope))
term4 = (np.cos(xxlat) * np.cos(hrang2d)) * np.cos(GeoMetaWrfHydro.slope)
term5 = np.cos(xxlat) * (np.cos(GeoMetaWrfHydro.slp_azi) *
np.sin(GeoMetaWrfHydro.slope))
term6 = np.sin(xxlat) * np.cos(GeoMetaWrfHydro.slope)
csza_slp = (term1 * term2 - term3 + term4) * math.cos(declin) + \
(term5 + term6) * math.sin(declin)
csza_slp[np.where(csza_slp <= 1E-4)] = 1E-4
# Topographic shading
# csza_slp[np.where(shadow == 1)] = 1E-4
    # Correction factor for sloping topography: the diffuse fraction of solar
# radiation is assumed to be unaffected by the slope.
corr_fac = diffuse_frac + ((1 - diffuse_frac) * csza_slp) / COSZEN
corr_fac[np.where(corr_fac > 1.3)] = 1.3
    # Perform downscaling
SWDOWN_OUT = SWDOWN * corr_fac
# Reset variables to free up memory
# corr_frac = None
diffuse_frac = None
term1 = None
term2 = None
term3 = None
term4 = None
term5 = None
term6 = None
input_forcings.final_forcings[7,:,:] = SWDOWN_OUT
# Reset variables to free up memory
SWDOWN = None
SWDOWN_OUT = None
def rel_hum(input_forcings,ConfigOptions):
"""
Function to calculate relative humidity given
original, undownscaled surface pressure and 2-meter
temperature.
:param input_forcings:
:param ConfigOptions:
:return:
"""
tmpHumidity = input_forcings.final_forcings[5,:,:]/(1-input_forcings.final_forcings[5,:,:])
T0 = 273.15
EP = 0.622
ONEMEP = 0.378
ES0 = 6.11
A = 17.269
B = 35.86
EST = ES0 * np.exp((A * (input_forcings.t2dTmp - T0)) / (input_forcings.t2dTmp - B))
QST = (EP * EST) / ((input_forcings.psfcTmp * 0.01) - ONEMEP * EST)
RH = 100 * (tmpHumidity / QST)
# Reset variables to free up memory
tmpHumidity = None
return RH
def mixhum_ptrh(input_forcings,relHum,iswit,ConfigOptions):
"""
    Function to convert relative humidity back to a downscaled
    2-meter specific humidity.
    :param input_forcings:
    :param relHum:
    :param iswit:
    :param ConfigOptions:
:return:
"""
T0 = 273.15
EP = 0.622
ONEMEP = 0.378
ES0 = 6.11
A = 17.269
B = 35.86
term1 = A * (input_forcings.final_forcings[4,:,:] - T0)
term2 = input_forcings.final_forcings[4,:,:] - B
EST = np.exp(term1 / term2) * ES0
QST = (EP * EST) / ((input_forcings.final_forcings[6,:,:]/100.0) - ONEMEP * EST)
QW = QST * (relHum * 0.01)
if iswit == 2:
QW = QW / (1.0 + QW)
if iswit < 0:
QW = QW * 1000.0
# Reset variables to free up memory
term1 = None
term2 = None
EST = None
QST = None
psfcTmp = None
return QW
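
# --- Illustrative sketch (not part of the original module) ---
# rel_hum() and mixhum_ptrh() are intended to round-trip: relative humidity is
# computed from the un-downscaled pressure/temperature and then converted back
# to a (downscaled) 2-meter specific humidity. The stand-in object, its field
# values and the helper name below are hypothetical and only cover the
# attributes these two functions actually read.
def _humidity_roundtrip_demo():
    import numpy as np

    class _DemoForcings:
        pass

    f = _DemoForcings()
    # Indexing follows the usage above: 4 = 2-m temperature (K),
    # 5 = specific humidity (kg/kg), 6 = surface pressure (Pa).
    f.final_forcings = np.zeros((8, 1, 1))
    f.final_forcings[4, :, :] = 290.0
    f.final_forcings[5, :, :] = 0.008
    f.final_forcings[6, :, :] = 95000.0
    f.t2dTmp = np.full((1, 1), 290.0)     # un-downscaled 2-m temperature (K)
    f.psfcTmp = np.full((1, 1), 95000.0)  # un-downscaled surface pressure (Pa)
    rh = rel_hum(f, None)                 # ConfigOptions is unused by these functions
    qv = mixhum_ptrh(f, rh, 2, None)      # iswit=2 -> specific humidity
    return rh, qv                         # qv recovers ~0.008 kg/kg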
``` |
{
"source": "jmccrohan/solarman-dls-utils",
"score": 3
} |
#### File: solarman-dls-utils/examples/register_scan.py
```python
from pysolarmanv5.pysolarmanv5 import PySolarmanV5, V5FrameError
import umodbus.exceptions
def main():
modbus = PySolarmanV5("192.168.1.24", 123456789, port=8899, mb_slave_id=1, verbose=0)
print("Scanning input registers")
for x in range(30000, 39999):
try:
val = modbus.read_input_registers(register_addr=x, quantity=1)[0]
print(f"Register: {x:05}\t\tValue: {val:05} ({val:#06x})")
except (V5FrameError, umodbus.exceptions.IllegalDataAddressError):
continue
print("Finished scanning input registers")
print("Scanning holding registers")
for x in range(40000, 49999):
try:
val = modbus.read_holding_registers(register_addr=x, quantity=1)[0]
print(f"Register: {x:05}\t\tValue: {val:05} ({val:#06x})")
except (V5FrameError, umodbus.exceptions.IllegalDataAddressError):
continue
print("Finished scanning holding registers")
if __name__ == "__main__":
main()
``` |
{
"source": "jmccrosky/semanticdist",
"score": 2
} |
#### File: semanticdist/semanticdist/embeddings.py
```python
import io
import base64
from PIL import Image
from semanticdist import utils
from sklearn.metrics.pairwise import cosine_similarity
# add thumbnail to diagnostics
def get_embeddings(data, part, context, pickle_file=None, type='text'):
count_before = 0
if f'{part}_embedding' in data:
needed_indexes = data.index[(~data[part].isnull()) & (
data[f'{part}_embedding'].isnull())]
count_before = len(data[~data[f'{part}_embedding'].isnull()])
else:
needed_indexes = data.index[~data[part].isnull()]
data[f'{part}_embedding'] = None
elements = list(data.loc[needed_indexes, part])
if type == 'text':
embeddings = list(context['language_model'].encode(
elements, show_progress_bar=True))
elif type == 'image':
embeddings = list(context['image_model'].encode(
[Image.open(io.BytesIO(base64.b64decode(t))) for t in elements], show_progress_bar=True))
for i in range(len(needed_indexes)):
data.at[needed_indexes[i], f'{part}_embedding'] = embeddings[i]
count_after = len(data[~data[f'{part}_embedding'].isnull()])
if count_after != count_before + len(needed_indexes):
print(
f"Warning: counts are wrong. Pickle not saved. {count_after} {count_before} {len(needed_indexes)}")
return data
if pickle_file is not None:
utils.save_data(data, pickle_file, context)
return data
def get_similarity_matrix(embeddings):
return cosine_similarity(list(embeddings))
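
# --- Illustrative sketch (not part of the original module) ---
# get_similarity_matrix() expects an iterable of fixed-length embedding vectors
# (for example, the '<part>_embedding' column produced by get_embeddings) and
# returns an NxN matrix of pairwise cosine similarities. The vectors and helper
# name below are made up purely for demonstration.
def _similarity_demo():
    import numpy as np
    vectors = [np.array([1.0, 0.0]), np.array([0.0, 1.0]), np.array([1.0, 1.0])]
    sim = get_similarity_matrix(vectors)
    # sim[0, 1] == 0.0 (orthogonal vectors), sim[0, 2] ~= 0.707
    return sim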
```
#### File: semanticdist/semanticdist/utils.py
```python
import pickle
import plotly.express as px
import numpy as np
import gspread_dataframe as gd
import pandas as pd
from scipy.spatial.distance import pdist, squareform
def get_raw_data(context):
_query = '''
SELECT
*
FROM
`moz-fx-data-shared-prod.regrets_reporter_analysis.yt_api_data_v7`
WHERE
takedown = FALSE
'''
data = context['bq_client'].query(
_query
).result(
).to_dataframe(
bqstorage_client=context['bq_storage_client']
)
total_rows = len(data)
data.drop_duplicates(subset="video_id", keep='first',
inplace=True, ignore_index=True)
unique_rows = len(data)
if total_rows != unique_rows:
print(
f"Warning: raw table has {total_rows - unique_rows} duplicate rows or {100 * (total_rows - unique_rows) / unique_rows}%.")
return data
def update_from_raw_data(data, context):
_query = '''
SELECT
*
FROM
`moz-fx-data-shared-prod.regrets_reporter_analysis.yt_api_data_v7`
WHERE
takedown = FALSE
'''
new_data = context['bq_client'].query(
_query
).result(
).to_dataframe(
bqstorage_client=context['bq_storage_client']
).loc[lambda d: ~ d.video_id.isin(data.video_id)]
if len(new_data) > 0:
return pd.concat([data, new_data])
else:
print("Warning: no new data acquired.")
return data
def save_data(data, pickle_file, context):
with open(context['gdrive_path'] + pickle_file, 'wb') as handle:
pickle.dump(data, handle,
protocol=pickle.HIGHEST_PROTOCOL)
def load_data(pickle_file, context):
with open(context['gdrive_path'] + pickle_file, 'rb') as handle:
return pickle.load(handle)
def plot_similarity_matrix(m):
fig = px.imshow(m, width=1600, height=800)
fig.update_layout(
margin=dict(l=20, r=20, t=20, b=20),
paper_bgcolor="LightSteelBlue",
)
return fig
def get_indices_of_k_largest(arr, k):
arr[np.tril_indices(arr.shape[0], 0)] = np.nan
idx = np.argpartition((-arr).ravel(), k)
return tuple(np.array(np.unravel_index(idx, arr.shape))[:, range(min(k, 0), max(k, 0))])
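
# --- Illustrative sketch (not part of the original module) ---
# get_indices_of_k_largest() masks the lower triangle and diagonal with NaN so
# each unordered pair of a (symmetric) similarity matrix is considered once,
# then returns the row/column indices of the k largest remaining entries.
# Note that it modifies `arr` in place. The matrix and helper name below are
# made up for demonstration.
def _k_largest_demo():
    m = np.array([[0.0, 0.9, 0.2],
                  [0.9, 0.0, 0.7],
                  [0.2, 0.7, 0.0]])
    rows, cols = get_indices_of_k_largest(m, 2)
    # Returns the pairs (0, 1) and (1, 2) for this matrix, though argpartition
    # does not guarantee their relative order.
    return list(zip(rows, cols))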
def prep_videovote_sheet(data, pairs, tab, context, existing=None):
left = data.iloc[pairs[0]].reset_index()
right = data.iloc[pairs[1]].reset_index()
vvdata = pd.DataFrame({
"title_a": left.title,
"channel_a": left.channel,
"description_a": left.description,
"id_a": left.video_id,
"title_b": right.title,
"channel_b": right.channel,
"description_b": right.description,
"id_b": right.video_id,
"vote": None,
})
    for i, r in vvdata.iterrows():
        if r.id_a > r.id_b:
            # Write the swap back through .loc: rows yielded by iterrows() are
            # copies, so assigning to `r` alone would not update vvdata.
            vvdata.loc[i, ["title_a", "channel_a", "description_a", "id_a",
                           "title_b", "channel_b", "description_b", "id_b"]] = [
                r.title_b, r.channel_b, r.description_b, r.id_b,
                r.title_a, r.channel_a, r.description_a, r.id_a]
    if existing is not None:
        vvdata = vvdata[[(r.id_a, r.id_b) not in existing for i,
                         r in vvdata.iterrows()]]
ss = context['gspread_client'].open("Videovote backend")
try:
ws = ss.add_worksheet(tab, rows=len(vvdata), cols="9")
except Exception:
ws = ss.worksheet(tab)
gd.set_with_dataframe(ws, vvdata.reset_index(
drop=True), include_index=False)
def init_eval_pickle(name, context):
temp = {}
with open(context['gdrive_path'] + name, 'wb') as handle:
pickle.dump(temp, handle, protocol=pickle.HIGHEST_PROTOCOL)
def update_eval_data(eval_data, sheet, context):
ws = context['gspread_client'].open("Videovote backend").worksheet(sheet)
new_eval_data = gd.get_as_dataframe(ws).dropna(
axis=1, how='all').dropna(how='all')
for i, r in new_eval_data.iterrows():
key = (r.id_a, r.id_b)
if key in eval_data:
eval_data[key] = eval_data[key] + [r.vote]
else:
eval_data[key] = [r.vote]
return eval_data
def get_equality_matrix(data, part):
d = pdist([[i] for i in data[part]], lambda x, y: 1 if x == y else 0)
return squareform(d)
def print_data_diagnostics(data):
n = len(data)
print(f"Data is length {n}")
nt = len(data[data.transcript.str.len() > 0])
print(f"With transcripts: {nt}")
possible_parts = ["title", "transcript", "description", "thumbnail"]
possible_types = ["embedding", "entities"]
dups = len(data[data.video_id.duplicated()])
if dups != 0:
print(f"Warning! {dups} dupes detected.")
for part in possible_parts:
ap_n = nt if part == "transcript" else n
for type in possible_types:
if f"{part}_{type}" in data:
nv = data[f"{part}_{type}"].isnull().sum()
print(
f"Data has {part}_{type} for {n-nv} rows or {(n-nv)/ap_n * 100}%")
``` |
{
"source": "jmccrosky/synthpop",
"score": 2
} |
#### File: synthpop/processor/processor.py
```python
import numpy as np
import pandas as pd
# global variables
from synthpop import NUM_COLS_DTYPES, CAT_COLS_DTYPES
NAN_KEY = 'nan'
NUMTOCAT_KEY = 'numtocat'
class Processor:
def __init__(self, spop):
self.spop = spop
self.processing_dict = {NUMTOCAT_KEY: {},
NAN_KEY: {}
}
def preprocess(self, df, dtypes):
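        # Summary (added for readability): numeric columns listed in numtocat
        # are binned into `catgroups[col]` categories, with NaN and each
        # cont_na value mapped to extra category codes; categorical columns
        # get an explicit 'NaN_category'; other numeric columns with missing
        # values gain a companion '<col>_NaN' flag column and have their
        # missing entries zero-filled.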
for col in self.spop.visited_columns:
col_nan_indices = df[col].isna()
cont_nan_indices = {v: df[col] == v for v in self.spop.cont_na.get(col, [])}
col_nan_series = [(np.nan, col_nan_indices)] + list(cont_nan_indices.items())
col_all_nan_indices = pd.DataFrame({index: value[1] for index, value in enumerate(col_nan_series)}).max(axis=1)
col_not_nan_indices = np.invert(col_all_nan_indices)
# transform numerical columns in numtocat to categorical
if col in self.spop.numtocat:
self.processing_dict[NUMTOCAT_KEY][col] = {'dtype': self.spop.df_dtypes[col],
'categories': {}
}
# Dealing With Non-NaN Values
not_nan_values = df.loc[col_not_nan_indices, col].copy()
df.loc[col_not_nan_indices, col] = pd.cut(df.loc[col_not_nan_indices, col], self.spop.catgroups[col], labels=range(self.spop.catgroups[col]), include_lowest=True)
grouped = pd.DataFrame({'grouped': df.loc[col_not_nan_indices, col], 'real': not_nan_values}).groupby('grouped')
self.processing_dict[NUMTOCAT_KEY][col]['categories'] = grouped['real'].apply(np.array).to_dict()
# Dealing with NaN
for index, (_, bool_series) in enumerate(col_nan_series):
nan_cat = self.spop.catgroups[col] + index
self.processing_dict[NUMTOCAT_KEY][col]['categories'][nan_cat] = df.loc[bool_series, col].to_numpy()
df.loc[bool_series, col] = nan_cat
df[col] = df[col].astype('category')
self.spop.df_dtypes[col] = 'category'
else:
# NaNs in category columns
                # need to process NaNs only, as all other categories will be taken care of automatically
if self.spop.df_dtypes[col] in CAT_COLS_DTYPES:
if col_nan_indices.any():
# TODO beware of 'NaN_category' naming
col_nan_category = 'NaN_category'
self.processing_dict[NAN_KEY][col] = {'dtype': self.spop.df_dtypes[col],
'nan_value': col_nan_category
}
df[col].cat.add_categories(col_nan_category, inplace=True)
df[col].fillna(col_nan_category, inplace=True)
# NaNs in numerical columns
elif self.spop.df_dtypes[col] in NUM_COLS_DTYPES:
if col_all_nan_indices.any():
# insert new column in df
# TODO beware of '_NaN' naming
col_nan_name = col + '_NaN'
df.insert(df.columns.get_loc(col), col_nan_name, 0)
self.processing_dict[NAN_KEY][col] = {'col_nan_name': col_nan_name,
'dtype': self.spop.df_dtypes[col],
'nan_flags': {}
}
for index, (cat, bool_series) in enumerate(col_nan_series):
cat_index = index + 1
self.processing_dict[NAN_KEY][col]['nan_flags'][cat_index] = cat
df.loc[bool_series, col_nan_name] = cat_index
df.loc[col_all_nan_indices, col] = 0
df[col_nan_name] = df[col_nan_name].astype('category')
self.spop.df_dtypes[col_nan_name] = 'category'
return df
def postprocess(self, synth_df):
for col, processing_numtocat_col_dict in self.processing_dict[NUMTOCAT_KEY].items():
synth_df[col] = synth_df[col].astype(object)
col_synth_df = synth_df[col].copy()
for category, category_values in processing_numtocat_col_dict['categories'].items():
category_indices = col_synth_df == category
synth_df.loc[category_indices, col] = np.random.choice(category_values, size=category_indices.sum(), replace=True)
# cast dtype back to original (float for int column with NaNs)
if synth_df[col].isna().any() and processing_numtocat_col_dict['dtype'] == 'int':
synth_df[col] = synth_df[col].astype(float)
else:
synth_df[col] = synth_df[col].astype(processing_numtocat_col_dict['dtype'])
# self.spop.df_dtypes[col] = processing_numtocat_col_dict['dtype']
for col, processing_nan_col_dict in self.processing_dict[NAN_KEY].items():
# NaNs in category columns
            # need to postprocess NaNs only; all other categories will be taken care of automatically
if processing_nan_col_dict['dtype'] in CAT_COLS_DTYPES:
col_nan_value = processing_nan_col_dict['nan_value']
synth_df[col] = synth_df[col].astype(object)
synth_df.loc[synth_df[col] == col_nan_value, col] = np.nan
synth_df[col] = synth_df[col].astype('category')
# NaNs in numerical columns
elif processing_nan_col_dict['dtype'] in NUM_COLS_DTYPES:
for col_nan_flag, col_nan_value in processing_nan_col_dict['nan_flags'].items():
nan_flag_indices = synth_df[processing_nan_col_dict['col_nan_name']] == col_nan_flag
synth_df.loc[nan_flag_indices, col] = col_nan_value
synth_df.drop(columns=processing_nan_col_dict['col_nan_name'], inplace=True)
return synth_df
``` |
{
"source": "jmcculloch/greendo",
"score": 2
} |
#### File: jmcculloch/greendo/greendo.py
```python
import greendo
import json
import argparse
from getpass import getpass
from contextlib import closing
def main():
ap = argparse.ArgumentParser(prog="greendo")
ap.add_argument("--email", "-u", type=str, help="Email address registered with the GDO app. Default: request from stdin.")
ap.add_argument("--pwd", "-p", type=str, help="Password for the registered email. Default: request from stdin.")
ap.add_argument("--dry", "-n", action="store_true", help="Dry run - don't execute commands, just display them")
ap.add_argument("--dev", "-d", type=int, default=0, help="Door opener device index, if you have more than one.")
sub_ap = ap.add_subparsers(dest="target", help="Commands")
ap_status = sub_ap.add_parser("status", help="Output status for a given subsystem.")
ap_status.add_argument("thing", choices=("config", "charger", "door", "light", "fan"),
help="Get status for the given subsystem.")
ap_door = sub_ap.add_parser("door", help="Manipulate the door: open, close, preset.")
ap_door.add_argument("cmd", choices=("open", "close", "preset"))
ap_motion = sub_ap.add_parser("motion", help="Turn the motion sensor on or off.")
ap_motion.add_argument("set", choices=("on", "off"))
ap_light = sub_ap.add_parser("light", help="Turn the light on or off.")
ap_light.add_argument("set", choices=("on", "off"))
ap_light_timer = sub_ap.add_parser("lighttimer", help="Set the number of minutes for the light timer.")
ap_light_timer.add_argument("minutes", type=int)
ap_fan = sub_ap.add_parser("fan", help="Set fan to integer speed 0-100 (0 is off)")
ap_fan.add_argument("speed", type=int)
ap_vacation = sub_ap.add_parser("vacation", help="Turn vacation mode on or off.")
ap_vacation.add_argument("set", choices=("on", "off"))
ap_preset_pos = sub_ap.add_parser("preset", help="Set the preset position in integer inches.")
ap_preset_pos.add_argument("inches", type=int)
args = ap.parse_args()
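    # Example invocation (illustrative): python greendo.py -u user@example.com door open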
email = args.email
pwd = args.pwd
if args.email is None:
email = input("email: ").strip()
if args.pwd is None:
        pwd = getpass("password: ")  # illustrative prompt text
with closing(greendo.Client(email, pwd)) as client:
        device = client.devices[max(0, min(args.dev, len(client.devices) - 1))]
cmd = None
if args.target == "status":
thing = args.thing
if thing == "config":
print("Session:\n", json.dumps(client.session.data, indent=2))
print("Devices:\n", json.dumps([{"meta": d.meta, "data": d.data} for d in client.devices], indent=2))
elif thing == "charger":
print(json.dumps({
"level": device.charger.level()
}, indent=2))
elif thing == "door":
door = device.door
print(json.dumps({
"status": door.door_status(),
"error": door.door_error(),
"pos": door.door_pos(),
"max": door.door_max(),
"preset": door.preset_pos(),
"motion": door.motion(),
"alarm": door.alarm(),
"motor": door.motor(),
"sensor": door.sensor(),
"vacation": door.vacation(),
}, indent=2))
elif thing == "light":
light = device.light
print(json.dumps({
"light": light.on(),
"timer": light.timer(),
}, indent=2))
elif thing == "fan":
print(json.dumps({
"speed": device.fan.speed(),
}, indent=2))
return
if args.target == "door":
if args.cmd == "open":
cmd = device.cmd_open()
elif args.cmd == "close":
cmd = device.cmd_close()
else:
cmd = device.cmd_preset()
elif args.target == "motion":
cmd = device.cmd_motion(args.set == "on")
elif args.target == "light":
cmd = device.cmd_light(args.set == "on")
elif args.target == "lighttimer":
cmd = device.cmd_lighttimer(max(0, args.minutes))
elif args.target == "fan":
cmd = device.cmd_fan(max(0, min(100, args.speed)))
elif args.target == "vacation":
cmd = device.cmd_vacation(args.set == "on")
elif args.target == "preset":
cmd = device.cmd_preset(max(0, args.inches))
if args.dry:
print("Dry Run:")
print(json.dumps(cmd, indent=2))
return
print("Request to {}:".format(client.API_URL_SOCKET))
print(json.dumps(cmd, indent=2))
result = client.send_command(cmd)
print("Response:")
print(json.dumps(result, indent=2))
if __name__ == '__main__':
main()
``` |
{
"source": "Jmccullough12/simcov-antibodies",
"score": 3
} |
#### File: simcov-antibodies/scripts/avg_virs.py
```python
import sys
import numpy as np
def generateAverage(name, steps):
#listy = []
#for i in range(int(steps)):
#listy.append(0.0)
listy = np.zeros(int(steps))
for num in range(10):
resultFile = num + 1
file = "results_" + name + "_batch/simcov" + str(resultFile) + ".stats"
with open(file, 'r') as f:
file1 = f.readlines()
for i in range(int(steps)):
if (i == 0):
continue
line = file1[i].split('\t')
listy[i-1] = listy[i-1] + float(line[8])
#resultFile += 1
#result = [] # if we want to plot
for i in range(len(listy)):
listy[i] = listy[i] / 10
return listy
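# Note (illustrative): each results_<name>_batch directory holds ten
# simcov*.stats runs; column 8 of each tab-separated line is assumed to be the
# virion count being averaged here.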
def saveAverage(name, xs, ys):
xs2 = xs.astype(int)
#result = np.column_stack((xs2,ys))
#print(result)
file = "averaged_results/" + name + ".stats"
result = []
with open(file, 'w') as f:
for i in range(len(xs)):
content = str(xs2[i]) + "\t" + str(ys[i]) + "\n"
f.write(content)
def main():
name = sys.argv[1]
steps = sys.argv[2]
virs = generateAverage(name, steps)
#print(virs)
timesteps = np.zeros(int(steps), dtype=int)
for i in range(int(steps)):
timesteps[i] = int(i)
timesteps = timesteps.astype(int)
saveAverage(name, timesteps, virs)
if __name__=="__main__":
main()
```
#### File: simcov-antibodies/scripts/dynamic_plot_three.py
```python
import sys
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
#plot_subplot(options.compare_file, ax_virus, [8], 'avg virions per cell', lw=4, alpha=0.3, clear=False,
# log_scale=options.log_scale, scale=options.virus_scale)
def makePlot(xs, ys1, ys2, ys3, label1, label2, label3):
fig = plt.figure()
ax = plt.axes()
#ax.legend(loc='upper left')
ax.set_ylabel('Virion Count')
ax.set_xlabel('Time (days)')
ax.set_ylim(0.5, 10 * np.max(ys1))
ax.set_yscale('log')
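    # Assuming one simulation step per minute (1440 steps per day), the tick
    # formatter below converts step indices on the x-axis into days.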
plt.gca().get_xaxis().set_major_formatter(plt.FuncFormatter(lambda x, p: format(int(x/1440), ',')))
colors = ['blue', 'red', 'orange']
plt.locator_params(axis='x',nbins=25)
#ax.xaxis.set_ticks(np.linspace(0,len(xs),1))
ax.plot(xs,ys1, label="Not Vaccinated",color=colors[0])
ax.plot(xs,ys2, label="Vaccinated",color=colors[1])
#ax.plot(xs,ys3, label="Scale = 0.10",color=colors[2])
ax.legend(loc='lower left')
ax.set_title("Vaccinated vs. Not Vaccinated")
plt.savefig('vaccinated.png')
def getYs(name, steps):
file = "averaged_results/" + name + ".stats"
ys = np.zeros(int(steps))
with open(file, 'r') as f:
file1 = f.readlines()
for i in range(int(steps)):
line = file1[i].split('\t')
ys[i] = float(line[1])
return ys
def main():
name1 = sys.argv[1]
name2 = sys.argv[2]
name3 = sys.argv[3]
steps = sys.argv[4]
ys1 = getYs(name1, steps)
ys2 = getYs(name2, steps)
ys3 = getYs(name3, steps)
xs = np.zeros(int(steps), dtype=int)
for i in range(int(steps)):
xs[i] = int(i)
print(ys1)
print(ys2)
print(ys3)
makePlot(xs,ys1,ys2,ys3,name1,name2,name3)
if __name__=="__main__":
main()
``` |
{
"source": "jmcda001/sprintBurndown",
"score": 2
} |
#### File: jmcda001/sprintBurndown/burndown.py
```python
import os
import csv
import sys
import ntpath
import json
import datetime
import requests
import argparse
from config import TRELLO_API_KEY, TRELLO_API_TOKEN
from pprint import pprint
parser = argparse.ArgumentParser(description='Generate sprint burndown chart from Trello board.')
parser.add_argument('boards',help='File containing board(s) and corresponding configurations')
querystring = {"actions":"none","boardStars":"none","cards":"all","card_pluginData":"false","checklists":"none","customFields":"false","fields":"name,desc,descData,closed,idOrganization,pinned,url,shortUrl,prefs,labelNames,archive","lists":"open","labels": "all","members":"all","memberships":"none","membersInvited":"none","membersInvited_ields":"all","pluginData":"false","organization":"false","organization_pluginData":"false","myPrefs":"false","tags":"false","key":TRELLO_API_KEY,"token": TRELLO_API_TOKEN}
teamKanbanLabels = {}
kanbanLists = {}
teamPriorityLabels = {}
priorityLabels = {}
teamSizeLabels = {}
sizeLabels = {}
boards = []
skipMembers = []
def extractLists(lists):
boardLists = {}
for boardList in lists:
boardLists[boardList.get('id')] = {}
boardLists[boardList.get('id')]['name'] = boardList.get('name')
if teamKanbanLabels.get(boardList.get('name')) is not None:
kanbanLists[teamKanbanLabels.get(boardList.get('name'))] = boardList.get('id')
boardLists[boardList.get('id')]['cards'] = []
return boardLists
def extractCards(cardsDict,lists,members):
cards = {}
for card in cardsDict:
# card['labels'] -> []
# card['idLabels'] -> []
# card['idList'] -> list ID
# card['idMembers'] -> [member ids]
if not card.get('closed'):
newCard = {'name': card.get('name'),
'idList': card.get('idList'),
'idMembers': card.get('idMembers'),
'labels': card.get('idLabels')}
cards[card.get('id')] = newCard
if card.get('idList') in lists:
lists.get(card.get('idList')).get('cards').append(newCard)
return cards
def extractLabels(labelsDict):
labels = {}
for label in labelsDict:
labels[label.get('id')] = label.get('name')
if label.get('name') in teamSizeLabels:
sizeLabels[teamSizeLabels.get(label.get('name'))] = label.get('id')
elif label.get('name') in teamPriorityLabels:
priorityLabels[teamPriorityLabels.get(label.get('name'))] = label.get('id')
return labels
def extractMembers(membersDict):
members = {}
for member in membersDict:
if member.get('fullName') not in skipMembers:
newMember = {'name': member.get('fullName'),
'cardsDone': { 'Size': {'Size - Small': 0,
'Size - Medium': 0,
'Size - Large': 0,
'Unsized': 0},
'Shared': { }}}
for i in range(1,len(membersDict)+1):
newMember['cardsDone']['Shared'][i] = 0
members[member.get('id')] = newMember
return members
# members is a dict of members to object of their counts
def countCardsInListByLabels(listDict,labels,members):
for card in listDict.get('cards'):
for cardMember in card.get('idMembers'):
members[cardMember]['cardsDone']['Shared'][len(card.get('idMembers'))] += 1
sizeAdded = False
for labelName,labelId in labels.items():
if labelId in card.get('labels'):
members[cardMember]['cardsDone']['Size'][labelName] += 1
sizeAdded = True
break
if not sizeAdded:
members[cardMember]['cardsDone']['Size']['Unsized'] += 1
def countCardsInListByMemberId(cardList,memberId):
cardCount = 0
for card in cardList:
if memberId in card.get('idMembers'):
cardCount += 1
return cardCount
# boardDict contains the dict of the JSON file from Trello
def analyzeSprint(boardDict):
print("Analyzing "+boardDict.get('name')+"...")
boardData = {'name': boardDict.get('name'), 'date': boardDict.get('dateLastView')}
members = extractMembers(boardDict.get('members'))
boardLists = extractLists(boardDict.get('lists'))
labels = extractLabels(boardDict.get('labels'))
cards = extractCards(boardDict.get('cards'),boardLists,members)
return {**boardData,**{'members': members, 'boardLists': boardLists, 'labels': labels, 'cards': cards}}
def writeBurndown(csvFilename,burndownDict,memberBreakdown=[]):
print("Writing to "+csvFilename+"...")
data = [['',burndownDict.get('date')]]
for kanbanList in kanbanLists:
cardList = burndownDict.get('boardLists').get(kanbanLists.get(kanbanList)).get('cards')
cardsInList = len(cardList)
data.append([kanbanList,cardsInList])
if kanbanList in memberBreakdown:
for memberId,member in burndownDict.get('members').items():
data.append([member.get('name'),countCardsInListByMemberId(cardList,memberId)])
countCardsInListByLabels(burndownDict.get('boardLists').get(kanbanLists.get('Biz/Dev Done')),sizeLabels,
burndownDict.get('members'))
data.append(['name','hours','small','medium','large','1','2','3','4','5','share average'])
for memberId,member in burndownDict.get('members').items():
small = member.get('cardsDone').get('Size').get('Size - Small')
medium = member.get('cardsDone').get('Size').get('Size - Medium')
large = member.get('cardsDone').get('Size').get('Size - Large')
one = member.get('cardsDone').get('Shared').get(1)
two = member.get('cardsDone').get('Shared').get(2)
three = member.get('cardsDone').get('Shared').get(3)
four = member.get('cardsDone').get('Shared').get(4)
five = member.get('cardsDone').get('Shared').get(5)
try:
shareAverage = (one + 2*two + 3*three + 4*four + 5*five) / (one + two + three + four + five)
hours = (small * 2 + medium * 4 + large * 8) / shareAverage
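            # Worked example (illustrative): 2 small, 1 medium and 0 large cards
            # completed solo (shareAverage = 1.0) gives hours = (2*2 + 1*4) / 1.0 = 8;
            # sharing cards with more members raises shareAverage and
            # proportionally lowers the hours credited.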
except ZeroDivisionError:
print("Warning: "+member.get('name')+" has not worked on any cards")
shareAverage = 0
hours = 0
data.append([member.get('name'),hours,small,medium,large,one,two,three,four,five,shareAverage])
with open(csvFilename,'w') as csvfile:
csvwriter = csv.writer(csvfile,delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)
for dataRow in data:
csvwriter.writerow(dataRow)
def configure(batchFilename):
with open(batchFilename,'r') as batchFile:
configuration = json.loads(batchFile.read())
if 'skipMembers' in configuration:
global skipMembers
skipMembers = configuration.get('skipMembers')
for newBoard in configuration.get('boards'):
boards.append(newBoard)
def runBatch():
for board in boards:
global teamKanbanLabels
global teamPriorityLabels
global teamSizeLabels
if 'skipMembers' in board:
global skipMembers
skipMembers = board.get('skipMembers')
initializeGlobals()
teamKanbanLabels = board.get('teamKanbanLabels')
teamPriorityLabels = board.get('teamPriorityLabels') if 'teamPriorityLabels' in board else {}
teamSizeLabels = board.get('teamSizeLabels')
boardJson = retrieveJsonFromURL(board.get('url'))
burndownFilename = "csv/"+boardJson.get('name')+'.csv'
writeBurndown(burndownFilename,analyzeSprint(boardJson),
['Biz/Dev Backlog',
'Biz/Dev In Progress',
'Biz/Dev In Review'])
def retrieveJsonFromURL(url):
return json.loads(requests.request("GET", url, params=querystring).text)
def initializeGlobals():
global teamKanbanLabels
global kanbanLists
global teamPriorityLabels
global priorityLabels
global teamSizeLabels
global sizeLabels
teamKanbanLabels = {}
kanbanLists = {}
teamPriorityLabels = {}
priorityLabels = {}
teamSizeLabels = {}
sizeLabels = {}
if __name__ == '__main__':
args = parser.parse_args()
if args.boards is not None:
configure(args.boards)
runBatch()
```
#### File: jmcda001/sprintBurndown/configure.py
```python
import requests
import argparse
import re
import json
from pprint import pprint
from config import TRELLO_API_KEY, TRELLO_API_TOKEN
parser = argparse.ArgumentParser(description='Generate configuration for sprint burndown for a specific Trello board.')
parser.add_argument('--url',dest='url',help='URL for the trello board')
parser.add_argument('--id',dest='id',help='ID for the trello board')
parser.add_argument('-o',dest='outFilename',help='Output filename for the configuration')
APIKeyToken = {"key":TRELLO_API_KEY,"token": TRELLO_API_TOKEN}
querystring = {**{"actions":"none","boardStars":"none","cards":"none","card_pluginData":"false","checklists":"none","customFields":"false","fields":"name","lists":"open","labels": "none","members":"none","memberships":"none","membersInvited":"none","membersInvited_ields":"none","pluginData":"false","organization":"false","organization_pluginData":"false","myPrefs":"false","tags":"false"},**APIKeyToken}
#https://trello.com/b/wrXzb9ug/checkmates
trelloBoardURL = re.compile(r'https://trello.com/b/(\w+)/\S+')
def extractTrelloBoardID(url):
if trelloBoardURL.match(url):
return trelloBoardURL.match(url).group(1)
else:
print("Warning: \""+url+"\" is not a URL for a trello board")
return None
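# Illustrative example, using the sample URL in the comment above:
#   extractTrelloBoardID("https://trello.com/b/wrXzb9ug/checkmates") -> "wrXzb9ug"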
def configureMembers(boardId):
print('Configuring tracked members.......')
skipMembers = []
members = json.loads(requests.request("GET", "https://api.trello.com/1/boards/"+boardId+"/members",
params=APIKeyToken).text)
for index,member in enumerate(members):
print(str(index)+") "+member.get('fullName'))
print()
skippedIndices = input("List the indices of members you DON'T want to track: ")
for index in skippedIndices.split(','):
skipMembers.append(members[int(index)].get('fullName'))
return {"skipMembers": skipMembers}
def configureTrackedLists(boardId):
print('Configuring tracked lists.......')
trackedLists = {}
lists = json.loads(requests.request("GET", "https://api.trello.com/1/boards/"+boardId+"/lists",
params=APIKeyToken).text)
for index,boardList in enumerate(lists):
print(str(index)+") "+boardList.get('name'))
print()
usBacklog = input("Which list corresponds to the 'User Story Backlog'? ")
usDoing = input("Which list corresponds to the 'User Story In Progress'? ")
usDone = input("Which list corresponds to the 'User Story Done'? ")
bizDevBacklog = input("Which list corresponds to the 'Biz/Dev Backlog'? ")
bizDevDoing = input("Which list corresponds to the 'Biz/Dev In Progress'? ")
bizDevReview = input("Which list corresponds to the 'Biz/Dev In Review'? ")
bizDevDone = input("Which list corresponds to the 'Biz/Dev Done'? ")
print()
return { "teamKanbanLabels": {lists[int(usBacklog)].get('name'): "US - Backlog",
lists[int(usDoing)].get('name'): "US - In Progress",
lists[int(usDone)].get('name'): "US - Done",
lists[int(bizDevBacklog)].get('name'): "Biz/Dev Backlog",
lists[int(bizDevDoing)].get('name'): "Biz/Dev In Progress",
lists[int(bizDevReview)].get('name'): "Biz/Dev In Review",
lists[int(bizDevDone)].get('name'): "Biz/Dev Done"}}
def configureLabelMapping(boardId):
print('Configuring label mapping.......')
teamLabels = []
labels = json.loads(requests.request("GET", "https://api.trello.com/1/boards/"+boardId+"/labels",
params=APIKeyToken).text)
for label in labels:
teamLabels.append(label.get('name'))
for index,label in enumerate(teamLabels):
print(str(index)+") "+label)
print()
smallIndex = input("Which label corresponds to a 'Size - Small' task? ")
mediumIndex = input("Which label corresponds to a 'Size - Medium' task? ")
largeIndex = input("Which label corresponds to a 'Size - Large' task? ")
print()
return {"teamSizeLabels": {teamLabels[int(smallIndex)]: "Size - Small",
teamLabels[int(mediumIndex)]: "Size - Medium",
teamLabels[int(largeIndex)]: "Size - Large"}}
def configure(boardId):
boardURL = "https://api.trello.com/1/boards/"+boardId
boardName = json.loads(requests.request("GET", "https://api.trello.com/1/boards/"+boardId,
params=querystring).text).get('name')
skipMembers = configureMembers(boardId)
listsMapping = configureTrackedLists(boardId)
labelMapping = configureLabelMapping(boardId)
return {"boards":[{**{"name":boardName,"url":boardURL},**listsMapping,**skipMembers,**labelMapping}]}
if __name__ == '__main__':
args = parser.parse_args()
if args.url is not None:
boardId = extractTrelloBoardID(args.url)
elif args.id is not None:
boardId = args.id
else:
boardId = extractTrelloBoardID(input("Input the URL for the Trello board"))
if boardId is not None:
configuration = configure(boardId)
if args.outFilename is not None:
print("Writing configuration to \""+args.outFilename+"\"...")
with open(args.outFilename,'w') as out:
out.write(json.dumps(configuration))
else:
pprint(configuration)
``` |
{
"source": "jmcdona1d/RocketLeague_Bot",
"score": 3
} |
#### File: RocketLeague_Bot/RLBot/RLBot.py
```python
from rlbot.agents.base_agent import BaseAgent, SimpleControllerState
from rlbot.utils.structures.game_data_struct import GameTickPacket
import math
import time
class RLBot(BaseAgent):
def __init__(self, name, team, index):
super().__init__(name, team, index)
self.controller = SimpleControllerState() #Initialize controller
#Constants
self.DODGETIME = 0.2
self.DODGEDISTANCE = 500
self.DISTANCETOBOOST = 1500
self.POWERSLIDEANGLE = math.radians(170)
#Game value instance variables
self.bot_pos = None
self.bot_rot = None
#Dodging instance variables
self.should_dodge = False
self.on_second_jump = False
self.next_dodge_time = 0
def get_output(self, packet: GameTickPacket) -> SimpleControllerState:
        self.bot_yaw = packet.game_cars[self.index].physics.rotation.yaw  # own car, matching bot_pos below
self.bot_pos = packet.game_cars[self.index].physics.location
ball_pos = packet.game_ball.physics.location
self.controller.throttle = 1
#Check if opponent's goal is behind the ball
        if (self.team == 0 and self.bot_pos.y < ball_pos.y) or (self.team == 1 and self.bot_pos.y > ball_pos.y):
self.aim(ball_pos.x, ball_pos.y)
#Shoot if close enough to ball
if distance(self.bot_pos.x, self.bot_pos.y, ball_pos.x, ball_pos.y) < self.DODGEDISTANCE:
self.should_dodge = True
#If not then drive towards own goal (until are behind ball)
else:
if self.team == 0:
self.aim(0, -5000)
else:
self.aim(0,5000)
self.controller.jump = False #makes sure jump only lasts one frame
self.check_for_dodge()
#boost when over threshold away
self.controller.boost = distance(self.bot_pos.x, self.bot_pos.y, ball_pos.x, ball_pos.y) > self.DISTANCETOBOOST
if ball_pos.x == 0 and ball_pos.y == 0:
self.aim(ball_pos.x, ball_pos.y)
self.controller.boost = True
return self.controller
def aim(self, target_x, target_y):
angle_between_bot_and_target = math.atan2(target_y - self.bot_pos.y,
target_x - self.bot_pos.x)
angle_front_to_target = angle_between_bot_and_target - self.bot_yaw
# Correct the values
if angle_front_to_target < -math.pi:
angle_front_to_target += 2 * math.pi
if angle_front_to_target > math.pi:
angle_front_to_target -= 2 * math.pi
if angle_front_to_target < math.radians(-10):
# If the target is more than 10 degrees right from the centre, steer left
self.controller.steer = -1
elif angle_front_to_target > math.radians(10):
# If the target is more than 10 degrees left from the centre, steer right
self.controller.steer = 1
else:
# If the target is less than 10 degrees from the centre, steer straight
self.controller.steer = 0
#powerslide if angle greater than threshold
        self.controller.handbrake = abs(angle_front_to_target) > self.POWERSLIDEANGLE
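        # Illustrative: with the bot at the origin and yaw = 0, a target at
        # (0, 1000) gives angle_front_to_target = +pi/2, so steer is set to 1;
        # the handbrake engages only when the offset exceeds POWERSLIDEANGLE
        # (about 170 degrees).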
def check_for_dodge(self):
if self.should_dodge and time.time() > self.next_dodge_time:
self.controller.jump = True
self.controller.pitch = -1 #tilts stick fully forward
if self.on_second_jump:
self.on_second_jump = False
                self.should_dodge = False
else:
self.on_second_jump = True
self.next_dodge_time = time.time() + self.DODGETIME
def distance(x1, y1, x2, y2):
return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
``` |
{
"source": "jmcdono362/django",
"score": 2
} |
#### File: management/commands/migrate.py
```python
import time
from collections import OrderedDict
from importlib import import_module
from django.apps import apps
from django.core.checks import Tags, run_checks
from django.core.management.base import BaseCommand, CommandError
from django.core.management.sql import (
emit_post_migrate_signal, emit_pre_migrate_signal,
)
from django.db import DEFAULT_DB_ALIAS, connections, router
from django.db.migrations.autodetector import MigrationAutodetector
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.db.migrations.state import ModelState, ProjectState
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
help = "Updates database schema. Manages both apps with migrations and those without."
def add_arguments(self, parser):
parser.add_argument(
'app_label', nargs='?',
help='App label of an application to synchronize the state.',
)
parser.add_argument(
'migration_name', nargs='?',
help='Database state will be brought to the state after that '
'migration. Use the name "zero" to unapply all migrations.',
)
parser.add_argument(
'--noinput', '--no-input', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a database to synchronize. Defaults to the "default" database.',
)
parser.add_argument(
'--fake', action='store_true', dest='fake',
help='Mark migrations as run without actually running them.',
)
parser.add_argument(
'--fake-initial', action='store_true', dest='fake_initial',
help='Detect if tables already exist and fake-apply initial migrations if so. Make sure '
'that the current database schema matches your initial migration before using this '
'flag. Django will only check for an existing table name.',
)
parser.add_argument(
'--run-syncdb', action='store_true', dest='run_syncdb',
help='Creates tables for apps without migrations.',
)
def _run_checks(self, **kwargs):
issues = run_checks(tags=[Tags.database])
issues.extend(super()._run_checks(**kwargs))
return issues
def handle(self, *args, **options):
self.verbosity = options['verbosity']
self.interactive = options['interactive']
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
if module_has_submodule(app_config.module, "management"):
import_module('.management', app_config.name)
# Get the database we're operating from
db = options['database']
connection = connections[db]
# Hook for backends needing any database preparation
connection.prepare_database()
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# Raise an error if any migrations are applied before their dependencies.
executor.loader.check_consistent_history(connection)
# Before anything else, see if there's conflicting apps and drop out
# hard if there are any
conflicts = executor.loader.detect_conflicts()
if conflicts:
name_str = "; ".join(
"%s in %s" % (", ".join(names), app)
for app, names in conflicts.items()
)
raise CommandError(
"Conflicting migrations detected; multiple leaf nodes in the "
"migration graph: (%s).\nTo fix them run "
"'python manage.py makemigrations --merge'" % name_str
)
# If they supplied command line arguments, work out what they mean.
target_app_labels_only = True
if options['app_label'] and options['migration_name']:
app_label, migration_name = options['app_label'], options['migration_name']
if app_label not in executor.loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations." % app_label
)
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError(
"More than one migration matches '%s' in app '%s'. "
"Please be more specific." %
(migration_name, app_label)
)
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'." % (
migration_name, app_label))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif options['app_label']:
app_label = options['app_label']
if app_label not in executor.loader.migrated_apps:
raise CommandError(
"App '%s' does not have migrations." % app_label
)
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
targets = executor.loader.graph.leaf_nodes()
plan = executor.migration_plan(targets)
run_syncdb = options['run_syncdb'] and executor.loader.unmigrated_apps
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
self.stdout.write(
self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") +
(", ".join(sorted(executor.loader.unmigrated_apps)))
)
if target_app_labels_only:
self.stdout.write(
self.style.MIGRATE_LABEL(" Apply all migrations: ") +
(", ".join(sorted({a for a, n in targets})) or "(none)")
)
else:
if targets[0][1] is None:
self.stdout.write(self.style.MIGRATE_LABEL(
" Unapply all migrations: ") + "%s" % (targets[0][0],)
)
else:
self.stdout.write(self.style.MIGRATE_LABEL(
" Target specific migration: ") + "%s, from %s"
% (targets[0][1], targets[0][0])
)
pre_migrate_state = executor._create_project_state(with_applied_migrations=True)
pre_migrate_apps = pre_migrate_state.apps
emit_pre_migrate_signal(
self.verbosity, self.interactive, connection.alias, apps=pre_migrate_apps, plan=plan,
)
# Run the syncdb phase.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
self.sync_apps(connection, executor.loader.unmigrated_apps)
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations to apply.")
# If there's changes that aren't in migrations yet, tell them how to fix it.
autodetector = MigrationAutodetector(
executor.loader.project_state(),
ProjectState.from_apps(apps),
)
changes = autodetector.changes(graph=executor.loader.graph)
if changes:
self.stdout.write(self.style.NOTICE(
" Your models have changes that are not yet reflected "
"in a migration, and so won't be applied."
))
self.stdout.write(self.style.NOTICE(
" Run 'manage.py makemigrations' to make new "
"migrations, and then re-run 'manage.py migrate' to "
"apply them."
))
fake = False
fake_initial = False
else:
fake = options['fake']
fake_initial = options['fake_initial']
post_migrate_state = executor.migrate(
targets, plan=plan, state=pre_migrate_state.clone(), fake=fake,
fake_initial=fake_initial,
)
# post_migrate signals have access to all models. Ensure that all models
# are reloaded in case any are delayed.
post_migrate_state.clear_delayed_apps_cache()
post_migrate_apps = post_migrate_state.apps
# Re-render models of real apps to include relationships now that
# we've got a final state. This wouldn't be necessary if real apps
# models were rendered with relationships in the first place.
with post_migrate_apps.bulk_update():
model_keys = []
for model_state in post_migrate_apps.real_models:
model_key = model_state.app_label, model_state.name_lower
model_keys.append(model_key)
post_migrate_apps.unregister_model(*model_key)
post_migrate_apps.render_multiple([
ModelState.from_model(apps.get_model(*model)) for model in model_keys
])
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(
self.verbosity, self.interactive, connection.alias, apps=post_migrate_apps, plan=plan,
)
def migration_progress_callback(self, action, migration=None, fake=False):
if self.verbosity >= 1:
compute_time = self.verbosity > 1
if action == "apply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "unapply_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
if fake:
self.stdout.write(self.style.SUCCESS(" FAKED" + elapsed))
else:
self.stdout.write(self.style.SUCCESS(" OK" + elapsed))
elif action == "render_start":
if compute_time:
self.start = time.time()
self.stdout.write(" Rendering model states...", ending="")
self.stdout.flush()
elif action == "render_success":
elapsed = " (%.3fs)" % (time.time() - self.start) if compute_time else ""
self.stdout.write(self.style.SUCCESS(" DONE" + elapsed))
def sync_apps(self, connection, app_labels):
"""Run the old syncdb-style operation on a list of app_labels."""
with connection.cursor() as cursor:
tables = connection.introspection.table_names(cursor)
# Build the manifest of apps and models that are to be synchronized.
all_models = [
(
app_config.label,
router.get_migratable_models(app_config, connection.alias, include_auto_created=False),
)
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config.label in app_labels
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
return not (
(converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables)
)
manifest = OrderedDict(
(app_name, list(filter(model_installed, model_list)))
for app_name, model_list in all_models
)
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables...\n")
with connection.schema_editor() as editor:
for app_name, model_list in manifest.items():
for model in model_list:
# Never install unmanaged models, etc.
if not model._meta.can_migrate(connection):
continue
if self.verbosity >= 3:
self.stdout.write(
" Processing %s.%s model\n" % (app_name, model._meta.object_name)
)
if self.verbosity >= 1:
self.stdout.write(" Creating table %s\n" % model._meta.db_table)
editor.create_model(model)
# Deferred SQL is executed when exiting the editor's context.
if self.verbosity >= 1:
self.stdout.write(" Running deferred SQL...\n")
```
#### File: tests/migrations/test_graph.py
```python
import warnings
from django.db.migrations.exceptions import (
CircularDependencyError, NodeNotFoundError,
)
from django.db.migrations.graph import (
RECURSION_DEPTH_WARNING, DummyNode, MigrationGraph, Node,
)
from django.test import SimpleTestCase
class GraphTests(SimpleTestCase):
"""
Tests the digraph structure.
"""
def test_simple_graph(self):
"""
Tests a basic dependency graph:
app_a: 0001 <-- 0002 <--- 0003 <-- 0004
/
app_b: 0001 <-- 0002 <-/
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_a", "0004"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_a", "0003"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
# Test root migration case
self.assertEqual(
graph.forwards_plan(("app_a", "0001")),
[('app_a', '0001')],
)
# Test branch B only
self.assertEqual(
graph.forwards_plan(("app_b", "0002")),
[("app_b", "0001"), ("app_b", "0002")],
)
# Test whole graph
self.assertEqual(
graph.forwards_plan(("app_a", "0004")),
[
('app_b', '0001'), ('app_b', '0002'), ('app_a', '0001'),
('app_a', '0002'), ('app_a', '0003'), ('app_a', '0004'),
],
)
# Test reverse to b:0002
self.assertEqual(
graph.backwards_plan(("app_b", "0002")),
[('app_a', '0004'), ('app_a', '0003'), ('app_b', '0002')],
)
# Test roots and leaves
self.assertEqual(
graph.root_nodes(),
[('app_a', '0001'), ('app_b', '0001')],
)
self.assertEqual(
graph.leaf_nodes(),
[('app_a', '0004'), ('app_b', '0002')],
)
def test_complex_graph(self):
r"""
Tests a complex dependency graph:
app_a: 0001 <-- 0002 <--- 0003 <-- 0004
\ \ / /
app_b: 0001 <-\ 0002 <-X /
\ \ /
app_c: \ 0001 <-- 0002 <-
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_a", "0004"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_node(("app_c", "0001"), None)
graph.add_node(("app_c", "0002"), None)
graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_a", "0003"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_c", "0002"))
graph.add_dependency("app_c.0002", ("app_c", "0002"), ("app_c", "0001"))
graph.add_dependency("app_c.0001", ("app_c", "0001"), ("app_b", "0001"))
graph.add_dependency("app_c.0002", ("app_c", "0002"), ("app_a", "0002"))
# Test branch C only
self.assertEqual(
graph.forwards_plan(("app_c", "0002")),
[('app_b', '0001'), ('app_c', '0001'), ('app_a', '0001'), ('app_a', '0002'), ('app_c', '0002')],
)
# Test whole graph
self.assertEqual(
graph.forwards_plan(("app_a", "0004")),
[
('app_b', '0001'), ('app_c', '0001'), ('app_a', '0001'),
('app_a', '0002'), ('app_c', '0002'), ('app_b', '0002'),
('app_a', '0003'), ('app_a', '0004'),
],
)
# Test reverse to b:0001
self.assertEqual(
graph.backwards_plan(("app_b", "0001")),
[
('app_a', '0004'), ('app_c', '0002'), ('app_c', '0001'),
('app_a', '0003'), ('app_b', '0002'), ('app_b', '0001'),
],
)
# Test roots and leaves
self.assertEqual(
graph.root_nodes(),
[('app_a', '0001'), ('app_b', '0001'), ('app_c', '0001')],
)
self.assertEqual(
graph.leaf_nodes(),
[('app_a', '0004'), ('app_b', '0002'), ('app_c', '0002')],
)
def test_circular_graph(self):
"""
Tests a circular dependency graph.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0001", ("app_a", "0001"), ("app_b", "0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_b.0001", ("app_b", "0001"), ("app_a", "0003"))
# Test whole graph
with self.assertRaises(CircularDependencyError):
graph.forwards_plan(("app_a", "0003"))
def test_circular_graph_2(self):
graph = MigrationGraph()
graph.add_node(('A', '0001'), None)
graph.add_node(('C', '0001'), None)
graph.add_node(('B', '0001'), None)
graph.add_dependency('A.0001', ('A', '0001'), ('B', '0001'))
graph.add_dependency('B.0001', ('B', '0001'), ('A', '0001'))
graph.add_dependency('C.0001', ('C', '0001'), ('B', '0001'))
with self.assertRaises(CircularDependencyError):
graph.forwards_plan(('C', '0001'))
def test_graph_recursive(self):
graph = MigrationGraph()
root = ("app_a", "1")
graph.add_node(root, None)
expected = [root]
for i in range(2, 750):
parent = ("app_a", str(i - 1))
child = ("app_a", str(i))
graph.add_node(child, None)
graph.add_dependency(str(i), child, parent)
expected.append(child)
leaf = expected[-1]
forwards_plan = graph.forwards_plan(leaf)
self.assertEqual(expected, forwards_plan)
backwards_plan = graph.backwards_plan(root)
self.assertEqual(expected[::-1], backwards_plan)
def test_graph_iterative(self):
graph = MigrationGraph()
root = ("app_a", "1")
graph.add_node(root, None)
expected = [root]
for i in range(2, 1000):
parent = ("app_a", str(i - 1))
child = ("app_a", str(i))
graph.add_node(child, None)
graph.add_dependency(str(i), child, parent)
expected.append(child)
leaf = expected[-1]
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', RuntimeWarning)
forwards_plan = graph.forwards_plan(leaf)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(str(w[-1].message), RECURSION_DEPTH_WARNING)
self.assertEqual(expected, forwards_plan)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always', RuntimeWarning)
backwards_plan = graph.backwards_plan(root)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(str(w[-1].message), RECURSION_DEPTH_WARNING)
self.assertEqual(expected[::-1], backwards_plan)
def test_plan_invalid_node(self):
"""
Tests for forwards/backwards_plan of nonexistent node.
"""
graph = MigrationGraph()
message = "Node ('app_b', '0001') not a valid node"
with self.assertRaisesMessage(NodeNotFoundError, message):
graph.forwards_plan(("app_b", "0001"))
with self.assertRaisesMessage(NodeNotFoundError, message):
graph.backwards_plan(("app_b", "0001"))
def test_missing_parent_nodes(self):
"""
Tests for missing parent nodes.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
msg = "Migration app_a.0001 dependencies reference nonexistent parent node ('app_b', '0002')"
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.add_dependency("app_a.0001", ("app_a", "0001"), ("app_b", "0002"))
def test_missing_child_nodes(self):
"""
Tests for missing child nodes.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
msg = "Migration app_a.0002 dependencies reference nonexistent child node ('app_a', '0002')"
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
def test_validate_consistency(self):
"""
Tests for missing nodes, using `validate_consistency()` to raise the error.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
# Add dependency with missing parent node (skipping validation).
graph.add_dependency("app_a.0001", ("app_a", "0001"), ("app_b", "0002"), skip_validation=True)
msg = "Migration app_a.0001 dependencies reference nonexistent parent node ('app_b', '0002')"
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.validate_consistency()
# Add missing parent node and ensure `validate_consistency()` no longer raises error.
graph.add_node(("app_b", "0002"), None)
graph.validate_consistency()
# Add dependency with missing child node (skipping validation).
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"), skip_validation=True)
msg = "Migration app_a.0002 dependencies reference nonexistent child node ('app_a', '0002')"
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.validate_consistency()
# Add missing child node and ensure `validate_consistency()` no longer raises error.
graph.add_node(("app_a", "0002"), None)
graph.validate_consistency()
        # Add a dummy node directly.
msg = "app_a.0001 (req'd by app_a.0002) is missing!"
graph.add_dummy_node(
key=("app_a", "0001"),
origin="app_a.0002",
error_message=msg
)
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.validate_consistency()
def test_remove_replaced_nodes(self):
"""
Replaced nodes are properly removed and dependencies remapped.
"""
# Add some dummy nodes to be replaced.
graph = MigrationGraph()
graph.add_dummy_node(key=("app_a", "0001"), origin="app_a.0002", error_message="BAD!")
graph.add_dummy_node(key=("app_a", "0002"), origin="app_b.0001", error_message="BAD!")
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"), skip_validation=True)
# Add some normal parent and child nodes to test dependency remapping.
graph.add_node(("app_c", "0001"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_dependency("app_a.0001", ("app_a", "0001"), ("app_c", "0001"), skip_validation=True)
graph.add_dependency("app_b.0001", ("app_b", "0001"), ("app_a", "0002"), skip_validation=True)
# Try replacing before replacement node exists.
msg = (
"Unable to find replacement node ('app_a', '0001_squashed_0002'). It was either"
" never added to the migration graph, or has been removed."
)
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.remove_replaced_nodes(
replacement=("app_a", "0001_squashed_0002"),
replaced=[("app_a", "0001"), ("app_a", "0002")]
)
graph.add_node(("app_a", "0001_squashed_0002"), None)
# Ensure `validate_consistency()` still raises an error at this stage.
with self.assertRaisesMessage(NodeNotFoundError, "BAD!"):
graph.validate_consistency()
# Remove the dummy nodes.
graph.remove_replaced_nodes(
replacement=("app_a", "0001_squashed_0002"),
replaced=[("app_a", "0001"), ("app_a", "0002")]
)
# Ensure graph is now consistent and dependencies have been remapped
graph.validate_consistency()
parent_node = graph.node_map[("app_c", "0001")]
replacement_node = graph.node_map[("app_a", "0001_squashed_0002")]
child_node = graph.node_map[("app_b", "0001")]
self.assertIn(parent_node, replacement_node.parents)
self.assertIn(replacement_node, parent_node.children)
self.assertIn(child_node, replacement_node.children)
self.assertIn(replacement_node, child_node.parents)
def test_remove_replacement_node(self):
"""
A replacement node is properly removed and child dependencies remapped.
We assume parent dependencies are already correct.
"""
# Add some dummy nodes to be replaced.
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
# Try removing replacement node before replacement node exists.
msg = (
"Unable to remove replacement node ('app_a', '0001_squashed_0002'). It was"
" either never added to the migration graph, or has been removed already."
)
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.remove_replacement_node(
replacement=("app_a", "0001_squashed_0002"),
replaced=[("app_a", "0001"), ("app_a", "0002")]
)
graph.add_node(("app_a", "0001_squashed_0002"), None)
# Add a child node to test dependency remapping.
graph.add_node(("app_b", "0001"), None)
graph.add_dependency("app_b.0001", ("app_b", "0001"), ("app_a", "0001_squashed_0002"))
# Remove the replacement node.
graph.remove_replacement_node(
replacement=("app_a", "0001_squashed_0002"),
replaced=[("app_a", "0001"), ("app_a", "0002")]
)
# Ensure graph is consistent and child dependency has been remapped
graph.validate_consistency()
replaced_node = graph.node_map[("app_a", "0002")]
child_node = graph.node_map[("app_b", "0001")]
self.assertIn(child_node, replaced_node.children)
self.assertIn(replaced_node, child_node.parents)
# Ensure child dependency hasn't also gotten remapped to the other replaced node.
other_replaced_node = graph.node_map[("app_a", "0001")]
self.assertNotIn(child_node, other_replaced_node.children)
self.assertNotIn(other_replaced_node, child_node.parents)
def test_infinite_loop(self):
"""
Tests a complex dependency graph:
app_a: 0001 <-
\
app_b: 0001 <- x 0002 <-
/ \
app_c: 0001<- <------------- x 0002
And apply squashing on app_c.
"""
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_node(("app_c", "0001_squashed_0002"), None)
graph.add_dependency("app_b.0001", ("app_b", "0001"), ("app_c", "0001_squashed_0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_a", "0001"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_c.0001_squashed_0002", ("app_c", "0001_squashed_0002"), ("app_b", "0002"))
with self.assertRaises(CircularDependencyError):
graph.forwards_plan(("app_c", "0001_squashed_0002"))
def test_stringify(self):
graph = MigrationGraph()
self.assertEqual(str(graph), "Graph: 0 nodes, 0 edges")
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
self.assertEqual(str(graph), "Graph: 5 nodes, 3 edges")
self.assertEqual(repr(graph), "<MigrationGraph: nodes=5, edges=3>")
class NodeTests(SimpleTestCase):
def test_node_repr(self):
node = Node(('app_a', '0001'))
self.assertEqual(repr(node), "<Node: ('app_a', '0001')>")
def test_dummynode_repr(self):
node = DummyNode(
key=('app_a', '0001'),
origin='app_a.0001',
error_message='x is missing',
)
self.assertEqual(repr(node), "<DummyNode: ('app_a', '0001')>")
def test_dummynode_promote(self):
dummy = DummyNode(
key=('app_a', '0001'),
origin='app_a.0002',
error_message="app_a.0001 (req'd by app_a.0002) is missing!",
)
dummy.promote()
self.assertIsInstance(dummy, Node)
self.assertFalse(hasattr(dummy, 'origin'))
self.assertFalse(hasattr(dummy, 'error_message'))
``` |
{
"source": "jmcelve2/cheshire",
"score": 3
} |
#### File: cheshire/cheshire/Grid.py
```python
import numpy as np
class Grid1D(object):
"""
The physical grid for 1D Potentials.
Attributes:
        **n_x (int)**: The number of grid points along the x axis. This number must be an integer power of 2.
**n_e (int)**: The number of electrons on the grid.
**x_min (float)**: The minimum physical distance (in a.u.) on the potential grid along the x axis.
**x_max (float)**: The maximum physical distance (in a.u.) on the potential grid along the x axis.
**x (numpy.array)**: A grid of distances (in a.u.) from the center of the grid along the x axis.
"""
def __init__(self, n_x=128, n_e=2, x_min=-20, x_max=20):
"""
Grid1D constructor.
Args:
            **n_x (int)**: The number of grid points along the x axis. This number must be an integer power of 2. Default is
128.
**n_e (int)**: The number of electrons on the grid.
**x_min (float)**: The minimum physical distance (in a.u.) on the potential grid along the x axis. Default
is -20.
**x_max (float)**: The maximum physical distance (in a.u.) on the potential grid along the x axis. Default
is 20.
"""
        if not ((n_x & (n_x - 1)) == 0 and n_x != 0):
            raise AssertionError('n_x must be an integer power of 2.')
assert n_e == 2
assert x_min < x_max
self.n_x = n_x
self.n_e = n_e
self.x_min = x_min
self.x_max = x_max
self.x = np.linspace(start=x_min, stop=x_max, num=n_x)
self.params = dict(n_x=n_x, n_e=n_e, x_min=x_min, x_max=x_max, x=self.x)
def rescale_by_size(self, val):
"""
        Rescale parameters by the physical size of the grid (relative to the reference half-width of 20 a.u.).
"""
return val * self.x_max / 20
def rescale_by_grid(self, val):
"""
        Rescale parameters by the pixel resolution of the grid (relative to the reference resolution of 256 points).
"""
return val * self.n_x / 256
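# A minimal usage sketch of Grid1D (hypothetical helper, for illustration only): it assumes
# the reference values used by the rescaling helpers above (20 a.u. half-width, 256 points).
def _grid1d_usage_example():
    grid = Grid1D(n_x=256, n_e=2, x_min=-20, x_max=20)
    scaled_length = grid.rescale_by_size(7)   # 7 * x_max / 20 -> 7.0 when x_max = 20
    scaled_pixels = grid.rescale_by_grid(80)  # 80 * n_x / 256 -> 80.0 when n_x = 256
    return grid.x, scaled_length, scaled_pixels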
class Grid2D(object):
"""
The physical grid for 2D Potentials.
Attributes:
        **n_x (int)**: The number of grid points along the x axis. This number must be an integer power of 2.
        **n_y (int)**: The number of grid points along the y axis. This number must be an integer power of 2.
**n_e (int)**: The number of electrons on the grid.
**x_min (float)**: The minimum physical distance (in a.u.) on the potential grid along the x axis.
**x_max (float)**: The maximum physical distance (in a.u.) on the potential grid along the x axis.
**y_min (float)**: The minimum physical distance (in a.u.) on the potential grid along the y axis.
**y_max (float)**: The maximum physical distance (in a.u.) on the potential grid along the y axis.
**x (numpy.array)**: A grid of distances (in a.u.) from the center of the grid along the x axis.
**y (numpy.array)**: A grid of distances (in a.u.) from the center of the grid along the y axis.
"""
def __init__(self, n_x=128, n_y=128, n_e=1, x_min=-20, x_max=20, y_min=-20, y_max=20):
"""
Grid2D constructor.
Args:
            **n_x (int)**: The number of grid points along the x axis. This number must be an integer power of 2. Default is
128.
            **n_y (int)**: The number of grid points along the y axis. This number must be an integer power of 2. Default is
128.
**n_e (int)**: The number of electrons on the grid.
**x_min (float)**: The minimum physical distance (in a.u.) on the potential grid along the x axis. Default
is -20.
**x_max (float)**: The maximum physical distance (in a.u.) on the potential grid along the x axis. Default
is 20.
**y_min (float)**: The minimum physical distance (in a.u.) on the potential grid along the y axis. Default
is -20.
**y_max (float)**: The maximum physical distance (in a.u.) on the potential grid along the y axis. Default
is 20.
"""
        if not ((n_x & (n_x - 1)) == 0 and n_x != 0):
            raise AssertionError('n_x must be an integer power of 2.')
        if not ((n_y & (n_y - 1)) == 0 and n_y != 0):
            raise AssertionError('n_y must be an integer power of 2.')
assert n_e == 1
assert n_x == n_y
assert x_min < x_max
assert y_min < y_max
assert (x_max - x_min) == (y_max - y_min)
self.n_x = n_x
self.n_y = n_y
self.n_e = n_e
self.x_min = x_min
self.x_max = x_max
self.y_min = y_min
self.y_max = y_max
self.x = np.tile(np.linspace(start=x_min, stop=x_max, num=n_x), (n_y, 1))
self.y = -np.transpose(np.tile(np.linspace(start=y_min, stop=y_max, num=n_y), (n_x, 1)))
self.params = dict(n_x=n_x, n_y=n_y, n_e=n_e, x_min=x_min, x_max=x_max, x=self.x, y_min=y_min, y_max=y_max,
y=self.y)
def rescale_by_size(self, val):
"""
        Rescale parameters by the physical size of the grid (relative to the reference half-width of 20 a.u.).
"""
return val * self.x_max / 20
def rescale_by_grid(self, val):
"""
        Rescale parameters by the pixel resolution of the grid (relative to the reference resolution of 256 points).
"""
return val * self.n_x / 256
```
#### File: cheshire/cheshire/ParamSampler.py
```python
import math
import numpy as np
from cheshire.Grid import *
class InfiniteWell1DSampler(Grid1D):
"""
Class that generates parameters for the 1D infinite well.
"""
def sample_params(self, min_l=5, max_l=23, min_c=-8, max_c=8):
"""
Random parameter method that generates parameters for infinite
square wells.
Args:
**min_l (float)**: The minimum possible allowed length for the well size. Default is 5.
**max_l (float)**: The maximum possible allowed length for the well size. Default is 23.
**min_c (float)**: The minimum possible center point for the well. Default is -8.
**max_c (float)**: The maximum possible center point for the well. Default is 8.
Returns:
A dictionary of randomly generated infinite well parameters.
"""
assert min_l <= max_l
assert min_c <= max_c
# Rescale arguments based on the size of the grid
min_l = self.rescale_by_size(min_l)
max_l = self.rescale_by_size(max_l)
min_c = self.rescale_by_size(min_c)
max_c = self.rescale_by_size(max_c)
        l_x = np.random.uniform(low=min_l, high=max_l)
c_x = np.random.uniform(low=min_c, high=max_c)
return dict(l_x=l_x, c_x=c_x)
class InfiniteWell2DSampler(Grid2D):
"""
Class that generates parameters for the 2D infinite well.
"""
def sample_params(self, min_l=4, max_l=15, min_c=-8, max_c=8):
"""
Random parameter method that generates parameters for 2D infinite square wells.
Args:
**min_l (float)**: The minimum possible allowed length for the well size. Default is 4.
**max_l (float)**: The maximum possible allowed length for the well size. Default is 15.
**min_c (float)**: The minimum possible center point for the well. Default is -8.
**max_c (float)**: The maximum possible center point for the well. Default is 8.
Returns:
A dictionary of randomly generated infinite well parameters.
"""
assert min_l <= max_l
assert min_c <= max_c
# Rescale arguments based on the size of the grid
min_l = self.rescale_by_size(min_l)
max_l = self.rescale_by_size(max_l)
min_c = self.rescale_by_size(min_c)
max_c = self.rescale_by_size(max_c)
while True:
l_x, l_y = self.__sample__(min_l=min_l, max_l=max_l)
if (min_l <= l_x) & \
(l_x <= max_l) & \
(min_l <= l_y) & \
(l_y <= max_l):
break
        switch = np.random.uniform(low=0, high=1)
        if switch > 0.5:
            l_x, l_y = l_y, l_x
c_x = np.random.uniform(low=min_c, high=max_c)
c_y = np.random.uniform(low=min_c, high=max_c)
return dict(l_x=l_x, l_y=l_y, c_x=c_x, c_y=c_y)
def __sample__(self, min_l, max_l):
"""
Sample parameters. Sampling from the energy first ensures that the desired
energy range is obtained
"""
energy = np.random.uniform(low=0, high=0.4)
        l_x = np.random.uniform(low=min_l, high=max_l)
        if 2*energy/math.pi**2 <= 1/(l_x**2):
            l_y = np.nan
        else:
            l_y = 1 / np.sqrt((2*energy)/(math.pi**2) - 1/(l_x**2))
return l_x, l_y
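# A minimal sketch of the relation __sample__ relies on (hypothetical helper, for illustration
# only): the 2D infinite-well ground-state energy in atomic units is taken as
# E = (pi**2 / 2) * (1/l_x**2 + 1/l_y**2), so for a target energy E and a sampled l_x,
# l_y = 1 / sqrt(2*E/pi**2 - 1/l_x**2), which is only real when 2*E/pi**2 > 1/l_x**2.
def _infinite_well_l_y(energy, l_x):
    discriminant = 2*energy/math.pi**2 - 1/l_x**2
    return np.nan if discriminant <= 0 else 1/np.sqrt(discriminant)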
class SimpleHarmonicOscillator2DSampler(Grid2D):
"""
Class that generates parameters for the 2D simple harmonic oscillator.
"""
def sample_params(self, min_kx=0, max_kx=0.16, min_ky=0, max_ky=0.16, min_cx=-8, max_cx=8, min_cy=-8, max_cy=8):
"""
Random parameter method that generates parameters for simple harmonic oscillator potentials.
Args:
**min_kx (float)**: The minimum possible value for the well width along the x axis. Default is 0.
**max_kx (float)**: The maximum possible value for the well width along the x axis. Default is 0.16.
**min_ky (float)**: The minimum possible value for the well width along the y axis. Default is 0.
**max_ky (float)**: The maximum possible value for the well width along the y axis. Default is 0.16.
**min_cx (float)**: The minimum possible value for the center (in a.u.) of the simple harmonic oscillator
along the x axis. Default is -8.
**max_cx (float)**: The maximum possible value for the center (in a.u.) of the simple harmonic oscillator
along the x axis. Default is 8.
**min_cy (float)**: The minimum possible value for the center (in a.u.) of the simple harmonic oscillator
along the y axis. Default is -8.
**max_cy (float)**: The maximum possible value for the center (in a.u.) of the simple harmonic oscillator
along the y axis. Default is 8.
Returns:
A dictionary of randomly generated simple harmonic oscillator parameters.
"""
assert min_kx <= max_kx
assert min_ky <= max_ky
assert min_cx <= max_cx
assert min_cy <= max_cy
# Rescale arguments
min_kx = self.rescale_by_size(min_kx)
max_kx = self.rescale_by_size(max_kx)
min_ky = self.rescale_by_size(min_ky)
max_ky = self.rescale_by_size(max_ky)
min_cx = self.rescale_by_size(min_cx)
max_cx = self.rescale_by_size(max_cx)
min_cy = self.rescale_by_size(min_cy)
max_cy = self.rescale_by_size(max_cy)
# Sample parameters
k_x = np.random.uniform(low=min_kx, high=max_kx)
k_y = np.random.uniform(low=min_ky, high=max_ky)
c_x = np.random.uniform(low=min_cx, high=max_cx)
c_y = np.random.uniform(low=min_cy, high=max_cy)
return dict(k_x=k_x, k_y=k_y, c_x=c_x, c_y=c_y)
class DoubleInvertedGaussian2DSampler(Grid2D):
"""
Class that generates parameters for the 2D double inverted Gaussian.
"""
def sample_params(self, min_a1=2, max_a1=4, min_a2=2, max_a2=4, min_cx1=-8, max_cx1=8, min_cy1=-8, max_cy1=8,
min_cx2=-8, max_cx2=8, min_cy2=-8, max_cy2=8, min_kx1=1.6, max_kx1=8, min_ky1=1.6, max_ky1=8,
min_kx2=1.6, max_kx2=8, min_ky2=1.6, max_ky2=8):
"""
Random parameter method that generates parameters for double inverted Gaussian potentials.
Args:
**min_a1 (float)**: The minimum possible value of the amplitude of the first Gaussian. Default is 2. Units
are in Hartree energy.
**max_a1 (float)**: The maximum possible value of the amplitude of the first Gaussian. Default is 4. Units
are in Hartree energy.
**min_a2 (float)**: The minimum possible value of the amplitude of the second Gaussian. Default is 2. Units
are in Hartree energy.
**max_a2 (float)**: The maximum possible value of the amplitude of the second Gaussian. Default is 4. Units
are in Hartree energy.
**min_cx1 (float)**: The minimum possible position of the center along the x axis of the first Gaussian.
Default is -8 a.u.
**max_cx1 (float)**: The maximum possible position of the center along the x axis of the first Gaussian.
Default is 8 a.u.
**min_cy1 (float)**: The minimum possible position of the center along the y axis of the first Gaussian.
Default is -8 a.u.
**max_cy1 (float)**: The maximum possible position of the center along the y axis of the first Gaussian.
Default is 8 a.u.
**min_cx2 (float)**: The minimum possible position of the center along the x axis of the second Gaussian.
Default is -8 a.u.
**max_cx2 (float)**: The maximum possible position of the center along the x axis of the second Gaussian.
Default is 8 a.u.
**min_cy2 (float)**: The minimum possible position of the center along the y axis of the second Gaussian.
Default is -8 a.u.
**max_cy2 (float)**: The maximum possible position of the center along the y axis of the second Gaussian.
Default is 8 a.u.
**min_kx1 (float)**: The minimum possible value for the constant that determines the width along the x axis
of the first Gaussian. Default is 1.6.
**max_kx1 (float)**: The maximum possible value for the constant that determines the width along the x axis
of the first Gaussian. Default is 8.
**min_ky1 (float)**: The minimum possible value for the constant that determines the width along the y axis
of the first Gaussian. Default is 1.6.
**max_ky1 (float)**: The maximum possible value for the constant that determines the width along the y axis
of the first Gaussian. Default is 8.
**min_kx2 (float)**: The minimum possible value for the constant that determines the width along the x axis
of the second Gaussian. Default is 1.6.
**max_kx2 (float)**: The maximum possible value for the constant that determines the width along the x axis
of the second Gaussian. Default is 8.
**min_ky2 (float)**: The minimum possible value for the constant that determines the width along the y axis
of the second Gaussian. Default is 1.6.
**max_ky2 (float)**: The maximum possible value for the constant that determines the width along the y axis
of the second Gaussian. Default is 8.
Returns:
A dictionary of randomly generated double inverted Gaussian parameters.
"""
assert min_a1 > 0
assert min_a2 > 0
assert min_a1 <= max_a1
assert min_a2 <= max_a2
assert min_cx1 <= max_cx1
assert min_cy1 <= max_cy1
assert min_cx2 <= max_cx2
assert min_cy2 <= max_cy2
assert min_kx1 <= max_kx1
assert min_ky1 <= max_ky1
assert min_kx2 <= max_kx2
assert min_ky2 <= max_ky2
# Rescale arguments
min_cx1 = self.rescale_by_size(min_cx1)
min_cy1 = self.rescale_by_size(min_cy1)
min_cx2 = self.rescale_by_size(min_cx2)
min_cy2 = self.rescale_by_size(min_cy2)
min_kx1 = self.rescale_by_size(min_kx1)
min_ky1 = self.rescale_by_size(min_ky1)
min_kx2 = self.rescale_by_size(min_kx2)
min_ky2 = self.rescale_by_size(min_ky2)
# Sample parameters
a1 = np.random.uniform(low=min_a1, high=max_a1)
a2 = np.random.uniform(low=min_a2, high=max_a2)
c_x1 = np.random.uniform(low=min_cx1, high=max_cx1)
c_y1 = np.random.uniform(low=min_cy1, high=max_cy1)
c_x2 = np.random.uniform(low=min_cx2, high=max_cx2)
c_y2 = np.random.uniform(low=min_cy2, high=max_cy2)
k_x1 = np.random.uniform(low=min_kx1, high=max_kx1)
k_y1 = np.random.uniform(low=min_ky1, high=max_ky1)
k_x2 = np.random.uniform(low=min_kx2, high=max_kx2)
k_y2 = np.random.uniform(low=min_ky2, high=max_ky2)
return dict(a1=a1, a2=a2, c_x1=c_x1, c_y1=c_y1, c_x2=c_x2, c_y2=c_y2, k_x1=k_x1, k_y1=k_y1, k_x2=k_x2,
k_y2=k_y2)
class Random2DSampler(Grid2D):
"""
Class that generates parameters for a random 2D potential.
"""
def sample_params(self, min_k=2, max_k=7, min_r=80, max_r=180, min_sig1=6, max_sig1=10, min_sig2=10, max_sig2=16,
p_range=[0.5, 1.0, 1.5, 2.0]):
"""
Random parameter method that generates parameters for random potentials.
Args:
**min_k (int)**: Determines the low end of the number of points used to create the convex hull. Default is
2.
**max_k (int)**: Determines the high end of the number of points used to create the convex hull. Default is
7.
**min_r (float)**: The minimum possible resolution size of the random blob. Default is 80 pixels (scaled
for 256 x 256).
**max_r (float)**: The maximum possible resolution size of the random blob. Default is 180 pixels (scaled
for 256 x 256).
**min_sig1 (float)**: The minimum possible variance of the first Gaussian blur. Default is 6.
**max_sig1 (float)**: The maximum possible variance of the first Gaussian blur. Default is 10.
**min_sig2 (float)**: The minimum possible variance of the second Gaussian blur. Default is 10.
**max_sig2 (float)**: The maximum possible variance of the second Gaussian blur. Default is 16.
**p_range (list)**: The range of exponents to sample from when exponentiating the potential for contrasting.
                Default is [0.5, 1.0, 1.5, 2.0].
Returns:
A dictionary of randomly generated random potential parameters.
"""
assert min_k <= max_k
assert min_r <= max_r
assert min_sig1 <= max_sig1
assert min_sig1 > 0
assert min_sig2 <= max_sig2
assert min_sig2 > 0
assert all([isinstance(i, float) for i in p_range])
# Rescale arguments
min_r = self.rescale_by_grid(min_r)
max_r = self.rescale_by_grid(max_r)
min_sig1 = self.rescale_by_grid(min_sig1)
max_sig1 = self.rescale_by_grid(max_sig1)
min_sig2 = self.rescale_by_grid(min_sig2)
max_sig2 = self.rescale_by_grid(max_sig2)
# Sample parameters
k = np.random.randint(low=min_k, high=max_k+1)
r = np.random.uniform(low=min_r, high=max_r)
sig1 = np.random.uniform(low=min_sig1, high=max_sig1)
sig2 = np.random.uniform(low=min_sig2, high=max_sig2)
p = np.random.choice(p_range)
return dict(k=k, r=r, sig1=sig1, sig2=sig2, p=p)
class Coulomb2DSampler(Grid2D):
"""
Class that generates parameters for the 2D Coulomb potential.
"""
def sample_params(self, min_z=1, max_z=118, min_cx=-8, max_cx=8, min_cy=-8, max_cy=8, alpha=10**-9):
"""
Random parameter method that generates parameters for Coulomb potentials.
Args:
**min_z (int)**: The minimum number of allowable protons.
**max_z (int)**: The maximum number of allowable protons.
**min_cx (float)**: The minimum possible value for the center (in a.u.) of the Coulomb potential along the x
axis. Default is -8.
**max_cx (float)**: The maximum possible value for the center (in a.u.) of the Coulomb potential along the x
axis. Default is 8.
**min_cy (float)**: The minimum possible value for the center (in a.u.) of the Coulomb potential along the y
axis. Default is -8.
**max_cy (float)**: The maximum possible value for the center (in a.u.) of the Coulomb potential along the y
axis. Default is 8.
Returns:
A dictionary of randomly generated Coulomb potential parameters.
"""
assert min_z <= max_z
assert min_z >= 1
# Sample parameters
        z = np.random.randint(low=min_z, high=max_z + 1)
c_x = np.random.uniform(low=min_cx, high=max_cx)
c_y = np.random.uniform(low=min_cy, high=max_cy)
alpha = alpha
return dict(z=z, c_x=c_x, c_y=c_y, alpha=alpha)
```
#### File: cheshire/cheshire/Potential.py
```python
import numpy as np
from scipy import ndimage
from scipy.interpolate import spline
from scipy.ndimage.filters import gaussian_filter
from scipy.spatial import ConvexHull
from scipy.spatial import Delaunay
from cheshire.Grid import *
class Potential(object):
"""
A potential object containing information about the potential.
"""
def __init__(self, params):
"""
A potential object containing information about the potential.
Args:
**params (dict)**: A dictionary of values specifying the type of potential to create.
"""
if not "potential" in params.keys():
raise AssertionError("The params dictionary must contain a potential grid.")
assert isinstance(params["potential"], np.ndarray)
for key in params:
setattr(self, key, params[key])
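# A minimal usage sketch of Potential (hypothetical helper, for illustration only): every
# entry of `params` is promoted to an attribute, so any dict with a 'potential' ndarray works.
def _potential_example():
    params = dict(potential=np.zeros((4, 4)), c_x=0.0, l_x=7.0)
    pot = Potential(params=params)
    return pot.potential.shape, pot.c_x, pot.l_x  # ((4, 4), 0.0, 7.0)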
class InfiniteWell1D(Grid1D):
"""
One dimensional infinite well potential.
"""
def create(self, c_x=0, l_x=7):
"""
Generate a Potential object with a 1D infinite well potential.
Args:
**c_x (float)**: The center of the infinite well along the x axis (in a.u.). Default is 0.
**l_x (float)**: The length of the infinite well along the x axis (in a.u.). Default is 7.
Returns:
An object of class Potential with a potential attribute and attributes corresponding to the parameters used
to generate the potential.
"""
assert self.x_min < c_x-0.5*l_x
assert c_x+0.5*l_x < self.x_max
v = np.zeros(shape=(1, self.n_x))[0]
mask = (c_x-0.5*l_x < self.x) & (self.x <= c_x+0.5*l_x)
v[~mask] = 20
params = dict(**self.params, **dict(potential=v, c_x=c_x, l_x=l_x))
return Potential(params=params)
class Coulomb1D(Grid1D):
"""
One dimensional Coulomb potential.
"""
    def create(self, c_x=0, z=1, alpha=10**-9):
"""
Generate a Potential object with a 1D Coulomb potential.
Args:
**c_x (float)**: The center of the Coulomb potential along the x axis (in a.u.). Default is 0.
**z (int)**: The number of protons determining the strength of the Coulomb attraction. Default is 1.
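            **alpha (float)**: A value that removes the Coulomb singularity to ensure the solver converges. Default is 10**-9.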
Returns:
An object of class Potential with a potential attribute and attributes corresponding to the parameters used
to generate the potential.
"""
assert isinstance(z, int)
assert z >= 1
assert (c_x > self.x_min) and (c_x < self.x_max)
v = z/np.sqrt((c_x-self.x)**2+alpha**2)
v = np.max(v)-v
params = dict(**self.params, **dict(potential=v, z=z, c_x=c_x, alpha=alpha))
return Potential(params=params)
class InfiniteWell2D(Grid2D):
"""
Two dimensional infinite well potential.
"""
def create(self, c_x=0, c_y=0, l_x=7, l_y=7):
"""
Generate a Potential object with a 2D infinite well potential.
Args:
**c_x (float)**: The center of the infinite well along the x axis (in a.u.). Default is 0.
**c_y (float)**: The center of the infinite well along the y axis (in a.u.). Default is 0.
**l_x (float)**: The length of the infinite well along the x axis (in a.u.). Default is 7.
**l_y (float)**: The length of the infinite well along the y axis (in a.u.). Default is 7.
Returns:
An object of class Potential with a potential attribute and attributes corresponding to the parameters used
to generate the potential.
"""
assert (c_x > self.x_min) & (c_x < self.x_max)
assert (c_y > self.y_min) & (c_y < self.y_max)
assert (l_x > self.x_min) & (l_x < self.x_max)
assert (l_y > self.y_min) & (l_y < self.y_max)
v = np.zeros(shape=(self.n_y, self.n_x))
mask = (c_x-0.5*l_x < self.x) & (self.x <= c_x+0.5*l_x) & \
(c_y-0.5*l_y < self.y) & (self.y <= c_y+0.5*l_y)
v[~mask] = 20
params = dict(**self.params, **dict(potential=v, c_x=c_x, c_y=c_y, l_x=l_x, l_y=l_y))
return Potential(params=params)
class SimpleHarmonicOscillator2D(Grid2D):
"""
Two dimensional simple harmonic oscillator potential.
"""
def create(self, c_x=0, c_y=0, k_x=2, k_y=2):
"""
Generate a Potential object with a 2D simple harmonic oscillator potential.
Args:
**c_x (float)**: The center of the simple harmonic oscillator along the x axis (in a.u.). Default is 0.
**c_y (float)**: The center of the simple harmonic oscillator along the y axis (in a.u.). Default is 0.
**k_x (float)**: The constant that determines the width of the harmonic oscillator potential along the x
axis. Default is 2.
**k_y (float)**: The constant that determines the width of the harmonic oscillator potential along the y
axis. Default is 2.
Returns:
An object of class Potential with a potential attribute and attributes corresponding to the parameters used
to generate the potential.
"""
assert (c_x > self.x_min) & (c_x < self.x_max)
assert (c_y > self.y_min) & (c_y < self.y_max)
assert k_x > 0
assert k_y > 0
v = .5*(k_x*(self.x-c_x)**2+k_y*(self.y-c_y)**2)
v[v > 20] = 20
params = dict(**self.params, **dict(potential=v, c_x=c_x, c_y=c_y, k_x=k_x, k_y=k_y))
return Potential(params=params)
class DoubleInvertedGaussian2D(Grid2D):
"""
    Two dimensional double inverted Gaussian potential.
"""
def create(self, a1=2, a2=2, c_x1=-2, c_x2=2, c_y1=-2, c_y2=2, k_x1=2, k_x2=2, k_y1=2, k_y2=2):
"""
Generate a Potential object with a 2D double inverted Gaussian potential.
Args:
**a1 (float)**: The maximum depth of the first Gaussian in units of Hartree Energy. Default is 2.
            **a2 (float)**: The maximum depth of the second Gaussian in units of Hartree Energy. Default is 2.
**c_x1 (float)**: The center of the first Gaussian well along the x axis (in a.u.). Default is -2.
            **c_y1 (float)**: The center of the first Gaussian well along the y axis (in a.u.). Default is -2.
            **c_x2 (float)**: The center of the second Gaussian well along the x axis (in a.u.). Default is 2.
**c_y2 (float)**: The center of the second Gaussian well along the y axis (in a.u.). Default is 2.
**k_x1 (float)**: The constant that determines the width of the first inverted Gaussian along the x axis.
Default is 2.
**k_x2 (float)**: The constant that determines the width of the second inverted Gaussian along the x axis.
Default is 2.
**k_y1 (float)**: The constant that determines the width of the first inverted Gaussian along the y axis.
Default is 2.
**k_y2 (float)**: The constant that determines the width of the second inverted Gaussian along the y axis.
Default is 2.
Returns:
An object of class Potential with a potential attribute and attributes corresponding to the parameters used
to generate the potential.
"""
assert a1 > 0
assert a2 > 0
assert (c_x1 > self.x_min) & (c_x1 < self.x_max)
assert (c_x2 > self.x_min) & (c_x2 < self.x_max)
assert (c_y1 > self.y_min) & (c_y1 < self.y_max)
assert (c_y2 > self.y_min) & (c_y2 < self.y_max)
assert k_x1 > 0
assert k_x2 > 0
assert k_y1 > 0
assert k_y2 > 0
v = -a1*np.exp(-((self.x-c_x1)/k_x1)**2 - ((self.y-c_y1)/k_y1)**2) \
- a2*np.exp(-((self.x-c_x2)/k_x2)**2 - ((self.y-c_y2)/k_y2)**2)
v = v + np.max(v)
params = dict(**self.params, **dict(potential=v, a1=a1, a2=a2, c_x1=c_x1, c_x2=c_x2, c_y1=c_y1, c_y2=c_y2,
k_x1=k_x1, k_x2=k_x2, k_y1=k_y1, k_y2=k_y2))
return Potential(params=params)
class Random2D(Grid2D):
"""
Two dimensional random potential.
To generate a random potential, a 16 x 16 binary grid of 1s and 0s is generated and upscaled to n x n. A second
16 x 16 binary grid is generated and upscaled to n/2 x n/2. The smaller grid is centered within the larger grid and
then the grids are subtracted element-wise. A Gaussian blur is then applied with standard deviation sig1**2. The
potential is now random, and smooth, but does not achieve a maximum at the boundary.
To achieve this, a mask that smoothly goes to zero at the boundary and 1 in the interior is generated. To generate
the desired random mask, k**2 random coordinate pairs are generated on a 200*n/256 x 200*n/256 grid. A convex hull
is generated with these points, and the boundary of the convex hull is smoothly interpolated using a cubic spline. A
binary mask is then formed by filling the inside of the closed blob with 1s, and the outside with 0s. Resizing the
blob to a resolution of r x r, and applying a Gaussian blur with standard deviation sig2 returns the final mask.
Element-wise multiplication of the mask with the random-blurred image gives a random potential that approaches zero
    at the boundary. The “sharpness” of the potential is randomized by exponentiating by p = 0.5, 1.0, 1.5,
or 2.0, chosen at random with equal probabilities (i.e. V := V**p). The result is then subtracted from its maximum
to invert the well.
"""
def create(self, k=5, r=40, sig1=8, sig2=13, p=2):
"""
Generate a Potential object with a 2D random potential.
Args:
            **k (int)**: Determines the number of random points (k**2) used to generate the convex hull that makes the blob.
**r (float)**: The resolution size of the blob.
**sig1 (float)**: The variance of the first Gaussian blur.
**sig2 (float)**: The variance of the second Gaussian blur.
**p (float)**: The exponent used to increase the contrast of the potential.
Returns:
An object of class Potential with a potential attribute and attributes corresponding to the parameters used
to generate the potential.
"""
assert (k >= 2) & (k <= 7)
assert (self.rescale_by_grid(r) < self.n_x) & (r > 0)
assert sig1 > 0
assert sig2 > 0
assert p in [0.5, 1, 1.5, 2]
# Create the n x n and n/2 x n/2 grids
v = np.reshape(np.random.randint(low=0, high=2, size=16*16), newshape=(16,16))
v = np.kron(v, np.ones((round(self.n_x/16), round(self.n_y/16))))
subgrid = np.reshape(np.random.randint(low=0, high=2, size=16*16), newshape=(16,16))
subgrid = np.kron(subgrid, np.ones((round(self.n_x/32), round(self.n_y/32))))
# Center and diff the two grids
lo_x = round(self.n_x/4)
hi_x = round(self.n_x/4*3)
lo_y = round(self.n_y/4)
hi_y = round(self.n_y/4*3)
v[lo_y:hi_y, lo_x:hi_x] = v[lo_y:hi_y, lo_x:hi_x] - subgrid
# Run the first Gaussian blur
v = gaussian_filter(v, sigma=sig1)
# Create the convex hull from k**2 points
points = np.random.rand(k**2, 2)*(self.rescale_by_grid(200))
points = points.astype(int)
hull = ConvexHull(points)
# Get the x and y points of the convex hull
x = np.transpose(hull.points[hull.vertices])[0]
x = np.append(x, x[0])
y = np.transpose(hull.points[hull.vertices])[1]
y = np.append(y, y[0])
# Parameterize the boundary of the hull and interpolate
t = np.arange(x.shape[0], dtype=float)
t /= t[-1]
nt = np.linspace(0, 1, 100)
x = spline(t, x, nt)
y = spline(t, y, nt)
# Create a Delaunay hull for identifying points inside of the hull
# The two args of np.zeros need to be scaled independently when n_x and n_y are allowed to vary
# independently.
hull = ConvexHull(np.transpose(np.array((x, y))))
hull = Delaunay(hull.points[hull.vertices])
coords = np.transpose(np.indices((round(self.rescale_by_grid(200)), round(self.rescale_by_grid(200)))))
coords = coords.reshape(round(self.rescale_by_grid(200)**2), 2)
in_hull = hull.find_simplex(coords) >= 0
# Rescale the blob
# The two args of np.zeros need to be scaled independently when n_x and n_y are allowed to vary
# independently.
blob = np.zeros(shape=(round(self.rescale_by_grid(200)), round(self.rescale_by_grid(200))))
blob[np.transpose(coords[in_hull])[0], np.transpose(coords[in_hull])[1]] = 1
blob = ndimage.zoom(blob, self.rescale_by_grid(r)/self.rescale_by_grid(200))
# Create the final mask with the second Gaussian blur
mask = np.zeros(shape=v.shape)
x_offset = round((mask.shape[0]-blob.shape[0])/2)
y_offset = round((mask.shape[1]-blob.shape[1])/2)
mask[x_offset:blob.shape[0]+x_offset, y_offset:blob.shape[1]+y_offset] = blob
mask = gaussian_filter(mask, sigma=sig2)
v = np.abs(v)
v = v**p
v = v*mask
v = np.max(v) - v + (1 - np.max(v))
params = dict(**self.params, **dict(potential=v, k=k, r=r, sig1=sig1, sig2=sig2, p=p))
return Potential(params=params)
class Coulomb2D(Grid2D):
"""
Two dimensional Coulomb potential.
"""
def create(self, z=1, c_x=0, c_y=0, alpha=10**-9):
"""
Generate a Potential object with a 2D Coulomb potential.
Args:
**z (int)**: Determines the magnitude of the potential. An effective "proton number" constant.
**c_x (float)**: The center of the Coulomb potential along the x axis (in a.u.). Default is 0.
**c_y (float)**: The center of the Coulomb potential along the y axis (in a.u.). Default is 0.
**alpha (float)**: A value that removes the Coulomb singularity to ensure the solver converges.
Returns:
An object of class Potential with a potential attribute and attributes corresponding to the parameters used
to generate the potential.
"""
assert isinstance(z, int)
assert z >= 1
assert (c_x > self.x_min) and (c_x < self.x_max)
assert (c_y > self.y_min) and (c_y < self.y_max)
v = z/np.sqrt((c_x-self.x)**2+(c_y-self.y)**2+alpha**2)
v = v.max()-v
params = dict(**self.params, **dict(potential=v, z=z, c_x=c_x, c_y=c_y, alpha=alpha))
return Potential(params=params)
def potential_factory(grid_params, pot_params):
"""
Infer the Potential to create based on the parameters passed to this method.
"""
assert isinstance(grid_params, dict)
assert isinstance(pot_params, dict)
if 'n_x' in grid_params.keys() and 'n_y' not in grid_params.keys():
pass
elif 'n_x' in grid_params.keys() and 'n_y' in grid_params.keys():
pass
else:
raise AssertionError('The parameters passed into the creation method are not supported.')
if set(pot_params) == set(['c_x', 'l_x']):
potential = InfiniteWell1D(**grid_params)
elif set(pot_params) == set(['c_x', 'c_y', 'l_x', 'l_y']):
potential = InfiniteWell2D(**grid_params)
elif set(pot_params) == set(['c_x', 'c_y', 'k_x', 'k_y']):
potential = SimpleHarmonicOscillator2D(**grid_params)
elif set(pot_params) == set(['a1', 'a2', 'c_x1', 'c_x2', 'c_y1', 'c_y2', 'k_x1', 'k_y1', 'k_x2', 'k_y2']):
potential = DoubleInvertedGaussian2D(**grid_params)
elif set(pot_params) == set(['k', 'r', 'sig1', 'sig2', 'p']):
potential = Random2D(**grid_params)
elif set(pot_params) == set(['z', 'c_x', 'c_y', 'alpha']):
potential = Coulomb2D(**grid_params)
else:
raise AssertionError('None of the supported potentials accept the parameters passed into the creation method.')
return potential.create(**pot_params)
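# A minimal usage sketch of potential_factory (hypothetical helper, for illustration only):
# dispatch is purely by parameter names, so these keys select SimpleHarmonicOscillator2D;
# the numeric values are arbitrary examples.
def _potential_factory_example():
    grid_params = dict(n_x=128, n_y=128, n_e=1, x_min=-20, x_max=20, y_min=-20, y_max=20)
    pot_params = dict(c_x=0, c_y=0, k_x=2, k_y=2)
    return potential_factory(grid_params, pot_params)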
``` |
{
"source": "jmcevoy1984/Phorest-Voucher-Maker",
"score": 3
} |
#### File: jmcevoy1984/Phorest-Voucher-Maker/helper_functions.py
```python
from shared import *
#-------------helper functions-------------
#Fetch a unique voucher serial number and parse the XML to output as a string.
#This is done using the xml.etree (Element Tree) module. This is a standard Python module for parsing XML.
def get_voucher_serial():
req = requests.get(get_serial_uri , auth=(username, password), verify=False)
print(req.status_code)
print(req.content)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
voucher_number = root[0].text
return voucher_number
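#A minimal sketch of the parsing step above (hypothetical helper and payload, for illustration
#only): the serial endpoint is assumed to return XML whose first child holds the serial,
#which is why root[0].text is used.
def _parse_serial_example():
    sample = '<voucherSerial><serial>ABC123</serial></voucherSerial>'
    return ET.fromstring(sample)[0].text #-> 'ABC123'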
#Takes an object and a string that represents the type of object, e.g. 'voucher' or 'clientCard', and outputs its attributes wrapped in XML tags.
#This function converts the attributes of an object passed to it into XML tags which surround the attribute value.
def to_xml(object, string, *exclude):
attrib = object.__dict__
output_xml = '<'+string+'>'
for key, val in attrib.items():
if not (key in exclude):
open_tag = '<'+str(key)+'>'
close_tag = '</'+str(key)+'>'
output_xml += open_tag + str(val) + close_tag
output_xml += '</'+string+'>'
return output_xml
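#A minimal usage sketch of to_xml (hypothetical helper and object, for illustration only):
#excluded attribute names are skipped and tag order follows the object's __dict__.
def _to_xml_example():
    class _Demo(object):
        pass
    demo = _Demo()
    demo.serial = 'ABC123'
    demo.originalBalance = '50'
    return to_xml(demo, 'voucher') #-> '<voucher><serial>ABC123</serial><originalBalance>50</originalBalance></voucher>'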
def get_voucher(ref):
req = requests.get( post_uri +'/'+ ref, auth=(username, password), verify=False)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
voucher = Voucher()
setattr(voucher, 'ref', ref)
for child in root:
if hasattr(voucher, child.tag):
setattr(voucher, child.tag, root.find('./'+child.tag).text)
return voucher
def serialize(object):
attributes = object.__dict__
return {key:value for key, value in attributes.items() if key != 'creatingBranchRef'}
def get_voucher_list(start, max_results):
params = {'start' : start, 'max' : max_results}
req = requests.get( voucher_uri, auth=(username, password), params=params, verify=False)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
#print(root.attrib['totalResults'])
#print(root)
return root
def find_voucher(serial):
start, max_results = 0 , 50
found = ''
count = 0
while not found and count < max_results:
vouchers = get_voucher_list(start, max_results)
max_results = int(vouchers.attrib['totalResults'])
for child in vouchers:
count += 1
for subchild in child.iter('serial'):
if subchild.text == serial:
found = True
#print('FOUND!')
voucher = Voucher()
voucher = xml_to_object(child, voucher)
#print(voucher)
return voucher
break
if found:
break
return False
def xml_to_object(xml, object):
for child in xml:
if hasattr(object, child.tag):
setattr(object, child.tag, xml.find('./'+child.tag).text)
if child.tag == 'identity':
ref_str = child.attrib['id'][::-1].split(':')
#ref_str = ref_str.split(':')
ref_str = ref_str[0][::-1]
#ref_str = ref_str[::-1]
#print(ref_str)
#print(child.attrib['id'][ref_str:])
setattr(object, 'ref', ref_str) #add unique ref
if child.tag == 'clientCardRef':
ref_str = child.text[::-1]
ref_str = ref_str.split(':')
ref_str = ref_str[0]
ref_str = ref_str[::-1]
setattr(object, 'clientCardRef', ref_str)
#print('child: ' + ref_str)
return object
def convert_vouchers(xml):
'''voucher_list = []
for child in xml:
if child.tag == 'voucher':
voucher_list.append(serialize(xml_to_object(child, Voucher())))
return voucher_list'''
return [serialize(xml_to_object(child, Voucher())) for child in xml if child.tag == 'voucher']
def get_client_vouchers(client_vouchers_uri):
req = requests.get(client_vouchers_uri, auth=(username, password), verify=False)
print(req.status_code)
root = ET.fromstring(req.content)
return convert_vouchers(root)
```
#### File: jmcevoy1984/Phorest-Voucher-Maker/voucher_api.py
```python
from shared import *
from helper_functions import *
from app import app
from flask_restful import Api, Resource, reqparse, fields, marshal_with, marshal
#Wraps the flask 'app' with the Api function from flask-RESTful. Assigns this to the 'api' variable.
api = Api(app)
#---------------MAIN API------------------
#Fields for marshaling output in JSON using the 'marshal_with' decorator/the 'marshal' function of flask-RESTful.
#Only the fields listed here will be output as the response in JSON format during the specified HTTP request.
voucher_post_fields = {
'ref' : fields.String,
'serial' : fields.String,
}
voucher_get_fields = {
'ref' : fields.String,
'serial' : fields.String,
'expiryDate' : fields.String,
'originalBalance' : fields.String,
'clientCardRef' : fields.String,
'archived' : fields.String,
'remainingBalance' : fields.String,
'issueDate' : fields.String
}
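#A minimal sketch of how these field dictionaries are used (hypothetical helper, for
#illustration only): marshal() reduces an object to exactly the declared keys.
def _marshal_voucher_example(voucher):
    #voucher is any object exposing the attributes listed in voucher_get_fields
    return marshal(voucher, voucher_get_fields)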
#Classes that subclass the "Resource" class from flask-RESTful.
#These are the resources for the API and contain all HTTP C.R.U.D methods.
class VoucherListAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('issueDate', type=str, default="{:%Y-%m-%d}".format(datetime.now()), location = 'json')
self.reqparse.add_argument('expiryDate', type=str, default='{:%Y-%m-%d}'.format(datetime.now() + timedelta(days=365)), location='json')
self.reqparse.add_argument('originalBalance', type=str, default='50', location='json')
self.reqparse.add_argument('clientCardRef', type=str, default='', location='json')
self.reqparse.add_argument('creatingBranchRef', type=str, default='urn:x-memento:Branch:'+branch_id, location='json')
self.reqparse.add_argument('archived', type=str, default='false', location='json')
self.reqparse.add_argument('start', type=str, default='0')
self.reqparse.add_argument('max', type=str, default='50')
#self.reqparse.add_argument('remainingBalance', type=str, default='', location='json')
super(VoucherListAPI, self).__init__()
@marshal_with(voucher_post_fields)
def post(self):
print('we go to post!')
args = self.reqparse.parse_args()
voucher = Voucher()
setattr(voucher, 'serial', get_voucher_serial())
for key, value in args.items():
if hasattr(voucher, key):
setattr(voucher, key, value)
#setattr(voucher, 'remainingBalance', getattr(voucher, 'originalBalance'))
#exl = ['remainingBalance', 'ref']
xml_voucher = to_xml(voucher, 'voucher', 'remainingBalance')
print(xml_voucher)
headers = { 'content-type' : 'application/vnd.memento.Voucher+xml' }
req = requests.post(voucher_uri, headers=headers, auth=(username, password), data=xml_voucher, verify=False)
print(req.status_code)
print(req.content)
if req.status_code != 201:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
            setattr(voucher, 'ref', root.find('./identity').attrib['id']) #set the newly acquired id/uri for the voucher
return voucher, 201
def get(self):
args = self.reqparse.parse_args()
params = { 'start' : int(args['start']), 'max' : int(args['max']) }
req = requests.get( voucher_uri, auth=(username, password), params=params, verify=False)
#print(req.content)
#print(req.headers)
#print(req.status_code)
voucher_list = []
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
#voucher = Voucher()
#setattr(voucher, 'ref', ref)
for child in root:
if child.tag == 'voucher':
voucher = Voucher()
filled_voucher = xml_to_object(child, voucher)
voucher_list.append(marshal(filled_voucher, voucher_get_fields))
return { 'voucher_list' : voucher_list }, 200 #to be fixed
class VoucherAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('issueDate', type=str, default="{:%Y-%m-%d}".format(datetime.now()), location = 'json')
self.reqparse.add_argument('expiryDate', type=str, default='{:%Y-%m-%d}'.format(datetime.now() + timedelta(days=365)), location='json')
self.reqparse.add_argument('originalBalance', type=str, default='50', location='json')
self.reqparse.add_argument('clientCardRef', type=str, default='', location='json')
self.reqparse.add_argument('creatingBranchRef', type=str, default='urn:x-memento:Branch:'+branch_id, location='json')
self.reqparse.add_argument('archived', type=str, default='false', location='json')
#self.reqparse.add_argument('remainingBalance', type=str, default='', location='json')
super(VoucherAPI, self).__init__()
#api.add_resource(VoucherAPI, '/api/voucher', endpoint='vouchers')
#get a voucher by it's unique reference (id)
@marshal_with(voucher_get_fields)
def get(self, ref):
req = requests.get( voucher_uri +'/'+ ref, auth=(username, password), verify=False)
print(req.content)
print(req.headers)
print(req.status_code)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
voucher = Voucher()
setattr(voucher, 'ref', ref)
for child in root:
if hasattr(voucher, child.tag):
setattr(voucher, child.tag, root.find('./'+child.tag).text)
print(voucher.__dict__)
return voucher, 200 #to be fixed
def put(self, ref):
original = get_voucher(ref)
args = self.reqparse.parse_args()
voucher = Voucher()
for key, value in original.__dict__.items():
if hasattr(voucher, key):
setattr(voucher, key, value)
for key, value in args.items():
if hasattr(voucher, key):
setattr(voucher, key, value)
#setattr(voucher, 'originalBalance', '1000')
xml_voucher = to_xml(voucher, 'voucher', 'serial')
headers = {'content-type' : 'application/vnd.memento.Voucher+xml' }
req = requests.put(post_uri + '/' + ref, headers=headers, auth=(username, password), data=xml_voucher, verify=False)
print(req.status_code)
print(req.content)
if req.status_code != 200:
abort(req.status_code)
else:
print(original.__dict__.items())
print(voucher.__dict__.items())
print(xml_voucher)
print(args.items())
return { 'result' : True }, 200
#def archive_voucher():
#pass
'''def put(self, ref):
original = get_voucher(ref)
#print(original)
voucher = Voucher()
for child in original:
if hasattr(voucher, child.tag):
setattr(voucher, child.tag, original.find('./'+child.tag).text)
for key, value in dict.items():
if hasattr(voucher, key) and dict[key] != False:
setattr(voucher, key, value)
xml_voucher = to_xml(voucher, 'voucher')
headers = {'content-type' : 'application/vnd.memento.Voucher+xml' }
req = requests.put(voucher_uri + ref, headers=headers, auth=(username, password), data=xml_voucher, verify=False)
return req.status_code'''
api.add_resource(VoucherListAPI, '/api/vouchers', endpoint='vouchers')
api.add_resource(VoucherAPI, '/api/vouchers/<ref>', endpoint='voucher')
#def update_voucher(ref):
#req = requests.get('localhost:5000/api/vouchers/'+ref)
#original = req.content
#print(req)
#------------Clients---------------
client_get_fields = {
'firstName' : fields.String,
'lastName' : fields.String,
'mobile' : fields.String,
'email' : fields.String,
'ref' : fields.String,
'vouchers' : fields.String
}
class ClientAPI(Resource):
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('firstName', type=str, default='', location = 'json')
self.reqparse.add_argument('lastName', type=str, default='', location= 'json')
self.reqparse.add_argument('mobile', type=str, location= 'json')
super(ClientAPI, self).__init__()
@marshal_with(client_get_fields)
def get(self, ref):
req = requests.get(client_uri + '/' + ref, auth=(username, password), verify=False)
#print(req.status_code)
#print(req.content)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
client = Client()
setattr(client, 'ref', ref)
for child in root:
if child.tag == 'link' and child.attrib['rel'] == 'vouchers':
setattr(client, 'vouchers', child.attrib['href'])
if hasattr(client, child.tag):
setattr(client, child.tag, root.find('./'+child.tag).text)
return client, 200 #to be fixed
api.add_resource(ClientAPI, '/api/client/<ref>', endpoint='client')
def create_client():
xml = "<clientCard><firstName>Joe</firstName><lastName>Test</lastName><mobile>0833128991</mobile><archived>false</archived></clientCard>"
headers = {'content-type' : 'application/vnd.memento.ClientCard+xml' }
test_uri = 'https://lbh.eu-dev-0.memento-stacks.phorest.com/memento/rest/business/3Evn8Qqw6pVY4iScdZXWBA/client'
req = requests.post(client_uri, headers=headers, auth=(username, password), data=xml, verify=False)
print(req.status_code)
print('Headers:', req.headers)
print(req.content)
def update_client():
xml = "<clientCard><firstName>Joe</firstName><lastName>Test</lastName><mobile>083317777</mobile></clientCard>"
headers = {'content-type' : 'application/vnd.memento.ClientCard+xml' }
test_uri = 'https://lbh.eu-dev-0.memento-stacks.phorest.com/memento/rest/business/3Evn8Qqw6pVY4iScdZXWBA/client/9mkguGy0b6xpUgaBF65CIA'
req = requests.put(client_uri + '/' + 'XAzAN9Hwffcqp0cx_v7qJg', headers=headers, auth=(username, password), data=xml, verify=False)
print(req.status_code)
print('Headers:', req.headers)
with open('client_update.xml', 'w') as f:
f.write(str(req.content))
f.close()
def get_client(ref):
req = requests.get(client_uri + '/' + ref, auth=(username, password), verify=False)
print(req.status_code)
print(req.content)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
client = Client()
setattr(client, 'ref', ref)
for child in root:
if hasattr(client, child.tag):
setattr(client, child.tag, root.find('./'+child.tag).text)
return client, 200 #to be fixed
'''def get_client_vouchers(client_vouchers_uri):
test_uri = 'https://lbh1.eu.phorest.com/memento/rest/business/3Evn8Qqw6pVY4iScdZXWBA/client/sH40eB0ICVBgK5KFMrokfA/voucher'
req = requests.get(test_uri, auth=(username, password), verify=False)
print(req.status_code)
with open('client_vouchers.xml', 'w') as f:
f.write(str(req.content))
f.close'''
def get_clients():
#test_uri = "https://lbh.eu-dev-0.memento-stacks.phorest.com/memento/rest/business/3Evn8Qqw6pVY4iScdZXWBA/voucher"
req = requests.get(client_uri + '/' + 'Xbus3AT3eqOEJXsfXr6L_w', auth=(username, password), verify=False)
print(req.status_code)
print(req.content)
if req.status_code != 200:
abort(req.status_code)
else:
root = ET.fromstring(req.content)
'''print(len(root))
count = 0
for child in root:
if child.tag == 'voucher':
count += 1
print (count)
#print(child.tag)'''
'''else:
with open('client_list.xml', 'w') as f:
f.write(str(req.content))
f.close'''
def vouch_trans():
trans_uri = "https://lbh.eu-dev-0.memento-stacks.phorest.com/memento/rest/business/3Evn8Qqw6pVY4iScdZXWBA/voucher/8tpbJWlBGIB5Z4CC00npvw/transactions"
xml = '<voucherTransaction><date>2016-02-17</date><transactionAmount>-100.00</transactionAmount><voucherRef>urn:x-memento:Voucher:8tpbJWlBGIB5Z4CC00npvw</voucherRef><branchRef>urn:x-memento:Branch:nPpLa0UY4UO5dn68TpPsiA</branchRef><creatingUserRef>urn:x-memento:User:ISLX8fGtdKIB8CMLSIlc7g</creatingUserRef><voucherUpdateType>MANUALLY_ADDED</voucherUpdateType><relatedTransactionDeleted>false</relatedTransactionDeleted><compensatingTransaction>false</compensatingTransaction></voucherTransaction>'
headers = {'content-type' : 'application/vnd.memento.VoucherTransaction+xm' }
req = requests.post(trans_uri, headers=headers, auth=(username, password), data=xml, verify=False)
print(req.status_code)
print(req.content)
``` |
{
"source": "jmcgill298/flake8",
"score": 3
} |
#### File: flake8/plugins/notifier.py
```python
from flake8.plugins import _trie
class Notifier(object):
"""Object that tracks and notifies listener objects."""
def __init__(self):
"""Initialize an empty notifier object."""
self.listeners = _trie.Trie()
def listeners_for(self, error_code):
"""Retrieve listeners for an error_code.
There may be listeners registered for E1, E100, E101, E110, E112, and
E126. To get all the listeners for one of E100, E101, E110, E112, or
E126 you would also need to incorporate the listeners for E1 (since
they're all in the same class).
Example usage:
.. code-block:: python
from flake8 import notifier
n = notifier.Notifier()
# register listeners
for listener in n.listeners_for('W102'):
listener.notify(...)
"""
path = error_code
while path:
node = self.listeners.find(path)
listeners = getattr(node, "data", [])
for listener in listeners:
yield listener
path = path[:-1]
def notify(self, error_code, *args, **kwargs):
"""Notify all listeners for the specified error code."""
for listener in self.listeners_for(error_code):
listener.notify(error_code, *args, **kwargs)
def register_listener(self, error_code, listener):
"""Register a listener for a specific error_code."""
self.listeners.add(error_code, listener)
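# A minimal usage sketch of Notifier (hypothetical helper, for illustration only): a listener
# registered under the prefix "E1" is also returned for the more specific code "E101",
# because listeners_for() walks every prefix of the error code.
def _notifier_usage_example(listener):
    notifier = Notifier()
    notifier.register_listener("E1", listener)
    notifier.notify("E101", "example-argument")  # calls listener.notify("E101", "example-argument")
    return notifier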
```
#### File: tests/unit/test_git.py
```python
import mock
import pytest
from flake8.main import git
@pytest.mark.parametrize('lazy', [True, False])
def test_find_modified_files(lazy):
"""Confirm our logic for listing modified files."""
if lazy:
# Here --cached is missing
call = [
'git', 'diff-index', '--name-only', '--diff-filter=ACMRTUXB',
'HEAD'
]
else:
call = [
'git', 'diff-index', '--cached', '--name-only',
'--diff-filter=ACMRTUXB', 'HEAD'
]
mocked_popen = mock.Mock()
mocked_popen.communicate.return_value = ('', '')
with mock.patch('flake8.main.git.piped_process') as piped_process:
piped_process.return_value = mocked_popen
git.find_modified_files(lazy)
piped_process.assert_called_once_with(call)
```
#### File: tests/unit/test_merged_config_parser.py
```python
import os
import mock
import pytest
from flake8.options import config
from flake8.options import manager
@pytest.fixture
def optmanager():
"""Generate an OptionManager with simple values."""
return manager.OptionManager(prog='flake8', version='3.0.0a1')
@pytest.fixture
def config_finder():
"""Generate a simple ConfigFileFinder."""
return config.ConfigFileFinder('flake8', [], [])
def test_parse_cli_config(optmanager, config_finder):
"""Parse the specified config file as a cli config file."""
optmanager.add_option('--exclude', parse_from_config=True,
comma_separated_list=True,
normalize_paths=True)
optmanager.add_option('--ignore', parse_from_config=True,
comma_separated_list=True)
optmanager.add_option('--verbose', parse_from_config=True,
action='count')
optmanager.add_option('--quiet', parse_from_config=True,
action='count')
parser = config.MergedConfigParser(optmanager, config_finder)
parsed_config = parser.parse_cli_config(
'tests/fixtures/config_files/cli-specified.ini'
)
assert parsed_config == {
'ignore': ['E123', 'W234', 'E111'],
'exclude': [
os.path.abspath('foo/'),
os.path.abspath('bar/'),
os.path.abspath('bogus/'),
],
'verbose': 2,
'quiet': 1,
}
@pytest.mark.parametrize('filename,is_configured_by', [
('tests/fixtures/config_files/cli-specified.ini', True),
('tests/fixtures/config_files/no-flake8-section.ini', False),
])
def test_is_configured_by(
filename, is_configured_by, optmanager, config_finder):
"""Verify the behaviour of the is_configured_by method."""
parsed_config, _ = config.ConfigFileFinder._read_config(filename)
parser = config.MergedConfigParser(optmanager, config_finder)
assert parser.is_configured_by(parsed_config) is is_configured_by
def test_parse_user_config(optmanager, config_finder):
"""Verify parsing of user config files."""
optmanager.add_option('--exclude', parse_from_config=True,
comma_separated_list=True,
normalize_paths=True)
optmanager.add_option('--ignore', parse_from_config=True,
comma_separated_list=True)
optmanager.add_option('--verbose', parse_from_config=True,
action='count')
optmanager.add_option('--quiet', parse_from_config=True,
action='count')
parser = config.MergedConfigParser(optmanager, config_finder)
with mock.patch.object(parser.config_finder, 'user_config_file') as usercf:
usercf.return_value = 'tests/fixtures/config_files/cli-specified.ini'
parsed_config = parser.parse_user_config()
assert parsed_config == {
'ignore': ['E123', 'W234', 'E111'],
'exclude': [
os.path.abspath('foo/'),
os.path.abspath('bar/'),
os.path.abspath('bogus/'),
],
'verbose': 2,
'quiet': 1,
}
def test_parse_local_config(optmanager, config_finder):
"""Verify parsing of local config files."""
optmanager.add_option('--exclude', parse_from_config=True,
comma_separated_list=True,
normalize_paths=True)
optmanager.add_option('--ignore', parse_from_config=True,
comma_separated_list=True)
optmanager.add_option('--verbose', parse_from_config=True,
action='count')
optmanager.add_option('--quiet', parse_from_config=True,
action='count')
parser = config.MergedConfigParser(optmanager, config_finder)
with mock.patch.object(config_finder, 'local_config_files') as localcfs:
localcfs.return_value = [
'tests/fixtures/config_files/cli-specified.ini'
]
parsed_config = parser.parse_local_config()
assert parsed_config == {
'ignore': ['E123', 'W234', 'E111'],
'exclude': [
os.path.abspath('foo/'),
os.path.abspath('bar/'),
os.path.abspath('bogus/'),
],
'verbose': 2,
'quiet': 1,
}
def test_merge_user_and_local_config(optmanager, config_finder):
"""Verify merging of parsed user and local config files."""
optmanager.add_option('--exclude', parse_from_config=True,
comma_separated_list=True,
normalize_paths=True)
optmanager.add_option('--ignore', parse_from_config=True,
comma_separated_list=True)
optmanager.add_option('--select', parse_from_config=True,
comma_separated_list=True)
parser = config.MergedConfigParser(optmanager, config_finder)
with mock.patch.object(config_finder, 'local_config_files') as localcfs:
localcfs.return_value = [
'tests/fixtures/config_files/local-config.ini'
]
with mock.patch.object(config_finder,
'user_config_file') as usercf:
usercf.return_value = ('tests/fixtures/config_files/'
'user-config.ini')
parsed_config = parser.merge_user_and_local_config()
assert parsed_config == {
'exclude': [
os.path.abspath('docs/')
],
'ignore': ['D203'],
'select': ['E', 'W', 'F'],
}
def test_parse_isolates_config(optmanager):
"""Verify behaviour of the parse method with isolated=True."""
config_finder = mock.MagicMock()
parser = config.MergedConfigParser(optmanager, config_finder)
assert parser.parse(isolated=True) == {}
assert config_finder.local_configs.called is False
assert config_finder.user_config.called is False
def test_parse_uses_cli_config(optmanager):
"""Verify behaviour of the parse method with a specified config."""
config_finder = mock.MagicMock()
parser = config.MergedConfigParser(optmanager, config_finder)
parser.parse(cli_config='foo.ini')
config_finder.cli_config.assert_called_once_with('foo.ini')
@pytest.mark.parametrize('config_fixture_path', [
'tests/fixtures/config_files/cli-specified.ini',
'tests/fixtures/config_files/cli-specified-with-inline-comments.ini',
'tests/fixtures/config_files/cli-specified-without-inline-comments.ini',
])
def test_parsed_configs_are_equivalent(
optmanager, config_finder, config_fixture_path):
"""Verify the each file matches the expected parsed output.
This is used to ensure our documented behaviour does not regress.
"""
optmanager.add_option('--exclude', parse_from_config=True,
comma_separated_list=True,
normalize_paths=True)
optmanager.add_option('--ignore', parse_from_config=True,
comma_separated_list=True)
parser = config.MergedConfigParser(optmanager, config_finder)
with mock.patch.object(config_finder, 'local_config_files') as localcfs:
localcfs.return_value = [config_fixture_path]
with mock.patch.object(config_finder,
'user_config_file') as usercf:
usercf.return_value = []
parsed_config = parser.merge_user_and_local_config()
assert parsed_config['ignore'] == ['E123', 'W234', 'E111']
assert parsed_config['exclude'] == [
os.path.abspath('foo/'),
os.path.abspath('bar/'),
os.path.abspath('bogus/'),
]
@pytest.mark.parametrize('config_file', [
'tests/fixtures/config_files/config-with-hyphenated-options.ini'
])
def test_parsed_hyphenated_and_underscored_names(
optmanager, config_finder, config_file):
"""Verify we find hyphenated option names as well as underscored.
This tests for options like --max-line-length and --enable-extensions
which are able to be specified either as max-line-length or
max_line_length in our config files.
"""
optmanager.add_option('--max-line-length', parse_from_config=True,
type='int')
optmanager.add_option('--enable-extensions', parse_from_config=True,
comma_separated_list=True)
parser = config.MergedConfigParser(optmanager, config_finder)
with mock.patch.object(config_finder, 'local_config_files') as localcfs:
localcfs.return_value = [config_file]
with mock.patch.object(config_finder,
'user_config_file') as usercf:
usercf.return_value = []
parsed_config = parser.merge_user_and_local_config()
assert parsed_config['max_line_length'] == 110
assert parsed_config['enable_extensions'] == ['H101', 'H235']
```
#### File: tests/unit/test_setuptools_command.py
```python
import pytest
from setuptools import dist
from flake8.main import setuptools_command
@pytest.fixture
def distribution():
"""Create a setuptools Distribution object."""
return dist.Distribution({
'name': 'foo',
'packages': [
'foo',
'foo.bar',
'foo_biz',
],
})
@pytest.fixture
def command(distribution):
"""Create an instance of Flake8's setuptools command."""
return setuptools_command.Flake8(distribution)
def test_package_files_removes_submodules(command):
"""Verify that we collect all package files."""
package_files = list(command.package_files())
assert sorted(package_files) == [
'foo',
'foo_biz',
]
``` |
{
"source": "jmcgill298/nautobot",
"score": 2
} |
#### File: nautobot/development/nautobot_config.py
```python
from distutils.util import strtobool
import os
import sys
from nautobot.core.settings import *
ALLOWED_HOSTS = os.environ.get("ALLOWED_HOSTS", "").split(" ")
DATABASES = {
"default": {
"NAME": os.environ.get("NAUTOBOT_DATABASE", "nautobot"),
"USER": os.environ.get("NAUTOBOT_USER", ""),
"PASSWORD": os.environ.get("NAUTOBOT_PASSWORD", ""),
"HOST": os.environ.get("NAUTOBOT_DB_HOST", "localhost"),
"PORT": os.environ.get("NAUTOBOT_DB_PORT", ""),
"CONN_MAX_AGE": 300,
"ENGINE": "django.db.backends.postgresql",
}
}
DEBUG = True
LOG_LEVEL = "DEBUG" if DEBUG else "INFO"
TESTING = len(sys.argv) > 1 and sys.argv[1] == "test"
# Verbose logging during normal development operation, but quiet logging during unit test execution
if not TESTING:
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"normal": {
"format": "%(asctime)s.%(msecs)03d %(levelname)-7s %(name)s :\n %(message)s",
"datefmt": "%H:%M:%S",
},
"verbose": {
"format": "%(asctime)s.%(msecs)03d %(levelname)-7s %(name)-20s %(filename)-15s %(funcName)30s() :\n %(message)s",
"datefmt": "%H:%M:%S",
},
},
"handlers": {
"normal_console": {
"level": "INFO",
"class": "rq.utils.ColorizingStreamHandler",
"formatter": "normal",
},
"verbose_console": {
"level": "DEBUG",
"class": "rq.utils.ColorizingStreamHandler",
"formatter": "verbose",
},
},
"loggers": {
"django": {"handlers": ["normal_console"], "level": "INFO"},
"nautobot": {
"handlers": ["verbose_console" if DEBUG else "normal_console"],
"level": LOG_LEVEL,
},
"rq.worker": {
"handlers": ["verbose_console" if DEBUG else "normal_console"],
"level": LOG_LEVEL,
},
},
}
def is_truthy(arg):
"""Convert "truthy" strings into Booleans.
Examples:
>>> is_truthy('yes')
True
Args:
        arg (str): Truthy string (True values are y, yes, t, true, on and 1; false values are n, no,
            f, false, off and 0). Raises ValueError if arg is anything else.
"""
if isinstance(arg, bool):
return arg
return bool(strtobool(str(arg)))
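# Illustrative note (not part of the original config): is_truthy() is what lets string-valued
# environment variables act as booleans, e.g. REDIS_SSL below, where "1", "true", "yes", "on"
# (case-insensitive) enable SSL and "0", "false", "no", "off" disable it.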
# Redis variables
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = os.getenv("REDIS_PORT", 6379)
REDIS_PASSWORD = os.getenv("REDIS_PASSWORD", "")
# Check for Redis SSL
REDIS_SCHEME = "redis"
REDIS_SSL = is_truthy(os.environ.get("REDIS_SSL", False))
if REDIS_SSL:
REDIS_SCHEME = "rediss"
# The django-redis cache is used to establish concurrent locks using Redis. The
# django-rq settings will use the same instance/database by default.
#
# This "default" server is now used by RQ_QUEUES.
# >> See: nautobot.core.settings.RQ_QUEUES
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": f"{REDIS_SCHEME}://{REDIS_HOST}:{REDIS_PORT}/0",
"TIMEOUT": 300,
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"PASSWORD": <PASSWORD>IS_PASSWORD,
},
}
}
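# Illustrative example (derived from the defaults above): with no Redis environment variables set,
# the cache LOCATION resolves to "redis://localhost:6379/0" and CACHEOPS_REDIS below resolves to
# "redis://:@localhost:6379/1".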
# RQ_QUEUES is not set here because it just uses the default that gets imported
# up top via `from nautobot.core.settings import *`.
# REDIS CACHEOPS
CACHEOPS_REDIS = f"{REDIS_SCHEME}://:{REDIS_PASSWORD}@{REDIS_HOST}:{REDIS_PORT}/1"
HIDE_RESTRICTED_UI = is_truthy(os.environ.get("HIDE_RESTRICTED_UI", False))
SECRET_KEY = os.environ.get("SECRET_KEY", "")
# Django Debug Toolbar
DEBUG_TOOLBAR_CONFIG = {"SHOW_TOOLBAR_CALLBACK": lambda _request: DEBUG and not TESTING}
if "debug_toolbar" not in INSTALLED_APPS:
INSTALLED_APPS.append("debug_toolbar")
if "debug_toolbar.middleware.DebugToolbarMiddleware" not in MIDDLEWARE:
MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware")
``` |
{
"source": "jmcgill298/nautobot-plugin-golden-config",
"score": 2
} |
#### File: nautobot_golden_config/nornir_plays/config_intended.py
```python
import os
import logging
from datetime import datetime
from nornir import InitNornir
from nornir.core.plugins.inventory import InventoryPluginRegister
from nornir.core.task import Result, Task
from nornir_nautobot.exceptions import NornirNautobotException
from nornir_nautobot.plugins.tasks.dispatcher import dispatcher
from nornir_nautobot.utils.logger import NornirLogger
from nautobot_plugin_nornir.plugins.inventory.nautobot_orm import NautobotORMInventory
from nautobot_plugin_nornir.constants import NORNIR_SETTINGS
from nautobot_plugin_nornir.utils import get_dispatcher
from nautobot_golden_config.models import GoldenConfigSetting, GoldenConfig
from nautobot_golden_config.utilities.helper import (
get_job_filter,
verify_global_settings,
check_jinja_template,
)
from nautobot_golden_config.utilities.graphql import graph_ql_query
from nautobot_golden_config.nornir_plays.processor import ProcessGoldenConfig
InventoryPluginRegister.register("nautobot-inventory", NautobotORMInventory)
LOGGER = logging.getLogger(__name__)
def run_template( # pylint: disable=too-many-arguments
task: Task, logger, global_settings, job_result, jinja_root_path, intended_root_folder
) -> Result:
"""Render Jinja Template.
    Only one template is supported, so the expectation is that this template includes all other templates.
Args:
task (Task): Nornir task individual object
Returns:
result (Result): Result from Nornir task
"""
obj = task.host.data["obj"]
intended_obj = GoldenConfig.objects.filter(device=obj).first()
if not intended_obj:
intended_obj = GoldenConfig.objects.create(device=obj)
intended_obj.intended_last_attempt_date = task.host.defaults.data["now"]
intended_obj.save()
intended_path_template_obj = check_jinja_template(obj, logger, global_settings.intended_path_template)
output_file_location = os.path.join(intended_root_folder, intended_path_template_obj)
jinja_template = check_jinja_template(obj, logger, global_settings.jinja_path_template)
status, device_data = graph_ql_query(job_result.request, obj, global_settings.sot_agg_query)
if status != 200:
logger.log_failure(obj, f"The GraphQL query return a status of {str(status)} with error of {str(device_data)}")
raise NornirNautobotException()
task.host.data.update(device_data)
generated_config = task.run(
task=dispatcher,
name="GENERATE CONFIG",
method="generate_config",
obj=obj,
logger=logger,
jinja_template=jinja_template,
jinja_root_path=jinja_root_path,
output_file_location=output_file_location,
default_drivers_mapping=get_dispatcher(),
)[1].result["config"]
intended_obj.intended_last_success_date = task.host.defaults.data["now"]
intended_obj.intended_config = generated_config
intended_obj.save()
logger.log_success(obj, "Successfully generated the intended configuration.")
return Result(host=task.host, result=generated_config)
def config_intended(job_result, data, jinja_root_path, intended_root_folder):
"""Nornir play to generate configurations."""
now = datetime.now()
logger = NornirLogger(__name__, job_result, data.get("debug"))
global_settings = GoldenConfigSetting.objects.first()
verify_global_settings(logger, global_settings, ["jinja_path_template", "intended_path_template", "sot_agg_query"])
try:
with InitNornir(
runner=NORNIR_SETTINGS.get("runner"),
logging={"enabled": False},
inventory={
"plugin": "nautobot-inventory",
"options": {
"credentials_class": NORNIR_SETTINGS.get("credentials"),
"params": NORNIR_SETTINGS.get("inventory_params"),
"queryset": get_job_filter(data),
"defaults": {"now": now},
},
},
) as nornir_obj:
nr_with_processors = nornir_obj.with_processors([ProcessGoldenConfig(logger)])
# Run the Nornir Tasks
nr_with_processors.run(
task=run_template,
name="RENDER CONFIG",
logger=logger,
global_settings=global_settings,
job_result=job_result,
jinja_root_path=jinja_root_path,
intended_root_folder=intended_root_folder,
)
except Exception as err:
logger.log_failure(None, err)
raise
```
#### File: nautobot-plugin-golden-config/nautobot_golden_config/tables.py
```python
import copy
from django.utils.html import format_html
from django_tables2 import Column, LinkColumn, TemplateColumn
from django_tables2.utils import A
from nautobot.dcim.models import Device
from nautobot.utilities.tables import (
BaseTable,
ToggleColumn,
)
from nautobot_golden_config import models
from nautobot_golden_config.utilities.constant import ENABLE_BACKUP, ENABLE_COMPLIANCE, ENABLE_INTENDED, CONFIG_FEATURES
ALL_ACTIONS = """
{% if backup == True %}
{% if record.configcompliance_set.first.rule.config_type == 'json' %}
<i class="mdi mdi-circle-small"></i>
{% else %}
{% if record.goldenconfig_set.first.backup_config %}
<a value="{% url 'plugins:nautobot_golden_config:configcompliance_details' pk=record.pk config_type='backup' %}" class="openBtn" data-href="{% url 'plugins:nautobot_golden_config:configcompliance_details' pk=record.pk config_type='backup' %}?modal=true">
<i class="mdi mdi-file-document-outline" title="Backup Configuration"></i>
</a>
{% else %}
<i class="mdi mdi-circle-small"></i>
{% endif %}
{% endif %}
{% endif %}
{% if intended == True %}
{% if record.configcompliance_set.first.rule.config_type == 'json' %}
<i class="mdi mdi-circle-small"></i>
{% else %}
{% if record.goldenconfig_set.first.intended_config %}
<a value="{% url 'plugins:nautobot_golden_config:configcompliance_details' pk=record.pk config_type='intended' %}" class="openBtn" data-href="{% url 'plugins:nautobot_golden_config:configcompliance_details' pk=record.pk config_type='intended' %}?modal=true">
<i class="mdi mdi-text-box-check-outline" title="Intended Configuration"></i>
</a>
{% else %}
<i class="mdi mdi-circle-small"></i>
{% endif %}
{% endif %}
{% endif %}
{% if compliance == True %}
{% if record.configcompliance_set.first.rule.config_type == 'json' %}
<a value="{% url 'plugins:nautobot_golden_config:configcompliance_details' pk=record.pk config_type='json_compliance' %}" class="openBtn" data-href="{% url 'plugins:nautobot_golden_config:configcompliance_details' pk=record.pk config_type='json_compliance' %}?modal=true">
<i class="mdi mdi-file-compare" title="Compliance Details JSON"></i>
</a>
{% else %}
{% if record.goldenconfig_set.first.compliance_config %}
<a value="{% url 'plugins:nautobot_golden_config:configcompliance_details' pk=record.pk config_type='compliance' %}" class="openBtn" data-href="{% url 'plugins:nautobot_golden_config:configcompliance_details' pk=record.pk config_type='compliance' %}?modal=true">
<i class="mdi mdi-file-compare" title="Compliance Details"></i>
</a>
{% else %}
<i class="mdi mdi-circle-small"></i>
{% endif %}
{% endif %}
{% endif %}
{% if sotagg == True %}
<a value="{% url 'plugins:nautobot_golden_config:configcompliance_details' pk=record.pk config_type='sotagg' %}" class="openBtn" data-href="{% url 'plugins:nautobot_golden_config:configcompliance_details' pk=record.pk config_type='sotagg' %}?modal=true">
<i class="mdi mdi-code-json" title="SOT Aggregate Data"></i>
</a>
{% if record.configcompliance_set.first.rule.config_type == 'json' %}
<i class="mdi mdi-circle-small"></i>
{% else %}
<a href="{% url 'extras:job' class_path='plugins/nautobot_golden_config.jobs/AllGoldenConfig' %}?device={{ record.pk }}"
<span class="text-primary">
<i class="mdi mdi-play-circle" title="Execute All Golden Config Jobs"></i>
</span>
</a>
{% endif %}
{% endif %}
"""
MATCH_CONFIG = """{{ record.match_config|linebreaksbr }}"""
def actual_fields():
"""Convienance function to conditionally toggle columns."""
active_fields = ["pk", "name"]
if ENABLE_BACKUP:
active_fields.append("backup_last_success_date")
if ENABLE_INTENDED:
active_fields.append("intended_last_success_date")
if ENABLE_COMPLIANCE:
active_fields.append("compliance_last_success_date")
active_fields.append("actions")
return tuple(active_fields)
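# Illustrative example (derived from actual_fields above): with ENABLE_BACKUP, ENABLE_INTENDED,
# and ENABLE_COMPLIANCE all True, the returned tuple is
#     ("pk", "name", "backup_last_success_date", "intended_last_success_date",
#      "compliance_last_success_date", "actions")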
#
# Columns
#
class PercentageColumn(Column):
"""Column used to display percentage."""
def render(self, value):
"""Render percentage value."""
return f"{value} %"
class ComplianceColumn(Column):
"""Column used to display config compliance status (True/False/None)."""
def render(self, value):
"""Render an entry in this column."""
if value == 1: # pylint: disable=no-else-return
return format_html('<span class="text-success"><i class="mdi mdi-check-bold"></i></span>')
elif value == 0:
return format_html('<span class="text-danger"><i class="mdi mdi-close-thick"></i></span>')
else: # value is None
return format_html('<span class="mdi mdi-minus"></span>')
#
# Tables
#
# ConfigCompliance
class ConfigComplianceTable(BaseTable):
"""Table for rendering a listing of Device entries and their associated ConfigCompliance record status."""
pk = ToggleColumn(accessor=A("device"))
device = TemplateColumn(
template_code="""<a href="{% url 'plugins:nautobot_golden_config:configcompliance_devicedetail' pk=record.device %}" <strong>{{ record.device__name }}</strong></a> """
)
def __init__(self, *args, **kwargs):
"""Override default values to dynamically add columns."""
        # ConfigCompliance.objects is used deliberately instead of the queryset passed in via args[0]:
        # the queryset caused issues and behaved unexpectedly from a user's standpoint (e.g. the set of
        # columns could vary depending on filtering).
features = list(
models.ConfigCompliance.objects.order_by("rule__feature__name")
.values_list("rule__feature__name", flat=True)
.distinct()
)
extra_columns = [(feature, ComplianceColumn(verbose_name=feature)) for feature in features]
kwargs["extra_columns"] = extra_columns
# Nautobot's BaseTable.configurable_columns() only recognizes columns in self.base_columns,
# so override the class's base_columns to include our additional columns as configurable.
self.base_columns = copy.deepcopy(self.base_columns)
for feature, column in extra_columns:
self.base_columns[feature] = column
super().__init__(*args, **kwargs)
class Meta(BaseTable.Meta):
"""Metaclass attributes of ConfigComplianceTable."""
model = models.ConfigCompliance
fields = (
"pk",
"device",
)
# All other fields (ConfigCompliance names) are constructed dynamically at instantiation time - see views.py
class ConfigComplianceGlobalFeatureTable(BaseTable):
"""Table for feature compliance report."""
name = Column(accessor="rule__feature__slug", verbose_name="Feature")
count = Column(accessor="count", verbose_name="Total")
compliant = Column(accessor="compliant", verbose_name="Compliant")
non_compliant = Column(accessor="non_compliant", verbose_name="Non-Compliant")
comp_percent = PercentageColumn(accessor="comp_percent", verbose_name="Compliance (%)")
class Meta(BaseTable.Meta):
"""Metaclass attributes of ConfigComplianceGlobalFeatureTable."""
model = models.ConfigCompliance
fields = ["name", "count", "compliant", "non_compliant", "comp_percent"]
default_columns = [
"name",
"count",
"compliant",
"non_compliant",
"comp_percent",
]
class ConfigComplianceDeleteTable(BaseTable):
"""Table for device compliance report."""
feature = Column(accessor="rule__feature__name", verbose_name="Feature")
class Meta(BaseTable.Meta):
"""Metaclass attributes of ConfigComplianceDeleteTable."""
device = Column(accessor="device__name", verbose_name="Device Name")
compliance = Column(accessor="compliance", verbose_name="Compliance")
model = models.ConfigCompliance
fields = ("device", "feature", "compliance")
class DeleteGoldenConfigTable(BaseTable):
"""
Table used in bulk delete confirmation.
    This is required since the model is different when deleting the record compared to when initially viewing the records via Device.
"""
pk = ToggleColumn()
def __init__(self, *args, **kwargs):
"""Remove all fields from showing except device ."""
super().__init__(*args, **kwargs)
for feature in list(self.base_columns.keys()): # pylint: disable=no-member
if feature not in ["pk", "device"]:
self.base_columns.pop(feature) # pylint: disable=no-member
self.sequence.remove(feature)
class Meta(BaseTable.Meta):
"""Meta for class DeleteGoldenConfigTable."""
model = models.GoldenConfig
# GoldenConfig
class GoldenConfigTable(BaseTable):
"""Table to display Config Management Status."""
def __init__(self, *args, **kwargs):
"""Remove custom field columns from showing."""
super().__init__(*args, **kwargs)
for feature in list(self.base_columns.keys()): # pylint: disable=no-member
if feature.startswith("cf_"):
self.base_columns.pop(feature) # pylint: disable=no-member
self.sequence.remove(feature)
pk = ToggleColumn()
name = TemplateColumn(
template_code="""<a href="{% url 'dcim:device' pk=record.pk %}">{{ record.name }}</a>""",
verbose_name="Device",
)
if ENABLE_BACKUP:
backup_last_success_date = Column(
verbose_name="Backup Status", empty_values=(), order_by="goldenconfig__backup_last_success_date"
)
if ENABLE_INTENDED:
intended_last_success_date = Column(
verbose_name="Intended Status", empty_values=(), order_by="goldenconfig__intended_last_success_date"
)
if ENABLE_COMPLIANCE:
compliance_last_success_date = Column(
verbose_name="Compliance Status", empty_values=(), order_by="goldenconfig__compliance_last_success_date"
)
actions = TemplateColumn(
template_code=ALL_ACTIONS, verbose_name="Actions", extra_context=CONFIG_FEATURES, orderable=False
)
def _render_last_success_date(self, record, column, value): # pylint: disable=no-self-use
"""Abstract method to get last success per row record."""
entry = record.goldenconfig_set.first()
last_success_date = getattr(entry, f"{value}_last_success_date", None)
last_attempt_date = getattr(entry, f"{value}_last_attempt_date", None)
if not last_success_date or not last_attempt_date:
column.attrs = {"td": {"style": "color:black"}}
return "--"
if last_success_date and last_attempt_date == last_success_date:
column.attrs = {"td": {"style": "color:green"}}
return last_success_date
column.attrs = {"td": {"style": "color:red"}}
return last_success_date
def render_backup_last_success_date(self, record, column): # pylint: disable=no-self-use
"""Pull back backup last success per row record."""
return self._render_last_success_date(record, column, "backup")
def render_intended_last_success_date(self, record, column): # pylint: disable=no-self-use
"""Pull back intended last success per row record."""
return self._render_last_success_date(record, column, "intended")
def render_compliance_last_success_date(self, record, column): # pylint: disable=no-self-use
"""Pull back compliance last success per row record."""
return self._render_last_success_date(record, column, "compliance")
class Meta(BaseTable.Meta):
"""Meta for class GoldenConfigTable."""
model = Device
fields = actual_fields()
# ComplianceFeature
class ComplianceFeatureTable(BaseTable):
"""Table to display Compliance Features."""
pk = ToggleColumn()
name = LinkColumn("plugins:nautobot_golden_config:compliancefeature", args=[A("pk")])
class Meta(BaseTable.Meta):
"""Table to display Compliance Features Meta Data."""
model = models.ComplianceFeature
fields = ("pk", "name", "slug", "description")
default_columns = ("pk", "name", "slug", "description")
# ComplianceRule
class ComplianceRuleTable(BaseTable):
"""Table to display Compliance Rules."""
pk = ToggleColumn()
feature = LinkColumn("plugins:nautobot_golden_config:compliancerule", args=[A("pk")])
match_config = TemplateColumn(template_code=MATCH_CONFIG)
class Meta(BaseTable.Meta):
"""Table to display Compliance Rules Meta Data."""
model = models.ComplianceRule
fields = ("pk", "feature", "platform", "description", "config_ordered", "match_config", "config_type")
default_columns = ("pk", "feature", "platform", "description", "config_ordered", "match_config", "config_type")
# ConfigRemove
class ConfigRemoveTable(BaseTable):
"""Table to display Compliance Rules."""
pk = ToggleColumn()
name = LinkColumn("plugins:nautobot_golden_config:configremove", args=[A("pk")])
class Meta(BaseTable.Meta):
"""Table to display Compliance Rules Meta Data."""
model = models.ConfigRemove
fields = ("pk", "name", "platform", "description", "regex")
default_columns = ("pk", "name", "platform", "description", "regex")
# ConfigReplace
class ConfigReplaceTable(BaseTable):
"""Table to display Compliance Rules."""
pk = ToggleColumn()
name = LinkColumn("plugins:nautobot_golden_config:configreplace", args=[A("pk")])
class Meta(BaseTable.Meta):
"""Table to display Compliance Rules Meta Data."""
model = models.ConfigReplace
fields = ("pk", "name", "platform", "description", "regex", "replace")
default_columns = ("pk", "name", "platform", "description", "regex", "replace")
``` |
{
"source": "jmcgover/bots",
"score": 3
} |
#### File: bots/beerdescriber/json_to_txt.py
```python
import sys
import os
import json
def main():
    # Load the list of bad beer names from JSON and write them out one per line.
    data = None
    with open("bad_beers.json", 'r') as infile:
        data = json.load(infile)
    bad_beers = []
    bad_beers.extend(data["bad_beers"])
    with open("bad_beers.txt", 'w') as outfile:
        for beer in bad_beers:
            print(beer, file=outfile)
    return 0
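# Illustrative data shape (assumed from the key accessed above): bad_beers.json is expected to look
# like {"bad_beers": ["<beer name>", ...]}, and the script writes one name per line to bad_beers.txt.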
if __name__ == "__main__":
rtn = main()
sys.exit(rtn)
```
#### File: bots/logios/hyphenate.py
```python
import re
__version__ = '1.0.20070709'
class Hyphenator:
def __init__(self, patterns, exceptions=''):
self.tree = {}
for pattern in patterns.split():
self._insert_pattern(pattern)
self.exceptions = {}
for ex in exceptions.split():
# Convert the hyphenated pattern into a point array for use later.
self.exceptions[ex.replace('-', '')] = [0] + [ int(h == '-') for h in re.split(r"[a-z]", ex) ]
def _insert_pattern(self, pattern):
        # Convert a pattern like 'a1bc3d4' into a string of chars 'abcd'
# and a list of points [ 0, 1, 0, 3, 4 ].
chars = re.sub('[0-9]', '', pattern)
points = [ int(d or 0) for d in re.split("[.a-z]", pattern) ]
# Insert the pattern into the tree. Each character finds a dict
# another level down in the tree, and leaf nodes have the list of
# points.
t = self.tree
for c in chars:
if c not in t:
t[c] = {}
t = t[c]
t[None] = points
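        # Illustrative example (derived from the code above): inserting the pattern 'a1bc3d4'
        # leaves self.tree['a']['b']['c']['d'][None] == [0, 1, 0, 3, 4].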
def hyphenate_word(self, word):
""" Given a word, returns a list of pieces, broken at the possible
hyphenation points.
"""
# Short words aren't hyphenated.
if len(word) <= 4:
return [word]
# If the word is an exception, get the stored points.
if word.lower() in self.exceptions:
points = self.exceptions[word.lower()]
else:
work = '.' + word.lower() + '.'
points = [0] * (len(work)+1)
for i in range(len(work)):
t = self.tree
for c in work[i:]:
if c in t:
t = t[c]
if None in t:
p = t[None]
for j in range(len(p)):
points[i+j] = max(points[i+j], p[j])
else:
break
# No hyphens in the first two chars or the last two.
points[1] = points[2] = points[-2] = points[-3] = 0
# Examine the points to build the pieces list.
pieces = ['']
for c, p in zip(word, points[2:]):
pieces[-1] += c
if p % 2:
pieces.append('')
return pieces
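# Illustrative usage sketch (not part of the original module): with the module-level `patterns`
# string defined below, a hyphenator can be built and a word's pieces joined at the candidate
# break points, e.g.
#     hyphenator = Hyphenator(patterns)
#     print("-".join(hyphenator.hyphenate_word("hyphenation")))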
patterns = (
# Knuth and Liang's original hyphenation patterns from classic TeX.
# In the public domain.
"""
.ach4 .ad4der .af1t .al3t .am5at .an5c .ang4 .ani5m .ant4 .an3te .anti5s .ar5s
.ar4tie .ar4ty .as3c .as1p .as1s .aster5 .atom5 .au1d .av4i .awn4 .ba4g .ba5na
.bas4e .ber4 .be5ra .be3sm .be5sto .bri2 .but4ti .cam4pe .can5c .capa5b .car5ol
.ca4t .ce4la .ch4 .chill5i .ci2 .cit5r .co3e .co4r .cor5ner .de4moi .de3o .de3ra
.de3ri .des4c .dictio5 .do4t .du4c .dumb5 .earth5 .eas3i .eb4 .eer4 .eg2 .el5d
.el3em .enam3 .en3g .en3s .eq5ui5t .er4ri .es3 .eu3 .eye5 .fes3 .for5mer .ga2
.ge2 .gen3t4 .ge5og .gi5a .gi4b .go4r .hand5i .han5k .he2 .hero5i .hes3 .het3
.hi3b .hi3er .hon5ey .hon3o .hov5 .id4l .idol3 .im3m .im5pin .in1 .in3ci .ine2
.in2k .in3s .ir5r .is4i .ju3r .la4cy .la4m .lat5er .lath5 .le2 .leg5e .len4
.lep5 .lev1 .li4g .lig5a .li2n .li3o .li4t .mag5a5 .mal5o .man5a .mar5ti .me2
.mer3c .me5ter .mis1 .mist5i .mon3e .mo3ro .mu5ta .muta5b .ni4c .od2 .odd5
.of5te .or5ato .or3c .or1d .or3t .os3 .os4tl .oth3 .out3 .ped5al .pe5te .pe5tit
.pi4e .pio5n .pi2t .pre3m .ra4c .ran4t .ratio5na .ree2 .re5mit .res2 .re5stat
.ri4g .rit5u .ro4q .ros5t .row5d .ru4d .sci3e .self5 .sell5 .se2n .se5rie .sh2
.si2 .sing4 .st4 .sta5bl .sy2 .ta4 .te4 .ten5an .th2 .ti2 .til4 .tim5o5 .ting4
.tin5k .ton4a .to4p .top5i .tou5s .trib5ut .un1a .un3ce .under5 .un1e .un5k
.un5o .un3u .up3 .ure3 .us5a .ven4de .ve5ra .wil5i .ye4 4ab. a5bal a5ban abe2
ab5erd abi5a ab5it5ab ab5lat ab5o5liz 4abr ab5rog ab3ul a4car ac5ard ac5aro
a5ceou ac1er a5chet 4a2ci a3cie ac1in a3cio ac5rob act5if ac3ul ac4um a2d ad4din
ad5er. 2adi a3dia ad3ica adi4er a3dio a3dit a5diu ad4le ad3ow ad5ran ad4su 4adu
a3duc ad5um ae4r aeri4e a2f aff4 a4gab aga4n ag5ell age4o 4ageu ag1i 4ag4l ag1n
a2go 3agog ag3oni a5guer ag5ul a4gy a3ha a3he ah4l a3ho ai2 a5ia a3ic. ai5ly
a4i4n ain5in ain5o ait5en a1j ak1en al5ab al3ad a4lar 4aldi 2ale al3end a4lenti
a5le5o al1i al4ia. ali4e al5lev 4allic 4alm a5log. a4ly. 4alys 5a5lyst 5alyt
3alyz 4ama am5ab am3ag ama5ra am5asc a4matis a4m5ato am5era am3ic am5if am5ily
am1in ami4no a2mo a5mon amor5i amp5en a2n an3age 3analy a3nar an3arc anar4i
a3nati 4and ande4s an3dis an1dl an4dow a5nee a3nen an5est. a3neu 2ang ang5ie
an1gl a4n1ic a3nies an3i3f an4ime a5nimi a5nine an3io a3nip an3ish an3it a3niu
an4kli 5anniz ano4 an5ot anoth5 an2sa an4sco an4sn an2sp ans3po an4st an4sur
antal4 an4tie 4anto an2tr an4tw an3ua an3ul a5nur 4ao apar4 ap5at ap5ero a3pher
4aphi a4pilla ap5illar ap3in ap3ita a3pitu a2pl apoc5 ap5ola apor5i apos3t
aps5es a3pu aque5 2a2r ar3act a5rade ar5adis ar3al a5ramete aran4g ara3p ar4at
a5ratio ar5ativ a5rau ar5av4 araw4 arbal4 ar4chan ar5dine ar4dr ar5eas a3ree
ar3ent a5ress ar4fi ar4fl ar1i ar5ial ar3ian a3riet ar4im ar5inat ar3io ar2iz
ar2mi ar5o5d a5roni a3roo ar2p ar3q arre4 ar4sa ar2sh 4as. as4ab as3ant ashi4
a5sia. a3sib a3sic 5a5si4t ask3i as4l a4soc as5ph as4sh as3ten as1tr asur5a a2ta
at3abl at5ac at3alo at5ap ate5c at5ech at3ego at3en. at3era ater5n a5terna
at3est at5ev 4ath ath5em a5then at4ho ath5om 4ati. a5tia at5i5b at1ic at3if
ation5ar at3itu a4tog a2tom at5omiz a4top a4tos a1tr at5rop at4sk at4tag at5te
at4th a2tu at5ua at5ue at3ul at3ura a2ty au4b augh3 au3gu au4l2 aun5d au3r
au5sib aut5en au1th a2va av3ag a5van ave4no av3era av5ern av5ery av1i avi4er
av3ig av5oc a1vor 3away aw3i aw4ly aws4 ax4ic ax4id ay5al aye4 ays4 azi4er azz5i
5ba. bad5ger ba4ge bal1a ban5dag ban4e ban3i barbi5 bari4a bas4si 1bat ba4z 2b1b
b2be b3ber bbi4na 4b1d 4be. beak4 beat3 4be2d be3da be3de be3di be3gi be5gu 1bel
be1li be3lo 4be5m be5nig be5nu 4bes4 be3sp be5str 3bet bet5iz be5tr be3tw be3w
be5yo 2bf 4b3h bi2b bi4d 3bie bi5en bi4er 2b3if 1bil bi3liz bina5r4 bin4d bi5net
bi3ogr bi5ou bi2t 3bi3tio bi3tr 3bit5ua b5itz b1j bk4 b2l2 blath5 b4le. blen4
5blesp b3lis b4lo blun4t 4b1m 4b3n bne5g 3bod bod3i bo4e bol3ic bom4bi bon4a
bon5at 3boo 5bor. 4b1ora bor5d 5bore 5bori 5bos4 b5ota both5 bo4to bound3 4bp
4brit broth3 2b5s2 bsor4 2bt bt4l b4to b3tr buf4fer bu4ga bu3li bumi4 bu4n
bunt4i bu3re bus5ie buss4e 5bust 4buta 3butio b5uto b1v 4b5w 5by. bys4 1ca
cab3in ca1bl cach4 ca5den 4cag4 2c5ah ca3lat cal4la call5in 4calo can5d can4e
can4ic can5is can3iz can4ty cany4 ca5per car5om cast5er cas5tig 4casy ca4th
4cativ cav5al c3c ccha5 cci4a ccompa5 ccon4 ccou3t 2ce. 4ced. 4ceden 3cei 5cel.
3cell 1cen 3cenc 2cen4e 4ceni 3cent 3cep ce5ram 4cesa 3cessi ces5si5b ces5t cet4
c5e4ta cew4 2ch 4ch. 4ch3ab 5chanic ch5a5nis che2 cheap3 4ched che5lo 3chemi
ch5ene ch3er. ch3ers 4ch1in 5chine. ch5iness 5chini 5chio 3chit chi2z 3cho2
ch4ti 1ci 3cia ci2a5b cia5r ci5c 4cier 5cific. 4cii ci4la 3cili 2cim 2cin c4ina
3cinat cin3em c1ing c5ing. 5cino cion4 4cipe ci3ph 4cipic 4cista 4cisti 2c1it
cit3iz 5ciz ck1 ck3i 1c4l4 4clar c5laratio 5clare cle4m 4clic clim4 cly4 c5n 1co
co5ag coe2 2cog co4gr coi4 co3inc col5i 5colo col3or com5er con4a c4one con3g
con5t co3pa cop3ic co4pl 4corb coro3n cos4e cov1 cove4 cow5a coz5e co5zi c1q
cras5t 5crat. 5cratic cre3at 5cred 4c3reta cre4v cri2 cri5f c4rin cris4 5criti
cro4pl crop5o cros4e cru4d 4c3s2 2c1t cta4b ct5ang c5tant c2te c3ter c4ticu
ctim3i ctu4r c4tw cud5 c4uf c4ui cu5ity 5culi cul4tis 3cultu cu2ma c3ume cu4mi
3cun cu3pi cu5py cur5a4b cu5ria 1cus cuss4i 3c4ut cu4tie 4c5utiv 4cutr 1cy cze4
1d2a 5da. 2d3a4b dach4 4daf 2dag da2m2 dan3g dard5 dark5 4dary 3dat 4dativ 4dato
5dav4 dav5e 5day d1b d5c d1d4 2de. deaf5 deb5it de4bon decan4 de4cil de5com
2d1ed 4dee. de5if deli4e del5i5q de5lo d4em 5dem. 3demic dem5ic. de5mil de4mons
demor5 1den de4nar de3no denti5f de3nu de1p de3pa depi4 de2pu d3eq d4erh 5derm
dern5iz der5s des2 d2es. de1sc de2s5o des3ti de3str de4su de1t de2to de1v dev3il
4dey 4d1f d4ga d3ge4t dg1i d2gy d1h2 5di. 1d4i3a dia5b di4cam d4ice 3dict 3did
5di3en d1if di3ge di4lato d1in 1dina 3dine. 5dini di5niz 1dio dio5g di4pl dir2
di1re dirt5i dis1 5disi d4is3t d2iti 1di1v d1j d5k2 4d5la 3dle. 3dled 3dles.
4dless 2d3lo 4d5lu 2dly d1m 4d1n4 1do 3do. do5de 5doe 2d5of d4og do4la doli4
do5lor dom5iz do3nat doni4 doo3d dop4p d4or 3dos 4d5out do4v 3dox d1p 1dr
drag5on 4drai dre4 drea5r 5dren dri4b dril4 dro4p 4drow 5drupli 4dry 2d1s2 ds4p
d4sw d4sy d2th 1du d1u1a du2c d1uca duc5er 4duct. 4ducts du5el du4g d3ule dum4be
du4n 4dup du4pe d1v d1w d2y 5dyn dy4se dys5p e1a4b e3act ead1 ead5ie ea4ge
ea5ger ea4l eal5er eal3ou eam3er e5and ear3a ear4c ear5es ear4ic ear4il ear5k
ear2t eart3e ea5sp e3ass east3 ea2t eat5en eath3i e5atif e4a3tu ea2v eav3en
eav5i eav5o 2e1b e4bel. e4bels e4ben e4bit e3br e4cad ecan5c ecca5 e1ce ec5essa
ec2i e4cib ec5ificat ec5ifie ec5ify ec3im eci4t e5cite e4clam e4clus e2col
e4comm e4compe e4conc e2cor ec3ora eco5ro e1cr e4crem ec4tan ec4te e1cu e4cul
ec3ula 2e2da 4ed3d e4d1er ede4s 4edi e3dia ed3ib ed3ica ed3im ed1it edi5z 4edo
e4dol edon2 e4dri e4dul ed5ulo ee2c eed3i ee2f eel3i ee4ly ee2m ee4na ee4p1
ee2s4 eest4 ee4ty e5ex e1f e4f3ere 1eff e4fic 5efici efil4 e3fine ef5i5nite
3efit efor5es e4fuse. 4egal eger4 eg5ib eg4ic eg5ing e5git5 eg5n e4go. e4gos
eg1ul e5gur 5egy e1h4 eher4 ei2 e5ic ei5d eig2 ei5gl e3imb e3inf e1ing e5inst
eir4d eit3e ei3th e5ity e1j e4jud ej5udi eki4n ek4la e1la e4la. e4lac elan4d
el5ativ e4law elaxa4 e3lea el5ebra 5elec e4led el3ega e5len e4l1er e1les el2f
el2i e3libe e4l5ic. el3ica e3lier el5igib e5lim e4l3ing e3lio e2lis el5ish
e3liv3 4ella el4lab ello4 e5loc el5og el3op. el2sh el4ta e5lud el5ug e4mac e4mag
e5man em5ana em5b e1me e2mel e4met em3ica emi4e em5igra em1in2 em5ine em3i3ni
e4mis em5ish e5miss em3iz 5emniz emo4g emoni5o em3pi e4mul em5ula emu3n e3my
en5amo e4nant ench4er en3dic e5nea e5nee en3em en5ero en5esi en5est en3etr e3new
en5ics e5nie e5nil e3nio en3ish en3it e5niu 5eniz 4enn 4eno eno4g e4nos en3ov
en4sw ent5age 4enthes en3ua en5uf e3ny. 4en3z e5of eo2g e4oi4 e3ol eop3ar e1or
eo3re eo5rol eos4 e4ot eo4to e5out e5ow e2pa e3pai ep5anc e5pel e3pent ep5etitio
ephe4 e4pli e1po e4prec ep5reca e4pred ep3reh e3pro e4prob ep4sh ep5ti5b e4put
ep5uta e1q equi3l e4q3ui3s er1a era4b 4erand er3ar 4erati. 2erb er4bl er3ch
er4che 2ere. e3real ere5co ere3in er5el. er3emo er5ena er5ence 4erene er3ent
ere4q er5ess er3est eret4 er1h er1i e1ria4 5erick e3rien eri4er er3ine e1rio
4erit er4iu eri4v e4riva er3m4 er4nis 4ernit 5erniz er3no 2ero er5ob e5roc ero4r
er1ou er1s er3set ert3er 4ertl er3tw 4eru eru4t 5erwau e1s4a e4sage. e4sages
es2c e2sca es5can e3scr es5cu e1s2e e2sec es5ecr es5enc e4sert. e4serts e4serva
4esh e3sha esh5en e1si e2sic e2sid es5iden es5igna e2s5im es4i4n esis4te esi4u
e5skin es4mi e2sol es3olu e2son es5ona e1sp es3per es5pira es4pre 2ess es4si4b
estan4 es3tig es5tim 4es2to e3ston 2estr e5stro estruc5 e2sur es5urr es4w eta4b
eten4d e3teo ethod3 et1ic e5tide etin4 eti4no e5tir e5titio et5itiv 4etn et5ona
e3tra e3tre et3ric et5rif et3rog et5ros et3ua et5ym et5z 4eu e5un e3up eu3ro
eus4 eute4 euti5l eu5tr eva2p5 e2vas ev5ast e5vea ev3ell evel3o e5veng even4i
ev1er e5verb e1vi ev3id evi4l e4vin evi4v e5voc e5vu e1wa e4wag e5wee e3wh ewil5
ew3ing e3wit 1exp 5eyc 5eye. eys4 1fa fa3bl fab3r fa4ce 4fag fain4 fall5e 4fa4ma
fam5is 5far far5th fa3ta fa3the 4fato fault5 4f5b 4fd 4fe. feas4 feath3 fe4b
4feca 5fect 2fed fe3li fe4mo fen2d fend5e fer1 5ferr fev4 4f1f f4fes f4fie
f5fin. f2f5is f4fly f2fy 4fh 1fi fi3a 2f3ic. 4f3ical f3ican 4ficate f3icen
fi3cer fic4i 5ficia 5ficie 4fics fi3cu fi5del fight5 fil5i fill5in 4fily 2fin
5fina fin2d5 fi2ne f1in3g fin4n fis4ti f4l2 f5less flin4 flo3re f2ly5 4fm 4fn
1fo 5fon fon4de fon4t fo2r fo5rat for5ay fore5t for4i fort5a fos5 4f5p fra4t
f5rea fres5c fri2 fril4 frol5 2f3s 2ft f4to f2ty 3fu fu5el 4fug fu4min fu5ne
fu3ri fusi4 fus4s 4futa 1fy 1ga gaf4 5gal. 3gali ga3lo 2gam ga5met g5amo gan5is
ga3niz gani5za 4gano gar5n4 gass4 gath3 4gativ 4gaz g3b gd4 2ge. 2ged geez4
gel4in ge5lis ge5liz 4gely 1gen ge4nat ge5niz 4geno 4geny 1geo ge3om g4ery 5gesi
geth5 4geto ge4ty ge4v 4g1g2 g2ge g3ger gglu5 ggo4 gh3in gh5out gh4to 5gi. 1gi4a
gia5r g1ic 5gicia g4ico gien5 5gies. gil4 g3imen 3g4in. gin5ge 5g4ins 5gio 3gir
gir4l g3isl gi4u 5giv 3giz gl2 gla4 glad5i 5glas 1gle gli4b g3lig 3glo glo3r g1m
g4my gn4a g4na. gnet4t g1ni g2nin g4nio g1no g4non 1go 3go. gob5 5goe 3g4o4g
go3is gon2 4g3o3na gondo5 go3ni 5goo go5riz gor5ou 5gos. gov1 g3p 1gr 4grada
g4rai gran2 5graph. g5rapher 5graphic 4graphy 4gray gre4n 4gress. 4grit g4ro
gruf4 gs2 g5ste gth3 gu4a 3guard 2gue 5gui5t 3gun 3gus 4gu4t g3w 1gy 2g5y3n
gy5ra h3ab4l hach4 hae4m hae4t h5agu ha3la hala3m ha4m han4ci han4cy 5hand.
han4g hang5er hang5o h5a5niz han4k han4te hap3l hap5t ha3ran ha5ras har2d hard3e
har4le harp5en har5ter has5s haun4 5haz haz3a h1b 1head 3hear he4can h5ecat h4ed
he5do5 he3l4i hel4lis hel4ly h5elo hem4p he2n hena4 hen5at heo5r hep5 h4era
hera3p her4ba here5a h3ern h5erou h3ery h1es he2s5p he4t het4ed heu4 h1f h1h
hi5an hi4co high5 h4il2 himer4 h4ina hion4e hi4p hir4l hi3ro hir4p hir4r his3el
his4s hith5er hi2v 4hk 4h1l4 hlan4 h2lo hlo3ri 4h1m hmet4 2h1n h5odiz h5ods ho4g
hoge4 hol5ar 3hol4e ho4ma home3 hon4a ho5ny 3hood hoon4 hor5at ho5ris hort3e
ho5ru hos4e ho5sen hos1p 1hous house3 hov5el 4h5p 4hr4 hree5 hro5niz hro3po
4h1s2 h4sh h4tar ht1en ht5es h4ty hu4g hu4min hun5ke hun4t hus3t4 hu4t h1w
h4wart hy3pe hy3ph hy2s 2i1a i2al iam4 iam5ete i2an 4ianc ian3i 4ian4t ia5pe
iass4 i4ativ ia4tric i4atu ibe4 ib3era ib5ert ib5ia ib3in ib5it. ib5ite i1bl
ib3li i5bo i1br i2b5ri i5bun 4icam 5icap 4icar i4car. i4cara icas5 i4cay iccu4
4iceo 4ich 2ici i5cid ic5ina i2cip ic3ipa i4cly i2c5oc 4i1cr 5icra i4cry ic4te
ictu2 ic4t3ua ic3ula ic4um ic5uo i3cur 2id i4dai id5anc id5d ide3al ide4s i2di
id5ian idi4ar i5die id3io idi5ou id1it id5iu i3dle i4dom id3ow i4dr i2du id5uo
2ie4 ied4e 5ie5ga ield3 ien5a4 ien4e i5enn i3enti i1er. i3esc i1est i3et 4if.
if5ero iff5en if4fr 4ific. i3fie i3fl 4ift 2ig iga5b ig3era ight3i 4igi i3gib
ig3il ig3in ig3it i4g4l i2go ig3or ig5ot i5gre igu5i ig1ur i3h 4i5i4 i3j 4ik
i1la il3a4b i4lade i2l5am ila5ra i3leg il1er ilev4 il5f il1i il3ia il2ib il3io
il4ist 2ilit il2iz ill5ab 4iln il3oq il4ty il5ur il3v i4mag im3age ima5ry
imenta5r 4imet im1i im5ida imi5le i5mini 4imit im4ni i3mon i2mu im3ula 2in.
i4n3au 4inav incel4 in3cer 4ind in5dling 2ine i3nee iner4ar i5ness 4inga 4inge
in5gen 4ingi in5gling 4ingo 4ingu 2ini i5ni. i4nia in3io in1is i5nite. 5initio
in3ity 4ink 4inl 2inn 2i1no i4no4c ino4s i4not 2ins in3se insur5a 2int. 2in4th
in1u i5nus 4iny 2io 4io. ioge4 io2gr i1ol io4m ion3at ion4ery ion3i io5ph ior3i
i4os io5th i5oti io4to i4our 2ip ipe4 iphras4 ip3i ip4ic ip4re4 ip3ul i3qua
iq5uef iq3uid iq3ui3t 4ir i1ra ira4b i4rac ird5e ire4de i4ref i4rel4 i4res ir5gi
ir1i iri5de ir4is iri3tu 5i5r2iz ir4min iro4g 5iron. ir5ul 2is. is5ag is3ar
isas5 2is1c is3ch 4ise is3er 3isf is5han is3hon ish5op is3ib isi4d i5sis is5itiv
4is4k islan4 4isms i2so iso5mer is1p is2pi is4py 4is1s is4sal issen4 is4ses
is4ta. is1te is1ti ist4ly 4istral i2su is5us 4ita. ita4bi i4tag 4ita5m i3tan
i3tat 2ite it3era i5teri it4es 2ith i1ti 4itia 4i2tic it3ica 5i5tick it3ig
it5ill i2tim 2itio 4itis i4tism i2t5o5m 4iton i4tram it5ry 4itt it3uat i5tud
it3ul 4itz. i1u 2iv iv3ell iv3en. i4v3er. i4vers. iv5il. iv5io iv1it i5vore
iv3o3ro i4v3ot 4i5w ix4o 4iy 4izar izi4 5izont 5ja jac4q ja4p 1je jer5s 4jestie
4jesty jew3 jo4p 5judg 3ka. k3ab k5ag kais4 kal4 k1b k2ed 1kee ke4g ke5li k3en4d
k1er kes4 k3est. ke4ty k3f kh4 k1i 5ki. 5k2ic k4ill kilo5 k4im k4in. kin4de
k5iness kin4g ki4p kis4 k5ish kk4 k1l 4kley 4kly k1m k5nes 1k2no ko5r kosh4 k3ou
kro5n 4k1s2 k4sc ks4l k4sy k5t k1w lab3ic l4abo laci4 l4ade la3dy lag4n lam3o
3land lan4dl lan5et lan4te lar4g lar3i las4e la5tan 4lateli 4lativ 4lav la4v4a
2l1b lbin4 4l1c2 lce4 l3ci 2ld l2de ld4ere ld4eri ldi4 ld5is l3dr l4dri le2a
le4bi left5 5leg. 5legg le4mat lem5atic 4len. 3lenc 5lene. 1lent le3ph le4pr
lera5b ler4e 3lerg 3l4eri l4ero les2 le5sco 5lesq 3less 5less. l3eva lev4er.
lev4era lev4ers 3ley 4leye 2lf l5fr 4l1g4 l5ga lgar3 l4ges lgo3 2l3h li4ag li2am
liar5iz li4as li4ato li5bi 5licio li4cor 4lics 4lict. l4icu l3icy l3ida lid5er
3lidi lif3er l4iff li4fl 5ligate 3ligh li4gra 3lik 4l4i4l lim4bl lim3i li4mo
l4im4p l4ina 1l4ine lin3ea lin3i link5er li5og 4l4iq lis4p l1it l2it. 5litica
l5i5tics liv3er l1iz 4lj lka3 l3kal lka4t l1l l4law l2le l5lea l3lec l3leg l3lel
l3le4n l3le4t ll2i l2lin4 l5lina ll4o lloqui5 ll5out l5low 2lm l5met lm3ing
l4mod lmon4 2l1n2 3lo. lob5al lo4ci 4lof 3logic l5ogo 3logu lom3er 5long lon4i
l3o3niz lood5 5lope. lop3i l3opm lora4 lo4rato lo5rie lor5ou 5los. los5et
5losophiz 5losophy los4t lo4ta loun5d 2lout 4lov 2lp lpa5b l3pha l5phi lp5ing
l3pit l4pl l5pr 4l1r 2l1s2 l4sc l2se l4sie 4lt lt5ag ltane5 l1te lten4 ltera4
lth3i l5ties. ltis4 l1tr ltu2 ltur3a lu5a lu3br luch4 lu3ci lu3en luf4 lu5id
lu4ma 5lumi l5umn. 5lumnia lu3o luo3r 4lup luss4 lus3te 1lut l5ven l5vet4 2l1w
1ly 4lya 4lyb ly5me ly3no 2lys4 l5yse 1ma 2mab ma2ca ma5chine ma4cl mag5in 5magn
2mah maid5 4mald ma3lig ma5lin mal4li mal4ty 5mania man5is man3iz 4map ma5rine.
ma5riz mar4ly mar3v ma5sce mas4e mas1t 5mate math3 ma3tis 4matiza 4m1b mba4t5
m5bil m4b3ing mbi4v 4m5c 4me. 2med 4med. 5media me3die m5e5dy me2g mel5on mel4t
me2m mem1o3 1men men4a men5ac men4de 4mene men4i mens4 mensu5 3ment men4te me5on
m5ersa 2mes 3mesti me4ta met3al me1te me5thi m4etr 5metric me5trie me3try me4v
4m1f 2mh 5mi. mi3a mid4a mid4g mig4 3milia m5i5lie m4ill min4a 3mind m5inee
m4ingl min5gli m5ingly min4t m4inu miot4 m2is mis4er. mis5l mis4ti m5istry 4mith
m2iz 4mk 4m1l m1m mma5ry 4m1n mn4a m4nin mn4o 1mo 4mocr 5mocratiz mo2d1 mo4go
mois2 moi5se 4mok mo5lest mo3me mon5et mon5ge moni3a mon4ism mon4ist mo3niz
monol4 mo3ny. mo2r 4mora. mos2 mo5sey mo3sp moth3 m5ouf 3mous mo2v 4m1p mpara5
mpa5rab mpar5i m3pet mphas4 m2pi mpi4a mp5ies m4p1in m5pir mp5is mpo3ri mpos5ite
m4pous mpov5 mp4tr m2py 4m3r 4m1s2 m4sh m5si 4mt 1mu mula5r4 5mult multi3 3mum
mun2 4mup mu4u 4mw 1na 2n1a2b n4abu 4nac. na4ca n5act nag5er. nak4 na4li na5lia
4nalt na5mit n2an nanci4 nan4it nank4 nar3c 4nare nar3i nar4l n5arm n4as nas4c
nas5ti n2at na3tal nato5miz n2au nau3se 3naut nav4e 4n1b4 ncar5 n4ces. n3cha
n5cheo n5chil n3chis nc1in nc4it ncour5a n1cr n1cu n4dai n5dan n1de nd5est.
ndi4b n5d2if n1dit n3diz n5duc ndu4r nd2we 2ne. n3ear ne2b neb3u ne2c 5neck 2ned
ne4gat neg5ativ 5nege ne4la nel5iz ne5mi ne4mo 1nen 4nene 3neo ne4po ne2q n1er
nera5b n4erar n2ere n4er5i ner4r 1nes 2nes. 4nesp 2nest 4nesw 3netic ne4v n5eve
ne4w n3f n4gab n3gel nge4n4e n5gere n3geri ng5ha n3gib ng1in n5git n4gla ngov4
ng5sh n1gu n4gum n2gy 4n1h4 nha4 nhab3 nhe4 3n4ia ni3an ni4ap ni3ba ni4bl ni4d
ni5di ni4er ni2fi ni5ficat n5igr nik4 n1im ni3miz n1in 5nine. nin4g ni4o 5nis.
nis4ta n2it n4ith 3nitio n3itor ni3tr n1j 4nk2 n5kero n3ket nk3in n1kl 4n1l n5m
nme4 nmet4 4n1n2 nne4 nni3al nni4v nob4l no3ble n5ocl 4n3o2d 3noe 4nog noge4
nois5i no5l4i 5nologis 3nomic n5o5miz no4mo no3my no4n non4ag non5i n5oniz 4nop
5nop5o5li nor5ab no4rary 4nosc nos4e nos5t no5ta 1nou 3noun nov3el3 nowl3 n1p4
npi4 npre4c n1q n1r nru4 2n1s2 ns5ab nsati4 ns4c n2se n4s3es nsid1 nsig4 n2sl
ns3m n4soc ns4pe n5spi nsta5bl n1t nta4b nter3s nt2i n5tib nti4er nti2f n3tine
n4t3ing nti4p ntrol5li nt4s ntu3me nu1a nu4d nu5en nuf4fe n3uin 3nu3it n4um
nu1me n5umi 3nu4n n3uo nu3tr n1v2 n1w4 nym4 nyp4 4nz n3za 4oa oad3 o5a5les oard3
oas4e oast5e oat5i ob3a3b o5bar obe4l o1bi o2bin ob5ing o3br ob3ul o1ce och4
o3chet ocif3 o4cil o4clam o4cod oc3rac oc5ratiz ocre3 5ocrit octor5a oc3ula
o5cure od5ded od3ic odi3o o2do4 odor3 od5uct. od5ucts o4el o5eng o3er oe4ta o3ev
o2fi of5ite ofit4t o2g5a5r og5ativ o4gato o1ge o5gene o5geo o4ger o3gie 1o1gis
og3it o4gl o5g2ly 3ogniz o4gro ogu5i 1ogy 2ogyn o1h2 ohab5 oi2 oic3es oi3der
oiff4 oig4 oi5let o3ing oint5er o5ism oi5son oist5en oi3ter o5j 2ok o3ken ok5ie
o1la o4lan olass4 ol2d old1e ol3er o3lesc o3let ol4fi ol2i o3lia o3lice ol5id.
o3li4f o5lil ol3ing o5lio o5lis. ol3ish o5lite o5litio o5liv olli4e ol5ogiz
olo4r ol5pl ol2t ol3ub ol3ume ol3un o5lus ol2v o2ly om5ah oma5l om5atiz om2be
om4bl o2me om3ena om5erse o4met om5etry o3mia om3ic. om3ica o5mid om1in o5mini
5ommend omo4ge o4mon om3pi ompro5 o2n on1a on4ac o3nan on1c 3oncil 2ond on5do
o3nen on5est on4gu on1ic o3nio on1is o5niu on3key on4odi on3omy on3s onspi4
onspir5a onsu4 onten4 on3t4i ontif5 on5um onva5 oo2 ood5e ood5i oo4k oop3i o3ord
oost5 o2pa ope5d op1er 3opera 4operag 2oph o5phan o5pher op3ing o3pit o5pon
o4posi o1pr op1u opy5 o1q o1ra o5ra. o4r3ag or5aliz or5ange ore5a o5real or3ei
ore5sh or5est. orew4 or4gu 4o5ria or3ica o5ril or1in o1rio or3ity o3riu or2mi
orn2e o5rof or3oug or5pe 3orrh or4se ors5en orst4 or3thi or3thy or4ty o5rum o1ry
os3al os2c os4ce o3scop 4oscopi o5scr os4i4e os5itiv os3ito os3ity osi4u os4l
o2so os4pa os4po os2ta o5stati os5til os5tit o4tan otele4g ot3er. ot5ers o4tes
4oth oth5esi oth3i4 ot3ic. ot5ica o3tice o3tif o3tis oto5s ou2 ou3bl ouch5i
ou5et ou4l ounc5er oun2d ou5v ov4en over4ne over3s ov4ert o3vis oviti4 o5v4ol
ow3der ow3el ow5est ow1i own5i o4wo oy1a 1pa pa4ca pa4ce pac4t p4ad 5pagan
p3agat p4ai pain4 p4al pan4a pan3el pan4ty pa3ny pa1p pa4pu para5bl par5age
par5di 3pare par5el p4a4ri par4is pa2te pa5ter 5pathic pa5thy pa4tric pav4 3pay
4p1b pd4 4pe. 3pe4a pear4l pe2c 2p2ed 3pede 3pedi pedia4 ped4ic p4ee pee4d pek4
pe4la peli4e pe4nan p4enc pen4th pe5on p4era. pera5bl p4erag p4eri peri5st
per4mal perme5 p4ern per3o per3ti pe5ru per1v pe2t pe5ten pe5tiz 4pf 4pg 4ph.
phar5i phe3no ph4er ph4es. ph1ic 5phie ph5ing 5phisti 3phiz ph2l 3phob 3phone
5phoni pho4r 4phs ph3t 5phu 1phy pi3a pian4 pi4cie pi4cy p4id p5ida pi3de 5pidi
3piec pi3en pi4grap pi3lo pi2n p4in. pind4 p4ino 3pi1o pion4 p3ith pi5tha pi2tu
2p3k2 1p2l2 3plan plas5t pli3a pli5er 4plig pli4n ploi4 plu4m plum4b 4p1m 2p3n
po4c 5pod. po5em po3et5 5po4g poin2 5point poly5t po4ni po4p 1p4or po4ry 1pos
pos1s p4ot po4ta 5poun 4p1p ppa5ra p2pe p4ped p5pel p3pen p3per p3pet ppo5site
pr2 pray4e 5preci pre5co pre3em pref5ac pre4la pre3r p3rese 3press pre5ten pre3v
5pri4e prin4t3 pri4s pris3o p3roca prof5it pro3l pros3e pro1t 2p1s2 p2se ps4h
p4sib 2p1t pt5a4b p2te p2th pti3m ptu4r p4tw pub3 pue4 puf4 pul3c pu4m pu2n
pur4r 5pus pu2t 5pute put3er pu3tr put4ted put4tin p3w qu2 qua5v 2que. 3quer
3quet 2rab ra3bi rach4e r5acl raf5fi raf4t r2ai ra4lo ram3et r2ami rane5o ran4ge
r4ani ra5no rap3er 3raphy rar5c rare4 rar5ef 4raril r2as ration4 rau4t ra5vai
rav3el ra5zie r1b r4bab r4bag rbi2 rbi4f r2bin r5bine rb5ing. rb4o r1c r2ce
rcen4 r3cha rch4er r4ci4b rc4it rcum3 r4dal rd2i rdi4a rdi4er rdin4 rd3ing 2re.
re1al re3an re5arr 5reav re4aw r5ebrat rec5oll rec5ompe re4cre 2r2ed re1de
re3dis red5it re4fac re2fe re5fer. re3fi re4fy reg3is re5it re1li re5lu r4en4ta
ren4te re1o re5pin re4posi re1pu r1er4 r4eri rero4 re5ru r4es. re4spi ress5ib
res2t re5stal re3str re4ter re4ti4z re3tri reu2 re5uti rev2 re4val rev3el
r5ev5er. re5vers re5vert re5vil rev5olu re4wh r1f rfu4 r4fy rg2 rg3er r3get
r3gic rgi4n rg3ing r5gis r5git r1gl rgo4n r3gu rh4 4rh. 4rhal ri3a ria4b ri4ag
r4ib rib3a ric5as r4ice 4rici 5ricid ri4cie r4ico rid5er ri3enc ri3ent ri1er
ri5et rig5an 5rigi ril3iz 5riman rim5i 3rimo rim4pe r2ina 5rina. rin4d rin4e
rin4g ri1o 5riph riph5e ri2pl rip5lic r4iq r2is r4is. ris4c r3ish ris4p ri3ta3b
r5ited. rit5er. rit5ers rit3ic ri2tu rit5ur riv5el riv3et riv3i r3j r3ket rk4le
rk4lin r1l rle4 r2led r4lig r4lis rl5ish r3lo4 r1m rma5c r2me r3men rm5ers
rm3ing r4ming. r4mio r3mit r4my r4nar r3nel r4ner r5net r3ney r5nic r1nis4 r3nit
r3niv rno4 r4nou r3nu rob3l r2oc ro3cr ro4e ro1fe ro5fil rok2 ro5ker 5role.
rom5ete rom4i rom4p ron4al ron4e ro5n4is ron4ta 1room 5root ro3pel rop3ic ror3i
ro5ro ros5per ros4s ro4the ro4ty ro4va rov5el rox5 r1p r4pea r5pent rp5er. r3pet
rp4h4 rp3ing r3po r1r4 rre4c rre4f r4reo rre4st rri4o rri4v rron4 rros4 rrys4
4rs2 r1sa rsa5ti rs4c r2se r3sec rse4cr rs5er. rs3es rse5v2 r1sh r5sha r1si
r4si4b rson3 r1sp r5sw rtach4 r4tag r3teb rten4d rte5o r1ti rt5ib rti4d r4tier
r3tig rtil3i rtil4l r4tily r4tist r4tiv r3tri rtroph4 rt4sh ru3a ru3e4l ru3en
ru4gl ru3in rum3pl ru2n runk5 run4ty r5usc ruti5n rv4e rvel4i r3ven rv5er.
r5vest r3vey r3vic rvi4v r3vo r1w ry4c 5rynge ry3t sa2 2s1ab 5sack sac3ri s3act
5sai salar4 sal4m sa5lo sal4t 3sanc san4de s1ap sa5ta 5sa3tio sat3u sau4 sa5vor
5saw 4s5b scan4t5 sca4p scav5 s4ced 4scei s4ces sch2 s4cho 3s4cie 5scin4d scle5
s4cli scof4 4scopy scour5a s1cu 4s5d 4se. se4a seas4 sea5w se2c3o 3sect 4s4ed
se4d4e s5edl se2g seg3r 5sei se1le 5self 5selv 4seme se4mol sen5at 4senc sen4d
s5ened sen5g s5enin 4sentd 4sentl sep3a3 4s1er. s4erl ser4o 4servo s1e4s se5sh
ses5t 5se5um 5sev sev3en sew4i 5sex 4s3f 2s3g s2h 2sh. sh1er 5shev sh1in sh3io
3ship shiv5 sho4 sh5old shon3 shor4 short5 4shw si1b s5icc 3side. 5sides 5sidi
si5diz 4signa sil4e 4sily 2s1in s2ina 5sine. s3ing 1sio 5sion sion5a si2r sir5a
1sis 3sitio 5siu 1siv 5siz sk2 4ske s3ket sk5ine sk5ing s1l2 s3lat s2le slith5
2s1m s3ma small3 sman3 smel4 s5men 5smith smol5d4 s1n4 1so so4ce soft3 so4lab
sol3d2 so3lic 5solv 3som 3s4on. sona4 son4g s4op 5sophic s5ophiz s5ophy sor5c
sor5d 4sov so5vi 2spa 5spai spa4n spen4d 2s5peo 2sper s2phe 3spher spho5 spil4
sp5ing 4spio s4ply s4pon spor4 4spot squal4l s1r 2ss s1sa ssas3 s2s5c s3sel
s5seng s4ses. s5set s1si s4sie ssi4er ss5ily s4sl ss4li s4sn sspend4 ss2t ssur5a
ss5w 2st. s2tag s2tal stam4i 5stand s4ta4p 5stat. s4ted stern5i s5tero ste2w
stew5a s3the st2i s4ti. s5tia s1tic 5stick s4tie s3tif st3ing 5stir s1tle 5stock
stom3a 5stone s4top 3store st4r s4trad 5stratu s4tray s4trid 4stry 4st3w s2ty
1su su1al su4b3 su2g3 su5is suit3 s4ul su2m sum3i su2n su2r 4sv sw2 4swo s4y
4syc 3syl syn5o sy5rin 1ta 3ta. 2tab ta5bles 5taboliz 4taci ta5do 4taf4 tai5lo
ta2l ta5la tal5en tal3i 4talk tal4lis ta5log ta5mo tan4de tanta3 ta5per ta5pl
tar4a 4tarc 4tare ta3riz tas4e ta5sy 4tatic ta4tur taun4 tav4 2taw tax4is 2t1b
4tc t4ch tch5et 4t1d 4te. tead4i 4teat tece4 5tect 2t1ed te5di 1tee teg4 te5ger
te5gi 3tel. teli4 5tels te2ma2 tem3at 3tenan 3tenc 3tend 4tenes 1tent ten4tag
1teo te4p te5pe ter3c 5ter3d 1teri ter5ies ter3is teri5za 5ternit ter5v 4tes.
4tess t3ess. teth5e 3teu 3tex 4tey 2t1f 4t1g 2th. than4 th2e 4thea th3eas the5at
the3is 3thet th5ic. th5ica 4thil 5think 4thl th5ode 5thodic 4thoo thor5it
tho5riz 2ths 1tia ti4ab ti4ato 2ti2b 4tick t4ico t4ic1u 5tidi 3tien tif2 ti5fy
2tig 5tigu till5in 1tim 4timp tim5ul 2t1in t2ina 3tine. 3tini 1tio ti5oc tion5ee
5tiq ti3sa 3tise tis4m ti5so tis4p 5tistica ti3tl ti4u 1tiv tiv4a 1tiz ti3za
ti3zen 2tl t5la tlan4 3tle. 3tled 3tles. t5let. t5lo 4t1m tme4 2t1n2 1to to3b
to5crat 4todo 2tof to2gr to5ic to2ma tom4b to3my ton4ali to3nat 4tono 4tony
to2ra to3rie tor5iz tos2 5tour 4tout to3war 4t1p 1tra tra3b tra5ch traci4
trac4it trac4te tras4 tra5ven trav5es5 tre5f tre4m trem5i 5tria tri5ces 5tricia
4trics 2trim tri4v tro5mi tron5i 4trony tro5phe tro3sp tro3v tru5i trus4 4t1s2
t4sc tsh4 t4sw 4t3t2 t4tes t5to ttu4 1tu tu1a tu3ar tu4bi tud2 4tue 4tuf4 5tu3i
3tum tu4nis 2t3up. 3ture 5turi tur3is tur5o tu5ry 3tus 4tv tw4 4t1wa twis4 4two
1ty 4tya 2tyl type3 ty5ph 4tz tz4e 4uab uac4 ua5na uan4i uar5ant uar2d uar3i
uar3t u1at uav4 ub4e u4bel u3ber u4bero u1b4i u4b5ing u3ble. u3ca uci4b uc4it
ucle3 u3cr u3cu u4cy ud5d ud3er ud5est udev4 u1dic ud3ied ud3ies ud5is u5dit
u4don ud4si u4du u4ene uens4 uen4te uer4il 3ufa u3fl ugh3en ug5in 2ui2 uil5iz
ui4n u1ing uir4m uita4 uiv3 uiv4er. u5j 4uk u1la ula5b u5lati ulch4 5ulche
ul3der ul4e u1len ul4gi ul2i u5lia ul3ing ul5ish ul4lar ul4li4b ul4lis 4ul3m
u1l4o 4uls uls5es ul1ti ultra3 4ultu u3lu ul5ul ul5v um5ab um4bi um4bly u1mi
u4m3ing umor5o um2p unat4 u2ne un4er u1ni un4im u2nin un5ish uni3v un3s4 un4sw
unt3ab un4ter. un4tes unu4 un5y un5z u4ors u5os u1ou u1pe uper5s u5pia up3ing
u3pl up3p upport5 upt5ib uptu4 u1ra 4ura. u4rag u4ras ur4be urc4 ur1d ure5at
ur4fer ur4fr u3rif uri4fic ur1in u3rio u1rit ur3iz ur2l url5ing. ur4no uros4
ur4pe ur4pi urs5er ur5tes ur3the urti4 ur4tie u3ru 2us u5sad u5san us4ap usc2
us3ci use5a u5sia u3sic us4lin us1p us5sl us5tere us1tr u2su usur4 uta4b u3tat
4ute. 4utel 4uten uten4i 4u1t2i uti5liz u3tine ut3ing ution5a u4tis 5u5tiz u4t1l
ut5of uto5g uto5matic u5ton u4tou uts4 u3u uu4m u1v2 uxu3 uz4e 1va 5va. 2v1a4b
vac5il vac3u vag4 va4ge va5lie val5o val1u va5mo va5niz va5pi var5ied 3vat 4ve.
4ved veg3 v3el. vel3li ve4lo v4ely ven3om v5enue v4erd 5vere. v4erel v3eren
ver5enc v4eres ver3ie vermi4n 3verse ver3th v4e2s 4ves. ves4te ve4te vet3er
ve4ty vi5ali 5vian 5vide. 5vided 4v3iden 5vides 5vidi v3if vi5gn vik4 2vil
5vilit v3i3liz v1in 4vi4na v2inc vin5d 4ving vio3l v3io4r vi1ou vi4p vi5ro
vis3it vi3so vi3su 4viti vit3r 4vity 3viv 5vo. voi4 3vok vo4la v5ole 5volt 3volv
vom5i vor5ab vori4 vo4ry vo4ta 4votee 4vv4 v4y w5abl 2wac wa5ger wag5o wait5
w5al. wam4 war4t was4t wa1te wa5ver w1b wea5rie weath3 wed4n weet3 wee5v wel4l
w1er west3 w3ev whi4 wi2 wil2 will5in win4de win4g wir4 3wise with3 wiz5 w4k
wl4es wl3in w4no 1wo2 wom1 wo5ven w5p wra4 wri4 writa4 w3sh ws4l ws4pe w5s4t 4wt
wy4 x1a xac5e x4ago xam3 x4ap xas5 x3c2 x1e xe4cuto x2ed xer4i xe5ro x1h xhi2
xhil5 xhu4 x3i xi5a xi5c xi5di x4ime xi5miz x3o x4ob x3p xpan4d xpecto5 xpe3d
x1t2 x3ti x1u xu3a xx4 y5ac 3yar4 y5at y1b y1c y2ce yc5er y3ch ych4e ycom4 ycot4
y1d y5ee y1er y4erf yes4 ye4t y5gi 4y3h y1i y3la ylla5bl y3lo y5lu ymbol5 yme4
ympa3 yn3chr yn5d yn5g yn5ic 5ynx y1o4 yo5d y4o5g yom4 yo5net y4ons y4os y4ped
yper5 yp3i y3po y4poc yp2ta y5pu yra5m yr5ia y3ro yr4r ys4c y3s2e ys3ica ys3io
3ysis y4so yss4 ys1t ys3ta ysur4 y3thin yt3ic y1w za1 z5a2b zar2 4zb 2ze ze4n
ze4p z1er ze3ro zet4 2z1i z4il z4is 5zl 4zm 1zo zo4m zo5ol zte4 4z1z2 z4zy
"""
# Extra patterns, from ushyphmax.tex, dated 2005-05-30.
# Copyright (C) 1990, 2004, 2005 <NAME>.
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
#
# These patterns are based on the Hyphenation Exception Log
# published in TUGboat, Volume 10 (1989), No. 3, pp. 337-341,
# and a large number of incorrectly hyphenated words not yet published.
"""
.con5gr .de5riva .dri5v4 .eth1y6l1 .eu4ler .ev2 .ever5si5b .ga4s1om1 .ge4ome
.ge5ot1 .he3mo1 .he3p6a .he3roe .in5u2t .kil2n3i .ko6r1te1 .le6ices .me4ga1l
.met4ala .mim5i2c1 .mi1s4ers .ne6o3f .noe1th .non1e2m .poly1s .post1am .pre1am
.rav5en1o .semi5 .sem4ic .semid6 .semip4 .semir4 .sem6is4 .semiv4 .sph6in1
.spin1o .ta5pes1tr .te3legr .to6pog .to2q .un3at5t .un5err5 .vi2c3ar .we2b1l
.re1e4c a5bolic a2cabl af6fish am1en3ta5b anal6ys ano5a2c ans5gr ans3v anti1d
an3ti1n2 anti1re a4pe5able ar3che5t ar2range as5ymptot ath3er1o1s at6tes.
augh4tl au5li5f av3iou back2er. ba6r1onie ba1thy bbi4t be2vie bi5d2if bil2lab
bio5m bi1orb bio1rh b1i3tive blan2d1 blin2d1 blon2d2 bor1no5 bo2t1u1l brus4q
bus6i2er bus6i2es buss4ing but2ed. but4ted cad5e1m cat1a1s2 4chs. chs3hu chie5vo
cig3a3r cin2q cle4ar co6ph1o3n cous2ti cri3tie croc1o1d cro5e2co c2tro3me6c
1cu2r1ance 2d3alone data1b dd5a5b d2d5ib de4als. de5clar1 de2c5lina de3fin3iti
de2mos des3ic de2tic dic1aid dif5fra 3di1methy di2ren di2rer 2d1lead 2d1li2e
3do5word dren1a5l drif2t1a d1ri3pleg5 drom3e5d d3tab du2al. du1op1o1l ea4n3ies
e3chas edg1l ed1uling eli2t1is e1loa en1dix eo3grap 1e6p3i3neph1 e2r3i4an.
e3spac6i eth1y6l1ene 5eu2clid1 feb1rua fermi1o 3fich fit5ted. fla1g6el flow2er.
3fluor gen2cy. ge3o1d ght1we g1lead get2ic. 4g1lish 5glo5bin 1g2nac gnet1ism
gno5mo g2n1or. g2noresp 2g1o4n3i1za graph5er. griev1 g1utan hair1s ha2p3ar5r
hatch1 hex2a3 hite3sid h3i5pel1a4 hnau3z ho6r1ic. h2t1eou hypo1tha id4ios
ifac1et ign4it ignit1er i4jk im3ped3a infra1s2 i5nitely. irre6v3oc i1tesima
ith5i2l itin5er5ar janu3a japan1e2s je1re1m 1ke6ling 1ki5netic 1kovian k3sha
la4c3i5e lai6n3ess lar5ce1n l3chai l3chil6d1 lead6er. lea4s1a 1lec3ta6b
le3g6en2dre 1le1noid lith1o5g ll1fl l2l3ish l5mo3nell lo1bot1o1 lo2ges. load4ed.
load6er. l3tea lth5i2ly lue1p 1lunk3er 1lum5bia. 3lyg1a1mi ly5styr ma1la1p m2an.
man3u1sc mar1gin1 medi2c med3i3cin medio6c1 me3gran3 m2en. 3mi3da5b 3milita
mil2l1ag mil5li5li mi6n3is. mi1n2ut1er mi1n2ut1est m3ma1b 5maph1ro1 5moc1ra1t
mo5e2las mol1e5c mon4ey1l mono3ch mo4no1en moro6n5is mono1s6 moth4et2 m1ou3sin
m5shack2 mu2dro mul2ti5u n3ar4chs. n3ch2es1t ne3back 2ne1ski n1dieck nd3thr
nfi6n3ites 4n5i4an. nge5nes ng1ho ng1spr nk3rup n5less 5noc3er1os nom1a6l
nom5e1no n1o1mist non1eq non1i4so 5nop1oly. no1vemb ns5ceiv ns4moo ntre1p
obli2g1 o3chas odel3li odit1ic oerst2 oke1st o3les3ter oli3gop1o1 o1lo3n4om
o3mecha6 onom1ic o3norma o3no2t1o3n o3nou op1ism. or4tho3ni4t orth1ri or5tively
o4s3pher o5test1er o5tes3tor oth3e1o1s ou3ba3do o6v3i4an. oxi6d1ic pal6mat
parag6ra4 par4a1le param4 para3me pee2v1 phi2l3ant phi5lat1e3l pi2c1a3d pli2c1ab
pli5nar poin3ca 1pole. poly1e po3lyph1ono 1prema3c pre1neu pres2pli pro2cess
proc3i3ty. pro2g1e 3pseu2d pseu3d6o3d2 pseu3d6o3f2 pto3mat4 p5trol3 pu5bes5c
quain2t1e qu6a3si3 quasir6 quasis6 quin5tes5s qui3v4ar r1abolic 3rab1o1loi
ra3chu r3a3dig radi1o6g r2amen 3ra4m5e1triz ra3mou ra5n2has ra1or r3bin1ge
re2c3i1pr rec5t6ang re4t1ribu r3ial. riv1o1l 6rk. rk1ho r1krau 6rks. r5le5qu
ro1bot1 ro5e2las ro5epide1 ro3mesh ro1tron r3pau5li rse1rad1i r1thou r1treu
r1veil rz1sc sales3c sales5w 5sa3par5il sca6p1er sca2t1ol s4chitz schro1ding1
1sci2utt scrap4er. scy4th1 sem1a1ph se3mes1t se1mi6t5ic sep3temb shoe1st sid2ed.
side5st side5sw si5resid sky1sc 3slova1kia 3s2og1a1my so2lute 3s2pace 1s2pacin
spe3cio spher1o spi2c1il spokes5w sports3c sports3w s3qui3to s2s1a3chu1 ss3hat
s2s3i4an. s5sign5a3b 1s2tamp s2t1ant5shi star3tli sta1ti st5b 1stor1ab strat1a1g
strib5ut st5scr stu1pi4d1 styl1is su2per1e6 1sync 1syth3i2 swimm6 5tab1o1lism
ta3gon. talk1a5 t1a1min t6ap6ath 5tar2rh tch1c tch3i1er t1cr teach4er. tele2g
tele1r6o 3ter1gei ter2ic. t3ess2es tha4l1am tho3don th1o5gen1i tho1k2er thy4l1an
thy3sc 2t3i4an. ti2n3o1m t1li2er tolo2gy tot3ic trai3tor1 tra1vers travers3a3b
treach1e tr4ial. 3tro1le1um trof4ic. tro3fit tro1p2is 3trop1o5les 3trop1o5lis
t1ro1pol3it tsch3ie ttrib1ut1 turn3ar t1wh ty2p5al ua3drati uad1ratu u5do3ny
uea1m u2r1al. uri4al. us2er. v1ativ v1oir5du1 va6guer vaude3v 1verely. v1er1eig
ves1tite vi1vip3a3r voice1p waste3w6a2 wave1g4 w3c week1n wide5sp wo4k1en
wrap3aro writ6er. x1q xquis3 y5che3d ym5e5try y1stro yes5ter1y z3ian. z3o1phr
z2z3w
""")
exceptions = """
as-so-ciate as-so-ciates dec-li-na-tion oblig-a-tory phil-an-thropic present
presents project projects reci-procity re-cog-ni-zance ref-or-ma-tion
ret-ri-bu-tion ta-ble
"""
hyphenator = Hyphenator(patterns, exceptions)
hyphenate_word = hyphenator.hyphenate_word
del patterns
del exceptions
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
for word in sys.argv[1:]:
print '-'.join(hyphenate_word(word))
else:
import doctest
doctest.testmod(verbose=True)
```
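A minimal usage sketch for the hyphenation module above (hypothetical caller code; it assumes the file is saved and importable as `hyphenate`). `hyphenate_word` returns the word split at its legal break points, which can then be joined with soft hyphens for display.
```python
# Sketch only: the module above is assumed to be importable as `hyphenate`.
from hyphenate import hyphenate_word

def soft_hyphenate(word):
    # Join the fragments with U+00AD (soft hyphen) so long words can wrap in HTML.
    return u'\u00ad'.join(hyphenate_word(word))

print '-'.join(hyphenate_word('hyphenation'))  # e.g. hy-phen-ation
```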
#### File: bots/muellerreportbot/parse.py
```python
import os
import sys
import errno
import argparse
import re
from pprint import pprint
import json
#from nltk.tokenize import sent_tokenize
# Logging
import logging
from logging import handlers
LOGGER = logging.getLogger(__name__)
SH = logging.StreamHandler()
FH = logging.handlers.RotatingFileHandler("log.log", maxBytes=5 * 1000000, backupCount = 5)
SH.setFormatter(logging.Formatter("%(asctime)s:%(levelname)s:%(message)s"))
FH.setFormatter(logging.Formatter("%(asctime)s:%(lineno)s:%(funcName)s:%(levelname)s:%(message)s"))
LOGGER.setLevel(logging.DEBUG)
LOGGER.addHandler(SH)
LOGGER.addHandler(FH)
DEFAULT_FILENAME="MuellerReport.txt"
DEFAULT_OUTPUT="cbdq.json"
DEFAULT_REPLIES="replies.json"
DESCRIPTION = """Parses the MuellerReport file into the appropriate data structures to enable text generation."""
def get_arg_parser():
parser = argparse.ArgumentParser(prog=sys.argv[0], description=DESCRIPTION)
parser.add_argument("-f", "--filename",
help = "file to parse (default is %s)" % DEFAULT_FILENAME)
parser.add_argument("-o", "--output",
help = "file to save the Cheap Bots Done Quick JSON to (default is %s)" % DEFAULT_OUTPUT)
parser.add_argument("-r", "--replies",
help = "file to save the Cheap Bots Done Quick JSON for replies to (default is %s)" % DEFAULT_REPLIES)
    parser.add_argument("-i", "--info", action="store_true",
                        help = "set console logging output to INFO")
    parser.add_argument("-d", "--debug", action="store_true",
                        help = "set console logging output to DEBUG")
parser.set_defaults(
filename = DEFAULT_FILENAME,
output = DEFAULT_OUTPUT,
replies = DEFAULT_REPLIES
)
return parser
def ngrams(doc, n=2):
    words = doc.split()
    return [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
def get_sents(doc):
# Get the Harm To Whatever stuff as their own sentences
capture_inbetween = re.sub(r"(?<!\.)\n{2,}", " ` ", doc)
# Remove extra whitespace within
doc_text = ' '.join(capture_inbetween.split())
# Create a list of sentences
sent_split = re.split("((?<!(v|[A-Z]|[0-9]))(?<!(Mr|Ms|MR|MS|Jr|jr|Dr|dr|No))(?<!Mrs)(?<!et al)(?<!(Jan|Feb|Mar|Apr|Jun|Jul|Aug|Oct|Nov|Dec|Doc))[.?!`])(\"?)", doc_text)
#sent_split = sent_tokenize(doc_text)
# Filter out the empty garbage items
non_empty = [s.strip() for s in sent_split if s and len(s.strip()) > 0];
print(non_empty)
# Append the punctuation to the previous sentence
punct_set = set(".?!")
sents = []
if not non_empty:
return None
sents.append(non_empty[0])
for i in range(1,len(non_empty)):
s = non_empty[i]
assert(len(s) > 0)
if s == "`":
continue
if s[0] in punct_set or s == '"':
sents[-1] += s
continue
#s = s.replace("!","")
sents.append(s)
sents = [s.strip() for s in sents if len(s) > 3 or "bad" in s.lower()];
#sents = [s.strip() for s in sents if len(s)];
return sents
def filter_replies(sents):
replies = []
for s in sents:
if 'BAD' in s:
replies.append(s)
continue
if 'harm' in s.lower():
replies.append(s)
continue
if '?' in s:
replies.append(s)
continue
if '...' in s:
replies.append(s)
continue
return replies
def replies_json_dict(sents):
return {'.' : '#replies#'}
def tracery_json_dict(sents):
return {"origin" : sents, "replies" : filter_replies(sents)}
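# For illustration only (hypothetical sentences, not taken from the report),
# the Cheap Bots Done Quick grammar produced by tracery_json_dict() looks like:
#   {
#       "origin": ["The report was filed.", "Was there harm?"],
#       "replies": ["Was there harm?"]
#   }
# where "replies" keeps only the sentences selected by filter_replies().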
def main():
parser = get_arg_parser()
args = parser.parse_args()
# Logging Information
if args.info:
SH.setLevel(logging.INFO)
if args.debug:
SH.setLevel(logging.DEBUG)
# Open File
LOGGER.info("Opening file at %s", args.filename)
raw_doc = None
try:
with open(args.filename, 'r') as file:
LOGGER.info("Opened %s" % args.filename)
raw_doc = file.read()
except IOError as e:
LOGGER.error("Failed to open %s: %s" % (args.filename, e))
return e.errno
# Format Doc
LOGGER.info("Formatting Document");
sents = get_sents(raw_doc)
num_too_long = 0
for i,s in enumerate(sents):
print("SENTENCE %d: '%s'" % (i, s))
threshold = 6
if len(s) == threshold:
print("WARNING: Exactly %d characters (%d): %s" % (len(s), len(s), s))
num_too_long += 1
#if len(s) > 280:
# LOGGER.warning("Too many characters (%d): %s" % (len(s), s))
# num_too_long += 1
LOGGER.info("Sentences: %d" % len(sents))
LOGGER.info("Sentences too long: %s" % num_too_long)
# Format sents for CPDQ
LOGGER.info("Converting to Cheap Bots Done Quick JSON format")
tracery_dict = tracery_json_dict(sents)
LOGGER.info("Saving JSON to %s" % args.output)
try:
with open(args.output, 'w') as file:
json.dump(tracery_dict, file, indent=4)
except IOError as e:
LOGGER.error("Failed to save to %s: %s" % (args.output, e))
return e.errno
LOGGER.info("Saving replies JSON to %s" % args.replies)
try:
with open(args.replies, 'w') as file:
json.dump(replies_json_dict(sents), file, indent=4)
except IOError as e:
LOGGER.error("Failed to save to %s: %s" % (args.output, e))
return e.errno
return 0
if __name__ == "__main__":
rtn = main()
sys.exit(rtn)
``` |
{
"source": "jmcgroga/turicreate",
"score": 2
} |
#### File: turicreate/data_structures/gframe.py
```python
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from .sframe import SFrame
from .._cython.context import debug_trace as cython_context
from ..util import _is_non_string_iterable
from .sarray import SArray, _create_sequential_sarray
import copy
VERTEX_GFRAME = 0
EDGE_GFRAME = 1
class GFrame(SFrame):
"""
GFrame is similar to SFrame but is associated with an SGraph.
- GFrame can be obtained from either the `vertices` or `edges`
      attribute in any SGraph:
>>> import turicreate
>>> g = turicreate.load_sgraph(...)
>>> vertices_gf = g.vertices
>>> edges_gf = g.edges
- GFrame has the same API as SFrame:
>>> sa = vertices_gf['pagerank']
>>> # column lambda transform
>>> vertices_gf['pagerank'] = vertices_gf['pagerank'].apply(lambda x: 0.15 + 0.85 * x)
>>> # frame lambda transform
>>> vertices_gf['score'] = vertices_gf.apply(lambda x: 0.2 * x['triangle_count'] + 0.8 * x['pagerank'])
>>> del vertices_gf['pagerank']
- GFrame can be converted to SFrame:
>>> # extract an SFrame
>>> sf = vertices_gf.__to_sframe__()
"""
def __init__(self, graph, gframe_type):
self.__type__ = gframe_type
self.__graph__ = graph
self.__sframe_cache__ = None
self.__is_dirty__ = False
def __to_sframe__(self):
return copy.copy(self._get_cache())
# /**************************************************************************/
# /* */
# /* Modifiers */
# /* */
# /**************************************************************************/
def add_column(self, data, column_name="", inplace=False):
"""
Adds the specified column to this SFrame. The number of elements in
the data given must match every other column of the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : SArray
The 'column' of data.
column_name : string
The name of the column. If no name is given, a default name is chosen.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(column_name, str):
raise TypeError("Invalid column name: must be str")
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.add_vertex_field(
data.__proxy__, column_name
)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.add_edge_field(
data.__proxy__, column_name
)
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).add_column(data, column_name, inplace=inplace)
def add_columns(self, data, column_names=None, inplace=False):
"""
Adds columns to the SFrame. The number of elements in all columns must
match every other column of the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
column_names: list of string, optional
A list of column names. All names must be specified. ``column_names`` is
ignored if data is an SFrame.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
column_names = other.column_names()
my_columns = set(self.column_names())
for name in column_names:
if name in my_columns:
raise ValueError(
"Column '" + name + "' already exists in current SFrame"
)
else:
if not _is_non_string_iterable(datalist):
raise TypeError("datalist must be an iterable")
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in column_names]):
raise TypeError("Invalid column name in list : must all be str")
if inplace:
for (data, name) in zip(datalist, column_names):
self.add_column(data, name)
return self
else:
            return super(GFrame, self).add_columns(
datalist, column_names, inplace=inplace
)
def remove_column(self, column_name, inplace=False):
"""
Removes the column with the given name from the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
if column_name not in self.column_names():
raise KeyError("Cannot find column %s" % column_name)
if inplace:
self.__is_dirty__ = True
try:
with cython_context():
if self._is_vertex_frame():
assert column_name != "__id", 'Cannot remove "__id" column'
graph_proxy = self.__graph__.__proxy__.delete_vertex_field(
column_name
)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
assert (
column_name != "__src_id"
), 'Cannot remove "__src_id" column'
assert (
column_name != "__dst_id"
), 'Cannot remove "__dst_id" column'
graph_proxy = self.__graph__.__proxy__.delete_edge_field(
column_name
)
self.__graph__.__proxy__ = graph_proxy
return self
except:
self.__is_dirty__ = False
raise
else:
return super(GFrame, self).remove_column(column_name, inplace=inplace)
def remove_columns(self, column_names, inplace=False):
column_names = list(column_names)
existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))
for name in column_names:
if name not in existing_columns:
raise KeyError("Cannot find column %s" % name)
if inplace:
for c in column_names:
self.remove_column(c, inplace=True)
else:
return super(GFrame, self).remove_columns(column_names, inplace=inplace)
def swap_columns(self, column_name_1, column_name_2, inplace=False):
"""
Swaps the columns with the given names.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name_1 : string
Name of column to swap
column_name_2 : string
Name of other column to swap
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.swap_vertex_fields(
column_name_1, column_name_2
)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.swap_edge_fields(
column_name_1, column_name_2
)
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).swap_columns(
column_name_1, column_name_2, inplace=inplace
)
def rename(self, names, inplace=False):
"""
Rename the columns using the 'names' dict. This changes the names of
the columns given as the keys and replaces them with the names given as
the values.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
names : dict[string, string]
Dictionary of [old_name, new_name]
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
if type(names) is not dict:
raise TypeError("names must be a dictionary: oldname -> newname")
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.rename_vertex_fields(
list(names.keys()), list(names.values())
)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.rename_edge_fields(
list(names.keys()), list(names.values())
)
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).rename(names, inplace=inplace)
def add_row_number(self, column_name="id", start=0, inplace=False):
if type(column_name) is not str:
raise TypeError("Must give column_name as str")
if column_name in self.column_names():
raise RuntimeError("Column name %s already exists" % str(column_name))
if type(start) is not int:
raise TypeError("Must give start as int")
if inplace:
the_col = _create_sequential_sarray(self.num_rows(), start)
self[column_name] = the_col
return self
else:
return super(GFrame, self).add_row_number(
column_name=column_name, start=start, inplace=inplace
)
def __setitem__(self, key, value):
"""
A wrapper around add_column(s). Key can be either a list or a str. If
value is an SArray, it is added to the SFrame as a column. If it is a
constant value (int, str, or float), then a column is created where
every entry is equal to the constant value. Existing columns can also
be replaced using this wrapper.
"""
if key in ["__id", "__src_id", "__dst_id"]:
raise KeyError(
"Cannot modify column %s. Changing __id column will\
change the graph structure"
% key
)
else:
self.__is_dirty__ = True
super(GFrame, self).__setitem__(key, value)
# /**************************************************************************/
# /* */
# /* Read-only Accessor */
# /* */
# /**************************************************************************/
def num_rows(self):
"""
Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.summary()["num_vertices"]
elif self._is_edge_frame():
return self.__graph__.summary()["num_edges"]
def num_columns(self):
"""
Returns the number of columns.
Returns
-------
out : int
Number of columns in the SFrame.
"""
return len(self.column_names())
def column_names(self):
"""
Returns the column names.
Returns
-------
out : list[string]
Column names of the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.__proxy__.get_vertex_fields()
elif self._is_edge_frame():
return self.__graph__.__proxy__.get_edge_fields()
def column_types(self):
"""
Returns the column types.
Returns
-------
out : list[type]
Column types of the SFrame.
"""
if self.__type__ == VERTEX_GFRAME:
return self.__graph__.__proxy__.get_vertex_field_types()
elif self.__type__ == EDGE_GFRAME:
return self.__graph__.__proxy__.get_edge_field_types()
# /**************************************************************************/
# /* */
# /* Internal Private Methods */
# /* */
# /**************************************************************************/
def _get_cache(self):
if self.__sframe_cache__ is None or self.__is_dirty__:
if self._is_vertex_frame():
self.__sframe_cache__ = self.__graph__.get_vertices()
elif self._is_edge_frame():
self.__sframe_cache__ = self.__graph__.get_edges()
else:
raise TypeError
self.__is_dirty__ = False
return self.__sframe_cache__
def _is_vertex_frame(self):
return self.__type__ == VERTEX_GFRAME
def _is_edge_frame(self):
return self.__type__ == EDGE_GFRAME
@property
def __proxy__(self):
return self._get_cache().__proxy__
``` |
{
"source": "jmcguinness11/StockPredictor",
"score": 3
} |
#### File: jmcguinness11/StockPredictor/create_tweet_classes.py
```python
import collections
import json
import random
refined_tweets = collections.defaultdict(list)
#returns label for company and time
def getLabel(ticker, month, day, hour):
return random.randint(-1,1)
#parses individual json file
def parseJSON(data, month, day, hour):
results = []
for tweet in data.itervalues():
text = tweet['text']
label = getLabel(tweet['company'], month, day, hour)
results.append([text,label])
return results
def loadData(months, days):
hours = [10, 11, 12, 13, 14]
minutes = [0, 15, 30, 45]
output_data = []
for month in months:
for day in days:
for hour in hours:
for minute in minutes:
filename = 'tweets_{}_{}_{}_{}.dat'.format(month, day, hour, minute)
with open(filename, 'r') as f:
try:
data = json.load(f)
except ValueError as err:
print filename
exit(1)
output_data += parseJSON(data, month, day, hour)
f.close()
print len(output_data)
print output_data[0:10]
return output_data
def main():
days = [9,10,11,12,13,16,17]
loadData([4], days)
if __name__=='__main__':
main()
``` |
{
"source": "jmcguire/effortless_bootstrap_web_form_monkey_patch",
"score": 2
} |
#### File: effortless_bootstrap_web_form_monkey_patch/tests/website.py
```python
import web
import os, sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import effortless_bootstrap_web_form_monkey_patch
effortless_bootstrap_web_form_monkey_patch.patch()
render = web.template.render('./')
urls = (
'/', 'website',
)
class website:
# create a complex form
form = web.form.Form(
web.form.Textbox('a-textbox', web.form.notnull, description="textbox description"),
web.form.Password('a-password', description="password description"),
web.form.Textarea('a-textarea', description="textarea description"),
web.form.Checkbox('a-checkbox', description="checkbox description", checked=False),
web.form.Checkbox('another-checkbox', description="checkbox description", checked=True),
web.form.Dropdown('a-dropdown', description="dropdown description", args=[('first', 'first value'), ('second', 'second value'), ('third', 'third value')], value='second'),
web.form.Radio('a-radio', description="radio description", args=[('alpha', 'alpha value'), ('beta', 'beta value'), ('gamma', 'gamma value')], value='beta'),
web.form.File('a-file', description="file upload description"),
web.form.Hidden('a-hidden', description="this should be hidden"),
web.form.Button('Submit your data'),
)
def GET(self):
form = self.form()
return render.website(form)
if __name__ == '__main__':
app = web.application(urls, globals())
app.run()
``` |
{
"source": "jmcguire/jms",
"score": 3
} |
#### File: jms/jms/db.py
```python
from flask import g
from jms import app
import sqlite3
__all__ = ['get_db']
# connect to db
def get_db():
"""return the db connection, create it if it doesn't exist yet"""
db = getattr(g, 'db', None)
if db is None:
db = g.db = sqlite3.connect('sqlite3.db')
db.row_factory = sqlite3.Row
return db
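# For illustration (hypothetical view code): every request shares one connection
# stored on flask.g, so a handler can simply do
#   db = get_db()
#   posts = db.execute('SELECT * FROM posts').fetchall()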
def init_if_empty(force=False):
"""if the database is empty, initialize it"""
db = get_db()
cursor = db.cursor()
cursor.execute("SELECT count(*) FROM sqlite_master WHERE type='table' AND name='posts';")
row = cursor.fetchone()
cursor.close()
if force or row[0] == 0:
print "database is empty, initializing it..."
with open(app.config['DB_INIT'], 'r') as f:
try:
db.executescript(f.read())
except:
print "...failed to initialize database"
db.rollback()
raise
else:
print "...success!"
db.commit()
else:
print "using existing database"
@app.teardown_appcontext
def close_db(error):
if hasattr(g, 'db'):
g.db.close()
``` |
{
"source": "jmcguire/learning",
"score": 4
} |
#### File: algorithms/misc/best_time_to_sell.py
```python
def max_profit(prices):
"""get the maximum profit from buying and selling stock"""
max_profit = None
lowest_price = None
highest_price = None
for price in prices:
print "checking ", price
# if we have a new lowest price, grab it and reset out highest
if not lowest_price or price < lowest_price:
lowest_price = price
highest_price = None
print "\tnew lowest_price ", price
# if we have a new highest, grab it and calculate the profit
elif not highest_price or price > highest_price:
highest_price = price
profit = highest_price - lowest_price
print "\tnew highest_price ", price
print "\tpossible profit ", profit
# check for a new max_profit
if not max_profit or max_profit < profit:
max_profit = profit
print "\tnew max_profit ", profit
return max_profit or 0
prices = [10, 5, 3, 7, 11, 1, 4]
bad_prices = [5, 4, 3, 2, 1]
profit = max_profit(prices)
print "maximum profit: ", profit
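# With the prices above, the lowest price seen before the peak is 3 and the peak
# is 11, so the expected maximum profit is 8 (buy at 3, sell at 11). A strictly
# decreasing list such as bad_prices never sets a high point and returns 0.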
```
#### File: learning/data_structures/binary_search_tree.py
```python
class BinaryNode(object):
"""a single node in a binary search tree"""
def __init__(self, e):
self.e = e
self.left = None
self.right = None
self.parent = None
def add(self, e):
"""add the new element in the appropriate place"""
if e < self.e:
if self.left is not None:
self.left.add(e)
else:
node = BinaryNode(e)
node.parent = self
self.left = node
else:
if self.right is not None:
self.right.add(e)
else:
node = BinaryNode(e)
node.parent = self
self.right = node
def find_furthest_right(self):
if self.right:
return self.right.find_furthest_right()
else:
return self
def find_furthest_left(self):
if self.left:
return self.left.find_furthest_left()
else:
return self
class BinarySearchTree(object):
"""mostly just holds the root node of a binary tree"""
def __init__(self):
self.root = None
def add(self, new):
if self.root is None:
node = BinaryNode(new)
self.root = node
else:
self.root.add(new)
def __contains__(self, e):
"""a expressive alias for find()"""
if self.find(e):
return True
else:
return False
def find(self, e):
"""return the node with e if it exists"""
current = self.root
while current is not None:
if e == current.e:
return current
elif e < current.e:
current = current.left
elif e > current.e:
current = current.right
return False
def rm(self, e):
"""remove the node with e, if it exists"""
""" how to remove a node in a binary tree:
we need three nodes:
- parent: to_delete's parent
        - advance: the rightmost node in to_delete's left subtree (it has no right child)
- advance_parent: that node's parent
if we're in the middle of a complicated tree, then we'll make three moves:
1. advance takes the place of to_delete
        2. advance's remaining child is moved up to advance_parent
3. to_delete's children are moved to advance
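        for example (hypothetical tree), removing the root 10 from
               10                  9
              /  \       -->      / \
             5    15             5   15
              \
               9
        means advance is 9 (the rightmost node under the left child); it takes
        10's place and inherits 10's children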
"""
to_delete = self.find(e)
if not to_delete:
raise Exception("error: can't remove, doesn't exist")
parent = to_delete.parent
# if there are no children, this will be easy
if to_delete.left is None and to_delete.right is None:
if parent is None: # it's a root node
self.root = None
elif parent.left == to_delete:
parent.left = None
elif parent.right == to_delete:
parent.right = None
else:
raise Exception("parent doesn't think it's related to to_delete")
return
advance = None
advance_parent = None
# 2. find advance, and handle its child
if to_delete.left is not None:
advance = to_delete.left.find_furthest_right()
advance_parent = advance.parent
advance_parent.right = advance.left
            if advance.left:
                advance.left.parent = advance_parent
elif to_delete.right is not None:
advance = to_delete.right.find_furthest_left()
advance_parent = advance.parent
advance_parent.left = advance.right
            if advance.right:
                advance.right.parent = advance_parent
else:
raise Exception("to_delete suddenly has no children, but it did a minute ago")
# 1. move advance to delete's spot
advance.parent = to_delete.parent
if parent is None:
# we're a root node
self.root = advance
elif parent.left == to_delete:
to_delete.parent.left = advance
elif parent.right == to_delete:
to_delete.parent.right = advance
else:
raise Exception("something has gone wrong")
# 3. handle to_delete's children
advance.left = to_delete.left
advance.right = to_delete.right
if to_delete.left:
to_delete.left.parent = advance
if to_delete.right:
to_delete.right.parent = advance
def show_all(self):
"""show all nodes, breadth-first"""
nodes = []
nodes.append(self.root)
while nodes:
node = nodes.pop(0) # basically, shift.
print "node:", node.e,
if node.parent:
print "(parent: %s)" % str(node.parent.e),
if node.left:
print "- left: ", node.left.e,
nodes.append(node.left)
if node.right:
print "- right:", node.right.e,
nodes.append(node.right)
print
def show_all_visual(self, depth=0, node=None):
"""show all nodes, nicely formatted, depth-first"""
indent = " " * depth
if node is None:
node = self.root
print "%s> %s" % (indent, node.e)
if node.left:
self.show_all_visual(depth+1, node.left)
if node.right:
self.show_all_visual(depth+1, node.right)
# TODO
def show_in_order(self):
"""left, current, right"""
pass
def show_post_order(self):
"""left, right, current"""
pass
def show_pre_order(self):
"""current, left, right"""
pass
def balance(self):
pass
```
#### File: learning/data_structures/queue.py
```python
from linked_list import LinkedList, Node
class Queue(LinkedList):
"""a FIFO list"""
# these two methods are just aliases for the regular LinkedList methods, but
# are given the names we'd expect for a Queue
def push_e(self, e):
self.add_e(e)
def push_node(self, node):
self.add_node(node)
def next_(self):
"""queues don't have a notion of next, the user only gets the first"""
pass
def shift(self):
"""return the first element of the list"""
if self.size == 0:
return None
node = self.first
self.first = node.next_
self.size -= 1
if self.is_empty():
self.current = None
self.last = None
return node
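# Minimal usage sketch (it assumes LinkedList.add_e appends at the tail and that
# nodes expose their payload as `.e`, as elsewhere in this repo):
if __name__ == '__main__':
    q = Queue()
    q.push_e('a')
    q.push_e('b')
    first = q.shift()
    print first.e  # 'a' -- first in, first out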
```
#### File: learning/data_structures/test_binary_search_tree.py
```python
from binary_search_tree import BinarySearchTree
from random import randint, shuffle
def check_for(bt, e):
if e in bt:
print "%s was found" % e
else:
print "%s was not found" % e
def load(bt):
# a nicely balanced tree
bt.add(10)
bt.add(5)
bt.add(2)
bt.add(1)
bt.add(4)
bt.add(7)
bt.add(6)
bt.add(9)
bt.add(15)
bt.add(12)
bt.add(11)
bt.add(14)
bt.add(17)
bt.add(16)
bt.add(19)
# small tree
if False:
bt = BinarySearchTree()
bt.add('a')
bt.add('d')
bt.add('e')
bt.add('f')
bt.add('b')
bt.add('c')
bt.show_all()
bt.show_all_visual()
for e in ['a','e','j']:
check_for(bt,e)
print
# deleting
if False:
bt = BinarySearchTree()
load(bt)
bt.show_all_visual()
print "\ndeleting leafless node 19"
bt.rm(19)
check_for(bt,19)
bt.show_all_visual()
print "\nreloading and deleting middle node 15"
bt = BinarySearchTree()
load(bt)
bt.rm(15)
bt.show_all_visual()
print "\nreloading and deleting root node 10"
bt = BinarySearchTree()
load(bt)
bt.rm(10)
bt.show_all_visual()
# big tree
if False:
bt2 = BinarySearchTree()
# get ~100 distinct random numbers from 1 to 1000
random_number_hash = {}
for i in range(0,100):
random_number_hash[randint(0,1000)] = True
random_numbers = [e for e in random_number_hash.keys()]
shuffle(random_numbers)
for e in random_numbers:
bt2.add(e)
bt2.show_all_visual()
# huge tree
if False:
bt2 = BinarySearchTree()
# get ~10,000 distinct random numbers from 1 to 10,000,000
random_number_hash = {}
for i in range(0,10000):
random_number_hash[randint(0,10000000)] = True
random_numbers = [e for e in random_number_hash.keys()]
shuffle(random_numbers)
for e in random_numbers:
bt2.add(e)
# todo: look for one number
```
#### File: design_patterns/decorator/starbuzz_original.py
```python
class Beverage(object):
def __init__(self):
self.milk = False
self.soy = False
self.mocha = False
self.whip = False
self.description = ''
def get_description(self):
return self.description
def cost(self):
cost = 0.89
if self.get_milk(): cost += 0.10
if self.get_soy(): cost += 0.15
if self.get_mocha(): cost += 0.20
if self.get_whip(): cost += 0.10
return cost
def get_milk(self): return self.milk
def get_soy(self): return self.soy
def get_mocha(self): return self.mocha
def get_whip(self): return self.whip
def set_milk(self): self.milk = True
def set_soy(self): self.soy = True
def set_mocha(self): self.mocha = True
def set_whip(self): self.whip = True
# concrete classes
class HouseBlend(Beverage):
def __init__(self):
self.description = "Regular House Blend"
super(HouseBlend, self).__init__()
class DarkRoast(Beverage):
def __init__(self):
self.description = "Most Excellent Dark Roast"
super(DarkRoast, self).__init__()
def cost(self):
return super(DarkRoast, self).cost() + 0.10
class Decaf(Beverage):
def __init__(self):
self.description = "Literally Poison"
super(Decaf, self).__init__()
def cost(self):
return super(Decaf, self).cost() + 0.16
class Espresso(Beverage):
def __init__(self):
self.description = "Quick Hit Espresso"
super(Espresso, self).__init__()
def cost(self):
return super(Espresso, self).cost() + 1.1
# testing
if __name__ == '__main__':
def display(beverage):
print "%s $%.2f" % (beverage.get_description(), beverage.cost())
b1 = Espresso()
display(b1)
b2 = DarkRoast()
b2.set_mocha()
b2.set_whip()
display(b2)
b3 = HouseBlend()
b3.set_soy()
b3.set_mocha()
b3.set_whip()
display(b3)
```
#### File: design_patterns/facade/home_theater_new.py
```python
class PopcornMaker(object):
def on(self): print "popcorn maker turned on"
def off(self): print "popcorn maker turned off"
def pop(self): print "popcorn maker popping"
class Lights(object):
def dim(self): print "light dimmed"
def full(self): print "light turned on full bright"
class Screen(object):
def lower(self): print "screen lowered"
def raise_(self): print "screen raised"
class DvdPlayer(object):
def on(self): print "dvd player turned on"
def off(self): print "dvd player turned off"
def play(self): print "dvd player playing"
# home theatre facade
class HomeTheatreFacade(object):
def __init__(self, popcorn, lights, screen, dvd):
self.popcorn = popcorn
self.lights = lights
self.screen = screen
self.dvd = dvd
def watch_movie(self):
self.popcorn.on()
self.popcorn.pop()
self.lights.dim()
self.screen.lower()
self.dvd.on()
self.dvd.play()
def end_movie(self):
self.dvd.off()
self.screen.raise_()
self.lights.full()
self.popcorn.off()
# testing
if __name__ == '__main__':
popcorn = PopcornMaker()
lights = Lights()
screen = Screen()
dvd = DvdPlayer()
home_theatre = HomeTheatreFacade(popcorn, lights, screen, dvd)
print "starting movie..."
home_theatre.watch_movie()
print "\ndone with movie..."
home_theatre.end_movie()
```
#### File: design_patterns/factory_method/factory_original.py
```python
class Pizza(object):
def prepare(self): pass
def bake(self): pass
def cut(self): pass
def box(self): pass
class NYCheesePizza(Pizza): pass
class NYGreekPizza(Pizza): pass
class NYPepperoniPizza(Pizza): pass
class NYClamPizza(Pizza): pass
class NYVeggiePizza(Pizza): pass
class ChicagoCheesePizza(Pizza): pass
class ChicagoGreekPizza(Pizza): pass
class ChicagoPepperoniPizza(Pizza): pass
class ChicagoClamPizza(Pizza): pass
class ChicagoVeggiePizza(Pizza): pass
# factory class
class PizzaFactory(object):
""" an abstract factory """
def create_pizza(self, pizza_type): pass
class NYPizzaFactory(object):
""" figure out what type of NY pizza to make """
def create_pizza(self, pizza_type):
pizza = None
if pizza_type == 'cheese':
pizza = NYCheesePizza()
elif pizza_type == 'greek':
pizza = NYGreekPizza()
elif pizza_type == 'pepperoni':
pizza = NYPepperoniPizza()
elif pizza_type == 'clam':
pizza = NYClamPizza()
elif pizza_type == 'veggie':
pizza = NYVeggiePizza()
return pizza
class ChicagoPizzaFactory(object):
""" figure out what type of chicago pizza to make """
def create_pizza(self, pizza_type):
pizza = None
if pizza_type == 'cheese':
pizza = ChicagoCheesePizza()
elif pizza_type == 'greek':
pizza = ChicagoGreekPizza()
elif pizza_type == 'pepperoni':
pizza = ChicagoPepperoniPizza()
elif pizza_type == 'clam':
pizza = ChicagoClamPizza()
elif pizza_type == 'veggie':
pizza = ChicagoVeggiePizza()
return pizza
# store class
class PizzaStore(object):
def __init__(self, factory):
self.factory = factory
def order_pizza(self, pizza_type):
pizza = self.factory.create_pizza(pizza_type)
pizza.prepare()
pizza.bake()
pizza.cut()
pizza.box()
return pizza
# sample code
if __name__ == '__main__':
ny_factory = NYPizzaFactory()
ny_store = PizzaStore(ny_factory)
ny_store.order_pizza('veggie')
chicago_factory = ChicagoPizzaFactory()
chicago_store = PizzaStore(chicago_factory)
chicago_store.order_pizza('veggie')
```
#### File: design_patterns/iterator/menu_new.py
```python
from abc import abstractmethod
# iterator classes
class Iterator(object):
"""abstract class for iterators"""
@abstractmethod
def has_next(self): pass
@abstractmethod
def get_next(self): pass
class PancakeIterator(Iterator):
"""concrete iterator for our pancake house, or any list-based collection"""
def __init__(self, items):
self.items = items
self.pos = 0
def has_next(self):
return bool(self.pos < len(self.items))
def get_next(self):
if self.pos < len(self.items):
item = self.items[self.pos]
self.pos += 1
return item
else:
raise Exception("iterator is out of bounds: %d" % self.pos)
class DinerIterator(Iterator):
"""concrete iterator for our diner, or any array-based collection"""
def __init__(self, items):
self.items = items
self.pos = 0
def has_next(self):
return bool(self.pos < len(self.items) and self.items[self.pos] is not None)
def get_next(self):
if self.pos < len(self.items) and self.items[self.pos] is not None:
item = self.items[self.pos]
self.pos += 1
return item
else:
raise Exception("iterator is out of bounds: %d" % self.pos)
# general-use classes
class MenuItem(object):
def __init__(self, name, desc, veggie, price):
self.name = name
self.desc = desc
self.veggie = veggie
self.price = price
def get_name(self): return self.name
def get_desc(self): return self.desc
def get_veggie(self): return self.veggie
def get_price(self): return self.price
# incompatible collection classes
class PancakeHouse(object):
"""a collection based on a linked list"""
def __init__(self):
self.name = "Breakfast Menu"
self.menu_items = []
self.add_menu_item("Pancakes", "Regular pancakes", 2.99, True)
self.add_menu_item("Trucker Pancakes", "Pancakes with eggs", 3.99, None)
self.add_menu_item("Waffles", "Waffles with Maple Syrup", 4.99, True)
def add_menu_item(self, name, desc, price, veggie):
menu_item = MenuItem(name, desc, veggie, price)
self.menu_items.append(menu_item)
def get_iterator(self):
iterator = PancakeIterator(self.menu_items)
return iterator
class Diner(object):
    """a collection based on an array, which is pretty hard to do in python"""
def __init__(self):
self.menu_items = [None, None, None, None, None, None]
self.max_items = 6
self.number_of_items = 0
self.name = "Lunch Menu"
self.add_menu_item("Soup", "Soup of the Day", 3.29, True)
self.add_menu_item("BLT", "Bacon, Lettuce, Tomato, with optional Mutton", 2.99, None)
self.add_menu_item("Hot Dog", "World famously cheap-ass hot dog", 0.25, None)
def add_menu_item(self, name, desc, price, veggie):
if self.number_of_items < self.max_items:
menu_item = MenuItem(name, desc, veggie, price)
self.menu_items[self.number_of_items] = menu_item
self.number_of_items += 1
else:
raise Exception("maximum number of items reached!")
def get_iterator(self):
iterator = DinerIterator(self.menu_items)
return iterator
class Waitress(object):
def __init__(self):
self.menus = []
def add_menu(self, menu):
self.menus.append(menu)
def print_menu(self):
"""loop thru our iterators to loop thru our menu items"""
for menu in self.menus:
print "\n%s:" % menu.name
iterator = menu.get_iterator()
while iterator.has_next():
menu_item = iterator.get_next()
print "\t%s. %s -- %.2f" % (menu_item.get_name(), menu_item.get_desc(),
menu_item.get_price())
# testing
if __name__ == '__main__':
breakfast_menu = PancakeHouse()
lunch_menu = Diner()
waitress = Waitress()
waitress.add_menu(breakfast_menu)
waitress.add_menu(lunch_menu)
waitress.print_menu()
```
#### File: design_patterns/iterator/menu_original.py
```python
class MenuItem(object):
def __init__(self, name, desc, veggie, price):
self.name = name
self.desc = desc
self.veggie = veggie
self.price = price
def get_name(self): return self.name
def get_desc(self): return self.desc
def get_veggie(self): return self.veggie
def get_price(self): return self.price
# incompatible collection classes
class PancakeHouse(object):
"""a collection based on a linked list"""
def __init__(self):
self.menu_items = []
self.add_menu_item("Pancakes", "Regular pancakes", 2.99, True)
self.add_menu_item("Trucker Pancakes", "Pancakes with eggs", 3.99, None)
self.add_menu_item("Waffles", "Waffles with Maple Syrup", 4.99, True)
def add_menu_item(self, name, desc, price, veggie):
menu_item = MenuItem(name, desc, veggie, price)
self.menu_items.append(menu_item)
def get_menu_items(self):
return self.menu_items
class Diner(object):
    """a collection based on an array, which is pretty hard to do in python"""
def __init__(self):
self.menu_items = [None, None, None, None, None, None]
self.max_items = 6
self.number_of_items = 0
self.add_menu_item("Soup", "Soup of the Day", 3.29, True)
self.add_menu_item("BLT", "Bacon, Lettuce, Tomato, with optional Mutton", 2.99, None)
self.add_menu_item("Hot Dog", "World famously cheap-ass hot dog", 0.25, None)
def add_menu_item(self, name, desc, price, veggie):
if self.number_of_items < self.max_items:
menu_item = MenuItem(name, desc, veggie, price)
self.menu_items[self.number_of_items] = menu_item
self.number_of_items += 1
else:
raise Exception("maximum number of items reached!")
def get_menu_items(self):
return self.menu_items
# testing
if __name__ == '__main__':
breakfast_menu = PancakeHouse()
breakfast_items = breakfast_menu.get_menu_items()
lunch_menu = Diner()
lunch_items = lunch_menu.get_menu_items()
def print_menu_item(menu_item):
print "\t%s. %s -- %.2f" % (menu_item.get_name(), menu_item.get_desc(), menu_item.get_price())
print "Breakfast Menu:"
for menu_item in breakfast_items:
print_menu_item(menu_item)
print "\nLunch Menu:"
for i in range(len(lunch_items)):
if not lunch_items[i]:
continue
menu_item = lunch_items[i]
print_menu_item(menu_item)
```
#### File: design_patterns/observer/weather_original.py
```python
from weather_data import WeatherData
class WeatherDisplay(object):
"""a sample object that pretends to hold weather data"""
def update(self): pass
class OurWeatherData(WeatherData):
"""build our weather data class on the vendor-supplied one"""
def __init__(self):
self.current_conditions_display = WeatherDisplay()
self.statistics_display = WeatherDisplay()
self.forecast_display = WeatherDisplay()
def measurements_changed(self):
temp = self.get_temp()
humidity = self.get_humidity()
pressure = self.get_pressure()
self.current_conditions_display.update(temp, humidity, pressure)
self.statistics_display.update(temp, humidity, pressure)
self.forecast_display.update(temp, humidity, pressure)
```
#### File: design_patterns/proxy/gumball_monitor_new.py
```python
from gumball_monitor_original import GumballMonitor, GumballMachine
# this is a pretty generic wrapper object
class GumballMachineProxy(object):
"""hold a GumballMachine, and call pass on attribute calls"""
def __init__(self, gumball_machine):
self.machine = gumball_machine
def __getattr__(self, attr):
"""call the attribute on our gumball machine"""
#print "checking for attribute %r remotely" % attr
# note that this will fail if it doesn't exist, and that's good
check_attr = getattr(self.machine, attr)
if callable(check_attr):
# it's a method, call it and return it
def wrap_remote_call(*args, **kargs):
return check_attr(*args, **kargs)
return wrap_remote_call
else:
# it' just an attribute, return it
return check_attr
if __name__ == '__main__':
gumball_machine = GumballMachine("Boston", 112)
proxy = GumballMachineProxy(gumball_machine)
monitor = GumballMonitor(proxy)
monitor.report()
```
#### File: design_patterns/proxy/gumball_monitor_original.py
```python
class MachineState(object):
"""an enum"""
sold_out = 0
no_quarter = 1
has_quarter = 2
sold = 3
class GumballMachine(object):
def __init__(self, location, inventory):
self.location = location
self.inventory = inventory
self.state = MachineState.no_quarter
def get_state(self):
if self.state == MachineState.sold_out:
return "sold out"
elif self.state == MachineState.no_quarter:
return "waiting for quarter"
elif self.state == MachineState.has_quarter:
return "has quarter"
elif self.state == MachineState.sold:
return "sold"
# the main class of this exercise
class GumballMonitor(object):
def __init__(self, machine):
self.machine = machine
def report(self):
print "Gumball Machine: %s" % self.machine.location
print "Current inventory: %d gumballs" % self.machine.inventory
print "Current state: %s" % self.machine.get_state()
# testing
if __name__ == '__main__':
gumball_machine = GumballMachine("Boston", 112)
monitor = GumballMonitor(gumball_machine)
monitor.report()
```
#### File: design_patterns/template/caffeine_original.py
```python
class Coffee(object):
def prepare(self):
self.boil_water()
self.brew_grinds()
self.pour_in_cup()
self.add_sugar_and_milk()
def boil_water(self):
print "boiling water"
def brew_grinds(self):
print "dripping coffee through filter"
def pour_in_cup(self):
print "pouring in cup"
def add_sugar_and_milk(self):
print "adding sugar and milk"
class Tea(object):
def prepare(self):
self.boil_water()
self.steep()
self.pour_in_cup()
self.add_lemon()
def boil_water(self):
print "boiling water"
def steep(self):
print "steeping tea bag"
def pour_in_cup(self):
print "pouring in cup"
def add_lemon(self):
print "adding a spot of lemon"
# testing
if __name__ == '__main__':
coffee = Coffee()
tea = Tea()
print "making coffee..."
coffee.prepare()
print "\nmaking tea..."
tea.prepare()
``` |
{
"source": "jmcguir/netbox",
"score": 2
} |
#### File: utilities/forms/utils.py
```python
import re
from django import forms
from django.forms.models import fields_for_model
from utilities.querysets import RestrictedQuerySet
from .constants import *
__all__ = (
'add_blank_choice',
'expand_alphanumeric_pattern',
'expand_ipaddress_pattern',
'form_from_model',
'parse_alphanumeric_range',
'parse_numeric_range',
'restrict_form_fields',
'parse_csv',
'validate_csv',
)
def parse_numeric_range(string, base=10):
"""
Expand a numeric range (continuous or not) into a decimal or
hexadecimal list, as specified by the base parameter
'0-3,5' => [0, 1, 2, 3, 5]
'2,8-b,d,f' => [2, 8, 9, a, b, d, f]
"""
values = list()
for dash_range in string.split(','):
try:
begin, end = dash_range.split('-')
except ValueError:
begin, end = dash_range, dash_range
try:
begin, end = int(begin.strip(), base=base), int(end.strip(), base=base) + 1
except ValueError:
raise forms.ValidationError(f'Range "{dash_range}" is invalid.')
values.extend(range(begin, end))
return list(set(values))
def parse_alphanumeric_range(string):
"""
Expand an alphanumeric range (continuous or not) into a list.
'a-d,f' => [a, b, c, d, f]
'0-3,a-d' => [0, 1, 2, 3, a, b, c, d]
"""
values = []
for dash_range in string.split(','):
try:
begin, end = dash_range.split('-')
vals = begin + end
# Break out of loop if there's an invalid pattern to return an error
if (not (vals.isdigit() or vals.isalpha())) or (vals.isalpha() and not (vals.isupper() or vals.islower())):
return []
except ValueError:
begin, end = dash_range, dash_range
if begin.isdigit() and end.isdigit():
for n in list(range(int(begin), int(end) + 1)):
values.append(n)
else:
# Value-based
if begin == end:
values.append(begin)
# Range-based
else:
# Not a valid range (more than a single character)
if not len(begin) == len(end) == 1:
raise forms.ValidationError(f'Range "{dash_range}" is invalid.')
for n in list(range(ord(begin), ord(end) + 1)):
values.append(chr(n))
return values
def expand_alphanumeric_pattern(string):
"""
Expand an alphabetic pattern into a list of strings.
"""
lead, pattern, remnant = re.split(ALPHANUMERIC_EXPANSION_PATTERN, string, maxsplit=1)
parsed_range = parse_alphanumeric_range(pattern)
for i in parsed_range:
if re.search(ALPHANUMERIC_EXPANSION_PATTERN, remnant):
for string in expand_alphanumeric_pattern(remnant):
yield "{}{}{}".format(lead, i, string)
else:
yield "{}{}{}".format(lead, i, remnant)
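# For illustration (hypothetical interface name, and assuming the expansion regex
# in constants matches bracketed ranges such as [0-2]):
#   list(expand_alphanumeric_pattern('ge-0/0/[0-2]'))
#   -> ['ge-0/0/0', 'ge-0/0/1', 'ge-0/0/2']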
def expand_ipaddress_pattern(string, family):
"""
Expand an IP address pattern into a list of strings. Examples:
'192.0.2.[1,2,100-250]/24' => ['192.0.2.1/24', '192.0.2.2/24', '192.0.2.100/24' ... '192.0.2.250/24']
'2001:db8:0:[0,fd-ff]::/64' => ['2001:db8:0:0::/64', '2001:db8:0:fd::/64', ... '2001:db8:0:ff::/64']
"""
if family not in [4, 6]:
raise Exception("Invalid IP address family: {}".format(family))
if family == 4:
regex = IP4_EXPANSION_PATTERN
base = 10
else:
regex = IP6_EXPANSION_PATTERN
base = 16
lead, pattern, remnant = re.split(regex, string, maxsplit=1)
parsed_range = parse_numeric_range(pattern, base)
for i in parsed_range:
if re.search(regex, remnant):
for string in expand_ipaddress_pattern(remnant, family):
yield ''.join([lead, format(i, 'x' if family == 6 else 'd'), string])
else:
yield ''.join([lead, format(i, 'x' if family == 6 else 'd'), remnant])
def add_blank_choice(choices):
"""
Add a blank choice to the beginning of a choices list.
"""
return ((None, '---------'),) + tuple(choices)
def form_from_model(model, fields):
"""
Return a Form class with the specified fields derived from a model. This is useful when we need a form to be used
for creating objects, but want to avoid the model's validation (e.g. for bulk create/edit functions). All fields
are marked as not required.
"""
form_fields = fields_for_model(model, fields=fields)
for field in form_fields.values():
field.required = False
return type('FormFromModel', (forms.Form,), form_fields)
def restrict_form_fields(form, user, action='view'):
"""
Restrict all form fields which reference a RestrictedQuerySet. This ensures that users see only permitted objects
as available choices.
"""
for field in form.fields.values():
if hasattr(field, 'queryset') and issubclass(field.queryset.__class__, RestrictedQuerySet):
field.queryset = field.queryset.restrict(user, action)
def parse_csv(reader):
"""
Parse a csv_reader object into a headers dictionary and a list of records dictionaries. Raise an error
if the records are formatted incorrectly. Return headers and records as a tuple.
"""
records = []
headers = {}
# Consume the first line of CSV data as column headers. Create a dictionary mapping each header to an optional
# "to" field specifying how the related object is being referenced. For example, importing a Device might use a
# `site.slug` header, to indicate the related site is being referenced by its slug.
for header in next(reader):
if '.' in header:
field, to_field = header.split('.', 1)
headers[field] = to_field
else:
headers[header] = None
# Parse CSV rows into a list of dictionaries mapped from the column headers.
for i, row in enumerate(reader, start=1):
if len(row) != len(headers):
raise forms.ValidationError(
f"Row {i}: Expected {len(headers)} columns but found {len(row)}"
)
row = [col.strip() for col in row]
record = dict(zip(headers.keys(), row))
records.append(record)
return headers, records
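# For illustration (hypothetical device import): given the CSV rows
#   name,site.slug
#   router1,dc-1
# parse_csv() returns headers == {'name': None, 'site': 'slug'} and
# records == [{'name': 'router1', 'site': 'dc-1'}].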
def validate_csv(headers, fields, required_fields):
"""
Validate that parsed csv data conforms to the object's available fields. Raise validation errors
if parsed csv data contains invalid headers or does not contain required headers.
"""
# Validate provided column headers
for field, to_field in headers.items():
if field not in fields:
raise forms.ValidationError(f'Unexpected column header "{field}" found.')
if to_field and not hasattr(fields[field], 'to_field_name'):
raise forms.ValidationError(f'Column "{field}" is not a related object; cannot use dots')
if to_field and not hasattr(fields[field].queryset.model, to_field):
raise forms.ValidationError(f'Invalid related object attribute for column "{field}": {to_field}')
# Validate required fields
for f in required_fields:
if f not in headers:
raise forms.ValidationError(f'Required column header "{f}" not found.')
``` |
{
"source": "jmchandonia/kb_trimmomatic",
"score": 2
} |
#### File: kb_trimmomatic/test/kb_trimmomatic_server_test.py
```python
import unittest
import os
import json
import time
import requests
requests.packages.urllib3.disable_warnings()
from os import environ
try:
from ConfigParser import ConfigParser # py2
except:
from configparser import ConfigParser # py3
from pprint import pprint
from requests_toolbelt import MultipartEncoder
from biokbase.workspace.client import Workspace as workspaceService
from biokbase.AbstractHandle.Client import AbstractHandle as HandleService
from kb_trimmomatic.kb_trimmomaticImpl import kb_trimmomatic
from kb_trimmomatic.kb_trimmomaticServer import MethodContext
from kb_trimmomatic.authclient import KBaseAuth as _KBaseAuth
class kb_trimmomaticTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = environ.get('KB_AUTH_TOKEN', None)
cls.token = token
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('kb_trimmomatic'):
cls.cfg[nameval[0]] = nameval[1]
authServiceUrl = cls.cfg.get('auth-service-url',
"https://kbase.us/services/authorization/Sessions/Login")
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(cls.token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'kb_trimmomatic',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.shockURL = cls.cfg['shock-url']
cls.handleURL = cls.cfg['handle-service-url']
cls.serviceWizardURL = cls.cfg['service-wizard-url']
cls.wsClient = workspaceService(cls.wsURL, token=token)
cls.serviceImpl = kb_trimmomatic(cls.cfg)
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
if hasattr(cls, 'shock_ids'):
for shock_id in cls.shock_ids:
print('Deleting SHOCK node: '+str(shock_id))
cls.delete_shock_node(shock_id)
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_kb_trimmomatic_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName})
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
@classmethod
def upload_file_to_shock(cls, file_path):
"""
Use HTTP multi-part POST to save a file to a SHOCK instance.
"""
header = dict()
header["Authorization"] = "Oauth {0}".format(cls.token)
if file_path is None:
raise Exception("No file given for upload to SHOCK!")
if not file_path.startswith(os.sep):
file_path = os.path.join(os.sep, 'kb','module','test',file_path)
with open(os.path.abspath(file_path), 'rb') as dataFile:
files = {'upload': dataFile}
response = requests.post(
cls.shockURL + '/node', headers=header, files=files,
stream=True, allow_redirects=True, timeout=30)
if not response.ok:
response.raise_for_status()
result = response.json()
if result['error']:
raise Exception(result['error'][0])
else:
shock_id = result['data']['id']
if not hasattr(cls, 'shock_ids'):
cls.shock_ids = []
cls.shock_ids.append(shock_id)
return result["data"]
@classmethod
def delete_shock_node(cls, node_id):
header = {'Authorization': 'Oauth {0}'.format(cls.token)}
requests.delete(cls.shockURL + '/node/' + node_id, headers=header,
allow_redirects=True)
print('Deleted shock node ' + node_id)
def getSingleEndLibInfo(self, read_lib_basename, lib_i=0):
if hasattr(self.__class__, 'singleEndLibInfo_list'):
try:
info = self.__class__.singleEndLibInfo_list[lib_i]
name = self.__class__.singleEndLibName_list[lib_i]
if info != None:
if name != read_lib_basename:
                        self.__class__.singleEndLibInfo_list[lib_i] = None
                        self.__class__.singleEndLibName_list[lib_i] = None
else:
return info
except:
pass
# 1) upload files to shock
token = self.ctx['token']
forward_shock_file = self.upload_file_to_shock('data/'+read_lib_basename+'.fwd.fq')
#pprint(forward_shock_file)
# 2) create handle
hs = HandleService(url=self.handleURL, token=token)
forward_handle = hs.persist_handle({
'id' : forward_shock_file['id'],
'type' : 'shock',
'url' : self.shockURL,
'file_name': forward_shock_file['file']['name'],
'remote_md5': forward_shock_file['file']['checksum']['md5']})
# 3) save to WS
single_end_library = {
'lib': {
'file': {
'hid':forward_handle,
'file_name': forward_shock_file['file']['name'],
'id': forward_shock_file['id'],
'url': self.shockURL,
'type':'shock',
'remote_md5':forward_shock_file['file']['checksum']['md5']
},
'encoding':'UTF8',
'type':'fastq',
'size':forward_shock_file['file']['size']
},
'sequencing_tech':'artificial reads'
}
new_obj_info = self.wsClient.save_objects({
'workspace':self.getWsName(),
'objects':[
{
'type':'KBaseFile.SingleEndLibrary',
'data':single_end_library,
'name':'test-'+str(lib_i)+'.se.reads',
'meta':{},
'provenance':[
{
'service':'kb_trimmomatic',
'method':'test_runTrimmomatic'
}
]
}]
})[0]
# store it
if not hasattr(self.__class__, 'singleEndLibInfo_list'):
self.__class__.singleEndLibInfo_list = []
self.__class__.singleEndLibName_list = []
for i in range(lib_i+1):
try:
assigned = self.__class__.singleEndLibInfo_list[i]
except:
self.__class__.singleEndLibInfo_list.append(None)
self.__class__.singleEndLibName_list.append(None)
self.__class__.singleEndLibInfo_list[lib_i] = new_obj_info
self.__class__.singleEndLibName_list[lib_i] = read_lib_basename
return new_obj_info
def getPairedEndLibInfo(self, read_lib_basename, lib_i=0):
if hasattr(self.__class__, 'pairedEndLibInfo_list'):
try:
info = self.__class__.pairedEndLibInfo_list[lib_i]
name = self.__class__.pairedEndLibName_list[lib_i]
if info != None:
if name != read_lib_basename:
                        self.__class__.pairedEndLibInfo_list[lib_i] = None
                        self.__class__.pairedEndLibName_list[lib_i] = None
else:
return info
except:
pass
# 1) upload files to shock
token = self.ctx['token']
forward_shock_file = self.upload_file_to_shock('data/'+read_lib_basename+'.fwd.fq')
reverse_shock_file = self.upload_file_to_shock('data/'+read_lib_basename+'.rev.fq')
#pprint(forward_shock_file)
#pprint(reverse_shock_file)
# 2) create handle
hs = HandleService(url=self.handleURL, token=token)
forward_handle = hs.persist_handle({
'id' : forward_shock_file['id'],
'type' : 'shock',
'url' : self.shockURL,
'file_name': forward_shock_file['file']['name'],
'remote_md5': forward_shock_file['file']['checksum']['md5']})
reverse_handle = hs.persist_handle({
'id' : reverse_shock_file['id'],
'type' : 'shock',
'url' : self.shockURL,
'file_name': reverse_shock_file['file']['name'],
'remote_md5': reverse_shock_file['file']['checksum']['md5']})
# 3) save to WS
paired_end_library = {
'lib1': {
'file': {
'hid':forward_handle,
'file_name': forward_shock_file['file']['name'],
'id': forward_shock_file['id'],
'url': self.shockURL,
'type':'shock',
'remote_md5':forward_shock_file['file']['checksum']['md5']
},
'encoding':'UTF8',
'type':'fastq',
'size':forward_shock_file['file']['size']
},
'lib2': {
'file': {
'hid':reverse_handle,
'file_name': reverse_shock_file['file']['name'],
'id': reverse_shock_file['id'],
'url': self.shockURL,
'type':'shock',
'remote_md5':reverse_shock_file['file']['checksum']['md5']
},
'encoding':'UTF8',
'type':'fastq',
'size':reverse_shock_file['file']['size']
},
'interleaved':0,
'sequencing_tech':'artificial reads'
}
new_obj_info = self.wsClient.save_objects({
'workspace':self.getWsName(),
'objects':[
{
'type':'KBaseFile.PairedEndLibrary',
'data':paired_end_library,
'name':'test-'+str(lib_i)+'.pe.reads',
'meta':{},
'provenance':[
{
'service':'kb_trimmomatic',
'method':'test_runTrimmomatic'
}
]
}]
})[0]
# store it
if not hasattr(self.__class__, 'pairedEndLibInfo_list'):
self.__class__.pairedEndLibInfo_list = []
self.__class__.pairedEndLibName_list = []
for i in range(lib_i+1):
try:
assigned = self.__class__.pairedEndLibInfo_list[i]
except:
self.__class__.pairedEndLibInfo_list.append(None)
self.__class__.pairedEndLibName_list.append(None)
self.__class__.pairedEndLibInfo_list[lib_i] = new_obj_info
self.__class__.pairedEndLibName_list[lib_i] = read_lib_basename
return new_obj_info
def getSingleEndLib_SampleSetInfo(self, read_libs_basename_list, refresh=False):
if hasattr(self.__class__, 'singleEndLib_SampleSetInfo'):
try:
info = self.__class__.singleEndLib_SampleSetInfo
if info != None:
if refresh:
self.__class__.singleEndLib_SampleSetInfo = None
else:
return info
except:
pass
sample_ids = list()
conditions = list()
for lib_i, read_lib_basename in enumerate(read_libs_basename_list):
label = read_lib_basename
lib_info = self.getSingleEndLibInfo(read_lib_basename, lib_i)
lib_ref = str(lib_info[6])+'/'+str(lib_info[0])
print ("LIB_REF["+str(lib_i)+"]: "+lib_ref+" "+read_lib_basename) # DEBUG
sample_ids.append(lib_ref)
conditions.append(label)
desc = "test sample set"
name = "TEST_SAMPLE_SET"
sampleset_obj = {
"sample_ids": sample_ids,
"condition": conditions,
"sampleset_id": "foo",
"sampleset_desc": desc,
"domain": "prokaryota",
"num_samples": len(sample_ids),
"Library_type": "SingleEnd"
}
sample_set_info = self.wsClient.save_objects({
"workspace": self.getWsName(),
"objects": [{
"type": "KBaseRNASeq.RNASeqSampleSet",
"data": sampleset_obj,
"name": name,
"meta": {},
"provenance": [
{
"service": "kb_trimmomatic",
"method": "test_runTrimmomatic"
}
]
}]
})[0]
# store it
self.__class__.singleEndLib_SampleSetInfo = sample_set_info
return sample_set_info
# call this method to get the WS object info of a Single End Library Set (will
# upload the example data if this is the first time the method is called during tests)
def getSingleEndLib_SetInfo(self, read_libs_basename_list, refresh=False):
if hasattr(self.__class__, 'singleEndLib_SetInfo'):
try:
info = self.__class__.singleEndLib_SetInfo
if info != None:
if refresh:
self.__class__.singleEndLib_SetInfo = None
else:
return info
except:
pass
        # build items and save each SingleEndLib
items = []
for lib_i,read_lib_basename in enumerate (read_libs_basename_list):
label = read_lib_basename
lib_info = self.getSingleEndLibInfo (read_lib_basename, lib_i)
lib_ref = str(lib_info[6])+'/'+str(lib_info[0])
print ("LIB_REF["+str(lib_i)+"]: "+lib_ref+" "+read_lib_basename) # DEBUG
items.append({'ref': lib_ref,
'label': label
#'data_attachment': ,
#'info':
})
# save readsset
desc = 'test ReadsSet'
readsSet_obj = { 'description': desc,
'items': items
}
name = 'TEST_READSET'
new_obj_info = self.wsClient.save_objects({
'workspace':self.getWsName(),
'objects':[
{
'type':'KBaseSets.ReadsSet',
'data':readsSet_obj,
'name':name,
'meta':{},
'provenance':[
{
'service':'kb_trimmomatic',
'method':'test_runTrimmomatic'
}
]
}]
})[0]
# store it
self.__class__.singleEndLib_SetInfo = new_obj_info
return new_obj_info
# call this method to get the WS object info of a Paired End Library Set (will
# upload the example data if this is the first time the method is called during tests)
def getPairedEndLib_SetInfo(self, read_libs_basename_list, refresh=False):
if hasattr(self.__class__, 'pairedEndLib_SetInfo'):
try:
info = self.__class__.pairedEndLib_SetInfo
if info != None:
if refresh:
                        self.__class__.pairedEndLib_SetInfo = None
else:
return info
except:
pass
# build items and save each PairedEndLib
items = []
for lib_i,read_lib_basename in enumerate (read_libs_basename_list):
label = read_lib_basename
lib_info = self.getPairedEndLibInfo (read_lib_basename, lib_i)
lib_ref = str(lib_info[6])+'/'+str(lib_info[0])
print ("LIB_REF["+str(lib_i)+"]: "+lib_ref+" "+read_lib_basename) # DEBUG
items.append({'ref': lib_ref,
'label': label
#'data_attachment': ,
#'info':
})
# save readsset
desc = 'test ReadsSet'
readsSet_obj = { 'description': desc,
'items': items
}
name = 'TEST_READSET'
new_obj_info = self.wsClient.save_objects({
'workspace':self.getWsName(),
'objects':[
{
'type':'KBaseSets.ReadsSet',
'data':readsSet_obj,
'name':name,
'meta':{},
'provenance':[
{
'service':'kb_trimmomatic',
'method':'test_runTrimmomatic'
}
]
}]
})[0]
# store it
self.__class__.pairedEndLib_SetInfo = new_obj_info
return new_obj_info
##############
# UNIT TESTS #
##############
    # NOTE: According to Python unittest naming rules, test method names should start with 'test'.
#
# Prepare test objects in workspace if needed using
# self.getWsClient().save_objects({'workspace': self.getWsName(), 'objects': []})
#
# Run your method by
# ret = self.getImpl().your_method(self.getContext(), parameters...)
#
# Check returned data with
# self.assertEqual(ret[...], ...) or other unittest methods
# Object Info Contents
# 0 - obj_id objid
# 1 - obj_name name
# 2 - type_string type
# 3 - timestamp save_date
# 4 - int version
# 5 - username saved_by
# 6 - ws_id wsid
# 7 - ws_name workspace
# 8 - string chsum
# 9 - int size
# 10 - usermeta meta
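    # For example (illustrative ids only), the tests below build a full object
    # reference as str(info[6]) + '/' + str(info[0])  ->  '12345/2' (ws_id/obj_id),
    # and use info[7] for the workspace name and info[2] for the type string.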
### TEST 1: run Trimmomatic against just one single end library
#
# Uncomment to skip this test
#HIDE @unittest.skip("skipped test_runTrimmomatic_SingleEndLibrary()")
def test_runTrimmomatic_SingleEndLibrary(self):
print ("\n\nRUNNING: test_runTrimmomatic_SingleEndLibrary()")
print ("===============================================\n\n")
# figure out where the test data lives
se_lib_info = self.getSingleEndLibInfo('test_quick')
pprint(se_lib_info)
# run method
output_name = 'output_trim.SElib'
params = {
'input_ws': se_lib_info[7],
'output_ws': se_lib_info[7],
'input_reads_ref': str(se_lib_info[6])+'/'+str(se_lib_info[0]),
'output_reads_name': output_name,
#'read_type': 'SE',
#'quality_encoding': 'phred33',
'translate_to_phred33': 1,
'adapter_clip': {
'adapterFa': None,
'seed_mismatches': None,
'palindrom_clip_threshold': None,
'simple_clip_threshold': None
},
'sliding_window': {
'sliding_window_size': 4,
'sliding_window_min_size': 15
},
'leading_min_quality': 3,
'trailing_min_quality': 3,
'crop_length': 0,
'head_crop_length': 0,
'min_length': 36
}
result = self.getImpl().runTrimmomatic(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
single_output_name = output_name
info_list = self.wsClient.get_object_info([{'ref':se_lib_info[7] + '/' + single_output_name}], 1)
self.assertEqual(len(info_list),1)
trimmed_reads_info = info_list[0]
self.assertEqual(trimmed_reads_info[1],single_output_name)
self.assertEqual(trimmed_reads_info[2].split('-')[0],'KBaseFile.SingleEndLibrary')
### TEST 2: run Trimmomatic against just one paired end library
#
# Uncomment to skip this test
#HIDE @unittest.skip("skipped test_runTrimmomatic_PairedEndLibrary()")
def test_runTrimmomatic_PairedEndLibrary(self):
print ("\n\nRUNNING: test_runTrimmomatic_PairedEndLibrary()")
print ("\n=============================================\n\n")
# figure out where the test data lives
pe_lib_info = self.getPairedEndLibInfo('test_quick')
pprint(pe_lib_info)
# run method
output_name = 'output_trim.PElib'
params = {
'input_ws': pe_lib_info[7],
'output_ws': pe_lib_info[7],
'input_reads_ref': str(pe_lib_info[6])+'/'+str(pe_lib_info[0]),
'output_reads_name': output_name,
#'read_type': 'PE',
#'quality_encoding': 'phred33',
'translate_to_phred33': 1,
'adapter_clip': {
'adapterFa': None,
'seed_mismatches': None,
'palindrom_clip_threshold': None,
'simple_clip_threshold': None
},
'sliding_window': {
'sliding_window_size': 4,
'sliding_window_min_size': 15
},
'leading_min_quality': 3,
'trailing_min_quality': 3,
'crop_length': 0,
'head_crop_length': 0,
'min_length': 36
}
result = self.getImpl().runTrimmomatic(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
paired_output_name = output_name + '_paired'
info_list = self.wsClient.get_object_info([{'ref':pe_lib_info[7] + '/' + paired_output_name}], 1)
self.assertEqual(len(info_list),1)
trimmed_reads_info = info_list[0]
self.assertEqual(trimmed_reads_info[1],paired_output_name)
self.assertEqual(trimmed_reads_info[2].split('-')[0],'KBaseFile.PairedEndLibrary')
### TEST 3: run Trimmomatic against a Single End Library reads set
#
# Uncomment to skip this test
#HIDE @unittest.skip("skipped test_runTrimmomatic_SingleEndLibrary_ReadsSet()")
def test_runTrimmomatic_SingleEndLibrary_ReadsSet(self):
print ("\n\nRUNNING: test_runTrimmomatic_SingleEndLibrary_ReadsSet()")
print ("========================================================\n\n")
# figure out where the test data lives
se_lib_set_info = self.getSingleEndLib_SetInfo(['test_quick','small_2'])
pprint(se_lib_set_info)
# run method
output_name = 'output_trim.SElib'
params = {
'input_ws': se_lib_set_info[7],
'output_ws': se_lib_set_info[7],
'input_reads_ref': str(se_lib_set_info[6])+'/'+str(se_lib_set_info[0]),
'output_reads_name': output_name,
#'read_type': 'SE',
#'quality_encoding': 'phred33',
'translate_to_phred33': 1,
'adapter_clip': {
'adapterFa': None,
'seed_mismatches': None,
'palindrom_clip_threshold': None,
'simple_clip_threshold': None
},
'sliding_window': {
'sliding_window_size': 4,
'sliding_window_min_size': 15
},
'leading_min_quality': 3,
'trailing_min_quality': 3,
'crop_length': 0,
'head_crop_length': 0,
'min_length': 36
}
result = self.getImpl().runTrimmomatic(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
single_output_name = output_name + '_trimm'
info_list = self.wsClient.get_object_info([{'ref':se_lib_set_info[7] + '/' + single_output_name}], 1)
self.assertEqual(len(info_list),1)
trimmed_reads_info = info_list[0]
self.assertEqual(trimmed_reads_info[1],single_output_name)
self.assertEqual(trimmed_reads_info[2].split('-')[0],'KBaseSets.ReadsSet')
### TEST 4: run Trimmomatic against a Paired End Library reads set
#
# Uncomment to skip this test
#HIDE @unittest.skip("skipped test_runTrimmomatic_PairedEndLibrary_ReadsSet()")
def test_runTrimmomatic_PairedEndLibrary_ReadsSet(self):
print ("\n\nRUNNING: test_runTrimmomatic_PairedEndLibrary_ReadsSet()")
print ("========================================================\n\n")
# figure out where the test data lives
pe_lib_set_info = self.getPairedEndLib_SetInfo(['test_quick','small_2'])
pprint(pe_lib_set_info)
# run method
output_name = 'output_trim.PElib'
params = {
'input_ws': pe_lib_set_info[7],
'output_ws': pe_lib_set_info[7],
'input_reads_ref': str(pe_lib_set_info[6])+'/'+str(pe_lib_set_info[0]),
'output_reads_name': output_name,
#'read_type': 'PE',
#'quality_encoding': 'phred33',
'translate_to_phred33': 1,
'adapter_clip': {
'adapterFa': None,
'seed_mismatches': None,
'palindrom_clip_threshold': None,
'simple_clip_threshold': None
},
'sliding_window': {
'sliding_window_size': 4,
'sliding_window_min_size': 15
},
'leading_min_quality': 3,
'trailing_min_quality': 3,
'crop_length': 0,
'head_crop_length': 0,
'min_length': 36
}
result = self.getImpl().runTrimmomatic(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
paired_output_name = output_name + '_trimm_paired'
info_list = self.wsClient.get_object_info([{'ref':pe_lib_set_info[7] + '/' + paired_output_name}], 1)
self.assertEqual(len(info_list),1)
trimmed_reads_info = info_list[0]
self.assertEqual(trimmed_reads_info[1],paired_output_name)
self.assertEqual(trimmed_reads_info[2].split('-')[0],'KBaseSets.ReadsSet')
### TEST 5: run Trimmomatic against a Single End Library sample set
#
# Uncomment to skip this test
#HIDE @unittest.skip("skipped test_runTrimmomatic_SingleEndLibrary_SampleSet()")
def test_runTrimmomatic_SingleEndLibrary_SampleSet(self):
print ("\n\nRUNNING: test_runTrimmomatic_SingleEndLibrary_SampleSet()")
print ("========================================================\n\n")
# figure out where the test data lives
se_lib_sampleset_info = self.getSingleEndLib_SampleSetInfo(['test_quick', 'small_2'])
pprint(se_lib_sampleset_info)
# run method
output_name = 'output_trim.SElib'
params = {
'input_ws': se_lib_sampleset_info[7],
'output_ws': se_lib_sampleset_info[7],
'input_reads_ref': str(se_lib_sampleset_info[6])+'/'+str(se_lib_sampleset_info[0]),
'output_reads_name': output_name,
#'read_type': 'SE',
#'quality_encoding': 'phred33',
'translate_to_phred33': 1,
'adapter_clip': {
'adapterFa': None,
'seed_mismatches': None,
'palindrom_clip_threshold': None,
'simple_clip_threshold': None
},
'sliding_window': {
'sliding_window_size': 4,
'sliding_window_min_size': 15
},
'leading_min_quality': 3,
'trailing_min_quality': 3,
'crop_length': 0,
'head_crop_length': 0,
'min_length': 36
}
result = self.getImpl().runTrimmomatic(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
single_output_name = output_name + '_trimm'
info_list = self.wsClient.get_object_info([{'ref':se_lib_sampleset_info[7] + '/' + single_output_name}], 1)
self.assertEqual(len(info_list),1)
trimmed_reads_info = info_list[0]
self.assertEqual(trimmed_reads_info[1],single_output_name)
self.assertEqual(trimmed_reads_info[2].split('-')[0],'KBaseSets.ReadsSet')
### TEST 6: run Trimmomatic with data that doesn't get trimmed, check report output.
#
# Uncomment to skip this test
#HIDE @unittest.skip("skipped test_runTrimmomatic_SingleEndLibrary_no_trimming()")
def test_runTrimmomatic_SingleEndLibrary_no_trimming(self):
print("\n\nRUNNING: test_runTrimmomatic_SingleEndLibrary_no_trimming")
print("---------------------------------------------------------\n\n")
# figure out where the test data lives
se_lib_info = self.getSingleEndLibInfo('small_no_trim')
#se_lib_info = self.getSingleEndLibInfo('test_quick')
pprint(se_lib_info)
# run method
output_name = 'output_no_trim.SElib'
params = {
'input_ws': se_lib_info[7],
'output_ws': se_lib_info[7],
'input_reads_ref': str(se_lib_info[6])+'/'+str(se_lib_info[0]),
'output_reads_name': output_name,
#'read_type': 'SE',
#'quality_encoding': 'phred33',
'translate_to_phred33': 1,
'adapter_clip': {
'adapterFa': None,
'seed_mismatches': None,
'palindrom_clip_threshold': None,
'simple_clip_threshold': None
},
'sliding_window': {
'sliding_window_size': 4,
'sliding_window_min_size': 15
},
'leading_min_quality': 3,
'trailing_min_quality': 3,
'crop_length': 0,
'head_crop_length': 0,
'min_length': 36
}
result = self.getImpl().runTrimmomatic(self.getContext(),params)
# check the output
single_output_name = output_name
info_list = self.wsClient.get_object_info([{'ref':se_lib_info[7] + '/' + single_output_name}], 1)
self.assertEqual(len(info_list),1)
trimmed_reads_info = info_list[0]
self.assertEqual(trimmed_reads_info[1],single_output_name)
self.assertEqual(trimmed_reads_info[2].split('-')[0],'KBaseFile.SingleEndLibrary')
### TEST 7: run Trimmomatic with data that gets completely trimmed, check report output.
#
# Uncomment to skip this test
#HIDE @unittest.skip("skipped test_runTrimmomatic_SingleEndLibrary_all_trimming()")
def test_runTrimmomatic_SingleEndLibrary_all_trimming(self):
print("\n\nRUNNING: test_runTrimmomatic_SingleEndLibrary_all_trimming")
print("---------------------------------------------------------\n\n")
# figure out where the test data lives
se_lib_info = self.getSingleEndLibInfo('small_all_trim')
#se_lib_info = self.getSingleEndLibInfo('test_quick')
pprint(se_lib_info)
# run method
output_name = 'output_all_trim.SElib'
params = {
'input_ws': se_lib_info[7],
'output_ws': se_lib_info[7],
'input_reads_ref': str(se_lib_info[6])+'/'+str(se_lib_info[0]),
'output_reads_name': output_name,
#'read_type': 'SE',
#'quality_encoding': 'phred33',
'translate_to_phred33': 1,
'adapter_clip': {
'adapterFa': None,
'seed_mismatches': None,
'palindrom_clip_threshold': None,
'simple_clip_threshold': None
},
'sliding_window': {
'sliding_window_size': 4,
'sliding_window_min_size': 15
},
'leading_min_quality': 3,
'trailing_min_quality': 30,
'crop_length': 0,
'head_crop_length': 0,
'min_length': 1000
}
result = self.getImpl().runTrimmomatic(self.getContext(), params)
# check the output, ensure it ends as a no-op
with self.assertRaises(Exception):
self.wsClient.get_object_info([{'ref': se_lib_info[7] + '/' + output_name}], 1)
report_obj = self.wsClient.get_objects([{'ref': result[0]['report_ref']}])[0]
# moved to separate HTML object, so can't just read buf
#self.assertIn('Input Reads', report_obj['data']['direct_html'])
#self.assertIn('Surviving', report_obj['data']['direct_html'])
#self.assertIn('Dropped', report_obj['data']['direct_html'])
### TEST 8: run Trimmomatic with q64 data that gets translated to q33
#
# Uncomment to skip this test
#HIDE @unittest.skip("skipped test_runTrimmomatic_SingleEndLibrary_q64_to_q33()")
def test_runTrimmomatic_SingleEndLibrary_translate_q64_to_q33(self):
print("\n\nRUNNING: test_runTrimmomatic_SingleEndLibrary_translate_q64_to_q33")
print("------------------------------------------------------------------\n\n")
# figure out where the test data lives
se_lib_info = self.getSingleEndLibInfo('small_all_trim') # q64
pprint(se_lib_info)
# run method
output_name = 'output_trim_q64_to_q33.SElib'
params = {
'input_ws': se_lib_info[7],
'output_ws': se_lib_info[7],
'input_reads_ref': str(se_lib_info[6])+'/'+str(se_lib_info[0]),
'output_reads_name': output_name,
#'read_type': 'SE',
#'quality_encoding': 'phred33',
'translate_to_phred33': 1,
'adapter_clip': {
'adapterFa': None,
'seed_mismatches': None,
'palindrom_clip_threshold': None,
'simple_clip_threshold': None
},
'sliding_window': {
'sliding_window_size': 4,
'sliding_window_min_size': 15
},
'leading_min_quality': 3,
'trailing_min_quality': 3,
'crop_length': 0,
'head_crop_length': 0,
'min_length': 20
}
result = self.getImpl().runTrimmomatic(self.getContext(),params)
print('RESULT:')
pprint(result)
# check the output
single_output_name = output_name
info_list = self.wsClient.get_object_info([{'ref':se_lib_info[7] + '/' + single_output_name}], 1)
self.assertEqual(len(info_list),1)
trimmed_reads_info = info_list[0]
self.assertEqual(trimmed_reads_info[1],single_output_name)
self.assertEqual(trimmed_reads_info[2].split('-')[0],'KBaseFile.SingleEndLibrary')
``` |
{
"source": "jmchandonia/narrative",
"score": 4
} |
#### File: narrative/common/kvp.py
```python
import re
KVP_EXPR = re.compile(r"""
(?:
\s* # leading whitespace
([0-9a-zA-Z_.\-]+) # Name
=
(?: # Value:
([^"\s]+) | # - simple value
"((?:[^"] | (?<=\\)")*)" # - quoted string
)
\s*
) |
([^= ]+) # Text w/o key=value
""", flags=re.X)
def parse_kvp(msg, record, text_sep=' '):
"""
Parse key-value pairs, adding to record in-place.
:param msg: Input string
:param record: In/out dict
:param text_sep: Separator for output text pieces
:return: All non-KVP as a string, joined by `text_sep`
"""
text = []
for n, v, vq, txt in KVP_EXPR.findall(msg):
if n:
if vq:
v = vq.replace('\\"', '"')
# add this KVP to output dict
record[n] = v
else:
text.append(txt)
return text_sep.join(text)
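# Minimal usage sketch (added for illustration; not part of the original module).
# The sample message below is made up and only shows how KVPs and free text are
# separated by parse_kvp().
if __name__ == '__main__':
    record = {}
    rest = parse_kvp('event=login user=alice note="hi there" please review', record)
    print(record)  # {'event': 'login', 'user': 'alice', 'note': 'hi there'}
    print(rest)    # 'please review'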
```
#### File: narrative/tests/test_kbtypes.py
```python
import unittest
import argparse
import StringIO
import os
import re
from biokbase.narrative.common import kbtypes
from traitlets import HasTraits
__author__ = "<NAME> <<EMAIL>>"
__date__ = "2013-12-09"
class HasVersion(HasTraits):
ver = kbtypes.VersionNumber('0.0.0')
def skipUnlessCreds(fn):
def wrapper(*a, **k):
if 'KBASE_CREDS' in os.environ:
return fn(*a, **k)
else:
raise unittest.SkipTest("Environment doesn't have KBASE_CREDS=<user>:<pass>")
return wrapper
class TestKbaseTypes(unittest.TestCase):
"""Test basic behavior of KBase types.
"""
user, password = None, None
def setUp(self):
self.args = argparse.Namespace(url=None, vb=0, bfile=None)
self.args.user = self.user
setattr(self.args, 'pass', self.password)
@classmethod
def setUpClass(cls):
cls._types = None
if 'KBASE_CREDS' in os.environ:
cls.user, cls.password = os.environ['KBASE_CREDS'].split(':')
@classmethod
def tearDownClass(cls):
cls._types = None
def _get_types(self, r):
"""Limit overhead of fetching types to once per test run.
"""
if self._types is None:
TestKbaseTypes._types = r.get_types()
return self._types
def tearDown(self):
pass
def test_name(self):
"""Test type name.
"""
g = kbtypes.KBaseGenome1()
self.assertEqual(str(g), 'KBaseGenomes.Genome-1.0')
g = kbtypes.KBaseGenome3()
self.assertEqual(str(g), 'KBaseGenomes.Genome-3.0')
def test_version_bad(self):
for bad_input in ("Mr. Robinson", "a.b.c", "1-2-3", "0.1.-1", (0, 1, -1)):
msg = "bad input {} passed validation".format(bad_input)
self.shouldRaise(kbtypes.KBTypeError, msg,
HasVersion, ver=bad_input)
def test_version_good(self):
for good_input, value in (("0.1.1", ('0', '1', '1')),
("13.14.97", ('13', '14', '97')),
("2.7.7-a+foo", ('2', '7', '7-a+foo'))):
self.assertEqual(value, HasVersion(ver=good_input).ver)
def shouldRaise(self, exc, msg, fn, *arg, **kwargs):
try:
fn(*arg, **kwargs)
if msg is None:
msg = "{}{} did not raise {}".format(fn.__name__, arg, str(exc))
self.assert_(False, msg)
except exc:
pass
@skipUnlessCreds
def test_get_types(self):
"""Regenerator.get_types
"""
r = kbtypes.Regenerator(self.args)
t = self._get_types(r)
self.assert_(t)
@skipUnlessCreds
def test_multiple_versions(self):
"""Test multiple versions of the same type.
"""
r = kbtypes.Regenerator(self.args)
t = self._get_types(r)
# Insert extra version.
for modname in t.keys():
for typename in t[modname].keys():
t[modname][typename]["9_9"] = {'description': "TEST99"}
# Capture output classes
w = StringIO.StringIO()
r.write_types(w, t)
# Check if versions are recorded properly
# by looking at the output
buf, prev_depth, num_ver, cur_mod = w.getvalue(), 0, 0, "?"
#print("@@ BUF: {}".format(buf))
for line in buf.split("\n"):
match = re.search("\S", line)
if match is None:
continue # blank line
depth = match.span()[0] / 4 # calc. depth by #leading spaces
if depth == 0:
match = re.match("class (\w+)", line)
if match is None:
self.fail("junk on line: {}".format(line))
cur_mod = match.group(1)
if depth == 2 and re.search("class v\d+_\d+\(.*\):", line):
num_ver += 1
# when we finish a class, check that we found at least 2 versions
if depth == 0 and prev_depth > 0:
self.assertGreaterEqual(num_ver, 2,
"{}: Expected >=2 versions, got {:d}"
.format(cur_mod, num_ver))
num_ver = 0
prev_depth = depth
if __name__ == '__main__':
unittest.main()
```
#### File: narrative/tests/test_url_config.py
```python
from biokbase.narrative.common.url_config import URLS
import unittest
class UrlConfigTest(unittest.TestCase):
def test_getter(self):
url = URLS.workspace
self.assertTrue(url.endswith('/services/ws'))
def test_get_url(self):
url = URLS.get_url('workspace')
self.assertTrue(url.endswith('/services/ws'))
def test_missing_url(self):
with self.assertRaises(Exception):
url = URLS.nope
if __name__ == "__main__":
unittest.main()
```
#### File: test/selenium_scripts/kb-test-narrative-app.py
```python
import json
import os
import re
import subprocess
import sys
import time
import uuid
import urllib2
import base64
from optparse import OptionParser
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import UnexpectedAlertPresentException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0
from selenium.webdriver.support import expected_conditions as EC # available since 2.26.0
VERSION = '1'
API_URL = "http://api.metagenomics.anl.gov/"+VERSION
AUTH_LIST = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
PAGE_LOAD_TIMEOUT = 120 # seconds
APP_RUN_TIMEOUT = 120 # seconds
SUPPORTED_FIELD_TYPES = [ "text", "dropdown" ]
prehelp = """
NAME
kb-test-narrative-app
VERSION
%s
SYNOPSIS
kb-test-narrative-app [ --help, --config_file <json input file>, --user <string>, --passwd <string>, --url <string> --output <png output file> ]
DESCRIPTION
Tool to test a narrative app.
"""
posthelp = """
Input
1. A json input file with the ws ID, app name, and app parameters (config_file, required).
2. KBase username (user, required to login to web UI - may be embedded in config_file)
3. KBase password (passwd, required to login to web UI - may be embedded in config_file)
4. The base url for the narrative server (url, default=https://narrative-test.kbase.us)
Output
1. A screenshot (png format) of the narrative interface upon app test completion (output, default=<wsid>.<app_name>.png).
2. Test report to STDOUT.
EXAMPLES
kb-test-narrative-app --help
SEE ALSO
-
AUTHORS
%s
"""
def get_auth_token(opts):
if 'KB_AUTH_TOKEN' in os.environ:
return os.environ['KB_AUTH_TOKEN']
if opts.user or opts.passwd:
if opts.user and opts.passwd:
return token_from_login(opts.user, opts.passwd)
else:
sys.stderr.write("ERROR: both username and password are required\n")
sys.exit(1)
else:
return None
def token_from_login(user, passwd):
auth = 'kb<PASSWORD>'+base64.b64encode('%s:%s' %(user, passwd)).replace('\n', '')
data = obj_from_url(API_URL, auth=auth)
return data['token']
# return python struct from JSON output of MG-RAST API
def obj_from_url(url, auth=None, data=None, debug=False):
header = {'Accept': 'application/json'}
if auth:
header['Auth'] = auth
if data:
header['Content-Type'] = 'application/json'
if debug:
if data:
print "data:\t"+data
print "header:\t"+json.dumps(header)
print "url:\t"+url
try:
req = urllib2.Request(url, data, headers=header)
res = urllib2.urlopen(req)
except urllib2.HTTPError, error:
if debug:
sys.stderr.write("URL: %s\n" %url)
try:
eobj = json.loads(error.read())
sys.stderr.write("ERROR (%s): %s\n" %(error.code, eobj['ERROR']))
except:
sys.stderr.write("ERROR (%s): %s\n" %(error.code, error.read()))
finally:
sys.exit(1)
if not res:
if debug:
sys.stderr.write("URL: %s\n" %url)
sys.stderr.write("ERROR: no results returned\n")
sys.exit(1)
obj = json.loads(res.read())
if obj is None:
if debug:
sys.stderr.write("URL: %s\n" %url)
sys.stderr.write("ERROR: return structure not valid json format\n")
sys.exit(1)
if len(obj.keys()) == 0:
if debug:
sys.stderr.write("URL: %s\n" %url)
sys.stderr.write("ERROR: no data available\n")
sys.exit(1)
if 'ERROR' in obj:
if debug:
sys.stderr.write("URL: %s\n" %url)
sys.stderr.write("ERROR: %s\n" %obj['ERROR'])
sys.exit(1)
return obj
def main(args):
OptionParser.format_description = lambda self, formatter: self.description
OptionParser.format_epilog = lambda self, formatter: self.epilog
parser = OptionParser(usage='', description=prehelp%VERSION, epilog=posthelp%AUTH_LIST)
parser.add_option("", "--user", dest="user", default=None, help="OAuth username")
parser.add_option("", "--passwd", dest="passwd", default=None, help="OAuth password")
parser.add_option("", "--config_file", dest="config_file", default=None, help="A json input file with the app parameters")
parser.add_option("", "--url", dest="url", default="https://narrative-test.kbase.us", help="The base url for the narrative server")
parser.add_option("", "--output", dest="output", default=None, help="Output filename for screenshot of browser after test completion")
(opts, args) = parser.parse_args()
if not opts.config_file:
sys.stderr.write("ERROR: missing required parameter: config_file\n")
return 1
indata = open(opts.config_file, 'r').read()
config = json.loads(indata)
if config is None:
sys.stderr.write("ERROR: config_file structure not valid json format\n")
sys.exit(1)
for i,j in enumerate(config):
if j == 'wsid':
opts.wsid = config[j]
elif j == 'user':
opts.user = config[j]
elif j == 'passwd':
opts.passwd = config[j]
elif j == 'app_name':
opts.app_name = config[j]
for i,j in enumerate(config["params"]):
for k in ['name', 'type', 'value']:
if k not in config["params"][i]:
sys.stderr.write("ERROR: config_file contains a parameter missing one of the required fields (name, type, value)\n")
sys.exit(1)
if k == 'type' and config["params"][i][k] not in SUPPORTED_FIELD_TYPES:
sys.stderr.write("ERROR: parameter type not supported: %s\n"%config["params"][i][k])
sys.exit(1)
for o in ['user', 'passwd', 'wsid', 'app_name']:
if not getattr(opts, o):
sys.stderr.write("ERROR: missing required parameter: " + o + "\n")
return 1
if not opts.output:
opts.output = "screenshot." + opts.app_name + "." + opts.wsid + ".png"
token = get_auth_token(opts)
# create a new instance of the Firefox driver
print "Creating Selenium Firefox driver..."
driver = webdriver.Firefox()
# get login page
print "Retrieving KBase login web page: " + opts.url + " ..."
driver.get(opts.url)
# we have to wait for the login page to be fully loaded
WebDriverWait(driver, PAGE_LOAD_TIMEOUT).until(EC.presence_of_element_located((By.ID, "kbase_username")))
print "Retrieved login page with title = " + driver.title
# get username and password elements
userElement = driver.find_element_by_id("kbase_username")
pwdElement = driver.find_element_by_id("kbase_password")
# login
print "Logging into KBase narrative website..."
userElement.send_keys(opts.user)
pwdElement.send_keys(opts.passwd)
userElement.submit()
# we have to wait until the narrative page has loaded
WebDriverWait(driver, PAGE_LOAD_TIMEOUT).until(EC.presence_of_element_located((By.ID, "kbase-navbar")))
print "Retrieved default narrative, ready for testing."
workspaceTest = str(uuid.uuid1())
print "Setting the current workspace to: " + opts.wsid
command = ['ws-workspace', opts.wsid]
proc = subprocess.Popen(command, stdout = subprocess.PIPE)
stdout, stderr = proc.communicate()
if stderr:
print "ERROR: " + stderr
sys.exit()
print "Cloning the template workspace: " + opts.wsid + " into test workspace: " + workspaceTest
command = ['ws-clone', '-w', opts.wsid, workspaceTest]
proc = subprocess.Popen(command, stdout = subprocess.PIPE)
stdout, stderr = proc.communicate()
if stderr:
print "ERROR: " + stderr
sys.exit()
output = stdout.split()
workspaceId = output[len(output)-1]
print "Narrative cloned successfully to: " + workspaceId
narrativeUrl = opts.url + "/narrative/ws." + workspaceId + ".obj.1"
print "Retrieving narrative url: " + narrativeUrl
# Open a new window to avoid narrative popup when leaving a narrative page
driver.execute_script("$(window.open('http://www.google.com'))")
driver.switch_to_window(driver.window_handles[-1])
driver.get(narrativeUrl)
time.sleep(5)
# we have to wait until the narrative page has loaded
WebDriverWait(driver, PAGE_LOAD_TIMEOUT).until(EC.presence_of_element_located((By.ID, "kb-save-btn")))
print "Identified element specific to narrative interface (kb-save-btn), narrative has been loaded."
panel_divs = driver.find_elements_by_class_name("kb-data-list-name")
for i,j in enumerate(panel_divs):
if j.text == opts.app_name:
link = j.find_element_by_link_text(opts.app_name)
link.click()
time.sleep(5)
break
source = driver.page_source
print source.encode('utf-8').strip()
sys.exit()
params = driver.find_elements_by_class_name("select2-choice")
for i,p in enumerate(params):
ptype = config["params"][i]["type"]
pval = config["params"][i]["value"]
if ptype == "text":
p.click()
time.sleep(1)
p.send_keys(pval + "\n")
elif ptype == "dropdown":
p.click()
time.sleep(1)
inputs = driver.find_elements_by_id("select2-drop")
for i,j in enumerate(inputs):
if j.is_displayed():
values = j.find_elements_by_class_name("select2-result-label")
for k,l in enumerate(values):
if l.text == pval:
l.click()
time.sleep(1)
break
else:
continue
break
appCount = 0
source = driver.page_source
for line in source.split('\n'):
matches = re.findall('kb-cell-\S+-run', line)
for m in matches:
button = driver.find_element_by_id(m)
print " Identified 'Run' button: " + m
button.click()
print " Button clicked."
appCount = appCount + 1
time.sleep(5)
startTime = time.time()
appsRunning = 0
appsWithOutput = 0
appsWithError = 0
appsWithAlert = 0
while(appsWithOutput < appCount):
currentTime = time.time()
if currentTime - startTime > APP_RUN_TIMEOUT:
print "Timed out waiting for narrative apps to complete."
break
appsRunning = 0
appsWithOutput = 0
appsWithError = 0
appsWithAlert = 0
source = driver.page_source
delimiter = 'cell text_cell border-box-sizing'
divs = source.split(delimiter)
for index in range(1, len(divs)):
div = divs[index]
if re.search('div id="kb-cell-\d+-', div) and re.search('kb-app-step-running', div):
appsRunning = appsRunning + 1
elif re.search('div id="kb-cell-out', div):
appsWithOutput = appsWithOutput + 1
if re.search('App Error', div):
appsWithError = appsWithError + 1
elif re.search('alert-danger', div):
appsWithAlert = appsWithAlert + 1
print "Total number of apps in narrative: " + str(appCount)
print "Apps still running: " + str(appsRunning)
print "Apps with output widget: " + str(appsWithOutput)
print "Apps with output widget and App Error: " + str(appsWithError)
print "Apps with output widget and Error that is not App Error: " + str(appsWithAlert)
driver.set_window_size(1400, 950)
driver.execute_script("window.scrollTo(0,0);")
driver.get_screenshot_as_file(opts.output)
print "Saved screenshot to: " + opts.output + "\n"
print "Done."
driver.quit()
return 0
if __name__ == "__main__":
sys.exit( main(sys.argv) )
``` |
{
"source": "jmcharter/advent_of_code_2020",
"score": 4
} |
#### File: advent_of_code_2020/day_05/day_05_part_two.py
```python
with open("day_05/day_05.txt") as f:
data = f.read().splitlines()
def binary_search(line, start, end, lo, hi, char1, char2):
    '''
    Narrow the range [lo, hi] using the characters in line[start:end]:
    a character from char1 keeps the lower half, one from char2 keeps
    the upper half. Returns the value the range converges on.
    '''
target = 0
for char in line[start:end]:
if char in char1:
hi -= (hi - lo) // 2 + 1
target = lo
elif char in char2:
lo += (hi - lo) // 2 + 1
target = hi
return target
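# Worked example using the sample pass 'FBFBBFFRLR' from the puzzle statement:
#   binary_search('FBFBBFFRLR', 0, 7, 0, 127, 'F', 'B')   -> 44  (row)
#   binary_search('FBFBBFFRLR', 7, None, 0, 7, 'L', 'R')  -> 5   (column)
#   seat ID = 44 * 8 + 5 = 357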
def find_seat():
for line in data:
row = binary_search(line, 0, 7, 0, 127, 'F', 'B')
column = binary_search(line, 7, None, 0, 7, 'L','R')
id = row * 8 + column
yield id
seats = sorted([i for i in find_seat()])
adjacent = []
for i in range(1,len(seats)-1):
if seats[i] != seats[i-1] + 1 or seats[i] != seats[i+1] - 1:
adjacent.append(seats[i])
print((adjacent[0] + adjacent[1]) // 2)
# Answer = 699
``` |