metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "jmartens/pre-commit-hooks-django",
"score": 2
} |
#### File: pre-commit-hooks-django/tests/check_untracked_migrations_test.py
```python
from hooks.check_untracked_migrations import main
from hooks.utils import get_current_branch
def test_no_untracked_migrations(temp_git_dir):
with temp_git_dir.as_cwd():
migrations_dir = temp_git_dir.mkdir('app')
migrations_dir.join('main.py').write("print('hello world')")
assert main() == 0
def test_untracked_migrations(temp_git_dir):
with temp_git_dir.as_cwd():
migrations_dir = temp_git_dir.mkdir('app').mkdir('migrations')
migrations_dir.join('0001_initial.py').write("print('hello world')")
assert main() == 1
def test_running_on_correct_branch(temp_git_dir):
with temp_git_dir.as_cwd():
current_branch = get_current_branch()
assert main(["--branches", current_branch, "some_other_branch"]) == 0
def test_running_on_incorrect_branch(temp_git_dir):
with temp_git_dir.as_cwd():
assert main(["--branches", "branch_one", "branch_two"]) == 1
```
#### File: pre-commit-hooks-django/tests/conftest.py
```python
import subprocess
import pytest
@pytest.fixture
def temp_git_dir(tmpdir):
git_dir = tmpdir.join('gits')
subprocess.call(['git', 'init', '--', str(git_dir)])
yield git_dir
``` |
{
"source": "jmartens/sphinx-changelog",
"score": 2
} |
#### File: sphinx-changelog/sphinx_changelog/directive.py
```python
from pathlib import Path
from docutils import statemachine
from docutils.parsers.rst.directives import flag, path, unchanged
from sphinx.util.docutils import SphinxDirective
from .towncrier import generate_changelog_for_docs
__all__ = ['ChangeLog']
class ChangeLog(SphinxDirective):
"""
Render the changelog for the current commit using towncrier.
    This directive renders all of the towncrier news fragments into your current
    documentation; it can be used to keep a rendered version of the changelog
    since your last release in your documentation.
    The ``towncrier`` option gives the location of your ``pyproject.toml`` file
    (the towncrier configuration) relative to the ``conf.py`` file, *not* relative
    to the file in which the directive is located.
    If this option is not specified, it defaults to :file:`"../"`.
Examples
--------
.. code-block:: rst
.. changelog::
"""
required_arguments = 0
optional_arguments = 0
option_spec = {
'changelog_file': path,
'towncrier': unchanged,
'towncrier-skip-if-empty': flag,
'towncrier-title-underline-index': int,
}
final_argument_whitespace = True
def get_absolute_path(self, apath):
        # relfn2path() returns both the relative and absolute paths; we only need the absolute one
_, apath = self.env.relfn2path(apath)
return Path(apath)
def render_towncrier(self):
config_path = self.options.get("towncrier") or "../"
config_path = self.get_absolute_path(config_path)
skip_if_empty = "towncrier-skip-if-empty" in self.options
try:
changelog = generate_changelog_for_docs(config_path, skip_if_empty=skip_if_empty,
underline=self.options.get('towncrier-title-underline-index', 0))
except Exception as exc:
raise self.severe(str(exc))
return statemachine.string2lines(changelog, convert_whitespace=True)
def include_changelog(self):
changelog_filename = self.get_absolute_path(self.options['changelog_file'])
if not changelog_filename.exists():
raise self.severe(f"Can not find changelog file at {changelog_filename}")
with open(changelog_filename, encoding='utf8') as fobj:
return statemachine.string2lines(fobj.read(), convert_whitespace=True)
def run(self):
        # These includes should be added in reverse order because (apparently)
        # the last one inserted ends up at the top of the rendered output.
if "changelog_file" in self.options:
self.state_machine.insert_input(self.include_changelog(), "")
if "towncrier" in self.options:
self.state_machine.insert_input(self.render_towncrier(), "")
return []
def setup(app):
app.add_directive('changelog', ChangeLog)
return {'parallel_read_safe': True, 'parallel_write_safe': True}
``` |
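For reference, a minimal ``conf.py`` sketch that enables this directive might look like the following. This assumes the extension is registered under the package name ``sphinx_changelog`` (inferred from the import path above); check the package documentation for the exact extension name.

```python
# conf.py (sketch): enable the extension so the `changelog` directive becomes available.
# The extension name "sphinx_changelog" is an assumption based on the package path above.
extensions = [
    "sphinx_changelog",
]
```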
{
"source": "jmartenstein/interview-questions",
"score": 4
} |
#### File: jmartenstein/interview-questions/1.5-one-away.py
```python
import unittest
class OneAwayTest(unittest.TestCase):
def test_oneaway_missing_letter1(self):
actual = one_away("pale", "ple")
self.assertTrue(actual)
def test_oneaway_missing_letter2(self):
actual = one_away("p", "")
self.assertTrue(actual)
def test_oneaway_same_letters(self):
actual = one_away("justin", "justin")
self.assertTrue(actual)
def test_oneaway_missing_letter3(self):
actual = one_away("jstin", "jsti")
self.assertTrue(actual)
def test_oneaway_changed_letter1(self):
actual = one_away("pale", "bake")
self.assertFalse(actual)
def test_oneaway_changed_letters1(self):
actual = one_away("pale", "bale")
self.assertTrue(actual)
def one_away(string1, string2):
diff_count = 0
i = 0
j = 0
    # for now, handle the equal-length and different-length cases separately
if len(string1) == len(string2):
while (i < len(string1)) and (diff_count <= 1):
if string1[i] != string2[i]:
diff_count += 1
i += 1
else:
if len(string1) < len(string2):
string_short = string1
string_long = string2
else:
string_long = string1
string_short = string2
while(i < len(string_long)) and (diff_count <= 1):
if j >= len(string_short):
diff_count += 1
else:
if string_long[i] != string_short[j]:
diff_count += 1
else:
j += 1
i += 1
if diff_count > 1:
return False
else:
return True
if __name__ == '__main__':
unittest.main()
``` |
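A quick sanity check of the two-pointer logic, assuming ``one_away`` from the listing above is in scope:

```python
# Assuming one_away() from the listing above is importable/in scope.
print(one_away("pale", "ple"))    # True  - one deletion
print(one_away("pales", "pale"))  # True  - one insertion
print(one_away("pale", "bale"))   # True  - one substitution
print(one_away("pale", "bake"))   # False - two substitutions
```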
{
"source": "jmartin4563/pi-water-sensor",
"score": 3
} |
#### File: jmartin4563/pi-water-sensor/detect.py
```python
import RPi.GPIO as GPIO
from sparkpost import SparkPost
from twilio.rest import Client
import time
import os
# Connection Setup for SP and Twilio
sparkpostKey = os.environ.get('SPKEY')
twilioKey = os.environ.get('TWILIOKEY')
twilioAccount = os.environ.get('TWILIOACCT')
sparky = SparkPost(sparkpostKey)
twilio = Client(twilioAccount, twilioKey)
# Yay, you don't have water anymore
def sendHappyEmail():
sparky.transmissions.send(
recipients=['<EMAIL>'],
template='detector-email-happy'
)
def sendHappyText():
twilio.messages.create(
to='+14436059355',
from_='+14438927539',
body='Hooray! No more water in your basement'
)
# Boo, you have water detected
def sendSadEmail():
sparky.transmissions.send(
recipients=['<EMAIL>'],
template='detector-email-sad'
)
def sendSadText():
twilio.messages.create(
to='+14436059355',
from_='+14438927539',
body='Uh oh! We have detected that there is water in your basement'
)
# Generic callback handler for any state change
def inputCallback(channel):
if GPIO.input(channel):
print('No Water Detected')
sendHappyEmail()
sendHappyText()
else:
print('Water Detected')
sendSadEmail()
sendSadText()
# Setup the pin we are listening to
GPIO.setmode(GPIO.BOARD)
channel = 38
GPIO.setup(channel, GPIO.IN)
# Add our event handlers and callback (bouncetime is for preventing false positive changes)
GPIO.add_event_detect(channel, GPIO.BOTH, bouncetime=1000)
GPIO.add_event_callback(channel, inputCallback)
# Infinite loop of detection, with a sleep to keep CPU down on the Pi;
# release the GPIO pins cleanly if the script is interrupted
try:
    while True:
        time.sleep(0.5)
except KeyboardInterrupt:
    GPIO.cleanup()
``` |
{
"source": "jmartin4nrel/HOPP-1",
"score": 3
} |
#### File: optimization/layout_opt/wind_optimization_problem.py
```python
from typing import Tuple
import PySAM.Windpower as windpower
from shapely.geometry import Point
from hybrid.sites import SiteInfo
from hybrid.layout.wind_layout_tools import move_turbines_within_boundary
from parametrized_optimization_problem import ParametrizedOptimizationProblem
from hybrid.layout.plot_tools import plot_turbines
class WindSimulationVariables:
"""
Simulation inputs to be optimized for WindOptimizationProblem
turb_pos_x is a list of all the x-coordinates
turb_pos_y is a list of all the y-coordinates
"""
def __init__(self,
num_turbines: int,
turb_pos: [Point]
) -> None:
self.num_turbines = num_turbines
if len(turb_pos) != self.num_turbines:
# raise ValueError("HybridCandidate creation with wrong number of turbines")
self.num_turbines = int(len(turb_pos) / 2)
self.turb_pos_x = [pos.x for pos in turb_pos]
self.turb_pos_y = [pos.y for pos in turb_pos]
class WindOptimizationProblem(ParametrizedOptimizationProblem):
"""
Simulation of a wind farm with turbines placed within a site, following spacing requirements
"""
def __init__(self,
site_info: SiteInfo,
num_turbines: int = 20,
min_spacing: float = 200.0, # [m]
penalty_scale: float = .1,
max_unpenalized_distance: float = 0.0, # [m]
) -> None:
"""
Setup wind simulation
:param site_info: location, site and resource info
:param num_turbines: number of turbines to place on site
:param min_spacing: min spacing between turbines
:param penalty_scale: tuning parameter
:param max_unpenalized_distance: tuning parameter
"""
super().__init__(site_info, num_turbines, min_spacing)
self.candidate_type = lambda t: WindSimulationVariables(num_turbines, t)
self.penalty_scale: float = penalty_scale
self.max_unpenalized_distance: float = max_unpenalized_distance
self._scenario = None
self._setup_simulation()
def _setup_simulation(self
) -> None:
"""
Wind simulation
-> PySAM windpower model
"""
def run_wind_model(windmodel: windpower.Windpower):
windmodel.Farm.system_capacity = \
max(windmodel.Turbine.wind_turbine_powercurve_powerout) * len(windmodel.Farm.wind_farm_xCoordinates)
windmodel.execute(0)
return windmodel.Outputs.annual_energy
self._scenario = dict()
wind_model = windpower.default("WindPowerSingleOwner")
wind_model.Resource.wind_resource_data = self.site_info.wind_resource.data
self.turb_diam = wind_model.Turbine.wind_turbine_rotor_diameter
wind_model.Farm.wind_farm_wake_model = 2 # use eddy viscosity wake model
self._scenario['Wind'] = (wind_model, run_wind_model)
def make_conforming_candidate_and_get_penalty(self,
candidate: WindSimulationVariables
) -> Tuple[WindSimulationVariables, float]:
"""
Penalize turbines out of bounds while moving them within the boundary
+ always generates a feasible solution
+ provides a smooth surface to descend into a good solution
- requires tuning of penalty
"""
candidate.turb_pos_x, candidate.turb_pos_y, squared_error = \
move_turbines_within_boundary(candidate.turb_pos_x, candidate.turb_pos_y,
self.site_info.polygon.boundary, self.site_info.valid_region)
return candidate, squared_error
def objective(self,
candidate: WindSimulationVariables
) -> float:
"""
Annual energy production of turbine layout less penalty of out-of-bound turbines
:param candidate:
:return:
"""
conforming_candidate, squared_error = self.make_conforming_candidate_and_get_penalty(candidate)
penalty = max(0.0, self.penalty_scale * max(0.0, squared_error - self.max_unpenalized_distance))
wind_model: windpower.Windpower = self._scenario["Wind"][0]
wind_model.Farm.wind_farm_xCoordinates = conforming_candidate.turb_pos_x
wind_model.Farm.wind_farm_yCoordinates = conforming_candidate.turb_pos_y
score = self._scenario["Wind"][1](wind_model) / 1000
return score - penalty # , score
@staticmethod
def plot_candidate(candidate: WindSimulationVariables,
color=(0, 1, 0),
alpha=.5) -> None:
plot_turbines(candidate.turb_pos_x, candidate.turb_pos_y,
color, alpha)
```
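For reference, the out-of-bounds penalty used in ``objective`` reduces to a hinge on the squared error returned by ``make_conforming_candidate_and_get_penalty``. A stand-alone sketch of that arithmetic with illustrative numbers (not values from HOPP):

```python
def boundary_penalty(squared_error: float,
                     penalty_scale: float = 0.1,
                     max_unpenalized_distance: float = 0.0) -> float:
    """Hinge penalty on the squared distance turbines were moved back in-bounds."""
    return max(0.0, penalty_scale * max(0.0, squared_error - max_unpenalized_distance))

# A candidate whose turbines were nudged a total squared distance of 250 m^2,
# with the first 100 m^2 unpenalized:
print(boundary_penalty(250.0, penalty_scale=0.1, max_unpenalized_distance=100.0))  # 15.0
```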
#### File: dispatch/power_sources/power_source_dispatch.py
```python
import pyomo.environ as pyomo
from pyomo.network import Port
from pyomo.environ import units as u
from hybrid.dispatch.dispatch import Dispatch
class PowerSourceDispatch(Dispatch):
"""
"""
def __init__(self,
pyomo_model: pyomo.ConcreteModel,
index_set: pyomo.Set,
system_model,
financial_model,
block_set_name: str = 'generator'):
super().__init__(pyomo_model,
index_set,
system_model,
financial_model,
block_set_name=block_set_name)
@staticmethod
def dispatch_block_rule(gen):
##################################
# Parameters #
##################################
gen.time_duration = pyomo.Param(
doc="Time step [hour]",
default=1.0,
within=pyomo.NonNegativeReals,
mutable=True,
units=u.hr)
gen.cost_per_generation = pyomo.Param(
doc="Generation cost for generator [$/MWh]",
default=0.0,
within=pyomo.NonNegativeReals,
mutable=True,
units=u.USD / u.MWh)
gen.available_generation = pyomo.Param(
doc="Available generation for the generator [MW]",
default=0.0,
within=pyomo.NonNegativeReals,
mutable=True,
units=u.MW)
##################################
# Variables #
##################################
gen.generation = pyomo.Var(
doc="Power generation of generator [MW]",
domain=pyomo.NonNegativeReals,
bounds=(0, gen.available_generation),
units=u.MW)
gen.generation_cost = pyomo.Var(
doc="Cost of generation [$]",
domain=pyomo.NonNegativeReals,
units=u.USD)
##################################
# Constraints #
##################################
gen.generation_cost_calc = pyomo.Constraint(
doc="Calculation of generation cost for objective function",
expr=gen.generation_cost == gen.time_duration * gen.cost_per_generation * gen.generation)
##################################
# Ports #
##################################
gen.port = Port()
gen.port.add(gen.generation)
gen.port.add(gen.generation_cost)
def initialize_dispatch_model_parameters(self):
self.cost_per_generation = self._financial_model.value("om_capacity")[0]*1e3/8760
def update_time_series_dispatch_model_parameters(self, start_time: int):
n_horizon = len(self.blocks.index_set())
generation = self._system_model.value("gen")
if start_time + n_horizon > len(generation):
horizon_gen = list(generation[start_time:])
horizon_gen.extend(list(generation[0:n_horizon - len(horizon_gen)]))
else:
horizon_gen = generation[start_time:start_time + n_horizon]
if len(horizon_gen) < len(self.blocks):
raise RuntimeError(f"Dispatch parameter update error at start_time {start_time}: System model "
f"{type(self._system_model)} generation profile should have at least {len(self.blocks)} "
f"length but has only {len(generation)}")
self.available_generation = [gen_kw / 1e3 for gen_kw in horizon_gen]
@property
def cost_per_generation(self) -> float:
for t in self.blocks.index_set():
return self.blocks[t].cost_per_generation.value
@cost_per_generation.setter
def cost_per_generation(self, om_dollar_per_mwh: float):
for t in self.blocks.index_set():
self.blocks[t].cost_per_generation.set_value(round(om_dollar_per_mwh, self.round_digits))
@property
def available_generation(self) -> list:
return [self.blocks[t].available_generation.value for t in self.blocks.index_set()]
@available_generation.setter
def available_generation(self, resource: list):
if len(resource) == len(self.blocks):
for t, gen in zip(self.blocks, resource):
self.blocks[t].available_generation.set_value(round(gen, self.round_digits))
else:
raise ValueError(f"'resource' list ({len(resource)}) must be the same length as time horizon ({len(self.blocks)})")
@property
def generation(self) -> list:
return [round(self.blocks[t].generation.value, self.round_digits) for t in self.blocks.index_set()]
@property
def generation_cost(self) -> list:
return [round(self.blocks[t].generation_cost.value, self.round_digits) for t in self.blocks.index_set()]
```
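The ``dispatch_block_rule`` above follows a standard Pyomo pattern: mutable Params, bounded Vars, a cost-linking Constraint, and a Port exposing the coupling variables. A minimal stand-alone sketch of that pattern with illustrative numbers (not the HOPP dispatch API itself):

```python
import pyomo.environ as pyomo
from pyomo.network import Port

m = pyomo.ConcreteModel()
# Mutable parameters so they can be updated between dispatch horizons
m.time_duration = pyomo.Param(default=1.0, mutable=True, within=pyomo.NonNegativeReals)
m.cost_per_generation = pyomo.Param(default=30.0, mutable=True, within=pyomo.NonNegativeReals)
m.available_generation = pyomo.Param(default=50.0, mutable=True, within=pyomo.NonNegativeReals)
# Decision variables
m.generation = pyomo.Var(domain=pyomo.NonNegativeReals, bounds=(0, 50.0))
m.generation_cost = pyomo.Var(domain=pyomo.NonNegativeReals)
# Cost-linking constraint, mirroring gen.generation_cost_calc above
m.generation_cost_calc = pyomo.Constraint(
    expr=m.generation_cost == m.time_duration * m.cost_per_generation * m.generation)
# Expose the coupling variables through a Port, as the dispatch blocks do
m.port = Port()
m.port.add(m.generation)
m.port.add(m.generation_cost)
```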
#### File: dispatch/power_storage/simple_battery_dispatch_heuristic.py
```python
import pyomo.environ as pyomo
from pyomo.environ import units as u
import PySAM.BatteryStateful as BatteryModel
import PySAM.Singleowner as Singleowner
from hybrid.dispatch.power_storage.simple_battery_dispatch import SimpleBatteryDispatch
class SimpleBatteryDispatchHeuristic(SimpleBatteryDispatch):
"""Fixes battery dispatch operations based on user input.
Currently, enforces available generation and grid limit assuming no battery charging from grid
"""
def __init__(self,
pyomo_model: pyomo.ConcreteModel,
index_set: pyomo.Set,
system_model: BatteryModel.BatteryStateful,
financial_model: Singleowner.Singleowner,
fixed_dispatch: list = None,
block_set_name: str = 'heuristic_battery',
include_lifecycle_count: bool = False):
"""
:param fixed_dispatch: list of normalized values [-1, 1] (Charging (-), Discharging (+))
"""
super().__init__(pyomo_model,
index_set,
system_model,
financial_model,
block_set_name=block_set_name,
include_lifecycle_count=False)
self.max_charge_fraction = list([0.0]*len(self.blocks.index_set()))
self.max_discharge_fraction = list([0.0]*len(self.blocks.index_set()))
self.user_fixed_dispatch = list([0.0]*len(self.blocks.index_set()))
        # TODO: should we enforce either a day schedule or a year schedule and save it as user input?
        #  Additionally, should we drop it as an input to the init function?
if fixed_dispatch is not None:
self.user_fixed_dispatch = fixed_dispatch
self._fixed_dispatch = list([0.0] * len(self.blocks.index_set()))
def set_fixed_dispatch(self, gen: list, grid_limit: list):
"""Sets charge and discharge power of battery dispatch using fixed_dispatch attribute and enforces available
generation and grid limits.
"""
self.check_gen_grid_limit(gen, grid_limit)
self._set_power_fraction_limits(gen, grid_limit)
self._heuristic_method(gen)
self._fix_dispatch_model_variables()
def check_gen_grid_limit(self, gen: list, grid_limit: list):
if len(gen) != len(self.fixed_dispatch):
raise ValueError("gen must be the same length as fixed_dispatch.")
elif len(grid_limit) != len(self.fixed_dispatch):
raise ValueError("grid_limit must be the same length as fixed_dispatch.")
def _set_power_fraction_limits(self, gen: list, grid_limit: list):
"""Set battery charge and discharge power fraction limits based on available generation and grid capacity,
respectively.
NOTE: This method assumes that battery cannot be charged by the grid.
"""
for t in self.blocks.index_set():
self.max_charge_fraction[t] = self.enforce_power_fraction_simple_bounds(gen[t] / self.maximum_power)
self.max_discharge_fraction[t] = self.enforce_power_fraction_simple_bounds((grid_limit[t] - gen[t])
/ self.maximum_power)
@staticmethod
def enforce_power_fraction_simple_bounds(power_fraction) -> float:
""" Enforces simple bounds (0,1) for battery power fractions."""
if power_fraction > 1.0:
power_fraction = 1.0
elif power_fraction < 0.0:
power_fraction = 0.0
return power_fraction
def update_soc(self, power_fraction, soc0) -> float:
if power_fraction > 0.0:
discharge_power = power_fraction * self.maximum_power
soc = soc0 - self.time_duration[0] * (1/(self.discharge_efficiency/100.) * discharge_power) / self.capacity
elif power_fraction < 0.0:
charge_power = - power_fraction * self.maximum_power
soc = soc0 + self.time_duration[0] * (self.charge_efficiency / 100. * charge_power) / self.capacity
else:
soc = soc0
soc = max(0, min(1, soc))
return soc
def _heuristic_method(self, _):
""" Does specific heuristic method to fix battery dispatch."""
self._enforce_power_fraction_limits()
def _enforce_power_fraction_limits(self):
""" Enforces battery power fraction limits and sets _fixed_dispatch attribute"""
for t in self.blocks.index_set():
fd = self.user_fixed_dispatch[t]
if fd > 0.0: # Discharging
if fd > self.max_discharge_fraction[t]:
fd = self.max_discharge_fraction[t]
elif fd < 0.0: # Charging
if - fd > self.max_charge_fraction[t]:
fd = - self.max_charge_fraction[t]
self._fixed_dispatch[t] = fd
def _fix_dispatch_model_variables(self):
soc0 = self.model.initial_soc.value
for t in self.blocks.index_set():
dispatch_factor = self._fixed_dispatch[t]
self.blocks[t].soc.fix(self.update_soc(dispatch_factor, soc0))
soc0 = self.blocks[t].soc.value
if dispatch_factor == 0.0:
# Do nothing
self.blocks[t].charge_power.fix(0.0)
self.blocks[t].discharge_power.fix(0.0)
elif dispatch_factor > 0.0:
# Discharging
self.blocks[t].charge_power.fix(0.0)
self.blocks[t].discharge_power.fix(dispatch_factor * self.maximum_power)
elif dispatch_factor < 0.0:
# Charging
self.blocks[t].discharge_power.fix(0.0)
self.blocks[t].charge_power.fix(- dispatch_factor * self.maximum_power)
@property
def fixed_dispatch(self) -> list:
return self._fixed_dispatch
@property
def user_fixed_dispatch(self) -> list:
return self._user_fixed_dispatch
@user_fixed_dispatch.setter
def user_fixed_dispatch(self, fixed_dispatch: list):
# TODO: Annual dispatch array...
if len(fixed_dispatch) != len(self.blocks.index_set()):
raise ValueError("fixed_dispatch must be the same length as dispatch index set.")
elif max(fixed_dispatch) > 1.0 or min(fixed_dispatch) < -1.0:
raise ValueError("fixed_dispatch must be normalized values between -1 and 1.")
else:
self._user_fixed_dispatch = fixed_dispatch
```
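The SOC bookkeeping in ``update_soc`` is a one-step energy balance with the round-trip efficiency split between charging and discharging. A stand-alone sketch of the same arithmetic (parameter values are illustrative, not HOPP defaults):

```python
def update_soc(power_fraction, soc0, maximum_power, capacity, dt,
               charge_efficiency=95.0, discharge_efficiency=95.0):
    """One-step SOC update mirroring the method above (efficiencies in percent)."""
    if power_fraction > 0.0:      # discharging
        discharge_power = power_fraction * maximum_power
        soc = soc0 - dt * (discharge_power / (discharge_efficiency / 100.0)) / capacity
    elif power_fraction < 0.0:    # charging
        charge_power = -power_fraction * maximum_power
        soc = soc0 + dt * (charge_efficiency / 100.0 * charge_power) / capacity
    else:
        soc = soc0
    return max(0.0, min(1.0, soc))

# e.g. charging a 20 MWh battery at half of a 5 MW rating for one hour from 50% SOC
print(update_soc(-0.5, 0.5, maximum_power=5.0, capacity=20.0, dt=1.0))  # ~0.619
```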
#### File: hybrid/layout/layout_tools.py
```python
from math import fabs
from typing import (
Callable,
Tuple,
)
import numpy as np
from shapely.geometry import Polygon
def binary_search_float(objective: Callable[[float], any],
minimum: float,
maximum: float,
max_iters: int = 32,
threshold: float = 1e-3,
) -> (float, bool):
"""
    :param objective: function whose zero crossing (root) is being searched for
:param minimum: min value of search
:param maximum: max value of search
:param max_iters: max iterations
:param threshold: distance between max and min search points upon which to exit early
    :return: tuple of (best candidate, whether the search converged within the threshold)
"""
if fabs(maximum - minimum) < threshold:
return maximum, True
if minimum > maximum:
raise ValueError(f"binary search minimum {minimum} must be less than maximum {maximum}")
candidate = 0.0
for i in range(max_iters):
candidate = (maximum + minimum) / 2
evaluation = objective(candidate)
if fabs(maximum - minimum) < threshold:
return candidate, True
if evaluation < 0: # candidate < target
minimum = candidate
elif evaluation > 0: # candidate > target
maximum = candidate
return candidate, False
def binary_search_int(objective: Callable[[int], any],
minimum: int,
maximum: int,
) -> (int, bool):
"""
    :param objective: function whose zero (root) is being searched for
:param minimum: min value of search
:param maximum: max value of search
    :return: tuple of (best candidate, whether an exact zero of the objective was found)
"""
if minimum > maximum:
raise ValueError(f"binary search minimum {minimum} must be less than maximum {maximum}")
candidate = 0
while minimum < maximum:
candidate = (maximum + minimum) // 2
evaluation = objective(candidate)
if evaluation < 0: # candidate < target
minimum = candidate + 1
elif evaluation > 0: # candidate > target
maximum = candidate
else: # candidate == target
return candidate, True
return candidate, False
def make_polygon_from_bounds(sw_bound: np.ndarray,
ne_bound: np.ndarray
) -> Polygon:
return Polygon([
sw_bound.tolist(),
[sw_bound[0], ne_bound[1]],
ne_bound.tolist(),
[ne_bound[0], sw_bound[1]]])
def clamp(value,
error,
minimum,
maximum
) -> Tuple:
delta = 0.0
if value > maximum:
delta = value - maximum
value = maximum
elif value < minimum:
delta = minimum - value
value = minimum
return value, error + delta ** 2
```
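Usage sketch for the two searches above, assuming both functions are in scope: ``binary_search_float`` bisects until the bracket is smaller than ``threshold``, and ``binary_search_int`` returns the exact match when the objective hits zero.

```python
# Root of x^2 - 2 on [0, 2]: converges to sqrt(2) within the threshold.
root, converged = binary_search_float(lambda x: x * x - 2.0, 0.0, 2.0, threshold=1e-6)
print(round(root, 5), converged)  # 1.41421 True

# Integer search for the value 7 on [0, 100).
value, exact = binary_search_int(lambda i: i - 7, 0, 100)
print(value, exact)  # 7 True
```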
#### File: hybrid/layout/plot_tools.py
```python
import matplotlib.pyplot as plt
from shapely.geometry import (
LineString,
MultiPolygon,
)
def plot_turbines(turb_pos_x: list,
turb_pos_y: list,
color='g',
alpha=.5
) -> None:
for n in range(len(turb_pos_y)):
plt.plot(turb_pos_x[n], turb_pos_y[n], 'o', color=color, alpha=alpha)
def plot_solar_strands(
figure,
axes,
areas: (int, float, LineString),
*args,
**kwargs
) -> None:
if type(areas[0]) is int:
areas = (areas,)
for a in areas:
for s in a.strands:
segment: LineString = s[2]
x, y = segment.xy
axes.plot(x, y, *args, **kwargs)
def plot_shape(
figure,
axes,
shape,
*args,
**kwargs):
if isinstance(shape, MultiPolygon):
for poly in shape:
x, y = poly.exterior.xy
axes.plot(x, y, *args, **kwargs)
elif isinstance(shape, LineString):
points = list(shape.coords)
axes.plot([point[0] for point in points], [point[1] for point in points], *args, **kwargs)
else:
try:
x, y = shape.exterior.xy
axes.plot(x, y, *args, **kwargs)
        except AttributeError:
            # shape has no exterior ring to plot (e.g. a Point)
            pass
```
#### File: hybrid/layout/simple_flicker.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from shapely.geometry import Point, Polygon
class SimpleFlicker():
def __init__(self, solar_verts, T, turbine_locs):
        self.solar_verts = solar_verts
        self.turbine_locs = turbine_locs
def rotate(self, origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
ox, oy = origin
px, py = point
qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy)
qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy)
return qx, qy
def find_angle(self, T_in):
# find the omega
from scipy import interpolate
T = np.array([6, 12, 18])
omega = np.array([-90, 0, 90])
f = interpolate.interp1d(T, omega)
if T_in < 6:
omega_out = 0
print('Sun is not high enough for a shadow...')
elif T_in > 18:
omega_out = 0
print('Sun is not high enough for a shadow...')
else:
omega_out = f(T_in)
return -np.radians(omega_out)
def calculate_shadow(self, time_idx, show=True):
# user inputs
T = time_idx # time (in military time)
d = 10 # number of days since the new year
# turbine parameters
HH = 90 # hub height
D = 126 # rotor diameter
wd = 5 # tower width is 5 m?
# turbine location
x_loc = self.turbine_locs[0]
y_loc = self.turbine_locs[1]
# position
lat = 39.7555
lon = -105.2211
# calculate the shadow
delta = np.radians(-23.45 * np.cos( np.radians(360/365 * (d + 10)) ))
omega = self.find_angle(T)
# tower shadow
        Fx = -( np.cos(delta) * np.sin(omega) / (np.sin(np.radians(lat)) * np.sin(delta) + np.cos(np.radians(lat)) * np.cos(delta) * np.cos(omega)))
numY = ( np.sin(np.radians(lat)) * np.cos(delta) * np.cos(omega) - np.cos(np.radians(lat)) * np.cos(delta) )
denY = ( np.sin(np.radians(lat)) * np.sin(delta) + np.cos(np.radians(lat)) * np.cos(delta) * np.cos(omega) )
Fy = -numY / denY
# plot turbine shadow and rotor shadow
fig, ax = plt.subplots()
plt.plot(x_loc,y_loc,'bo')
plt.plot([x_loc + wd/2, (x_loc+wd/2) + (HH) * Fx], [y_loc, y_loc + (HH) * Fy],'k')
plt.plot([x_loc - wd/2, (x_loc-wd/2) + (HH) * Fx], [y_loc, y_loc + (HH) * Fy], 'k')
length = (HH + D/2) * Fx - (HH - D/2) * Fx
angle = np.degrees(-90 - np.tan(Fx/Fy))
a = length/2
b = D/2
x = np.linspace(-a,a,100)
y = b * np.sqrt( 1 - (x/a)**2 )
rx = np.zeros(len(x))
ry = np.zeros(len(y))
rx2 = np.zeros(len(x))
ry2 = np.zeros(len(y))
poly_rotor = []
for i in range(len(x)):
rx[i], ry[i] = self.rotate([0,0], [x[i],y[i]], np.radians(angle))
poly_rotor.append((rx[i]+(HH*Fx)+x_loc,ry[i]+(HH*Fy)+y_loc))
for i in range(len(x)):
rx2[i], ry2[i] = self.rotate([0, 0], [x[i], -y[i]], np.radians(angle))
poly_rotor.append((rx2[i]+(HH*Fx)+x_loc,ry2[i]+(HH*Fy)+y_loc))
plt.plot(rx+(HH*Fx)+x_loc,ry+(HH*Fy)+y_loc,'k')
plt.plot(rx2+(HH*Fx)+x_loc,ry2+(HH*Fy)+y_loc,'k')
for i in range(len(self.solar_verts)-1):
plt.plot([self.solar_verts[i][0], self.solar_verts[i+1][0]], [self.solar_verts[i][1], self.solar_verts[i+1][1]],'r')
plt.plot([self.solar_verts[0][0], self.solar_verts[i + 1][0]], [self.solar_verts[0][1], self.solar_verts[i + 1][1]], 'r')
plt.xlim([-500,500])
plt.ylim([-500, 500])
plt.grid()
if show:
plt.show()
poly_tower = [(x_loc + wd/2, y_loc), (x_loc - wd/2, y_loc),
(x_loc - wd/2 + (HH) * Fx, y_loc + HH * Fy), (x_loc + wd/2 + HH * Fx, y_loc + HH * Fy)]
return poly_rotor, poly_tower
def point_inside(self, point, coords):
# Create Point objects
p1 = Point(point[0], point[1])
# Create a Polygon
poly = Polygon(coords)
# check if point is within polygon
return p1.within(poly)
    def determine_boundaries(self):
        # initialize from the first vertex rather than 0 so layouts that do not
        # straddle the origin are handled correctly
        x_min = x_max = self.solar_verts[0][0]
        y_min = y_max = self.solar_verts[0][1]
for point in self.solar_verts:
# check x points
if point[0] < x_min:
x_min = point[0]
elif point[0] > x_max:
x_max = point[0]
# check y points
if point[1] < y_min:
y_min = point[1]
elif point[1] > y_max:
y_max = point[1]
return x_min, x_max, y_min, y_max
def calculate_overlap(self, T, show=False):
# determine xmin, xmax, ymin, ymax
        xmin, xmax, ymin, ymax = self.determine_boundaries()
# solar boundaries - assume rectangle
# generation points inside the solar_verts
N = 10
x = np.linspace(xmin,xmax,N)
y = np.linspace(ymin,ymax,N)
# turbine parameters
D = 126
Area = np.pi * (D / 2)**2
# cycle through turbine shadows
# determine if those points are within the turbine shadow
inside_shadow = np.zeros((N,N))
for i in range(len(self.turbine_locs)):
poly_rotor, poly_tower = self.calculate_shadow(T, show=False)
for j in range(N):
for k in range(N):
point = [x[j],y[k]]
if inside_shadow[j,k] == 0:
if self.point_inside(point, poly_rotor):
inside_shadow[j,k] = 1/3 # not sure what the ratio should be here
elif self.point_inside(point, poly_tower):
inside_shadow[j, k] = 1
for i in range(N):
for j in range(N):
if inside_shadow[i,j] == 1:
plt.plot(x[i],y[j],'go')
else:
plt.plot(x[i], y[j], 'bo')
if show:
plt.show()
return np.sum(inside_shadow) / (N*N)
def calculate_losses(self, T, show=False):
losses = self.calculate_overlap(T, show=show)
print('Percent losses: ', 100 * losses, '%')
return 100 * losses
```
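The rotor-shadow ellipse is built by rotating points with a standard 2-D rotation about an origin. A stand-alone copy of that formula with a quick numeric check (rotating (1, 0) by 90 degrees about the origin should give approximately (0, 1)):

```python
import numpy as np

def rotate(origin, point, angle):
    """Rotate a point counterclockwise by `angle` radians around `origin` (same formula as above)."""
    ox, oy = origin
    px, py = point
    qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy)
    qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy)
    return qx, qy

print(rotate((0, 0), (1, 0), np.radians(90)))  # approximately (0.0, 1.0)
```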
#### File: hybrid/sites/site_info.py
```python
import matplotlib.pyplot as plt
import numpy as np
from shapely.geometry import *
from shapely.geometry.base import *
from hybrid.resource import (
SolarResource,
WindResource,
ElectricityPrices
)
from hybrid.layout.plot_tools import plot_shape
from hybrid.log import hybrid_logger as logger
from hybrid.keys import set_nrel_key_dot_env
def plot_site(verts, plt_style, labels):
for i in range(len(verts)):
if i == 0:
plt.plot([verts[0][0], verts[len(verts) - 1][0]], [verts[0][1], verts[len(verts) - 1][1]],
plt_style, label=labels)
else:
plt.plot([verts[i][0], verts[i - 1][0]], [verts[i][1], verts[i - 1][1]], plt_style)
plt.grid()
class SiteInfo:
def __init__(self, data, solar_resource_file="", wind_resource_file="", grid_resource_file=""):
set_nrel_key_dot_env()
self.data = data
self.vertices = np.array([np.array(v) for v in data['site_boundaries']['verts']])
self.polygon: Polygon = Polygon(self.vertices)
self.valid_region = self.polygon.buffer(1e-8)
if 'lat' not in data or 'lon' not in data:
raise ValueError("SiteInfo requires lat and lon")
self.lat = data['lat']
self.lon = data['lon']
if 'year' not in data:
data['year'] = 2012
self.solar_resource = SolarResource(data['lat'], data['lon'], data['year'], filepath=solar_resource_file)
# TODO: allow hub height to be used as an optimization variable
self.wind_resource = WindResource(data['lat'], data['lon'], data['year'], wind_turbine_hub_ht=80,
filepath=wind_resource_file)
self.elec_prices = ElectricityPrices(data['lat'], data['lon'], data['year'], filepath=grid_resource_file)
self.n_timesteps = len(self.solar_resource.data['gh']) // 8760 * 8760
self.n_periods_per_day = self.n_timesteps // 365 # TODO: Does not handle leap years well
self.interval = (60*24)/self.n_periods_per_day
self.urdb_label = data['urdb_label'] if 'urdb_label' in data.keys() else None
logger.info("Set up SiteInfo with solar and wind resource files: {}, {}".format(self.solar_resource.filename,
self.wind_resource.filename))
@property
def boundary(self) -> BaseGeometry:
# TODO: remove boundaries of interior holes
# return self.polygon.boundary.difference(self.polygon.interiors)
return self.polygon.exterior
@property
def bounding_box(self) -> np.ndarray:
return np.array([np.min(self.vertices, 0), np.max(self.vertices, 0)])
@property
def center(self) -> Point:
bounding_box = self.bounding_box
        return (bounding_box[0] + bounding_box[1]) * .5  # midpoint of the bounding box
def plot(self,
figure=None,
axes=None,
border_color=(0, 0, 0),
alpha=0.95,
linewidth=4.0
):
bounds = self.polygon.bounds
site_sw_bound = np.array([bounds[0], bounds[1]])
site_ne_bound = np.array([bounds[2], bounds[3]])
site_center = .5 * (site_sw_bound + site_ne_bound)
max_delta = max(bounds[2] - bounds[0], bounds[3] - bounds[1])
reach = (max_delta / 2) * 1.3
min_plot_bound = site_center - reach
max_plot_bound = site_center + reach
if not figure and not axes:
figure = plt.figure(1)
axes = figure.add_subplot(111)
axes.set_aspect('equal')
axes.set(xlim=(min_plot_bound[0], max_plot_bound[0]), ylim=(min_plot_bound[1], max_plot_bound[1]))
plot_shape(figure, axes, self.polygon, '--', color=border_color, alpha=alpha, linewidth=linewidth / 2)
plt.tick_params(which='both', labelsize=15)
plt.xlabel('x (m)', fontsize=15)
plt.ylabel('y (m)', fontsize=15)
return figure, axes
```
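The ``bounding_box`` and ``center`` properties reduce to simple NumPy min/max arithmetic over the site vertices. A stand-alone sketch with toy coordinates (not a real site), where the center is taken as the midpoint of the bounds:

```python
import numpy as np

vertices = np.array([[0.0, 0.0], [100.0, 0.0], [100.0, 50.0], [0.0, 50.0]])
bounding_box = np.array([np.min(vertices, 0), np.max(vertices, 0)])   # [[0, 0], [100, 50]]
center = 0.5 * (bounding_box[0] + bounding_box[1])                    # midpoint of the bounds
print(bounding_box.tolist(), center.tolist())  # [[0.0, 0.0], [100.0, 50.0]] [50.0, 25.0]
```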
#### File: tests/hybrid/test_hybrid.py
```python
from pytest import approx, fixture
from hybrid.sites import SiteInfo, flatirons_site
from hybrid.layout.hybrid_layout import WindBoundaryGridParameters, PVGridParameters
from hybrid.hybrid_simulation import HybridSimulation
@fixture
def site():
return SiteInfo(flatirons_site)
interconnection_size_kw = 15000
pv_kw = 5000
wind_kw = 10000
batt_kw = 5000
technologies = {'pv': {
'system_capacity_kw': pv_kw,
'layout_params': PVGridParameters(x_position=0.5,
y_position=0.5,
aspect_power=0,
gcr=0.5,
s_buffer=2,
x_buffer=2)
},
'wind': {
'num_turbines': 5,
'turbine_rating_kw': wind_kw / 5,
'layout_mode': 'boundarygrid',
'layout_params': WindBoundaryGridParameters(border_spacing=2,
border_offset=0.5,
grid_angle=0.5,
grid_aspect_power=0.5,
row_phase_offset=0.5)
},
'battery': {
'system_capacity_kwh': batt_kw * 4,
'system_capacity_kw': 5000
}}
def test_hybrid_wind_only(site):
wind_only = {'wind': technologies['wind']}
hybrid_plant = HybridSimulation(wind_only, site, interconnect_kw=interconnection_size_kw)
hybrid_plant.layout.plot()
hybrid_plant.ppa_price = (0.01, )
hybrid_plant.simulate(25)
aeps = hybrid_plant.annual_energies
npvs = hybrid_plant.net_present_values
assert aeps.pv == 0
assert aeps.wind == approx(33615479, 1e3)
assert aeps.hybrid == approx(33615479, 1e3)
assert npvs.pv == 0
assert npvs.wind == approx(-13692784, 1e3)
assert npvs.hybrid == approx(-13692784, 1e3)
def test_hybrid_pv_only(site):
solar_only = {'pv': technologies['pv']}
hybrid_plant = HybridSimulation(solar_only, site, interconnect_kw=interconnection_size_kw)
hybrid_plant.layout.plot()
hybrid_plant.ppa_price = (0.01, )
hybrid_plant.pv.dc_degradation = [0] * 25
hybrid_plant.simulate()
aeps = hybrid_plant.annual_energies
npvs = hybrid_plant.net_present_values
assert aeps.pv == approx(9884106.55, 1e-3)
assert aeps.wind == 0
assert aeps.hybrid == approx(9884106.55, 1e-3)
assert npvs.pv == approx(-5121293, 1e3)
assert npvs.wind == 0
assert npvs.hybrid == approx(-5121293, 1e3)
def test_hybrid(site):
"""
    Performance from wind is slightly different from the wind-only case because the solar presence modifies the wind layout
"""
solar_wind_hybrid = {key: technologies[key] for key in ('pv', 'wind')}
hybrid_plant = HybridSimulation(solar_wind_hybrid, site, interconnect_kw=interconnection_size_kw)
hybrid_plant.layout.plot()
hybrid_plant.ppa_price = (0.01, )
hybrid_plant.pv.dc_degradation = [0] * 25
hybrid_plant.simulate()
# plt.show()
aeps = hybrid_plant.annual_energies
npvs = hybrid_plant.net_present_values
assert aeps.pv == approx(8703525.94, 13)
assert aeps.wind == approx(33615479.57, 1e3)
assert aeps.hybrid == approx(41681662.63, 1e3)
assert npvs.pv == approx(-5121293, 1e3)
assert npvs.wind == approx(-13909363, 1e3)
assert npvs.hybrid == approx(-19216589, 1e3)
def test_hybrid_with_storage_dispatch(site):
hybrid_plant = HybridSimulation(technologies, site, interconnect_kw=interconnection_size_kw)
hybrid_plant.ppa_price = (0.03, )
hybrid_plant.pv.dc_degradation = [0] * 25
hybrid_plant.simulate()
aeps = hybrid_plant.annual_energies
assert aeps.pv == approx(9883471, 1e-3)
assert aeps.wind == approx(33637983, 1e-3)
assert aeps.battery == approx(-131771, 1e-3)
assert aeps.hybrid == approx(43389683, 1e-3)
npvs = hybrid_plant.net_present_values
assert npvs.pv == approx(-1293490, 1e-3)
assert npvs.wind == approx(-3967472, 1e-3)
assert npvs.battery == approx(-11836115, 1e-3)
assert npvs.hybrid == approx(-17136650, 1e-3)
taxes = hybrid_plant.federal_taxes
assert taxes.pv[1] == approx(105716, 1e-3)
assert taxes.wind[1] == approx(402703, 1e-3)
assert taxes.battery[1] == approx(512012, 1e-3)
assert taxes.hybrid[1] == approx(1022906, 1e-3)
apv = hybrid_plant.energy_purchases_values
assert apv.pv[1] == approx(0, 1e-3)
assert apv.wind[1] == approx(0, 1e-3)
assert apv.battery[1] == approx(158296, 1e-3)
assert apv.hybrid[1] == approx(38438, 1e-2)
debt = hybrid_plant.debt_payment
assert debt.pv[1] == approx(0, 1e-3)
assert debt.wind[1] == approx(0, 1e-3)
assert debt.battery[1] == approx(0, 1e-3)
assert debt.hybrid[1] == approx(0, 1e-3)
esv = hybrid_plant.energy_sales_values
assert esv.pv[1] == approx(296504, 1e3)
assert esv.wind[1] == approx(1009139, 1e3)
assert esv.battery[1] == approx(167015, 1e3)
assert esv.hybrid[1] == approx(1340129, 1e3)
depr = hybrid_plant.federal_depreciation_totals
assert depr.pv[1] == approx(762811, 1e3)
assert depr.wind[1] == approx(2651114, 1e3)
assert depr.battery[1] == approx(2555389, 1e3)
assert depr.hybrid[1] == approx(5969315, 1e3)
insr = hybrid_plant.insurance_expenses
assert insr.pv[0] == approx(0, 1e3)
assert insr.wind[0] == approx(0, 1e3)
assert insr.battery[0] == approx(0, 1e3)
assert insr.hybrid[0] == approx(0, 1e3)
om = hybrid_plant.om_total_expenses
assert om.pv[1] == approx(74993, 1e3)
assert om.wind[1] == approx(420000, 1e3)
assert om.battery[1] == approx(75000, 1e3)
assert om.hybrid[1] == approx(569993, 1e3)
rev = hybrid_plant.total_revenues
assert rev.pv[1] == approx(296504, 1e3)
assert rev.wind[1] == approx(1009139, 1e3)
assert rev.battery[1] == approx(167015, 1e3)
assert rev.hybrid[1] == approx(1340129, 1e3)
tc = hybrid_plant.tax_incentives
assert tc.pv[1] == approx(1123104, 1e3)
assert tc.wind[1] == approx(504569, 1e3)
assert tc.battery[1] == approx(0, 1e3)
assert tc.hybrid[1] == approx(1659156, 1e3)
def test_hybrid_om_costs_error(site):
hybrid_plant = HybridSimulation(technologies, site, interconnect_kw=interconnection_size_kw,
dispatch_options={'battery_dispatch': 'one_cycle_heuristic'})
hybrid_plant.ppa_price = (0.03, )
hybrid_plant.pv.dc_degradation = [0] * 25
hybrid_plant.battery._financial_model.SystemCosts.om_production = (1,)
try:
hybrid_plant.simulate()
except ValueError as e:
assert e
def test_hybrid_om_costs(site):
hybrid_plant = HybridSimulation(technologies, site, interconnect_kw=interconnection_size_kw,
dispatch_options={'battery_dispatch': 'one_cycle_heuristic'})
hybrid_plant.ppa_price = (0.03, )
hybrid_plant.pv.dc_degradation = [0] * 25
# set all O&M costs to 0 to start
hybrid_plant.wind.om_fixed = 0
hybrid_plant.wind.om_capacity = 0
hybrid_plant.wind.om_variable = 0
hybrid_plant.pv.om_fixed = 0
hybrid_plant.pv.om_capacity = 0
hybrid_plant.pv.om_variable = 0
hybrid_plant.battery.om_fixed = 0
hybrid_plant.battery.om_capacity = 0
hybrid_plant.battery.om_variable = 0
# test variable costs
hybrid_plant.wind.om_variable = 5
hybrid_plant.pv.om_variable = 2
hybrid_plant.battery.om_variable = 3
hybrid_plant.simulate()
var_om_costs = hybrid_plant.om_variable_expenses
total_om_costs = hybrid_plant.om_total_expenses
for i in range(len(var_om_costs.hybrid)):
assert var_om_costs.pv[i] + var_om_costs.wind[i] + var_om_costs.battery[i] \
== approx(var_om_costs.hybrid[i], rel=1e-3)
assert total_om_costs.pv[i] == approx(var_om_costs.pv[i])
assert total_om_costs.wind[i] == approx(var_om_costs.wind[i])
assert total_om_costs.battery[i] == approx(var_om_costs.battery[i])
assert total_om_costs.hybrid[i] == approx(var_om_costs.hybrid[i])
hybrid_plant.wind.om_variable = 0
hybrid_plant.pv.om_variable = 0
hybrid_plant.battery.om_variable = 0
# test fixed costs
hybrid_plant.wind.om_fixed = 5
hybrid_plant.pv.om_fixed = 2
hybrid_plant.battery.om_fixed = 3
hybrid_plant.simulate()
fixed_om_costs = hybrid_plant.om_fixed_expenses
total_om_costs = hybrid_plant.om_total_expenses
for i in range(len(fixed_om_costs.hybrid)):
assert fixed_om_costs.pv[i] + fixed_om_costs.wind[i] + fixed_om_costs.battery[i] \
== approx(fixed_om_costs.hybrid[i])
assert total_om_costs.pv[i] == approx(fixed_om_costs.pv[i])
assert total_om_costs.wind[i] == approx(fixed_om_costs.wind[i])
assert total_om_costs.battery[i] == approx(fixed_om_costs.battery[i])
assert total_om_costs.hybrid[i] == approx(fixed_om_costs.hybrid[i])
hybrid_plant.wind.om_fixed = 0
hybrid_plant.pv.om_fixed = 0
hybrid_plant.battery.om_fixed = 0
# test capacity costs
hybrid_plant.wind.om_capacity = 5
hybrid_plant.pv.om_capacity = 2
hybrid_plant.battery.om_capacity = 3
hybrid_plant.simulate()
cap_om_costs = hybrid_plant.om_capacity_expenses
total_om_costs = hybrid_plant.om_total_expenses
for i in range(len(cap_om_costs.hybrid)):
assert cap_om_costs.pv[i] + cap_om_costs.wind[i] + cap_om_costs.battery[i] \
== approx(cap_om_costs.hybrid[i])
assert total_om_costs.pv[i] == approx(cap_om_costs.pv[i])
assert total_om_costs.wind[i] == approx(cap_om_costs.wind[i])
assert total_om_costs.battery[i] == approx(cap_om_costs.battery[i])
assert total_om_costs.hybrid[i] == approx(cap_om_costs.hybrid[i])
hybrid_plant.wind.om_capacity = 0
hybrid_plant.pv.om_capacity = 0
hybrid_plant.battery.om_capacity = 0
def test_hybrid_tax_incentives(site):
hybrid_plant = HybridSimulation(technologies, site, interconnect_kw=interconnection_size_kw,
dispatch_options={'battery_dispatch': 'one_cycle_heuristic'})
hybrid_plant.ppa_price = (0.03, )
hybrid_plant.pv.dc_degradation = [0] * 25
hybrid_plant.wind._financial_model.TaxCreditIncentives.ptc_fed_amount = (1,)
hybrid_plant.pv._financial_model.TaxCreditIncentives.ptc_fed_amount = (2,)
hybrid_plant.battery._financial_model.TaxCreditIncentives.ptc_fed_amount = (3,)
hybrid_plant.wind._financial_model.TaxCreditIncentives.ptc_fed_escal = 0
hybrid_plant.pv._financial_model.TaxCreditIncentives.ptc_fed_escal = 0
hybrid_plant.battery._financial_model.TaxCreditIncentives.ptc_fed_escal = 0
hybrid_plant.simulate()
ptc_wind = hybrid_plant.wind._financial_model.value("cf_ptc_fed")[1]
assert ptc_wind == hybrid_plant.wind._financial_model.value("ptc_fed_amount")[0]*hybrid_plant.wind.annual_energy_kw
ptc_pv = hybrid_plant.pv._financial_model.value("cf_ptc_fed")[1]
assert ptc_pv == hybrid_plant.pv._financial_model.value("ptc_fed_amount")[0]*hybrid_plant.pv.annual_energy_kw
ptc_batt = hybrid_plant.battery._financial_model.value("cf_ptc_fed")[1]
assert ptc_batt == hybrid_plant.battery._financial_model.value("ptc_fed_amount")[0]\
* hybrid_plant.battery._financial_model.LCOS.batt_annual_discharge_energy[1]
ptc_hybrid = hybrid_plant.grid._financial_model.value("cf_ptc_fed")[1]
ptc_fed_amount = hybrid_plant.grid._financial_model.value("ptc_fed_amount")[0]
assert ptc_fed_amount == approx(1.22941)
assert ptc_hybrid == approx(ptc_fed_amount * hybrid_plant.grid._financial_model.Outputs.cf_energy_net[1], rel=1e-3)
```
#### File: tests/hybrid/test_utility_rate.py
```python
from dotenv import load_dotenv
import os
import shutil
from hybrid.utility_rate import UtilityRate
from hybrid.keys import set_developer_nrel_gov_key
path = os.path.dirname(os.path.abspath(__file__))
load_dotenv()
set_developer_nrel_gov_key(os.getenv("NREL_API_KEY"))
def test_urdb_response():
path_rates = os.path.join(path, 'data')
    os.makedirs(path_rates, exist_ok=True)  # tolerate a leftover data dir from a previous run
# these rates sometimes mysteriously disappear from URDB fyi
urdb_label = "5ca4d1175457a39b23b3d45e" # https://openei.org/apps/IURDB/rate/view/5ca4d1175457a39b23b3d45e
urdb = UtilityRate(path_rates=path_rates, urdb_label=urdb_label)
resp = urdb.get_urdb_response()
assert('label' in resp)
shutil.rmtree(path_rates)
```
#### File: tests/hybrid/test_wind.py
```python
import pytest
import math
import PySAM.Windpower as windpower
from hybrid.sites import SiteInfo, flatirons_site
from hybrid.wind_source import WindPlant
wind_default_elevation = 0
wind_default_rated_output = 2000
wind_default_max_cp = 0.45
wind_default_max_tip_speed = 80
wind_default_max_tip_speed_ratio = 8
wind_default_cut_in_speed = 4
wind_default_cut_out_speed = 25
wind_default_drive_train = 0
powercurveKW = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 56.9014, 72.8929, 90.7638, 110.618, 132.561, 156.696,
183.129, 211.962, 243.302, 277.251, 313.915, 353.398, 395.805, 441.239, 489.805, 541.608, 596.752,
655.341, 717.481, 783.274, 852.826, 926.241, 1003.62, 1088.85, 1174.66, 1260.47, 1346.28, 1432.09,
1517.9, 1603.71, 1689.53, 1775.34, 1861.15, 1946.96, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000,
2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000,
2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000, 2000,
2000, 2000, 2000, 2000, 2000, 2000, 2000, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0)
powercurveWS = (0, 0.25, 0.5, 0.75, 1, 1.25, 1.5, 1.75, 2, 2.25, 2.5, 2.75, 3, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, 5,
5.25, 5.5, 5.75, 6, 6.25, 6.5, 6.75, 7, 7.25, 7.5, 7.75, 8, 8.25, 8.5, 8.75, 9, 9.25, 9.5, 9.75, 10,
10.25, 10.5, 10.75, 11, 11.25, 11.5, 11.75, 12, 12.25, 12.5, 12.75, 13, 13.25, 13.5, 13.75, 14, 14.25,
14.5, 14.75, 15, 15.25, 15.5, 15.75, 16, 16.25, 16.5, 16.75, 17, 17.25, 17.5, 17.75, 18, 18.25, 18.5,
18.75, 19, 19.25, 19.5, 19.75, 20, 20.25, 20.5, 20.75, 21, 21.25, 21.5, 21.75, 22, 22.25, 22.5, 22.75,
23, 23.25, 23.5, 23.75, 24, 24.25, 24.5, 24.75, 25, 25.25, 25.5, 25.75, 26, 26.25, 26.5, 26.75, 27,
27.25, 27.5, 27.75, 28, 28.25, 28.5, 28.75, 29, 29.25, 29.5, 29.75, 30, 30.25, 30.5, 30.75, 31, 31.25,
31.5, 31.75, 32, 32.25, 32.5, 32.75, 33, 33.25, 33.5, 33.75, 34, 34.25, 34.5, 34.75, 35, 35.25, 35.5,
35.75, 36, 36.25, 36.5, 36.75, 37, 37.25, 37.5, 37.75, 38, 38.25, 38.5, 38.75, 39, 39.25, 39.5, 39.75, 40)
def test_wind_powercurve():
model = windpower.default("WindpowerSingleowner")
model.Turbine.wind_turbine_rotor_diameter = 75
# calculate system capacity. To evaluate other turbines, update the defaults dictionary
model.Turbine.calculate_powercurve(wind_default_rated_output,
int(model.Turbine.wind_turbine_rotor_diameter),
wind_default_elevation,
wind_default_max_cp,
wind_default_max_tip_speed,
wind_default_max_tip_speed_ratio,
wind_default_cut_in_speed,
wind_default_cut_out_speed,
wind_default_drive_train)
windspeeds_truth = [round(x, 2) for x in powercurveWS]
windspeeds_calc = [round(x, 2) for x in model.Turbine.wind_turbine_powercurve_windspeeds]
powercurve_truth = [round(x, 0) for x in powercurveKW]
powercurve_calc = [round(x, 0) for x in model.Turbine.wind_turbine_powercurve_powerout]
assert all([a == b for a, b in zip(windspeeds_truth, windspeeds_calc)])
assert all([a == b for a, b in zip(powercurve_truth, powercurve_calc)])
def test_changing_n_turbines():
# test with gridded layout
model = WindPlant(SiteInfo(flatirons_site), {'num_turbines': 10, "turbine_rating_kw": 2000})
assert(model.system_capacity_kw == 20000)
for n in range(1, 20):
model.num_turbines = n
assert model.num_turbines == n, "n turbs should be " + str(n)
assert model.system_capacity_kw == pytest.approx(20000, 1), "system capacity different when n turbs " + str(n)
# test with row layout
def test_changing_rotor_diam_recalc():
model = WindPlant(SiteInfo(flatirons_site), {'num_turbines': 10, "turbine_rating_kw": 2000})
assert model.system_capacity_kw == 20000
diams = range(50, 70, 140)
for d in diams:
model.rotor_diameter = d
assert model.rotor_diameter == d, "rotor diameter should be " + str(d)
assert model.turb_rating == 2000, "new rating different when rotor diameter is " + str(d)
def test_changing_turbine_rating():
# powercurve scaling
model = WindPlant(SiteInfo(flatirons_site), {'num_turbines': 24, "turbine_rating_kw": 2000})
n_turbs = model.num_turbines
for n in range(1000, 3000, 150):
model.turb_rating = n
assert model.system_capacity_kw == model.turb_rating * n_turbs, "system size error when rating is " + str(n)
def test_changing_powercurve():
# with power curve recalculation requires diameter changes
model = WindPlant(SiteInfo(flatirons_site), {'num_turbines': 24, "turbine_rating_kw": 2000})
n_turbs = model.num_turbines
d_to_r = model.rotor_diameter / model.turb_rating
for n in range(1000, 3001, 500):
d = math.ceil(n * d_to_r * 1)
model.modify_powercurve(d, n)
assert model.turb_rating == pytest.approx(n, 0.1), "turbine rating should be " + str(n)
assert model.system_capacity_kw == pytest.approx(model.turb_rating * n_turbs, 0.1), "size error when rating is " + str(n)
def test_changing_system_capacity():
# adjust number of turbines, system capacity won't be exactly as requested
model = WindPlant(SiteInfo(flatirons_site), {'num_turbines': 20, "turbine_rating_kw": 1000})
rating = model.turb_rating
for n in range(1000, 20000, 1000):
model.system_capacity_by_num_turbines(n)
assert model.turb_rating == rating, str(n)
assert model.system_capacity_kw == rating * round(n/rating)
# adjust turbine rating first, system capacity will be exact
model = WindPlant(SiteInfo(flatirons_site), {'num_turbines': 20, "turbine_rating_kw": 1000})
for n in range(40000, 60000, 1000):
model.system_capacity_by_rating(n)
assert model.system_capacity_kw == pytest.approx(n)
```
#### File: analysis/bos/cost_calculator.py
```python
from .bos_model import BOSCostPerMW, BOSCalculator
from .bos_lookup import BOSLookup
# from .hybrid_bosse import HybridBOSSE
from hybrid.log import bos_logger as logger
import numpy as np
class CostCalculator():
"""
CostCalculator class contains tools to determine BOS component costs and Installed costs for a single technology or hybrid plant
"""
def __init__(self,
bos_cost_source,
scenario,
interconnection_size,
wind_installed_cost_mw,
pv_installed_cost_mw,
storage_installed_cost_mw,
storage_installed_cost_mwh,
wind_bos_cost_mw=0,
pv_bos_cost_mw=0,
storage_bos_cost_mw=0,
storage_bos_cost_mwh=0,
modify_costs=False,
cost_reductions=[]):
"""
        :param bos_cost_source: Defines the type of BOS analysis used. Options are 'BOSLookup', 'CostPerMW',
            and 'HybridBOSSE' (the latter is not yet implemented)
:param scenario: 'greenfield' or 'solar addition'
:param interconnection_size: Size (MW) of interconnection
:param wind_installed_cost_mw: $USD cost/mw for installed wind
:param pv_installed_cost_mw: $USD cost/mw for installed solar
:param storage_installed_cost_mw: $USD cost/mw for installed storage
:param storage_installed_cost_mwh: $USD cost/mwh for installed storage
        :param wind_bos_cost_mw: $USD cost/mw for wind BOS
        :param pv_bos_cost_mw: $USD cost/mw for solar BOS
        :param storage_bos_cost_mw: $USD cost/mw for storage BOS
        :param storage_bos_cost_mwh: $USD cost/mwh for storage BOS
:param modify_costs: (boolean) Flag to determine whether returned costs will be modified using supplied
modifiers
        :param cost_reductions: Dictionary specifying CAPEX reduction fractions
"""
self.descriptor = 'BOS function'
if scenario == 'greenfield':
self.scenario = scenario
elif scenario == 'solar addition':
raise NotImplementedError
else:
raise ValueError("CostCalculator scenario must be 'greenfield' or 'solar addition'")
self.interconnection_size = interconnection_size
self.model = BOSCalculator()
if bos_cost_source.lower() == "boslookup":
self.model = BOSLookup()
elif bos_cost_source.lower() == "costpermw":
self.model = BOSCostPerMW()
elif bos_cost_source.lower() == "hybridbosse":
raise NotImplementedError
self.bos_cost_source = bos_cost_source
self.wind_installed_cost_mw = wind_installed_cost_mw
self.pv_installed_cost_mw = pv_installed_cost_mw
self.storage_installed_cost_mw = storage_installed_cost_mw
self.storage_installed_cost_mwh = storage_installed_cost_mwh
self.wind_bos_cost_mw = wind_bos_cost_mw
self.pv_bos_cost_mw = pv_bos_cost_mw
self.storage_bos_cost_mw = storage_bos_cost_mw
self.storage_bos_cost_mwh = storage_bos_cost_mwh
self.modify_costs = modify_costs
self.cost_reductions = cost_reductions
def calculate_installed_costs(self, wind_size, pv_size, storage_size_mw=0., storage_size_mwh=0.):
"""
Calculates installed costs for wind, solar, and hybrid based on installed cost/mw and size of plant
:return: installed cost of wind, solar and hybrid components of plant
"""
total_installed_cost = 0.
wind_installed_cost = self.wind_installed_cost_mw * wind_size
solar_installed_cost = self.pv_installed_cost_mw * pv_size
storage_installed_cost = (self.storage_installed_cost_mw * storage_size_mw) + \
(self.storage_installed_cost_mwh * storage_size_mwh)
total_installed_cost += wind_installed_cost
total_installed_cost += solar_installed_cost
total_installed_cost += storage_installed_cost
return wind_installed_cost, solar_installed_cost, storage_installed_cost, total_installed_cost
def calculate_total_costs(self, wind_mw, pv_mw, storage_mw=0., storage_mwh=0.):
"""
Calculates total installed cost of plant (BOS Cost + Installed Cost).
Modifies the capex or opex costs as specified in cost_reductions if modify_costs is True
:return: Total installed cost of plant (BOS Cost + Installed Cost)
"""
wind_installed_cost, solar_installed_cost, storage_installed_cost, total_installed_cost = \
self.calculate_installed_costs(wind_mw, pv_mw, storage_mw, storage_mwh)
if self.bos_cost_source.lower() == 'costpermw':
wind_bos_cost, solar_bos_cost, storage_bos_cost, total_bos_cost, _ = \
self.model.calculate_bos_costs(wind_mw, pv_mw, storage_mw, storage_mwh, self.wind_bos_cost_mw,
self.pv_bos_cost_mw, self.storage_bos_cost_mw, self.storage_bos_cost_mwh,
self.interconnection_size, self.scenario)
else:
wind_bos_cost, solar_bos_cost, total_bos_cost, min_distance = \
self.model.calculate_bos_costs(wind_mw, pv_mw, self.interconnection_size)
storage_bos_cost = 0.
total_wind_cost = wind_installed_cost + wind_bos_cost
total_solar_cost = solar_installed_cost + solar_bos_cost
total_storage_cost = storage_installed_cost + storage_bos_cost
total_project_cost = total_installed_cost + total_bos_cost
if self.modify_costs:
logger.info('Modifying costs using selected multipliers')
logger.info("Total Project Cost Before Modifiers: {}".format(total_project_cost))
if wind_mw > 0 and pv_mw > 0:
total_project_cost = ((1 - self.cost_reductions['solar_capex_reduction_hybrid']) *
solar_installed_cost) + \
((1 - self.cost_reductions[
'solar_bos_reduction_hybrid']) * solar_bos_cost) + \
((1 - self.cost_reductions['wind_capex_reduction_hybrid']) *
wind_installed_cost) + \
((1 - self.cost_reductions[
'wind_bos_reduction_hybrid']) * wind_bos_cost)
elif pv_mw > 0:
total_project_cost = ((1 - self.cost_reductions['solar_capex_reduction']) *
solar_installed_cost) + \
((1 - self.cost_reductions['solar_bos_reduction']) * solar_bos_cost)
elif wind_mw > 0:
total_project_cost = ((1 - self.cost_reductions['wind_capex_reduction']) *
wind_installed_cost) + \
((1 - self.cost_reductions['wind_bos_reduction']) * wind_bos_cost)
logger.info("Total Project Cost After Modifiers: {}".format(total_project_cost))
# else:
# logger.info('Not modifying costs')
# Not modifying wind or solar costs
logger.info("Total Project Cost (Installed Cost + BOS Cost): {}".format(total_project_cost))
return total_solar_cost, total_wind_cost, total_storage_cost, total_project_cost
def create_cost_calculator(interconnection_mw: float,
bos_cost_source: str = "CostPerMW",
scenario: str = "greenfield",
atb_costs: bool = False,
atb_year: float = 2020,
atb_scenario: str = "Moderate",
wind_installed_cost_mw: float = 1454000,
solar_installed_cost_mw: float = 960000,
storage_installed_cost_mw: float = 1203000,
storage_installed_cost_mwh: float = 400000,
wind_bos_cost_mw: float = 0,
solar_bos_cost_mw: float = 0,
storage_bos_cost_mw: float = 0,
storage_bos_cost_mwh: float = 0,
modify_costs: bool = False,
cost_reductions=dict()) -> CostCalculator:
if modify_costs:
cost_reductions['solar_capex_reduction'] = 0
cost_reductions['wind_capex_reduction'] = 0
cost_reductions['wind_bos_reduction'] = 0
cost_reductions['solar_bos_reduction'] = 0
cost_reductions['wind_capex_reduction_hybrid'] = 0.1
cost_reductions['solar_capex_reduction_hybrid'] = 0.1
cost_reductions['wind_bos_reduction_hybrid'] = 0.1
cost_reductions['solar_bos_reduction_hybrid'] = 0.1
if atb_costs:
from .atb_lookup import ATBLookup
atblookup = ATBLookup()
wind_installed_cost_mw, solar_installed_cost_mw, storage_installed_cost_mw, storage_installed_cost_mwh = \
atblookup.calculate_atb_costs(atb_year, atb_scenario)
return CostCalculator(bos_cost_source, scenario, interconnection_mw, wind_installed_cost_mw,
solar_installed_cost_mw, storage_installed_cost_mw, storage_installed_cost_mwh,
wind_bos_cost_mw, solar_bos_cost_mw, storage_bos_cost_mw, storage_bos_cost_mwh,
modify_costs, cost_reductions)
```
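As a worked example of the ``calculate_installed_costs`` arithmetic using the default $/MW and $/MWh figures from ``create_cost_calculator`` above (the plant sizes are illustrative, not from any HOPP scenario):

```python
# Hypothetical plant sizes; cost rates are the create_cost_calculator() defaults above.
wind_mw, pv_mw, storage_mw, storage_mwh = 100, 50, 20, 80
wind_cost = 1_454_000 * wind_mw                                   # 145,400,000
pv_cost = 960_000 * pv_mw                                         #  48,000,000
storage_cost = 1_203_000 * storage_mw + 400_000 * storage_mwh     #  56,060,000
total_installed_cost = wind_cost + pv_cost + storage_cost
print(f"{total_installed_cost:,}")                                # 249,460,000
```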
#### File: optimization/data_logging/a_data_recorder.py
```python
from abc import abstractmethod
class ADataRecorder:
"""
    Abstract class defining an interface for accumulating data from an experimental run in a tabular format and
    possibly writing that data out to disk. Accumulated records are expected to always match the defined columns.
"""
@abstractmethod
def add_columns(self, *column_names) -> None:
"""
Adds columns to the schema. Add columns in the same order you will record them in.
"""
pass
@abstractmethod
def set_schema(self) -> None:
"""
Call this after all columns have been defined via add_columns().
Schema changes can only happen before this point.
Data can only be accumulated after this point.
"""
pass
@abstractmethod
def accumulate(self, *data, **kwdata) -> None:
"""
Accumulates data into the recorder.
Data must be either accumulated in the same order as defined with add_columns() or as keywords using kwdata.
Don't mix these two approaches or you will get undefined behavior.
:return:
"""
pass
@abstractmethod
def store(self) -> None:
"""
Closes the accumulated record, adds it to self.records and logs it to the logger
"""
pass
@abstractmethod
def is_setup(self) -> bool:
"""
:return: true if set_schema() has been called
"""
pass
@abstractmethod
def get_column(self, name) -> []:
"""
gets a column from the recorded data
:param name: column name
:return: iterable column
"""
pass
@abstractmethod
def get_record(self, index) -> []:
"""
:param index:
:return: record at given index in the recorded data.
"""
pass
@abstractmethod
def get_records(self) -> []:
"""
:return: all records
"""
pass
@abstractmethod
def get_column_map(self) -> {}:
pass
@abstractmethod
def close(self) -> None:
"""
Must be called to dispose of an instance
"""
pass
```
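The interface above leaves storage entirely to concrete subclasses. The sketch below is a hypothetical in-memory implementation (it is not part of this repository) that only illustrates the intended add_columns → set_schema → accumulate → store call sequence.
```python
# Hypothetical in-memory recorder; for illustration only.
class ListDataRecorder(ADataRecorder):
    def __init__(self):
        self._column_map = {}   # column name -> index, in insertion order
        self._pending = []      # values accumulated for the current record
        self._records = []
        self._is_set_up = False
    def add_columns(self, *column_names) -> None:
        for name in column_names:
            self._column_map[name] = len(self._column_map)
    def set_schema(self) -> None:
        self._is_set_up = True
    def accumulate(self, *data, **kwdata) -> None:
        # positional values first, keyword values placed by column order
        self._pending.extend(data)
        for name in sorted(kwdata, key=self._column_map.get):
            self._pending.append(kwdata[name])
    def store(self) -> None:
        self._records.append(list(self._pending))
        self._pending.clear()
    def is_setup(self) -> bool:
        return self._is_set_up
    def get_column(self, name) -> []:
        index = self._column_map[name]
        return [record[index] for record in self._records]
    def get_record(self, index) -> []:
        return self._records[index]
    def get_records(self) -> []:
        return self._records
    def get_column_map(self) -> {}:
        return self._column_map
    def close(self) -> None:
        self._records = []
```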
#### File: optimizer/dimension/dimension_info.py
```python
from abc import abstractmethod
from typing import Union
class DimensionInfo:
"""
Interface for probability distribution functions
"""
@abstractmethod
def update(self, samples: [any]) -> None:
"""
Update parameters of the pdf with new samples
:param samples: list of best candidates from optimizer
"""
pass
@abstractmethod
def sample(self) -> Union[float, int]:
"""
:return: Sample from the pdf
"""
pass
@abstractmethod
def best(self) -> Union[float, int]:
"""
:return: Most likely sample
"""
pass
@abstractmethod
def mean(self) -> Union[float, int]:
pass
@abstractmethod
def variance(self) -> Union[float, int]:
pass
```
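A hypothetical Gaussian implementation of the interface above (not taken from the repository), shown only to make the update/sample/best contract concrete.
```python
import random
import statistics
from typing import Union
class GaussianDimension(DimensionInfo):
    """Illustrative normal distribution that is re-fit from the best candidates."""
    def __init__(self, mu: float = 0.0, sigma: float = 1.0):
        self.mu = mu
        self.sigma = sigma
    def update(self, samples: [any]) -> None:
        self.mu = statistics.mean(samples)
        # keep the previous spread when a single sample carries no variance
        if len(samples) > 1:
            self.sigma = statistics.stdev(samples)
    def sample(self) -> Union[float, int]:
        return random.gauss(self.mu, self.sigma)
    def best(self) -> Union[float, int]:
        return self.mu
    def mean(self) -> Union[float, int]:
        return self.mu
    def variance(self) -> Union[float, int]:
        return self.sigma * self.sigma
```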
#### File: optimization/optimizer/IPDCEM.py
```python
import random
from typing import (
Optional,
Tuple,
)
import numpy as np
# import shapely
from .DCEM_optimizer import DCEMOptimizer
# sys.path.append('../examples/flatirons')
# import func_tools
# matplotlib.use('tkagg')
class IPDCEM(DCEMOptimizer):
"""
A prototype implementation of an incremental decomposed cross-entropy method.
"""
def __init__(self, generation_size: int, selection_size: int, scale: float,
**kwargs
) -> None:
super().__init__(generation_size, 1.0, **kwargs)
self._selection_size: int = selection_size
self._scale: float = scale
self._population: [Tuple[float, any]] = []
def ask(self, num: Optional[int] = None) -> [any]:
"""
:param num: the number of search points to return. If undefined, the optimizer will choose how many to return.
:return: a list of search points generated by the optimizer
"""
if len(self._population) == 0:
return super().ask(num)
if num is None:
num = self._generation_size
population = []
for _ in range(num):
base = self._population[random.randrange(len(self._population))][1]
# candidate = [0.0] * len(self.dimensions)
candidate = np.empty(self.get_num_dimensions())
for i, dimension in enumerate(self._dimensions):
candidate[i] = base[i] + (dimension.sample() - dimension.best_solution()) * self._scale
population.append(candidate)
return population
def tell(self, evaluations: [Tuple[float, any]]) -> None:
"""
Updates the optimizer with the objective evaluations of a list of search points
:param evaluations: a list of tuples of (evaluation, search point)
"""
print('eval: ', [sample[0] for sample in evaluations])
self._population.extend(evaluations)
self._population.sort(key=lambda evaluation: evaluation[0], reverse=True)
# self.population = self.population[0:self.selection_size]
del self._population[self._selection_size:]
print('pop: ', [sample[0] for sample in self._population])
for i, dimension in enumerate(self._dimensions):
dimension.update([evaluation[1][i] for evaluation in self._population])
def best_solution(self) -> (Optional[float], any):
"""
:return: the current best solution and (estimated) score
"""
return self._population[0] if len(self._population) > 0 else super().best_solution()
```
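IPDCEM follows the same ask/tell contract as the other optimizers in this package. A generic driver loop over that contract might look like the sketch below; the optimizer construction itself is omitted because it depends on DCEMOptimizer keyword arguments that are not shown here.
```python
# Hypothetical driver: `optimizer` is any object exposing ask(), tell() and
# best_solution() as above; `objective` maps a candidate array to a float
# score that the optimizer maximizes (tell() sorts descending by score).
def run_ask_tell(optimizer, objective, iterations: int = 50):
    for _ in range(iterations):
        candidates = optimizer.ask()
        optimizer.tell([(objective(candidate), candidate) for candidate in candidates])
    return optimizer.best_solution()
```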
#### File: optimization/optimizer/KFDCEM.py
```python
import abc
import math
import random
from abc import ABC
from typing import (
List,
Optional,
Tuple,
Union,
)
# matplotlib.use('tkagg')
import numpy as np
# sys.path.append('../examples/flatirons')
# import func_tools
from ..data_logging.data_recorder import DataRecorder
from .ask_tell_optimizer import AskTellOptimizer
class KFDCEM(AskTellOptimizer, ABC):
"""
A prototype implementation of a Kalman-filter based decomposed cross-entropy method.
"""
class Dimension:
@abc.abstractmethod
def update(self, samples: [float]) -> None:
pass
@abc.abstractmethod
def step(self):
pass
@abc.abstractmethod
def sample(self) -> Union[float, int]:
pass
@abc.abstractmethod
def best(self) -> Union[float, int]:
pass
class KFDimension(Dimension):
"""
Kalman Filter model dynamics:
x' = ax + by + w
z = hx + v
here, a = 1, b = 0, h = 1 so:
x' = x + w
z = x + v
"""
def __init__(self,
mu: float,
sigma: float,
sensor_noise: float,
dynamic_noise: float):
self.mu: float = mu
self.variance: float = sigma * sigma
self.sensor_noise: float = sensor_noise
self.dynamic_noise: float = dynamic_noise
def step(self):
"""
x' = a*x + b*u
p' = a^2 * p + Q
In this case:
a = 1, b = 0
"""
# print('kf step ', self.variance, self.dynamic_noise, self.variance + self.dynamic_noise)
self.variance = self.variance + self.dynamic_noise
def update(self, samples: [float]) -> None:
"""
kalman gain: k = h*p / (p * h^2 + R)
mean update: x' = x + k*(z - h*x)
variance update: p' = p*(1 - h*k)
h = 1 here, so:
kalman gain: k = p / (p + R)
mean update: x' = x + k*(z - x)
variance update: p' = p*(1 - k)
"""
if len(samples) > 1:
sample_standard_deviation = np.std(samples, 0, ddof=1)
else:
sample_standard_deviation = 0.0
sensor_noise = self.sensor_noise + sample_standard_deviation * sample_standard_deviation
sample_mean = np.mean(samples, 0)
kalman_gain = self.variance / (self.variance + sensor_noise)
innovation = sample_mean - self.mu
print('kfu: ', sample_standard_deviation, sensor_noise, innovation, self.variance,
self.variance * (1 - kalman_gain))
self.mu = self.mu + kalman_gain * innovation
self.variance = self.variance * (1 - kalman_gain)
def sample(self) -> float:
return random.gauss(self.mu, math.sqrt(self.variance))
def best(self) -> Union[float, int]:
return self.mu
class KFParameterDimension(Dimension):
def __init__(self,
mu: 'KFDCEM.KFDimension',
sigma: 'KFDCEM.KFDimension'):
self.mu = mu
self.sigma = sigma
def step(self):
self.mu.step()
self.sigma.step()
def update(self, samples: [float]) -> None:
sample_mean = np.mean(samples, 0)
sample_standard_deviation = np.std(samples, 0, ddof=1)
self.mu.update([sample_mean])
self.sigma.update([sample_standard_deviation])
def sample(self) -> float:
return random.gauss(self.mu.best(), self.sigma.best())
def best(self) -> Union[float, int]:
return self.mu.best()
def __init__(self,
generation_size: int = 100,
selection_proportion: float = .33,
dimensions: Optional[List[Dimension]] = None
):
self._dimensions: [KFDCEM.Dimension] = [] if dimensions is None else dimensions
self._generation_size: int = generation_size
self._selection_proportion: float = selection_proportion
def setup(self, dimension: [Dimension], recorder: DataRecorder) -> None:
"""
Setup parameters given initial conditions of the candidate
:param dimensions: list of search dimensions
:param recorder: data recorder
"""
self._dimensions = dimension
def stop(self) -> bool:
"""
:return: True when the optimizer thinks it has reached a stopping point
"""
return False
def ask(self, num: Optional[int] = None) -> [any]:
"""
:param num: the number of search points to return. If undefined, the optimizer will choose how many to return.
:return: a list of search points generated by the optimizer
"""
if num is None:
num = self._generation_size
population = []
for _ in range(num):
# candidate = [0.0] * len(self.dimensions)
candidate = np.empty(self.get_num_dimensions())
for i, dimension in enumerate(self._dimensions):
candidate[i] = dimension.sample()
population.append(candidate)
return population
def tell(self, evaluations: [Tuple[float, any]]) -> None:
"""
Updates the optimizer with the objective evaluations of a list of search points
:param evaluations: a list of tuples of (evaluation, search point)
"""
evaluations.sort(key=lambda evaluation: evaluation[0], reverse=True)
selection_size = math.ceil(self._selection_proportion * len(evaluations))
del evaluations[selection_size:]
for i, dimension in enumerate(self._dimensions):
dimension.step()
dimension.update([evaluation[1][i] for evaluation in evaluations])
def best_solution(self) -> (Optional[float], any):
"""
:return: the current best solution
"""
return None, [dimension.best() for dimension in self._dimensions]
def get_num_dimensions(self) -> int:
"""
:return: number of dimensions being optimized over, or None if not implemented or applicable
"""
return len(self._dimensions)
``` |
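A short, hypothetical exercise of KFDCEM on a toy quadratic objective, assuming KFDCEM overrides every abstract method of AskTellOptimizer; setup() ignores its recorder argument, so None is passed, and all parameter values are illustrative only.
```python
import numpy as np
def objective(candidate: np.ndarray) -> float:
    # maximize -||x - 3||^2 over two dimensions; the optimum is x = (3, 3)
    return -float(np.sum((candidate - 3.0) ** 2))
optimizer = KFDCEM(generation_size=50, selection_proportion=0.3)
optimizer.setup([KFDCEM.KFDimension(mu=0.0, sigma=2.0, sensor_noise=0.1, dynamic_noise=0.01)
                 for _ in range(2)], recorder=None)
for _ in range(30):
    candidates = optimizer.ask()
    optimizer.tell([(objective(c), c) for c in candidates])
score, best = optimizer.best_solution()  # score is None; best should drift toward [3.0, 3.0]
print(best)
```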
{
"source": "j-martin/clubhouse",
"score": 2
} |
#### File: clubhouse/clubhouse/parser.py
```python
import argparse
import logging
import sys
import re
from typing import Dict
from collections import OrderedDict, deque
from lxml import html
from unidecode import unidecode
from jinja2 import BaseLoader, Environment
import dag
_jinja_env = Environment(loader=BaseLoader(), trim_blocks=True, lstrip_blocks=True)
logger = logging.getLogger(__name__)
ENCODINGS_WITH_SMART_QUOTES = [
"windows-1252",
"iso-8859-1",
"iso-8859-2",
]
def conf_logging(cli_arguments):
logger = logging.getLogger()
sfmt = '%(asctime)s : %(levelname)s : %(name)s : %(message)s'
formatter = logging.Formatter(sfmt)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(getattr(logging, cli_arguments.log_level.upper()))
def add_logging_options(options):
options.add_argument(
'-l', '--log-level',
default='INFO',
help='Set the logging level',
choices=[
'debug',
'info',
'warn',
'warning',
'error',
'critical',
'fatal',
],
)
return options
def add_options(options):
options.add_argument('infile', nargs='?', type=argparse.FileType('r'),
default=sys.stdin,
help="path to file to parse, defaults to stdin")
options.add_argument('-o', '--outfile', type=argparse.FileType('w'),
default=sys.stdout,
help="path to dump the output, defaults to stdout")
return options
def execute(parser):
parser = add_options(parser)
args = parser.parse_args()
conf_logging(args)
parsed = parse(html.parse(args.infile))
munged = munge(parsed)
rendered = build(munged)
with args.outfile as f:
f.write(rendered)
def parse(tree):
resources = tree.xpath('//h1[text()="Resources"]/following-sibling::h2')
tables = tree.xpath('//h1[text()="Resources"]/following-sibling::table')
logger.debug('resources: %s', resources)
parsed = {}
for resource, rawtable in zip(resources, tables):
resource_name = resource.xpath('string(.)')
logger.info('resource: %s', resource_name)
parsed[resource_name] = extract(rawtable)
logger.debug(parsed)
return parsed
def extract(table):
headers = table.xpath('thead/tr/th/text()')
fields = table.xpath('./tbody/tr/td[strong]')
descriptions = table.xpath('./tbody/tr/td[not(strong)]')
logger.debug('headers: %s', [h for h in headers])
extracted_fields = [
(h.findtext('strong'), h.xpath('string(./span)'))
for h in fields
]
extracted_descriptions = []
for h in descriptions:
asbytes = bytes(h.text, ENCODINGS_WITH_SMART_QUOTES[0])
extracted_descriptions.append(unidecode(str(asbytes, ENCODINGS_WITH_SMART_QUOTES[2])))
logger.debug('fields: %s', extracted_fields)
logger.debug('descriptions: %s', extracted_descriptions)
rv = {f[0]: {"type": f[1], "description": d, "args": ''}
for f, d in zip(extracted_fields, extracted_descriptions)}
logger.debug(rv)
return rv
def munge(datablob: Dict[str, Dict]) -> Dict[str, Dict]:
#: searches for data between () or [] .. also matches [)..
nested = re.compile(r'(?:\[|\()(?P<inside>.+)(?:\]|\))')
def has_nested(type_):
return any(('Array' in type_, type_ in datablob,))
graph = dag.DAG()
for resource_name, resource in datablob.items():
graph.add_node_if_not_exists(resource_name)
for details in resource.values():
if 'or null' in details['type']:
details['type'] = details['type'].split(' ')[0]
details['args'] = 'allow_none=True'
elif 'Enum' in details['type']:
choices = nested.search(details['type']).group(1).split(',')
details['type'] = 'String'
details['args'] = 'validate=validate.OneOf({0})'.format(
', '.join(['"{0}"'.format(c.strip()) for c in choices])
)
if not has_nested(details['type']):
continue
fieldtype = details['type']
logger.info('%s: %s\n%s', resource_name, fieldtype, details)
if 'Array' in fieldtype:
details['args'] = ', many=True'
fieldtype = nested.search(fieldtype).group(1)
if fieldtype not in datablob:
# if the field type is not part of the resources, then
# we will use marshmallow default fields
details['type'] = 'fields.' + fieldtype
continue
graph.add_node_if_not_exists(fieldtype)
if fieldtype == resource_name:
# marshmallow self-nesting schema
logger.info(
'self referential cycle detected for %s on %s',
resource_name,
fieldtype
)
fieldtype = '"self"'
else:
logger.info('---- %s: ', fieldtype)
graph.add_edge(fieldtype, resource_name)
details['type'] = fieldtype
details['args'] = ', many=False'
ob = OrderedDict()
for r in graph.topological_sort():
logger.info(r)
ob[r] = datablob[r]
return ob
def build(datablob: Dict[str, Dict]):
_template = """\
from marshmallow import Schema, fields, pprint, validate
{% for resource_name, resource in resources.items(): %}
class {{ resource_name }}(Schema):
{% for field, details in resource.items() %}
{{ '#: ' ~ details.description | wordwrap(73) | replace('\n', '\n#: ') | indent }}
{# parses the types and understands how to map to schemas #}
{%- if 'many' is in(details.args) %}
{{ field }} = fields.Nested({{details.type}}{{details.args}})
{% else %}
{{ field }} = fields.{{details.type}}({{details.args}})
{% endif %}
{% endfor %}
{% endfor %}
"""
rtemplate = _jinja_env.from_string(_template)
_rendered = rtemplate.render(resources=datablob)
logger.debug('%s: ', _rendered)
return _rendered
def main():
option_parser = argparse.ArgumentParser()
option_parser = add_logging_options(option_parser)
execute(option_parser)
if __name__ == '__main__':
main()
``` |
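A minimal sketch of the munge/build stages on a hand-built datablob, assuming the module is importable as `clubhouse.parser` and the `dag` dependency is installed; the resource and field names are invented for illustration.
```python
from clubhouse import parser
# mirrors the shape produced by extract(): resource -> field -> {type, description, args}
datablob = {
    "Label": {
        "name": {"type": "String", "description": "The label name.", "args": ""},
    },
    "Story": {
        "id": {"type": "Integer", "description": "The unique ID.", "args": ""},
        "labels": {"type": "Array [Label]", "description": "Attached labels.", "args": ""},
    },
}
# munge() resolves nested resource references and orders resources topologically;
# build() renders marshmallow Schema classes from the jinja template.
print(parser.build(parser.munge(datablob)))
```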
{
"source": "jmartindf/pelican-jsonfeed",
"score": 2
} |
#### File: jmartindf/pelican-jsonfeed/pelican_jsonfeed.py
```python
from __future__ import unicode_literals
import six
from jinja2 import Markup
from pelican import signals
from pelican.writers import Writer
from pelican.generators import Generator
from pelican.utils import set_date_tzinfo
from feedgenerator import SyndicationFeed, get_tag_uri
from feedgenerator.django.utils.feedgenerator import rfc2822_date, rfc3339_date
from feedgenerator.django.utils.encoding import force_text, iri_to_uri
from collections import OrderedDict
import json
class JSONFeed(SyndicationFeed):
    """Helper class which generates the JSON feed based on the global settings"""
    content_type = 'application/json; charset=utf-8'
    def __init__(self, *args, **kwargs):
        """Initialise the feed by delegating to SyndicationFeed."""
        super(JSONFeed, self).__init__(*args, **kwargs)
def set_settings(self, settings):
"""Helper function which just receives the podcast settings.
:param settings: A dictionary with all the site settings.
"""
self.settings = settings
if 'WEBSUB_HUB' in self.settings and self.settings['WEBSUB_HUB'] != "":
self.feed["hub"] = self.settings['WEBSUB_HUB']
def add_root_elements(self, handler):
pass
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, enclosure=None,
categories=(), item_copyright=None, ttl=None, updateddate=None,
enclosures=None, external_url=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate and updateddate, which are datetime.datetime
objects, and enclosures, which is an iterable of instances of the
Enclosure class.
"""
def to_unicode(s):
return force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
enclosures = [] if enclosures is None else enclosures
item = OrderedDict()
item['title'] = to_unicode(title)
if pubdate:
item['date_published'] = rfc3339_date(pubdate)
if updateddate:
            item['date_modified'] = rfc3339_date(updateddate)
#if unique_id:
# item["id"] = to_unicode(unique_id)
item["id"] = iri_to_uri(link)
item['url'] = iri_to_uri(link)
if external_url:
item['external_url'] = external_url
author = {}
has_author = False
if author_email:
author["url"] = "mailto:%s" % to_unicode(author_email)
has_author = True
if author_name:
author["name"] = to_unicode(author_name)
has_author = True
if author_link:
author["url"] = iri_to_uri(author_link)
has_author = True
if has_author:
item["author"] = author
item['content_html'] = to_unicode(description)
if categories:
item["tags"] = categories
item.update(kwargs)
self.items.append(item)
def write(self, outfile, encoding):
def to_unicode(s):
return force_text(s, strings_only=True)
handler = OrderedDict()
json_items = []
handler["version"] = "https://jsonfeed.org/version/1"
handler["title"] = self.feed["title"]
if 'SITESUBTITLE' in self.settings:
handler['description'] = self.settings['SITESUBTITLE']
handler["home_page_url"] = self.feed["link"]
handler["feed_url"] = self.feed["feed_url"]
if self.feed["description"] != "":
handler["description"] = self.feed["description"]
author = {}
has_author = False
if 'AUTHOR_EMAIL' in self.settings:
author["url"] = "mailto:%s" % to_unicode(self.settings['AUTHOR_EMAIL'])
has_author = True
if self.feed["author_name"] is not None:
author["name"] = self.feed["author_name"]
has_author = True
elif 'AUTHOR' in self.settings:
author['name'] = self.settings['AUTHOR']
has_author = True
if self.feed["author_link"] is not None:
author["url"] = self.feed["author_link"]
has_author = True
elif 'AUTHOR_LINK' in self.settings:
author['url'] = self.settings['AUTHOR_LINK']
has_author = True
if has_author:
handler["author"] = author
if "hub" in self.feed:
handler["hubs"] = [{'type': 'WebSub','url':self.feed["hub"]}]
handler["items"] = self.items
json.dump(handler,outfile,indent=2)
class JSONFeedWriter(Writer):
"""Writer class for our JSON Feed. This class is responsible for
invoking the RssPuSHFeed or Atom1PuSHFeed and writing the feed itself
(using it's superclass methods)."""
def __init__(self, *args, **kwargs):
"""Class initializer"""
super(JSONFeedWriter, self).__init__(*args, **kwargs)
def _create_new_feed(self, *args):
"""Helper function (called by the super class) which will initialize
the Feed object."""
if len(args) == 2:
# we are on pelican <2.7
feed_type, context = args
elif len(args) == 3:
# we are on Pelican >=2.7
feed_type, feed_title, context = args
else:
# this is not expected, let's provide a useful message
raise Exception(
'The Writer._create_new_feed signature has changed, check the '
'current Pelican source for the updated signature'
)
feed_class = JSONFeed
sitename = Markup(context['SITENAME']).striptags()
feed = feed_class(
title=sitename,
link=(self.site_url + '/'),
feed_url=self.feed_url,
description=context.get('SITESUBTITLE', ''))
feed.set_settings(self.settings)
return feed
def _add_item_to_the_feed(self, feed, item):
"""Performs an 'in-place' update of existing 'published' articles
in ``feed`` by creating a new entry using the contents from the
``item`` being passed.
This method is invoked by pelican's core.
:param feed: A Feed instance.
:param item: An article (pelican's Article object).
"""
title = Markup(item.title).striptags()
link = '%s/%s' % (self.site_url, item.url)
appendContent = ""
appendTitle = ""
# :FIXME: Need to handle the link attribute, so that it can be added
# as an external_url to the JSON feed
#if hasattr(item,"link"):
# appendContent = '<p><a href="%s">%s</a></p>' % (link, self.settings.get('LINK_BLOG_PERMALINK_GLYPH','∞'))
# appendTitle = self.settings.get('LINK_BLOG_APPEND_TITLE','')
# link = item.link
feed.add_item(
title=title + appendTitle,
link=link,
unique_id=get_tag_uri(link, item.date),
description=item.get_content(self.site_url) + appendContent,
categories=item.tags if hasattr(item, 'tags') else None,
author_name=getattr(item, 'author', ''),
pubdate=set_date_tzinfo(
item.modified if hasattr(item, 'modified') else item.date,
self.settings.get('TIMEZONE', None)
),
external_url=item.link if hasattr(item,'link') else None,
)
class JSONFeedGenerator(Generator):
"""Generates content by inspecting all articles and invokes the
JSONFeedWriter object, which will write the JSON Feed."""
def __init__(self, *args, **kwargs):
"""Starts a brand new feed generator."""
super(JSONFeedGenerator, self).__init__(*args, **kwargs)
# Initialize the number of posts and where to save the feed.
self.posts = []
def generate_context(self):
"""Looks for all 'published' articles and add them to the posts
list."""
self.context['SITEURL'] = self.settings.get('SITEURL')
self.context['FEED_DOMAIN'] = self.settings.get('FEED_DOMAIN')
for article in self.context['articles']:
if (article.status.lower()) == "published":
self.posts.append(article)
def generate_output(self, writer):
"""Write out the link feed to a file.
:param writer: A ``Pelican Writer`` instance.
"""
writer = JSONFeedWriter(self.output_path, self.settings)
if self.settings.get('FEED_JSON'):
writer.write_feed(self.posts, self.context, self.settings.get('FEED_JSON'), feed_type="json")
def get_generators(generators):
"""Module function invoked by the signal 'get_generators'."""
return JSONFeedGenerator
def register():
"""Registers the module function `get_generators`."""
signals.get_generators.connect(get_generators)
``` |
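A hypothetical `pelicanconf.py` fragment showing the settings this module reads (`FEED_JSON`, `WEBSUB_HUB`, `SITESUBTITLE`, `AUTHOR_EMAIL`, `FEED_DOMAIN`); the site name and paths are placeholders, and loading via `PLUGINS` assumes the file sits on `PLUGIN_PATHS`.
```python
# pelicanconf.py (sketch)
PLUGIN_PATHS = ['plugins']
PLUGINS = ['pelican_jsonfeed']
SITENAME = 'Example Site'
SITESUBTITLE = 'Notes and links'
AUTHOR = 'Example Author'
AUTHOR_EMAIL = 'author@example.com'
FEED_DOMAIN = 'https://example.com'
FEED_JSON = 'feed.json'   # enables JSONFeedGenerator.generate_output()
WEBSUB_HUB = ''           # optional WebSub hub URL; added as "hubs" when non-empty
```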
{
"source": "jmartinezbernet/FuelSDK-Python",
"score": 2
} |
#### File: FuelSDK/Public_WebAppTests/test_ET_Client.py
```python
from unittest import TestCase
from FuelSDK import ET_Client
class TestET_Client(TestCase):
@classmethod
def setUpClass(cls):
"""
test_authToken_and_refreshKey_should_differ_if_refresh_token_is_enforced expects a Public/Web App config.
All the other tests require a Server to Server config
"""
cls.client = ET_Client(False, False)
def test_authToken_and_refreshKey_should_differ_if_refresh_token_is_enforced(self):
self.authToken1 = self.client.authToken
self.refreshKey1 = self.client.refreshKey
self.client.refresh_token_with_oAuth2(True)
self.authToken2 = self.client.authToken
self.refreshKey2 = self.client.refreshKey
self.assertNotEqual(self.authToken1, self.authToken2)
self.assertNotEqual(self.refreshKey1, self.refreshKey2)
def test_auth_payload_should_have_public_app_attributes(self):
self.client.application_type = 'public'
payload = self.client.create_payload()
self.assertEqual(self.client.client_id, payload['client_id'])
self.assertEqual(self.client.redirect_URI, payload['redirect_uri'])
self.assertEqual(self.client.authorization_code, payload['code'])
self.assertEqual('authorization_code', payload['grant_type'])
def test_auth_payload_for_public_app_should_not_have_client_secret(self):
self.client.application_type = 'public'
payload = self.client.create_payload()
self.assertRaises(KeyError, lambda: payload['client_secret'])
def test_auth_payload_should_have_web_app_attributes(self):
self.client.application_type = 'web'
payload = self.client.create_payload()
self.assertEqual('authorization_code', payload['grant_type'])
self.assertEqual(self.client.client_id, payload['client_id'])
self.assertEqual(self.client.client_secret, payload['client_secret'])
self.assertEqual(self.client.redirect_URI, payload['redirect_uri'])
self.assertEqual(self.client.authorization_code, payload['code'])
def test_auth_payload_should_have_server_app_attributes(self):
self.client.application_type = 'server'
payload = self.client.create_payload()
self.assertEqual('client_credentials', payload['grant_type'])
self.assertEqual(self.client.client_id, payload['client_id'])
self.assertEqual(self.client.client_secret, payload['client_secret'])
def test_auth_payload_for_server_app_should_not_have_code_and_redirect_uri(self):
self.client.application_type = 'server'
payload = self.client.create_payload()
self.assertRaises(KeyError, lambda: payload['code'])
self.assertRaises(KeyError, lambda: payload['redirect_uri'])
def test_auth_payload_with_refresh_token_should_have_refresh_token_attribute(self):
self.client.refreshKey = 'RefreshKey'
payload = self.client.create_payload()
self.assertEqual('refresh_token', payload['grant_type'])
self.assertEqual(self.client.refreshKey, payload['refresh_token'])
``` |
{
"source": "jmartinezespza/odoo-docker",
"score": 2
} |
#### File: odoo-docker/auto_addons/tests.py
```python
import unittest
from addons import *
class RepoTest(unittest.TestCase):
def test_check_is_url(self):
remote_url = 'connector'
self.repo = Repo(remote_url)
self.assertTrue(self.repo._check_is_url('https://github.com'))
self.assertTrue(self.repo._check_is_url('http://github.com'))
self.assertFalse(self.repo._check_is_url('ttps://github.com'))
def test_parse_oca_repo(self):
remote_url = 'connector'
self.repo = Repo(remote_url)
self.repo._parse_organization_repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.folder_name, 'connector')
def test_parse_organization_and_repo(self):
remote_url = 'OCA/connector'
self.repo = Repo(remote_url)
self.repo._parse_organization_repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.folder_name, 'connector')
def test_parse_url(self):
remote_url = 'https://github.com/OCA/connector'
self.repo = Repo(remote_url)
self.repo._parse_url(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.folder_name, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
def test_path(self):
remote_url = 'connector'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_oca_repo(self):
remote_url = 'connector'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.folder_name, 'connector')
self.assertEquals(self.repo.branch, None)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
self.assertEquals(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_organization_and_repo(self):
remote_url = 'OCA/connector'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.folder_name, 'connector')
self.assertEquals(self.repo.branch, None)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
self.assertEquals(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_url(self):
remote_url = 'https://github.com/OCA/connector'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.folder_name, 'connector')
self.assertEquals(self.repo.branch, None)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
self.assertEquals(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_oca_repo_and_branch(self):
remote_url = 'connector 8.0'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.folder_name, 'connector')
self.assertEquals(self.repo.branch, '8.0')
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
self.assertEquals(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_organization_and_repo_and_branch(self):
remote_url = 'OCA/connector 8.0'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.folder_name, 'connector')
self.assertEquals(self.repo.branch, '8.0')
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
self.assertEquals(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_url_and_branch(self):
remote_url = 'https://github.com/OCA/connector 8.0'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.folder_name, 'connector')
self.assertEquals(self.repo.branch, '8.0')
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
self.assertEquals(self.repo.path, '%sconnector' % (EXTRA_ADDONS_PATH, ))
def test_repo_rename_and_url(self):
remote_url = 'connector_rename https://github.com/OCA/connector'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.folder_name, 'connector_rename')
self.assertEquals(self.repo.branch, None)
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
self.assertEquals(self.repo.path, '%sconnector_rename' % (EXTRA_ADDONS_PATH, ))
def test_repo_rename_and_url_and_branch(self):
remote_url = 'connector_rename https://github.com/OCA/connector 8.0'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.folder_name, 'connector_rename')
self.assertEquals(self.repo.branch, '8.0')
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'connector')
self.assertEquals(self.repo.git_repo_host, 'github.com')
self.assertEquals(self.repo.path, '%sconnector_rename' % (EXTRA_ADDONS_PATH, ))
def test_repo_rename_and_url_and_branch_new(self):
remote_url = 'account-financial-reporting https://github.com/OCA/account-financial-reporting 8.0'
self.repo = Repo(remote_url)
self.assertEquals(self.repo.remote_url, remote_url)
self.assertEquals(self.repo.folder_name, 'account-financial-reporting')
self.assertEquals(self.repo.branch, '8.0')
self.assertEquals(self.repo.organization, DEFAULT_ORGANIZATION)
self.assertEquals(self.repo.repository, 'account-financial-reporting')
self.assertEquals(self.repo.git_repo_host, 'github.com')
self.assertEquals(self.repo.path, '%saccount-financial-reporting' % (EXTRA_ADDONS_PATH, ))
def test_download_cmd(self):
repo = Repo('Elico-Corp/odoo')
self.assertEqual(
['git', 'clone',
'https://github.com/Elico-Corp/odoo.git',
'/mnt/data/additional_addons/odoo'],
repo.download_cmd)
def test_download_cmd_with_branch(self):
repo = Repo('Elico-Corp/odoo 8.0')
self.assertEqual(
['git', 'clone', '-b', '8.0',
'https://github.com/Elico-Corp/odoo.git',
'/mnt/data/additional_addons/odoo'],
repo.download_cmd)
if __name__ == '__main__':
unittest.main()
``` |
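The tests above double as documentation for the remote-url formats the `Repo` helper accepts. A small, hypothetical usage sketch (assuming `addons.py` is importable exactly as in the tests; the printed values follow from the assertions above):
```python
from addons import Repo
# "organization/repository branch" form; the organization falls back to OCA when omitted
repo = Repo('OCA/connector 8.0')
print(repo.path)          # /mnt/data/additional_addons/connector
print(repo.download_cmd)  # ['git', 'clone', '-b', '8.0', 'https://github.com/OCA/connector.git', ...]
```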
{
"source": "jmartinezv82/Data-Engineering-POC",
"score": 2
} |
#### File: jmartinezv82/Data-Engineering-POC/api.py
```python
import os
from dotenv import dotenv_values
from flask import Flask, jsonify
from flask_swagger import swagger
from pytz import utc
from flask_apscheduler import APScheduler
from data_ingestion import get_and_save_articles, get_articles_from_mongo
config = dotenv_values(".env")
minutes_env = os.environ['MINUTES_CRONJOB'] if config.get('MINUTES_CRONJOB') is None else config.get('MINUTES_CRONJOB')
# set configuration values
class Config:
SCHEDULER_TIMEZONE = utc
SCHEDULER_API_ENABLED = True
# create app
app = Flask(__name__)
app.config.from_object(Config())
@app.route("/")
def home():
swag = swagger(app, from_file_keyword='swagger_from_file')
swag['info']['version'] = "1.0"
    swag['info']['title'] = "Data Engineering POC"
return jsonify(swag)
@app.route("/api/data/get", methods=['GET'])
def api_data_get():
"""
    An endpoint that returns all articles saved in the MongoDB Atlas service
---
tags:
- articles
responses:
200:
description: Hacked some hacks
"""
try:
return jsonify(get_articles_from_mongo())
except IndexError:
return jsonify(IndexError)
# initialize scheduler
scheduler = APScheduler()
# if you don't wanna use a config, you can set options here:
# scheduler.api_enabled = True
scheduler.init_app(app)
scheduler.start()
# interval examples
@scheduler.task("interval", id="do_save_info", minutes=int(minutes_env), misfire_grace_time=900)
def save_info():
get_and_save_articles()
if __name__ == "__main__":
app.run(debug=False)
``` |
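A quick sketch of how the scheduling interval above is resolved: values in a local `.env` file (read through python-dotenv) take precedence, with the OS environment as the fallback. The file name and value are illustrative.
```python
import os
from dotenv import dotenv_values
# .env would contain a line such as: MINUTES_CRONJOB=30
config = dotenv_values(".env")
minutes = config.get('MINUTES_CRONJOB') or os.environ.get('MINUTES_CRONJOB', '30')
print(f"do_save_info will run every {int(minutes)} minutes")
```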
{
"source": "j-martin/font2css",
"score": 2
} |
#### File: font2css/font2css/tests.py
```python
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, MIT License."
import nose
from font2css import *
def test_generateFontList():
directory = './font2css/example/'
expected = [(
'OpenSans-Light', './font2css/example/Open_Sans/OpenSans-Light.ttf'),
('OpenSans-Regular',
'./font2css/example/Open_Sans/OpenSans-Regular.ttf'),
('OpenSans-ExtraBold',
'./font2css/example/Open_Sans/OpenSans-ExtraBold.ttf'),
('OpenSans-SemiboldItalic',
'./font2css/example/Open_Sans/OpenSans-SemiboldItalic.ttf'),
('OpenSans-Italic', './font2css/example/Open_Sans/OpenSans-Italic.ttf'),
('OpenSans-ExtraBoldItalic',
'./font2css/example/Open_Sans/OpenSans-ExtraBoldItalic.ttf'),
('OpenSans-BoldItalic',
'./font2css/example/Open_Sans/OpenSans-BoldItalic.ttf'),
('OpenSans-Bold', './font2css/example/Open_Sans/OpenSans-Bold.ttf'),
('OpenSans-Semibold',
'./font2css/example/Open_Sans/OpenSans-Semibold.ttf'),
('OpenSans-LightItalic',
'./font2css/example/Open_Sans/OpenSans-LightItalic.ttf'),
('Quicksand-Light', './font2css/example/Quicksand/Quicksand-Light.ttf'),
('Quicksand-Bold', './font2css/example/Quicksand/Quicksand-Bold.ttf'),
('Quicksand-Regular', './font2css/example/Quicksand/Quicksand-Regular.ttf')]
results = generateFontList(directory)
assert(len(results) == len(expected))
def test_decodeFontName():
inputs = ['OpenSans-LightItalic',
'Quicksand-Bold', 'OpenSans-ExtraBoldItalic']
expected = [('OpenSans', 'italic', '300'),
('Quicksand', 'regular', '700'),
('OpenSans', 'italic', '700')]
for (count, item) in enumerate(inputs):
result = decodeFontName(item)
assert(result == expected[count])
def test_replace_all():
inputs = 'abc'
input_replace = {'a': '4', 'b': '5', 'c': '6'}
expected = '456'
result = replace_all(inputs, input_replace)
assert(expected == result)
def test_generateCSSdata():
inputs = ('Quicksand-Regular',
'./font2css/example/Quicksand/Quicksand-Regular.ttf')
results = generateCSSdata(inputs[0], inputs[1])
expected = """
@font-face {
font-family: 'Quicksand';
font-style: regular;
font-weight: 400;
src: url(data:font/truetype;charset=utf-8;base64,AAEAAAAPAIAAAwBwRFNJRwAAAAEAAFzMAAAA"""
# The results is truncated because the expected string would be too long.
assert(results[0:200] == expected)
``` |
{
"source": "jmartinm/nostro-web",
"score": 2
} |
#### File: nostro-web/nostrikesite/views.py
```python
from django.shortcuts import render
from mapapp.models import PointOfInterest
from django.core import serializers
from django.http import HttpResponse
# Create your views here.
def home(request):
return render(request, "index.html")
def export(request):
from tempfile import mkstemp
import os
import time
import subprocess
import datetime
pois = PointOfInterest.objects.all()
serialized_queryset = serializers.serialize('json', pois)
fd, temp_path = mkstemp()
myfile = os.fdopen(fd, "w")
myfile.write(serialized_queryset)
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
myfile.write("\n\ntimestamp: " + str(st) + '\n\n')
myfile.close()
fd_return, temp_path_return = mkstemp(dir="/tmp")
x = subprocess.call(["/home/ubuntu/signdb.sh", temp_path, temp_path_return])
response = HttpResponse(open(temp_path_return).read(), content_type="text/plain")
response['Content-Disposition'] = 'attachment; filename=export.txt'
return response
``` |
{
"source": "jmartin-r7/cpe_utils",
"score": 3
} |
#### File: cpe_utils/tests/test_basic.py
```python
import os
import sys
import unittest
import json
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import cpe_utils
class TestBasic(unittest.TestCase):
"""Test the basic functionality of cpe_utils
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_cpe_parsing(self):
cpe_str = "cpe:/part:vendor:product:version:update:edition"
cpe_obj = cpe_utils.CPE(cpe_str)
self.assertEqual(cpe_obj.part, "part")
self.assertEqual(cpe_obj.vendor, "vendor")
self.assertEqual(cpe_obj.product, "product")
self.assertEqual(cpe_obj.version, "version")
self.assertEqual(cpe_obj.update, "update")
self.assertEqual(cpe_obj.edition, "edition")
# see issue #5
# TODO Test vendor
# TODO Test product
# TODO Test version
# TODO Test update
# TODO Test edition
def test_matches(self):
tests = [
["cpe:/a:vendor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/X:vendor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:X:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:X:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:X:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.1:X:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.1:sp3:X", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vandor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:ndor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:dor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:or:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:r:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vbndo:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vand:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:ven:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:ve:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:v:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vbndor:produc:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:produ:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vcndor:prod:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vindor:pro:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vondor:pr:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vundor:p:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vondor::1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.0:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product::sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:1.1:sp:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.1:s:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.1::x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:1.1:sp3:x8", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.1:sp3:x", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.1:sp3:", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vndor:poduct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vedor:prduct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:venor:prouct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendr:prodct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendo:produt:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:produc:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:space:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:space:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.10:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.11:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.12:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.13:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.14:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.15:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.16:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.17:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.18:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.19:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", False],
["cpe:/a:vendor:product:1.1:sp3:*", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:1.1:*:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:*:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:*:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:*:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/*:vendor:product:1.1:sp3:x8?", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:1.1:sp3:x?6", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:1.1:sp3:?86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:1.1:sp?:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:1.1:s?3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:1.1:?p3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:1.?:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:1?1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:product:?.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:produc?:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:produ?t:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:prod?ct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:pro?uct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:pr?duct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:p?oduct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendor:?roduct:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vendo?:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:vend?r:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:ven?or:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:ve?dor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:v?ndor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/a:?endor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
["cpe:/?:vendor:product:1.1:sp3:x86", "cpe:/a:vendor:product:1.1:sp3:x86", True],
]
count = 0
for test_info in tests:
count += 1
cpe_str1, cpe_str2, match_result = test_info
cpe1 = cpe_utils.CPE(cpe_str1)
cpe2 = cpe_utils.CPE(cpe_str2)
self.assertTrue(cpe1.matches(cpe2) == match_result, "[{}] {}.match({}) was not {}".format(
count,
cpe_str1,
cpe_str2,
match_result
))
def test_cpe_parsing_23(self):
cpe_str = "cpe:2.3:o:vendor:product:version:update:edition"
cpe_obj = cpe_utils.CPE(cpe_str)
self.assertEqual(cpe_obj.part, "o")
self.assertEqual(cpe_obj.vendor, "vendor")
self.assertEqual(cpe_obj.product, "product")
self.assertEqual(cpe_obj.version, "version")
self.assertEqual(cpe_obj.update, "update")
self.assertEqual(cpe_obj.edition, "edition")
# see issue #5
# TODO Test vendor
# TODO Test product
# TODO Test version
# TODO Test update
# TODO Test edition
def test_cpe_exception(self):
with self.assertRaises(cpe_utils.CPEException):
cpe_utils.CPE("cpe:::::")
def test_human(self):
tests = [
["cpe:/"
"a:vendor:product:1.1:sp3:x86", "Vendor Product 1.1 SP3 x86"],
["cpe:/a:vendor_name:product:1.1:sp3:x86", "Vendor Name Product 1.1 SP3 x86"],
["cpe:/a:vendor:product::sp3:x86", "Vendor Product SP3 x86"],
["cpe:/a:vendor:::sp3:x86", "Vendor SP3 x86"],
["cpe:/a:vendor::::", "Vendor"],
["cpe:/a::::sp3:x86", "SP3 x86"],
["cpe:/a:vendor:product:1.1::", "Vendor Product 1.1"],
["cpe:/a:::::", ""],
["cpe:/a::product:::", "Product"],
["cpe:/a:::1.1::", "1.1"],
["cpe:/a::::sp3:", "SP3"],
["cpe:/a:::::x86", "x86"],
["cpe:/a:vendor:product:::", "Vendor Product"],
["cpe:/a:vendor:product:1.1:sp3:", "Vendor Product 1.1 SP3"],
["cpe:/a:vendor_name::::x86", "Vendor Name x86"],
["cpe:/a:vendor_name:::sp3:", "Vendor Name SP3"],
["cpe:/a:vendor_name:product:1.1::", "Vendor Name Product 1.1"],
["cpe:/a:vendor_name::::", "Vendor Name"],
["cpe:/a:vendor::::x86", "Vendor x86"],
["cpe:/a:vendor:::sp3:", "Vendor SP3"],
]
for test_info in tests:
cpe_string = test_info[0]
correct_human = test_info[1]
cpe = cpe_utils.CPE(cpe_string)
self.assertEqual(cpe.human(), correct_human, "{!r} was not {!r} (for cpe {})".format(
cpe.human(),
correct_human,
cpe_string
))
def test_to_json(self):
tests = [
["cpe:/a:vendor:product:1.1:sp3:x86",{
"part": "a",
"vendor": "vendor",
"product": "product",
"version": "1.1",
"update": "sp3",
"edition": "x86"
}],
["cpe:/a::product:1.1:sp3:x86",{
"part": "a",
"vendor": "",
"product": "product",
"version": "1.1",
"update": "sp3",
"edition": "x86"
}],
["cpe:/a:vendor::1.1:sp3:x86",{
"part": "a",
"vendor": "vendor",
"product": "",
"version": "1.1",
"update": "sp3",
"edition": "x86"
}],
["cpe:/a:vendor:product::sp3:x86",{
"part": "a",
"vendor": "vendor",
"product": "product",
"version": "",
"update": "sp3",
"edition": "x86"
}],
["cpe:/a:vendor:product:1.1::x86",{
"part": "a",
"vendor": "vendor",
"product": "product",
"version": "1.1",
"update": "",
"edition": "x86"
}],
["cpe:/a:vendor:product:1.1:sp3",{
"part": "a",
"vendor": "vendor",
"product": "product",
"version": "1.1",
"update": "sp3",
"edition": ""
}],
]
for test_info in tests:
cpe_string = test_info[0]
correct_dict = test_info[1]
cpe = cpe_utils.CPE(cpe_string)
assert isinstance(cpe_string, object)
self.assertEqual(cpe.to_json(), json.dumps(correct_dict), "{!r} was not {!r} (for cpe {})".format(
cpe.to_json(),
correct_dict,
cpe_string
))
def test_cpe_obj_equals(self):
orig_cpe = "cpe:/o:vendor:product:version:update:edition"
cpe_obj1 = cpe_utils.CPE(orig_cpe)
cpe_obj2 = cpe_utils.CPE(orig_cpe)
false_cpes = [
"cpe:/a:vendor:product:version:update:edition",
"cpe:/o:vendor1:product:version:update:edition",
"cpe:/o:vendor:product1:version:update:edition",
"cpe:/o:vendor:product:version1:update:edition",
"cpe:/o:vendor:product:version:update1:edition",
"cpe:/o:vendor:product:version:update:edition1",
]
for false_cpe in false_cpes:
false_cpe_obj = cpe_utils.CPE(false_cpe)
self.assertFalse(cpe_obj1 == false_cpe_obj, "{} is not equal to {}".format(
false_cpe,
orig_cpe
))
def test_has_wildcards(self):
cpe_tests = [
"cpe:/*:vendor:product:version:update:edition",
"cpe:/?:vendor:product:version:update:edition",
"cpe:/o:v*ndor:product:version:update:edition",
"cpe:/o:v?ndor:product:version:update:edition",
"cpe:/o:vendor:pr*duct:version:update:edition",
"cpe:/o:vendor:pr?duct:version:update:edition",
"cpe:/o:vendor:product:vers*on:update:edition",
"cpe:/o:vendor:product:vers?on:update:edition",
"cpe:/o:vendor:product:version:upda*e:edition",
"cpe:/o:vendor:product:version:upda?e:edition",
"cpe:/o:vendor:product:version:update:ed*tion",
"cpe:/o:vendor:product:version:update:ed?tion",
]
for cpe_str in cpe_tests:
cpe_obj = cpe_utils.CPE(cpe_str)
self.assertTrue(cpe_obj.has_wildcards())
no_wildcards = cpe_utils.CPE("cpe:/o:vendor:product:version:update:edition")
self.assertFalse(no_wildcards.has_wildcards())
if __name__ == "__main__":
unittest.main()
``` |
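The tests above also serve as usage documentation for `cpe_utils.CPE`. A brief sketch whose expected results follow directly from the assertions in the test cases:
```python
import cpe_utils
cpe = cpe_utils.CPE("cpe:/a:vendor:product:1.1:sp3:x86")
print(cpe.human())               # "Vendor Product 1.1 SP3 x86"
pattern = cpe_utils.CPE("cpe:/a:vendor:*:1.1:sp3:x86")
print(pattern.has_wildcards())   # True
print(pattern.matches(cpe))      # True
```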
{
"source": "jmartinz/pyCrawler",
"score": 2
} |
#### File: 10.contratacionE/old/PCE_extraccion_documentos.py
```python
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup
# dump the raw table text to a file so we can test how to extract the information with bs4
def extraeContratos(table):
f = open('tablaContratos.txt', 'a')
for row in table.findAll("tr"):
f.write(row.encode("UTF-8")+ "\n")
f.close()
expedientes =[]
# No puede ser headless con driver Firefox
driver = webdriver.Firefox()
#Carga página
driver.get("https://contrataciondelestado.es/wps/portal/!ut/p/b1/lZDLDoIwEEU_aaYParssrwLxAVZQujEsjMH42Bi_30rcGCPq7CZz7pzkgoOWKC6kYBPYgDt3t37fXfvLuTs-die2PFlEUZpRlJbFSKdxXYvMrybwQOsB_DAah3xopdQh0YislqhFVUXK_0HFnvmARbwpmlLY3CDmWRpPaxKgoeI3_4jgxW_sjPhzwkRAkRhLn_mPAvqn_13wJb8GNyBjDQzAWMXjEgrz7HLaQeuxyVY3SaVzxXARLj1WlLNVaShB5LCCNoGTO6Z-VH7g3R2UoLEz/dl4/d5/L2dBISEvZ0FBIS9nQSEh/pw/Z7_AVEQAI930OBRD02JPMTPG21004/act/id=0/p=javax.servlet.include.path_info=QCPjspQCPbusquedaQCPBusquedaVIS_UOE.jsp/299420689304/-/")
# Click on the advanced search link
#driver.find_element_by_link_text('Búsqueda avanzada de licitaciones').click()
#driver.find_element_by_css_selector("div.paddingLeft1 a").click()
# Select AGE (Administración General del Estado)
#el = driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:comboTipoAdminMAQ')
#for option in el.find_elements_by_tag_name('option'):
# if (option.text).encode('utf-8') == 'Administración General del Estado':
# option.click() # select() in earlier versions of webdriver
# break
# Select MSSSI (Ministry of Health, Social Services and Equality)
driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:idSeleccionarOCLink').click() # Contracting organization -> select
driver.find_elements_by_class_name('tafelTreeopenable')[1].click() # Select AGE
driver.find_element_by_id('tafelTree_maceoArbol_id_17').click() # Select the Health ministry node
driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:botonAnadirMostrarPopUpArbolEO').click()
#nomAdmin =
#for option in nomAdmin.find_elements_by_tag_name('option'):
# if (option.text).encode('utf-8') == 'Ministerio de Sanidad, Servicios Sociales e Igualdad':
# option.click() # select() in earlier versions of webdriver
# break
# Publication date from 01-01-2014
fDesde = driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:textMinFecAnuncioMAQ2')
fDesde.send_keys("01-01-2014")
# to 24-06-2015
fHasta = driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:textMaxFecAnuncioMAQ')
fHasta.send_keys("24-06-2015")
# press the search button
driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:button1').click()
# Print the number of results
print (driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:textfooterTotalTotalMAQ').text)
nPagTotal = driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:textfooterInfoTotalPaginaMAQ').text
#print ("Páginas totales: "+ nPagTotal)
print (nPagTotal)
# Iterate over every page of results
while True: # runs until the "next" link no longer exists
nPag = driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:textfooterInfoNumPagMAQ').text
#print ("Página : "+ nPag)
print (nPag)
    # Extract the expedientes (tender records) from the current page
html_page = driver.page_source
soup = BeautifulSoup(html_page)
tableExp = soup.find("table", { "id" : "myTablaBusquedaCustom" })
extraeContratos(tableExp)
expedientes_pag = [c.text for c in soup.findAll('td', {'class':'tdExpediente'})]
expedientes.extend(expedientes_pag)
    # Click the "next" link; if it is missing, leave the loop
try:
enlaceSiguiente= driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:footerSiguiente')
enlaceSiguiente.click()
except NoSuchElementException:
break
# Close the driver
driver.quit()
def main():
    # Read the database (selection criteria still to be decided)
listaContratos=[]
exp_data = open('tablaContratos.txt','r').read()
soup = BeautifulSoup(exp_data)
    # Keep only the rows that correspond to contracts
for row in soup.findAll("tr", {'class': ['rowClass1', 'rowClass2']}):
#pickle.dump(Contrato(row), open('save.p', 'ab'))
listaContratos.append(Contrato(row))
#print(row.prettify())
for contrato in listaContratos:
contrato.grabarBD()
if __name__ == "__main__":
sys.exit(main())
```
#### File: pyCrawler/10.contratacionE/pce_extrae_contratos.py
```python
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from bs4 import BeautifulSoup
import sys
#phantonPath = "/home/jmartinz/00.py/phantomjs/phantomjs"
phantonPath = "../phantomjs/phantomjs"
contratacionPage = "https://contrataciondelestado.es/wps/portal/!ut/p/b1/lZDLDoIwEEU_aaYParssrwLxAVZQujEsjMH42Bi_30rcGCPq7CZz7pzkgoOWKC6kYBPYgDt3t37fXfvLuTs-die2PFlEUZpRlJbFSKdxXYvMrybwQOsB_DAah3xopdQh0YislqhFVUXK_0HFnvmARbwpmlLY3CDmWRpPaxKgoeI3_4jgxW_sjPhzwkRAkRhLn_mPAvqn_13wJb8GNyBjDQzAWMXjEgrz7HLaQeuxyVY3SaVzxXARLj1WlLNVaShB5LCCNoGTO6Z-VH7g3R2UoLEz/dl4/d5/L2dBISEvZ0FBIS9nQSEh/pw/Z7_AVEQAI930OBRD02JPMTPG21004/act/id=0/p=javax.servlet.include.path_info=QCPjspQCPbusquedaQCPBusquedaVIS_UOE.jsp/299420689304/-/"
#contratacionPage="https://contrataciondelestado.es"
""" Móudlo para extraer datos de la página de contatación
del estado
"""
class Contratos():
""" Clase que devuelve los contratos de un ministerio entre unas fechas usando el dirver que se indique
driverType=1 (Firefox, online) / 2(phantomjs)
ministry:
6: MAGRAMA
7: MAExCoop
8. MDEfensa
9: MINECO
10:MEDCD
11:MESS
12:MFOM
13:MINHAP
14:MINET
15:MINJUS
16:MINPRES
17:MSSSI
18:MinTraInm
19:MinInt
20: Presid<NAME>
fini: dd-mm-aaaa
ffin: dd-mm-aaaa
"""
driver = "" #webdriver.PhantomJS(phantonPath, service_args=['--ignore-ssl-errors=true'])
driverType=1
expedientes =[]
ministerio = 'tafelTree_maceoArbol_id_'
ministry=0
fIni= '01-01-2015'
fFin='10-01-2015'
nContratos = 0
nPagTotal = 0
def __init__(self, driverType=1, ministry='17', fini='01-01-2015',ffin='10-01-2015'):
self.driverType=driverType
self.ministry = ministry
if driverType==1:
self.driver = webdriver.Firefox()
elif driverType==2:
self.driver = webdriver.PhantomJS(phantonPath, service_args=['--ignore-ssl-errors=true'])
self.driver.set_window_size(1120, 550)
self.ministerio = self.ministerio + ministry
self.fIni = fini
self.fFin = ffin
# self.debugPhanton()
self.extraecontratos()
def cargaPagina(self):
        # Load the page
if self.driverType==2:
self.driver.implicitly_wait(10)
self.driver.set_page_load_timeout(10)
try:
self.driver.get(contratacionPage)
        except TimeoutException as e:
#Handle your exception here
print(e)
def debugPhanton(self):
self.cargaPagina()
# check phantomjs
print(self.driver.page_source)
def extraecontratos(self):
self.cargaPagina()
        # Select the ministry
        self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:idSeleccionarOCLink').click() # Contracting organization -> select
        self.driver.find_elements_by_class_name('tafelTreeopenable')[1].click() # Select AGE
        self.driver.find_element_by_id(self.ministerio).click() # Select the ministry passed in as a parameter
        self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:botonAnadirMostrarPopUpArbolEO').click()
        # the site added an "add" button, so the confirmation is clicked a second time
        self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:botonAnadirMostrarPopUpArbolEO').click()
        # Publication date from fIni
fDesde = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:textMinFecAnuncioMAQ2')
fDesde.send_keys(self.fIni)
        # to fFin
fHasta = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:textMaxFecAnuncioMAQ')
fHasta.send_keys(self.fFin)
        # press the search button
self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:button1').click()
        # Get the number of results
self.nContratos=self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:textfooterTotalTotalMAQ').text
        # and the total number of pages
self.nPagTotal = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:textfooterInfoTotalPaginaMAQ').text
        # Iterate over every page of results
        while True: # runs until the "next" link no longer exists
nPag = self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:textfooterInfoNumPagMAQ').text
            # Extract the expedientes (tender records) from the current page
html_page = self.driver.page_source
soup = BeautifulSoup(html_page, "html5lib")
# tableExp = soup.find("table", { "id" : "myTablaBusquedaCustom" })
#
# expedientes_pag = [c.text for c in soup.findAll('td', {'class':'tdExpediente'})]
expedientes_pag = []
            # Keep only the rows that correspond to contracts
for row in soup.findAll("tr", {'class': ['rowClass1', 'rowClass2']}):
expedientes_pag.append(row)
# Add them to the overall list of case files
self.expedientes.extend(expedientes_pag)
# Click the "next" link; if it is not found, exit the loop
try:
enlaceSiguiente= self.driver.find_element_by_id('viewns_Z7_AVEQAI930OBRD02JPMTPG21004_:form1:footerSiguiente')
enlaceSiguiente.click()
except NoSuchElementException:
break
# Close the driver
self.driver.quit()
# Just to check that it works
def main():
contratosMSSSI=Contratos(driverType=2)
print(contratosMSSSI.nContratos)
print(contratosMSSSI.nPagTotal)
# open the output file
f = open('workfile', 'w', encoding="UTF-8")
for exp in contratosMSSSI.expedientes:
f.write(str(exp) + "\n")
f.close()
if __name__ == "__main__":
sys.exit(main())
```
#### File: pyCrawler/10.contratacionE/test_update_bd.py
```python
import sys
import datetime
import traceback
import pymysql
def main():
conn = pymysql.connect(host='localhost', port=3306, user='pce', passwd='<PASSWORD>', db='pce')
cur = conn.cursor()
cur.execute('SELECT * FROM pce_cargas WHERE num_exp = 1')
resultado = cur.fetchall()
for row in resultado:
print("Year=%s, min=%s, numexp=%d" % (row[0], row[1],row[2]))
ini = datetime.date(int(row[0]), 1, 1).strftime("%d-%m-%Y")
fin = datetime.date(int(row[0]), 12, 31).strftime("%d-%m-%Y")
print(ini,fin)
sql_stm = "UPDATE pce_cargas set num_exp = %s WHERE year=%s and id_min=%s"
cur.execute(sql_stm, (2,row[0],row[1]))
conn.commit()
conn.close()
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jmartipu/CrearSolicitudCronCypress",
"score": 2
} |
#### File: jmartipu/CrearSolicitudCronCypress/SQSConnection.py
```python
import boto3
import botocore
import Settings
class SQSConnection:
session = boto3.Session(
aws_access_key_id=Settings.AWS_ACCESS_KEY_ID_SQS,
aws_secret_access_key=Settings.AWS_SECRET_ACCESS_KEY_SQS,
)
sqs = session.client('sqs', region_name=Settings.AWS_REGION_SQS)
queue_url = Settings.AWS_QUEUE_URL_IN
exists = True
message = ''
receipt_handle = ''
def __init__(self, queue_url):
self.queue_url = queue_url
def __enter__(self):
try:
self.session = boto3.Session(
aws_access_key_id=Settings.AWS_ACCESS_KEY_ID_SQS,
aws_secret_access_key=Settings.AWS_SECRET_ACCESS_KEY_SQS,
)
self.sqs = self.session.client('sqs', region_name=Settings.AWS_REGION_SQS)
except ConnectionError:
print("No se puede conectar a SQS")
except Exception as e:
print(e)
def receive(self):
try:
response = self.sqs.receive_message(
QueueUrl=self.queue_url,
AttributeNames=[
'ALL'
],
MaxNumberOfMessages=1,
MessageAttributeNames=[
'All'
],
VisibilityTimeout=20,
WaitTimeSeconds=2
)
if response is not None:
self.message = response['Messages'][0]
self.receipt_handle = self.message['ReceiptHandle']
except botocore.exceptions.ClientError as e:
# If a client error is thrown, check whether it was a 404 error.
# If it was a 404 error, the queue does not exist.
error_code = e.response['Error']['Code']
if error_code == '404':
self.exists = False
except Exception as e:
print(e)
def delete(self):
try:
print(self.receipt_handle)
self.sqs.delete_message(
QueueUrl=self.queue_url,
ReceiptHandle=self.receipt_handle
)
self.message = ''
self.receipt_handle = ''
except botocore.exceptions.ClientError as e:
# If a client error is thrown, check whether it was a 404 error.
# If it was a 404 error, the queue does not exist.
error_code = e.response['Error']['Code']
if error_code == '404':
self.exists = False
except Exception as e:
print('Error deleting SQS message')
def __exit__(self, exc_type, exc_val, exc_tb):
print("SQS Terminada exit")
def send(self, data):
try:
response = self.sqs.send_message(
QueueUrl=self.queue_url,
DelaySeconds=10,
MessageAttributes={
'Title': {
'DataType': 'String',
'StringValue': 'Titulo Prueba'
},
'Author': {
'DataType': 'String',
'StringValue': 'Prueba'
}
},
MessageBody=str(data)
)
if response is not None:
# send_message returns metadata such as 'MessageId' (there is no 'Messages' key)
self.message = response.get('MessageId', '')
except botocore.exceptions.ClientError as e:
# If a client error is thrown, check whether it was a 404 error.
# If it was a 404 error, the queue does not exist.
error_code = e.response['Error']['Code']
if error_code == '404':
self.exists = False
except Exception as e:
print(e)
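# Illustrative usage sketch (not part of the original module): pull a single
# message from the inbound queue and delete it. It assumes the queue referenced
# by Settings.AWS_QUEUE_URL_IN exists and the credentials in Settings are valid.
if __name__ == '__main__':
    conn = SQSConnection(Settings.AWS_QUEUE_URL_IN)
    with conn:
        conn.receive()
        if conn.message != '':
            print(conn.message.get('Body'))
            conn.delete()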
``` |
{
"source": "jmartipu/CrearSolicitudDispatcher",
"score": 2
} |
#### File: jmartipu/CrearSolicitudDispatcher/Cron.py
```python
import logging
from time import sleep
import datetime
import Settings
from SQSConnection import SQSConnection
from threading import Thread
def dispatch(msg, tipo):
if tipo == 'Cypress':
sqs_connection_out = SQSConnection(Settings.AWS_QUEUE_URL_OUT_CYPRESS)
elif tipo == 'Puppeteer':
sqs_connection_out = SQSConnection(Settings.AWS_QUEUE_URL_OUT_CYPRESS)
elif tipo == 'ADB Monkey':
sqs_connection_out = SQSConnection(Settings.AWS_QUEUE_URL_OUT_ADB)
elif tipo == 'Calabash':
sqs_connection_out = SQSConnection(Settings.AWS_QUEUE_URL_OUT_CALABASH)
elif tipo == 'Mutode':
print('Mutode')
sqs_connection_out = SQSConnection(Settings.AWS_QUEUE_URL_OUT_MUTODE)
elif tipo == 'VRT':
sqs_connection_out = SQSConnection(Settings.AWS_QUEUE_URL_OUT_VRT)
elif tipo == 'Mdroid':
sqs_connection_out = SQSConnection(Settings.AWS_QUEUE_URL_OUT_MDROIDPLUS)
else:
sqs_connection_out = SQSConnection(Settings.AWS_QUEUE_URL_OUT_CYPRESS)
print('Dispatching')
sqs_connection_out.send(msg)
print('Dispatched')
def process():
try:
sqs_connection_in = SQSConnection(Settings.AWS_QUEUE_URL_IN)
with sqs_connection_in:
sqs_connection_in.receive()
if sqs_connection_in.message != '':
message_attributes = sqs_connection_in.message.get('MessageAttributes')
message_tipo = message_attributes.get('NombreHerramienta').get('StringValue')
dispatch(sqs_connection_in.message, message_tipo)
sqs_connection_in.delete()
except Exception as e:
print(e)
if __name__ == '__main__':
while True:
Thread(target=process).start()
st = str(datetime.datetime.now())
print(st + ' : alive')
sleep(Settings.SLEEP_TIME)
``` |
{
"source": "jmartipu/Cron",
"score": 3
} |
#### File: jmartipu/Cron/DbConnection.py
```python
import psycopg2
import Settings
class DbConnection:
def __enter__(self):
try:
self.connection = psycopg2.connect(
host=Settings.HOST,
port=Settings.PORT,
database=Settings.DATABASE,
user=Settings.USER,
password=Settings.PASSWORD,
)
self.connection.autocommit = True
self.cursor = self.connection.cursor()
# print("Conectado")
except ConnectionError:
print("No se puede conectar a la base de datos")
except:
print("Error General")
def query(self, query):
results = []
try:
self.cursor.execute(query)
results = self.cursor.fetchall()
except:
print("Error en consulta")
return results
def update(self, query):
try:
self.cursor.execute(query)
except:
print("Error en actualizacion")
def __exit__(self, exc_type, exc_val, exc_tb):
self.connection.close()
# print("Conexion Terminada exit")
``` |
{
"source": "jmarucha/minecraft-client",
"score": 2
} |
#### File: minecraft-client/interface/index.py
```python
from mod_python import apache, util, Session
import socket
import tools
import cred
from hashlib import sha256
from random import randint
def index(req):
sess = Session.Session(req)
try:
if(sess['logged']!=1):
util.redirect(req, 'auth')
return
except KeyError:
util.redirect(req, 'auth')
return
req.content_type = 'text/html'
return tools.write_page()
def stop(req,username=""):
sess = Session.Session(req)
try:
if(sess['logged']!=1):
util.redirect(req, 'auth')
return
except KeyError:
util.redirect(req, 'auth')
return
req.content_type = 'text/html'
if (username!=""):
tools.command("stop "+username)
util.redirect(req, '.')
def create(req,username=""):
sess = Session.Session(req)
try:
if(sess['logged']!=1):
util.redirect(req, 'auth')
return
except KeyError:
util.redirect(req, 'auth')
return
req.content_type = 'text/html'
if (username!=""):
tools.command("create "+username)
util.redirect(req, '.')
def auth(req, username="", passwd=""):
sess = Session.Session(req)
sess.load()
attempt = False
## req.write(cred.dict['jan']+'\n')
## req.write(sha256(cred.dict['jan']+sess['salt']).hexdigest())
if not 'salt' in sess:
sess['salt'] = ("%0.4X" % randint(0,256*256-1))+("%0.4X" % randint(0,256*256-1))
sess.save()
elif not username in cred.dict:
sess['salt'] = ("%0.4X" % randint(0,256*256-1))+("%0.4X" % randint(0,256*256-1))
sess.save()
elif passwd==sha256((cred.dict[username]+sess['salt'])).hexdigest():
sess['logged']=1
sess.save()
util.redirect(req,'.')
return
else:
sess['salt'] = ("%0.4X" % randint(0,256*256-1))+("%0.4X" % randint(0,256*256-1))
sess.save()
if passwd:
attempt = True
return tools.login_page(sess['salt'], fail=attempt,perm_salt=cred.salt,username=username)
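# Note on the challenge-response above (illustrative, inferred from this code):
# the login form receives the per-session salt and cred.salt (perm_salt), and
# the client is expected to submit sha256(stored_credential + session_salt)
# instead of the plain password, so a captured request cannot simply be
# replayed once the session salt has been rotated.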
def logout(req):
sess = Session.Session(req)
sess['logged']=0
sess.save()
util.redirect(req,'.')
return
def style(req):
req.content_type = 'text/css'
return """
body {
background-color: #666;
font-family: Arial, sans-serif;
}
table {
border: 1px solid;
border-collapse: collapse;
background-color: #fff;
width: 6cm;
position: absolute; top: 50%; left:50%; transform: translate(-50%, -50%)
}
th {
height: 0.8cm;
font-weight: bold;
background-color: #CCC;
line-height: 0.8cm;
}
td {
border: 1px solid;
}
a {
display: block;
text-align: center;
font-family: Arial,sans-serif;
text-decoration: none;
font-weight: bold;
color: black;
height: 100%;
width: 100%;
line-height: 0.8cm;
}
td.norm {
padding-left: 0.15cm;
height: 0.8cm;
padding-right: 0.15cm;
}
td.add {
background-color: #AFA;
height: 0.8cm;
width: 0.8cm;
}
td.rem {
background-color: #FAA;
height: 0.8cm;
width: 0.8cm;
}
"""
``` |
{
"source": "jmarunix/dwx-zeromq-connector",
"score": 2
} |
#### File: template/strategies/rates_subscriptions.py
```python
import sys
sys.path.append('../../..')
# Import ZMQ-Strategy from relative path
from examples.template.strategies.base.DWX_ZMQ_Strategy import DWX_ZMQ_Strategy
#############################################################################
# Other required imports
#############################################################################
import os
from pandas import Timedelta, to_datetime
from threading import Thread, Lock
from time import sleep
import random
#############################################################################
# Class derived from DWZ_ZMQ_Strategy includes data processor for PULL,SUB data
#############################################################################
class rates_subscriptions(DWX_ZMQ_Strategy):
def __init__(self,
_name="PRICES_SUBSCRIPTIONS",
_instruments=[('EURUSD_M1', 'EURUSD', 1),('GDAXI_M5', 'GDAXI', 5)],
_delay=0.1,
_broker_gmt=3,
_verbose=False):
# call DWX_ZMQ_Strategy constructor and passes itself as data processor for handling
# received data on PULL and SUB ports
super().__init__(_name,
[], # Empty symbol list (not needed for this example)
_broker_gmt,
[self], # Registers itself as handler of pull data via self.onPullData()
[self], # Registers itself as handler of sub data via self.onSubData()
_verbose)
# This strategy's variables
self._instruments = _instruments
self._delay = _delay
self._verbose = _verbose
self._finished = False
# Initializes counters of number of rates received from each instrument
self._eurusd_cnt = 0
self._gdaxi_cnt = 0
# lock for acquire/release of ZeroMQ connector
self._lock = Lock()
##########################################################################
def isFinished(self):
""" Check if execution finished"""
return self._finished
##########################################################################
def onPullData(self, data):
"""
Callback to process new data received through the PULL port
"""
# print responses to request commands
print('\rResponse from ExpertAdvisor={}'.format(data), end='', flush=True)
##########################################################################
def onSubData(self, data):
"""
Callback to process new data received through the SUB port
"""
# split msg to get topic and message
_topic, _msg = data.split(" ")
print('\rData on Topic={} with Message={}'.format(_topic, _msg), end='', flush=True)
# increment counters
if _topic == 'EURUSD_M1':
self._eurusd_cnt += 1
if _topic == 'GDAXI_M5':
self._gdaxi_cnt += 1
# check if received at least 5 prices from EURUSD to cancel its feed
if self._eurusd_cnt >= 5:
# updates the instrument list and request the update to the Expert Advisor
self._instruments=[('GDAXI_M5', 'GDAXI', 5)]
self.__subscribe_to_rate_feeds()
# resets counters
self._eurusd_cnt = 0
# check if received at least 3 rates from GDAXI
if self._gdaxi_cnt >= 3:
# finishes (removes all subscriptions)
self.stop()
#prints dictionary
print(self._zmq._Market_Data_DB)
##########################################################################
def run(self):
"""
Starts price subscriptions
"""
self._finished = False
# Subscribe to all symbols in self._symbols to receive bid,ask prices
self.__subscribe_to_rate_feeds()
##########################################################################
def stop(self):
"""
unsubscribe from all market symbols and exits
"""
# remove subscriptions and stop symbols price feeding
try:
# Acquire lock
self._lock.acquire()
self._zmq._DWX_MTX_UNSUBSCRIBE_ALL_MARKETDATA_REQUESTS_()
print('\rUnsubscribing from all topics', end='', flush=True)
finally:
# Release lock
self._lock.release()
sleep(self._delay)
try:
# Acquire lock
self._lock.acquire()
self._zmq._DWX_MTX_SEND_TRACKPRICES_REQUEST_([])
print('\rRemoving symbols list', end='', flush=True)
sleep(self._delay)
self._zmq._DWX_MTX_SEND_TRACKRATES_REQUEST_([])
print('\rRemoving instruments list', end='', flush=True)
finally:
# Release lock
self._lock.release()
sleep(self._delay)
self._finished = True
##########################################################################
def __subscribe_to_rate_feeds(self):
"""
Starts the subscription to the self._instruments list setup during construction.
1) Setup symbols in Expert Advisor through self._zmq._DWX_MTX_SUBSCRIBE_MARKETDATA_
2) Starts price feeding through self._zmq._DWX_MTX_SEND_TRACKRATES_REQUEST_
"""
if len(self._instruments) > 0:
# subscribe to all instruments' rate feeds
for _instrument in self._instruments:
try:
# Acquire lock
self._lock.acquire()
self._zmq._DWX_MTX_SUBSCRIBE_MARKETDATA_(_instrument[0], _string_delimiter=';')
print('\rSubscribed to {} rate feed'.format(_instrument), end='', flush=True)
finally:
# Release lock
self._lock.release()
sleep(self._delay)
# configure instruments to receive price feeds
try:
# Acquire lock
self._lock.acquire()
self._zmq._DWX_MTX_SEND_TRACKRATES_REQUEST_(self._instruments)
print('\rConfiguring rate feed for {} instruments'.format(len(self._instruments)), end='', flush=True)
finally:
# Release lock
self._lock.release()
sleep(self._delay)
""" -----------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------
SCRIPT SETUP
-----------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------
"""
if __name__ == "__main__":
# creates object with a predefined configuration: instrument list including EURUSD_M1 and GDAXI_M5
print('\rLoading example...', end='', flush=True)
example = rates_subscriptions()
# Starts example execution
print('\rRunning example...', end='', flush=True)
example.run()
# Waits example termination
print('\rWaiting example termination...', end='', flush=True)
while not example.isFinished():
sleep(1)
print('\rBye!!!', end='', flush=True)
``` |
{
"source": "jmaslak/voice-monitor",
"score": 3
} |
#### File: jmaslak/voice-monitor/pitch.py
```python
import aubio
import numpy
import os
import pyaudio
import shutil
import sys
from colors import color
from datetime import datetime
MIDRANGE_LOW = 160
MIDRANGE_HIGH = 180
SAMPLES = 1024
def main():
stream = get_audio_stream()
pDetection = aubio.pitch("default", 2048*2, SAMPLES, 44100)
pDetection.set_unit("Hz")
pDetection.set_silence(-50)
print("READY")
while True:
data = stream.read(SAMPLES)
samples = numpy.frombuffer(data, dtype=aubio.float_type)
pitch = pDetection(samples)[0]
# Compute the energy (volume) of the
# current frame.
volume = numpy.sum(samples**2)/len(samples)
# Floor for volume
if volume > 0.0005:
# Floor and ceiling for pitch
if pitch > 70.0 and pitch < 500.0:
print_sample(volume, pitch)
def print_sample(volume, pitch):
"""Prints a sample"""
# Get the current date, chop off last 3 digits of microseconds to
# get milliseconds
dt = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
pstr = "{:5.0f}".format(pitch)
outstr = dt + " " + pstr + "hz " # 33 chars wide
outstrwidth = len(outstr)
# Colorize
if pitch < MIDRANGE_LOW:
outstr = color(outstr, fg="black", bg="red")
elif pitch <= MIDRANGE_HIGH:
outstr = color(outstr, fg="black", bg="yellow")
else:
outstr = color(outstr, fg="green", bg="black")
outstr = outstr + color("", fg="gray", bg="black")
width = get_width()
if width > (outstrwidth + 5):
bar_space = width - (outstrwidth + 1)
min_bar = 110
max_bar = 260
outbar = []
for i in range(bar_space):
outbar.append(" ")
pos = find_position(pitch, min_bar, max_bar, bar_space)
outbar[pos] = "*"
pos = find_position(MIDRANGE_LOW, min_bar, max_bar, bar_space)
outbar[pos] = color(outbar[pos], bg=52) + color("", bg="black")
pos = find_position(MIDRANGE_HIGH, min_bar, max_bar, bar_space)
outbar[pos] = color(outbar[pos], bg=22) + color("", bg="black")
outstr = outstr + "".join(outbar)
print(outstr)
def find_position(value, min_bar, max_bar, size):
range_bar = max_bar - min_bar
pos = int(((value - min_bar) / range_bar) * size) - 1
if pos >= size:
pos = size - 1
if pos < 0:
pos = 0
return pos
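# Worked example (illustrative): with the defaults used in print_sample,
# find_position(170, 110, 260, 50) -> int(((170 - 110) / 150) * 50) - 1 = 19,
# i.e. a 170 Hz pitch lands roughly 40% of the way across a 50-column bar.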
def get_audio_stream():
"""Initialize the audio stream"""
# We want to ignore stderr.
devnull = os.open(os.devnull, os.O_WRONLY)
old_stderr = os.dup(2)
sys.stderr.flush()
os.dup2(devnull, 2)
os.close(devnull)
audio = pyaudio.PyAudio()
# XXX We probably should check for errors.
# Restore stderr
os.dup2(old_stderr, 2)
os.close(old_stderr)
stream = audio.open(
format=pyaudio.paFloat32,
channels=1,
rate=44100,
input=True,
frames_per_buffer=1024,
)
return stream
def get_width():
"""Gets the screen width"""
if sys.stdout.isatty():
return shutil.get_terminal_size((80, 25)).columns
else:
return 80
if __name__ == '__main__':
main()
``` |
{
"source": "jmaslanka/flask-pdf-url",
"score": 2
} |
#### File: flask-pdf-url/src/utils.py
```python
import os
import uuid
from werkzeug.datastructures import FileStorage
import boto3
import pdfkit
S3_BUCKET = os.environ.get('S3_BUCKET')
S3_BUCKET_REGION = os.environ.get('S3_BUCKET_REGION')
IS_OFFLINE = os.environ.get('IS_OFFLINE')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
WKHTMLTOPDF_PATH = os.path.join(BASE_DIR, 'binaries', 'wkhtmltopdf')
s3 = boto3.client('s3')
def convert_html_to_pdf(url: str = '', file: FileStorage = None) -> bytes:
config = pdfkit.configuration(
wkhtmltopdf='' if IS_OFFLINE else WKHTMLTOPDF_PATH
)
if url:
return pdfkit.from_url(url, False, configuration=config)
elif file:
return pdfkit.from_string(file.read().decode('utf-8'), False, configuration=config)
raise ValueError
def s3_save_file(file: bytes) -> str:
filename = f'pdfs/{uuid.uuid4().hex}.pdf'
s3.put_object(
ACL='public-read', Bucket=S3_BUCKET,
Body=file, ContentType='application/pdf', Key=filename,
)
return f'https://{S3_BUCKET}.s3.{S3_BUCKET_REGION}.amazonaws.com/{filename}'
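# Illustrative usage sketch (not part of the original module): render a public
# page to PDF and upload it. It assumes valid AWS credentials, the S3_BUCKET and
# S3_BUCKET_REGION environment variables, and an available wkhtmltopdf binary.
if __name__ == '__main__':
    pdf_bytes = convert_html_to_pdf(url='https://example.com')
    print(s3_save_file(pdf_bytes))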
``` |
{
"source": "jmaslanka/Python-practice",
"score": 4
} |
#### File: jmaslanka/Python-practice/decorators.py
```python
from functools import wraps
import time
def timer(func):
"""
Calculates time that given function take to execute.
"""
@wraps(func)
def wrapper(*args, **kwargs):
start = time.clock()
result = func(*args, **kwargs)
elapsed = time.clock() - start
print 'Function {} with arguments: {}, {} took {:.6f}s.'.format(
func.__name__, args, kwargs, elapsed
)
return result
return wrapper
def tracer(func):
"""
Keeps track of number of calls on given function.
"""
calls = [0]
@wraps(func)
def wrapper(*args, **kwargs):
calls[0] += 1
print 'Execution of function {} number {}'.format(
func.__name__, calls[0]
)
return func(*args, **kwargs)
return wrapper
def log(func):
"""
Adds information about called function to log file.
"""
name = 'files/logs/func_{}_log.txt'.format(func.__name__)
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
date = time.strftime('%d.%m.%Y %H:%M:%S')
with open(name, 'a') as f:
f.write(
'Time: {} Arguments: {}.\n'.format(date, (args, kwargs))
)
return result
return wrapper
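# Example usage (illustrative): stacking the decorators defined above on a small
# function; @log is left out because it expects the files/logs/ directory to exist.
if __name__ == '__main__':
    @timer
    @tracer
    def add(x, y):
        return x + y

    add(2, 3)
    add(5, y=7)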
```
#### File: jmaslanka/Python-practice/functions.py
```python
import datetime
def func_abs():
"""
abs(number) function returns the absolute value of a number.
Examples: abs(-4), abs(-99.9), abs(980)
"""
print '-4 = {}'.format(abs(-4))
print '-66.5 = {}'.format(abs(-66.5))
print '521 = {}'.format(abs(521))
def func_all():
"""
all(iterable) return True if all elements are true or if empty.
Examples: all([1, 2, 0, 8]), all(('Hi', 'Hey', '')), all([])
"""
print '[] = {}'.format(all([]))
print '[1, 4, 8, 12, 0, 11] = {}'.format(all([1, 4, 8, 12, 0, 11]))
print '[4, 2, 1] = {}'.format(all([4, 2, 1]))
print '[4, "Hello", 1, ""] = {}'.format(all([4, "Hello", 1, ""]))
def func_any():
"""
any(iterable) return True if any element is true, False if empty.
Examples: any(('', 'Hi', '')), any([0, False, True]), any([])
"""
print '[] = {}'.format(any([]))
print '["", "", False, 0] = {}'.format(any(["", "", False, 0]))
print '("Hi", False, 0) = {}'.format(any(("Hi", "", False, 0)))
def func_basestring():
"""
basestring() cannot be called, can used to test if an object is
an instance of 'str' or 'unicode'.
Examples: isinstance(u'unicode string', basestring)
"""
print '"I\'m string!" = {}'.format(isinstance("I'm string!", basestring))
print 'u"Unicode" = {}'.format(isinstance(u"Unicode", basestring))
def func_bin():
"""
bin(number) convert an integer to binary string, number can be other
object but has to have __index__ method that returns an integer.
Examples: bin(521), bin(128), bin(255)
"""
print '333 = {}'.format(bin(333))
print '127 = {}'.format(bin(127))
class Number:
def __init__(self, value):
self.value = value
def __index__(self):
return self.value ** 2
print 'Number(4) (__index__ return power) = {}'.format(bin(Number(4)))
def func_bool():
"""
bool(object) if object is false or omited return False, else True.
Example: bool("Hello"), bool(True), bool(0), bool({})
"""
print '"Hello!" = {}'.format(bool('Hello!'))
print 'True = {}'.format(bool(True))
print '[] = {}'.format(bool({}))
def func_bytearray():
"""
bytearray(object) return a new array of bytes. If object is an
integer it will initialize an array of that length filled with null bytes.
Examples: bytearray("AbC"), bytearray(3), bytearray(u"unicode")
"""
print '"AbC" = {}'.format(bytearray('AbC'))
print '"AbC" -> list() = {}'.format(list(bytearray('AbC')))
print '5 -> repr() = {}'.format(repr(bytearray(5)))
print '[65, 33, 36, 112] = {}'.format(bytearray([65, 33, 36, 112]))
def func_callable():
"""
callable(object) return True if object can be called - by using ().
Examples: callable(function), callable(My_Class)
"""
print 'abs = {}'.format(callable(abs))
print 'int = {}'.format(callable(int))
print '"string" = {}'.format(callable('string'))
class My_Class:
pass
print 'My_Class = {} - {}'.format(
callable(My_Class), 'Class can be called to create instance'
)
print 'My_Class\' instance (empty class) = {} - {}'.format(
callable(My_Class()), 'Instance must have __call__() method'
)
def func_chr():
"""
chr(integer) return string of one character from ASCII table
corresponding to given integer, integer must be in range [0..255].
Examples: chr(97), chr(250), chr(65)
"""
print '97 = {}'.format(chr(97))
print '250 = {}'.format(chr(250))
print '300 = '
try:
chr(300)
except ValueError as e:
print 'Value Error:', e.message
def func_classmethod():
"""
classmethod(function) return a class method for function.
Examples: add = classmethod(add), or @classmethod
"""
class Class:
def a(self):
print self
@classmethod
def b(cls):
print cls
print 'Class\' normal method = {}'.format(classmethod(Class.a))
def func_cmp():
"""
cmp(x, y) return negative value if x < y,
0 if x == y and positive if x > y.
Examples: cmp(-5, 6), cmp(10, 10), cmp("aBC", "Abc")
"""
print '-10, 20 = {}'.format(cmp(-10, 20))
print '35, 35 = {}'.format(cmp(35, 35))
print '"aBC", "Abcde" = {}'.format(cmp("aBC", "Abcde"))
def func_compile():
"""
compile(source, filename, mode) compile source into code or AST object.
If source wasn't from file pass recognizable value (eg. '<string>')
Mode specifies what kind of code must be compiled - 'exec' if source
consists of many statements, 'eval' if it's single expression or
'single' if it's single interactive statement.
Examples: compile("max([6, 1, 9])", "<string>", "eval")
"""
print '1 + 6 single = '
eval(compile("1 + 6", '<string>', 'single')) # single prints result
print 'abs(-66.6) eval = {}'.format(
eval(compile('abs(-66.6)', '<string>', 'eval')) # eval returns result
)
print 'max([2, -6, 10]) and cmp(10, 10) exec = '
eval(compile(
'print max([2, -6, 10])\nprint cmp(10, 10)', '<string>', 'exec'
)) # exec neither return nor print result
def func_complex():
"""
complex(real, imag) return complex number with value real + imag*1j
or convert a string or number to complex number.
Examples: complex(4), complex(-1, -4), complex("1+3j")
"""
print '4 = {}'.format(complex(4))
print '"5+3j" = {}'.format(complex('5+3j'))
print '-6, -3 = {}'.format(complex(-6, -3))
def func_delattr():
"""
delattr(object, name) delete named attribute of object (if allowed).
Examples: delattr(my_person, "name"), delattr(my_number, "value")
"""
class Car:
def __init__(self, value):
self.wheels = value
my_car = Car(4)
print 'my_car.wheels before = {}'.format(my_car.wheels)
delattr(my_car, 'wheels')
print 'my_car.wheels after: '
try:
print my_car.wheels
except AttributeError as e:
print 'AttributeError:', e.message
def func_dict():
"""
dict(**kwarg), dict(mapping, **kwarg), dict(iterable, **kwarg)
create a new dictionary.
"""
print 'one = 1, two = 2, three = 3 = {}'.format(
dict(one=1, two=2, three=3)
)
print 'zip(["four", "five", "six"], [4, 5, 6]) = {}'.format(
dict(zip(["four", "five", "six"], [4, 5, 6]))
)
print '[("seven", 7), ("eigth", 8), ("nine", 9)] = {}'.format(
dict([("seven", 7), ("eigth", 8), ("nine", 9)])
)
def func_dir():
"""
dir(object) or dir() return a list of valid attributes for object or
list of names in the current local scope if no arguments.
Example: dir(), dir(list), dir(6), dir(Class)
"""
local_string = "Hey!"
local_number = 55
print 'dir() = {}'.format(dir())
print 'dir(list) = {}'.format(dir(list)[-9:])
def func_divmod():
"""
divmod(a, b) return quotient and remainder from long division.
Examples: divmod(10, 3), divmod(7.5, 1.5)
"""
print '13, 4 = {}'.format(divmod(13, 4))
print '15.5, 1.25 = {}'.format(divmod(15.5, 1.25))
def func_enumerate():
"""
enumerate(sequence, start=0) return an enumerate object.
Examples: enumerate(('zero', 'one')), enumerate(['five', 'six'], start=5)
"""
print 'zero, one, two = {}'.format(list(enumerate(['zero', 'one', 'two'])))
print 'six, seven, start=6 = {}'.format(
list(enumerate(('six', 'seven', 'eight'), 6))
)
def func_eval():
"""
eval(expression[, globals[, locals]]) return result of expression.
Example: eval('8/2'), eval('min(my_list)')
"""
number = 4
print '8/2 = {}'.format(eval('8/2'))
print '16/number (local) = {}'.format(eval('16/number'))
def func_execfile():
"""
execfile(filename[, globals[, locals]]) parses file and evaluate it
as a sequence of Python statements, locals can be mapping object.
Example: execfile('my_file.txt')
"""
with open('test_file.txt', 'w') as f:
f.write('x = int(raw_input("Enter number: "))\nprint x ** 2')
execfile('test_file.txt')
def func_file():
"""
file(name[, mode[, buffering]]) constructs file object, use open()
instead; use this one in isinstance() checks.
Examples: isinstance(my_file, file), file('test.txt', 'r')
"""
with file('test.txt', 'w+') as f:
f.write('Hi!')
print 'isinstance(f, file) = {}'.format(isinstance(f, file))
def func_filter():
"""
filter(function, iterable) return list of those elements of iterable
for which function returns true, if iterable is string or tuple
returns also that type, if function is None filter by 'is True'.
Examples: filter(is_int, [1, 'yes', '5'), filter(None, ['', 2, 0, 'Hi'])
"""
print 'x > 5, [1, 4, 9] = {}'.format(filter(lambda x: x > 5, [1, 4, 9]))
print 'None, [-8, "", 5, ()] = {}'.format(filter(None, [-8, "", 5, ()]))
print 'x > 96, "ThDVi|TFtQh8?e33Fre" = {}'.format(
filter(lambda x: ord(x) > 96, "ThDVi|TFtQh8?e33Fre")
)
def func_float():
"""
float(number) return floating point number from number or string.
Examples: isinstance(0.1, float), float("2.44", float(-5)
"""
print 'None = {}'.format(float())
print '"7.44" = {}'.format(float("7.44"))
print '5.5 isinstance = {}'.format(isinstance(5.5, float))
def func_format():
"""
format(value[, format_spec]) covert value to formated representation.
Examples: 'Hi {}!'.format(name)
"""
print 'Complex {0} = {0.real} and {0.imag}'.format(complex(1, -5))
print 'List = [{0[0]}, {0[1]}, {0[2]}]'.format([6, 4, 2])
print '<-{:>30}->'.format('right align')
print '<-{:*^45}->'.format('center with asterisks')
print 'int: {0:d} bin: {0:b}, oct: {0:o}, hex: {0:x}, chr: {0:c}'.format(
0b1100101
)
print 'Division: {:.1%}'.format(87.0/97)
print '{:%d-%m-%Y %H:%M:%S}'.format(datetime.datetime.now())
def func_frozenset():
"""
frozenset(iterable) return frozenset object with elements from iterable.
Examples: frozenset(set(1, 4, 9)), frozenset([9, 11, 22])
"""
print '[1, 4, 9] = {}'.format(frozenset([1, 4, 9]))
print '[1, 4, 9] -> list = {}'.format(list(frozenset([1, 4, 9])))
def func_getattr():
"""
getattr(object, name[, default]) return value of given attribute,
if name isn't an attribute default is returned, if default isn't
provided AttributeError will be raised.
Examples: getattr(car, wheels, 4), getattr(person, legs, 2)
"""
class Computer:
def __init__(self, ram=2, storage=500):
self.RAM = ram
self.storage = storage
print 'computer, RAM, "No ram" = {}'.format(
getattr(Computer(), 'RAM', 'No RAM')
)
print 'computer, CPU, "No CPU" = {}'.format(
getattr(Computer(), 'CPU', 'No CPU')
)
def func_globals():
"""
globals() return dictionary with current global symbol table.
"""
glob = globals()
print 'globals = Length {}, {:.100}...}}'.format(len(glob), glob)
def func_hasattr():
"""
hasattr(object, name) return True if name is an attribute, False if not.
Examples: hasattr(car, 'legs'), hasattr(computer, 'GPU')
"""
class Computer:
def __init__(self):
self.CPU = True
self.RAM = True
print 'computer, cpu = {}'.format(hasattr(Computer(), 'CPU'))
print 'computer, leg = {}'.format(hasattr(Computer(), 'leg'))
def func_hash():
"""
hash(object) return hash value of the object if it has one.
Examples: hash(dict(([1, 'a'], [2, 'b']))[1])
"""
my_dict = {1: "a", 2: "b", 5: "c"}
print '{}[1] = {} '.format(
my_dict, hash(my_dict[1])
)
print '{}[5] = {} '.format(
my_dict, hash(my_dict[5])
)
def func_help():
"""
help([object]) invoke the built-in help system.
Examples: help(), help(str)
"""
pass
def func_hex():
"""
hex(x) convert an integer to lowercase hex string with '0x' prefix
if x is other object it has to define __hex__() that returns string.
Examples: hex(-30), hex(9821)
"""
print '-30 = {}'.format(hex(-30))
print '1204 = {}'.format(hex(1204))
def func_id():
"""
id(object) return 'identity' of an object - unique integer for object.
Examples: id(my_car), id(variable)
"""
x, y, z = 5, 6, 'string'
print '5, 6, "string" = {}, {}, {}'.format(id(x), id(y), id(z))
def func_input():
"""
input([prompt]) takes input from the user and evaluates it; it doesn't
catch user errors, so raw_input is preferred.
"""
x = input('Enter anything: ')
print x
def func_int():
"""
int(x=0)/int(x, base=10) return integer converted from x, if base is
given, x must be string in given base.
Examples: int(22.22), int('-11'), int('0723', 8), int('101101', 2)
"""
print '88.11 = {}'.format(int(88.11))
print '"-132" = {}'.format(int('-132'))
print '"10110101", 2 = {}'.format(int('10110101', 2))
print '"A5B9F", 16 = {}'.format(int('A5B9F', 16))
def func_isinstance():
"""
isinstance(object, classinfo) return True if object is an instance of
classinfo argument or any subclass of that class.
Examples: isinstance(str, basestring), isinstance(my_car, Car)
"""
print 'string, bytestring = {}'.format(isinstance(str, basestring))
print 'int, float = {}'.format(isinstance(int, float))
class Number:
pass
class Complex(Number):
pass
print 'Complex(), Number = {}'.format(isinstance(Complex(), Number))
def func_issubclass():
"""
issubclass(class, classinfo) return True is class if subclass of
classinfo, class is considered a subclass of itself.
Examples: issubclass(BMW, Cars), issubclass(Cat, Animal)
"""
class Animal:
pass
class Donkey(Animal):
pass
class Audi:
pass
print 'Audi, (Animal, Donkey) = {}'.format(
issubclass(Audi, (Animal, Donkey))
)
print 'Donkey, Animal = {}'.format(issubclass(Donkey, Animal))
print 'Animal, Animal = {}'.format(issubclass(Animal, Animal))
def func_iter():
"""
iter(o[, sentinel]) return iterator object from o. without sentinel
o must be a collection object with an __iter__() method or support
__getitem__() method with integer args starting at 0.
If sentinel is given o must be callable object.
"""
def f():
f.count += 1
return f.count
f.count = 0
print 'counter(), 8 = {}'.format(list(iter(f, 8)))
print '[1, 5, 6] = {}'.format(str(iter([1, 5, 6])))
def func_len():
"""
len(object) return length (number of items) of an object.
Examples: len("string), len([1, 2, '', 4])
"""
print '"Happy_string" = {}'.format(len('Happy_string.'))
print '{{1: True, 2: False, 3: False}} = {}'.format(
len({1: True, 2: False, 3: False})
)
def func_list():
"""
list([iterable]) return list of given iterable or empty list.
Examples: list('abc'), list(['a', 'b', 'c'])
"""
print '(1, 4, 9) = {}'.format(list((1, 4, 9)))
print '"string" = {}'.format(list('string'))
print '{{1: True, 2: False}} = {}'.format(list({1: True, 2: False}))
def func_locals():
"""
locals() return a dictionary with the current local symbol table.
"""
name = 'Local variable'
print 'locals() = {}'.format(locals())
def func_long():
"""
long(x=0, base=10) return long integer from string or number x.
Works almost the same as int() function.
Examples: long(11.11), long('345123')
"""
print '114.55 = {}'.format(long(114.55))
print '"98123398217439812783901" = {}'.format(
long('98123398217439812783901')
)
def func_map():
"""
map(function, iterable, ...) apply function to every item of
iterable, if many iterables are provided, function must take that many
arguments and is applied to items from all iterables in parallel.
Examples: map(int, "123"), map(min, (1, 2, 3), (3, -1, 2))
"""
print 'int, "345" = {}'.format(map(int, '345'))
print 'max, (5, 9, 0), (9, 6, 2) = {}'.format(
map(max, (5, 9, 0), (9, 6, 2))
)
print 'lambda x, y: x == y, [3, 4, 7], [9, 4, 6] = {}'.format(
map(lambda x, y: x == y, [3, 4, 7], [9, 4, 8])
)
def func_max():
"""
max(iterable[, key]) or max(x, y, *args[, key]) return the largest
item in an iterable or largest of given arguments.
Examples: max([1, 0, 4), max({3: 9, 7: 2}, key=lambda x: x[1])
"""
print '[92, 11, 33] = {}'.format(max([92, 11, 33]))
print '(10, "5"), (5, "10") = {}'.format(max((10, '5'), (5, '10')))
print '(10, "5"), (5, "10"), key=lambda x: int(x[1]) = {}'.format(
max((10, '5'), (5, '10'), key=lambda x: int(x[1]))
)
def func_min():
"""
min(iterable[, key]) or min(x, y, *args[, key]) return the smallest
item in an iterable or smallest of given arguments.
Examples: min([1, 0, 4), min({3: 9, 7: 2}, key=lambda x: x[1])
"""
print '[92, 11, 33] = {}'.format(min([92, 11, 33]))
print '(10, "5"), (5, "10") = {}'.format(min((10, '5'), (5, '10')))
print '(10, "5"), (5, "10"), key=lambda x: int(x[1]) = {}'.format(
min((10, '5'), (5, '10'), key=lambda x: int(x[1]))
)
def func_next():
"""
next(iterator[, default]) retrieve next item from iterator, default
arg will be returned if iterator is exhausted, otherwise StopIteration.
Examples: x = (x**2 for x in xrange(5)) next(x, 'end')
"""
def power(x, start=1):
while True:
start = start * x
yield start
x = power(2)
y = iter([True, True])
print 'x = power(2) = {}, {}, {}...'.format(next(x), next(x), next(x))
print 'y = iter([True, True]) = {}, {}, {}'.format(
next(y), next(y), next(y, 'StopIteration Exception')
)
def func_oct():
"""
oct(x) return integer converted to an octal string.
Examples: oct(10), oct(20), oct(8)
"""
print '8 = {}'.format(oct(8))
print '551 = {}'.format(oct(551))
def func_open():
"""
open(name[, mode[, buffering]]) return opened file object with name arg
as file name, and mode string stating what mode to use r/w/a/b/r+/w+/a+
buffering specifies buffer size 0 - unbuffered, 1 - line, n - n bytes.
Examples: open('log.txt', 'w'), open('data.txt', 'r', 1024)
"""
pass
def func_ord():
"""
ord(c) takes string of length one and return integer representing
the Unicode code point or value of byte when c is an 8-bit string.
Examples: ord('Z'), ord(u'\u2020')
"""
print 'A and z = {} and {}'.format(ord('A'), ord('z'))
print "u'\u3030' = {}".format(ord(u'\u3030'))
def func_pow():
"""
pow(x, y[, z]) return x to the power y, if z is present return
x to the power y, modulo z.
Examples: pow(2, 5), pow(3, 3, 5)
"""
print '4, 3 = {}'.format(pow(4, 3))
print '7, 2, 5 = {}'.format(pow(7, 2, 5))
def func_print():
"""
print (*obj, sep='', end='\n', file=sys.stdout)
prints obj to the stream file, separated by sep and followed by end.
to use in Python 2.x use from __future__ import print_function.
Examplex: print(2, 3, 4, sep='-', end=' END')
"""
print 'print(2, 3, 4, sep=\'-\', end=\' END\') = 2-3-4 END'
def func_property():
"""
property([get[, set[, del[, doc]]]]) return property attr for
new-style classes. Arguments are functions, doc is string.
"""
class Car(object):
def __init__(self):
# store values under "private" names so the properties below don't recurse
self._wheels = 4
self._color = 'red'
def getwheels(self):
return self._wheels
def setwheels(self, value):
self._wheels = value
def delwheels(self):
del self._wheels
wheels = property(getwheels, setwheels, delwheels, 'Wheels')
# Can use @property decorator as well
@property
def color(self):
return self._color
@color.setter
def color(self, color):
self._color = color
@color.deleter
def color(self):
del self._color
def func_range():
"""
range(start, stop, step=1) return list containing integers from
start up to (but not including) stop with the given step between.
Examples: range(1, 5), range(1, 8, 2), range(2, -8, -2)
"""
print '5, 14, 2 = {}'.format(range(5, 14, 2))
print '3, -8, -2 = {}'.format(range(3, -8, -2))
def func_raw_input():
"""
raw_input([prompt]) reads line from input and returns it as a string,
if prompt is given it is written before reading line.
Examples: raw_input('give number: '), raw_input('--> ')
"""
x = raw_input('Enter anything: ')
print 'You entered: {}'.format(x)
def func_reduce():
"""
reduce(func, iterable[, initializer]) apply function of two arguments
cumulatively to the items of iterable from left to right and return
a single (last) value. Initializer is given it is places before
the items of the iterable.
Examples: reduce(pow, [2, 3, 2, 1])
"""
print 'pow, [2, 2, 3, 2] = {}'.format(reduce(pow, [2, 2, 3, 2]))
print 'lambda x, y: x/y, [5.0, 7, 10, 1] = {}'.format(
reduce(lambda x, y: x/y, [5.0, 7, 10, 1])
)
def func_reload():
"""
reload(module) reload a previously imported module and return
that reloaded module object.
Example: reload(functions)
"""
pass
def func_repr():
"""
repr(object) return a string containing printable representation of
an object. Classes can use __repr__().
Examples: repr(Class_object), repr(variable)
"""
class Car:
def __repr__(self):
return 'An empty car!'
print 'datetime (module) = {}'.format(repr(datetime))
print 'Car instance = {}'.format(repr(Car()))
def func_reversed():
"""
reversed(seq) return a reverse iterator. seq must be an object which
has a __reversed__() method or support the sequence protocol -
(__len__() and __getitem__() with args starting at 0).
Examples: reversed([1, 2, 3, 4, 5]), reversed('straight')
"""
print '[4, 5, 6, 1, 3] = {}'.format(list(reversed([4, 5, 6, 1, 3])))
print '\'My string!\' = {}'.format(''.join(reversed('My string!')))
def func_round():
"""
round(number[, ndigits]) return the float value rounded to ndigits
after decimal point (default is 0).
Examples: round(0.55), round(-1.45675234, 3)
"""
print '44.5 = {}'.format(round(44.5))
print '-0.481074, 4 = {}'.format(round(-0.481074, 4))
print '-0.5 = {}'.format(round(-0.5))
def func_set():
"""
set([iter]) return a new set object, optionally built from iter items.
Examples: set([1, 5, 7]), set(), set('string')
"""
print 'empty = {}'.format(set())
print '[5, 9 ,22] = {}'.format(set([5, 9, 22]))
print '\'Hello World!\' = {}'.format(set('Hello World!'))
def func_setattr():
"""
setattr(object, name, value) assign value to given object's attribute
provided object allows it. Same as: object.name = value.
Example: setattr(my_car, color, 'red')
"""
pass
def func_slice():
"""
slice(start, stop[, step]) or slice(stop) return a slice object
representing set of indices specified by range(start, stop, step).
Example: slice(10, -50, -5)
"""
print '10, -12, -3 = {}'.format(slice(10, -12, -3))
def func_sorted():
"""
sorted(iterable[, cmp[, key[, reverse]]]) return new sorted list
from iterable. cmp specifies comparison function, key specifies a
one arg function that modify each list element before comparison.
Examples: sorted([3, 1, 7], sorted('hElLO', key=str.lower, reverse=True)
"""
print '"hElLO", key=str.lower = {}'.format(sorted('hElLO', key=str.lower))
print '[3, 0, 5] reverse=True = {}'.format(sorted([3, 0, 5], reverse=True))
def func_staticmethod():
"""
staticmethod(function) return a static method for function
Can use @staticmethod decorator as well.
Example: function = staticmethod(function)
"""
pass
def func_str():
"""
str(object='') return string containing printable representation of
an object, the goal is to return a printable string.
Examples: str(my_car), str(method)
"""
print 'datetime.datetime = {}'.format(str(datetime.datetime))
print '-55.4 = {}'.format(str(-55.4))
def func_sum():
"""
sum(iterable[, start]) return sum of all items in iterable and start.
Examples: sum([4, 67, -11]), sum((1, 3, 9), 10)
"""
print '[0, -20, 5] = {}'.format(sum([0, -20, 5]))
print '(1, 6, 2), 10 = {}'.format(sum((1, 6, 2), 10))
def func_super():
"""
super(type[, object-or-type]) return proxy object that
delegates method calls to a parent or sibling class of type.
Used for accessing overridden inherited methods.
Example: super(Mercedes, self).method()
"""
class A(object):
@staticmethod
def method():
return 'Parent of Car class.'
class Car(A):
def __init__(self):
self.wheels = 4
@staticmethod
def method():
return 'Car class.'
class Audi(Car):
def __init__(self):
super(Audi, self).__init__()
self.color = 'red'
@staticmethod
def method():
return 'Audi class.'
print 'my_audi.method() = {}'.format(Audi().method())
print 'super(Audi, my_audi).method() = {}'.format(
super(Audi, Audi()).method()
)
print 'super(Car, my_audi).method() = {}'.format(
super(Car, Audi()).method()
)
def func_tuple():
"""
tuple([iterable]) return a tuple from iterable if given, empty otherwise
Examples: tuple(), tuple('string'), tuple([1, 3, 5])
"""
print '\'Hello World!\' = {}'.format(tuple('Hello World!'))
print '[0, True, 5, -9, 13] = {}'.format(tuple([0, True, 5, -9, 13]))
def func_type():
"""
type(object) or type(name, bases, dict) return type of an object or if
3 arguments are given return new class.
Examples: type('abc'), type('MyClass', (object, Parent), {'attr': None})
"""
print '555 = {}'.format(type(555))
print '"Class", object, {{attr: 5}} = {}'.format(
type('Class', (object, ), {'attr': 5})
)
def func_unichr():
"""
unichr(i) return unicode string of one character whose code is integer i.
Examples: unichr(97), unichr(5123)
"""
print u'97 = {}'.format(unichr(97))
print u'50321 = {}'.format(unichr(50321))
def func_unicode():
"""
unicode(object='') or unicode(object[, encoding[, errors]]) return
unicode string version of object
Examples: unicode('string'), unicode(string, 'utf-8', 'ignore')
"""
pass
def func_vars():
"""
vars([object]) return __dict__ attribute for module/class/instance or
any other object with __dict__ attribute. If empty acts like locals().
Examples: vars(), vars(datetime), vars(MyClass)
"""
local_variable = 5
print 'empty = {}'.format(vars())
print 'datetime = {:.100}...'.format(vars(datetime))
def func_xrange():
"""
xrange(stop) or xrange(start, stop[, step]) return xrange object which
yields values as it was a list without storing them simultaneously.
Examples: xrange(10), xrange(5, 45, 5)
"""
print '10 = {}'.format(xrange(10))
print '10, -11, -5 --> list = {}'.format(list(xrange(10, -11, -5)))
def func_zip():
"""
zip([iterable, ...]) return list of tuples simultaneously taking one
element from each iterable. Returned list is truncated in length to
the length of the shortest argument sequence.
Example: zip([1, 2], ['a', 'b'])
"""
print "[10, 20, 30], ('ten', 'twenty') = {}".format(
zip([10, 20, 30], ('ten', 'twenty'))
)
```
#### File: jmaslanka/Python-practice/type_methods.py
```python
from __future__ import print_function
def numerical():
integer = 509
double = 55.021
print('int.bit_length() return number of bits in binary')
print(integer, bin(integer), integer.bit_length(), sep=' --> ')
print('-' * 90)
print('float.is_integer() return True if is finite with integral value')
print(double, double.is_integer(), sep=' --> ')
print(55.0, 55.0.is_integer(), sep=' --> ')
print('-' * 90)
print('float.hex() return hex representation of float number')
print(double, double.hex(), sep=' --> ')
print('-' * 90)
print('float.fromhex(s) return float from hexadecimal string s')
print(double.hex(), float.fromhex(double.hex()), sep=' --> ')
print('-' * 90)
def string():
print('capitalize first character.'.capitalize())
print('center string with fillchar'.center(10, '-'))
print('how many a letters are here till 20 char aaaaa'.count('a', 0, 20))
print('does this string ends with THIS?'.endswith('THIS?'))
print('position of lion word in this string, -1 otherwise'.find('lion'))
print('will rise ValueError if lion is not found!'.index('lion'))
print('ReturnTrueIfAllAreAlphabeticSPACEisNOT'.isalpha())
print('412312413242354325345435211100000111'.isdigit())
print('no uppercase characters - return true'.islower())
print(' \t \t '.isspace())
print('Some Kind Of Title'.istitle())
print('ONLY INTERNET TROLLS WILL PASS!'.isupper())
print(' '.join(['It', 'just', 'joins', 'all', 'elements!']))
print('What am I'.ljust(20, '?'))
print('I\'M NO LONGER INTERNET TROLL'.lower())
print('www.Remove given characters from the beggining.com'.lstrip())
print('How about split in the middle?'.partition('split'))
print('Eva likes Eva, but Eva...'.replace('Eva', 'Annie', 1))
print('1 2 3 Testing !'.split())
print('Line splitting\ndone\nright!'.splitlines())
print('THIS is the beggining'.startswith('THIS'))
print('tHIS wILL sWAP cHARACTERS.'.swapcase())
print('return all characters uppercase\'d'.upper())
if __name__ == '__main__':
numerical()
string()
``` |
{
"source": "jmaslanka/rss_scraper",
"score": 2
} |
#### File: rss_scraper/api/tests.py
```python
from unittest import mock
import requests
import pytest
from django.urls import reverse
from django.test import override_settings
from .models import ExchangeRate
from .literals import CURRENCIES
@pytest.mark.django_db
def test_rates_list(client):
r = client.get(reverse('api:exchangerate-list'))
assert r.status_code == 200
assert r.json()['results'] == []
items_count = 5
rate = 24.5423
ExchangeRate.objects.bulk_create([
ExchangeRate(currency=c[0], rate=rate) for c in CURRENCIES[:items_count]
])
r = client.get(reverse('api:exchangerate-list'))
assert r.status_code == 200
assert r.json()['count'] == items_count
assert r.json()['results'][2]['currency'] == CURRENCIES[2][0]
assert r.json()['results'][2]['rate'] == '24.542300'
@pytest.mark.django_db
def test_rates_details(client):
r = client.get(reverse('api:exchangerate-detail', kwargs=dict(currency='USD')))
assert r.status_code == 404
assert r.json() == {'detail': 'Not found.'}
currency = CURRENCIES[0][0]
rate = 12.11
ExchangeRate.objects.create(currency=currency, rate=rate)
r = client.get(reverse('api:exchangerate-detail', kwargs=dict(currency=currency)))
assert r.status_code == 200
assert r.json()['rate'] == '12.110000'
class TestRatesUpdate:
def mocked_rates_response(*args, **kwargs):
class MockResponse:
def __init__(self):
rate = 12.2345
self.ok = True
self.content = f'<cb:value frequency="daily" decimals="4">{rate}</cb:value>'.encode('UTF-8')
return MockResponse()
@pytest.mark.django_db
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
@mock.patch('requests.get', side_effect=mocked_rates_response)
def test_rates_update(self, mock, client):
r = client.get(reverse('api:exchangerate-list'))
assert r.status_code == 200
assert r.json()['count'] == 0
r = client.post(reverse('api:update-rates'))
assert r.status_code == 200
r = client.get(reverse('api:exchangerate-list'))
assert r.status_code == 200
assert r.json()['count'] == len(CURRENCIES)
```
#### File: rss_scraper/api/views.py
```python
from rest_framework import status
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import ReadOnlyModelViewSet
from .models import ExchangeRate
from .serializers import ExchangeRateSerializer
from .tasks import update_exchange_rates
class UpdateRatesAPIView(APIView):
permission_classes = (AllowAny,)
authentication_classes = []
def post(self, request, *args, **kwargs):
update_exchange_rates.delay()
return Response(status=status.HTTP_200_OK)
class ExchangeRatesViewSet(ReadOnlyModelViewSet):
queryset = ExchangeRate.objects.all()
serializer_class = ExchangeRateSerializer
permission_classes = (AllowAny,)
authentication_classes = []
lookup_field = 'currency'
``` |
{
"source": "jmaslanka/security-django",
"score": 2
} |
#### File: auth_ex/api/views.py
```python
from django.contrib.auth import login as auth_login
from django.utils.decorators import method_decorator
from django.views.decorators.debug import sensitive_post_parameters
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from .serializers import LoginSerializer
from users.api.serializers import UserSerializer
@method_decorator(sensitive_post_parameters('password'), name='dispatch')
class LoginAPIView(GenericAPIView):
permission_classes = (AllowAny,)
authentication_classes = []
serializer_class = LoginSerializer
www_authenticate_realm = 'api'
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
auth_login(self.request, serializer.user)
return Response(UserSerializer(serializer.user).data)
```
#### File: src/auth_ex/middleware.py
```python
from django.conf import settings
from django.urls import reverse
class DeviceCookieMiddleware:
"""
Set device cookie if request is a successful login.
"""
def __init__(self, get_response):
self.get_response = get_response
self.login_paths = [
reverse('auth:login'),
reverse('admin:login'),
]
def __call__(self, request):
response = self.get_response(request)
if request.path in self.login_paths and \
request.method.lower() == 'post' and \
request.user.is_authenticated and \
response.status_code in [200, 302]:
response.set_signed_cookie(
settings.DEVICE_COOKIE_NAME,
request.user.email,
salt=settings.DEVICE_COOKIE_SALT,
max_age=settings.DEVICE_COOKIE_AGE,
secure=settings.CSRF_COOKIE_SECURE,
samesite='Strict',
httponly=True,
)
return response
```
#### File: src/manager/models.py
```python
import uuid
from django.conf import settings
from django.db import models
from django.db.models.functions import Now
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from config.utils import upload_to_classname_uuid
class Safe(models.Model):
'''
Model representing user's safe that contain all secrets.
'''
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False,
)
owner = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE,
verbose_name=_('owner'),
related_name='safes',
)
image = models.ImageField(
_('image'),
upload_to=upload_to_classname_uuid,
blank=True,
)
data = models.TextField(
_('encrypted data'),
blank=True,
)
date_created = models.DateTimeField(
_('created at'),
auto_now_add=True,
)
last_accessed = models.DateTimeField(
_('last accessed'),
default=timezone.now,
blank=True,
)
class Meta:
verbose_name = _('Safe')
verbose_name_plural = _('Safes')
def __str__(self):
return f'{self.id} (UserID: {self.owner_id})'
def update_last_access_time(self):
self.__class__.objects.filter(id=self.id).update(last_accessed=Now())
class SafeItem(models.Model):
'''
Model representing single item in safe.
'''
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False,
)
safe = models.ForeignKey(
Safe,
on_delete=models.CASCADE,
verbose_name=_('safe'),
related_name='items',
)
data = models.TextField(
_('encrypted data'),
blank=True,
)
class Meta:
verbose_name = _('Safe item')
verbose_name_plural = _('Safe items')
def __str__(self):
return f'{self.id} (SafeID: {self.safe_id})'
```
#### File: src/users/views.py
```python
from django.core.paginator import Paginator
from django.views.generic import TemplateView
class SettingsView(TemplateView):
template_name = 'auth/settings.html'
logs_paginate_by = 8
def get_context_data(self, **kwargs):
kwargs = super().get_context_data(**kwargs)
paginator = Paginator(
self.request.user.logs.order_by('-date'),
self.logs_paginate_by,
)
kwargs['logs'] = paginator.get_page(
self.request.GET.get('logs-page')
)
return kwargs
``` |
{
"source": "jmaslanka/SudokuSolver",
"score": 3
} |
#### File: jmaslanka/SudokuSolver/main.py
```python
import sys
import subprocess
import time
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from Solver import Board
qtCreatorFile = "window.ui"
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
class MyApp(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self):
QtWidgets.QMainWindow.__init__(self)
Ui_MainWindow.__init__(self)
self.setupUi(self)
self.solve_button.clicked.connect(self.solve)
self.reset_button.clicked.connect(self.reset)
self.boxes = self.get_boxes()
QtWidgets.QWidget.setFixedSize(self, 380, 412)
def get_boxes(self):
boxes = []
for counter in range(1, 82):
boxes.append(getattr(self, 'box_{}'.format(counter)))
return boxes
def create_input(self):
board = [[0 for x in range(9)] for y in range(9)]
values = iter(self.boxes)
for x in range(9):
for y in range(9):
value = next(values).text()
if value:
board[x][y] = int(value)
return board
def create_output(self, board):
for x in range(9):
for y in range(9):
self.boxes[(x*9)+y].setText(str(board[x][y]))
def solve(self):
"""
Using command to run Solver with PyPy to speed up the algorithm.
If you want to use pure Python just do:
solution = Board(self.create_input()).solve()
"""
self.boxes = self.get_boxes()
start_time = time.perf_counter()
result = subprocess.run(
['pypy', 'Solver.py', str(self.create_input())],
stdout=subprocess.PIPE
)
elapsed = time.perf_counter() - start_time
solution = result.stdout.decode('utf-8')
if not solution.startswith('['):
msg = QtWidgets.QMessageBox.warning(self, 'Warning', solution)
else:
self.create_output(eval(solution))
self.time_display.setText('Time: {0:.5f} seconds'.format(elapsed))
def reset(self):
for obj in self.boxes:
obj.setText('')
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
window = MyApp()
window.show()
sys.exit(app.exec_())
``` |
{
"source": "jmaslek/etf_scraper",
"score": 3
} |
#### File: jmaslek/etf_scraper/scrape_data.py
```python
import requests
import pandas as pd
import json
from bs4 import BeautifulSoup as bs
from bs4 import BeautifulSoup
import numpy as np
def assets_to_num(x):
x = x.strip("$")
if x.endswith("M"):
return float(x.strip("M"))
elif x.endswith("B"):
return float(x.strip("B")) * 1000
elif x.endswith("K"):
return float(x.strip("K")) / 1000
else:
return np.nan
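# Quick illustration of the conversion (results are in millions of dollars):
# assets_to_num("$1.5B") -> 1500.0, assets_to_num("$250M") -> 250.0,
# assets_to_num("$500K") -> 0.5, and anything else (e.g. "n/a") -> nan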
r = requests.get("https://stockanalysis.com/etf/", headers={"User-Agent":"Mozilla/5.0"})
soup2 = BeautifulSoup(r.text,"html.parser")
script = soup2.find("script",{"id":"__NEXT_DATA__"})
etf_symbols = pd.DataFrame(json.loads(script.text)["props"]["pageProps"]["stocks"]).s.to_list()
df = pd.DataFrame()
for etf in etf_symbols:
try:
r = requests.get(f"https://stockanalysis.com/etf/{etf}", headers={"User-Agent":"Mozilla/5.0"})
soup = bs(r.text, "html.parser") # %%
tables = soup.findAll("table")
texts = []
for tab in tables[:2]:
entries = tab.findAll("td")
for ent in entries:
texts.append(ent.get_text())
vars = [0, 2, 4, 6, 8, 10, 12, 18, 20, 22, 26, 28, 30, 32]
vals = [idx + 1 for idx in vars]
columns = [texts[idx] for idx in vars]
data = [texts[idx] for idx in vals]
df[etf] = data
except Exception as e:
print(etf)
df.index = columns
df = df.T
df.columns = ['Assets',
'NAV',
'Expense',
'PE',
'SharesOut',
'Div',
'DivYield',
'Volume',
'Open',
'PrevClose',
'YrLow',
'YrHigh',
'Beta',
'N_Hold']
df["Assets"] = df["Assets"].apply(lambda x: assets_to_num(x) if isinstance(x,str) else np.nan)
df["NAV"] = df["NAV"].apply(lambda x: float(x.strip("$")) if x not in ["n/a","-"] else np.nan)
df["Expense"] = df["Expense"].apply(lambda x: float(x.strip("%")) if x not in ["n/a","-"] else np.nan)
df["PE"] = df["PE"].apply(lambda x: float(x) if x not in ["n/a","-"] else np.nan)
df["SharesOut"] = df["SharesOut"].apply(lambda x: assets_to_num(x))
df["Div"] = df["Div"].apply(lambda x: float(x.strip("$")) if x not in ["n/a","-"] else np.nan)
df["DivYield"] = df["DivYield"].apply(lambda x: float(x.strip("%").replace(",","")) if x not in ["n/a","-"] else np.nan)
df["Volume"] = df["Volume"].apply(lambda x: float(x.replace(",","")) if x not in ["n/a","-"] else np.nan)
df["PrevClose"] = df["PrevClose"].apply(lambda x: float(x.strip("$")) if x not in ["n/a","-"] else np.nan)
df["Open"] = df["Open"].apply(lambda x: float(x.strip("$")) if x not in ["n/a","-"] else np.nan)
df["PrevClose"] = df["PrevClose"].apply(lambda x: float(x) if x not in ["n/a","-"] else np.nan)
df["YrLow"] = df["YrLow"].apply(lambda x: float(x) if x not in ["n/a","-"] else np.nan)
df["YrHigh"] = df["YrHigh"].apply(lambda x: float(x) if x not in ["n/a","-"] else np.nan)
df["Beta"] = df["Beta"].apply(lambda x: float(x) if x not in ["n/a","-"] else np.nan)
df["N_Hold"] = df["N_Hold"].apply(lambda x: float(x) if x not in ["n/a","-"] else np.nan)
df.to_csv("etf_overviews.csv")
``` |
{
"source": "jmaslek/OpenBBTerminal",
"score": 3
} |
#### File: openbb_terminal/econometrics/econometrics_view.py
```python
__docformat__ = "numpy"
import logging
import os
from itertools import combinations
from typing import Dict, Any, Optional, List
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import register_matplotlib_converters
from openbb_terminal.config_plot import PLOT_DPI
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import (
export_data,
plot_autoscale,
)
from openbb_terminal.helper_funcs import (
print_rich_table,
)
from openbb_terminal.rich_config import console
from openbb_terminal.econometrics import econometrics_model
from openbb_terminal.config_terminal import theme
logger = logging.getLogger(__name__)
register_matplotlib_converters()
@log_start_end(log=logger)
def show_options(
datasets: Dict[str, pd.DataFrame],
dataset_name: str = None,
export: str = "",
):
"""Plot custom data
Parameters
----------
datasets: dict
The loaded in datasets
dataset_name: str
The name of the dataset you wish to show options for
export: str
Format to export image
"""
if not datasets:
console.print(
"Please load in a dataset by using the 'load' command before using this feature."
)
else:
option_tables = econometrics_model.get_options(datasets, dataset_name)
for dataset, data_values in option_tables.items():
print_rich_table(
data_values,
headers=list(data_values.columns),
show_index=False,
title=f"Options for dataset: '{dataset}'",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
f"{dataset}_options",
data_values.set_index("column"),
)
@log_start_end(log=logger)
def display_plot(
data: Dict[str, pd.DataFrame],
export: str = "",
external_axes: Optional[List[plt.axes]] = None,
):
"""Plot data from a dataset
Parameters
----------
data: Dict[str: pd.DataFrame]
Dictionary with key being dataset.column and dataframes being values
export: str
Format to export image
external_axes:Optional[List[plt.axes]]
External axes to plot on
"""
    for dataset_col in list(data):  # copy the keys so entries can be removed while iterating
if isinstance(data[dataset_col].index, pd.MultiIndex):
console.print(
"The index appears to be a multi-index. "
"Therefore, it is not possible to plot the data."
)
del data[dataset_col]
# Check that there's at least a valid dataframe
if data:
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
else:
ax = external_axes[0]
for dataset_col in data:
if isinstance(data[dataset_col], pd.Series):
ax.plot(data[dataset_col].index, data[dataset_col].values)
elif isinstance(data[dataset_col], pd.DataFrame):
ax.plot(data[dataset_col])
theme.style_primary_axis(ax)
if external_axes is None:
theme.visualize_output()
ax.legend(list(data.keys()))
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"plot",
)
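# Illustrative usage of display_plot (names are examples only): pass a dict of
# "dataset.column" -> series and, optionally, an existing matplotlib axis.
#   fig, ax = plt.subplots()
#   display_plot({"dataset.column": series}, external_axes=[ax])
# When external_axes is omitted, a new figure is created and styled via the theme.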
@log_start_end(log=logger)
def display_norm(
data: pd.Series,
dataset: str,
column: str,
plot: bool = False,
export: str = "",
external_axes: Optional[List[plt.axes]] = None,
):
"""Determine the normality of a timeseries.
Parameters
----------
data: pd.Series
Series of custom data
dataset: str
Dataset name
column: str
Column for y data
plot : bool
Whether you wish to plot a histogram
export: str
Format to export data.
external_axes: Optional[List[plt.axes]]
External axes to plot on
"""
if data.dtype not in [int, float]:
console.print(
f"The column type must be numeric. The {column}-{dataset} type is {data.dtype}. "
f"Consider using the command 'type' to change this.\n"
)
else:
results = econometrics_model.get_normality(data)
print_rich_table(
results,
headers=list(results.columns),
show_index=True,
title=f"Normality test from dataset '{dataset}' of '{column}'",
)
if plot:
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
else:
ax = external_axes[0]
ax.hist(data, bins=100)
ax.set_title(f"Histogram from dataset '{dataset}' of '{column}'")
theme.style_primary_axis(ax)
if external_axes is None:
theme.visualize_output()
if export:
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
f"{column}_{dataset}_norm",
results,
)
else:
console.print()
@log_start_end(log=logger)
def display_root(
df: pd.Series,
dataset_name: str,
column_name: str,
fuller_reg: str,
kpss_reg: str,
export: str = "",
):
"""Determine the normality of a timeseries.
Parameters
----------
df : pd.Series
Series of target variable
dataset_name: str
Name of the dataset
column_name: str
Name of the column
fuller_reg : str
Type of regression of ADF test
kpss_reg : str
Type of regression for KPSS test
export: str
Format to export data.
"""
if df.dtype not in [int, float]:
console.print(
f"The column type must be numeric. The {column_name}-{dataset_name} "
f"type is {df.dtype}. Consider using the command 'type' to change this.\n"
)
else:
results = econometrics_model.get_root(df, fuller_reg, kpss_reg)
print_rich_table(
results,
headers=list(results.columns),
show_index=True,
title=f"Unitroot from dataset '{dataset_name} of '{column_name}'",
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
f"{dataset_name}_{column_name}_root",
results,
)
@log_start_end(log=logger)
def display_granger(
time_series_y: pd.Series,
time_series_x: pd.Series,
lags: int = 3,
confidence_level: float = 0.05,
export: str = "",
):
"""Show granger tests
Parameters
----------
time_series_y : Series
The series you want to test Granger Causality for.
time_series_x : Series
The series that you want to test whether it Granger-causes time_series_y
lags : int
The amount of lags for the Granger test. By default, this is set to 3.
confidence_level: float
The confidence level you wish to use. By default, this is set to 0.05.
export : str
Format to export data
"""
if time_series_y.dtype not in [int, float]:
console.print(
f"The time series {time_series_y.name} needs to be numeric but is type {time_series_y.dtype}. "
f"Consider using the command 'type' to change this."
)
elif time_series_x.dtype not in [int, float]:
console.print(
f"The time series {time_series_x.name} needs to be numeric but is type {time_series_x.dtype}. "
f"Consider using the command 'type' to change this."
)
else:
granger = econometrics_model.get_granger_causality(
time_series_y, time_series_x, lags
)
for test in granger[lags][0]:
# As ssr_chi2test and lrtest have one less value in the tuple, we fill
# this value with a '-' to allow the conversion to a DataFrame
if len(granger[lags][0][test]) != 4:
pars = granger[lags][0][test]
granger[lags][0][test] = (pars[0], pars[1], "-", pars[2])
granger_df = pd.DataFrame(
granger[lags][0], index=["F-test", "P-value", "Count", "Lags"]
).T
print_rich_table(
granger_df,
headers=list(granger_df.columns),
show_index=True,
title=f"Granger Causality Test [Y: {time_series_y.name} | X: {time_series_x.name} | Lags: {lags}]",
)
result_ftest = round(granger[lags][0]["params_ftest"][1], 3)
if result_ftest > confidence_level:
console.print(
f"As the p-value of the F-test is {result_ftest}, we can not reject the null hypothesis at "
f"the {confidence_level} confidence level.\n"
)
else:
console.print(
f"As the p-value of the F-test is {result_ftest}, we can reject the null hypothesis at "
f"the {confidence_level} confidence level and find the Series '{time_series_x.name}' "
f"to Granger-cause the Series '{time_series_y.name}'\n"
)
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
f'{time_series_y.name.replace(".","-")}_{time_series_x.name.replace(".","-")}_granger',
granger_df,
)
@log_start_end(log=logger)
def display_cointegration_test(
datasets: Dict[pd.Series, Any],
significant: bool = False,
plot: bool = False,
export: str = "",
external_axes: Optional[List[plt.axes]] = None,
):
"""Estimates long-run and short-run cointegration relationship for series y and x and apply
the two-step Engle & Granger test for cointegration.
Uses a 2-step process to first estimate coefficients for the long-run relationship
y_t = c + gamma * x_t + z_t
and then the short-term relationship,
y_t - y_(t-1) = alpha * z_(t-1) + epsilon_t,
with z the found residuals of the first equation.
Then tests co-integration with the Dickey-Fuller phi=1 vs phi < 1 in
z_t = phi * z_(t-1) + eta_t
    If this implies phi < 1, the z series is concluded to be stationary, and thus
    the series y and x are concluded to be cointegrated.
Parameters
----------
datasets: Dict[pd.Series, Any]
All time series to perform co-integration tests on.
significant: float
Show only companies that have p-values lower than this percentage
plot: bool
Whether you wish to plot the z-values of all pairs.
export : str
Format to export data
external_axes:Optional[List[plt.axes]]
External axes to plot on
"""
pairs = list(combinations(datasets.keys(), 2))
result: Dict[str, list] = dict()
z_values: Dict[str, pd.Series] = dict()
for x, y in pairs:
if sum(datasets[y].isnull()) > 0:
console.print(
f"The Series {y} has nan-values. Please consider dropping or filling these "
f"values with 'clean'."
)
elif sum(datasets[x].isnull()) > 0:
console.print(
f"The Series {x} has nan-values. Please consider dropping or filling these "
f"values with 'clean'."
)
elif not datasets[y].index.equals(datasets[x].index):
console.print(f"The Series {y} and {x} do not have the same index.")
else:
(
c,
gamma,
alpha,
z,
adfstat,
pvalue,
) = econometrics_model.get_engle_granger_two_step_cointegration_test(
datasets[x], datasets[y]
)
result[f"{x}/{y}"] = [c, gamma, alpha, adfstat, pvalue]
z_values[f"{x}/{y}"] = z
if result and z_values:
df = pd.DataFrame.from_dict(
result,
orient="index",
columns=["Constant", "Gamma", "Alpha", "Dickey-Fuller", "P Value"],
)
if significant:
console.print(
f"Only showing pairs that are statistically significant ({significant} > p-value)."
)
df = df[significant > df["P Value"]]
console.print()
print_rich_table(
df,
headers=list(df.columns),
show_index=True,
index_name="Pairs",
title="Cointegration Tests",
)
if plot:
if external_axes is None:
_, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
else:
ax = external_axes[0]
for pair, values in z_values.items():
ax.plot(values, label=pair)
ax.legend()
ax.set_title("Error correction terms")
theme.style_primary_axis(ax)
if external_axes is None:
theme.visualize_output()
export_data(
export,
os.path.dirname(os.path.abspath(__file__)),
"coint",
df,
)
```
#### File: openbb_terminal/cryptocurrency/test_cryptocurrency_helpers.py
```python
import json
import pandas as pd
import pytest
from pycoingecko import CoinGeckoAPI
from openbb_terminal.cryptocurrency.cryptocurrency_helpers import (
read_data_file,
_load_coin_map,
plot_chart,
load,
load_ta_data,
prepare_all_coins_df,
load_coins_list,
_create_closest_match_df,
)
# pylint: disable=unused-import
base = "openbb_terminal.cryptocurrency."
def test_load_coin_map():
with pytest.raises(TypeError):
_load_coin_map("test.test")
def test_read_data_file(recorder):
file = read_data_file("coinbase_gecko_map.json")
recorder.capture(file)
def test_read_data_file_invalid():
with pytest.raises(TypeError):
read_data_file("sample.bad")
def test_load_coins_list(recorder):
value = load_coins_list("coinbase_gecko_map.json", True)
recorder.capture(value)
def test_load_coins_list_invalid():
with pytest.raises(TypeError):
load_coins_list("bad.bad")
def test_create_closest_match_df(recorder):
df = pd.DataFrame({"id": ["btc", "eth"], "index": [1, 2]})
value = _create_closest_match_df("btc", df, 5, 0.2)
recorder.capture(value)
@pytest.mark.parametrize(
"coin, interval, source",
[
("badcoin", "1day", "cg"),
("BTC", "1hour", "cg"),
("BTC", "1hour", "cp"),
("BTC", "1hour", "cp"),
],
)
def test_load_none(coin, interval, source):
assert load("BTC", vs=coin, interval=interval, source=source) == (
None,
None,
None,
None,
None,
None,
)
@pytest.mark.parametrize(
"coin, load_ta", [("BTC", True), ("ZTH", False), ("BTC", False)]
)
def test_load_cg(coin, load_ta):
load(coin, source="cg", should_load_ta_data=load_ta)
def test_load_cg_invalid():
load("ZTH", source="cg")
@pytest.fixture(name="get_bitcoin")
def fixture_get_bitcoin(mocker):
# pylint: disable=unused-argument
mock_load = mocker.patch(
base
+ "due_diligence.pycoingecko_model.CoinGeckoAPI.get_coin_market_chart_by_id"
)
with open(
"tests/openbb_terminal/cryptocurrency/json/test_cryptocurrency_helpers/btc_usd_test_data.json",
encoding="utf8",
) as f:
sample_return = json.load(f)
mock_load.return_value = sample_return
coin, _, symbol, _, _, _ = load(coin="BTC", source="cp")
return coin, symbol
# pylint: disable=R0904
@pytest.mark.vcr
def test_coin_api_load(get_bitcoin):
"""
Mock load function through get_coin_market_chart_by_id.
Mock returns a dict saved as .json
"""
coin, _ = get_bitcoin
assert coin == "btc-bitcoin"
@pytest.mark.vcr
def test_coin_api_load_df_for_ta(get_bitcoin, mocker):
"""
Mock load function through get_coin_market_chart_by_id.
Mock returns a dict saved as .json
"""
mock_load = mocker.patch(
base
+ "due_diligence.pycoingecko_model.CoinGeckoAPI.get_coin_market_chart_by_id"
)
_, symbol = get_bitcoin
coin_map_df = prepare_all_coins_df().set_index("Symbol").loc[symbol.upper()].iloc[0]
with open(
"tests/openbb_terminal/cryptocurrency/json/test_cryptocurrency_helpers/btc_usd_test_data.json",
encoding="utf8",
) as f:
sample_return = json.load(f)
mock_load.return_value = sample_return
mock_return, vs = load_ta_data(
coin_map_df=coin_map_df,
source="cg",
currency="usd",
days=30,
)
assert mock_return.shape == (31, 4)
assert vs == "usd"
@pytest.mark.record_stdout
@pytest.mark.vcr
def test_get_coins():
"""Test that pycoingecko retrieves the major coins"""
coins = CoinGeckoAPI().get_coins()
bitcoin_list = [coin["id"] for coin in coins]
test_coins = ["bitcoin", "ethereum", "dogecoin"]
for test in test_coins:
assert test in bitcoin_list
@pytest.mark.vcr
@pytest.mark.record_stdout
def test_coin_chart(get_bitcoin):
# pylint: disable=unused-argument
_, symbol = get_bitcoin
coin_map_df = prepare_all_coins_df().set_index("Symbol").loc[symbol.upper()].iloc[0]
plot_chart(coin_map_df=coin_map_df, source="cg", currency="usd", days=30)
``` |
{
"source": "jmason-ebi/impc-etl",
"score": 2
} |
#### File: load/solr/stats_results_mapper.py
```python
import base64
import gzip
import json
import sys
from typing import List
from pyspark.sql import DataFrame, SparkSession
from pyspark.sql.functions import (
array_contains,
array_sort,
col,
explode,
lit,
split,
collect_set,
to_json,
when,
udf,
expr,
struct,
lower,
regexp_replace,
size,
array,
regexp_extract,
flatten,
array_distinct,
first,
from_json,
explode_outer,
md5,
arrays_zip,
concat,
max,
min,
least,
greatest,
concat_ws,
collect_list,
)
from pyspark.sql.window import Window
from pyspark.sql.types import (
StructType,
StructField,
StringType,
IntegerType,
DoubleType,
ArrayType,
Row,
)
from impc_etl.shared.utils import convert_to_row
ONTOLOGY_STATS_MAP = {
"mp_term_name": "term",
"top_level_mp_term_id": "top_level_ids",
"top_level_mp_term_name": "top_level_terms",
"intermediate_mp_term_id": "intermediate_ids",
"intermediate_mp_term_name": "intermediate_terms",
}
BAD_MP_MAP = {
'["MP:0000592","MP:0000592"]': "MP:0000592",
'["MP:0003956","MP:0003956"]': "MP:0003956",
'["MP:0000589","MP:0000589"]': "MP:0000589",
'["MP:0010101","MP:0004649"]': "MP:0004649",
'["MP:0004650","MP:0004647"]': "MP:0004650",
}
PIPELINE_STATS_MAP = {
"mp_term_id_options": "mp_id",
"mp_term_name_options": "mp_term",
"top_level_mp_id_options": "top_level_mp_id",
"top_level_mp_term_options": "top_level_mp_term",
"parameter_stable_key": "parameter_stable_key",
"procedure_name": "procedure_name",
"pipeline_stable_key": "pipeline_stable_key",
"procedure_stable_key": "procedure_stable_key",
}
THREEI_STATS_MAP = {
"colony_id": "Colony.Prefixes",
"parameter_name": "Parameter.Name",
"marker_symbol": "Gene",
"procedure_name": "Procedure.Name",
"procedure_stable_id": "Procedure.Id",
"parameter_stable_id": "Parameter.Id",
"classification_tag": "Call.Type",
"mp_id": "Annotation.Calls",
"allele_name": "Construct",
"zygosity": "Genotype",
"combine_sex_call": "Combine.Gender.Call",
"sex": "Gender",
}
OBSERVATIONS_STATS_MAP = {
"genetic_background": "genetic_background",
"production_center": "production_center",
"project_name": "project_name",
"project_fullname": "project_name",
"strain_name": "strain_name",
"life_stage_name": "life_stage_name",
"resource_name": "datasource_name",
"resource_fullname": "datasource_name",
"life_stage_acc": "life_stage_acc",
"experiment_sex": "sex",
"metadata": "metadata",
}
STATS_OBSERVATIONS_JOIN = [
"procedure_group",
"procedure_name",
"parameter_stable_id",
"phenotyping_center",
"pipeline_stable_id",
"colony_id",
"metadata_group",
"zygosity",
]
RAW_DATA_COLUMNS = [
"observations_body_weight",
"observations_date_of_experiment",
"observations_external_sample_id",
"observations_response",
"observations_sex",
"observations_data_points",
"observations_categories",
"observations_time_point",
"observations_discrete_point",
]
ALLELE_STATS_MAP = {"allele_name": "allele_name"}
STATS_RESULTS_COLUMNS = [
"doc_id",
"additional_information",
"allele_accession_id",
"allele_name",
"allele_symbol",
"batch_significant",
"both_mutant_count",
"both_mutant_diversity_in_response",
"both_mutant_mean",
"both_mutant_sd",
"both_mutant_unique_n",
"classification_tag",
"colony_id",
"data_type",
"effect_size",
"female_control_count",
"female_control_diversity_in_response",
"female_control_mean",
"female_control_sd",
"female_control_unique_n",
"female_ko_effect_p_value",
"female_ko_effect_stderr_estimate",
"female_ko_parameter_estimate",
"female_mutant_count",
"female_mutant_diversity_in_response",
"female_mutant_mean",
"female_mutant_sd",
"female_mutant_unique_n",
"female_percentage_change",
"female_pvalue_low_normal_vs_high",
"female_pvalue_low_vs_normal_high",
"female_effect_size_low_normal_vs_high",
"female_effect_size_low_vs_normal_high",
"genotype_effect_p_value",
"genotype_effect_parameter_estimate",
"genotype_effect_size_low_normal_vs_high",
"genotype_effect_size_low_vs_normal_high",
"genotype_effect_stderr_estimate",
"genotype_pvalue_low_normal_vs_high",
"genotype_pvalue_low_vs_normal_high",
"weight_effect_p_value",
"weight_effect_stderr_estimate",
"weight_effect_parameter_estimate",
"group_1_genotype",
"group_1_residuals_normality_test",
"group_2_genotype",
"group_2_residuals_normality_test",
"interaction_effect_p_value",
"interaction_significant",
"intercept_estimate",
"intercept_estimate_stderr_estimate",
"male_control_count",
"male_control_diversity_in_response",
"male_control_mean",
"male_control_sd",
"male_control_unique_n",
"male_ko_effect_p_value",
"male_ko_effect_stderr_estimate",
"male_ko_parameter_estimate",
"male_mutant_count",
"male_mutant_diversity_in_response",
"male_mutant_mean",
"male_mutant_sd",
"male_mutant_unique_n",
"male_percentage_change",
"male_pvalue_low_normal_vs_high",
"male_pvalue_low_vs_normal_high",
"male_effect_size_low_normal_vs_high",
"male_effect_size_low_vs_normal_high",
"marker_accession_id",
"marker_symbol",
"metadata",
"metadata_group",
"percentage_change",
"no_data_control_count",
"no_data_control_diversity_in_response",
"no_data_control_unique_n",
"no_data_mutant_count",
"no_data_mutant_diversity_in_response",
"no_data_mutant_unique_n",
"p_value",
"parameter_name",
"parameter_stable_id",
"phenotype_sex",
"phenotyping_center",
"pipeline_name",
"pipeline_stable_id",
"procedure_group",
"procedure_name",
"procedure_stable_id",
"procedure_stable_key",
"sex_effect_p_value",
"sex_effect_parameter_estimate",
"sex_effect_stderr_estimate",
"statistical_method",
"status",
"strain_accession_id",
"variance_significant",
"zygosity",
"mpath_term_id",
"mpath_term_name",
"mp_term_id",
"mp_term_name",
"mp_term_event",
"mp_term_sex",
"top_level_mp_term_id",
"top_level_mp_term_name",
"intermediate_mp_term_id",
"intermediate_mp_term_name",
"mp_term_id_options",
"mp_term_name_options",
"anatomy_term_id",
"anatomy_term_name",
"anatomy_term_event",
"anatomy_term_sex",
"top_level_anatomy_term_id",
"top_level_anatomy_term_name",
"intermediate_anatomy_term_id",
"intermediate_anatomy_term_name",
"anatomy_term_id_options",
"anatomy_term_name_options",
"parameter_stable_key",
"pipeline_stable_key",
"genetic_background",
"production_center",
"project_name",
"project_fullname",
"resource_name",
"resource_fullname",
"strain_name",
"life_stage_name",
"life_stage_acc",
"sex",
"significant",
"full_mp_term",
"male_effect_size",
"female_effect_size",
"observation_ids",
]
WINDOW_COLUMNS = [
"window_l_value",
"window_l_score",
"window_k_value",
"window_k_score",
"window_doe",
"window_min_obs_required",
"window_total_obs_or_weight",
"window_threshold",
"window_number_of_doe",
"window_doe_note",
"observations_window_weight",
]
# TODO: missing strain name and genetic background
def main(argv):
"""
Solr Core loader
:param list argv: the list elements should be:
    [1]: Open stats parquet file
    [2]: Observations parquet
    [3]: Ontology parquet
    [4]: Pipeline parquet
    [5]: Pipeline core parquet
    [6]: Allele parquet
    [7]: MP chooser JSON file
    [8]: Threei stats results file
    [9]: MPATH metadata file
    [10]: Raw data in output ("include", "exclude" or "bundled")
    [11]: Extract windowed data ("true"/"false")
    [12]: Output Path
"""
open_stats_parquet_path = argv[1]
observations_parquet_path = argv[2]
ontology_parquet_path = argv[3]
pipeline_parquet_path = argv[4]
pipeline_core_parquet_path = argv[5]
allele_parquet_path = argv[6]
mp_chooser_path = argv[7]
threei_parquet_path = argv[8]
mpath_metadata_path = argv[9]
raw_data_in_output = argv[10]
extract_windowed_data = argv[11] == "true"
output_path = argv[12]
spark = SparkSession.builder.getOrCreate()
open_stats_complete_df = spark.read.parquet(open_stats_parquet_path)
ontology_df = spark.read.parquet(ontology_parquet_path)
allele_df = spark.read.parquet(allele_parquet_path)
pipeline_df = spark.read.parquet(pipeline_parquet_path)
pipeline_core_df = spark.read.parquet(pipeline_core_parquet_path)
observations_df = spark.read.parquet(observations_parquet_path)
threei_df = spark.read.csv(threei_parquet_path, header=True)
mpath_metadata_df = spark.read.csv(mpath_metadata_path, header=True)
mp_chooser_txt = spark.sparkContext.wholeTextFiles(mp_chooser_path).collect()[0][1]
mp_chooser = json.loads(mp_chooser_txt)
open_stats_df = get_stats_results_core(
open_stats_complete_df,
ontology_df,
allele_df,
pipeline_df,
pipeline_core_df,
observations_df,
threei_df,
mpath_metadata_df,
mp_chooser,
extract_windowed_data,
raw_data_in_output,
)
if extract_windowed_data:
stats_results_column_list = STATS_RESULTS_COLUMNS + [
col_name
for col_name in WINDOW_COLUMNS
if col_name != "observations_window_weight"
]
stats_results_df = open_stats_df.select(*stats_results_column_list)
elif raw_data_in_output == "bundled":
stats_results_column_list = STATS_RESULTS_COLUMNS + ["raw_data"]
stats_results_df = open_stats_df.select(*stats_results_column_list)
stats_results_df = stats_results_df.repartition(20000)
else:
stats_results_df = open_stats_df.select(*STATS_RESULTS_COLUMNS)
for col_name in stats_results_df.columns:
if dict(stats_results_df.dtypes)[col_name] == "null":
stats_results_df = stats_results_df.withColumn(
col_name, lit(None).astype(StringType())
)
stats_results_df.write.parquet(output_path)
if raw_data_in_output == "include":
raw_data_df = open_stats_df.select("doc_id", "raw_data")
raw_data_df.distinct().write.parquet(output_path + "_raw_data")
def get_stats_results_core(
open_stats_complete_df,
ontology_df,
allele_df,
pipeline_df,
pipeline_core_df,
observations_df,
threei_df,
mpath_metadata_df,
mp_chooser,
extract_windowed_data=False,
raw_data_in_output="include",
):
threei_df = standardize_threei_schema(threei_df)
embryo_stat_packets = open_stats_complete_df.where(
(
(col("procedure_group").contains("IMPC_GPL"))
| (col("procedure_group").contains("IMPC_GEL"))
| (col("procedure_group").contains("IMPC_GPM"))
| (col("procedure_group").contains("IMPC_GEM"))
| (col("procedure_group").contains("IMPC_GPO"))
| (col("procedure_group").contains("IMPC_GEO"))
| (col("procedure_group").contains("IMPC_GPP"))
| (col("procedure_group").contains("IMPC_GEP"))
)
)
open_stats_df = open_stats_complete_df.where(
~(
col("procedure_stable_id").contains("IMPC_FER_001")
| (col("procedure_stable_id").contains("IMPC_VIA_001"))
| (col("procedure_stable_id").contains("IMPC_VIA_002"))
| (col("procedure_group").contains("_PAT"))
| (col("procedure_group").contains("_EVL"))
| (col("procedure_group").contains("_EVM"))
| (col("procedure_group").contains("_EVO"))
| (col("procedure_group").contains("_EVP"))
| (col("procedure_group").contains("_ELZ"))
| (col("procedure_name").startswith("Histopathology"))
| (col("procedure_group").contains("IMPC_GPL"))
| (col("procedure_group").contains("IMPC_GEL"))
| (col("procedure_group").contains("IMPC_GPM"))
| (col("procedure_group").contains("IMPC_GEM"))
| (col("procedure_group").contains("IMPC_GPO"))
| (col("procedure_group").contains("IMPC_GEO"))
| (col("procedure_group").contains("IMPC_GPP"))
| (col("procedure_group").contains("IMPC_GEP"))
)
)
fertility_stats = _fertility_stats_results(observations_df, pipeline_df)
for col_name in open_stats_df.columns:
if col_name not in fertility_stats.columns:
fertility_stats = fertility_stats.withColumn(col_name, lit(None))
fertility_stats = fertility_stats.select(open_stats_df.columns)
open_stats_df = open_stats_df.union(fertility_stats)
viability_stats = _viability_stats_results(observations_df, pipeline_df)
for col_name in open_stats_df.columns:
if col_name not in viability_stats.columns:
viability_stats = viability_stats.withColumn(col_name, lit(None))
viability_stats = viability_stats.select(open_stats_df.columns)
open_stats_df = open_stats_df.union(viability_stats)
gross_pathology_stats = _gross_pathology_stats_results(observations_df)
for col_name in open_stats_df.columns:
if col_name not in gross_pathology_stats.columns:
gross_pathology_stats = gross_pathology_stats.withColumn(
col_name, lit(None)
)
gross_pathology_stats = gross_pathology_stats.select(open_stats_df.columns)
open_stats_df = open_stats_df.union(gross_pathology_stats)
histopathology_stats = _histopathology_stats_results(observations_df)
for col_name in open_stats_df.columns:
if col_name not in histopathology_stats.columns:
histopathology_stats = histopathology_stats.withColumn(col_name, lit(None))
histopathology_stats = histopathology_stats.select(open_stats_df.columns).distinct()
open_stats_df = open_stats_df.union(histopathology_stats)
embryo_viability_stats = _embryo_viability_stats_results(
observations_df, pipeline_df
)
for col_name in open_stats_df.columns:
if col_name not in embryo_viability_stats.columns:
embryo_viability_stats = embryo_viability_stats.withColumn(
col_name, lit(None)
)
embryo_viability_stats = embryo_viability_stats.select(open_stats_df.columns)
open_stats_df = open_stats_df.union(embryo_viability_stats)
embryo_stats = _embryo_stats_results(
observations_df, pipeline_df, embryo_stat_packets
)
for col_name in open_stats_df.columns:
if col_name not in embryo_stats.columns:
embryo_stats = embryo_stats.withColumn(col_name, lit(None))
embryo_stats = embryo_stats.select(open_stats_df.columns)
open_stats_df = open_stats_df.union(embryo_stats)
observations_metadata_df = observations_df.select(
STATS_OBSERVATIONS_JOIN + list(set(OBSERVATIONS_STATS_MAP.values()))
).dropDuplicates()
observations_metadata_df = observations_metadata_df.groupBy(
*[
col_name
for col_name in observations_metadata_df.columns
if col_name != "sex"
]
).agg(collect_set("sex").alias("sex"))
    aggregation_expression = []
    for col_name in list(set(OBSERVATIONS_STATS_MAP.values())):
        if col_name not in ["datasource_name", "production_center"]:
            if col_name == "sex":
                aggregation_expression.append(
                    array_distinct(flatten(collect_set(col_name))).alias(col_name)
                )
            elif col_name in ["strain_name", "genetic_background"]:
                aggregation_expression.append(first(col(col_name)).alias(col_name))
            else:
                aggregation_expression.append(collect_set(col_name).alias(col_name))
    observations_metadata_df = observations_metadata_df.groupBy(
        STATS_OBSERVATIONS_JOIN + ["datasource_name", "production_center"]
    ).agg(*aggregation_expression)
open_stats_df = map_to_stats(
open_stats_df,
observations_metadata_df,
STATS_OBSERVATIONS_JOIN,
OBSERVATIONS_STATS_MAP,
"observation",
)
open_stats_df = open_stats_df.withColumn(
"pipeline_stable_id",
when(col("procedure_stable_id") == "ESLIM_022_001", lit("ESLIM_001")).otherwise(
col("pipeline_stable_id")
),
)
open_stats_df = open_stats_df.withColumn(
"procedure_stable_id",
when(
col("procedure_stable_id").contains("~"),
split(col("procedure_stable_id"), "~"),
).otherwise(array(col("procedure_stable_id"))),
)
open_stats_df = open_stats_df.alias("stats")
mp_ancestors_df = ontology_df.select(
"id",
struct("parent_ids", "intermediate_ids", "top_level_ids").alias("ancestors"),
)
mp_ancestors_df_1 = mp_ancestors_df.alias("mp_term_1")
mp_ancestors_df_2 = mp_ancestors_df.alias("mp_term_2")
open_stats_df = open_stats_df.join(
mp_ancestors_df_1,
(expr("mp_term[0].term_id") == col("mp_term_1.id")),
"left_outer",
)
open_stats_df = open_stats_df.join(
mp_ancestors_df_2,
(expr("mp_term[1].term_id") == col("mp_term_2.id")),
"left_outer",
)
mp_term_schema = ArrayType(
StructType(
[
StructField("event", StringType(), True),
StructField("otherPossibilities", StringType(), True),
StructField("sex", StringType(), True),
StructField("term_id", StringType(), True),
]
)
)
select_collapsed_mp_term_udf = udf(
lambda mp_term_array, pipeline, procedure_group, parameter, data_type, first_term_ancestors, second_term_ancestors: _select_collapsed_mp_term(
mp_term_array,
pipeline,
procedure_group,
parameter,
mp_chooser,
data_type,
first_term_ancestors,
second_term_ancestors,
),
mp_term_schema,
)
open_stats_df = open_stats_df.withColumn(
"collapsed_mp_term",
when(
expr(
"exists(mp_term.sex, sex -> sex = 'male') AND exists(mp_term.sex, sex -> sex = 'female')"
)
& (col("data_type").isin(["categorical", "unidimensional"])),
select_collapsed_mp_term_udf(
"mp_term",
"pipeline_stable_id",
"procedure_group",
"parameter_stable_id",
"data_type",
"mp_term_1.ancestors",
"mp_term_2.ancestors",
),
).otherwise(col("mp_term")),
)
open_stats_df = open_stats_df.drop("mp_term_1.*", "mp_term_2.*")
open_stats_df = open_stats_df.withColumn(
"collapsed_mp_term", expr("collapsed_mp_term[0]")
)
open_stats_df = open_stats_df.withColumn("significant", lit(False))
open_stats_df = open_stats_df.join(
threei_df,
[
"resource_name",
"colony_id",
"marker_symbol",
"procedure_stable_id",
"parameter_stable_id",
"zygosity",
],
"left_outer",
)
open_stats_df = map_three_i(open_stats_df)
open_stats_df = open_stats_df.withColumn(
"collapsed_mp_term",
when(
col("threei_collapsed_mp_term").isNotNull(), col("threei_collapsed_mp_term")
).otherwise(col("collapsed_mp_term")),
)
open_stats_df = open_stats_df.drop("threei_collapsed_mp_term")
open_stats_df = open_stats_df.withColumn(
"mp_term_id", regexp_replace("collapsed_mp_term.term_id", " ", "")
)
for bad_mp in BAD_MP_MAP.keys():
open_stats_df = open_stats_df.withColumn(
"mp_term_id",
when(col("mp_term_id") == bad_mp, lit(BAD_MP_MAP[bad_mp])).otherwise(
col("mp_term_id")
),
)
open_stats_df = open_stats_df.withColumn(
"mp_term_event", col("collapsed_mp_term.event")
)
open_stats_df = open_stats_df.withColumn(
"mp_term_sex", col("collapsed_mp_term.sex")
)
open_stats_df = open_stats_df.withColumnRenamed("mp_term", "full_mp_term")
open_stats_df = open_stats_df.withColumn(
"full_mp_term",
when(
col("full_mp_term").isNull() & col("collapsed_mp_term").isNotNull(),
array(col("collapsed_mp_term")),
)
.when(
col("full_mp_term").isNull() & col("collapsed_mp_term").isNull(), lit(None)
)
.otherwise(col("full_mp_term")),
)
if extract_windowed_data:
stats_results_column_list = (
STATS_RESULTS_COLUMNS + WINDOW_COLUMNS + RAW_DATA_COLUMNS
)
elif raw_data_in_output == "exclude":
stats_results_column_list = STATS_RESULTS_COLUMNS
else:
stats_results_column_list = STATS_RESULTS_COLUMNS + RAW_DATA_COLUMNS
for col_name in stats_results_column_list:
if col_name not in open_stats_df.columns:
open_stats_df = open_stats_df.withColumn(col_name, lit(None))
ontology_df = ontology_df.withColumnRenamed("id", "mp_term_id")
open_stats_df = map_to_stats(
open_stats_df, ontology_df, ["mp_term_id"], ONTOLOGY_STATS_MAP, "ontology"
)
pipeline_core_join = [
"parameter_stable_id",
"pipeline_stable_id",
"procedure_stable_id",
]
pipeline_core_df = (
pipeline_core_df.select(
[
col_name
for col_name in pipeline_core_df.columns
if col_name in pipeline_core_join
or col_name in PIPELINE_STATS_MAP.values()
]
)
.groupBy(
[
"parameter_stable_id",
"pipeline_stable_id",
"procedure_stable_id",
"pipeline_stable_key",
]
)
.agg(
*[
array_distinct(flatten(collect_set(col_name))).alias(col_name)
if col_name
in ["mp_id", "mp_term", "top_level_mp_id", "top_level_mp_term"]
else collect_set(col_name).alias(col_name)
for col_name in list(set(PIPELINE_STATS_MAP.values()))
if col_name != "pipeline_stable_key"
]
)
.dropDuplicates()
)
pipeline_core_df = pipeline_core_df.withColumnRenamed(
"procedure_stable_id", "proc_id"
)
pipeline_core_df = pipeline_core_df.withColumn(
"procedure_stable_id", array(col("proc_id"))
)
# Fix for VIA_002 missing mp terms
pipeline_core_df = _add_via_002_mp_term_options(pipeline_core_df)
open_stats_df = map_to_stats(
open_stats_df,
pipeline_core_df,
pipeline_core_join,
PIPELINE_STATS_MAP,
"impress",
)
open_stats_df = open_stats_df.withColumn(
"top_level_mp_term_id",
when(
col("top_level_mp_term_id").isNull(), col("top_level_mp_id_options")
).otherwise(col("top_level_mp_term_id")),
)
open_stats_df = open_stats_df.withColumn(
"top_level_mp_term_name",
when(
col("top_level_mp_term_name").isNull(), col("top_level_mp_term_options")
).otherwise(col("top_level_mp_term_name")),
)
allele_df = allele_df.select(
["allele_symbol"] + list(ALLELE_STATS_MAP.values())
).dropDuplicates()
open_stats_df = map_to_stats(
open_stats_df, allele_df, ["allele_symbol"], ALLELE_STATS_MAP, "allele"
)
open_stats_df = open_stats_df.withColumn("sex", col("mp_term_sex"))
open_stats_df = open_stats_df.withColumn(
"phenotype_sex",
when(col("phenotype_sex").isNull(), lit(None))
.when(
col("phenotype_sex").contains("Both sexes included"),
array(lit("male"), lit("female")),
)
.otherwise(
array(
lower(
regexp_extract(
col("phenotype_sex"),
r"Only one sex included in the analysis; (.*)\[.*\]",
1,
)
)
)
),
)
open_stats_df = open_stats_df.withColumn(
"phenotype_sex",
when(
col("phenotype_sex").isNull() & col("mp_term_sex").isNotNull(),
when(
col("mp_term_sex") == "not_considered",
array(lit("male"), lit("female")),
).otherwise(array(col("mp_term_sex"))),
).otherwise(col("phenotype_sex")),
)
open_stats_df = open_stats_df.withColumn(
"zygosity",
when(col("zygosity") == "homozygous", lit("homozygote")).otherwise(
col("zygosity")
),
)
open_stats_df = map_ontology_prefix(open_stats_df, "MA:", "anatomy_")
open_stats_df = map_ontology_prefix(open_stats_df, "EMAP:", "anatomy_")
open_stats_df = map_ontology_prefix(open_stats_df, "EMAPA:", "anatomy_")
open_stats_df = open_stats_df.withColumn(
"significant",
when(col("mp_term_id").isNotNull(), lit(True)).otherwise(lit(False)),
)
open_stats_df = open_stats_df.withColumn(
"p_value",
when(
col("statistical_method").startswith("Reference Range"),
least(
col("female_pvalue_low_normal_vs_high"),
col("female_pvalue_low_vs_normal_high"),
col("male_pvalue_low_normal_vs_high"),
col("male_pvalue_low_vs_normal_high"),
col("genotype_pvalue_low_normal_vs_high"),
col("genotype_pvalue_low_vs_normal_high"),
),
).otherwise(col("p_value")),
)
open_stats_df = open_stats_df.withColumn(
"effect_size",
when(
col("statistical_method").startswith("Reference Range"),
greatest(
col("female_effect_size_low_normal_vs_high"),
col("female_effect_size_low_vs_normal_high"),
col("male_effect_size_low_normal_vs_high"),
col("male_effect_size_low_vs_normal_high"),
col("genotype_effect_size_low_normal_vs_high"),
col("genotype_effect_size_low_vs_normal_high"),
),
).otherwise(col("effect_size")),
)
open_stats_df = map_ontology_prefix(open_stats_df, "MPATH:", "mpath_")
mpath_metadata_df = mpath_metadata_df.select(
col("acc").alias("mpath_term_id"), col("name").alias("mpath_metadata_term_name")
).distinct()
open_stats_df = open_stats_df.join(mpath_metadata_df, "mpath_term_id", "left_outer")
open_stats_df = open_stats_df.withColumn(
"mpath_term_name", col("mpath_metadata_term_name")
)
open_stats_df = open_stats_df.withColumn(
"metadata",
expr("transform(metadata, metadata_values -> concat_ws('|', metadata_values))"),
)
open_stats_df = open_stats_df.withColumn(
"significant",
when(col("data_type") == "time_series", lit(False)).otherwise(
col("significant")
),
)
open_stats_df = open_stats_df.withColumn(
"status",
when(col("data_type") == "time_series", lit("NotProcessed")).otherwise(
col("status")
),
)
open_stats_df = open_stats_df.withColumn(
"procedure_stable_id_str", concat_ws(",", "procedure_stable_id")
)
identifying_cols = [
"colony_id",
"pipeline_stable_id",
"procedure_stable_id_str",
"parameter_stable_id",
"phenotyping_center",
"production_center",
"metadata_group",
"zygosity",
"strain_accession_id",
"sex",
]
identifying_cols = [
when(col(col_name).isNotNull(), col(col_name)).otherwise(lit(""))
for col_name in identifying_cols
]
open_stats_df = open_stats_df.withColumn("doc_id", md5(concat(*identifying_cols)))
open_stats_df = open_stats_df.withColumnRenamed(
"observations_id", "observation_ids"
)
if raw_data_in_output == "include" or raw_data_in_output == "bundled":
specimen_dobs = (
observations_df.select("external_sample_id", "date_of_birth")
.dropDuplicates()
.collect()
)
specimen_dob_dict = [row.asDict() for row in specimen_dobs]
specimen_dob_dict = {
row["external_sample_id"]: row["date_of_birth"] for row in specimen_dob_dict
}
open_stats_df = _parse_raw_data(
open_stats_df,
extract_windowed_data,
specimen_dob_dict,
raw_data_in_output != "bundled",
)
open_stats_df = open_stats_df.withColumn(
"data_type",
when(
col("procedure_group").rlike(
"|".join(
[
"IMPC_GPL",
"IMPC_GEL",
"IMPC_GPM",
"IMPC_GEM",
"IMPC_GPO",
"IMPC_GEO",
"IMPC_GPP",
"IMPC_GEP",
]
)
)
& (col("data_type") == "categorical"),
lit("embryo"),
).otherwise(col("data_type")),
)
return open_stats_df
def _compress_and_encode(json_text):
if json_text is None:
return None
else:
return str(base64.b64encode(gzip.compress(bytes(json_text, "utf-8"))), "utf-8")
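# Minimal sketch of how to read the stored payload back; this helper is added
# for illustration only and is not called anywhere in the job.
def _decode_raw_data(encoded):
    if encoded is None:
        return None
    return json.loads(gzip.decompress(base64.b64decode(encoded)).decode("utf-8"))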
def _parse_raw_data(
open_stats_df, extract_windowed_data, specimen_dob_dict, compress=True
):
compress_and_encode = udf(_compress_and_encode, StringType())
open_stats_df = open_stats_df.withColumnRenamed(
"observations_biological_sample_group", "biological_sample_group"
)
open_stats_df = open_stats_df.withColumnRenamed(
"observations_external_sample_id", "external_sample_id"
)
open_stats_df = open_stats_df.withColumnRenamed(
"observations_date_of_experiment", "date_of_experiment"
)
open_stats_df = open_stats_df.withColumnRenamed("observations_sex", "specimen_sex")
open_stats_df = open_stats_df.withColumnRenamed(
"observations_body_weight", "body_weight"
)
open_stats_df = open_stats_df.withColumnRenamed(
"observations_time_point", "time_point"
)
open_stats_df = open_stats_df.withColumnRenamed(
"observations_discrete_point", "discrete_point"
)
if extract_windowed_data:
open_stats_df = open_stats_df.withColumnRenamed(
"observations_window_weight", "window_weight"
)
for col_name in [
"biological_sample_group",
"date_of_experiment",
"external_sample_id",
"specimen_sex",
]:
open_stats_df = open_stats_df.withColumn(
col_name,
when(
(
col("data_type").isin(
["unidimensional", "time_series", "categorical"]
)
& (col(col_name).isNotNull())
),
col(col_name),
).otherwise(lit(None)),
)
open_stats_df = open_stats_df.withColumn(
"body_weight",
when(
(
col("data_type").isin(["unidimensional", "time_series", "categorical"])
& (col("body_weight").isNotNull())
),
col("body_weight"),
).otherwise(expr("transform(external_sample_id, sample_id -> NULL)")),
)
open_stats_df = open_stats_df.withColumn(
"data_point",
when(
(col("data_type").isin(["unidimensional", "time_series"]))
& (col("observations_response").isNotNull()),
col("observations_response"),
).otherwise(expr("transform(external_sample_id, sample_id -> NULL)")),
)
open_stats_df = open_stats_df.withColumn(
"category",
when(
(col("data_type") == "categorical")
& (col("observations_response").isNotNull()),
col("observations_response"),
).otherwise(expr("transform(external_sample_id, sample_id -> NULL)")),
)
open_stats_df = open_stats_df.withColumn(
"time_point",
when(
(col("data_type") == "time_series") & (col("time_point").isNotNull()),
col("time_point"),
).otherwise(expr("transform(external_sample_id, sample_id -> NULL)")),
)
open_stats_df = open_stats_df.withColumn(
"discrete_point",
when(
(col("data_type") == "time_series") & (col("discrete_point").isNotNull()),
col("discrete_point"),
).otherwise(expr("transform(external_sample_id, sample_id -> NULL)")),
)
date_of_birth_udf = (
lambda specimen_list: [
specimen_dob_dict[specimen] if specimen in specimen_dob_dict else None
for specimen in specimen_list
]
if specimen_list is not None
else []
)
date_of_birth_udf = udf(date_of_birth_udf, ArrayType(StringType()))
open_stats_df = open_stats_df.withColumn(
"date_of_birth", date_of_birth_udf("external_sample_id")
)
if extract_windowed_data:
open_stats_df = open_stats_df.withColumn(
"window_weight",
when(
(col("data_type") == "unidimensional")
& (col("window_weight").isNotNull()),
col("window_weight"),
).otherwise(expr("transform(external_sample_id, sample_id -> NULL)")),
)
raw_data_cols = [
"biological_sample_group",
"date_of_experiment",
"external_sample_id",
"specimen_sex",
"body_weight",
"data_point",
"category",
"time_point",
"discrete_point",
]
if extract_windowed_data:
raw_data_cols.append("window_weight")
open_stats_df = open_stats_df.withColumn("raw_data", arrays_zip(*raw_data_cols))
# to_json_udf = udf(
# lambda row: None
# if row is None
# else json.dumps(
# [
# {raw_data_cols[int(key)]: value for key, value in item.asDict().items()}
# for item in row
# ]
# ),
# StringType(),
# )
open_stats_df = open_stats_df.withColumn("raw_data", to_json("raw_data"))
for idx, col_name in enumerate(raw_data_cols):
open_stats_df = open_stats_df.withColumn(
"raw_data", regexp_replace("raw_data", f'"{idx}":', f'"{col_name}":')
)
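    # After the renaming above, each raw_data value is a JSON array whose objects
    # look like (illustrative): {"biological_sample_group": ..., "date_of_experiment": ...,
    # "external_sample_id": ..., "specimen_sex": ..., "body_weight": ..., "data_point": ...,
    # "category": ..., "time_point": ..., "discrete_point": ...}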
if compress:
open_stats_df = open_stats_df.withColumn(
"raw_data", compress_and_encode("raw_data")
)
return open_stats_df
def map_ontology_prefix(open_stats_df, term_prefix, field_prefix):
mapped_columns = [
col_name for col_name in STATS_RESULTS_COLUMNS if field_prefix in col_name
]
for col_name in mapped_columns:
mp_col_name = col_name.replace(field_prefix, "mp_")
open_stats_df = open_stats_df.withColumn(
col_name,
when(
col(col_name).isNull(),
when(
col("mp_term_id").startswith(term_prefix), col(mp_col_name)
).otherwise(lit(None)),
).otherwise(col(col_name)),
)
mapped_id = field_prefix + "term_id"
for col_name in mapped_columns:
mp_col_name = col_name.replace(field_prefix, "mp_")
open_stats_df = open_stats_df.withColumn(
mp_col_name,
when(col(mapped_id).isNotNull(), lit(None)).otherwise(col(mp_col_name)),
)
return open_stats_df
def map_to_stats(
open_stats_df, metadata_df, join_columns, source_stats_map, source_name
):
for col_name in metadata_df.columns:
if col_name not in join_columns:
metadata_df = metadata_df.withColumnRenamed(
col_name, f"{source_name}_{col_name}"
)
if source_name == "observations":
open_stats_df = open_stats_df.join(metadata_df, join_columns)
else:
open_stats_df = open_stats_df.join(metadata_df, join_columns, "left_outer")
for column_name, source_column in source_stats_map.items():
open_stats_df = open_stats_df.withColumn(
column_name, col(f"{source_name}_{source_column}")
)
for source_column in source_stats_map.values():
open_stats_df = open_stats_df.drop(f"{source_name}_{source_column}")
return open_stats_df
def standardize_threei_schema(threei_df: DataFrame):
threei_df = threei_df.dropDuplicates()
for col_name, threei_column in THREEI_STATS_MAP.items():
threei_df = threei_df.withColumnRenamed(threei_column, col_name)
threei_df = threei_df.withColumn("resource_name", lit("3i"))
threei_df = threei_df.withColumn(
"procedure_stable_id", array(col("procedure_stable_id"))
)
threei_df = threei_df.withColumn(
"sex",
when(col("sex") == "both", lit("not_considered")).otherwise(lower(col("sex"))),
)
threei_df = threei_df.withColumn(
"zygosity",
when(col("zygosity") == "Hom", lit("homozygote"))
.when(col("zygosity") == "Hemi", lit("hemizygote"))
.otherwise(lit("heterozygote")),
)
threei_df = threei_df.withColumn("term_id", regexp_replace("mp_id", r"\[", ""))
threei_df = threei_df.withColumn(
"threei_collapsed_mp_term",
when(
(col("mp_id") != "NA") & (col("mp_id").isNotNull()),
struct(
lit(None).cast(StringType()).alias("event"),
lit(None).cast(StringType()).alias("otherPossibilities"),
"sex",
col("term_id").alias("term_id"),
),
).otherwise(lit(None)),
)
threei_df = threei_df.withColumn(
"threei_p_value",
when(col("classification_tag") == "Significant", lit(0.0)).otherwise(lit(1.0)),
)
threei_df = threei_df.withColumn(
"threei_genotype_effect_p_value",
when(col("classification_tag") == "Significant", lit(0.0)).otherwise(lit(1.0)),
)
threei_df = threei_df.withColumn(
"threei_genotype_effect_parameter_estimate",
when(col("classification_tag") == "Significant", lit(1.0)).otherwise(lit(0.0)),
)
threei_df = threei_df.withColumn(
"threei_significant",
when(col("classification_tag") == "Significant", lit(True)).otherwise(
lit(False)
),
)
threei_df = threei_df.withColumn(
"threei_status",
when(
col("classification_tag").isin(["Significant", "Not Significant"]),
lit("Successful"),
).otherwise(lit("NotProcessed")),
)
threei_df = threei_df.withColumn(
"threei_statistical_method", lit("Supplied as data")
)
threei_df = threei_df.drop(
"sex",
"term_id",
"mp_id",
"parameter_name",
"procedure_name",
"combine_sex_call",
"samples",
"allele_name",
"classification_tag",
)
return threei_df
def map_three_i(open_stats_df):
open_stats_df = open_stats_df.withColumn(
"genotype_effect_parameter_estimate",
when(
col("threei_genotype_effect_parameter_estimate").isNotNull(),
col("threei_genotype_effect_parameter_estimate"),
).otherwise(col("genotype_effect_parameter_estimate")),
)
open_stats_df = open_stats_df.drop("threei_genotype_effect_parameter_estimate")
open_stats_df = open_stats_df.withColumn(
"significant",
when(
col("threei_significant").isNotNull(), col("threei_significant")
).otherwise(col("significant")),
)
open_stats_df = open_stats_df.drop("threei_significant")
open_stats_df = open_stats_df.withColumn(
"status",
when(col("threei_status").isNotNull(), col("threei_status")).otherwise(
col("status")
),
)
open_stats_df = open_stats_df.drop("threei_status")
open_stats_df = open_stats_df.withColumn(
"statistical_method",
when(
col("threei_statistical_method").isNotNull(),
col("threei_statistical_method"),
).otherwise(col("statistical_method")),
)
open_stats_df = open_stats_df.drop("threei_statistical_method")
open_stats_df = open_stats_df.withColumn(
"p_value",
when(col("threei_p_value").isNotNull(), col("threei_p_value")).otherwise(
col("p_value")
),
)
open_stats_df = open_stats_df.drop("threei_p_value")
open_stats_df = open_stats_df.withColumn(
"genotype_effect_p_value",
when(
col("threei_genotype_effect_p_value").isNotNull(),
col("threei_genotype_effect_p_value"),
).otherwise(col("genotype_effect_p_value")),
)
open_stats_df = open_stats_df.drop("threei_genotype_effect_p_value")
return open_stats_df
def _fertility_stats_results(observations_df: DataFrame, pipeline_df: DataFrame):
fertility_condition = col("parameter_stable_id").isin(
["IMPC_FER_001_001", "IMPC_FER_019_001"]
)
# mp_chooser = (
# pipeline_df.select(
# col("pipelineKey").alias("pipeline_stable_id"),
# col("procedure.procedureKey").alias("procedure_stable_id"),
# col("parameter.parameterKey").alias("parameter_stable_id"),
# col("parammpterm.optionText").alias("category"),
# col("termAcc"),
# )
# .withColumn("category", lower(col("category")))
# .distinct()
# )
required_stats_columns = STATS_OBSERVATIONS_JOIN + [
"sex",
"procedure_stable_id",
"pipeline_name",
"category",
"allele_accession_id",
"parameter_name",
"allele_symbol",
"marker_accession_id",
"marker_symbol",
"strain_accession_id",
]
fertility_stats_results = (
observations_df.where(fertility_condition)
.withColumnRenamed("gene_accession_id", "marker_accession_id")
.withColumnRenamed("gene_symbol", "marker_symbol")
.select(required_stats_columns)
)
fertility_stats_results = fertility_stats_results.withColumn(
"category", lower(col("category"))
)
fertility_stats_results = fertility_stats_results.withColumn(
"data_type", lit("line")
)
fertility_stats_results = fertility_stats_results.withColumn(
"effect_size", lit(1.0)
)
fertility_stats_results = fertility_stats_results.withColumn(
"statistical_method", lit("Supplied as data")
)
fertility_stats_results = fertility_stats_results.withColumn(
"status", lit("Successful")
)
fertility_stats_results = fertility_stats_results.withColumn("p_value", lit(0.0))
# fertility_stats_results = fertility_stats_results.join(
# mp_chooser,
# [
# "pipeline_stable_id",
# "procedure_stable_id",
# "parameter_stable_id",
# "category",
# ],
# "left_outer",
# )
fertility_stats_results = fertility_stats_results.withColumn(
"termAcc",
when(
col("category") == "infertile",
when(
col("parameter_stable_id") == "IMPC_FER_001_001", lit("MP:0001925")
).otherwise(lit("MP:0001926")),
).otherwise(lit(None)),
)
fertility_stats_results = fertility_stats_results.groupBy(
required_stats_columns
+ ["data_type", "status", "effect_size", "statistical_method", "p_value"]
).agg(
collect_set("category").alias("categories"),
collect_set(
struct(
lit("ABNORMAL").cast(StringType()).alias("event"),
lit(None).cast(StringType()).alias("otherPossibilities"),
"sex",
col("termAcc").alias("term_id"),
)
).alias("mp_term"),
)
fertility_stats_results = fertility_stats_results.withColumn(
"mp_term", expr("filter(mp_term, mp -> mp.term_id IS NOT NULL)")
)
fertility_stats_results = fertility_stats_results.withColumn(
"mp_term",
when(size(col("mp_term.term_id")) == 0, lit(None)).otherwise(col("mp_term")),
)
fertility_stats_results = fertility_stats_results.withColumn(
"p_value", when(col("mp_term").isNull(), lit(1.0)).otherwise(col("p_value"))
)
fertility_stats_results = fertility_stats_results.withColumn(
"effect_size",
when(col("mp_term").isNull(), lit(0.0)).otherwise(col("effect_size")),
)
return fertility_stats_results
def _embryo_stats_results(
observations_df: DataFrame, pipeline_df: DataFrame, embryo_stats_packets: DataFrame
):
mp_chooser = pipeline_df.select(
"pipelineKey",
"procedure.procedureKey",
"parameter.parameterKey",
"parammpterm.optionText",
"parammpterm.selectionOutcome",
"termAcc",
).distinct()
mp_chooser = (
mp_chooser.withColumnRenamed("pipelineKey", "pipeline_stable_id")
.withColumnRenamed("procedureKey", "procedure_stable_id")
.withColumnRenamed("parameterKey", "parameter_stable_id")
)
mp_chooser = mp_chooser.withColumn(
"category",
when(col("optionText").isNull(), col("selectionOutcome")).otherwise(
col("optionText")
),
)
mp_chooser = mp_chooser.withColumn("category", lower(col("category")))
mp_chooser = mp_chooser.drop("optionText", "selectionOutcome")
required_stats_columns = STATS_OBSERVATIONS_JOIN + [
"sex",
"procedure_stable_id",
"pipeline_name",
"category",
"allele_accession_id",
"parameter_name",
"allele_symbol",
"marker_accession_id",
"marker_symbol",
"strain_accession_id",
"text_value",
]
embryo_stats_results = (
observations_df.where(
col("procedure_group").rlike(
"|".join(
[
"IMPC_GPL",
"IMPC_GEL",
"IMPC_GPM",
"IMPC_GEM",
"IMPC_GPO",
"IMPC_GEO",
"IMPC_GPP",
"IMPC_GEP",
]
)
)
& (col("biological_sample_group") == "experimental")
& (col("observation_type") == "categorical")
)
.withColumnRenamed("gene_accession_id", "marker_accession_id")
.withColumnRenamed("gene_symbol", "marker_symbol")
.select(required_stats_columns)
)
embryo_control_data = observations_df.where(
col("procedure_group").rlike(
"|".join(
[
"IMPC_GPL",
"IMPC_GEL",
"IMPC_GPM",
"IMPC_GEM",
"IMPC_GPO",
"IMPC_GEO",
"IMPC_GPP",
"IMPC_GEP",
]
)
)
& (col("biological_sample_group") == "control")
& (col("observation_type") == "categorical")
& (col("category").isin(["yes", "no"]))
)
embryo_control_data = embryo_control_data.select(
"procedure_stable_id", "parameter_stable_id", "category"
)
embryo_control_data = embryo_control_data.groupBy(
"procedure_stable_id", "parameter_stable_id", "category"
).count()
window = Window.partitionBy("procedure_stable_id", "parameter_stable_id").orderBy(
col("count").desc()
)
embryo_normal_data = embryo_control_data.select(
"procedure_stable_id",
"parameter_stable_id",
first("category").over(window).alias("normal_category"),
).distinct()
embryo_stats_results = embryo_stats_results.withColumn(
"category", lower(col("category"))
)
embryo_stats_results = embryo_stats_results.join(
embryo_normal_data, ["procedure_stable_id", "parameter_stable_id"], "left_outer"
)
embryo_stats_results = embryo_stats_results.withColumn(
"category",
when(
col("category").isin(["yes", "no"])
& (col("category") != col("normal_category")),
lit("abnormal"),
).otherwise(col("category")),
)
embryo_stats_results = embryo_stats_results.drop("normal_category")
embryo_stats_results = embryo_stats_results.withColumn(
"data_type", lit("categorical")
)
embryo_stats_results = embryo_stats_results.withColumn("status", lit("Successful"))
embryo_stats_results = embryo_stats_results.withColumn(
"statistical_method", lit("Supplied as data")
)
embryo_stats_results = embryo_stats_results.join(
mp_chooser,
[
"pipeline_stable_id",
"procedure_stable_id",
"parameter_stable_id",
"category",
],
"left_outer",
)
group_by_cols = [
col_name
for col_name in required_stats_columns
if col_name not in ["category", "sex"]
]
group_by_cols += ["data_type", "status", "statistical_method"]
embryo_stats_results = embryo_stats_results.groupBy(group_by_cols).agg(
collect_set("sex").alias("sex"),
collect_set("category").alias("categories"),
collect_set(
struct(
lit("ABNORMAL").cast(StringType()).alias("event"),
lit(None).cast(StringType()).alias("otherPossibilities"),
col("sex"),
col("termAcc").alias("term_id"),
),
).alias("mp_term"),
collect_list(col("termAcc")).alias("abnormalCalls"),
)
embryo_stats_results = embryo_stats_results.withColumn(
"mp_term", expr("filter(mp_term, mp -> mp.term_id IS NOT NULL)")
)
embryo_stats_results = embryo_stats_results.withColumn(
"abnormalCallsCount",
size(expr("filter(abnormalCalls, mp -> mp IS NOT NULL)")),
)
embryo_stats_results = embryo_stats_results.withColumn(
"mp_term",
when(
((col("zygosity") == "homozygote") | (col("zygosity") == "hemizygote"))
& (col("abnormalCallsCount") >= 2),
col("mp_term"),
)
.when(
(col("zygosity") == "heterozygote") & (col("abnormalCallsCount") >= 4),
col("mp_term"),
)
.otherwise(lit(None)),
)
embryo_stats_results = embryo_stats_results.withColumn(
"p_value", when(col("mp_term").isNull(), lit(1.0)).otherwise(lit(0.0))
)
embryo_stats_results = embryo_stats_results.withColumn(
"effect_size", when(col("mp_term").isNull(), lit(0.0)).otherwise(lit(1.0))
)
embryo_stats_results = embryo_stats_results.groupBy(
*[
col_name
for col_name in embryo_stats_results.columns
if col_name not in ["mp_term", "p_value", "effect_size"]
]
).agg(
flatten(collect_set("mp_term")).alias("mp_term"),
min("p_value").alias("p_value"),
max("effect_size").alias("effect_size"),
)
for col_name in embryo_stats_packets.columns:
if (
col_name in embryo_stats_results.columns
and col_name not in STATS_OBSERVATIONS_JOIN
):
embryo_stats_packets = embryo_stats_packets.drop(col_name)
embryo_stats_results = embryo_stats_results.join(
embryo_stats_packets, STATS_OBSERVATIONS_JOIN, "left_outer"
)
return embryo_stats_results
def _embryo_viability_stats_results(observations_df: DataFrame, pipeline_df: DataFrame):
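# mp_chooser maps (pipeline, procedure, parameter, category) combinations to an MP term accession (termAcc)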
mp_chooser = (
pipeline_df.select(
"pipelineKey",
"procedure.procedureKey",
"parameter.parameterKey",
"parammpterm.optionText",
"termAcc",
)
.distinct()
.withColumnRenamed("pipelineKey", "pipeline_stable_id")
.withColumnRenamed("procedureKey", "procedure_stable_id")
.withColumnRenamed("parameterKey", "parameter_stable_id")
.withColumnRenamed("optionText", "category")
).withColumn("category", lower(col("category")))
required_stats_columns = STATS_OBSERVATIONS_JOIN + [
"sex",
"procedure_stable_id",
"pipeline_name",
"category",
"allele_accession_id",
"parameter_name",
"allele_symbol",
"marker_accession_id",
"marker_symbol",
"strain_accession_id",
"text_value",
]
embryo_viability_stats_results = (
observations_df.where(
col("parameter_stable_id").isin(
[
"IMPC_EVL_001_001",
"IMPC_EVM_001_001",
"IMPC_EVO_001_001",
"IMPC_EVP_001_001",
]
)
)
.withColumnRenamed("gene_accession_id", "marker_accession_id")
.withColumnRenamed("gene_symbol", "marker_symbol")
.select(required_stats_columns)
)
embryo_viability_stats_results = embryo_viability_stats_results.withColumn(
"category", lower(col("category"))
)
embryo_viability_stats_results = embryo_viability_stats_results.withColumn(
"data_type", lit("embryo")
)
embryo_viability_stats_results = embryo_viability_stats_results.withColumn(
"status", lit("Successful")
)
embryo_viability_stats_results = embryo_viability_stats_results.withColumn(
"statistical_method", lit("Supplied as data")
)
embryo_viability_stats_results = embryo_viability_stats_results.join(
mp_chooser,
[
"pipeline_stable_id",
"procedure_stable_id",
"parameter_stable_id",
"category",
],
"left_outer",
)
embryo_viability_stats_results = embryo_viability_stats_results.groupBy(
required_stats_columns + ["data_type", "status", "statistical_method"]
).agg(
collect_set("category").alias("categories"),
collect_set(
struct(
lit("ABNORMAL").cast(StringType()).alias("event"),
lit(None).cast(StringType()).alias("otherPossibilities"),
lit("not_considered").cast(StringType()).alias("sex"),
col("termAcc").alias("term_id"),
)
).alias("mp_term"),
)
embryo_viability_stats_results = embryo_viability_stats_results.withColumn(
"mp_term", expr("filter(mp_term, mp -> mp.term_id IS NOT NULL)")
)
embryo_viability_stats_results = embryo_viability_stats_results.withColumn(
"mp_term",
when(size(col("mp_term.term_id")) == 0, lit(None)).otherwise(col("mp_term")),
)
embryo_viability_stats_results = embryo_viability_stats_results.withColumn(
"p_value", when(col("mp_term").isNull(), lit(1.0)).otherwise(lit(0.0))
)
embryo_viability_stats_results = embryo_viability_stats_results.withColumn(
"effect_size", when(col("mp_term").isNull(), lit(0.0)).otherwise(lit(1.0))
)
return embryo_viability_stats_results
def _viability_stats_results(observations_df: DataFrame, pipeline_df: DataFrame):
mp_chooser = (
pipeline_df.select(
"pipelineKey",
"procedure.procedureKey",
"parameter.parameterKey",
"parammpterm.optionText",
"termAcc",
)
.distinct()
.withColumnRenamed("pipelineKey", "pipeline_stable_id")
.withColumnRenamed("procedureKey", "procedure_stable_id")
.withColumnRenamed("parameterKey", "parameter_stable_id")
.withColumnRenamed("optionText", "category")
).withColumn("category", lower(col("category")))
required_stats_columns = STATS_OBSERVATIONS_JOIN + [
"sex",
"procedure_stable_id",
"pipeline_name",
"category",
"allele_accession_id",
"parameter_name",
"allele_symbol",
"marker_accession_id",
"marker_symbol",
"strain_accession_id",
"text_value",
]
viability_stats_results = (
observations_df.where(
(
(col("parameter_stable_id") == "IMPC_VIA_001_001")
& (col("procedure_stable_id") == "IMPC_VIA_001")
)
| (
(
col("parameter_stable_id").isin(
[
"IMPC_VIA_063_001",
"IMPC_VIA_064_001",
"IMPC_VIA_065_001",
"IMPC_VIA_066_001",
"IMPC_VIA_067_001",
]
)
)
& (col("procedure_stable_id") == "IMPC_VIA_002")
)
)
.withColumnRenamed("gene_accession_id", "marker_accession_id")
.withColumnRenamed("gene_symbol", "marker_symbol")
.select(required_stats_columns)
)
viability_stats_results = viability_stats_results.withColumn(
"category", lower(col("category"))
)
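# For the IMPC_VIA_002 procedure the viability call is supplied as a JSON blob in text_value; parse it into a struct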
json_outcome_schema = StructType(
[
StructField("outcome", StringType()),
StructField("n", IntegerType()),
StructField("P", DoubleType()),
]
)
viability_stats_results = viability_stats_results.withColumn(
"viability_outcome",
when(
col("procedure_stable_id") == "IMPC_VIA_002",
from_json(col("text_value"), json_outcome_schema),
).otherwise(lit(None)),
)
viability_p_values = observations_df.where(
col("parameter_stable_id") == "IMPC_VIA_032_001"
).select("procedure_stable_id", "colony_id", col("data_point").alias("p_value"))
viability_male_mutants = observations_df.where(
col("parameter_stable_id") == "IMPC_VIA_010_001"
).select(
"procedure_stable_id", "colony_id", col("data_point").alias("male_mutants")
)
viability_female_mutants = observations_df.where(
col("parameter_stable_id") == "IMPC_VIA_014_001"
).select(
"procedure_stable_id", "colony_id", col("data_point").alias("female_mutants")
)
viability_stats_results = viability_stats_results.withColumn(
"data_type", lit("line")
)
viability_stats_results = viability_stats_results.withColumn(
"effect_size", lit(1.0)
)
viability_stats_results = viability_stats_results.join(
viability_p_values, ["colony_id", "procedure_stable_id"], "left_outer"
)
viability_stats_results = viability_stats_results.withColumn(
"p_value",
when(
col("procedure_stable_id") == "IMPC_VIA_002", col("viability_outcome.P")
).otherwise(col("p_value")),
)
viability_stats_results = viability_stats_results.withColumn(
"p_value",
when(
col("p_value").isNull() & ~col("category").contains("Viable"), lit(0.0)
).otherwise(col("p_value")),
)
viability_stats_results = viability_stats_results.withColumn(
"male_controls", lit(None)
)
viability_stats_results = viability_stats_results.join(
viability_male_mutants, ["colony_id", "procedure_stable_id"], "left_outer"
)
viability_stats_results = viability_stats_results.withColumn(
"male_mutants",
when(
(col("procedure_stable_id") == "IMPC_VIA_002")
& (col("parameter_name").contains(" males ")),
col("viability_outcome.n"),
).otherwise(col("male_mutants")),
)
viability_stats_results = viability_stats_results.withColumn(
"female_controls", lit(None)
)
viability_stats_results = viability_stats_results.join(
viability_female_mutants, ["colony_id", "procedure_stable_id"], "left_outer"
)
viability_stats_results = viability_stats_results.withColumn(
"female_mutants",
when(
(col("procedure_stable_id") == "IMPC_VIA_002")
& (col("parameter_name").contains(" females ")),
col("viability_outcome.n"),
).otherwise(col("female_mutants")),
)
viability_stats_results = viability_stats_results.withColumn(
"statistical_method",
when(
col("procedure_stable_id") == "IMPC_VIA_002",
lit("Binomial distribution probability"),
).otherwise(lit("Supplied as data")),
)
viability_stats_results = viability_stats_results.withColumn(
"status", lit("Successful")
)
viability_stats_results = viability_stats_results.join(
mp_chooser,
[
"pipeline_stable_id",
"procedure_stable_id",
"parameter_stable_id",
"category",
],
"left_outer",
)
viability_stats_results = viability_stats_results.groupBy(
required_stats_columns
+ [
"data_type",
"status",
"effect_size",
"statistical_method",
"p_value",
"male_mutants",
"female_mutants",
"viability_outcome",
]
).agg(
collect_set("category").alias("categories"),
collect_set(
struct(
lit("ABNORMAL").cast(StringType()).alias("event"),
lit(None).cast(StringType()).alias("otherPossibilities"),
lit("not_considered").cast(StringType()).alias("sex"),
col("termAcc").alias("term_id"),
)
).alias("mp_term"),
)
viability_stats_results = viability_stats_results.withColumn(
"mp_term",
when(
(col("procedure_stable_id") == "IMPC_VIA_002"),
array(
struct(
lit("ABNORMAL").cast(StringType()).alias("event"),
lit(None).cast(StringType()).alias("otherPossibilities"),
when(col("parameter_name").contains(" males "), lit("male"))
.when(col("parameter_name").contains(" females "), lit("female"))
.otherwise(lit("not_considered"))
.cast(StringType())
.alias("sex"),
when(
col("viability_outcome.outcome").contains("subviable"),
lit("MP:0011110"),
)
.when(
col("viability_outcome.outcome").contains("lethal"),
lit("MP:0011100"),
)
.otherwise(lit(None).cast(StringType()))
.cast(StringType())
.alias("term_id"),
)
),
).otherwise(col("mp_term")),
)
viability_stats_results = viability_stats_results.withColumn(
"mp_term", expr("filter(mp_term, mp -> mp.term_id IS NOT NULL)")
)
viability_stats_results = viability_stats_results.withColumn(
"mp_term",
when(size(col("mp_term.term_id")) == 0, lit(None)).otherwise(col("mp_term")),
)
viability_stats_results = viability_stats_results.withColumn(
"p_value", when(col("mp_term").isNull(), lit(1.0)).otherwise(col("p_value"))
)
viability_stats_results = viability_stats_results.withColumn(
"effect_size",
when(col("mp_term").isNull(), lit(0.0)).otherwise(col("effect_size")),
)
return viability_stats_results
def _histopathology_stats_results(observations_df: DataFrame):
histopathology_stats_results = observations_df.where(
expr("exists(sub_term_id, term -> term LIKE 'MPATH:%')")
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"term_set", array_sort(array_distinct("sub_term_name"))
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"is_normal",
(size("term_set") == 1) & expr("exists(term_set, term -> term = 'normal')"),
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"tissue_name", regexp_extract("parameter_name", "(.*)( - .*)", 1)
)
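# "Significance score" parameters flag whether the experimental histopathology finding for a tissue is significant (category "1")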
histopathology_significance_scores = observations_df.where(
col("parameter_name").endswith("Significance score")
).where(col("biological_sample_group") == "experimental")
histopathology_significance_scores = histopathology_significance_scores.withColumn(
"tissue_name", regexp_extract("parameter_name", "(.*)( - .*)", 1)
)
histopathology_significance_scores = histopathology_significance_scores.withColumn(
"significance", when(col("category") == "1", lit(True)).otherwise(lit(False))
)
significance_stats_join = [
"pipeline_stable_id",
"procedure_stable_id",
"specimen_id",
"experiment_id",
"tissue_name",
]
histopathology_significance_scores = histopathology_significance_scores.select(
significance_stats_join + ["significance"]
)
histopathology_stats_results = histopathology_stats_results.join(
histopathology_significance_scores, significance_stats_join, "left_outer"
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"significance",
when(col("significance") & ~col("is_normal"), lit(True)).otherwise(lit(False)),
)
required_stats_columns = STATS_OBSERVATIONS_JOIN + [
"sex",
"procedure_stable_id",
"pipeline_name",
"allele_accession_id",
"parameter_name",
"allele_symbol",
"marker_accession_id",
"marker_symbol",
"strain_accession_id",
"sub_term_id",
"sub_term_name",
"specimen_id",
"significance",
]
histopathology_stats_results = (
histopathology_stats_results.withColumnRenamed(
"gene_accession_id", "marker_accession_id"
)
.withColumnRenamed("gene_symbol", "marker_symbol")
.select(required_stats_columns)
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"sub_term_id", expr("filter(sub_term_id, mp -> mp LIKE 'MPATH:%')")
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"term_id", explode_outer("sub_term_id")
)
histopathology_stats_results = histopathology_stats_results.groupBy(
*[
col_name
for col_name in required_stats_columns
if col_name not in ["sex", "term_id"]
]
).agg(
collect_set(
struct(
lit("ABNORMAL").cast(StringType()).alias("event"),
lit(None).cast(StringType()).alias("otherPossibilities"),
col("sex"),
col("term_id"),
)
).alias("mp_term")
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"mp_term", expr("filter(mp_term, mp -> mp.term_id IS NOT NULL)")
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"mp_term",
when(col("significance").isNull() | ~col("significance"), lit(None)).otherwise(
col("mp_term")
),
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"statistical_method", lit("Supplied as data")
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"status", lit("Successful")
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"p_value",
when(col("significance").isNull() | ~col("significance"), lit(1.0)).otherwise(
lit(0.0)
),
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"effect_size",
when(col("significance").isNull() | ~col("significance"), lit(0.0)).otherwise(
lit(1.0)
),
)
histopathology_stats_results = histopathology_stats_results.withColumn(
"data_type", lit("histopathology")
)
histopathology_stats_results = histopathology_stats_results.drop("significance")
histopathology_stats_results = histopathology_stats_results.dropDuplicates()
return histopathology_stats_results
def _gross_pathology_stats_results(observations_df: DataFrame):
gross_pathology_stats_results = observations_df.where(
(col("biological_sample_group") != "control")
& col("parameter_stable_id").like("%PAT%")
& (expr("exists(sub_term_id, term -> term LIKE 'MP:%')"))
)
required_stats_columns = STATS_OBSERVATIONS_JOIN + [
"sex",
"procedure_stable_id",
"pipeline_name",
"category",
"allele_accession_id",
"parameter_name",
"allele_symbol",
"marker_accession_id",
"marker_symbol",
"strain_accession_id",
"sub_term_id",
"sub_term_name",
"specimen_id",
]
gross_pathology_stats_results = (
gross_pathology_stats_results.withColumnRenamed(
"gene_accession_id", "marker_accession_id"
)
.withColumnRenamed("gene_symbol", "marker_symbol")
.select(required_stats_columns)
)
gross_pathology_stats_results = gross_pathology_stats_results.withColumn(
"sub_term_id", expr("filter(sub_term_id, mp -> mp LIKE 'MP:%')")
)
gross_pathology_stats_results = gross_pathology_stats_results.withColumn(
"term_id", explode_outer("sub_term_id")
)
gross_pathology_stats_results = gross_pathology_stats_results.withColumn(
"term_id",
when(
expr(
"exists(sub_term_name, term -> term = 'no abnormal phenotype detected')"
)
| expr("exists(sub_term_name, term -> term = 'normal')"),
lit(None),
).otherwise(col("term_id")),
)
gross_pathology_stats_results = gross_pathology_stats_results.groupBy(
*[
col_name
for col_name in required_stats_columns
if col_name not in ["sex", "sub_term_id", "sub_term_name"]
]
).agg(
collect_set(
struct(
lit("ABNORMAL").cast(StringType()).alias("event"),
lit(None).cast(StringType()).alias("otherPossibilities"),
col("sex"),
col("term_id"),
)
).alias("mp_term")
)
gross_pathology_stats_results = gross_pathology_stats_results.withColumn(
"mp_term", expr("filter(mp_term, mp -> mp.term_id IS NOT NULL)")
)
gross_pathology_stats_results = gross_pathology_stats_results.withColumn(
"mp_term",
when(size(col("mp_term.term_id")) == 0, lit(None)).otherwise(col("mp_term")),
)
gross_pathology_stats_results = gross_pathology_stats_results.withColumn(
"statistical_method", lit("Supplied as data")
)
gross_pathology_stats_results = gross_pathology_stats_results.withColumn(
"status", lit("Successful")
)
gross_pathology_stats_results = gross_pathology_stats_results.withColumn(
"p_value", when(col("mp_term").isNull(), lit(1.0)).otherwise(lit(0.0))
)
gross_pathology_stats_results = gross_pathology_stats_results.withColumn(
"effect_size", when(col("mp_term").isNull(), lit(0.0)).otherwise(lit(1.0))
)
gross_pathology_stats_results = gross_pathology_stats_results.withColumn(
"data_type", lit("adult-gross-path")
)
return gross_pathology_stats_results
def _select_collapsed_mp_term(
mp_term_array: List[Row],
pipeline,
procedure_group,
parameter,
mp_chooser,
data_type,
first_term_ancestors,
second_term_ancestors,
):
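# Collapse per-sex MP terms into a single sex-agnostic call: if all calls map to the same term keep it,
# otherwise fall back to the OVERALL/ABNORMAL term from mp_chooser and, failing that,
# to the closest common ancestor of the two terms in the MP ontology.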
if (
mp_term_array is None
or data_type not in ["categorical", "unidimensional"]
or len(mp_term_array) == 0
):
return mp_term_array
mp_term = mp_term_array[0].asDict()
mp_term["sex"] = "not_considered"
mp_terms = [mp["term_id"] for mp in mp_term_array]
try:
if len(set(mp_terms)) > 1:
print(mp_term_array)
print(f"{pipeline} {procedure_group} {parameter}")
mp_term["term_id"] = mp_chooser[pipeline][procedure_group][parameter][
"UNSPECIFIED"
]["ABNORMAL"]["OVERALL"]["MPTERM"]
else:
mp_term["term_id"] = mp_term_array[0]["term_id"]
except KeyError:
ancestor_types = ["parent", "intermediate", "top_level"]
closest_common_ancestor = None
for ancestor_type in ancestor_types:
ancestor_intersect = set(
first_term_ancestors[f"{ancestor_type}_ids"]
) & set(second_term_ancestors[f"{ancestor_type}_ids"])
if len(ancestor_intersect) > 0:
closest_common_ancestor = list(ancestor_intersect)[0]
break
if closest_common_ancestor is None:
print(mp_term_array)
print(f"{pipeline} {procedure_group} {parameter}")
print("Unexpected error:", sys.exc_info()[0])
raise Exception(
str(mp_term_array)
+ f" | {pipeline} {procedure_group} {parameter} | "
+ f"[{str(first_term_ancestors)}] [{str(second_term_ancestors)}]"
+ str(sys.exc_info()[0])
)
else:
mp_term["term_id"] = closest_common_ancestor
return [convert_to_row(mp_term)]
def _add_via_002_mp_term_options(pipeline_core_df):
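# Force the mortality/aging top-level term and the preweaning-lethality MP term options onto pipelines that include IMPC_VIA_002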
pipeline_core_df = pipeline_core_df.withColumn(
"top_level_mp_id",
when(
array_contains(col("procedure_stable_id"), "IMPC_VIA_002"),
array(lit("MP:0010768")),
).otherwise(col("top_level_mp_id")),
)
pipeline_core_df = pipeline_core_df.withColumn(
"top_level_mp_term",
when(
array_contains(col("procedure_stable_id"), "IMPC_VIA_002"),
array(lit("mortality/aging")),
).otherwise(col("top_level_mp_term")),
)
pipeline_core_df = pipeline_core_df.withColumn(
"mp_id",
when(
array_contains(col("procedure_stable_id"), "IMPC_VIA_002"),
array(lit("MP:0011100"), lit("MP:0011110")),
).otherwise(col("mp_id")),
)
pipeline_core_df = pipeline_core_df.withColumn(
"mp_term",
when(
array_contains(col("procedure_stable_id"), "IMPC_VIA_002"),
array(
lit("preweaning lethality, complete penetrance"),
lit("preweaning lethality, incomplete penetrance"),
),
).otherwise(col("mp_term")),
)
return pipeline_core_df
def stop_and_count(df):
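# Debugging helper: print the row count of the DataFrame, then abort the pipeline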
print(df.count())
raise ValueError
if __name__ == "__main__":
sys.exit(main(sys.argv))
``` |
{
"source": "jmason-ebi/pdx",
"score": 2
} |
#### File: jmason-ebi/pdx/ingest.py
```python
import argparse
import os
import sys
from datetime import datetime
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web.settings")
django.setup()
from web.models import *
DATA_FILE_PATH = '../pdx_data'
def import_jax(args):
"""
Import PDX data from JAX
"""
if args['download'] :
from ingest_pipelines import jax
jax.run()
# Delete should cascade and delete all JAX associated objects
DataSource.objects.filter(name="JAX").delete()
jax = DataSource(name="JAX", description="The Jackson Laboratory")
jax.created=datetime.now()
jax.modified=datetime.now()
jax.save()
from lxml import html
for filename in os.listdir("%s/JAX"%(DATA_FILE_PATH)):
with open("%s/JAX/%s"%(DATA_FILE_PATH,filename), 'r') as f:
tree = html.fromstring(f.read())
pdx_info = dict()
# PDX information (skip first row)
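# Table rows alternate label/value cells; strip the trailing ":" from each label and collect the pairs into pdx_info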
for x in tree.xpath("""//td[@class="data1"][1]/table/tr""")[1:]:
a = [y.text for y in x.findall("td")]
pdx_info.update( dict(zip(map(lambda x: x and x.replace(":", "") or None, a[0::2]), a[1::2]) ) )
# Patient information
for x in tree.xpath("""//td[@class="data2"][1]/table/tr""")[:1]:
a = [y.text for y in x.findall("td")]
pdx_info.update( dict(zip(map(lambda x: x and x.replace(":", "") or None, a[0::2]), a[1::2]) ) )
# Short circuit if not all the data is available
if "Tumor Type" not in pdx_info:
continue
# Information gathered into map, now create the PDX
if "Sex" in pdx_info:
p = Patient()
p.sex = pdx_info["Sex"]
p.age = pdx_info["Age"]
(race, ethnicity) = pdx_info["Race / Ethnicity"].split(" / ")
p.race = race
p.ethnicity = ethnicity
p.save()
ps = PatientSnapshot()
ps.patient = p
ps.age = p.age
ps.save()
if "Strain" in pdx_info and pdx_info["Strain"]:
try:
host_strain = HostStrain.objects.get(name__exact=pdx_info["Strain"])
except HostStrain.DoesNotExist:
host_strain = HostStrain()
host_strain.name = pdx_info["Strain"]
host_strain.save()
else :
print "No strain information for PDX", pdx_info
if "Sample Type" in pdx_info:
try:
itype = ImplantationType.objects.get(name__exact=pdx_info["Sample Type"])
except ImplantationType.DoesNotExist:
itype = ImplantationType()
itype.name = pdx_info["Sample Type"]
itype.save()
if "Implantation Site" in pdx_info:
try:
isite = ImplantationSite.objects.get(name__exact=pdx_info["Implantation Site"])
except ImplantationSite.DoesNotExist:
if pdx_info["Implantation Site"] :
isite = ImplantationSite()
isite.name = pdx_info["Implantation Site"]
isite.save()
else :
isite = None
ht = Tumor()
ht.tumor_type = pdx_info["Tumor Type"]
ht.diagnosis = pdx_info["Initial Diagnosis"]
if "Final Diagnosis" in pdx_info and pdx_info["Final Diagnosis"]:
ht.diagnosis = pdx_info["Final Diagnosis"]
ht.tissue_of_origin = pdx_info["Primary Site"]
(stage, grade) = pdx_info["Stage / Grade"].split(" / ")
if grade:
ht.classification = "Grade: %s" %grade
ps.stage = stage
ps.save()
ht.patient_snapshot = ps
ht.save()
mt = Tumor()
mt.source_tumor_id = "Not Specified"
mt.save()
pdx = PdxStrain()
pdx.external_id = pdx_info["Model ID"]
pdx.host_strain = host_strain
pdx.data_source = jax
pdx.human_tumor = ht
pdx.mouse_tumor = mt
pdx.implantation_type = itype
pdx.implantation_site = isite
pdx.save()
def import_proxe():
"""
Import PDX data from PROXE
"""
# Delete should cascade and delete all JAX associated objects
DataSource.objects.filter(name="PROXE").delete()
proxe = DataSource(name="PROXE", description="The Proxe Project")
proxe.created=datetime.now()
proxe.modified=datetime.now()
proxe.save()
# Process Proxe ingest pipeline
import csv
t = csv.reader(open("%s/PROXE/proxe.tsv"%DATA_FILE_PATH, 'rU'), dialect="excel-tab")
header = t.next()
all_data = [dict(zip(header, map(str, row))) for row in t]
for data in all_data:
# Skip records without the tissue of origin
if not data["Patient Tumor Tissue"]:
continue
# Skip records without the tissue of origin
if "NA" == data["Patient Tumor Tissue"]:
continue
# Patient data
p = Patient()
p.sex = data["Sex"]
p.age = data["Age"] and int(float(data["Age"])) or None # Truncate to year
race_ethnicity = data["Race/Ethnicity"].split("/")
if len(race_ethnicity) == 2:
p.race = race_ethnicity[0]
p.ethnicity = race_ethnicity[1]
elif len(race_ethnicity) == 1:
p.race = race_ethnicity[0]
p.save()
# Patient Snapshot
ps = PatientSnapshot()
ps.patient = p
ps.age = p.age
ps.stage = data["FAB Classification"] and "FAB Classification %s" % (data["FAB Classification"]) or None
ps.save()
# Human Tumor information
ht = Tumor()
ht.tumor_type = ""
ht.diagnosis = data["Pathologic Diagnosis"]
ht.tissue_of_origin = data["Patient Tumor Tissue"]
ht.patient_snapshot = ps
ht.save()
# Human tumor markers
marker_list = data["Patient Tumor Mutations Positive"].strip().split("|")
if "" in marker_list :
marker_list.remove("")
detail_list = data["Patient Tumor Mutations Details"].strip().split("|")
if len(marker_list) > 0:
for i in range(len(marker_list)):
m = Marker()
m.tumor = ht
m.gene = marker_list[i].strip()
if len(detail_list) > i:
m.details = detail_list[i].strip()
m.save()
# Mouse Tumor information
mt = Tumor()
mt.save()
# Mouse tumor markers
marker_list = data["PDX Molecular Alterations Positive"].strip().split("|")
if "" in marker_list :
marker_list.remove("")
detail_list = data["PDX Molecular Details"].strip().split("|")
if len(marker_list) > 0:
for i in range(len(marker_list)):
m = Marker()
m.tumor = mt
m.gene = marker_list[i].strip()
if len(detail_list) > i:
m.details = detail_list[i].strip()
m.save()
# Mouse strain
if data["Mouse Strain"]:
try:
host_strain = HostStrain.objects.get(name__exact=data["Mouse Strain"])
except HostStrain.DoesNotExist:
host_strain = HostStrain()
host_strain.name = data["Mouse Strain"]
host_strain.save()
else :
print "No strain information for PDX", data["PDX Name"]
# Implantation type
try:
itype = ImplantationType.objects.get(name__exact=data["Engraftment Routes"])
except ImplantationType.DoesNotExist:
itype = ImplantationType()
itype.name = data["Engraftment Routes"]
itype.save()
pdx = PdxStrain()
pdx.external_id = data["PDX Name"]
pdx.human_tumor = ht
pdx.mouse_tumor = mt
pdx.host_strain = host_strain
pdx.data_source = proxe
pdx.implantation_type = itype
passage_number = data["PDX Passage Immunophenotyped"]
lag_time = data["Days to Harvest P1"]
pdx.save()
# If there is validation of this PDX
if len(data["Patient Tumor Mutations Positive"].strip().split("|")) > 0:
# Add Validation object
v = Validation()
v.pdx_strain = pdx
v.status = data["PDX HemoSeq"]
mouse_markers = set([x.gene for x in mt.marker_set.all()])
human_markers = set([x.gene for x in ht.marker_set.all()])
same = len(mouse_markers.intersection(human_markers))
percent = "N/A"
if len(human_markers) > 0:
percent = str(round(100.0 * same / len(human_markers))) + "%"
v.result = "%s PDX concordance in %s of %s genes (%s total markers)" % (percent, same, len(human_markers), len(ht.marker_set.all()))
v.save()
print "Saving pdx %s for tumor %s" % (pdx, ht)
def import_europdx():
"""
Import PDX data from EuroPDX resource
"""
# Delete should cascade and delete all JAX associated objects
DataSource.objects.filter(name="EUROPDX").delete()
europdx = DataSource(name="EUROPDX", description="The EuroPDX Project")
europdx.created=datetime.now()
europdx.modified=datetime.now()
europdx.save()
# Process Proxe ingest pipeline
import csv
t = csv.reader(open("%s/EUROPDX/data_clinical_patients.txt"%DATA_FILE_PATH, 'rU'), dialect="excel-tab")
t.next()
t.next()
t.next()
t.next()
header = t.next()
patient_data = [dict(zip(header, map(str, row))) for row in t]
patient_data = {x['PATIENT_ID']:x for x in patient_data}
t = csv.reader(open("%s/EUROPDX/data_clinical_samples.txt"%DATA_FILE_PATH, 'rU'), dialect="excel-tab")
t.next()
t.next()
t.next()
t.next()
header = t.next()
tumor_data = [dict(zip(header, map(str, row))) for row in t]
all_data = []
for x in tumor_data:
patient_info = patient_data[x['PATIENT_ID']]
data_row = x.copy()
data_row.update(patient_info)
all_data.append(data_row)
print()
for data in all_data:
# Patient data
try:
# Existing patient
p = Patient.objects.get(name__exact=data["PATIENT_ID"])
except:
p = Patient()
p.external_id = data["PATIENT_ID"]
p.sex = data["GENDER"]
p.age = data["AGE"] and int(float(data["AGE"])) or None # Truncate to year
p.save()
# Patient Snapshot
ps = PatientSnapshot()
ps.patient = p
ps.age = data["AGE_AT_COLLECTION"] and int(float(data["AGE_AT_COLLECTION"])) or None # Truncate to year
ps.save()
# Human Tumor information
ht = Tumor()
ht.tumor_type = data["SAMPLE_ORIGIN"]
ht.diagnosis = "Colorectal Adenocarcinoma"
ht.tissue_of_origin = data["SITE_OF_PRIMARY"]
ht.patient_snapshot = ps
ht.classification = "Grade: %s"%data["GRADE"]
ht.save()
# Mouse Tumor information
mt = Tumor()
mt.source_tumor_id = data["LOCAL_MODEL_ID"]
mt.save()
# Mouse strain
if data["STRAIN"]:
try:
host_strain = HostStrain.objects.get(name__exact=data["STRAIN"])
except HostStrain.DoesNotExist:
host_strain = HostStrain()
host_strain.name = data["STRAIN"]
host_strain.save()
else :
print "No strain information for PDX", data["PDX Name"]
mt.delete()
ht.delete()
ps.delete()
p.delete()
# Implantation type
try:
itype = ImplantationType.objects.get(name__exact=data["IMPLANT_TYPE"])
except ImplantationType.DoesNotExist:
itype = ImplantationType()
itype.name = data["IMPLANT_TYPE"]
itype.save()
try:
isite = ImplantationSite.objects.get(name__exact=data["IMPLANT_SITE"])
except ImplantationSite.DoesNotExist:
isite = ImplantationSite()
isite.name = data["IMPLANT_SITE"]
isite.save()
pdx = PdxStrain()
pdx.external_id = data["SAMPLE_ID"]
pdx.human_tumor = ht
pdx.mouse_tumor = mt
pdx.host_strain = host_strain
pdx.data_source = europdx
pdx.implantation_type = itype
pdx.implantation_site = isite
pdx.save()
# Validations
if "FINGERPRINT_AVAILABLE" in data:
v = Validation()
v.result = "Fingerprint data available: %s"%data["FINGERPRINT_AVAILABLE"]
v.pdx_strain = pdx
v.save()
print "Saving pdx %s for tumor %s" % (pdx, ht)
def main(args) :
for source in args['datasources'] :
if 'jax' in source.lower() :
# Process JAX ingest pipeline
import_jax(args)
if 'proxe' in source.lower() :
# Process PROXE ingest pipeline
import_proxe()
if 'europdx' in source.lower() :
# Process EuroPDX ingest pipeline
import_europdx()
if __name__ == "__main__" :
# Parse command line arguments
parser = argparse.ArgumentParser(description='Optionally download data from external pipelines and insert / update the models in the PDX database')
parser.add_argument('datasources', metavar='DATASOURCES', type=str, nargs='+', help='Process the data ingest pipeline(s) specified, i.e. jax, europdx')
# parser = parser.add_mutually_exclusive_group(required=False)
parser.add_argument('--download', dest='download', action='store_true')
parser.add_argument('--no-download', dest='download', action='store_false')
parser.set_defaults(download=False)
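# Example invocation (hypothetical): python ingest.py jax proxe europdx --no-download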
args = vars(parser.parse_args())
main(args)
``` |
{
"source": "jmasonlee/fractions",
"score": 4
} |
#### File: fractions/tests/test_add.py
```python
import unittest
from unittest import TestCase
from src.fractions_math import add
from src.fractions_math.add import Fraction
class Test(TestCase):
def test_add_zeroes(self):
self.assertEqual(Fraction(0), add.add(Fraction(0), Fraction(0)))
def test_add_right_zero(self):
self.assertEqual(Fraction(1), add.add(Fraction(1), Fraction(0)))
def test_add_left_zero(self):
self.assertEqual(Fraction(2), add.add(Fraction(0), Fraction(2)))
def test_add_right_negative(self):
self.assertEqual(Fraction(2), add.add(Fraction(4), Fraction(-2)))
def test_add_left_negative(self):
self.assertEqual(Fraction(1), add.add(Fraction(-3), Fraction(4)))
def test_add_negatives(self):
self.assertEqual(Fraction(-5), add.add(Fraction(-3), Fraction(-2)))
def test_result_is_a_fraction_when_inputs_do_not_make_a_whole(self):
self.assertEqual(Fraction(7, 9), add.add(Fraction(4, 9), Fraction(3, 9)))
def test_two_numbers_make_a_whole(self):
self.assertEqual(Fraction(1), add.add(Fraction(1, 3), Fraction(2, 3)))
def test_right_denominator_is_a_multiple_of_the_left(self):
self.assertEqual(Fraction(3, 4), add.add(Fraction(1, 2), Fraction(1, 4)))
def test_reduce_result(self):
self.assertEqual(Fraction(1, 2), add.add(Fraction(1, 6), Fraction(2, 6)))
def test_different_denominators_without_reducing_result(self):
self.assertEqual(Fraction(5, 6), add.add(Fraction(1, 2), Fraction(1, 3)))
``` |
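The tests above fully specify the behaviour of `add.add` and the `Fraction` value type: addition over a common denominator, with the result reduced to lowest terms. A minimal sketch of the `src/fractions_math/add.py` module they assume might look like the following (an illustration only, not the repository's actual implementation):
```python
from dataclasses import dataclass
from math import gcd


@dataclass(frozen=True)
class Fraction:
    numerator: int
    denominator: int = 1


def add(left: Fraction, right: Fraction) -> Fraction:
    # Bring both operands to a common denominator, add the numerators,
    # then reduce the result to lowest terms.
    numerator = left.numerator * right.denominator + right.numerator * left.denominator
    denominator = left.denominator * right.denominator
    divisor = gcd(numerator, denominator)
    return Fraction(numerator // divisor, denominator // divisor)
```
Running `python -m pytest tests/test_add.py` against such a module would exercise all of the cases above.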
{
"source": "JMasr/caliope_bert",
"score": 2
} |
#### File: caliope_bert/src/test.py
```python
import os
import re
import torch
import argparse
import itertools
import numpy as np
from tqdm import tqdm
from dataset import Dataset
from model import DeepPunctuation, DeepPunctuationCRF
from config import MODELS, punctuation_dict, transformation_dict
parser = argparse.ArgumentParser(description='Punctuation restoration test')
parser.add_argument('--cuda', default=True, type=lambda x: (str(x).lower() == 'true'), help='use cuda if available')
parser.add_argument('--pretrained-model', default='bertinho-gl-base-cased', type=str, help='pretrained language model')
parser.add_argument('--lstm-dim', default=-1, type=int,
help='hidden dimension in LSTM layer, if -1 is set equal to hidden dimension in language model')
parser.add_argument('--use-crf', default=False, type=lambda x: (str(x).lower() == 'true'),
help='whether to use CRF layer or not')
parser.add_argument('--data-path', default='../data/gl/test', type=str, help='path to test datasets')
parser.add_argument('--weight-path', default='out/weights.pt', type=str, help='model weight path')
parser.add_argument('--sequence_length', default=96, type=int,
help='sequence length to use when preparing dataset (default 96)')
parser.add_argument('--batch_size', default=8, type=int, help='batch size (default: 8)')
parser.add_argument('--save-path', default='out/', type=str, help='model and log save directory')
args = parser.parse_args()
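# Example invocation (hypothetical paths): python test.py --pretrained-model bertinho-gl-base-cased --data-path ../data/gl/test --weight-path out/weights.pt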
# tokenizer
if 'bertinho' in args.pretrained_model:
tokenizer = MODELS[args.pretrained_model][1].from_pretrained('../models/bertinho/')
else:
tokenizer = MODELS[args.pretrained_model][1].from_pretrained(args.pretrained_model)
token_style = MODELS[args.pretrained_model][3]
test_set = []
if args.data_path[-1] == "/":
test_files = os.listdir(args.data_path)
for file in test_files:
test_set.append(Dataset(os.path.join(args.data_path, file), tokenizer=tokenizer,
sequence_len=args.sequence_length, token_style=token_style, is_train=False))
else:
test_set.append(Dataset(args.data_path, tokenizer=tokenizer, sequence_len=args.sequence_length,
token_style=token_style, is_train=False))
# Data Loaders
data_loader_params = {
'batch_size': args.batch_size,
'shuffle': False,
'num_workers': 0
}
test_loaders = [torch.utils.data.DataLoader(x, **data_loader_params) for x in test_set]
# logs
model_save_path = args.weight_path
log_path = os.path.join(args.save_path, 'logs_test.txt')
# Model
device = torch.device('cuda' if (args.cuda and torch.cuda.is_available()) else 'cpu')
if args.use_crf:
deep_punctuation = DeepPunctuationCRF(args.pretrained_model, freeze_bert=False, lstm_dim=args.lstm_dim)
else:
deep_punctuation = DeepPunctuation(args.pretrained_model, freeze_bert=False, lstm_dim=args.lstm_dim)
deep_punctuation.to(device)
def test(data_loader, path_to_model=model_save_path):
"""
:return: precision[numpy array], recall[numpy array], f1 score [numpy array], accuracy, confusion matrix
"""
x_str = []
num_iteration = 0
deep_punctuation.load_state_dict(torch.load(path_to_model))
deep_punctuation.eval()
# +1 for overall result
tp = np.zeros(1+len(punctuation_dict), dtype=int)
fp = np.zeros(1+len(punctuation_dict), dtype=int)
fn = np.zeros(1+len(punctuation_dict), dtype=int)
cm = np.zeros((len(punctuation_dict), len(punctuation_dict)), dtype=int)
correct = 0
total = 0
with torch.no_grad():
for x, y, att, y_mask in tqdm(data_loader, desc='test'):
x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
y_mask = y_mask.view(-1)
if args.use_crf:
y_predict = deep_punctuation(x, att, y)
y_predict = y_predict.view(-1)
y = y.view(-1)
else:
y_predict = deep_punctuation(x, att)
y = y.view(-1)
y_predict = y_predict.view(-1, y_predict.shape[2])
y_predict = torch.argmax(y_predict, dim=1).view(-1)
# useful inference
x_tokens = tokenizer.convert_ids_to_tokens(x.view(-1))
num_iteration += 1
y_mask = y_mask.view(-1)
correct += torch.sum(y_mask * (y_predict == y).long()).item()
total += torch.sum(y_mask).item()
for i in range(y.shape[0]):
if y_mask[i] == 0:
# we can ignore this because we know there won't be any punctuation in this position
# since we created this position due to padding or sub-word tokenization
x_tokens[i] = transformation_dict[y_predict[i].item()](x_tokens[i])
continue
cor = y[i]
prd = y_predict[i]
if cor == prd:
tp[cor] += 1
else:
fn[cor] += 1
fp[prd] += 1
cm[cor][prd] += 1
x_tokens[i] = transformation_dict[y_predict[i].item()](x_tokens[i])
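# Rebuild readable text from the predicted tokens: drop the special "£"/"¢" markers, merge WordPiece "##" sub-tokens and remove [PAD] tokens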
x_tokens = " ".join(x_tokens)
x_tokens = re.sub(r"[£¢]", "", x_tokens)
x_tokens = re.sub(" ##", "", x_tokens)
x_tokens = re.sub(r"\[PAD]", "", x_tokens)
x_tokens = re.sub(r" +", " ", x_tokens)
x_str.append(x_tokens)
# ignore first index which is for no punctuation
tp[-1] = np.sum(tp[1:])
fp[-1] = np.sum(fp[1:])
fn[-1] = np.sum(fn[1:])
precision = tp/(tp+fp)
recall = tp/(tp+fn)
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1, correct/total, cm, x_str
def inference(data_loader, path_to_model=model_save_path):
"""
:return: precision[numpy array], recall[numpy array], f1 score [numpy array], accuracy, confusion matrix
"""
y_str = []
y_conf = []
num_iteration = 0
deep_punctuation.load_state_dict(torch.load(path_to_model))
deep_punctuation.eval()
with torch.no_grad():
for x, y, att, y_mask in tqdm(data_loader, desc='test'):
x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
if args.use_crf:
y_predict = deep_punctuation(x, att, y)
logits = torch.nn.functional.softmax(y_predict, dim=1)
y_predict = y_predict.view(-1)
y = y.view(-1)
else:
y_predict = deep_punctuation(x, att)
logits = torch.nn.functional.softmax(y_predict, dim=1)
y = y.view(-1)
y_predict = y_predict.view(-1, y_predict.shape[2])
y_predict = torch.argmax(y_predict, dim=1).view(-1)
batch_conf = []
for b in range(logits.size()[0]):
for s in range(logits.size()[1]):
batch_conf.append(torch.max(logits[b, s, :]).item())
num_iteration += 1
x_tokens = tokenizer.convert_ids_to_tokens(x.view(-1))
for i in range(y.shape[0]):
x_tokens[i] = transformation_dict[y_predict[i].item()](x_tokens[i])
y_str.append(x_tokens)
y_conf.append(batch_conf)
y_str = list(itertools.chain.from_iterable(y_str))
y_conf = list(itertools.chain.from_iterable(y_conf))
ind = 0
new_text, new_confidence = [], []
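# Merge WordPiece "##" sub-tokens back into whole words, skipping padding/marker tokens
# and keeping the highest confidence among the merged pieces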
while ind < len(y_str) - 1:
if y_str[ind] in ['£', '¢', '[pad]', '[PAD]']:
ind += 1
continue
elif (ind != 0) and ("#" in y_str[ind]):
new_text[-1] = new_text[-1] + y_str[ind][2:]
new_confidence[-1] = max(y_conf[ind], y_conf[ind - 1])
ind += 1
continue
elif (ind != len(y_str) - 1) and ("#" in y_str[ind + 1]):
new_t = y_str[ind] + y_str[ind + 1][2:]
new_c = max(y_conf[ind], y_conf[ind + 1])
ind += 2
else:
new_t = y_str[ind]
new_c = y_conf[ind]
ind += 1
new_text.append(new_t)
new_confidence.append(new_c)
return new_text, new_confidence
def run_test():
for i in range(len(test_loaders)):
precision, recall, f1, accuracy, cm, text = test(test_loaders[i])
log = 'Precision: ' + str(precision) + '\n' + 'Recall: ' + str(recall) + '\n' + \
'F1 score: ' + str(f1) + '\n' + 'Accuracy:' + str(accuracy) + '\n' + 'Confusion Matrix' + str(cm) + '\n'
print(log)
with open(log_path, 'a') as f:
f.write(log)
def run_inference():
text, confidence = [], []
for i in range(len(test_loaders)):
text, confidence = inference(test_loaders[i])
return text, confidence
run_inference()
run_test()
```
#### File: caliope_bert/src/train.py
```python
import time
import torch
import torch.nn as nn
from torch.utils import data
import torch.multiprocessing
import mlflow
import numpy as np
from tqdm import tqdm
from uuid import uuid4
from argparser import parse_arguments
from dataset import Dataset
from dataset import cpu_count
from model import DeepPunctuation, DeepPunctuationCRF
from config import *
import augmentation
torch.multiprocessing.set_sharing_strategy('file_system') # https://github.com/pytorch/pytorch/issues/11201
args = parse_arguments()
# for reproducibility
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
# tokenizer
if 'berto' in args.pretrained_model:
tokenizer = MODELS[args.pretrained_model][1].from_pretrained('../models/berto/')
elif 'bertinho' in args.pretrained_model:
tokenizer = MODELS[args.pretrained_model][1].from_pretrained('../models/bertinho/')
else:
tokenizer = MODELS[args.pretrained_model][1].from_pretrained(args.pretrained_model)
augmentation.tokenizer = tokenizer
augmentation.sub_style = args.sub_style
augmentation.alpha_sub = args.alpha_sub
augmentation.alpha_del = args.alpha_del
token_style = MODELS[args.pretrained_model][3]
ar = args.augment_rate
sequence_len = args.sequence_length
aug_type = args.augment_type
# Datasets
print("+==================+")
print("| Loading data ... |")
print("+------------------+")
if args.language == 'en':
train_set = Dataset(os.path.join(args.data_path, 'en/train2012'), data_tokenizer=tokenizer, token_style=token_style,
sequence_len=sequence_len, batch_size=args.batch_size, is_train=True,
augment_rate=ar, augment_type=aug_type)
print("\ttrain-set loaded")
val_set = Dataset(os.path.join(args.data_path, 'en/dev2012'), data_tokenizer=tokenizer, sequence_len=sequence_len,
batch_size=args.batch_size, token_style=token_style, is_train=False)
print("\tvalidation-set loaded")
test_set_ref = Dataset(os.path.join(args.data_path, 'en/test2011'), data_tokenizer=tokenizer, is_train=False,
sequence_len=sequence_len, batch_size=args.batch_size, token_style=token_style)
test_set_asr = Dataset(os.path.join(args.data_path, 'en/test2011asr'), data_tokenizer=tokenizer, is_train=False,
sequence_len=sequence_len, batch_size=args.batch_size, token_style=token_style)
test_set = [val_set, test_set_ref, test_set_asr]
print("\ttest-set loaded")
elif args.language == 'gl':
check_for_data_base('gl')
data_path = os.path.join(args.data_path, 'gl/train')
train_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
token_style=token_style, is_train=True, augment_rate=ar, augment_type=aug_type)
print("\ttrain-set loaded")
data_path = data_path.replace('gl/train', 'gl/dev')
val_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
token_style=token_style, is_train=False)
print("\tvalidation-set loaded")
data_path = data_path.replace('gl/dev', 'gl/test')
test_set_ref = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
token_style=token_style, is_train=False)
print("\ttest-set loaded")
test_set = [test_set_ref]
elif args.language == 'gl_big':
check_for_data_base('gl_big')
data_path = os.path.join(args.data_path, 'gl_big/train')
train_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
token_style=token_style, is_train=True, augment_rate=ar, augment_type=aug_type)
print("\ttrain-set loaded")
data_path = data_path.replace('gl_big/train', 'gl_big/dev')
val_set = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
token_style=token_style, is_train=False)
print("\tvalidation-set loaded")
data_path = data_path.replace('gl_big/dev', 'gl_big/test')
test_set_ref = Dataset(data_path, data_tokenizer=tokenizer, sequence_len=sequence_len, batch_size=args.batch_size,
token_style=token_style, is_train=False)
print("\ttest-set loaded")
test_set = [test_set_ref]
elif args.language == 'es':
train_set = Dataset(os.path.join(args.data_path, 'es/train'), data_tokenizer=tokenizer, token_style=token_style,
sequence_len=sequence_len, is_train=True, batch_size=args.batch_size,
augment_rate=ar, augment_type=aug_type)
print("\ttrain-set loaded")
val_set = Dataset(os.path.join(args.data_path, 'es/dev'), data_tokenizer=tokenizer, sequence_len=sequence_len,
batch_size=args.batch_size, token_style=token_style, is_train=False)
print("\tdev-set loaded")
test_set_ref = Dataset(os.path.join(args.data_path, 'es/test'), data_tokenizer=tokenizer, token_style=token_style,
sequence_len=sequence_len, batch_size=args.batch_size, is_train=False)
test_set = [test_set_ref]
print("\ttest-set loaded")
else:
raise ValueError('Incorrect language argument for Dataset')
# Data Loaders
print("+======================+")
print("| Loading the Database |")
print("+----------------------+")
data_loader_params = {
'batch_size': args.batch_size,
'shuffle': True,
'num_workers': cpu_count()
}
train_loader = torch.utils.data.DataLoader(train_set, **data_loader_params)
val_loader = torch.utils.data.DataLoader(val_set, **data_loader_params)
test_loaders = [torch.utils.data.DataLoader(x, **data_loader_params) for x in test_set]
# logs
uniq_id = str(uuid4()).split("-")[0]
if args.save_path:
save_path = args.save_path + uniq_id
else:
date = "_".join(time.asctime().split(" ")[:3])
save_path = f"exp_{args.language}_{date}_{uniq_id}/"
os.makedirs(save_path, exist_ok=True)
model_save_path = os.path.join(save_path, 'weights.pt')
log_path = os.path.join(save_path, args.name + '_logs_.txt')
# Model
device = torch.device('cpu') if args.cuda == -1 else torch.device('cuda:' + str(args.cuda))
print(F"+=============================+")
print(f"|Loading BERT model using {str(device).upper()}|")
print(F"+=============================+")
if args.use_crf:
deep_punctuation = DeepPunctuationCRF(args.pretrained_model, freeze_bert=args.freeze_bert, lstm_dim=args.lstm_dim)
else:
deep_punctuation = DeepPunctuation(args.pretrained_model, freeze_bert=args.freeze_bert, lstm_dim=args.lstm_dim)
deep_punctuation.to(device)
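# Optionally weight the cross-entropy loss with the per-class weights supplied by the training Dataset (args.loss_w)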
if args.loss_w:
t_weight = torch.tensor(train_set.tensor_weight, device=device)
else:
t_weight = torch.tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.], device=device)
criterion = nn.CrossEntropyLoss(weight=t_weight)
optimizer = torch.optim.Adam(deep_punctuation.parameters(), lr=args.lr, weight_decay=args.decay)
def validate(data_loader):
"""
:return: validation accuracy, validation loss
"""
num_iteration = 0
deep_punctuation.eval()
# Class Metrics
tp = np.zeros(1 + len(punctuation_dict), dtype=int)
fp = np.zeros(1 + len(punctuation_dict), dtype=int)
fn = np.zeros(1 + len(punctuation_dict), dtype=int)
cm = np.zeros((len(punctuation_dict), len(punctuation_dict)), dtype=int)
# Global metrics
correct = 0
total = 0
val_loss = 0
with torch.no_grad():
for x, y, att, y_mask in tqdm(data_loader, desc='eval'):
x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
y_mask = y_mask.view(-1)
if args.use_crf:
y_predict = deep_punctuation(x, att, y)
loss = deep_punctuation.log_likelihood(x, att, y)
y_predict = y_predict.view(-1)
y = y.view(-1)
else:
y_predict = deep_punctuation(x, att)
y = y.view(-1)
y_predict = y_predict.view(-1, y_predict.shape[2])
loss = criterion(y_predict, y)
y_predict = torch.argmax(y_predict, dim=1).view(-1)
val_loss += loss.item()
num_iteration += 1
y_mask = y_mask.view(-1)
correct += torch.sum(y_mask * (y_predict == y).long()).item()
total += torch.sum(y_mask).item()
for i in range(y.shape[0]):
if y_mask[i] == 0:
# since we created this position due to padding or sub-word tokenization, so we can ignore it
continue
cor = y[i]
prd = y_predict[i]
if cor == prd:
tp[cor] += 1
else:
fn[cor] += 1
fp[prd] += 1
cm[cor][prd] += 1
# ignore first index which is for no punctuation
tp[-1] = np.sum(tp[1:])
fp[-1] = np.sum(fp[1:])
fn[-1] = np.sum(fn[1:])
global_loss = val_loss/num_iteration
accuracy = correct/total
precision = tp / (tp + fp) if (tp + fp).any() else 0
recall = tp / (tp + fn) if (tp + fn).any() else 0
f1 = 2 * precision * recall / (precision + recall) if (precision + recall).any() else 0
return accuracy, global_loss, np.nan_to_num(precision), np.nan_to_num(recall), np.nan_to_num(f1), cm
def test(data_loader):
"""
:return: precision[numpy array], recall[numpy array], f1 score [numpy array], accuracy, confusion matrix
"""
print("Strating Train Phase")
num_iteration = 0
deep_punctuation.eval()
# +1 for overall result
tp = np.zeros(1+len(punctuation_dict), dtype=int)
fp = np.zeros(1+len(punctuation_dict), dtype=int)
fn = np.zeros(1+len(punctuation_dict), dtype=int)
cm = np.zeros((len(punctuation_dict), len(punctuation_dict)), dtype=int)
correct = 0
total = 0
with torch.no_grad():
for x, y, att, y_mask in tqdm(data_loader, desc='test'):
x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
y_mask = y_mask.view(-1)
if args.use_crf:
y_predict = deep_punctuation(x, att, y)
y_predict = y_predict.view(-1)
y = y.view(-1)
else:
y_predict = deep_punctuation(x, att)
y = y.view(-1)
y_predict = y_predict.view(-1, y_predict.shape[2])
y_predict = torch.argmax(y_predict, dim=1).view(-1)
num_iteration += 1
y_mask = y_mask.view(-1)
correct += torch.sum(y_mask * (y_predict == y).long()).item()
total += torch.sum(y_mask).item()
for i in range(y.shape[0]):
if y_mask[i] == 0:
# we can ignore this because we know there won't be any punctuation in this position
# since we created this position due to padding or sub-word tokenization
continue
cor = y[i]
prd = y_predict[i]
if cor == prd:
tp[cor] += 1
else:
fn[cor] += 1
fp[prd] += 1
cm[cor][prd] += 1
# ignore first index which is for no punctuation
tp[-1] = np.sum(tp[1:])
fp[-1] = np.sum(fp[1:])
fn[-1] = np.sum(fn[1:])
precision = tp/(tp+fp) if (tp + fp).any() else 0
recall = tp/(tp+fn) if (tp + fn).any() else 0
f1 = 2 * precision * recall / (precision + recall) if (precision + recall).any() else 0
return np.nan_to_num(precision), np.nan_to_num(recall), np.nan_to_num(f1), correct/total, cm
def train():
with open(log_path, 'a') as f:
f.write(str(args)+'\n')
exp_date = "_".join(time.asctime().split(" ")[:3])
mlflow.set_tracking_uri('http://0.0.0.0:5000')
mlflow.set_experiment(f"exp_{args.language}_{exp_date}")
exp_id = mlflow.tracking.MlflowClient().get_experiment_by_name(f"exp_{args.language}_{exp_date}").experiment_id
with mlflow.start_run(experiment_id=exp_id, run_name=uniq_id):
# MLflow Tracking #0
model_parameters = {"model-name": args.pretrained_model, "seed": args.epoch, "language": args.language,
"epochs": args.epoch, "learning-rate": args.lr, "sequence-length": args.sequence_length,
"batch-size": args.batch_size, "lstm-dim": args.lstm_dim,
"loss-weighted": t_weight, "crf": args.use_crf, "weight-decay": args.decay,
"gradient-clip": args.gradient_clip,
"augment-rate": args.augment_rate, "augment-type": args.augment_type,
"alpha-sub": args.alpha_sub, "alpha-del": args.alpha_del,
}
db_characters = {"train-set": len(train_set),
"dev-set": len(val_set),
"test-set": len(test_set_ref)}
mlflow.log_params(model_parameters) # Log a model parameters
mlflow.log_params(db_characters) # Log a database characteristics
# MLflow Tracking - end #
batch_norm = []
best_val_acc = -1
for epoch in range(args.epoch):
train_loss = 0.0
train_iteration = 0
correct = 0
total = 0
print("Star Training ...")
deep_punctuation.train()
for x, y, att, y_mask in tqdm(train_loader, desc='train'):
x, y, att, y_mask = x.to(device), y.to(device), att.to(device), y_mask.to(device)
y_mask = y_mask.view(-1)
if args.use_crf:
loss = deep_punctuation.log_likelihood(x, att, y)
# y_predict = deep_punctuation(x, att, y)
# y_predict = y_predict.view(-1)
# y = y.view(-1)
else:
y_predict = deep_punctuation(x, att)
y_predict = y_predict.view(-1, y_predict.shape[2])
y = y.view(-1)
loss = criterion(y_predict, y)
y_predict = torch.argmax(y_predict, dim=1).view(-1)
correct += torch.sum(y_mask * (y_predict == y).long()).item()
optimizer.zero_grad()
train_loss += loss.item()
train_iteration += 1
loss.backward()
# Doing Gradient clipping, very useful!
if args.gradient_clip > 0:
torch.nn.utils.clip_grad_norm_(deep_punctuation.parameters(), max_norm=2.0, norm_type=2)
# Calculate gradient norms
for n, layer in enumerate(deep_punctuation.ordered_layers):
if n == 2:
norm_grad = layer.weight.grad.norm().cpu()
batch_norm.append(norm_grad.numpy())
optimizer.step()
y_mask = y_mask.view(-1)
total += torch.sum(y_mask).item()
train_acc = correct / total
train_loss /= train_iteration
log = 'epoch: {}, Train loss: {}, Train accuracy: {}'.format(epoch, train_loss, train_acc)
# MLflow Tracking#
train_metrics = {"train_loss": train_loss, "train_accuracy": train_acc, "GradientNorm": np.mean(batch_norm)}
mlflow.log_metrics(train_metrics, step=epoch + 1)
# Print in log
with open(log_path, 'a') as f:
f.write(log + '\n')
print(log)
val_acc, val_loss, val_precision, val_recall, val_f1, val_cm = validate(val_loader)
log = 'epoch: {}, Val loss: {}, Val accuracy: {}\n'.format(epoch, val_loss, val_acc)
log_val_metrics = f'Precision: {val_precision}\n' \
f'Recall: {val_recall}\n' \
f'F1 score: {val_f1}\n'
# Print log
with open(log_path, 'a') as f:
f.write(log)
f.write(log_val_metrics)
print(log)
print(log_val_metrics)
if val_acc > best_val_acc:
best_val_acc = val_acc
torch.save(deep_punctuation.state_dict(), model_save_path)
# MLflow Tracking #
val_metrics = {"eval_loss": val_loss, "val_accuracy": val_acc,
"P_Lower": val_precision[0], "P_Lower-Comma": val_precision[1],
"P_Lower-Period": val_precision[2], "P_All-Capital": val_precision[4],
"P_Frits-Capital": val_precision[5], "P_All-Capital-Comma": val_precision[6],
"P_All-Capital-Period": val_precision[7], "P_Frits-Capital-Comma": val_precision[9],
"P_Frits-Capital-Period": val_precision[10],
#
"R_Lower": val_recall[0], "R_Lower-Comma": val_recall[1], "R_Lower-Period": val_recall[2],
"R_All-Capital": val_recall[4], "R_Frits-Capital": val_recall[5],
"R_All-Capital-Comma": val_recall[6], "R_All-Capital-Period": val_recall[7],
"R_Frits-Capital-Comma": val_recall[9], "R_Frits-Capital-Period": val_recall[10],
#
"F1_Lower": val_f1[0], "F1_Lower-Comma": val_f1[1], "F1_Lower-Period": val_f1[2],
"F1_All-Capital": val_f1[4], "F1_Frits-Capital": val_f1[5],
"F1_All-Capital-Comma": val_f1[6], "F1_All-Capital-Period": val_f1[7],
"F1_Frits-Capital-Comma": val_f1[9], "F1_Frits-Capital-Period": val_f1[10],
}
mlflow.log_metrics(val_metrics, step=epoch + 1)
print('Best validation Acc:', best_val_acc)
deep_punctuation.load_state_dict(torch.load(model_save_path))
for loader in test_loaders:
precision, recall, f1, accuracy, cm = test(loader)
log = 'Precision: ' + str(precision) + '\n' + 'Recall: ' + str(recall) + '\n' + 'F1 score: ' + str(f1) + \
'\n' + 'Accuracy:' + str(accuracy) + '\n' + 'Confusion Matrix' + str(cm) + '\n'
print(log)
# MLflow Tracking#
test_metrics = {"test_acc": accuracy}
mlflow.log_metrics(test_metrics)
# Print in log
with open(log_path, 'a') as f:
f.write(log)
log_text = ''
for i in range(1, 5):
log_text += str(precision[i] * 100) + ' ' + str(recall[i] * 100) + ' ' + str(f1[i] * 100) + ' '
with open(log_path, 'a') as f:
f.write(log_text[:-1] + '\n\n')
if __name__ == '__main__':
train()
``` |
{
"source": "jmasselink/geopy",
"score": 2
} |
#### File: test/adapters/retry_after.py
```python
import datetime
import time
from unittest.mock import patch
import pytest
from geopy.adapters import get_retry_after
@pytest.mark.parametrize(
"headers, expected_retry_after",
[
({}, None),
({"retry-after": "42"}, 42),
({"retry-after": "Wed, 21 Oct 2015 07:28:44 GMT"}, 43),
({"retry-after": "Wed, 21 Oct 2015 06:28:44 GMT"}, 0),
({"retry-after": "Wed"}, None),
],
)
def test_get_retry_after(headers, expected_retry_after):
current_time = datetime.datetime(
2015, 10, 21, 7, 28, 1, tzinfo=datetime.timezone.utc
).timestamp()
with patch.object(time, "time", return_value=current_time):
assert expected_retry_after == get_retry_after(headers)
```
#### File: test/geocoders/algolia.py
```python
from geopy.geocoders import AlgoliaPlaces
from geopy.point import Point
from test.geocoders.util import BaseTestGeocoder, env
class TestAlgoliaPlaces(BaseTestGeocoder):
@classmethod
def make_geocoder(cls, **kwargs):
return AlgoliaPlaces(
app_id=env.get('ALGOLIA_PLACES_APP_ID'),
api_key=env.get('ALGOLIA_PLACES_API_KEY'),
timeout=3,
**kwargs)
async def test_user_agent_custom(self):
geocoder = self.make_geocoder(
user_agent='my_user_agent/1.0'
)
assert geocoder.headers['User-Agent'] == 'my_user_agent/1.0'
async def test_geocode(self):
location = await self.geocode_run(
{'query': 'москва'},
{'latitude': 55.75587, 'longitude': 37.61768},
)
assert 'Москва' in location.address
async def test_reverse(self):
location = await self.reverse_run(
{'query': '51, -0.13', 'language': 'en'},
{'latitude': 51, 'longitude': -0.13},
)
assert 'A272' in location.address
async def test_explicit_type(self):
location = await self.geocode_run(
{'query': 'Madrid', 'type': 'city', 'language': 'en'},
{},
)
assert 'Madrid' in location.address
async def test_limit(self):
limit = 5
locations = await self.geocode_run(
{'query': 'Madrid', 'type': 'city',
'language': 'en', 'exactly_one': False,
'limit': limit},
{},
)
assert len(locations) == limit
async def test_countries(self):
countries = ["ES"]
location = await self.geocode_run(
{'query': 'Madrid', 'language': 'en',
'countries': countries},
{},
)
assert "Madrid" in location.address
async def test_countries_no_result(self):
countries = ["UA", "RU"]
await self.geocode_run(
{'query': 'Madrid', 'language': 'en',
'countries': countries},
{},
expect_failure=True
)
async def test_geocode_no_result(self):
await self.geocode_run(
{'query': 'sldkfhdskjfhsdkhgflaskjgf'},
{},
expect_failure=True,
)
async def test_around(self):
await self.geocode_run(
{'query': 'maple street', 'language': 'en', 'around': Point(51.1, -0.1)},
{'latitude': 51.5299, 'longitude': -0.0628044, "delta": 1},
)
await self.geocode_run(
{'query': 'maple street', 'language': 'en', 'around': Point(50.1, 10.1)},
{'latitude': 50.0517, 'longitude': 10.1966, "delta": 1},
)
```
#### File: test/geocoders/geocodeearth.py
```python
from geopy.geocoders import GeocodeEarth
from test.geocoders.pelias import BaseTestPelias
from test.geocoders.util import env
class TestGeocodeEarth(BaseTestPelias):
@classmethod
def make_geocoder(cls, **kwargs):
return GeocodeEarth(env['GEOCODEEARTH_KEY'],
**kwargs)
```
#### File: test/geocoders/util.py
```python
import json
import os
from abc import ABC, abstractmethod
from unittest.mock import ANY, patch
import pytest
from async_generator import async_generator, asynccontextmanager, yield_
from geopy import exc
from geopy.adapters import BaseAsyncAdapter
from geopy.location import Location
_env = {}
try:
with open(".test_keys") as fp:
_env.update(json.loads(fp.read()))
except IOError:
_env.update(os.environ)
class SkipIfMissingEnv(dict):
def __init__(self, env):
super().__init__(env)
self.is_internet_access_allowed = None
def __getitem__(self, key):
assert self.is_internet_access_allowed is not None
if key not in self:
if self.is_internet_access_allowed:
pytest.skip("Missing geocoder credential: %s" % (key,))
else:
                # Generate some dummy token. We won't perform a networking
                # request anyway.
return "dummy"
return super().__getitem__(key)
env = SkipIfMissingEnv(_env)
class BaseTestGeocoder(ABC):
"""
Base for geocoder-specific test cases.
"""
geocoder = None
delta = 0.5
@pytest.fixture(scope='class', autouse=True)
@async_generator
async def class_geocoder(_, request, patch_adapter, is_internet_access_allowed):
"""Prepare a class-level Geocoder instance."""
cls = request.cls
env.is_internet_access_allowed = is_internet_access_allowed
geocoder = cls.make_geocoder()
cls.geocoder = geocoder
run_async = isinstance(geocoder.adapter, BaseAsyncAdapter)
if run_async:
async with geocoder:
await yield_(geocoder)
else:
await yield_(geocoder)
@classmethod
@asynccontextmanager
@async_generator
async def inject_geocoder(cls, geocoder):
"""An async context manager allowing to inject a custom
geocoder instance in a single test method which will
be used by the `geocode_run`/`reverse_run` methods.
"""
with patch.object(cls, 'geocoder', geocoder):
run_async = isinstance(geocoder.adapter, BaseAsyncAdapter)
if run_async:
async with geocoder:
await yield_(geocoder)
else:
await yield_(geocoder)
@pytest.fixture(autouse=True)
def ensure_no_geocoder_assignment(self):
yield
assert self.geocoder is type(self).geocoder, (
"Detected `self.geocoder` assignment. "
"Please use `async with inject_geocoder(my_geocoder):` "
"instead, which supports async adapters."
)
@classmethod
@abstractmethod
def make_geocoder(cls, **kwargs): # pragma: no cover
pass
async def geocode_run(
self, payload, expected,
*,
skiptest_on_errors=True,
expect_failure=False
):
"""
Calls geocoder.geocode(**payload), then checks against `expected`.
"""
cls = type(self)
result = await self._make_request(
self.geocoder, 'geocode',
skiptest_on_errors=skiptest_on_errors,
**payload,
)
if expect_failure:
assert result is None
return
if result is None:
pytest.fail('%s: No result found' % cls.__name__)
if result == []:
pytest.fail('%s returned an empty list instead of None' % cls.__name__)
self._verify_request(result, exactly_one=payload.get('exactly_one', True),
**expected)
return result
async def reverse_run(
self, payload, expected,
*,
skiptest_on_errors=True,
expect_failure=False
):
"""
Calls geocoder.reverse(**payload), then checks against `expected`.
"""
cls = type(self)
result = await self._make_request(
self.geocoder, 'reverse',
skiptest_on_errors=skiptest_on_errors,
**payload,
)
if expect_failure:
assert result is None
return
if result is None:
pytest.fail('%s: No result found' % cls.__name__)
if result == []:
pytest.fail('%s returned an empty list instead of None' % cls.__name__)
self._verify_request(result, exactly_one=payload.get('exactly_one', True),
**expected)
return result
async def reverse_timezone_run(self, payload, expected, *, skiptest_on_errors=True):
timezone = await self._make_request(
self.geocoder, 'reverse_timezone',
skiptest_on_errors=skiptest_on_errors,
**payload,
)
if expected is None:
assert timezone is None
else:
assert timezone.pytz_timezone == expected
return timezone
async def _make_request(self, geocoder, method, *, skiptest_on_errors, **kwargs):
cls = type(self)
call = getattr(geocoder, method)
run_async = isinstance(geocoder.adapter, BaseAsyncAdapter)
try:
if run_async:
result = await call(**kwargs)
else:
result = call(**kwargs)
except exc.GeocoderRateLimited as e:
if not skiptest_on_errors:
raise
pytest.skip(
"%s: Rate-limited, retry-after %s" % (cls.__name__, e.retry_after)
)
except exc.GeocoderQuotaExceeded:
if not skiptest_on_errors:
raise
pytest.skip("%s: Quota exceeded" % cls.__name__)
except exc.GeocoderTimedOut:
if not skiptest_on_errors:
raise
pytest.skip("%s: Service timed out" % cls.__name__)
except exc.GeocoderUnavailable:
if not skiptest_on_errors:
raise
pytest.skip("%s: Service unavailable" % cls.__name__)
return result
def _verify_request(
self,
result,
latitude=ANY,
longitude=ANY,
address=ANY,
exactly_one=True,
delta=None,
):
if exactly_one:
assert isinstance(result, Location)
else:
assert isinstance(result, list)
item = result if exactly_one else result[0]
delta = delta or self.delta
expected = (
pytest.approx(latitude, abs=delta) if latitude is not ANY else ANY,
pytest.approx(longitude, abs=delta) if longitude is not ANY else ANY,
address,
)
received = (
item.latitude,
item.longitude,
item.address,
)
assert received == expected
```
#### File: test/geocoders/what3words.py
```python
from unittest.mock import patch
import pytest
import geopy.exc
import geopy.geocoders
from geopy.geocoders import What3Words, What3WordsV3
from geopy.geocoders.what3words import _check_query
from test.geocoders.util import BaseTestGeocoder, env
class TestUnitWhat3Words:
dummy_api_key = 'DUMMYKEY1234'
async def test_user_agent_custom(self):
geocoder = What3Words(
api_key=self.dummy_api_key,
user_agent='my_user_agent/1.0'
)
assert geocoder.headers['User-Agent'] == 'my_user_agent/1.0'
@patch.object(geopy.geocoders.options, 'default_scheme', 'http')
def test_default_scheme_is_ignored(self):
geocoder = What3Words(api_key=self.dummy_api_key)
assert geocoder.scheme == 'https'
def test_check_query(self):
result_check_threeword_query = _check_query(
"\u0066\u0061\u0068\u0072\u0070\u0072"
"\u0065\u0069\u0073\u002e\u006c\u00fc"
"\u0067\u006e\u0065\u0072\u002e\u006b"
"\u0075\u0074\u0073\u0063\u0068\u0065"
)
assert result_check_threeword_query
class BaseTestWhat3Words(BaseTestGeocoder):
async def test_geocode(self):
await self.geocode_run(
{"query": "piped.gains.jangle"},
{"latitude": 53.037611, "longitude": 11.565012},
)
async def test_reverse(self):
await self.reverse_run(
{"query": "53.037611,11.565012", "lang": 'DE'},
{"address": 'fortschrittliche.voll.schnitt'},
)
async def test_unicode_query(self):
await self.geocode_run(
{
"query": (
"\u0070\u0069\u0070\u0065\u0064\u002e\u0067"
"\u0061\u0069\u006e\u0073\u002e\u006a\u0061"
"\u006e\u0067\u006c\u0065"
)
},
{"latitude": 53.037611, "longitude": 11.565012},
)
async def test_empty_response(self):
with pytest.raises(geopy.exc.GeocoderQueryError):
await self.geocode_run(
{"query": "definitely.not.existingiswearrrr"},
{},
expect_failure=True
)
async def test_not_exactly_one(self):
await self.geocode_run(
{"query": "piped.gains.jangle", "exactly_one": False},
{"latitude": 53.037611, "longitude": 11.565012},
)
await self.reverse_run(
{"query": (53.037611, 11.565012), "exactly_one": False},
{"address": "piped.gains.jangle"},
)
async def test_reverse_language(self):
await self.reverse_run(
{"query": (53.037611, 11.565012), "lang": "en", "exactly_one": False},
{"address": "piped.gains.jangle"},
)
class TestWhat3Words(BaseTestWhat3Words):
@classmethod
def make_geocoder(cls, **kwargs):
return What3Words(
env['WHAT3WORDS_KEY'],
timeout=3,
**kwargs
)
async def test_geocode_language(self):
await self.geocode_run(
{"query": "piped.gains.jangle", "lang": 'DE'},
{"address": 'fortschrittliche.voll.schnitt'},
)
class TestWhat3WordsV3(BaseTestWhat3Words):
@classmethod
def make_geocoder(cls, **kwargs):
return What3WordsV3(
env['WHAT3WORDS_KEY'],
timeout=3,
**kwargs
)
```
#### File: geopy/test/proxy_server.py
```python
import base64
import http.server as SimpleHTTPServer
import select
import socket
import socketserver as SocketServer
import threading
from urllib.request import urlopen
def pipe_sockets(sock1, sock2, timeout):
"""Pipe data from one socket to another and vice-versa."""
sockets = [sock1, sock2]
try:
while True:
rlist, _, xlist = select.select(sockets, [], sockets, timeout)
if xlist:
break
for sock in rlist:
data = sock.recv(8096)
if not data: # disconnected
break
other = next(s for s in sockets if s is not sock)
other.sendall(data)
except IOError:
pass
finally:
for sock in sockets:
sock.close()
class Future:
# concurrent.futures.Future docs say that they shouldn't be instantiated
# directly, so this is a simple implementation which mimics the Future
# which can safely be instantiated!
def __init__(self):
self._event = threading.Event()
self._result = None
self._exc = None
def result(self, timeout=None):
if not self._event.wait(timeout):
raise AssertionError("Future timed out")
if self._exc is not None:
raise self._exc
return self._result
def set_result(self, result):
self._result = result
self._event.set()
def set_exception(self, exception):
self._exc = exception
self._event.set()
class ProxyServerThread(threading.Thread):
spinup_timeout = 10
def __init__(self, timeout=None):
self.proxy_host = 'localhost'
self.proxy_port = None # randomly selected by OS
self.timeout = timeout
self.proxy_server = None
self.socket_created_future = Future()
self.requests = []
self.auth = None
super().__init__()
self.daemon = True
def reset(self):
self.requests.clear()
self.auth = None
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.join()
def set_auth(self, username, password):
self.auth = "%s:%s" % (username, password)
def get_proxy_url(self, with_scheme=True):
assert self.socket_created_future.result(self.spinup_timeout)
if self.auth:
auth = "%s@" % self.auth
else:
auth = ""
if with_scheme:
scheme = "http://"
else:
scheme = ""
return "%s%s%s:%s" % (scheme, auth, self.proxy_host, self.proxy_port)
def run(self):
assert not self.proxy_server, ("This class is not reentrable. "
"Please create a new instance.")
requests = self.requests
proxy_thread = self
class Proxy(SimpleHTTPServer.SimpleHTTPRequestHandler):
timeout = self.timeout
def check_auth(self):
if proxy_thread.auth is not None:
auth_header = self.headers.get('Proxy-Authorization')
b64_auth = base64.standard_b64encode(
proxy_thread.auth.encode()
).decode()
expected_auth = "Basic %s" % b64_auth
if auth_header != expected_auth:
self.send_response(401)
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(
(
"not authenticated. Expected %r, received %r"
% (expected_auth, auth_header)
).encode()
)
self.connection.close()
return False
return True
def do_GET(self):
if not self.check_auth():
return
requests.append(self.path)
req = urlopen(self.path, timeout=self.timeout)
self.send_response(req.getcode())
content_type = req.info().get('content-type', None)
if content_type:
self.send_header('Content-Type', content_type)
self.send_header('Connection', 'close')
self.end_headers()
self.copyfile(req, self.wfile)
self.connection.close()
req.close()
def do_CONNECT(self):
if not self.check_auth():
return
requests.append(self.path)
# Make a raw TCP connection to the target server
host, port = self.path.split(':')
try:
addr = host, int(port)
other_connection = \
socket.create_connection(addr, timeout=self.timeout)
except socket.error:
self.send_error(502, 'Bad gateway')
return
# Respond that a tunnel has been created
self.send_response(200)
self.send_header('Connection', 'close')
self.end_headers()
pipe_sockets(self.connection, # it closes sockets
other_connection, self.timeout)
# ThreadingTCPServer offloads connections to separate threads, so
# the serve_forever loop doesn't block until connection is closed
# (unlike TCPServer). This allows to shutdown the serve_forever loop
# even if there's an open connection.
try:
self.proxy_server = SocketServer.ThreadingTCPServer(
(self.proxy_host, 0),
Proxy
)
# don't hang if there're some open connections
self.proxy_server.daemon_threads = True
self.proxy_port = self.proxy_server.server_address[1]
except Exception as e:
self.socket_created_future.set_exception(e)
raise
else:
self.socket_created_future.set_result(True)
self.proxy_server.serve_forever()
def stop(self):
self.proxy_server.shutdown() # stop serve_forever()
self.proxy_server.server_close()
class HttpServerThread(threading.Thread):
spinup_timeout = 10
def __init__(self, timeout=None):
self.server_host = 'localhost'
self.server_port = None # randomly selected by OS
self.timeout = timeout
self.http_server = None
self.socket_created_future = Future()
super(HttpServerThread, self).__init__()
self.daemon = True
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
self.join()
def get_server_url(self):
assert self.socket_created_future.result(self.spinup_timeout)
return "http://%s:%s" % (self.server_host, self.server_port)
def run(self):
assert not self.http_server, ("This class is not reentrable. "
"Please create a new instance.")
class Server(SimpleHTTPServer.SimpleHTTPRequestHandler):
timeout = self.timeout
def do_GET(self):
if self.path == "/":
self.send_response(200)
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(b"Hello world")
elif self.path == "/json":
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(b'{"hello":"world"}')
elif self.path == "/json/plain":
self.send_response(200)
self.send_header('Content-type', 'text/plain;charset=utf-8')
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(b'{"hello":"world"}')
else:
self.send_response(404)
self.send_header('Connection', 'close')
self.send_header('X-test-header', 'hello')
self.end_headers()
self.wfile.write(b"Not found")
self.connection.close()
# ThreadingTCPServer offloads connections to separate threads, so
# the serve_forever loop doesn't block until connection is closed
# (unlike TCPServer). This allows to shutdown the serve_forever loop
# even if there's an open connection.
try:
self.http_server = SocketServer.ThreadingTCPServer(
(self.server_host, 0),
Server
)
# don't hang if there're some open connections
self.http_server.daemon_threads = True
self.server_port = self.http_server.server_address[1]
except Exception as e:
self.socket_created_future.set_exception(e)
raise
else:
self.socket_created_future.set_result(True)
self.http_server.serve_forever()
def stop(self):
self.http_server.shutdown() # stop serve_forever()
self.http_server.server_close()
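# A minimal usage sketch (not part of the original module; names are assumed):
# both servers are context managers, so a test can pair them up like
#
#     with HttpServerThread() as server, ProxyServerThread() as proxy:
#         target = server.get_server_url() + "/json"
#         proxies = {"http": proxy.get_proxy_url()}
#         # point the HTTP client under test at `proxies`, fetch `target`,
#         # then inspect proxy.requests to verify the call went through it.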
``` |
{
"source": "J-Massey/lotus_docs",
"score": 3
} |
#### File: sphere/postproc/stat.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
#
# read data and drop unwanted rows and columns
try:
df = pd.read_csv('fort.9',delim_whitespace = True,
names=["time","CFL","drag","lift","side"])
except FileNotFoundError:
exit('stat: fort.9 not found')
df.drop(df.index[:3], inplace=True)
try:
mg = pd.read_csv('fort.8',delim_whitespace = True,
names=["itr","res0","res","inf"])[2:]
except FileNotFoundError:
exit('stat: fort.8 not found')
#
# -- plot PDF pages
def str_rnd(num,d=4): return str(round(num,d))
def plot_hist(pdf,name,label):
ax = df.plot(x='time',y=name,figsize=(8,4))
plt.xlabel(r'$t/T$', fontsize=12)
plt.ylabel(label, fontsize=12)
mean,mad = df[name].mean(), 1.5748*df[name].mad()
x1,x2,y1,y2 = plt.axis()
mx,mn = min(y2,mean+3*mad),max(y1,mean-3*mad)
plt.ylim([mn,mx])
txt = 'mean='+str_rnd(mean)+', mad='+str_rnd(mad)
plt.text(0.5,0.01,txt,transform=ax.transAxes)
pdf.savefig()
plt.close()
with PdfPages('history.pdf') as pdf:
plot_hist(pdf,name='drag',label=r'$C_{Xp}$')
plot_hist(pdf,name='lift',label=r'$C_{Yp}$')
plot_hist(pdf,name='side',label=r'$C_{Zp}$')
plot_hist(pdf,name='CFL',label=r'$\frac{\Delta t U}{\Delta x}$')
mg.plot(y=['res0','res','inf'],figsize=(8,4))
plt.yscale('log')
pdf.savefig()
mg.plot(y='itr',figsize=(8,4))
pdf.savefig()
``` |
{
"source": "J-Massey/postproc",
"score": 3
} |
#### File: figures/animations/isocontours.py
```python
import os
import postproc.calc as calc
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import matplotlib.colors as colors
import seaborn as sns
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tkinter import Tcl
import imageio
from tqdm import tqdm
from pygifsicle import optimize
def plot_2D_fp_isocontours(data, interest, fn_save):
plt.style.use(['science'])
fig, ax = plt.subplots(figsize=(9, 4))
ax.set_xlabel(r'$x/D$')
ax.set_ylabel(r'$y/D$')
cmap = sns.color_palette("icefire", as_cmap=True)
plt.title(title)
divider = make_axes_locatable(ax)
# Plot the window of interest
ax.set_xlim(-2.5, 6)
ax.set_ylim(-3, 3)
X, Y = data[0:2]
u, v, w = data[2:-1]
p = data[-1][0]
# Now plot what we are interested in
if interest == 'p':
vals = p*2
cmap = sns.color_palette("PRGn_r", as_cmap=True)
elif interest == 'u':
vals = u
elif interest == 'v':
vals = np.mean(v, axis=2)
elif interest == 'mag':
U, V = np.mean(u, axis=2), np.mean(v, axis=2)
vals = np.sqrt(V ** 2 + U ** 2)
# vals = vals * data.iter_correction(30)
elif interest == 'vort':
U, V = np.mean(u, axis=2), np.mean(v, axis=2)
vals = calc.vortZ(U, V)
# vals = -data.p * data.length_scale # Need to scale by length scale
cmap = sns.color_palette("seismic", as_cmap=True)
grey_color = '#dedede'
circle = patches.Circle((0, 0), radius=0.5, linewidth=0.2, edgecolor='black', facecolor=grey_color)
ax.add_patch(circle)
lim = [np.min(vals), np.max(vals)]
# lim = [0, 1.4]
# lim = [-0.2, 0.2]
lim = [-1.9, 1.]
norm = colors.Normalize(vmin=lim[0], vmax=lim[1])
    lvls = 121  # default number of contour levels; used only when step is None
step = 0.01
if step is not None:
lvls = np.arange(lim[0], lim[1]+step, step)
else:
lvls = np.linspace(lim[0], lim[1], lvls)
if filled:
cs = ax.contourf(X, Y, np.transpose(vals),
levels=lvls, vmin=lim[0], vmax=lim[1],
norm=norm, cmap=cmap, extend='both')
ax_cb = divider.new_horizontal(size="5%", pad=0.05)
fig.add_axes(ax_cb)
plt.colorbar(cs, cax=ax_cb)
ax_cb.yaxis.tick_right()
# ax_cb.yaxis.set_major_formatter(FormatStrFormatter('%1.1f'))
else:
cs = ax.contour(X, Y, np.transpose(vals),
levels=lvls, vmin=lim[0], vmax=lim[1],
colors=['k'], linewidths=0.4)
ax.set_aspect(1)
plt.savefig(fn_save, dpi=300)
plt.close()
def save_frames(data, folder, interest):
for idx, snap in tqdm(enumerate(data), desc='Plotting frames'):
da = np.array(snap).T
plot_2D_fp_isocontours(da, interest, os.path.join(folder, str(idx) + '.png'))
def animate(data, folder, interest):
save_frames(data, folder, interest)
# Sort filenames to make sure they're in order
fn_images = os.listdir(folder)
fn_images = Tcl().call('lsort', '-dict', fn_images)
# Create gif
gif_path = folder + '/flow'+interest+'.gif'
with imageio.get_writer(gif_path, mode='I', duration=0.15) as writer:
for filename in tqdm(fn_images[::4], desc='Loop images'):
writer.append_data(imageio.imread(os.path.join(folder, filename)))
optimize(gif_path)
class SnapShots:
def __init__(self, snap):
self.snaps = snap
mean_t = np.mean(np.array(self.snaps).T, axis=1)
self.X, self.Y = mean_t[0:2]
self.u, self.v, self.w = mean_t[2:-1]
self.U, self.V = np.mean(self.u, axis=2), np.mean(self.v, axis=2)
self.p = np.mean(mean_t[-1], axis=0)
if __name__ == "__main__":
snaps = np.load('snapshots/flow_snaps.npy', allow_pickle=True)
data_root = '/home/masseyjmo/Workspace/Lotus/solver/postproc/circular_cylinder/figures/animations'
interest = 'p'
filled = True
title = '$ p $'
animate(snaps, os.path.join(data_root, 'frames_' + interest), interest)
mean_ = np.mean(np.array(snaps).T, axis=1)
fn_save = os.path.join(data_root + '/sim_' + interest + '.pdf')
plot_2D_fp_isocontours(np.array(snaps).T[:, 102], interest, fn_save)
```
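For reference, a sketch of the snapshot layout the plotting function above expects. The shapes and values here are made up; only the `[X, Y, u, v, w, p]` ordering is taken from the unpacking inside `plot_2D_fp_isocontours`:

```python
import numpy as np

nx, ny, nz = 128, 96, 8
X, Y = np.meshgrid(np.linspace(-2.5, 6, nx), np.linspace(-3, 3, ny))  # (ny, nx)
u = np.random.rand(nx, ny, nz)   # instantaneous velocity components
v = np.random.rand(nx, ny, nz)
w = np.random.rand(nx, ny, nz)
p = np.random.rand(nz, nx, ny)   # pressure; data[-1][0] selects one slice

snapshot = [X, Y, u, v, w, p]
# With the module-level `title` and `filled` globals set as in __main__:
# plot_2D_fp_isocontours(snapshot, 'p', 'example.png')
```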
#### File: Train/neural_net/nn_regression.py
```python
import numpy as np
import torch as torch
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from torch.autograd import Variable
from plot import *
import _pickle as cPickle
import os
import signal
from timeit import timeit
# -----------------------------------------------------------
class CfData(torch.utils.data.Dataset):
def __init__(self, X, Y):
self.X, self.Y = X, Y
def __len__(self):
return len(self.X)
def __getitem__(self, idx):
x_batch = self.X[idx]
y_batch = self.Y[idx]
sample = {'X': x_batch, 'Y': y_batch}
return sample
# -----------------------------------------------------------
class LinearRegression(torch.nn.Module):
def __init__(self, input_size: int, poly_n: int):
super(LinearRegression, self).__init__()
self.linear = torch.nn.Linear(input_size, poly_n)
self.oupt = torch.nn.Linear(poly_n, 1)
def forward(self, x):
out = self.oupt(torch.nn.ReLU()(self.linear(x)))
return out
# -----------------------------------------------------------
def accuracy(model, X, Y):
    # Percentage agreement between the mean prediction and the mean ground truth.
with torch.no_grad():
oupt = 100 * (1 - torch.abs(torch.squeeze(model(X)).mean() - Y.mean()) / Y.mean())
return oupt
def compare_data(model, poly_n: int, device="cuda", angles=32, fn='model.pdf'):
# Get mean quantities
with open('fos.pickle', "rb") as f:
fos = cPickle.load(f)
p_data = np.load('data.npy').astype(np.float32)
split = len(p_data[:, 0]) // (len(fos['t']))
gt = p_data[:, -1]
with torch.no_grad():
cd_hat = (torch.squeeze(model(torch.tensor(p_data[:, 0:-1],
device=device)))
.cpu().detach().numpy())
cd_hat = np.array([cd_hat[i * len(fos['t']):(i + 1) * len(fos['t'])] for i in range(split)])
gt = np.array([gt[i * len(fos['t']):(i + 1) * len(fos['t'])] for i in range(split)])
plot_model(np.mean(cd_hat, axis=0), fos, np.mean(gt, axis=0), fn=fn)
def compare_model(model, poly_n: int, device="cuda", angles=32, fn='model_gt.pdf'):
# Get mean quantities
with open('fos.pickle', "rb") as f:
fos = cPickle.load(f)
chunk = angles * len(fos['t'])
p_data = np.load('data.npy').astype(np.float32)
gt = p_data[0:chunk, -1]
with torch.no_grad():
cd_hat = (torch.squeeze(model(torch.tensor(p_data[0:chunk, 0:-1],
device=device)))
.cpu().detach().numpy())
cd_hat = np.array([cd_hat[i * len(fos['t']):(i + 1) * len(fos['t'])] for i in range(angles)])
gt = np.array([gt[i * len(fos['t']):(i + 1) * len(fos['t'])] for i in range(angles)])
plot_model(np.mean(cd_hat, axis=0), fos, np.mean(gt, axis=0), fn=fn)
# -----------------------------------------------------------
def handler(signum, frame):
raise RuntimeError
def main(wd):
# 0. get started, seed for reproducibility
print("\nStart multivariate regression \n")
torch.manual_seed(1)
np.random.seed(1)
# 1. Split data and DataLoader objects
data = np.load('data.npy').astype(np.float32)
poly_n = 10
x_train, x_test, y_train, y_test = \
train_test_split(data[:, 0:-1], data[:, -1],
test_size=0.2, shuffle=False)
print("Create data generator ")
ds_train = CfData(x_train, y_train) # all 200 rows
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu:0")
params = {'batch_size': int(1028),
'shuffle': True,
'num_workers': 16,
'pin_memory': True}
train_ldr = torch.utils.data.DataLoader(ds_train, **params)
# 2. create network
model = LinearRegression(np.shape(x_train)[-1], poly_n)
if torch.cuda.device_count() > 1:
# model = torch.nn.DataParallel(model)
print(f"Using {torch.cuda.device_count()} GPUs")
else:
print(f"Using {torch.cuda.device_count()} GPU")
model.to(device)
# 3. train model
max_epochs = 70
ep_log_interval = 10
lrn_rate = 0.00001
wd = wd
loss_func = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lrn_rate, weight_decay=wd)
print("\nbat_size = %3d " % params['batch_size'])
print("loss = " + str(loss_func))
print("optimizer = Adam")
print("max_epochs = %3d " % max_epochs)
print("lrn_rate = %0.3f " % lrn_rate)
print("\nStarting training")
    # checkpoint path pattern: 'models/<wd>_nn_regression.pth'
try:
model.load_state_dict(torch.load('models/'+str(wd) + '_nn_regression.pth'))
except FileNotFoundError:
print('No model saved, starting from scratch')
except RuntimeError:
print('Trying different model')
finally:
pass
cum_loss = []
try:
for epoch in tqdm(range(0, int(max_epochs)), desc='Training net'):
epoch_loss = 0 # for one full epoch
for batch_idx, batch in enumerate(train_ldr):
X = (batch['X']).to(device)
Y = (batch['Y']).to(device)
optimizer.zero_grad()
oupt = model(X) # predicted income
loss_val = loss_func(torch.squeeze(oupt), Y)
epoch_loss += loss_val.item()
loss_val.backward()
optimizer.step()
signal.signal(signal.SIGINT, handler)
if epoch % ep_log_interval == 0:
cum_loss.append(epoch_loss)
print("epoch = %4d loss = %0.4f" % (epoch, cum_loss[-1]))
except RuntimeError:
print('Outta time bitch!')
print("\nDone ")
print("epochs = %4d : total loss = %0.7f" % (epoch, epoch_loss))
plot_loss(max_epochs, cum_loss, fn='figures/cost_wd_' + str(wd) + '.pdf')
# 4. evaluate model accuracy
print("\nComputing model accuracy")
model = model.eval()
acc_train = accuracy(model, X, Y) # item-by-item
print(f"Accuracy on training data = {acc_train:.4f} %")
    X_test = torch.from_numpy(x_test).to(device)
    Y_test = torch.from_numpy(y_test).to(device)
acc_test = accuracy(model, X_test, Y_test) # item-by-item
print(f"Accuracy on test data = {acc_test:.4f} %")
# 5. save model (state_dict approach)
print("\nSaving trained model state")
torch.save(model.state_dict(), 'models/'+str(wd) + '_nn_regression.pth')
# 6. Test accuracy on test data and plot results
print("\nCompare model to original data")
compare_model(model, poly_n, fn='figures/model_gt_wd_'+str(wd)+'.pdf')
    try:
        compare_data(model, poly_n, fn='figures/model_data_wd_' + str(wd) + '.pdf')
    except Exception:
        print('CD func didnt work')
print("\nBetter?")
if __name__ == "__main__":
main(0.0)
for w in [-4, -3]:
main(10**w)
```
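The script assumes a `data.npy` whose rows are `[feature_1, ..., feature_k, target]` (the target being the force coefficient) plus a `fos.pickle` with at least a `'t'` time array for the comparison plots. A made-up smoke-test input could be generated like this:

```python
import numpy as np

n_rows, n_features = 2000, 6          # arbitrary sizes for a quick test
dummy = np.random.rand(n_rows, n_features + 1).astype(np.float32)
np.save('data.npy', dummy)
# main(0.0) would then fit LinearRegression(input_size=n_features, poly_n=10);
# compare_model()/compare_data() additionally expect 'fos.pickle' to exist.
```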
#### File: Train/neural_net/plot.py
```python
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
colours = sns.color_palette('rocket', 4)
plt.style.use(['science', 'grid'])
def plot_loss(epochs, cost, fn='cost.pdf'):
fig, ax = plt.subplots(figsize=(5, 3))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"Epochs")
ax.set_ylabel(r'$L_2$ loss')
ax.plot_fill(np.linspace(0, epochs, len(cost)), cost, label=r'$L_{2}$')
ax.legend()
plt.savefig(fn)
plt.close()
def plot_model(cd_hat, fos, Y, fn='model.pdf'):
fig, ax = plt.subplots(figsize=(5, 3))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"$t/D$")
ax.set_ylabel(r'$C_F$')
ax.plot_fill(fos['t'], Y * 0.0010518, label=r'Ground truth', color='k')
ax.plot_fill(fos['t'], cd_hat * 0.0010518, label=r'$\hat{Y}$', color=colours[2])
ax.legend()
plt.savefig(fn)
plt.close()
```
#### File: Train/scaling_correction/plot.py
```python
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import _pickle as cPickle
import torch
colors = sns.color_palette("husl", 4)
plt.style.use(['science', 'grid'])
def plot_loss(epochs, cost, fn='cost.pdf'):
fig, ax = plt.subplots(figsize=(5, 3))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"Epochs")
ax.set_ylabel(r'$L_2$ loss')
ax.plot_fill(np.linspace(0, epochs, len(cost)), cost, label=r'$L_{2}$')
ax.legend()
plt.savefig(fn)
plt.show()
def plot_model(cd_hat, fos, Y, fn='model.pdf'):
fig, ax = plt.subplots(figsize=(5, 3))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"$t/D$")
ax.set_ylabel(r'$C_{D_f}$')
ax.plot_fill(fos['t'], Y, label=r'Ground truth')
ax.plot_fill(fos['t'], cd_hat, label=r'$\hat{C_{D_f}}$')
ax.legend()
plt.savefig(fn)
plt.show()
def plot_training(fos, Y, fn='model.pdf'):
fig, ax = plt.subplots(figsize=(5, 3))
ax.tick_params(bottom="on", top="on", right="on", which='both', direction='in', length=2)
ax.set_xlabel(r"$t/D$")
ax.set_ylabel(r'$C_{D_f}$')
for idx, truths in enumerate(Y):
ax.plot_fill(fos['t'], truths, label=f'Sample {idx}')
ax.legend()
plt.savefig(fn)
plt.show()
def vis_data():
with open('fos.pickle', "rb") as f:
fos = cPickle.load(f)
p_data = np.load('data.npy').astype(np.float32)
chunk = 32 * len(fos['t'])
chunks = int(len(p_data)/chunk)
train_list = []
for s in range(chunks):
tmp = (p_data[s*chunk:(s+1)*chunk, -1])
print(np.mean(p_data[s*chunk:(s+1)*chunk], axis=0))
train = np.array([tmp[i * len(fos['t']):(i + 1) * len(fos['t'])] for i in range(32)])
train_list.append(np.mean(train, axis=0))
return train_list
def compare_model(model, poly_n: int, device="cuda", angles=32):
# Get mean quantities
with open('fos.pickle', "rb") as f:
fos = cPickle.load(f)
chunk = angles * len(fos['t'])
p_data = np.load('data.npy').astype(np.float32)
gt = p_data[0:chunk, -1]
with torch.no_grad():
cd_hat = (torch.squeeze(model(torch.tensor(p_data[0:chunk, 0:-1],
device=device)))
.cpu().detach().numpy())
cd_hat = np.array([cd_hat[i * len(fos['t']):(i + 1) * len(fos['t'])] for i in range(angles)])
gt = np.array([gt[i * len(fos['t']):(i + 1) * len(fos['t'])] for i in range(angles)])
plot_model(np.mean(cd_hat, axis=0), fos, np.mean(gt, axis=0), fn='model' + str(poly_n) + '.pdf')
if __name__ == "__main__":
with open('fos.pickle', "rb") as f:
fos = cPickle.load(f)
lsd = vis_data()
plot_training(fos, lsd, 'figures/vis.pdf')
```
#### File: Train/scaling_correction/scaling_correction.py
```python
import torch
from plot import *
from tqdm import tqdm
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
from torch.autograd import Variable
import _pickle as cPickle
import signal
def compare_model(model, device="cuda", angles=32, fn='model.pdf'):
# Get mean quantities
with open('fos.pickle', "rb") as f:
fos = cPickle.load(f)
chunk = angles * len(fos['t'])
data = np.load('data.npy').astype(np.float32)
gt = data[0:chunk, -1]
with torch.no_grad():
cd_hat = (torch.squeeze(model(torch.tensor(data[0:chunk, 0:-1],
device=device)))
.cpu().detach().numpy())
cd_hat = np.array([cd_hat[i * len(fos['t']):(i + 1) * len(fos['t'])] for i in range(angles)])
gt = np.array([gt[i * len(fos['t']):(i + 1) * len(fos['t'])] for i in range(angles)])
plot_model(np.mean(cd_hat, axis=0), fos, np.mean(gt, axis=0), fn=fn)
def handler(signum, frame):
raise RuntimeError
def main():
plt.style.use(['science', 'grid'])
data = np.load('data.npy')
x_train, x_test, y_train, y_test = train_test_split(data[:, 0:-1], data[:, -1],
test_size=0.1, shuffle=False)
# CUDA for PyTorch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
local_batch = torch.tensor(x_train, device=device)
local_truth = torch.tensor(y_train, device=device)
try:
t_coeffs = torch.load('models/coes_scaling.pt', map_location=torch.device(device))
print('Found previous state')
except FileNotFoundError:
print('New model, initialising coefficients')
t_coeffs = []
for i in range(3):
t_coeffs.append(torch.tensor(1., requires_grad=True, device=device))
# Define the prediction model
def model(x_input):
dpdx, u0, cf1 = x_input.t()
return cf1 * (t_coeffs[0] * dpdx + t_coeffs[1] * u0 ** 2 + t_coeffs[2] * u0)
# Loss function definition
def loss(y_hat, y_target):
return ((y_hat - y_target) ** 2).sum()
max_epochs = 1e5
lr = 1e-4
optimiser = torch.optim.Adam
# Setup the optimizer object, so it optimizes a and b.
optimizer = optimiser(t_coeffs, lr=lr)
# Main optimization loop
cost = []
try:
for t in tqdm(range(int(max_epochs)), desc='Optimisation', ascii=True):
optimizer.zero_grad() # Set the gradients to 0.
y_predicted = model(Variable(local_batch)) # Compute the current predicted Y's from x_dataset
current_loss = loss(y_predicted, local_truth) # See how far off the prediction is
if t % 10 == 0:
cost.append(current_loss.item())
current_loss.backward() # Compute the gradient of the loss with respect to A and b.
optimizer.step() # Update A and b accordingly.
signal.signal(signal.SIGINT, handler)
except RuntimeError:
print('Outta time bitch!')
print("\nDone ")
torch.save(t_coeffs, 'models/coes_scaling.pt')
print(f"t = {t}, loss = {current_loss}, coeffs = {[round(co.item(), 8) for co in t_coeffs]}")
# Plot Cost
plot_loss(max_epochs, cost, fn='figures/cost_scaling.pdf')
compare_model(model, fn='figures/model_scaling.pdf')
if __name__ == "__main__":
main()
```
#### File: postproc/postproc/anisotropy_tensor.py
```python
import numpy as np
# Functions
def anisotropy_tensor(r):
"""
Return the normalized anisotropy tensor of the Reynolds stresses (np.ndarray with shape (3,3,N,M)).
:param r: Reynolds stresses. np.ndarray with shape (3,3,N,M) where NxM is the field size of the components (2D fields)
:return: b, the normalized anisotropy tensor of the Reynolds stresses
"""
N = r[0,0].shape[0]
M = r[0,0].shape[1]
zeros = np.zeros((N, M))
# Calc anisotropy tensor
k = 0.5 * (r[0,0] + r[1,1] + r[2,2]) # TKE
k_d_matrix = np.array([[k, zeros, zeros], [zeros, k, zeros], [zeros, zeros, k]]) # TKE*kronecker_delta matrix
a = r - (2 / 3) * k_d_matrix # Anisotropy tensor
# b = 0.5*a/k
b = 0.5 * np.divide(a, k, out=np.zeros_like(a), where=k != 0) # Avoid warning of divide by 0.
# b = 0.5 * np.divide(a, k_d_matrix, out=np.zeros_like(a), where=k_d_matrix != 0) # Produces different result
return b
def invariants(b):
"""
:param b: normalized Reynolds stresses anisotropy tensor. np.ndarray with shape (3,3,N,M) where NxM is the field size
of the components (2D field)
:return: the two non-zero invariants of the normalized Reynolds stresses anisotropy tensor (np.ndarray with shape (N,M)).
The invariants are in the form of eta and xi
"""
# Calc invariants
# b11 = b[0, 0]
# b12 = b[0, 1]
# b13 = b[0, 2]
# b21 = b[1, 0]
# b22 = b[1, 1]
# b23 = b[1, 2]
# b31 = b[2, 0]
# b32 = b[2, 1]
# b33 = b[2, 2]
# I = b11+b22+b33 = 0 # Definition of tr(b)
# II = (b11*b33+b22*b33+b11*b22-b13*b31-b23*b32-b12*b21) # Definition of -0.5*tr(b**2)!
# III = (b11*b22*b33+b21*b32*b13+b31*b12*b23-b13*b31*b22-b23*b32*b11-b12*b21*b33) # Definition of det(b)!
I = np.trace(b) # tr(b) = 0
II = -0.5 * np.trace(np.einsum('ijkl,jmkl->imkl', b, b)) # -0.5*tr(b**2)
    III = np.linalg.det(b.T).T  # det(b); move the 3x3 axes last for np.linalg.det
eta = np.sqrt(-1 / 3 * II)
xi = np.cbrt(1 / 2 * III)
return eta, xi
```
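As a quick sanity check (a sketch, not part of the package): for isotropic Reynolds stresses R_ij = (2k/3) δ_ij, the anisotropy tensor and both invariants should vanish identically.

```python
import numpy as np
from postproc.anisotropy_tensor import anisotropy_tensor, invariants

N = M = 4                               # arbitrary field size
zeros, ones = np.zeros((N, M)), np.ones((N, M))
r = np.array([[ones, zeros, zeros],     # isotropic Reynolds stresses
              [zeros, ones, zeros],
              [zeros, zeros, ones]])
b = anisotropy_tensor(r)
eta, xi = invariants(b)
print(np.allclose(b, 0), np.allclose(eta, 0), np.allclose(xi, 0))  # True True True
```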
#### File: postproc/postproc/io.py
```python
import numpy as np
import vtk
import pickle
from postproc.calc import make_periodic
# Functions
def read_data(file, shape, **kwargs):
"""
Return the velocity components of a velocity vector field stored in binary format.
    The data field is supposed to have been written as: (for k; for j; for i;) where the last dimension
    is the quickest varying index. Each record should have been written as: u, v, w.
    The returned velocity components are always converted to np.double precision.
    :param file: file to read from.
    :param shape: Shape of the data as (Nx,Ny) for 2D or (Nx,Ny,Nz) for 3D.
    :param kwargs:
        dtype: numpy dtype object. Single or double precision expected.
        stream (deprecated, always use stream output): type of access of the binary output. If false, there is a 4-byte header
        and footer around each "record" in the binary file (means +2 components at each record) (can happen in some
        Fortran compilers if access != 'stream').
        periodic: If the user desires to make the data periodic in a certain direction: (0,0,1) makes periodic in z.
ncomponents: Specify the number of components. Default = ndims of the field
:return: the components of the vector or scalar field.
"""
dtype = kwargs.get('dtype', np.single)
periodic = kwargs.get('periodic', (0,0,0))
ncomponents = kwargs.get('ncomponents', len(shape))
shape = tuple(reversed(shape))
shape_comp = shape + (ncomponents,)
period_axis = [i for i, pc in enumerate(periodic) if pc]
f = open(file, 'rb')
data = np.fromfile(file=f, dtype=dtype).reshape(shape_comp)
f.close()
if len(shape) == 2:
if ncomponents == 1:
u = data[:, :, 0].transpose(1, 0)
u = u.astype(np.float64, copy=False)
del data
if any(periodic):
for ax in period_axis:
u = make_periodic(u, axis=ax)
return u
elif ncomponents == 2:
u = data[:, :, 0].transpose(1, 0)
v = data[:, :, 1].transpose(1, 0)
del data
if any(periodic):
for ax in period_axis:
u = make_periodic(u, axis=ax)
v = make_periodic(v, axis=ax)
u = u.astype(np.float64, copy=False)
v = v.astype(np.float64, copy=False)
return u, v
elif ncomponents == 3:
u = data[:, :, 0].transpose(1, 0)
v = data[:, :, 1].transpose(1, 0)
w = data[:, :, 2].transpose(1, 0)
del data
if any(periodic):
for ax in period_axis:
u = make_periodic(u, axis=ax)
v = make_periodic(v, axis=ax)
w = make_periodic(w, axis=ax)
u = u.astype(np.float64, copy=False)
v = v.astype(np.float64, copy=False)
w = w.astype(np.float64, copy=False)
return u, v, w
else:
raise ValueError("Number of components is not <=3")
elif len(shape) == 3:
if ncomponents == 1:
u = data[:, :, :, 0].transpose(2, 1, 0)
del data
if any(periodic):
for ax in period_axis:
u = make_periodic(u, axis=ax)
u = u.astype(np.float64, copy=False)
return u
elif ncomponents == 2:
u = data[:, :, :, 0].transpose(2, 1, 0)
v = data[:, :, :, 1].transpose(2, 1, 0)
del data
if any(periodic):
for ax in period_axis:
u = make_periodic(u, axis=ax)
v = make_periodic(v, axis=ax)
u = u.astype(np.float64, copy=False)
v = v.astype(np.float64, copy=False)
return u, v
elif ncomponents == 3:
u = data[:, :, :, 0].transpose(2, 1, 0)
v = data[:, :, :, 1].transpose(2, 1, 0)
w = data[:, :, :, 2].transpose(2, 1, 0)
del data
if any(periodic):
for ax in period_axis:
u = make_periodic(u, axis=ax)
v = make_periodic(v, axis=ax)
w = make_periodic(w, axis=ax)
u = u.astype(np.float64, copy=False)
v = v.astype(np.float64, copy=False)
w = w.astype(np.float64, copy=False)
return u, v, w
else:
raise ValueError("Number of components is not <=3")
else:
raise ValueError("Shape is not two- nor three-dimensional")
def read_data_raw(file, shape, ncomponents):
shape = tuple(reversed(shape))
shape_comp = shape + (ncomponents,)
f = open(file, 'rb')
data = np.fromfile(file=f, dtype=np.single).reshape(shape_comp)
f.close()
return data
def read_and_write_fractioned_mean_data(f_w_list, shape, **kwargs):
"""
    Computes a weighted average of files which contain partial averages of quantities.
    :param f_w_list: list of tuples containing (file, weight).
    :param shape: Shape of the data as (Nx,Ny) for 2D or (Nx,Ny,Nz) for 3D.
    :param kwargs:
dtype: numpy dtype object. Single or double precision expected.
stream (depracated, use always stream output): type of access of the binary output. If false, there is a 4-byte header
and footer around each "record" in the binary fn (means +2 components at each record) (can happen in some
Fortran compilers if access != 'stream').
periodic: If the user desires to make the dat spanwise periodic (true) or not (false).
ncomponents: Specify the number of components. Default = ndims of the field
:return: The weighted average of the files containing partial averages.
"""
global a, file, b, c
dtype = kwargs.get('dtype', np.single)
ncomponents = kwargs.get('ncomponents', len(shape))
if ncomponents == 1:
aw_tot = 0
w_tot = 0
for tup in f_w_list:
file = tup[0]
w = tup[1]
data = read_data_raw(file, shape, ncomponents)
if len(shape) == 2:
a = data[:, :, 0]
elif len(shape) == 3:
a = data[:, :, :, 0]
aw_tot += a*w
w_tot += w
a_mean = aw_tot/w_tot
a_mean = a_mean.astype(np.float64, copy=False)
a_mean.tofile(file[:-4]+'_python_mean.dat')
return
elif ncomponents == 2:
aw_tot = 0
bw_tot = 0
w_tot = 0
for tup in f_w_list:
file = tup[0]
w = tup[1]
data = read_data_raw(file, shape, ncomponents)
if len(shape) == 2:
a = data[:, :, 0]
b = data[:, :, 1]
elif len(shape) == 3:
a = data[:, :, :, 0]
b = data[:, :, :, 1]
aw_tot += a*w
bw_tot += b*w
w_tot += w
a_mean, b_mean = aw_tot/w_tot, bw_tot/w_tot
a_mean = a_mean.astype(np.float64, copy=False)
b_mean = b_mean.astype(np.float64, copy=False)
r_mean = np.stack((a_mean, b_mean), axis=-1)
r_mean.tofile(file[:-4]+'_python_mean.dat')
return
elif ncomponents == 3:
aw_tot = 0
bw_tot = 0
cw_tot = 0
w_tot = 0
for tup in f_w_list:
file = tup[0]
w = tup[1]
data = read_data_raw(file, shape, ncomponents)
if len(shape) == 2:
a = data[:, :, 0]
b = data[:, :, 1]
c = data[:, :, 2]
elif len(shape) == 3:
a = data[:, :, :, 0]
b = data[:, :, :, 1]
c = data[:, :, :, 2]
aw_tot += a * w
bw_tot += b * w
cw_tot += c * w
w_tot += w
a_mean, b_mean, c_mean = aw_tot/w_tot, bw_tot/w_tot, cw_tot/w_tot
a_mean = a_mean.astype(np.float64, copy=False)
b_mean = b_mean.astype(np.float64, copy=False)
c_mean = c_mean.astype(np.float64, copy=False)
r_mean = np.stack((a_mean, b_mean, c_mean), axis=-1)
f_root = '/'.join(file.split('/')[0:-3])+'/'
f_name = file.split('/')[-1][:-4]
r_mean.tofile(f_root + 'output/' +f_name + '_python_mean.dat')
return
else:
raise ValueError("Number of components is not <=3")
def unpack2Dforces(file, D=1, U=1):
"""
    Unpacks ASCII files containing the following columns: non-dimensional time, time step, fx, fy, vx, vy.
fx, fy are the pressure forces and vx, vy the viscous forces (not returned with this function).
:param file: File to read from.
:param D: Characteristic length.
:param U: characteristic velocity.
:return: time, fx, fy.
"""
tD, dt, fx, fy, _, _ = np.loadtxt(file, unpack=True) # 2D
return tD*D/U, fx, fy
def unpack3Dforces(file, D=1, U=1):
"""
Unpacks ASCII files containing the following columns: non-dimensional time, time step, fx, fy, fz, vx, vy, vz.
fx, fy, fz are the pressure forces and vx, vy, vz the viscous forces (not returned with this function).
:param file: File to read from.
:param D: Characteristic length.
:param U: characteristic velocity.
:return: time, fx, fy.
"""
tD, dt, fx, fy, _, _, _, _ = np.loadtxt(file, unpack=True) #3D
return tD*D/U, fx, fy
def unpack_flex_forces(file, names):
"""
Unpacks ASCII files containing columns of user defined parameters. This allows the testing of new functions in
    the body module and flexibility in what is printed to your 'fort.9' file.
:param file: File to read from.
:param names: dtype=list The names of the parameters you've decided to write in the simulation
:return: New arrays assigned to your names
"""
try:
names = np.loadtxt(file, unpack=True)
except IndexError:
print("Get the number of entries right you fucking mug")
return names
def unpackTimeSeries(file, npoints):
"""
Unpacks ASCII files containing the following columns: non-dimensional time, point1, point2, ...
:param file:
:param npoints: number of points recorded in the fn
:return: time, point1, point2, ...
"""
if npoints == 1:
t, p = np.loadtxt(file, unpack=True) # 3D
return t, p
elif npoints == 2:
t, p1, p2 = np.loadtxt(file, unpack=True) # 3D
return t, p1, p2
elif npoints == 3:
t, p1, p2, p3 = np.loadtxt(file, unpack=True) # 3D
return t, p1, p2, p3
else:
raise ValueError("Number of points is not <=3")
def readTimeSeries(file):
"""
Reads ASCII files containing the following columns: non-dimensional time, point1, point2, ...
:param file:
    :return: 2D numpy array. Each column is a time series. Normally time is the first column, e.g. t = a[:, 0].
"""
return np.loadtxt(file)
def readPressureArcLength(file):
p, L = np.loadtxt(file, unpack=True, skiprows=1, delimiter=',')
return p, L
def importExpCpTheta(file):
theta, cp = np.loadtxt(file, unpack=True, delimiter=',')
return theta, cp
def read_vti(file):
reader = vtk.vtkXMLPImageDataReader()
reader.SetFileName(file)
reader.Update()
data = reader.GetOutput()
pointData = data.GetPointData()
sh = data.GetDimensions()[::-1]
ndims = len(sh)
# get vector field
v = np.array(pointData.GetVectors("Velocity")).reshape(sh + (ndims,))
vec = []
for d in range(ndims):
a = v[..., d]
vec.append(a)
# get scalar field
sca = np.array(pointData.GetScalars('Pressure')).reshape(sh + (1,))
# Generate grid
# nPoints = dat.GetNumberOfPoints()
(xmin, xmax, ymin, ymax, zmin, zmax) = data.GetBounds()
grid3D = np.mgrid[xmin:xmax + 1, ymin:ymax + 1, zmin:zmax + 1]
return np.transpose(np.array(vec), (0,3,2,1)), np.transpose(sca, (0,3,2,1)), grid3D
def read_vtr(fn):
import warnings
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
reader = vtk.vtkXMLPRectilinearGridReader()
reader.SetFileName(fn)
reader.Update()
data = reader.GetOutput()
pointData = data.GetPointData()
sh = data.GetDimensions()[::-1]
ndims = len(sh)
# get vector field
try:
v = np.array(pointData.GetVectors("Velocity")).reshape(sh + (ndims,))
vec = []
for d in range(ndims):
a = np.array(v[..., d])
vec.append(a)
vec = np.array(vec)
# get scalar field
sca = np.array(pointData.GetScalars('Pressure')).reshape(sh + (1,))
# get grid
x = np.array(data.GetXCoordinates())
y = np.array(data.GetYCoordinates())
z = np.array(data.GetZCoordinates())
return np.transpose(vec, (0, 3, 2, 1)), np.transpose(sca, (0, 3, 2, 1)), np.array((x, y, z))
except ValueError:
print('\n' + fn + ' corrupt, skipping for now')
def vtr_to_mesh(fn, length_scale, rotation=0):
"""
Rotates and scales vtr file
Args:
        fn: Path to the .vtr file (typically inside the 'datp' folder)
length_scale: length scale of the simulation
rotation: Rotate the grid. If you're running a simulation with
an angle of attack, it's better to rotate the flow than
the foil because of the meshing.
Returns: X, Y - coordinates (useful for indexing)
U, V - rotated velocity components
w - un-rotated z velocity component
p - pressure field
"""
rot = rotation / 180 * np.pi
data = read_vtr(fn)
# Get the grid
x, y, z = data[2]
X, Y = np.meshgrid(x / length_scale, y / length_scale)
    # Rotate the grid; evaluate both components before reassigning so the
    # original X is not overwritten while computing Y.
    X, Y = (np.cos(rot) * X + np.sin(rot) * Y,
            -np.sin(rot) * X + np.cos(rot) * Y)
u, v, w = data[0]
U = np.cos(rot) * u + np.sin(rot) * v
V = -np.sin(rot) * u + np.cos(rot) * v
p = data[1]
p = np.reshape(p, [np.shape(p)[0], np.shape(p)[2], np.shape(p)[3]])
return X, Y, U, V, w, p
def pvd_parser(fn, n=None):
times = []
files = []
with open(fn) as f:
lines = f.readlines()
for line in lines[2:-2][:n]:
l = line.split(" ")
times.append(float(l[1].split("\"")[1]))
files.append(str(l[4].split("\"")[1]))
return times, files
def write_object(obj, fname):
with open(fname, 'wb') as output: # Overwrites any existing fn.
pickle.dump(obj, output, protocol=pickle.HIGHEST_PROTOCOL)
return
def read_object(fname):
with open(fname, 'rb') as input: # Overwrites any existing fn.
obj = pickle.load(input)
return obj
def read_txt(fname, skiprows=0, delimiter=','):
lines = np.loadtxt(fname, skiprows=skiprows, delimiter=delimiter, unpack=True)
return lines
```
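A hypothetical call following the `read_data` docstring above (the file name and grid size are assumptions):

```python
import numpy as np
from postproc.io import read_data

u, v, w = read_data('output/velocity.dat', (128, 96, 64),
                    ncomponents=3, dtype=np.single, periodic=(0, 0, 1))
# Components come back as float64 arrays ordered (Nx, Ny, Nz), made periodic in z.
```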
#### File: postproc/ml_tools/dmd.py
```python
import matplotlib.pyplot as plt
import numpy as np
from pydmd import DMD, FbDMD
from tqdm import tqdm
plt.style.use(['science', 'grid'])
def dmd_mag(snap: np.array):
u, v, _ = snap[:, 2:-1].T
snapshots = []
for loop1, loop2 in tqdm(zip(u, v)):
U, V = np.mean(loop1, axis=2).T, np.mean(loop2, axis=2).T
# Filter snapshots so that we only use region of interest around the foil
mag = np.sqrt(U ** 2 + V ** 2)
snapshots.append(mag)
snapshots = snapshots - np.mean(snapshots, axis=0)
# dmd = DMD(svd_rank=-1, tlsq_rank=2, exact=True, opt=True).fit(snapshots)
opt_dmd = FbDMD(svd_rank=-1, exact=True).fit(snapshots)
print('\n Done DMD fit, freeing up cores')
return opt_dmd
def dmd_pressure(snap: np.array):
p = snap[:, -1]
snapshots = []
for loop1 in tqdm(p):
snapshots.append(np.mean(loop1, axis=0).T)
# snapshots = snapshots - np.mean(snapshots, axis=0)
opt_dmd = FbDMD(svd_rank=-1, exact=True).fit(snapshots)
return opt_dmd
```
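A usage sketch; the file path and the pydmd attribute names are assumptions beyond what the module itself shows:

```python
import numpy as np
from postproc.ml_tools.dmd import dmd_mag

# Same row layout as the animation scripts: [X, Y, u, v, w, p] per time step.
snaps = np.load('snapshots/flow_snaps.npy', allow_pickle=True)
fbdmd = dmd_mag(snaps)
print(len(fbdmd.eigs))   # pydmd's FbDMD exposes .eigs, .modes, .dynamics
```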
#### File: postproc/visualise/plot_gif.py
```python
import os
from tkinter import Tcl
import imageio
from pygifsicle import optimize
from tqdm import tqdm
from postproc.visualise.plot_flow import Plot2DIsocontours
def animate(folder, interest, **kwargs):
    duration = kwargs.pop('duration', 0.15)  # pop so it is not passed twice to get_writer below
# Sort filenames to make sure they're in order
fn_images = os.listdir(folder+'/animation')
fn_images = Tcl().call('lsort', '-dict', fn_images)
# Create gif
gif_path = os.path.join(folder, interest + '.gif')
with imageio.get_writer(gif_path, mode='I', duration=duration, **kwargs) as writer:
for filename in tqdm(fn_images[::1], desc='Loop images'):
writer.append_data(imageio.imread(os.path.join(folder, 'animation', filename)))
optimize(gif_path)
def save_piv_frames(data, folder, interest, tit=None):
for snap in range(len(data.mag_snap)):
Plot2DIsocontours(data, interest,
title=tit, lims=[0, 1.4], step=0.1, snap=snap).plot_fill(
os.path.join(folder, str(snap) + '.png'))
def save_sim_frames(data, folder, interest, **kwargs):
os.system('mkdir -p '+os.path.join(folder, 'animation'))
os.system('rm '+os.path.join(folder, 'animation')+'/*.png')
for idx, snap in tqdm(enumerate(data.snaps), desc='Plotting snapshots'):
Plot2DIsocontours(snap, interest, **kwargs) \
.plot_fill(os.path.join(folder, 'animation', str(idx) + '.png'))
def save_animate(data, data_root, quantity, kwargs_plot={}, kwargs_animate={}):
"""
Save the frames and animate the quantity of interest
Args:
data: the flow field defined by the class SimFramework
data_root: file path to an empty folder to save frames to
quantity: what we are animating
**kwargs_plot: args for plotting the flow
**kwargs_animate: extra args for imageio.get_writer
Returns:
"""
save_sim_frames(data, os.path.join(data_root, 'figures'), quantity, **kwargs_plot)
animate(os.path.join(data_root, 'figures'), quantity, **kwargs_animate)
```
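A hypothetical driver for the helper above; `sim` stands for the SimFramework object mentioned in the docstring and the path is made up:

```python
from postproc.visualise.plot_gif import save_animate

data_root = '/path/to/run'   # hypothetical run directory containing a figures/ folder
# `sim` would be a SimFramework instance built from the run's flow snapshots:
# save_animate(sim, data_root, 'vort',
#              kwargs_plot={'title': r'$\omega_z$'},
#              kwargs_animate={'duration': 0.1})
```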
#### File: postproc/visualise/read_exp.py
```python
import h5py
import numpy as np
from postproc import calc
class PIVFramework:
"""
    Class that holds all the functions to extract data from a .mat file into a plottable form.
"""
def __init__(self, exp, fn, **kwargs):
rms = kwargs.get('rms', False)
mag = kwargs.get('mag', True)
vort = kwargs.get('vort', False)
data = {}
f = h5py.File(exp)[fn]
for k, v in f.items():
data[k] = np.array(v)
# Normalise with the chord length
l, U_inf = data['chord_length'], data['U_inf']
print(l)
self.X, self.Y = data['X'] / l, data['Y'] / l
self.u, self.v = data['VY'] / U_inf, data['VX'] / U_inf
if mag:
self.mag_snap = np.sqrt((np.einsum('...jk,...jk->...jk', self.u, self.u) +
np.einsum('...jk,...jk->...jk', self.v, self.v)))
mean = np.mean(self.mag_snap, axis=0)
self.U = mean
if rms:
mag = np.sqrt((np.einsum('...jk,...jk->...jk', self.u, self.u) +
np.einsum('...jk,...jk->...jk', self.v, self.v)))
mean = np.array([np.mean(mag, axis=0)])
fluc = np.sqrt((mag - mean) ** 2)
self.U = np.mean(fluc, axis=0)
if vort: # ddx-ddy
omega = []
for idx, (snap_u, snap_v) in enumerate(zip(self.u, self.v)):
omega.append(np.array(calc.vortZ(snap_u, snap_v, x=self.X[:, 0], y=self.Y[0], acc=2)))
self.omega = np.sum(omega, axis=0) / len(self.U)
self.omega = self.omega.T
self.omega = data['vort'] / U_inf
self.omega = np.squeeze(np.mean(self.omega, axis=0)).T
```
#### File: postproc/postproc/wavenumber_spectra.py
```python
import numpy as np
import scipy.signal as signal
from tqdm import tqdm
# Functions
def ke_spectra(u_i, x_i, **kwargs):
"""
    Computes the kinetic energy (KE) wavenumber spectrum of an n-dimensional velocity vector field such as u_i = (u, v, w) for 3D.
    :param u_i: n-dimensional velocity vector field
    :param x_i: n-dimensional spatial vector field
    :param kwargs: k_res for the resolution of k_mod_line which defines the bandwidth dk.
:return: k_mod 1D array and ke_integral 1D array.
"""
if len(u_i) > 3 or len(u_i) < 1 or len(x_i) > 3 or len(x_i) < 1:
raise ValueError('Invalid field dimensions')
# Wavenumbers
k_i = _wavenumbers(*x_i) # k_i = (kx, ky, kz)
# FFT to compute KE
ke = 0
for u in u_i:
u = _window_ndim(u, signal.hanning) # Windowing
uk = np.fft.fftn(u)/u.size # FFT
ke += uk*uk.conjugate() # KE
ke = 0.5*ke
# Calc spectra
return _pair_integrate_fast(ke, *k_i, **kwargs)
def scalar_spectra(a, *args, **kwargs):
"""
    Computes the wavenumber spectrum of an n-dimensional scalar field by means of an n-dimensional FFT. The spectrum
    is for the wavenumber modulus of ak, hence a spherical integration is performed over all components of k_i
    (the wavenumber vector).
    :param a: The scalar field
    :param args: The spatial vector x_i = (x, y, z)
    :param kwargs: k_res for the resolution of k_mod_line which defines the bandwidth dk.
:return: k_mod 1D array and ak_integral 1D array.
"""
if a.ndim > 3 or a.ndim < 1:
raise ValueError('Invalid field dimensions')
elif a.ndim != len(args):
        raise ValueError('Field dimensions must match the spatial vector components passed to the function')
k_i = _wavenumbers(*args) # k_i = (kx, ky, kz)
a = _window_ndim(a, signal.hanning) # Windowing
ak = np.fft.fftn(a)/a.size # FFT add power spectrum??
return _pair_integrate_fast(ak, *k_i, **kwargs) # Calc spectra
def _pair_integrate_fast(ak, *args, **kwargs):
"""
Internal function which computes the wavenumber modulus k_mod for each fft coefficient of the ak ndarray and
integrates the components contributing to the same k_mod with a certain bandwidth dk
:param ak: The nFFT of (a)
:param args: the wavenumber vector *k_i
    :param kwargs: k_res for the resolution of k_mod_line which defines the bandwidth dk.
:return: k_mod 1D array and ak_integral 1D array.
"""
k_res = kwargs.get('k_res', 200)
k2_sum_max = 0
k2_sum_min = 0
for k in args:
k2_sum_max += np.max(k**2)
k2_sum_min += np.min(k**2)
k_min, k_max = np.sqrt(k2_sum_min), np.sqrt(k2_sum_max)
dk = (k_max - k_min) / k_res
ak_integral = np.zeros(k_res)
k_mod_line = np.linspace(0, k_res - 1, k_res) * dk + dk / 2 # k values at half of each bandwidth
print(' Find ak(k_i) with its k modulus and integrate it in ak(k)')
with tqdm(total=ak.size) as pbar:
for index in np.ndindex(ak.shape):
ak_p = ak[index]
k2_sum = 0
for i, k in enumerate(args):
k2_sum += k[index[i]] ** 2
k_mod = np.sqrt(k2_sum)
kint = int(k_mod / dk)
if kint >= k_res:
ak_integral[-1] += np.abs(ak_p)
else:
ak_integral[kint] += np.abs(ak_p)
pbar.update(1)
return k_mod_line, ak_integral
def _wavenumbers(*args):
"""
:param args: the wavenumber vector *k_i
    :return: the wavenumber vector for a position vector (1D, 2D or 3D) such as x, y, z of a uniform grid.
"""
k_i = () # wavenumber vector; k_i = (kx, ky, kz). Each component is a 1D array type.
for arg in args:
N = arg.size # number of points in the spatial vector component
alpha = 2*np.pi/(np.max(arg)-np.min(arg)) # basic wavenumber
index = np.fft.fftfreq(N, d=1/N) # index per wavenumber direction: eg x: 0,1,2,...,N/2-1,-N/2,-N/2+1,...,-1
k = np.zeros(N) # wavenumber vector component k_i
for i in range(0, N):
k[i] = alpha*index[i]
k_i = k_i + (k,)
return k_i
def _window_ndim(a, wfunction):
"""
    Performs an in-place windowing on N-dimensional spatial-domain data.
    This is done to mitigate boundary effects in the FFT.
    :param a: n-dimensional array of input data to be windowed, modified in place.
:param wfunction: 1D window generation function. Function should accept one argument: the window length.
Example: scipy.signal.hamming
:return: windowed n-dimensional array a
"""
if a.ndim == 0:
        raise ValueError('Input data to be windowed cannot be scalar')
for axis, axis_size in enumerate(a.shape):
window = wfunction(axis_size)
for i in range(len(a.shape)):
if i == axis:
continue
else:
window = np.stack([window] * a.shape[i], axis=i)
a *= window
return a
```
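A self-contained sketch of calling `ke_spectra` on a synthetic 2D field (grid sizes and the random field are arbitrary):

```python
import numpy as np
from postproc.wavenumber_spectra import ke_spectra

nx, ny = 64, 64
x = np.linspace(0.0, 2 * np.pi, nx)
y = np.linspace(0.0, 2 * np.pi, ny)
u = np.random.rand(nx, ny)
v = np.random.rand(nx, ny)
k_mod, E_k = ke_spectra((u, v), (x, y), k_res=100)
# k_mod holds the centre of each wavenumber band, E_k the KE integrated over it.
```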
#### File: postproc/tests/example_gmm.py
```python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="white", font="Arial")
colors = sns.color_palette("Paired", n_colors=12).as_hex()
import torch
from postproc.ml_tools.gmm import GaussianMixture
from math import sqrt
def main():
n, d = 300, 2
# generate some dat points ..
data = torch.Tensor(n, d).normal_()
# .. and shift them around to non-standard Gaussians
data[:n//2] -= 1
data[:n//2] *= sqrt(3)
data[n//2:] += 1
data[n//2:] *= sqrt(2)
# Next, the Gaussian mixture is instantiated and ..
n_components = 2
model = GaussianMixture(n_components, d)
model.fit(data)
    # .. used to predict the data points as they were shifted
y = model.predict(data)
plot(data, y)
def plot(data, y):
n = y.shape[0]
fig, ax = plt.subplots(1, 1, figsize=(1.61803398875*4, 4))
ax.set_facecolor('#bbbbbb')
ax.set_xlabel("Dimension 1")
ax.set_ylabel("Dimension 2")
    # plot the locations of all data points ..
    for i, point in enumerate(data):
if i <= n//2:
# .. separating them by ground truth ..
ax.scatter(*point, color="#000000", s=3, alpha=.75, zorder=n+i)
else:
ax.scatter(*point, color="#ffffff", s=3, alpha=.75, zorder=n+i)
if y[i] == 0:
# .. as well as their predicted class
ax.scatter(*point, zorder=i, color="#dbe9ff", alpha=.6, edgecolors=colors[1])
else:
ax.scatter(*point, zorder=i, color="#ffdbdb", alpha=.6, edgecolors=colors[5])
handles = [plt.Line2D([0], [0], color='w', lw=4, label='Ground Truth 1'),
plt.Line2D([0], [0], color='black', lw=4, label='Ground Truth 2'),
plt.Line2D([0], [0], color=colors[1], lw=4, label='Predicted 1'),
plt.Line2D([0], [0], color=colors[5], lw=4, label='Predicted 2')]
legend = ax.legend(loc="best", handles=handles)
plt.tight_layout()
plt.savefig("example.pdf")
if __name__ == "__main__":
main()
``` |
{
"source": "JMassing/DeepL-CardDetection",
"score": 3
} |
#### File: DeepL-CardDetection/DataGenerator/card_extractor.py
```python
from enum import Enum
import math
import cv2 as cv
import numpy as np
class FeatureDetector:
'''! Class to detect features in images '''
def __init__(self, image):
'''!
@brief Constructor
@param image (numpy.ndarray) The raw image
'''
self.image = image
def detect_contours(self, threshold):
'''!
@brief Run contour detection
@param threshold (int) Binarization threshold
@return binary_img (numpy.ndarray) Binarized image
@return contours (list) The contours in the image
@return The contour hierarchy as a list
'''
gray_img = cv.cvtColor(self.image, cv.COLOR_BGR2GRAY)
ret, binary_img = cv.threshold(gray_img, threshold, 255, cv.THRESH_BINARY)
contours, hierarchy = cv.findContours(binary_img, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
return binary_img, contours, hierarchy
def detect_corners(self, contours):
'''!
@brief Detect corner points of Contours
@param contours (list) Contours
@return List of corner points of each contour
'''
        corners = []
        for contour in contours:
            d = 0
            points = []
while True:
d = d + 0.1
points = cv.approxPolyDP(contour, d, True)
if len(points) <= 4:
break
corners.append(points)
return corners
def detect_centers(self, contours):
'''!
@brief Detect center points of Contours
@param contours (list) Contours
@return List of center points of each contour
'''
centers = []
for contour in contours:
M = cv.moments(contour)
centers.append([ int((M["m10"] / M["m00"])), int((M["m01"] / M["m00"]))])
return centers
class FilterType(Enum):
'''! Enum for filter methods '''
larger = 1 #!< Everything larger than threshold is removed
smaller = 2 #!< Everything smaller than threshold is removed
class ContourFilter:
'''! Class contains methods to filter contours '''
def filter_by_area(self, contours, threshold, method):
'''!
@brief Filter contours by their area
@details Either contours with an area smaller or larger than the threshold
area are filtered, depending on the method.
@param contours (list) Contours to filter
@param threshold (int) Size of the threshold area
@param method (Filter) One of FilterType.larger or FilterType.smaller
@return List of filtered contours
'''
if(method == FilterType.larger):
filtered = [contour for contour in contours if cv.contourArea(contour) < threshold]
elif(method == FilterType.smaller):
filtered = [contour for contour in contours if cv.contourArea(contour) > threshold]
else:
            raise ValueError("Unknown Filter Method.")
return filtered
class ImageDewarper:
    '''! @brief Class to dewarp images '''
def __init__(self):
'''! @brief The constructor '''
self.__origin = []
def __angle(self, point):
'''!
@brief Calculate the angle between the Vector to a point in
the coordinate system with the member __origin as the
center and the reference vector [1, 0]
@param point (list) Point of interest
        @return Angle in radians
'''
point = point[0]
refvec = [1, 0]
# Vector between point and the origin: v = p - o
vector = [point[0]-self.__origin[0], point[1]-self.__origin[1]]
# Length of vector: ||v||
lenvector = math.hypot(vector[0], vector[1])
# If length is zero there is no angle
if lenvector == 0:
            return -math.pi
# Normalize vector: v/||v||
normalized = [vector[0]/lenvector, vector[1]/lenvector]
dotprod = normalized[0]*refvec[0] + normalized[1]*refvec[1] # x1*x2 + y1*y2
diffprod = refvec[1]*normalized[0] - refvec[0]*normalized[1] # x1*y2 - y1*x2
angle = math.atan2(diffprod, dotprod)
# Negative angles represent counter-clockwise angles so we need to subtract them
# from 2*pi (360 degrees)
if angle < 0:
angle += 2*math.pi
return angle
def __sort_corners(self, corners, center):
'''!
@brief Sorts corner points counterclockwise starting at upper right
looking at the image
@param corners (list) Corner points of the image
@param center (list) Center point of the image
@return List of sorted points
'''
self.__origin = center
return sorted(corners, key=self.__angle)
def __transformation_coordinates(self, dsize: tuple):
'''!
@brief Calculates the coordinates where the image corners should be
transformed to
@param dsize (tuple) Size of destination image (width, height)
@return List of coordinates
'''
offset = 0
coords = [ [dsize[0] - offset, offset],
[offset, offset],
[offset, dsize[1] - offset],
[dsize[0] - offset, dsize[1] - offset] ]
return coords
def dewarp_image(self, image, corners, center, dsize: tuple):
'''!
@brief Dewarp part of a given image that is enclosed by given
corner points with the given center point to Birdseye view
@param image (numpy.ndarray) Source image
@param corners (list) Corner points
@param center (list) Center point
@param dsize (tuple) Destination size (width, height)
@return Dewarped image (numpy.ndarray)
'''
if(len(corners) != 4):
return None
corners = self.__sort_corners(corners, center)
coords = self.__transformation_coordinates(dsize)
M = cv.getPerspectiveTransform(np.array(corners, dtype = "float32"), np.array(coords, dtype = "float32"))
transformed_image = cv.warpPerspective(image, M, dsize)
return transformed_image
if __name__ == "__main__":
image = cv.imread("./live_image.jpg")
feature_detector = FeatureDetector(image)
binary_img, contours, hierarchy = feature_detector.detect_contours(180)
contour_filter = ContourFilter()
contours = contour_filter.filter_by_area(contours, 10000, FilterType.smaller)
corners = feature_detector.detect_corners(contours)
centers = feature_detector.detect_centers(contours)
dewarper = ImageDewarper()
dewarped_image = dewarper.dewarp_image(image, corners[2], centers[2], (180, 300))
cv.drawContours(image, contours, -1, (0, 255, 0), 3)
for points in corners:
for point in points:
image = cv.circle(image, (point[0][0], point[0][1]), radius=3, color=(0, 0, 0), thickness=-1)
for point in centers:
image = cv.circle(image, (point[0], point[1]), radius=3, color=(192, 0, 0), thickness=-1)
cv.imshow("Image", image)
cv.imshow("Dewarped Card", dewarped_image)
#cv.imwrite("AS.jpg", dewarped_image)
k = cv.waitKey(0)
``` |
{
"source": "JMast3rs/netcfgbu-plugin-teams",
"score": 2
} |
#### File: JMast3rs/netcfgbu-plugin-teams/backup_report.py
```python
import pymsteams
from netcfgbu.plugins import Plugin  # assumed import path for the netcfgbu Plugin base class
teams_webhook_url = "<< Insert Teams Webhook URL>>"
class Teams_Backup(Plugin):
name = "Teams_Backup"
def report(report):
        message = pymsteams.connectorcard(teams_webhook_url)
report_dic = dict(report.task_results)
message.title("Configurtion Backup Report")
message.text("Configurtion Backup Report")
messageSection = pymsteams.cardsection()
for suc in report_dic[True]:
messageSection.addFact(str(suc[0]["host"]), "Successful!")
for unsuc in report_dic[False]:
messageSection.addFact(str(unsuc[0]["host"]), str(unsuc[1]))
message.addSection(messageSection)
message.send()
if len(report_dic[False]) > 0:
            message = pymsteams.connectorcard(teams_webhook_url)
            message.title("Configuration Backup Failures!")
            message.text("Configuration Backup Failures!")
message.color("E74C3C")
messageSection = pymsteams.cardsection()
for unsuc in report_dic[False]:
messageSection.addFact(str(unsuc[0]["host"]), str(unsuc[1]))
message.addSection(messageSection)
message.send()
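# Rough sketch of the task_results structure the report hook above assumes
# (inferred from this code, not taken from the netcfgbu documentation):
#   report.task_results ~ {True:  [({"host": "sw1", ...}, <result>), ...],
#                          False: [({"host": "sw2", ...}, "error text"), ...]}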
```
#### File: JMast3rs/netcfgbu-plugin-teams/git_report.py
```python
import pymsteams, requests
from netcfgbu.plugins import Plugin  # assumed import path for the netcfgbu Plugin base class
teams_webhook_url = "<< Insert Teams Webhook URL>>"
teams_git_repository_url = "<< Insert GitLab Repo URL (without .git) >>"
teams_git_repository_id = "<< Insert GitLab Repo ID >>"
teams_git_token = "<< Insert GitLab Access Token >>"
class Teams_Git(Plugin):
name = "Teams_Git"
def git_report(success, tag_name):
        message = pymsteams.connectorcard(teams_webhook_url)
if success:
res = requests.get(f"https://gitlabs.com/api/v4/projects/{teams_git_repository_id}/repository/tags?private_token={teams_git_token}").json()
previous_repo = res[1]["name"]
message.title("Configuration Change Detected")
message.text("Successfully pushed to git.")
message.addLinkButton("View Config", f"{teams_git_repository_url}/-/tree/{tag_name}")
message.addLinkButton("View Changes", f"{teams_git_repository_url}/-/compare/{previous_repo}...{tag_name}")
message.color("#4EE73C")
message.send()
print(f"Previous Config Tag: {previous_repo}, New Config Tag: {tag_name}")
else:
message.title("No Configureation Change Detected")
message.text("Skipping git push.")
message.send()
``` |
{
"source": "Jmast/kombu-redis-priority",
"score": 3
} |
#### File: kombu_redis_priority/scheduling/round_robin.py
```python
from kombu.utils.scheduling import cycle_by_name
from .base import QueueScheduler
class RoundRobinQueueScheduler(QueueScheduler):
def __init__(self):
self.cycle = cycle_by_name('round_robin')()
def next(self):
queues = self.cycle.consume(1)
if queues:
return queues[0]
return None
def rotate(self, last_queue, was_empty):
# This is first rotation and queue was empty, so
# start tracking that rotation was empty
if last_queue == self.start_queue and was_empty:
self.rotation_empty = True
# If at any time in rotation the queue was not
# empty, then rotation is not empty
elif not was_empty:
self.rotation_empty = False
self.cycle.rotate(last_queue)
next_queue = self.next()
is_full_rotation = next_queue == self.start_queue
return is_full_rotation and self.rotation_empty
def update(self, queue_list):
self.cycle.update(queue_list)
self.start_queue = self.next()
```
#### File: tests/scheduler/test_roundrobin.py
```python
import unittest
from ddt import ddt, data
from kombu_redis_priority.scheduling.round_robin import RoundRobinQueueScheduler
@ddt
class TestRoundRobinQueueScheduler(unittest.TestCase):
def test_round_robin_scheduler_gets_queue_at_top_of_list(self):
scheduler = RoundRobinQueueScheduler()
scheduler.update(['TimeMachine', 'FluxCapacitor'])
self.assertEqual(scheduler.next(), 'TimeMachine')
def test_round_robin_scheduler_next_with_empty(self):
scheduler = RoundRobinQueueScheduler()
scheduler.update([])
self.assertEqual(scheduler.next(), None)
def test_round_robin_scheduler_update_sets_internal_list(self):
scheduler = RoundRobinQueueScheduler()
scheduler.update(['TimeMachine', 'FluxCapacitor'])
self.assertEqual(scheduler.cycle.items, ['TimeMachine', 'FluxCapacitor'])
@data(True, False)
def test_round_robin_scheduler_rotate_rotates_queue_regardless_of_emptiness(self, was_empty):
scheduler = RoundRobinQueueScheduler()
scheduler.update(['TimeMachine', 'FluxCapacitor'])
scheduler.rotate('TimeMachine', was_empty)
self.assertEqual(scheduler.cycle.items, ['FluxCapacitor', 'TimeMachine'])
def test_round_robin_scheduler_rotate_full_rotation_empty(self):
scheduler = RoundRobinQueueScheduler()
scheduler.update(['TimeMachine', 'FluxCapacitor'])
# Have not made a full rotation, not fully empty yet
self.assertFalse(scheduler.rotate('TimeMachine', True))
# Made a full round trip and both queues were empty
self.assertTrue(scheduler.rotate('FluxCapacitor', True))
def test_round_robin_scheduler_rotate_full_rotation_state_tracking(self):
scheduler = RoundRobinQueueScheduler()
scheduler.update(['TimeMachine', 'FluxCapacitor', 'Delorean'])
# Have not made a full rotation, not fully empty yet
self.assertFalse(scheduler.rotate('TimeMachine', True))
self.assertFalse(scheduler.rotate('FluxCapacitor', True))
# Made a full rotation, but the last queue was not empty
self.assertFalse(scheduler.rotate('Delorean', False))
```
#### File: tests/utils/fakeredis_ext.py
```python
from collections import deque
from itertools import count
from fakeredis import FakeStrictRedis
class FakeStrictRedisWithConnection(FakeStrictRedis):
"""
An extension of FakeStrictRedis to implement some of the low level interfaces of StrictRedis from redis-py. Kombu
uses these internal features to simulate an async event based request response cycle so that it can be hooked into
its chain.
You can learn more about it in the kombu source for the redis transport.
"""
def __init__(self, *args, **kwargs):
super(FakeStrictRedisWithConnection, self).__init__(*args, **kwargs)
self._connection = None
self.connection = self._sconnection(self)
self._response_queue = deque()
self.server = kwargs["server"]
def parse_response(self, connection, type, **options):
# If there are any responses queued up, pop and return that
if self._response_queue:
return self._response_queue.pop()
# TODO: this is actually wrong - we need to determine if it is a pipeline response based on what is on the
# datagram.
if type == '_':
return self._parse_pipeline_response_from_connection(connection)
else:
return self._parse_command_response_from_connection(connection, type)
def _parse_pipeline_response_from_connection(self, connection):
"""
A pipeline response consists of several responses:
- OK : acknowledges a transaction
- QUEUED : acknowledges a command has been queued. There will be one per command sent.
- LIST : list of responses
"""
# pop off the first command, which should be MULTI to signal start of transaction
cmd = self.connection._sock.data.pop(0)
assert cmd[0] == 'MULTI'
# Now extract all the commands until transaction ends
cmds_to_execute = []
cmd = self.connection._sock.data.pop(0)
while cmd[0] != 'EXEC':
cmds_to_execute.append(cmd)
cmd = self.connection._sock.data.pop(0)
# It is a bug, if the command stack is NOT empty at this point
assert len(self.connection._sock.data) == 0
# execute those collected commands and construct response list
responses = [self._parse_command_response(cmd, args) for cmd, args in cmds_to_execute]
# Now append the expected pipeline responses to the deque and return the first response, which is 'OK'
for i in range(len(responses)):
self._response_queue.appendleft('QUEUED')
self._response_queue.appendleft(responses)
return 'OK'
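    # Illustrative sketch (hypothetical queue/message names) of the command stream
    # the method above expects on the fake socket:
    #   [("MULTI", ()), ("ZADD", ("queue", 1, "msg")), ("EXEC", ())]
    # parse_response then yields 'OK' first, one 'QUEUED' per queued command, and
    # finally the list of command results.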
def _parse_command_response_from_connection(self, connection, type):
cmd, args = self.connection._sock.data.pop()
assert cmd == type
assert len(self.connection._sock.data) == 0
return self._parse_command_response(cmd, args)
def _parse_command_response(self, cmd, args):
"""TODO (JP) I'm not 100% sure why we are overriding the parse_response code on this class (which is what
ultimately leads us to here) but after moving to a newer version of fakeredis, our old code here would
cause an "RuntimeError: maximum recursion depth exceeded" error because cmd_func would lead us right back
to this method again.
Using a new FakeRedis client (which will _not_ call _parse_command_response) seems to work but there is
probably a better solution to this problem.
I'm also unsure why ZADD needs to be modified but it probably has to do with some change in the FakeRedis code
that we are overriding here.
"""
new_client = FakeStrictRedis(server=self.server)
cmd_func = getattr(new_client, cmd.lower())
if cmd == "ZADD":
args = (args[0], {args[2]: args[1]})
return cmd_func(*args)
class _sconnection(object):
disconnected = False
class _socket(object):
blocking = True
filenos = count(30)
def __init__(self, *args):
self._fileno = next(self.filenos)
self.data = []
def fileno(self):
return self._fileno
def setblocking(self, blocking):
self.blocking = blocking
def __init__(self, client):
self.client = client
self._sock = self._socket()
self.pid = 1234
def disconnect(self):
self.disconnected = True
def send_command(self, cmd, *args):
self._sock.data.append((cmd, args))
def pack_commands(self, cmds):
return cmds # do nothing
def send_packed_command(self, all_cmds):
# Input command format is: tuple(tuple(cmd, arg0, arg1, ...), options)
# The injected command format has to be equivalent to `send_command`: tuple(cmd, args)
def normalize_command(raw_cmd):
return (raw_cmd[0], raw_cmd[1:])
self._sock.data.extend([normalize_command(cmd) for cmd in all_cmds])
``` |
{
"source": "JMata28/PGS2",
"score": 3
} |
#### File: PGS2/flaskblog/forms.py
```python
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from flask_login import current_user
from wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField, DecimalField
from wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError
from flaskblog.models import User
class RegistrationForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one.')
class LoginForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password', validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
class UpdateAccountForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(), Length(min=2, max=20)])
email = StringField('Email',
validators=[DataRequired(), Email()])
picture = FileField('Update Profile Picture', validators=[FileAllowed(['jpg', 'png'])])
submit = SubmitField('Update')
def validate_username(self, username):
if username.data != current_user.username:
user = User.query.filter_by(username=username.data).first()
if user:
raise ValidationError('That username is taken. Please choose a different one.')
def validate_email(self, email):
if email.data != current_user.email:
user = User.query.filter_by(email=email.data).first()
if user:
raise ValidationError('That email is taken. Please choose a different one.')
class PostForm(FlaskForm):
title = StringField('Title', validators=[DataRequired()])
content = TextAreaField('Content', validators=[DataRequired()])
submit = SubmitField('Post')
class RequestResetForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
submit = SubmitField('Request Password Reset')
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError('There is no account with that email. You must register first.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
    confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
class EmailAlertsForm(FlaskForm):
max_temperature = DecimalField('Maximum Temperature, values from -10 to 50 Degrees Celsius.')
max_humidity = DecimalField('Maximum Humidity, values from 0% to 100%.')
max_light_level = DecimalField('Maximum Light Level, values from 0 to 2000lx')
min_temperature = DecimalField('Minimum Temperature, values from -10 to 50 Degrees Celsius.')
min_humidity = DecimalField('Minimum Humidity, values from 0% to 100%.')
min_light_level = DecimalField('Minimum Light Level, values from 0 to 2000lx')
submit = SubmitField('Submit Values')
``` |
{
"source": "jmatak/HeadPoseNAS",
"score": 3
} |
#### File: HeadPoseNAS/dataset/extract_pose.py
```python
import argparse
import sys
from os import listdir
from os.path import isfile, join
import cv2
import numpy as np
import scipy.io as sio
from moviepy.editor import *
from tqdm import tqdm
# Authored by https://github.com/shamangary/FSA-Net
def get_args():
parser = argparse.ArgumentParser(description="This script cleans-up noisy labels "
"and creates database for training.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--db", type=str,
help="path to database")
parser.add_argument("--output", type=str,
help="path to output database mat file")
parser.add_argument("--img_size", type=int, default=190,
help="output image size")
parser.add_argument("--ad", type=float, default=0.4,
help="enlarge margin")
args = parser.parse_args()
return args
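# Example invocation (paths are placeholders for an AFLW2000-style folder of
# paired .jpg/.mat files):
#   python extract_pose.py --db ./AFLW2000 --output ./AFLW2000.npz --img_size 190 --ad 0.4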
def main():
args = get_args()
mypath = args.db
output_path = args.output
img_size = args.img_size
ad = args.ad
isPlot = False
onlyfiles_mat = [f for f in listdir(mypath) if isfile(join(mypath, f)) and join(mypath, f).endswith('.mat')]
onlyfiles_jpg = [f for f in listdir(mypath) if isfile(join(mypath, f)) and join(mypath, f).endswith('.jpg')]
onlyfiles_mat.sort()
onlyfiles_jpg.sort()
print(len(onlyfiles_jpg))
print(len(onlyfiles_mat))
out_imgs = []
out_poses = []
for i in tqdm(range(len(onlyfiles_jpg))):
img_name = onlyfiles_jpg[i]
mat_name = onlyfiles_mat[i]
img_name_split = img_name.split('.')
mat_name_split = mat_name.split('.')
if img_name_split[0] != mat_name_split[0]:
print('Mismatched!')
sys.exit()
mat_contents = sio.loadmat(mypath + '/' + mat_name)
pose_para = mat_contents['Pose_Para'][0]
pt2d = mat_contents['pt2d']
pt2d_x = pt2d[0, :]
pt2d_y = pt2d[1, :]
        # I found negative values in AFLW2000. They need to be removed.
pt2d_idx = pt2d_x > 0.0
pt2d_idy = pt2d_y > 0.0
pt2d_id = pt2d_idx
if sum(pt2d_idx) > sum(pt2d_idy):
pt2d_id = pt2d_idy
pt2d_x = pt2d_x[pt2d_id]
pt2d_y = pt2d_y[pt2d_id]
img = cv2.imread(mypath + '/' + img_name)
img_h = img.shape[0]
img_w = img.shape[1]
# Crop the face loosely
x_min = int(min(pt2d_x))
x_max = int(max(pt2d_x))
y_min = int(min(pt2d_y))
y_max = int(max(pt2d_y))
h = y_max - y_min
w = x_max - x_min
# ad = 0.4
x_min = max(int(x_min - ad * w), 0)
x_max = min(int(x_max + ad * w), img_w - 1)
y_min = max(int(y_min - ad * h), 0)
y_max = min(int(y_max + ad * h), img_h - 1)
img = img[y_min:y_max, x_min:x_max]
# Checking the cropped image
if isPlot:
cv2.imshow('check', img)
k = cv2.waitKey(500)
img = cv2.resize(img, (img_size, img_size))
pitch = pose_para[0] * 180 / np.pi
yaw = pose_para[1] * 180 / np.pi
roll = pose_para[2] * 180 / np.pi
cont_labels = np.array([yaw, pitch, roll])
out_imgs.append(img)
out_poses.append(cont_labels)
np.savez(output_path, image=np.array(out_imgs), pose=np.array(out_poses), img_size=img_size)
if __name__ == '__main__':
main()
```
#### File: HeadPoseNAS/model/representation.py
```python
import random as rnd
from math import floor, ceil
from model.model import *
tf.random.set_random_seed(1950)
rnd.seed(1950)
BLOCK_LENGTH = 6
UPSCALE_BLOCKS = [1, 4]
STATE = {
"block": ["resblock", "identity", "inv_resblock", "depth_sep_block"],
"block_normal": [ResBlock, ConvBlock, InvertedResBlock, DepthWiseSeparateBlock],
"block_reduction": [ResBlockUpscale, ConvBlockUpscale,
InvertedResBlockRUpscale, DepthWiseSeparateBlockUpscale],
"kernel": [1, 3, 5],
"flops": [1, 2, 4, 8]
}
def _interval_map(x, size):
if x == 0: return 1
return floor(ceil(x * size) / 1.0)
# noinspection PyDefaultArgument
class NeuralSearchState:
def __init__(self, state: dict = STATE):
self.block_state = state["block"]
self.block_normal = state["block_normal"]
self.block_reduction = state["block_reduction"]
self.block_state_len = len(self.block_state)
self.kernel_state = state["kernel"]
self.kernel_state_len = len(self.kernel_state)
self.flops_multiplier = state["flops"]
self.flops_multiplier_len = len(self.flops_multiplier)
self.size = BLOCK_LENGTH * 2 + 1
def get_random_individual(self):
return [rnd.random() for _ in range(BLOCK_LENGTH * 2 + 1)]
def decode_int(self, individual: list):
blocks, kernels = [], []
for i, x in enumerate(individual[:BLOCK_LENGTH]):
if i not in UPSCALE_BLOCKS:
blocks.append(self.block_normal[x])
else:
blocks.append(self.block_reduction[x])
for x in individual[BLOCK_LENGTH:BLOCK_LENGTH * 2]:
kernels.append(self.kernel_state[x])
x = individual[BLOCK_LENGTH * 2]
flops = self.flops_multiplier[x]
return blocks, kernels, flops
def decode(self, individual: list):
blocks, kernels = [], []
for i, x in enumerate(individual[:BLOCK_LENGTH]):
index = _interval_map(x, self.block_state_len)
if i not in UPSCALE_BLOCKS:
blocks.append(self.block_normal[index - 1])
else:
blocks.append(self.block_reduction[index - 1])
for x in individual[BLOCK_LENGTH: BLOCK_LENGTH * 2]:
index = _interval_map(x, self.kernel_state_len)
kernels.append(self.kernel_state[index - 1])
x = individual[BLOCK_LENGTH * 2]
index = _interval_map(x, self.flops_multiplier_len)
flops = self.flops_multiplier[index - 1]
return blocks, kernels, flops
def repr_int(self, individual: list):
decoded = []
for x in individual[:BLOCK_LENGTH]:
index = _interval_map(x, self.block_state_len)
decoded.append(index - 1)
for x in individual[BLOCK_LENGTH:BLOCK_LENGTH * 2]:
index = _interval_map(x, self.kernel_state_len)
decoded.append(index - 1)
x = individual[BLOCK_LENGTH * 2]
index = _interval_map(x, self.flops_multiplier_len)
decoded.append(index - 1)
return decoded
# Test
if __name__ == '__main__':
n = NeuralSearchState()
ind = n.get_random_individual()
print(ind)
print(n.decode(ind))
print(n.repr_int(ind))
print(n.decode_int(n.repr_int(ind)))
```
#### File: HeadPoseNAS/search/search_random.py
```python
from training.evaluator import *
tf.random.set_random_seed(1950)
random.seed(1950)
np.random.seed(1950)
EVAL_LOG = "eval.txt"
BEST_LOG = "best.txt"
def make_logger(log_dir):
open(f"{log_dir}/{EVAL_LOG}", "w+").write("")
open(f"{log_dir}/{BEST_LOG}", "w+").write("")
def write_best(best, best_val, state, log_dir):
open(f"{log_dir}/{BEST_LOG}", "a+").write(json.dumps({
"IND": list(best),
"IND_DEC": state.repr_int(list(best)),
"VAL": float(best_val),
}) + "\n")
def search_random(args, state, log_dir):
args.logger = f"{log_dir}/{EVAL_LOG}"
if not args.continue_iter:
make_logger(log_dir)
best, bestVal = None, 0
for iteration in range(args.max_iter * args.pop_size):
ind = state.get_random_individual()
res = evaluate(args, state, ind)
if res < bestVal or best is None:
best = ind
bestVal = res
if iteration % args.pop_size == 0:
write_best(best, bestVal, state, log_dir)
```
#### File: HeadPoseNAS/training/evaluator.py
```python
import os
from data.dataset import *
from model.model import master_module
from model.network import *
from training.train import *
tf.random.set_random_seed(1950)
random.seed(1950)
np.random.seed(1950)
def check_result(name):
info = json.load(open(f"{MODEL_SAVER_PATH}/{name}/{name}.txt", "r"))
return float(info["FIT"])
def evaluate(args, state, ind):
name = "model_" + "".join(str(c) for c in state.repr_int(ind))
if os.path.exists(f"{MODEL_SAVER_PATH}/{name}"):
result = check_result(name)
open(args.logger, "a").write(f"{name}:{result}\n")
return result
if args.gpu:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
else:
session = tf.Session()
layer = get_placeholder(args.model, args.layer)
dataset_train, dataset_val = get_train_datasets(args.machine, args.model,
args.train_set, args.val_set,
layer["name"], args.batch_size, session)
blocks, kernels, flops = state.decode(ind)
inpt, output, training = master_module(layer["shape"], blocks, kernels, flops)
trainer = PoseTrainer(inpt, output, training, session, name=name)
session.run(tf.global_variables_initializer())
trainer.train(dataset_train, dataset_val,
epochs=args.epochs,
learning_rate=args.learning_rate)
open(f"{MODEL_SAVER_PATH}/{name}/{name}.txt", "w+").write(
json.dumps({
"NAME": name,
"VAL_LOSS": trainer.validation_loss,
"FLOPS": trainer.flops,
"FIT": trainer.fitness
}, indent=3)
)
open(args.logger, "a").write(f"{name}:{trainer.fitness}\n")
session.close()
tf.reset_default_graph()
return trainer.fitness
def test(args, blocks, kernels, flops):
if args.gpu:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
else:
session = tf.Session()
layer = get_placeholder(args.model, args.layer)
inpt, output, training = master_module(layer["shape"], blocks, kernels, flops)
model_dir = f"{MODEL_SAVER_PATH}/{args.name}/{args.name}.ckpt"
trainer = PoseTrainer(inpt, output, training, session, name=args.name, export_dir=model_dir)
for test_set in args.test_sets:
print(test_set)
dataset_test = get_test_dataset(args.machine, args.model,
test_set, layer["name"],
args.batch_size, session)
res = trainer.test_forward(dataset_test)
print(res)
session.close()
tf.reset_default_graph()
``` |
{
"source": "jmatamatics/Formulas",
"score": 4
} |
#### File: src/area/parallelogram_area.py
```python
def parallelogram_area():
b = int(input("Enter the base: "))
h = int(input("Enter the height: "))
A = b * h
print(f'A = b * h \n'
f'A = {b} * {h} \n'
          f'The area of the parallelogram equals {A}')
``` |
{
"source": "jmategk0/airflow_exmples",
"score": 3
} |
#### File: airflow_home/dags/stock_piplines.py
```python
from datetime import datetime
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
import requests
import pandas as pd
import csv
import json
URL = "https://raw.githubusercontent.com/jmategk0/quandl_reports/master/WIKI-PRICES_10k.csv"
DATA_INPUT_FILE = "stock_prices_10k_sample.csv"
DATA_EXPORT_FILE = "report_open_close_10k_sample.csv"
TEMP_FILE = "report_open_close_10k_sample.json"
STOCK_CODES = ["COF", "GOOGL", "MSFT"]
START_DATE = "2017-01-01"
END_DATE = "2017-06-30"
EX_DIVIDEND_COL = "ex-dividend"
SPLIT_RATIO = "split_ratio"
ADJ_OPEN = "adj_open"
ADJ_HIGH = "adj_high"
ADJ_LOW = "adj_low"
ADJ_CLOSE = "adj_close"
ADJ_VOLUME = "adj_volume"
PRICES_COLUMNS_TO_DROP = [
EX_DIVIDEND_COL,
SPLIT_RATIO,
ADJ_OPEN,
ADJ_HIGH,
ADJ_LOW,
ADJ_CLOSE,
ADJ_VOLUME
]
TICKER_COL = "ticker"
DATE_COL = "date"
OPEN_COL = "open"
HIGH_COL = "high"
LOW_COL = "low"
CLOSE_COL = "close"
VOLUME_COL = "volume"
PRICES_COLUMNS_TO_KEEP = [
TICKER_COL,
DATE_COL,
OPEN_COL,
CLOSE_COL,
HIGH_COL,
LOW_COL,
VOLUME_COL
]
def download_file():
r = requests.get(URL)
with open(DATA_INPUT_FILE,'wb') as f:
f.write(r.content)
return True
def dictionary_to_json(dictionary, write_to_file=False, filename=" ", write_mode='w'):
if write_to_file:
with open(filename, write_mode) as json_file:
json_data = json.dump(dictionary, json_file, indent=4)
else:
json_data = json.dumps(dictionary, indent=4)
return json_data
def populate_dataframe():
final_df = pd.DataFrame()
raw_df = pd.read_csv(filepath_or_buffer=DATA_INPUT_FILE, parse_dates=["date"])
df_filtered_by_code = raw_df[raw_df.ticker.isin(STOCK_CODES)]
df_filtered_by_date = df_filtered_by_code[
(df_filtered_by_code.date >= START_DATE) & (df_filtered_by_code.date <= END_DATE)
]
final_df = df_filtered_by_date.drop(PRICES_COLUMNS_TO_DROP, axis=1)
return final_df
def get_stock_open_close(df, stock_code, time_period="M", round_precision=2):
stock_results = []
stock_df = df[df.ticker == stock_code]
group_by_period_and_open = stock_df[OPEN_COL].groupby(by=stock_df.date.dt.to_period(time_period))
group_by_period_and_close = stock_df[CLOSE_COL].groupby(by=stock_df.date.dt.to_period(time_period))
open_means = dict(group_by_period_and_open.mean())
close_means = dict(group_by_period_and_close.mean())
for year_mo_key in open_means:
row = {
"month": str(year_mo_key),
"average_open": round(open_means[year_mo_key], round_precision),
"average_close": round(close_means[year_mo_key], round_precision)
}
stock_results.append(row)
return stock_results
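# get_stock_open_close returns one row per period, e.g. (illustrative values only):
#   [{"month": "2017-01", "average_open": 123.45, "average_close": 124.56}, ...]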
def report_average_open_close():
raw_df = populate_dataframe()
report_results = {}
for stock in STOCK_CODES:
report_results[stock] = get_stock_open_close(df=raw_df, stock_code=stock)
dictionary_to_json(report_results, write_to_file=True, filename=TEMP_FILE)
return True
def json_to_dictionary(filename):
with open(filename) as json_file:
json_data = json.load(json_file)
return json_data
def dictionary_to_csv(list_of_dictionaries, filename, field_names, delimiter=",", write_mode='w'):
with open(filename, write_mode) as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=field_names, delimiter=delimiter)
writer.writeheader()
for row in list_of_dictionaries:
writer.writerow(row)
return len(list_of_dictionaries)
def save_report():
    raw_results = json_to_dictionary(TEMP_FILE)
    # Flatten {ticker: [month rows]} into a flat list of row dicts for DictWriter
    report_rows = [dict(row, ticker=stock) for stock, rows in raw_results.items() for row in rows]
    dictionary_to_csv(report_rows, DATA_EXPORT_FILE, ["ticker", "month", "average_open", "average_close"])
dag = DAG('stock_report', description='open_close_report DAG',
schedule_interval='0 12 * * *',
start_date=datetime(2018, 8, 25), catchup=False)
extract_operator = PythonOperator(
task_id='extract_stock_prices_task',
python_callable=download_file,
retries=3,
dag=dag
)
transform_operator = PythonOperator(
task_id='transform_stock_prices_task',
python_callable=report_average_open_close,
retries=3,
dag=dag
)
load_operator = PythonOperator(
task_id='load_stock_prices_task',
python_callable=save_report,
retries=3,
dag=dag
)
extract_operator >> transform_operator >> load_operator
``` |
{
"source": "jmategk0/DataInterchange",
"score": 3
} |
#### File: DataInterchange/data_interchange/interchange.py
```python
import datetime
import csv
import json
import xmltodict
import yaml
# import h5py
# import pickle
__author__ = 'jmategk0'
__version__ = '0.0.1'
__license__ = 'MIT'
class FileWrapper(object):
def __init__(self):
self.date_stamp = datetime.datetime.now()
def plaintext_to_string_list(self, filename, read_mode='r'):
with open(filename, read_mode) as text_file:
text_data = text_file.readlines()
return text_data
def string_list_to_plaintext(self, string_list, filename, write_mode='w'):
with open(filename, write_mode) as text_file:
text_file.writelines(string_list)
return filename
def plaintext_to_string(self, filename, read_mode='r'):
with open(filename, read_mode) as text_file:
text_data = text_file.read()
return text_data
def string_to_plaintext(self, string_value, filename, write_mode='w'):
with open(filename, write_mode) as text_file:
text_file.write(string_value)
return filename
def update_date_stamp(self):
self.date_stamp = datetime.datetime.now()
return self.date_stamp
class TextBroker(FileWrapper):
def csv_to_dictionary(self, filename, delimiter=",", read_mode='r'):
csv_data = []
with open(filename, read_mode) as csv_file:
reader = csv.DictReader(csv_file, delimiter=delimiter)
for row in reader:
csv_data.append(row)
return csv_data
def dictionary_to_csv(self, list_of_dictionaries, filename, field_names, delimiter=",", write_mode='w'):
with open(filename, write_mode) as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=field_names, delimiter=delimiter)
writer.writeheader()
for row in list_of_dictionaries:
writer.writerow(row)
return len(list_of_dictionaries)
def json_to_dictionary(self, filename):
with open(filename) as json_file:
json_data = json.load(json_file)
return json_data
def dictionary_to_json(self, dictionary, write_to_file=False, filename=" ", write_mode='w'):
if write_to_file:
with open(filename, write_mode) as json_file:
json_data = json.dump(dictionary, json_file, indent=4)
else:
json_data = json.dumps(dictionary, indent=4)
return json_data
def yaml_to_dictionary(self, filename, read_mode='r'):
with open(filename, read_mode) as yaml_file:
            yaml_data = yaml.safe_load(yaml_file)
return yaml_data
def dictionary_to_yaml(self, dictionary, write_to_file=False, filename=" ", write_mode='w'):
if write_to_file:
with open(filename, write_mode) as yaml_file:
yaml_data = yaml.dump(dictionary, yaml_file)
else:
yaml_data = yaml.dump(dictionary)
return yaml_data
def xml_to_dictionary(self, filename, read_mode='r'):
with open(filename, read_mode) as xml_file:
xml_data = xmltodict.parse(xml_file.read(), dict_constructor=dict)
return xml_data
def dictionary_to_xml(self, dictionary, write_to_file=False, filename=" ", write_mode='w'):
xml_data = xmltodict.unparse(dictionary, pretty=True)
if write_to_file:
with open(filename, write_mode) as xml_file:
xml_data = xml_file.write(xml_data)
return xml_data
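# Usage sketch for TextBroker (hypothetical file names):
#   broker = TextBroker()
#   rows = broker.csv_to_dictionary("people.csv")
#   broker.dictionary_to_json({"rows": rows}, write_to_file=True, filename="people.json")
#   settings = broker.yaml_to_dictionary("settings.yml")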
class BinaryBroker(FileWrapper):
pass
# support h5py and pickle later
class ComplexBroker(FileWrapper):
pass
# support xlsx and open doc later
``` |
{
"source": "jmategk0/etlite",
"score": 3
} |
#### File: jmategk0/etlite/etlite.py
```python
from decimal import Decimal
def cast_list_to_class_datatype(list_of_values, datatype_class, in_place=False):
# float_values = [float(i) for i in float_values] # 0.25 secs (1,000,000 elements)
if not in_place:
new_list_of_values_as_class_datatype = list(map(datatype_class, list_of_values)) # 0.19 secs
return new_list_of_values_as_class_datatype
else:
for index, value in enumerate(list_of_values):
list_of_values[index] = datatype_class(value) # 0.35 secs
return list_of_values # Now in datatype form
def list_to_floats(list_of_values, in_place=False):
datatype = float
return cast_list_to_class_datatype(list_of_values=list_of_values, datatype_class=datatype, in_place=in_place)
def list_to_decimals(list_of_values, in_place=False):
datatype = Decimal
return cast_list_to_class_datatype(list_of_values=list_of_values, datatype_class=datatype, in_place=in_place)
def list_to_strings(list_of_values, in_place=False):
datatype = str
return cast_list_to_class_datatype(list_of_values=list_of_values, datatype_class=datatype, in_place=in_place)
def list_to_integers(list_of_values, in_place=False):
datatype = int
return cast_list_to_class_datatype(list_of_values=list_of_values, datatype_class=datatype, in_place=in_place)
def list_to_booleans(list_of_values, in_place=False):
datatype = bool
return cast_list_to_class_datatype(list_of_values=list_of_values, datatype_class=datatype, in_place=in_place)
def list_to_dates():
# TODO: Deal with this later
pass
class DataSchema(object):
def __init__(self):
self.data_schema = {}
self.field_names = {}
self.static_schema = True
self.numeric_types = (float, int, Decimal)
self.decimal_types = (float, Decimal)
self.string_types = [str]
self.data_type_map = {
str: "string",
float: "float",
Decimal: "decimal",
int: "integer",
bool: "boolean",
list: "list",
dict: "dictionary"}
self.blank_values = [None, ""]
self.unique_values_map = {} # fld name dict of values
def define_field(self, name, data_type, label="", required=False, read_only=False, null=True, blank=False,
unique=False, choices="", default="", max_length=256, max_digits=14, decimal_places=2, min_value=0,
max_value=0):
data_field_definition = {"name": name,
"data_type": data_type,
"default": default,
"null": null,
"blank": blank,
"unique": unique,
"read_only": read_only,
"required": required}
if label:
data_field_definition["label"] = label
else:
data_field_definition["label"] = name
if choices:
data_field_definition["choices"] = choices
else:
data_field_definition["choices"] = None
if data_type in self.string_types:
data_field_definition["max_length"] = max_length
if data_type in self.decimal_types:
data_field_definition["decimal_places"] = decimal_places
if data_type in self.numeric_types:
data_field_definition["max_digits"] = max_digits
data_field_definition["min_value"] = min_value
data_field_definition["max_value"] = max_value
self.field_names[name] = data_field_definition
def define_fields(self, field_metadata, is_minimal_data=True):
if is_minimal_data:
for field in list(field_metadata.keys()):
self.define_field(name=field, data_type=field_metadata[field])
else:
for field in list(field_metadata.keys()):
metadata = field_metadata[field]
self.define_field(**metadata)
def validate_value(self, value, field_name):
validation_data = {"data_type": isinstance(value, self.field_names[field_name]["data_type"])}
        if self.field_names[field_name]["data_type"] in self.string_types:
            validation_data["max_length"] = len(value) <= self.field_names[field_name]["max_length"]
if self.field_names[field_name]["choices"]:
validation_data["choices"] = value in self.field_names[field_name]["choices"]
if self.field_names[field_name]["unique"]:
if value in self.unique_values_map[field_name]:
validation_data["unique"] = False
else:
validation_data["unique"] = True
# Check the data and make the final call.
unique_booleans = set(validation_data.values())
if False in unique_booleans:
validation_data["is_valid"] = False
else:
validation_data["is_valid"] = True
return validation_data
def validate_row(self, row):
row_field_names = list(row.keys())
schema_field_names = list(self.field_names.keys())
# not right need to make a list of required flds and see if they exist
validation_data = {}
if row_field_names == schema_field_names:
validation_data["field_names"] = True
else:
validation_data["field_names"] = False
for field in row:
            self.validate_value(value=row[field], field_name=field)
# if any data element is not valid; mark the whole row as bad
class Extract(object):
def __init__(self, data_file, data_delimiter, database_name, database_url, database_port, database_query):
self.data_file = data_file
self.data_delimiter = data_delimiter
self.database_name = database_name
self.database_url = database_url
self.database_port = database_port
self.database_query = database_query
def execute(self):
pass # returns a raw data dict
class Transform(object):
def __init__(self, data_schema, data_source):
# data schemas
self.data_schema = data_schema # {}
self.static_schema = True
# data for transform
self.data_source = data_source # []
# built in transformations
self.fields_to_remove = []
self.fields_to_typecast = []
self.fields_to_rename = {"old_name": "new_name"}
self.field_values_to_remap = {"field_name_status": {0: "active", 1: "inactive"}}
self.fields_to_add = []
self.enable_custom_transformations = False
def remove_fields(self, row_data):
for field in self.fields_to_remove:
row_data.pop(field, None)
return row_data
def rename_fields(self, row_data):
for field in list(self.fields_to_rename.keys()):
row_data[self.fields_to_rename[field]] = row_data.pop(field, None)
return row_data
def find_and_replace_values(self, row_data):
for field in list(self.field_values_to_remap.keys()):
field_value_map = self.field_values_to_remap[field]
if row_data[field] in field_value_map:
row_data[field] = field_value_map[row_data[field]]
else:
pass # set to none or keep? user setting?
return row_data
def typecast_fields(self, row_data):
for field in self.fields_to_typecast:
datatype_to_cast = self.data_schema[field]["type"]
            row_data[field] = datatype_to_cast(row_data[field])
return row_data
def add_fields(self, row_data):
for field in self.fields_to_add:
row_data[field] = None
return row_data
def custom_transformations(self, row_data):
# You need to override this!
raise NotImplementedError
def execute(self):
transformed_data = []
for raw_row in self.data_source:
row = raw_row
if self.fields_to_remove:
row = self.remove_fields(row_data=row)
if self.fields_to_rename:
row = self.rename_fields(row_data=row)
if self.fields_to_typecast:
row = self.typecast_fields(row_data=row)
if self.field_values_to_remap:
row = self.find_and_replace_values(row_data=row)
if self.fields_to_add:
row = self.add_fields(row_data=row)
            if self.enable_custom_transformations:
row = self.custom_transformations(row_data=row)
transformed_data.append(row)
return transformed_data
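# Usage sketch (hypothetical fields): configure a Transform over a list of row dicts.
#   t = Transform(data_schema={}, data_source=[{"old_name": 1, "status": 0}])
#   t.fields_to_remove = []
#   t.fields_to_rename = {"old_name": "new_name"}
#   t.field_values_to_remap = {"status": {0: "active", 1: "inactive"}}
#   t.fields_to_add = ["notes"]
#   t.fields_to_typecast = []
#   rows = t.execute()  # -> [{"status": "active", "new_name": 1, "notes": None}]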
class Load(object):
def __init__(self, data_file, data_delimiter, database_name, database_url, database_port, database_query):
self.data_file = data_file
self.data_delimiter = data_delimiter
self.database_name = database_name
self.database_url = database_url
self.database_port = database_port
self.database_query = database_query
def execute(self):
pass # loads into data source
# if unique-together issue, could use md5 hash and lookup dict/set
class ExtractTransformLoad(object):
def __init__(self, name, extractor, transformer, loader):
self.name = name
self.extractor = extractor
self.transformer = transformer
self.loader = loader
def run(self):
self.extractor.execute()
self.transformer.execute()
self.loader.execute()
def run_batch(self, size=100):
pass
def run_single(self):
pass
class ExtractTransformLoadPipeline(object):
def __init__(self, list_of_etl_classes):
self.list_of_etl_classes = list_of_etl_classes
def run_all(self):
for etl in self.list_of_etl_classes:
etl.run()
```
#### File: jmategk0/etlite/etlite_tests.py
```python
import unittest
import etlite
from decimal import Decimal
# import time
# from pprint import pprint
class BaseTestCaseData(unittest.TestCase):
pass
class TestListFunctions(unittest.TestCase):
def setUp(self):
self.test_floats_as_strings = ["0.45", "0.50", "0.80"]
self.test_ints_as_strings = ["1", "2", "3"]
self.test_bools_as_strings = ["True", "False", "true", "false"]
self.test_strings_as_ints = [1, 2, 3, 4, 5, 6]
def test_list_to_floats(self):
my_floats = etlite.list_to_floats(list_of_values=self.test_floats_as_strings, in_place=False)
for value in my_floats:
self.assertEqual(type(value), float)
def test_list_to_ints(self):
my_ints = etlite.list_to_integers(list_of_values=self.test_ints_as_strings, in_place=False)
for value in my_ints:
self.assertEqual(type(value), int)
def test_list_to_dec(self):
my_decimal = etlite.list_to_decimals(list_of_values=self.test_floats_as_strings, in_place=False)
for value in my_decimal:
self.assertEqual(type(value), Decimal)
def test_list_to_bool(self):
my_booleans = etlite.list_to_booleans(list_of_values=self.test_bools_as_strings, in_place=False)
for value in my_booleans:
self.assertEqual(type(value), bool)
def test_list_to_strings(self):
my_strings = etlite.list_to_strings(list_of_values=self.test_strings_as_ints, in_place=False)
for value in my_strings:
self.assertEqual(type(value), str)
class TestDataSchema(unittest.TestCase):
def setUp(self):
self.test_data = [
{"fname": "John", "lname": "Doe", "dob": "1960-01-01", "amount": "100", "status": 0},
{"fname": "Jane", "lname": "Doe", "dob": "1961-01-01", "amount": "200", "status": 0},
{"fname": "Jamie", "lname": "Doe", "dob": "1962-01-01", "amount": "300", "status": 0}]
self.minimal_data = {"fname": str, "lname": str, "dob": str, "amount": float, "status": str}
self.schema = {
'fname': {
'name': 'fname',
'data_type': str,
'default': '',
'null': True,
'blank': False,
'unique': False,
'read_only': False,
'required': False,
'label': 'fname',
'choices': None,
'max_length': 256},
'lname': {
'name': 'lname',
'data_type': str,
'default': '',
'null': True,
'blank': False,
'unique': False,
'read_only': False,
'required': False,
'label': 'lname',
'choices': None,
'max_length': 256},
'dob': {
'name': 'dob',
'data_type': str,
'default': '',
'null': True,
'blank': False,
'unique': False,
'read_only': False,
'required': False,
'label': 'dob',
'choices': None,
'max_length': 256},
'amount': {
'name': 'amount',
'data_type': float,
'default': '',
'null': True,
'blank': False,
'unique': False,
'read_only': False,
'required': False,
'label': 'amount',
'choices': None,
'decimal_places': 2,
'max_digits': 14,
'min_value': 0,
'max_value': 0},
'status': {
'name': 'status',
'data_type': str,
'default': '',
'null': True,
'blank': False,
'unique': False,
'read_only': False,
'required': False,
'label': 'status',
'choices': None,
'max_length': 256}}
def test_schema_definition_with_minimal_data(self):
schema = etlite.DataSchema()
schema.define_fields(self.minimal_data, is_minimal_data=True)
self.assertEqual(self.schema, schema.field_names)
def test_schema_definition_with_full_data(self):
schema = etlite.DataSchema()
schema.define_fields(self.schema, is_minimal_data=False)
self.assertEqual(self.schema, schema.field_names)
class TestETL(unittest.TestCase):
def setUp(self):
self.test_data = [
{"fname": "John", "lname": "Doe", "dob": "1960-01-01", "amount": "100", "status": 0},
{"fname": "Jane", "lname": "Doe", "dob": "1961-01-01", "amount": "200", "status": 0},
{"fname": "Jamie", "lname": "Doe", "dob": "1962-01-01", "amount": "300", "status": 0}]
``` |
{
"source": "jmategk0/quandl_reports",
"score": 2
} |
#### File: jmategk0/quandl_reports/test_reports.py
```python
from unittest import TestCase, TestLoader, TextTestRunner, skip
from config import DEFAULT_STOCK_CODES, START_DATE, END_DATE, QUANDL_API_KEY
from quandl_reports import CsvQuandlReport, ApiQuandlReport
from test_results_fixtures import (
default_open_close_report,
default_max_daily_profit_report,
default_busy_day_report,
default_biggest_loser_report,
)
# NOTE: CSV and API reports both return the same expected_results.
# TestCases run all four top level reporting methods for both quandl child classes.
# Both TestCase use the same expected result fixtures, showing the both reporting methods (API & CSV) produce the
# same result.
class DefaultStockCodeCsvReportsTestCase(TestCase):
def setUp(self):
self.default_prices_data_file = "default_stock_codes_data.csv"
def test_average_open_close_report(self):
expected_results = default_open_close_report
report = CsvQuandlReport(
filename=self.default_prices_data_file,
stock_codes=DEFAULT_STOCK_CODES,
end_date=END_DATE,
start_date=START_DATE,
)
report_results = report.report_average_open_close()
self.assertEqual(report_results, expected_results)
def test_max_daily_profit_report(self):
expected_results = default_max_daily_profit_report
report = CsvQuandlReport(
filename=self.default_prices_data_file,
stock_codes=DEFAULT_STOCK_CODES,
end_date=END_DATE,
start_date=START_DATE,
)
report_results = report.report_max_daily_profit()
self.assertEqual(report_results, expected_results)
def test_busy_day_report(self):
expected_results = default_busy_day_report
report = CsvQuandlReport(
filename=self.default_prices_data_file,
stock_codes=DEFAULT_STOCK_CODES,
end_date=END_DATE,
start_date=START_DATE,
)
report_results = report.report_busy_day()
self.assertEqual(report_results, expected_results)
def test_biggest_loser_report(self):
expected_results = default_biggest_loser_report
report = CsvQuandlReport(
filename=self.default_prices_data_file,
stock_codes=DEFAULT_STOCK_CODES,
end_date=END_DATE,
start_date=START_DATE,
)
report_results = report.report_biggest_loser()
self.assertEqual(report_results, expected_results)
class DefaultStockCodeApiReportsTestCase(TestCase):
def setUp(self):
self.api_key = QUANDL_API_KEY
# TODO: Setup mocks with unittest.mock; ensures code handles expected api calls without calling live api.
@skip("passed last time with live call")
def test_average_open_close_report(self):
expected_results = default_open_close_report
report = ApiQuandlReport(
api_key=self.api_key,
stock_codes=DEFAULT_STOCK_CODES,
end_date=END_DATE,
start_date=START_DATE,
)
report_results = report.report_average_open_close()
self.assertEqual(report_results, expected_results)
@skip("passed last time with live call")
def test_max_daily_profit_report(self):
expected_results = default_max_daily_profit_report
report = ApiQuandlReport(
api_key=self.api_key,
stock_codes=DEFAULT_STOCK_CODES,
end_date=END_DATE,
start_date=START_DATE,
)
report_results = report.report_max_daily_profit()
self.assertEqual(report_results, expected_results)
@skip("passed last time with live call")
def test_busy_day_report(self):
expected_results = default_busy_day_report
report = ApiQuandlReport(
api_key=self.api_key,
stock_codes=DEFAULT_STOCK_CODES,
end_date=END_DATE,
start_date=START_DATE,
)
report_results = report.report_busy_day()
self.assertEqual(report_results, expected_results)
@skip("passed last time with live call")
def test_biggest_loser_report(self):
expected_results = default_biggest_loser_report
report = ApiQuandlReport(
api_key=self.api_key,
stock_codes=DEFAULT_STOCK_CODES,
end_date=END_DATE,
start_date=START_DATE,
)
report_results = report.report_biggest_loser()
self.assertEqual(report_results, expected_results)
default_csv_test_suite = TestLoader().loadTestsFromTestCase(
DefaultStockCodeCsvReportsTestCase
)
TextTestRunner(verbosity=2).run(default_csv_test_suite)
default_api_test_suite = TestLoader().loadTestsFromTestCase(
DefaultStockCodeApiReportsTestCase
)
TextTestRunner(verbosity=2).run(default_api_test_suite)
``` |
{
"source": "jmategk0/simpleStatistics",
"score": 3
} |
#### File: jmategk0/simpleStatistics/simpleStatisticsTests.py
```python
import unittest
import simpleStatistics
class TestDescriptiveStatisticsMethods(unittest.TestCase):
def setUp(self):
self.descriptive_statistics = simpleStatistics.DescriptiveStatistics()
self.test_dataset1 = [3, 5, 8, 10, 11] # 8
self.test_dataset2 = [3, 3, 4, 5, 7, 8] # 4.5
self.test_dataset3 = [1, 2, 3, 3, 3, 4] # 3
self.test_dataset4 = [1, 9, 8, 5, 8, 7]
self.test_dataset5 = [1, 0, 6, 1]
self.test_dataset6 = [1, 6, 4, 3, 8, 7, 6]
def test_mode_with_valid_data_001(self):
# arrange
mode_data = self.test_dataset4
# act
mode_result = self.descriptive_statistics.mode(mode_data)
# assert
self.assertEqual(mode_result, 8)
def test_mode_with_valid_data_002(self):
# arrange
mode_data = self.test_dataset3
# act
mode_result = self.descriptive_statistics.mode(mode_data)
# assert
self.assertEqual(mode_result, 3)
def test_mode_with_invalid_data_001(self):
# arrange
mode_data = self.test_dataset1
# act
mode_result = self.descriptive_statistics.mode(mode_data)
# assert
self.assertEqual(mode_result, None)
def test_mean_001(self):
# arrange
mean_data = self.test_dataset1
# act
mean_result = self.descriptive_statistics.mean(mean_data)
# assert
self.assertEqual(mean_result, 7.4)
def test_median_001(self):
# arrange
median_data = self.test_dataset1
# act
median_result = self.descriptive_statistics.median(median_data)
# assert
self.assertEqual(median_result, 8)
def test_variance_001(self):
# arrange
variance_data = self.test_dataset5
# act
variance_result = self.descriptive_statistics.variance(variance_data)
# assert
self.assertEqual(variance_result, 5.50)
def test_standard_deviation_001(self):
# arrange
sd_data = self.test_dataset1
# act
sd_result = self.descriptive_statistics.standard_deviation(sd_data, round_value=True)
# assert
self.assertEqual(sd_result, 3.01)
def test_standard_deviation_002(self):
# arrange
sd_data = self.test_dataset5
# act
sd_result = self.descriptive_statistics.standard_deviation(sd_data, round_value=True)
# assert
self.assertEqual(sd_result, 2.35)
def test_standard_deviation_003(self):
# arrange
sd_data = self.test_dataset6
# act
sd_result = self.descriptive_statistics.standard_deviation(sd_data, is_population=False, round_value=True)
# assert
self.assertEqual(sd_result, 2.45)
def test_sum_of_squared_deviations_001(self):
# arrange: SS
ss_data = self.test_dataset5
# act
ss_result = self.descriptive_statistics.sum_of_squared_deviations(ss_data)
# assert
self.assertEqual(ss_result, 22)
class TestInferentialStatisticsMethods(unittest.TestCase):
def setUp(self):
self.inferential_statistics = simpleStatistics.InferentialStatistics()
self.test_dataset1 = [3, 5, 8, 10, 11] # 8
self.test_dataset2 = [0.0, 6.0, 5.0, 2.0, 3.0, 2.0]
self.test_dataset3 = [0.0, 2.0, 4.0, 4.0, 5.0]
self.test_dataset4 = [360.0, 380.0, 420, 440]
unit_normal_table = {}
unit_normal_table[0.00] = {'body': .5000, 'tail': .5000, 'mean_to_z': .0000}
def test_z_score_calculate_001(self):
# arrange: SS
score = 130.0
mean = 100.0
sd = 10.0
# act
z_score_result = self.inferential_statistics.z_score_calculate(score, mean, sd, True)
# assert
self.assertEqual(z_score_result, 3.00)
def test_score_value_from_z_score_001(self):
# arrange: SS
z_score = -3.0
mean = 60.0
sd = 5.0
# act
score_value_result = self.inferential_statistics.score_value_from_z_score(mean, sd, z_score)
# assert
self.assertEqual(score_value_result, 45.0)
def test_z_score_calculate_002(self):
# arrange: SS
score = 95.0
mean = 86.0
sd = 7.0
# act
z_score_result = self.inferential_statistics.z_score_calculate(score, mean, sd, True)
# assert
self.assertEqual(z_score_result, 1.29)
def test_z_score_transformation_001(self):
# arrange
raw_data = self.test_dataset2
final_data = [-1.50, 1.50, 1.00, -0.50, 0, -0.50]
# act
z_score_results = self.inferential_statistics.z_score_transformation(raw_data)
# assert
self.assertEqual(z_score_results, final_data)
def test_z_score_transformation_002(self):
# arrange
raw_data = self.test_dataset3
final_data = [-1.5, -0.50, 0.50, 0.50, 1.00]
# act
z_score_results = self.inferential_statistics.z_score_transformation(raw_data, is_population=False, round_value=2)
# assert
self.assertEqual(z_score_results, final_data)
def test_z_score_calculate_from_list_001(self):
# arrange
raw_data = self.test_dataset4
score = 418.0
final_z_score = 0.49
# act
z_score_result = self.inferential_statistics.z_score_calculate_from_list(score, raw_data, is_population=False, round_value=2)
# assert
self.assertEqual(z_score_result, final_z_score)
descriptive_statistics_test_suite = unittest.TestLoader().loadTestsFromTestCase(TestDescriptiveStatisticsMethods)
unittest.TextTestRunner(verbosity=2).run(descriptive_statistics_test_suite)
inferential_statistics_test_suite = unittest.TestLoader().loadTestsFromTestCase(TestInferentialStatisticsMethods)
unittest.TextTestRunner(verbosity=2).run(inferential_statistics_test_suite)
``` |
{
"source": "jmategk0/UbuntuSetup",
"score": 4
} |
#### File: UbuntuSetup/conductor/conductor.py
```python
import os
import logging
import logging.config
def command_string_builder(argument_dictionary, prepend, append="",
flags_list="", argument_delimiter="-"):
"""This function dynamically builds a shell executable command string, so that
you don't have to manually build strings using the pythons string
formatting tools. This function is lazy and somewhat inefficient.
Args:
argument_dictionary (dict): A dictionary of command line arguments.
Should not have any "-" marks. keys are arg names.
prepend (str): A string value attached to the start of the command
string. Normally the software name/path.
append (str): A string value added to the end of the command string.
Sometimes used for file paths. Defaults to nothing ("").
flags_list (List[str]): A list of command lines flags without argument
delimiters. Defaults to nothing ("").
argument_delimiter (str): By default arguments delimiters are defined
as "-", here they may be changed to "--" or any other value needed
by the command.
Returns:
        str: A fully formatted command string. Note: in this implementation
            arguments may be placed in arbitrary order.
"""
argument_list = []
for key in argument_dictionary:
formatted_key = "{delim}{key_value} ".format(
delim=argument_delimiter,
key_value=key)
argument_string = "{formatted_key}{value} ".format(
formatted_key=formatted_key,
value=str(argument_dictionary[key]))
argument_list.append(argument_string)
formatted_command_string = "".join(argument_list)
if flags_list:
my_flags = []
for item in flags_list:
formatted_flag = "{delim}{flag_value} ".format(delim=argument_delimiter,
flag_value=item)
my_flags.append(formatted_flag)
flags_string = "".join(my_flags)
formatted_command_string = "{arg_list}{flags}".format(flags=flags_string,
arg_list=formatted_command_string)
# prepend
formatted_command_string = "{prepend} {arg_list}".format(prepend=prepend,
arg_list=formatted_command_string)
if append:
formatted_command_string = "{arg_list} {append}".format(append=append,
arg_list=formatted_command_string)
return formatted_command_string
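# --- Usage sketch (added for illustration; the tool name and arguments below
# are hypothetical, not part of the original project) ---
# command_string_builder({"o": "out.txt", "n": 5}, prepend="sometool",
#                        append="input.txt", flags_list=["v", "q"])
# returns roughly: "sometool -o out.txt -n 5 -v -q  input.txt"
# (argument order follows dictionary iteration order, and the simple string
# joins can leave extra spaces between tokens).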
class OperationWrapper(object):
def __init__(self, debug=False, log_filename=""):
self.print_command_strings = debug
if self.print_command_strings:
logging.basicConfig(filename=log_filename, level=logging.INFO)
self.logger = logging.getLogger(__name__)
def load_commands_from_text_file(self, filename):
"""Reads the contents of a text file and loads each line into a list
element.
Args:
filename (str): Name of file with a list of shell commands on each
line.
Returns:
List(str): A list of shell commands without newlines.
"""
with open(filename, 'r') as command_file:
commands = command_file.read().splitlines()
return commands
def start_blocking_process(self, command_string):
"""Executes a shell commend. The function will not exit until the shell
command has completed.
Args:
command_string (str): A shell command represented as a string.
Returns:
str: The shell output of the command.
"""
if self.print_command_strings:
self.logger.info(command_string)
process = os.popen(command_string)
process_response = process.read()
process.close()
return process_response
def start_non_blocking_process(self, command_string):
# TODO: Find a lazy way to start a non-blocking process with the
# multithreading lib? Or maybe async?
raise NotImplementedError
def run_list_of_commands(self, list_of_command_strings):
"""Executes each shell command sequentially by iterating though the
list of commands.
Args:
list_of_command_strings (List[str]): A list containing strings
where each string is a shell command without newlines.
"""
for command in list_of_command_strings:
self.start_blocking_process(command_string=command)
def install(self, command_filename):
"""Reads the contents of command_filename and then runs each install
command sequentially.
Args:
command_filename (str): Name of file with a list of shell commands
on each line.
"""
command_list = self.load_commands_from_text_file(command_filename)
self.run_list_of_commands(command_list)
def change_permissions(self, permission_code, directory_name,
enable_recursion):
"""Uses chmod to change permissions on the specified directory.
Args:
permission_code (str): Code used for allocating file/directory
permissions such as g+wrx. or 755.
directory_name (str): The name of the file or directory you want to
edit permissions on.
enable_recursion (bool): Enable recursion to apply the permission
code to all files/folders within the directory
Returns:
            str: A formatted chmod command string.
"""
        if enable_recursion:
            command = "chmod {code} {dir} -R".format(code=permission_code,
                                                     dir=directory_name)
        else:
            command = "chmod {code} {dir}".format(code=permission_code,
                                                  dir=directory_name)
return self.start_blocking_process(command_string=command)
def change_group(self, group_name, directory_name, enable_recursion):
"""Changes user group on the specified directory.
Args:
group_name (str): The name of the user group.
directory_name (str): The name of the directory.
enable_recursion (bool): Enable recursion to apply the permission
code to all files/folders within the directory
Returns:
str: A formatted chgrp command
"""
        if enable_recursion:
            command = "chgrp {grp} {dir} -R".format(grp=group_name,
                                                    dir=directory_name)
        else:
            command = "chgrp {grp} {dir}".format(grp=group_name,
                                                 dir=directory_name)
return self.start_blocking_process(command_string=command)
def change_owner(self, new_owner, directory_name, enable_recursion):
"""Changes ownership of the specified directory.
Args:
new_owner (str): User to be assigned to the specified directory.
directory_name (str): Directory to be modified.
enable_recursion (bool): Enable recursion to apply the owner to
all files and folders within the directory.
Returns:
str: A formatted chown command.
"""
        if enable_recursion:
            command = "chown {own} {dir} -R".format(own=new_owner,
                                                    dir=directory_name)
        else:
            command = "chown {own} {dir}".format(own=new_owner,
                                                 dir=directory_name)
return self.start_blocking_process(command_string=command)
def add_group(self, group_name):
"""Creates a new group account with default values.
Args:
group_name (str): Name of new group.
Returns:
str: A formatted groupadd command.
"""
command = "groupadd {name}".format(name=group_name)
return self.start_blocking_process(command_string=command)
def list_my_groups(self):
"""Lists the groups the current user belongs to.
Returns:
List(str): List of user's groups as strings.
"""
command = "groups"
results = self.start_blocking_process(command_string=command)
results = str(results).rstrip()
return results.split(" ")
def list_user_groups(self, username, verbose=False):
"""Lists the groups a user belongs to.
Args:
username (str): User to look up groups for.
verbose (bool): If verbose, username, uid, gid, and groups will be
returned as a dictionary. Otherwise, only groups are returned
as a list. Defaults to False.
Returns:
List(str) or dict: Groups the user belongs to.
"""
if verbose:
command = "id {user}".format(user=username)
raw_results = self.start_blocking_process(command_string=command)
raw_results = str(raw_results).rstrip().split(" ")
# parse uid
uid = raw_results[0].replace("uid=", "").replace(")", "")
uid_temp = uid.split("(")
uid = {uid_temp[0]: uid_temp[1]}
# parse gid
gid = raw_results[1].replace("gid=", "").replace(")", "")
gid_temp = gid.split("(")
gid = {gid_temp[0]: gid_temp[1]}
# parse groups
groups_raw = raw_results[2].replace("groups=",
"").replace(")", "").split(",")
groups = {}
for group in groups_raw:
group_temp = group.split("(")
groups[group_temp[0]] = group_temp[1]
final_results = {"username": username, "uid": uid, "gid": gid,
"groups": groups}
else:
string_to_remove = '{user} : '.format(user=username)
command = "groups {user}".format(user=username)
raw_results = self.start_blocking_process(command_string=command)
raw_results = str(raw_results.rstrip())
final_results = raw_results.replace(string_to_remove,
"").split(" ")
return final_results
def add_new_user(self, username, user_home_directory, user_groups):
"""Adds a new user account to the system.
Args:
username (str): Username to be added.
user_home_directory (str): Directory to assign as user's home
directory.
user_groups (str): User groups to add new user to.
Returns:
str: Output of useradd command.
"""
command = "useradd -d {home} -m {user} -G {groups}".format(
home=user_home_directory,
user=username,
groups=user_groups)
return self.start_blocking_process(command_string=command)
def set_user_password(self, username, password):
"""Sets or changes password for the specified user.
Args:
username (str): User whose password should be changed.
password (str): <PASSWORD>.
Returns:
str: Output of chpasswd command.
"""
command = "echo {user}:{password} | chpasswd".format(user=username,
password=password)
return self.start_blocking_process(command_string=command)
def list_all_groups_on_system(self):
"""Lists all groups on the OS.
Returns:
str: Contents of /etc/group
"""
command = "cat /etc/group"
return self.start_blocking_process(command_string=command)
def list_all_users(self):
"""Lists all users on the system.
Returns:
str: Output of /etc/passwd
"""
command = "cat /etc/passwd"
return self.start_blocking_process(command_string=command)
def make_directory(self, full_dir_path):
"""Makes a new file directory.
Args:
full_dir_path (str): path where the dir should be created.
"""
mk_dir_command = "mkdir {dir}".format(dir=full_dir_path)
self.start_blocking_process(command_string=mk_dir_command)
def move_directory(self, from_directory, to_directory):
"""Moves the specified directory.
Args:
from_directory (str): Directory to be moved.
to_directory (str): New location for directory.
Returns:
str: Output of mv command.
"""
command = "mv {dir1} {dir2}".format(dir1=from_directory,
dir2=to_directory)
return self.start_blocking_process(command_string=command)
def copy_directory(self, from_directory, to_directory):
"""Copies the specified directory.
Args:
from_directory (str): Directory to be copied.
to_directory (str): New location for directory.
Returns:
str: Output of cp command.
"""
command = "cp {dir1} {dir2}".format(dir1=from_directory,
dir2=to_directory)
return self.start_blocking_process(command_string=command)
def remove_directory(self, directory_name):
"""Removes the specified directory.
Args:
directory_name (str): Directory to be removed.
Returns:
str: Output of rm command.
"""
command = "rm {dir}".format(dir=directory_name)
return self.start_blocking_process(command_string=command)
def print_working_directory(self):
"""Gets the current working directory.
Returns:
str: Location of current working directory.
"""
command = "pwd"
return self.start_blocking_process(command_string=command)
def change_working_directory(self, directory_name):
"""Changes the current working directory.
Args:
directory_name (str): Directory to move into.
Returns:
str: Output of cd command.
"""
command = "cd {dir}".format(dir=directory_name)
return self.start_blocking_process(command_string=command)
def head_file(self, filename, number_of_lines):
"""Returns the specified number of lines from the beginning of a file.
Args:
filename (str): Name of file to parse.
number_of_lines (int): Number of lines to return.
Returns:
str: The beginning of the specified file.
"""
command = "head -n {lines} {file}".format(file=filename,
lines=number_of_lines)
return self.start_blocking_process(command_string=command)
def tail_file(self, filename, number_of_lines):
"""Returns the specified number of lines from the end of a file.
Args:
filename (str): Name of file to parse.
number_of_lines (int): Number of lines to return.
Returns:
str: The end of the specified file.
"""
command = "tail -n {lines} {file}".format(file=filename,
lines=number_of_lines)
return self.start_blocking_process(command_string=command)
def view_file_contents(self, filename):
"""Returns a text representation of the entire contents of a file.
Args:
filename (str): File whose contents should be returned.
Returns:
str: String representation of file.
"""
command = "cat {file}".format(file=filename)
return self.start_blocking_process(command_string=command)
def list_files(self, verbose=False):
"""Returns a listing of the files in the current working directory.
Args:
verbose (bool): If True, returns a dictionary including each
item's permissions, score, owner, group, size, month, day,
time, and filename. Otherwise, returns only a list of names of
contents. Defaults to False.
Returns:
List(str) or dict: Contents of current directory.
"""
if verbose:
command = "ls -ll"
raw_results = self.start_blocking_process(command_string=command)
result_lines = str(raw_results).split("\n")
result_lines.pop()
total = result_lines.pop(0)
total = total.replace("total ", "")
final_lines = []
for line in result_lines:
split_line = line.split(" ")
filtered_line = list(filter(lambda a: a != "",
split_line)) # remove all ""
final_line = {
"permissions": filtered_line[0],
"score": filtered_line[1],
"owner": filtered_line[2],
"group": filtered_line[3],
"size": filtered_line[4],
"month": filtered_line[5],
"day": filtered_line[6],
"time": filtered_line[7],
"filename": filtered_line[8]
}
final_lines.append(final_line)
final_results = {"total": total, "files": final_lines}
else:
command = "ls"
raw_results = self.start_blocking_process(command_string=command)
final_results = str(raw_results).split("\n")
final_results.remove("")
return final_results
def web_get(self, url):
"""Downloads a file from the Internet.
Args:
url (str): URL to retrieve.
Returns:
str: Output of wget command.
"""
command = "wget {url}".format(url=url)
return self.start_blocking_process(command_string=command)
def remote_sync(self, from_directory, to_directory):
"""Copies a directory remotely.
Args:
from_directory (str): Name of source directory.
to_directory (str): Name of destination for directory.
Returns:
str: Output of rsync command.
"""
command = "rsync -a {dir1} {dir2}".format(dir1=from_directory,
dir2=to_directory)
return self.start_blocking_process(command_string=command)
def network_addresses(self):
"""Configures kernel-resident network interface.
Returns:
str: Output of ifconfig command.
"""
command = "ifconfig"
return self.start_blocking_process(command_string=command)
def list_hardware(self):
"""Gets hardware configuration of machine.
Returns:
str: Output of lshw command.
"""
command = "lshw"
return self.start_blocking_process(command_string=command)
def disk_free_space(self):
"""Looks up free disk space on the machine.
Returns:
str: Output of df command.
"""
command = "df -h"
return self.start_blocking_process(command_string=command)
def operating_system_information(self):
"""Gets operating system information.
Returns:
str: Output of lsb_release command.
"""
command = "lsb_release -a"
return self.start_blocking_process(command_string=command)
def operating_system_kernel_information(self):
"""Gets OS kernel information.
Returns:
str: Output of uname command.
"""
command = "uname -a"
return self.start_blocking_process(command_string=command)
def md5_checksum(self, filename):
"""Gets the MD5 checksum of a file.
Args:
filename (str): File to find checksum for.
Returns:
str: Output of md5sum command.
"""
command = "md5sum {file}".format(file=filename)
return self.start_blocking_process(command_string=command)
def sha1_checksum(self, filename):
"""Gets the SHA1 checksum of a file.
Args:
filename (str): File to find checksum for.
Returns:
str: Output of sha1sum command.
"""
command = "sha1sum {file}".format(file=filename)
return self.start_blocking_process(command_string=command)
def update_system_packages(self):
"""Synchronizes index files of packages on machine.
Returns:
str: Output of apt-get update command.
"""
command = "apt-get update"
return self.start_blocking_process(command_string=command)
def upgrade_system_packages(self):
"""Fetches newest versions of packages on machine.
Returns:
str: Output of apt-get upgrade command.
"""
command = "apt-get upgrade"
return self.start_blocking_process(command_string=command)
def install_system_packages(self, package_name):
"""Installs a software package.
Args:
package_name (str): Name of package to install.
Returns:
str: Output of apt-get install command.
"""
command = "apt-get install {name}".format(name=package_name)
return self.start_blocking_process(command_string=command)
def system_uptime(self):
"""Returns duration the system has been online.
Returns:
str: System uptime.
"""
command = "uptime"
return self.start_blocking_process(command_string=command)
``` |
{
"source": "jmatg1/musically-tiktok-api-python",
"score": 3
} |
#### File: jmatg1/musically-tiktok-api-python/demo.py
```python
print ("asd")
from api import api
from time import sleep
import sys
import os
def login_(login):
if (login.get('code')):
from capthca import capthca
cc = capthca(login.get('code'))
if (cc.backdata):
if (len(cc.backdata) > 0):
login = api.login(username, password, cc.backdata)
del cc
login_(login)
else:
print('empty form again')
login = api.login(username, password, cc.backdata)
del cc
login_(login)
if __name__ == "__main__":
api = api()
api.global_veriable['device_id'] = "6648944787888948741"
api.global_veriable['iid'] = "6648944787888948741"
api.global_veriable['openudid'] = "6vchx2vx3ubd051q"
username = "sodo76"
password = "<PASSWORD>"
login = api.login(username,password)
login_(login)
``` |
{
"source": "jmather625/OSM_buildingdetector",
"score": 3
} |
#### File: OSM_buildingdetector/Archive/ComplexDetect.py
```python
import cv2
import numpy as np
import geolocation
import backend
import math
from .algorithms import FloodFill, Polygonify
class ComplexDetect:
def __init__(self, image, lat, long, zoom, threshold):
self.image = image
self.lat = lat
self.long = long
self.zoom = zoom
self.THRESHOLD = threshold
def detect_building(self):
# Get the x_click,y coordinates of the click
x_click, y_click = geolocation.deg_to_tilexy_matrix(self.lat, self.long, self.zoom)
# find xtile, ytile
xtile, ytile = geolocation.deg_to_tile(self.lat, self.long, self.zoom)
# writes the pre_image before any changes
cv2.imwrite('detectors/runtime_images/pre_image.png', self.image)
print("running flood fill")
flood = FloodFill(self.image, x_click, y_click, self.THRESHOLD)
flood_fill_image, message = flood.flood_fill()
cv2.imwrite('detectors/runtime_images/flood_fill.png', flood_fill_image)
print("ran flood fill")
cropped_image = flood.crop_image()
cv2.imwrite('detectors/runtime_images/flood_fill_display.png', cropped_image)
print('cropped image')
edge_image, total_edge_list = flood.find_edges()
cv2.imwrite('detectors/runtime_images/flood_fill_edges.png', edge_image)
print('found edges')
polygon = Polygonify(total_edge_list)
rect_points = polygon.find_polygon(rectangle=True)
vertex_list = []
# gets polygon's points into lat/long
for corner in rect_points:
next_vertex = geolocation.tilexy_to_deg_matrix(xtile, ytile, self.zoom, corner[0], corner[1])
vertex_list.append(list(next_vertex))
return vertex_list, message
```
#### File: detectors/algorithms/Polygonify.py
```python
import numpy as np
from scipy.spatial import ConvexHull
from scipy.ndimage.interpolation import rotate
class Polygonify:
def __init__(self, points):
self.points = points
def find_polygon(self, rectangle=False):
if (rectangle):
return Polygonify.minimum_bounding_rectangle(self.points)
else:
return Polygonify.minimum_bounding_rectangle(self.points)
@staticmethod
def minimum_bounding_rectangle(points):
"""
Find the smallest bounding rectangle for a set of points.
Returns a set of points representing the corners of the bounding box.
:param points: a list of coordinates
        :return: an nx2 matrix of coordinates
"""
points = np.array(points)
points = points.reshape(len(points), 2)
pi2 = np.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
        edges = hull_points[1:] - hull_points[:-1]
        angles = np.arctan2(edges[:, 1], edges[:, 0])
angles = np.abs(np.mod(angles, pi2))
angles = np.unique(angles)
# find rotation matrices
# XXX both work
rotations = np.vstack([
np.cos(angles),
np.cos(angles-pi2),
np.cos(angles+pi2),
np.cos(angles)]).T
# rotations = np.vstack([
# np.cos(angles),
# -np.sin(angles),
# np.sin(angles),
# np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
return rval
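# --- Usage sketch (added for illustration; the points below are made up) ---
# square = [(0, 0), (2, 0), (2, 2), (0, 2), (1, 1)]
# Polygonify(square).find_polygon(rectangle=True)
# should return the four corners of the 2x2 square (corner ordering may
# differ), since the minimum bounding rectangle of these points is the
# square itself; the interior point (1, 1) is discarded by the convex hull.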
``` |
{
"source": "jmathes/Do-Just-One-Thing",
"score": 2
} |
#### File: Do-Just-One-Thing/dojustonething/api.py
```python
import logging
import random
from google.appengine.ext import webapp, db
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import users
try:
import json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
import simplejson as json
from todolist import ToDoList, AmbiguousUrgencyExeption
from userinfo import UserInfo
api_funcs = {}
def api(func):
global api_funcs
api_funcs[func.__name__] = func
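# Note (added for illustration): functions registered via @api become POST
# endpoints under /api/<function name> (see ApiRequestHandler below). For
# example, POSTing args=[6, 7] (as JSON) to /api/multiply returns 42.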
@api
def multiply(a, b):
return a * b
@api
def set_limit(limit):
user = users.get_current_user()
user_info = UserInfo.get(user)
try:
user_info.daily_limit = int(limit)
except ValueError:
user_info.daily_limit = None
user_info.save()
return user_info.daily_limit if user_info.daily_limit is not None else "Infinity"
@api
def did_task(item_id):
user = users.get_current_user()
users_list = ToDoList(user.nickname(), db)
users_list.remove_item(item_id)
user_info = UserInfo.get(user)
new_points = random.randint(0, 1)
while random.randint(1, 5) > 3 and new_points < 20:
new_points *= 2
new_points += random.randint(0, 3)
user_info.score += new_points
user_info.save()
return [users_list.get_top_item(user_info.daily_limit), user_info.score]
@api
def delete_task(item_id):
user = users.get_current_user()
users_list = ToDoList(user.nickname(), db)
users_list.delete_item(item_id)
user_info = UserInfo.get(user)
return [users_list.get_top_item(user_info.daily_limit), user_info.score]
@api
def delay_task(item_id):
user = users.get_current_user()
users_list = ToDoList(user.nickname(), db)
users_list.delay_item(item_id)
user_info = UserInfo.get(user)
return users_list.get_top_item(user_info.daily_limit)
@api
def get_score():
user = users.get_current_user()
user_info = UserInfo.get(user)
return user_info.score
@api
def get_next_task():
user = users.get_current_user()
users_list = ToDoList(user.nickname(), db)
user_info = UserInfo.get(user)
return users_list.get_top_item(user_info.daily_limit)
@api
def get_next_task_and_score():
user = users.get_current_user()
    user_info = UserInfo.get(user)
    users_list = ToDoList(user.nickname(), db)
return [users_list.get_top_item(user_info.daily_limit), user_info.score]
@api
def add_task(todo):
user = users.get_current_user()
users_list = ToDoList(user.nickname(), db)
try:
users_list.insert(todo)
except AmbiguousUrgencyExeption, e:
return {
'success': False,
'newthing': todo,
'benchmark': {
'task': e.benchmark.task,
'urgency': e.benchmark.urgency,
},
}
user_info = UserInfo.get(user)
return {
'success': True,
'top_item': users_list.get_top_item(user_info.daily_limit),
}
class ApiRequestHandler(webapp.RequestHandler):
def post(self, func=None):
self.response.headers['Content-Type'] = "application/json; charset=utf-8"
response = self.request.get('args', '[]')
args = json.loads(response)
if func in api_funcs:
response = json.dumps(api_funcs[func](*args))
# kwargs = self.request.get('kwargs')
# self.response.out.write(json.dumps(args[0] * args[1]))
self.response.out.write(response)
application = webapp.WSGIApplication(
[('/api/([^/]+)', ApiRequestHandler), ], debug=True)
if __name__ == "__main__":
logging.getLogger().setLevel(logging.WARNING)
run_wsgi_app(application)
```
#### File: Do-Just-One-Thing/dojustonething/helpers.py
```python
from unittest import TestCase
from pprint import pformat
import uuid
def make_random_string(length=10):
return str(uuid.uuid4()).replace("-", "")[:length]
class DJOTTestCase(TestCase):
def assert_equal(self, first, second, message=None):
message = (message if message else "%s was not equal to %s" % (first, second))
self.assertTrue(first == second, message)
def assert_not_equal(self, first, second, message=None):
message = (message if message else "%s was equal to %s" % (first, second))
self.assertTrue(first != second, message)
def assert_less(self, first, second, message=None):
message = (message if message
else "%s not less than %s" % (first, second))
self.assertTrue(first < second, message)
def assert_less_equal(self, first, second, message=None):
message = (message if message
else "%s not less than or equal to %s" % (first, second))
self.assertTrue(first <= second, message)
def assert_greater(self, first, second, message=None):
message = (message if message
else "%s not greater than %s" % (first, second))
self.assertTrue(first > second, message)
def assert_greater_equal(self, first, second, message=None):
message = (message if message
else "%s not greater than or equal to %s" % (first, second))
self.assertTrue(first >= second, message)
def assert_none(self, item, message=None):
message = (message if message
else "%s should have been None" % pformat(item))
self.assertTrue(item is None, message)
def assert_not_none(self, item, message=None):
message = (message if message
else "%s should not have been None" % pformat(item))
self.assertFalse(item is None, message)
def assert_excepts(self, exception_type, func, *args, **kwargs):
excepted = False
try:
val = func(*args, **kwargs)
print ("assert_excepts: Crap. That wasn't supposed to work."
" Here's what I got: ", pformat(val))
except exception_type, e:
print ("assert_excepts: Okay, %s failed the way it was supposed"
" to: %s" % (func, e))
excepted = True
self.assertTrue(excepted, "assert_excepts: calling %s didn't raise %s"
% (func, exception_type))
def assert_in(self, needle, haystack, message=None):
return self.assert_contains(haystack, needle, message)
def assert_not_in(self, needle, haystack, message=None):
return self.assert_not_contains(haystack, needle, message)
def assert_contains(self, haystack, needle, message=None):
displaystack = self._format(haystack)
message = (message if message
else "%s not found in %s" % (needle, displaystack))
its_in_there = False
try:
if needle in haystack:
its_in_there = True
except:
pass
try:
if not its_in_there and haystack in needle:
print "! HEY !" * 5
print "HEY! it looks like you called assert_contains backwards"
print "! HEY !" * 5
except:
pass
self.assertTrue(needle in haystack, message)
def _format(self, haystack):
return haystack
def assert_not_contains(self, haystack, needle, message=None):
displaystack = self._format(haystack)
message = (message if message
else "%s not wanted but found in %s" % (needle, displaystack))
self.assertFalse(needle in haystack, message)
def assert_any(self, conditions, message=None):
message = (message if message
else "%s were all False" % pformat(conditions))
self.assertTrue(any(conditions), message)
def assert_not_any(self, conditions, message=None):
message = (message if message
else "%s was not all False" % pformat(conditions))
self.assertFalse(any(conditions), message)
def assert_startswith(self, haystack, needle, message=None):
displaystack = self._format(haystack)
message = (message if message
else "%s should have been at the beginning of %s"
% (needle, displaystack))
self.assertTrue(haystack.startswith(needle), message)
def assert_endswith(self, haystack, needle, message=None):
displaystack = self._format(haystack)
message = (message if message
else "%s should have been at the end of %s"
% (needle, displaystack))
self.assertTrue(haystack.endswith(needle), message)
def assert_not_startswith(self, haystack, needle, message=None):
displaystack = self._format(haystack)
message = (message if message
else "%s should not have been at the beginning of %s"
% (needle, displaystack))
self.assertFalse(haystack.startswith(needle), message)
    def assert_is(self, expected, actual, message=None):
        message = message if message else "%s is not %s" % (expected, actual)
        self.assertTrue(expected is actual, message)
    def assert_is_not(self, expected, actual, message=None):
        message = message if message else "%s is %s" % (expected, actual)
        self.assertTrue(expected is not actual, message)
```
#### File: Do-Just-One-Thing/dojustonething/test_todolist.py
```python
import helpers
from todolist import ToDoList, AmbiguousUrgencyExeption, all_chars
import mock
class FakeItem(object):
def __init__(self, parent=None):
pass
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.__dict__)
def put(self):
pass
@classmethod
def gql(cls, query, key):
return []
class ToDoListTestCase(helpers.DJOTTestCase):
def setUp(self):
self.username = helpers.make_random_string()
self.list = ToDoList(item_type=FakeItem,
username=self.username,
db=mock.Mock())
self.middle_char = all_chars[len(all_chars) / 2]
print self.list
def test_some_properties_about_all_chars(self):
self.assert_greater(len(all_chars), 90)
self.assertEqual('!', all_chars[0])
self.assertEqual('}', all_chars[-1])
for i in xrange(len(all_chars) - 1):
            self.assert_less(all_chars[i], all_chars[i + 1])
for i in xrange(len(all_chars)):
self.assertEqual(chr(i + 33), all_chars[i])
self.assertEqual(i + 33, ord(all_chars[i]))
def test_get_top_item(self):
self.list._test_force(("top", "A"), ("bottom", "C"))
self.assertEqual("top", self.list.get_top_item())
def test_get_top_item_escaped(self):
self.list._test_force(("& < >", "A"), ("bottom", "C"))
self.assertEqual("& < >", self.list.get_top_item())
def test_get_top_item_longer_list(self):
self.list._test_force(("top2", "A"), ("middle", "B"), ("bottom", "C"))
self.assertEqual("top2", self.list.get_top_item())
def test_get_top_item_empty_list_tells_you_to_add_things(self):
self.assertEqual("Click on the + symbol to add a thing", self.list.get_top_item())
def test_newly_created_list_has_extremes_of_allchars_as_first_and_last_elements(self):
self.assertEqual(self.list._items[0].urgency, all_chars[-1])
self.assertEqual(self.list._items[-1].urgency, all_chars[0])
def test_newly_created_list_takes_username(self):
self.assertEqual(self.username, self.list.username)
def test_inserting_item_preserves_task(self):
self.assertTrue(self.list.empty)
self.list.insert("Do\ntaxes")
self.assertEqual(1, self.list.length)
item = self.list[0]
self.assertEqual("Do\ntaxes", item.task)
def test_inserting_item_adds_username(self):
self.assertTrue(self.list.empty)
self.list.insert("Do\ntaxes")
self.assertEqual(1, self.list.length)
item = self.list[0]
self.assertEqual(self.username, item.username)
def test_insert_to_empty_list_gets_middle_priority(self):
self.assertTrue(self.list.empty)
self.list.insert("Pay those bills\nThe ones on my desk")
self.assertEqual(1, self.list.length)
item = self.list[0]
self.assertEqual(item.urgency, self.middle_char)
def test_insert_into_list_with_item_requires_bounds(self):
self.list.insert("Finish writing this app")
self.assertEqual(1, self.list.length)
try:
self.list.insert("Do something else")
self.fail("Should have excepted")
except AmbiguousUrgencyExeption as e:
self.assertEqual("Finish writing this app", e.benchmark.task)
def test_insert_into_list_when_existing_item_is_less_urgent(self):
self.list.insert("High priority thing")
self.assertEqual(1, self.list.length)
first_item = self.list[0]
self.list.insert("Higher priority thing", lower_bound=first_item.urgency)
self.assertEqual(2, self.list.length)
high_priority_item = self.list[0]
low_priority_item = self.list[1]
self.assertEqual("Higher priority thing", high_priority_item.task)
self.assertEqual("High priority thing", low_priority_item.task)
self.assert_greater(high_priority_item.urgency, low_priority_item.urgency)
def test_insert_into_list_when_existing_item_is_more_urgent(self):
self.list.insert("High priority thing")
self.assertEqual(1, self.list.length)
high_priority_item = self.list[0]
self.list.insert("Low priority thing",
upper_bound=high_priority_item.urgency)
self.assertEqual(2, self.list.length)
high_priority_item = self.list[0]
low_priority_item = self.list[1]
self.assertEqual("Low priority thing", low_priority_item.task)
self.assertEqual("High priority thing", high_priority_item.task)
self.assert_greater(high_priority_item.urgency, low_priority_item.urgency)
def test_insert_into_list_between_two_things_that_start_with_gap_letters_ends_up_with_middle(self):
self.list._test_force(("top", "A"), ("bottom", "C"))
print self.list
self.list.insert("middle", upper_bound='C', lower_bound='A')
self.assertEqual(3, self.list.length)
self.assertEqual('B', self.list[1].urgency)
def test_insert_into_list_of_three_things_with_priority_between_top_and_bottom_but_ambiguous_with_middle(self):
self.list._test_force(("top", "A"), ("middle", "B"), ("bottom", "C"))
self.assertEqual(3, self.list.length)
print self.list._items
try:
self.list.insert("middle2", upper_bound='C', lower_bound='A')
self.fail("should have excepted")
except AmbiguousUrgencyExeption as e:
self.assertEqual("middle", e.benchmark.task)
def test_insert_into_list_between_two_items_with_adjacent_urgencies_takes_first_one_and_adds_a_letter(self):
self.list._test_force(("top", "A"), ("bottom", "B"))
print self.list
self.list.insert("middle", upper_bound='B', lower_bound='A')
print self.list
self.assertEqual(3, self.list.length)
self.assertEqual('A' + self.middle_char, self.list[1].urgency)
self.assertTrue(self.list[0].urgency > self.list[1].urgency > self.list[2].urgency)
def test_insert_into_list_between_two_items_with_same_first_few_letters_does_the_right_thing(self):
self.list._test_force(("top", "blahblahZZX"), ("bottom", "blahblahAAAAA"))
print self.list
self.list.insert("middle", upper_bound='blahblahZZX', lower_bound='blahblahAAAAA')
print self.list
self.assertEqual(3, self.list.length)
self.assert_startswith(self.list[1].urgency, 'blahblah')
self.assertEqual('top', self.list[0].task)
self.assertEqual('middle', self.list[1].task)
self.assertEqual('bottom', self.list[2].task)
self.assertTrue(self.list[0].urgency > self.list[1].urgency > self.list[2].urgency)
def test_insert_into_list_thats_lower_than_lowest_possible_item_rejiggers_list(self):
self.list._test_force(("lowest_possible", all_chars[1]))
print self.list
self.list.insert("lower_than_low", upper_bound=all_chars[1])
print self.list
self.assertEqual('lowest_possible', self.list[0].task)
self.assertEqual('lower_than_low', self.list[1].task)
for i in xrange(self.list.length):
self.assert_not_startswith(self.list[i].urgency, all_chars[0])
self.assert_not_startswith(self.list[i].urgency, all_chars[-1])
self.assertTrue(self.list[0].urgency > self.list[1].urgency > all_chars[0])
``` |
{
"source": "jmathies/util-process-top-crashes",
"score": 2
} |
#### File: jmathies/util-process-top-crashes/crashes.py
```python
import json
import hashlib
import os
import pprint
import re
import sys
import html
import getopt
import threading
import itertools
import time
import requests
import math
import string
import pygal
from string import Template
from collections import Counter
from urllib.request import urlopen
from urllib import request
from datetime import datetime, timedelta, date
# python -m pip install SomePackage
# python.exe -m pip install --upgrade SomePackage
import fx_crash_sig
from fx_crash_sig.crash_processor import CrashProcessor
# process types
# https://searchfox.org/mozilla-central/source/toolkit/components/crashes/CrashManager.jsm#162
###########################################################
# Usage
###########################################################
# -u (url) : redash rest endpoint url
# -k (str) : redash user api key
# -q (query id) : redash api query id
# -n (name) : local json cache filename to use (excluding extension)
# -d (name) : local html output filename to use (excluding extension)
# -c (count) : number of reports to process, overrides the default
# -p (k=v) : k=v redash query parameters to pass to the query request.
# -z : debugging: load and dump the first few records of the local databases. requires -d.
# -s (sig) : search for a token in reports
# python crashes.py -n nightly -d nightly -u https://sql.telemetry.mozilla.org -k (userapikey) -q 79354 -p process_type=gpu -p version=89 -p channel=nightly
## TODO
## linux distro information someplace
## fission reporting? at least report it via an indicator.
## filter graphing and the list based on clicks on the header data (version, os, arch)
## popup panel layout (Fixed By and Notes) is confusing, and wide when it doesn't need to be.
## improve signature header information layout, particularly fx version numbers. We can easily expand this down and host info similar to crash stats summary pages.
## Remove reliance on version numbers? Need to get signature headers hooked up, and choose the latest releases for main reports
## build id (nightly / beta)
## clean up the startup crash icons
## better annotations support
## add dates to annotations
## add copy stack feature in the template
## click handler should ignore clicks if there's selection in the page
## signature search?
# python crashes.py -n beta -d beta -u https://sql.telemetry.mozilla.org -k (userapikey) -q 79354 -p process_type=gpu -p version=90 -p channel=beta -s "draw_quad_spans<T>"
###########################################################
# Globals
###########################################################
# The default symbolication server to use.
SymbolServerUrl = "https://symbolication.stage.mozaws.net/symbolicate/v5"
# Max stack depth for symbolication
MaxStackDepth = 50
# Maximum number of raw crashes to process. This matches
# the limit value of re:dash queries. Reduce for testing
# purposes.
CrashProcessMax = 5000
# Signature list length of the resulting top crashes report
MostCommonLength = 50
# When generating a report, signatures with crash counts
# lower than this value will not be included in the report.
MinCrashCount = 1
# When generating a report, signatures with client counts
# lower than this value will not be included in the report.
ReportLowerClientLimit = 2 # filter out single client crashes
# Maximum number of crash reports to include for each signature
# in the final report. Limits the size of the resulting html.
MaxReportCount = 100
# Set to True to target a local json file for testing
LoadLocally = False
LocalJsonFile = "GPU_Raw_Crash_Data_2021_03_19.json"
# Default json file url if not specified via the command line.
jsonUrl = "https://sql.telemetry.mozilla.org/api/queries/78997/results.json?api_key=<KEY>"
proc = CrashProcessor(MaxStackDepth, SymbolServerUrl)
pp = pprint.PrettyPrinter(indent=1, width=260)
def symbolicate(ping):
try:
return proc.symbolicate(ping)
except:
return None
def generateSignature(payload):
if payload is None:
return ""
try:
return proc.get_signature_from_symbolicated(payload).signature
except:
return ""
###########################################################
# Progress indicator
###########################################################
def progress(count, total, status=''):
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write('[%s] %s%s ...%s\r' % (bar, percents, '%', status))
sys.stdout.flush()
class Spinner:
def __init__(self, message, delay=0.1):
self.spinner = itertools.cycle(['-', '/', '|', '\\'])
self.delay = delay
self.busy = False
self.spinner_visible = False
sys.stdout.write(message)
def write_next(self):
with self._screen_lock:
if not self.spinner_visible:
sys.stdout.write(next(self.spinner))
self.spinner_visible = True
sys.stdout.flush()
def remove_spinner(self, cleanup=False):
with self._screen_lock:
if self.spinner_visible:
sys.stdout.write('\b')
self.spinner_visible = False
if cleanup:
sys.stdout.write(' ') # overwrite spinner with blank
                    sys.stdout.write('\r') # return to the start of the line
sys.stdout.flush()
def spinner_task(self):
while self.busy:
self.write_next()
time.sleep(self.delay)
self.remove_spinner()
def __enter__(self):
if sys.stdout.isatty():
self._screen_lock = threading.Lock()
self.busy = True
self.thread = threading.Thread(target=self.spinner_task)
self.thread.start()
def __exit__(self, exception, value, tb):
if sys.stdout.isatty():
self.busy = False
self.remove_spinner(cleanup=True)
else:
sys.stdout.write('\r')
def poll_job(s, redash_url, job):
while job['status'] not in (3,4):
response = s.get('{}/api/jobs/{}'.format(redash_url, job['id']))
job = response.json()['job']
time.sleep(1)
if job['status'] == 3:
return job['query_result_id']
return None
###########################################################
# Redash queries
###########################################################
def getRedashQueryResult(redash_url, query_id, api_key, params):
s = requests.Session()
s.headers.update({'Authorization': 'Key {}'.format(api_key)})
payload = dict(max_age=86400, parameters=params)
url = "%s/api/queries/%s/results" % (redash_url, query_id)
response = s.post(url, data=json.dumps(payload))
if response.status_code != 200:
print("\nquery error '%s'" % response)
pp.pprint(payload)
raise Exception('Redash query failed.')
#{ 'job': { 'error': '',
# 'id': '21429857-5fd0-443d-ba4b-fb9cc6d49add',
# 'query_result_id': None,
# 'result': None,
# 'status': 1,
# 'updated_at': 0}}
# ...or, we just get back the result
try:
result = response.json()['job']
except KeyError:
return response.json()
result_id = poll_job(s, redash_url, response.json()['job'])
response = s.get('{}/api/queries/{}/results/{}.json'.format(redash_url, query_id, result_id))
if response.status_code != 200:
raise Exception('Failed getting results. (Check your redash query for errors.) statuscode=%d' % response.status_code)
return response.json()
###########################################################
# HTML and Text Formatting Utilities
###########################################################
def escapeBugLinks(text):
# convert bug references to links
# https://bugzilla.mozilla.org/show_bug.cgi?id=1323439
pattern = "bug ([0-9]*)"
replacement = "<a href='https://bugzilla.mozilla.org/show_bug.cgi?id=\\1'>Bug \\1</a>"
result = re.sub(pattern, replacement, text, flags=re.IGNORECASE)
return result
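# Example (illustrative): escapeBugLinks("fixed by bug 1323439") returns
# "fixed by <a href='https://bugzilla.mozilla.org/show_bug.cgi?id=1323439'>Bug 1323439</a>"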
def createBugLink(id):
# convert bug references to links
return "<a href='https://bugzilla.mozilla.org/show_bug.cgi?id=" + str(id) + "'>bug " + str(id) + "</a>"
safe = string.ascii_letters + string.digits + '_-.'
def stripWhitespace(text):
text = text.strip(' \t\n')
return text
def stringToHtmlId(s):
s = ''.join([letter for letter in s if letter in safe])
return s
def generateSourceLink(frame):
# examples:
# https://hg.mozilla.org/mozilla-central/file/2da6d806f45732e169fd8e7ea9a9761fa7fed93d/netwerk/protocol/http/OpaqueResponseUtils.cpp#l208
# https://crash-stats.mozilla.org/sources/highlight/?url=https://gecko-generated-sources.s3.amazonaws.com/7d3f7c890af...e97be06f948921153/ipc/ipdl/PCompositorManagerParent.cpp&line=200#L-200
# 'file': 's3:gecko-generated-sources:8276fd848664bea270...8e363bdbc972cdb7eb661c4043de93ce27810b54/ipc/ipdl/PWebGLParent.cpp:',
# 'file': 'hg:hg.mozilla.org/mozilla-central:dom/canvas/WebGLParent.cpp:52d2c9e672d0a0c50af4d6c93cc0239b9e751d18',
# 'line': 59,
    srcLineNumber = str()
srcfileData = str()
srcUrl = str()
try:
srcLineNumber = frame['line']
srcfileData = frame['file']
tokenList = srcfileData.split(':')
if (len(tokenList) != 4):
print("bad token list " + tokenList)
return str()
except:
return str()
if tokenList[0].find('s3') == 0:
srcUrl = 'https://crash-stats.mozilla.org/sources/highlight/?url=https://gecko-generated-sources.s3.amazonaws.com/'
srcUrl += tokenList[2]
srcUrl += '&line='
srcUrl += str(srcLineNumber)
srcUrl += '#L-'
srcUrl += str(srcLineNumber)
elif tokenList[0].find('hg') == 0:
srcUrl = 'https://'
srcUrl += tokenList[1]
srcUrl += '/file/'
srcUrl += tokenList[3]
srcUrl += '/'
srcUrl += tokenList[2]
srcUrl += '#l' + str(srcLineNumber)
else:
#print("Unknown src annoutation source") this happens a lot
return str()
return srcUrl
def escape(text):
return html.escape(text)
###########################################################
# Crash Report Utilities
###########################################################
def processStack(frames):
# Normalized function names we can consider the same in calculating
# unique reports. We replace the regex match with the key using sub.
coelesceFrameDict = {
'RtlUserThreadStart': '[_]+RtlUserThreadStart'
}
# Functions we can replace with the normalized version, filters
# out odd platform parameter differences.
coelesceFunctionList = [
'thread_start<'
]
dataStack = list() # [idx] = { 'frame': '(frame)', 'srcUrl': '(url)' }
for frame in frames:
frameIndex = '?'
try:
frameIndex = frame['frame'] # zero based frame index
except KeyError:
continue
except TypeError:
#print("TypeError while indexing frame.");
continue
dataStack.insert(frameIndex, { 'index': frameIndex, 'frame': '', 'srcUrl': '', 'module': '' })
functionCall = ''
module = 'unknown'
offset = 'unknown'
try:
offset = frame['module_offset']
except:
pass
try:
module = frame['module']
except:
pass
try:
functionCall = frame['function']
except KeyError:
dataStack[frameIndex]['frame'] = offset
dataStack[frameIndex]['module'] = module
continue
except TypeError:
print("TypeError while indexing function.");
dataStack[frameIndex]['frame'] = "(missing function)"
continue
for k, v in coelesceFrameDict.items():
functionCall = re.sub(v, k, functionCall, 1)
break
for v in coelesceFunctionList:
if re.search(v, functionCall) != None:
normalizedFunction = functionCall
try:
normalizedFunction = frame['normalized']
except KeyError:
pass
except TypeError:
pass
functionCall = normalizedFunction
break
srcUrl = generateSourceLink(frame)
dataStack[frameIndex]['srcUrl'] = srcUrl
dataStack[frameIndex]['frame'] = functionCall
dataStack[frameIndex]['module'] = module
return dataStack
def generateSignatureHash(signature, os, osVer, arch, fxVer):
hashData = signature
# Append any crash meta data to our hashData so it applies to uniqueness.
# Any variance in this data will cause this signature to be broken out as
# a separate signature in the final top crash list.
#hashData += os
#hashData += osVer
#hashData += arch
# The redash queries we are currently using target specific versions, so this
# doesn't have much of an impact except on beta, where we want to see the effect
# of beta fixes that get uplifted.
#hashData += fxVer
return hashlib.md5(hashData.encode('utf-8')).hexdigest()
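# Note (added for illustration): with the metadata appends commented out above,
# two reports that share a signature string always hash to the same bucket;
# e.g. for a made-up signature, generateSignatureHash("some::Signature",
# "Windows", "10", "x86-64", "90") reduces to
# hashlib.md5(b"some::Signature").hexdigest().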
###########################################################
# Reports data structure utilities
###########################################################
def getDatasetStats(reports):
sigCount = len(reports)
reportCount = 0
for hash in reports:
reportCount += len(reports[hash]['reportList'])
return sigCount, reportCount
def processRedashDataset(dbFilename, jsonUrl, queryId, userKey, parameters):
print("processing %d reports" % CrashProcessMax)
props = list()
reports = dict()
totalCrashesProcessed = 0
# load up our database of processed crash ids
# returns an empty dict() if no data is loaded.
reports, stats = loadReports(dbFilename)
if LoadLocally:
with open(LocalJsonFile) as f:
dataset = json.load(f)
else:
with Spinner("loading from redash..."):
dataset = getRedashQueryResult(jsonUrl, queryId, userKey, parameters)
print()
print("done.")
crashesToProcess = len(dataset["query_result"]["data"]["rows"])
if crashesToProcess > CrashProcessMax:
crashesToProcess = CrashProcessMax
for recrow in dataset["query_result"]["data"]["rows"]:
if totalCrashesProcessed == CrashProcessMax:
break
# pull some redash props out of the recrow. You can add these
# by modifying the sql query.
operatingSystem = recrow['normalized_os']
operatingSystemVer = recrow['normalized_os_version']
firefoxVer = recrow['display_version']
buildId = recrow['build_id']
compositor = recrow['compositor']
arch = recrow['arch']
oomSize = recrow['oom_size']
devVendor = recrow['vendor']
devGen = recrow['gen']
devChipset = recrow['chipset']
devDevice = recrow['device']
drvVer = recrow['driver_version']
drvDate = recrow['driver_date']
clientId = recrow['client_id']
devDesc = recrow['device_description']
# Load the json crash payload from recrow
props = json.loads(recrow["payload"])
# touch up for the crash symbolication package
props['stackTraces'] = props['stack_traces']
crashId = props['crash_id']
crashDate = props['crash_date']
minidumpHash = props['minidump_sha256_hash']
crashReason = props['metadata']['moz_crash_reason']
crashInfo = props['stack_traces']['crash_info']
startupCrash = int(recrow['startup_crash'])
fissionEnabled = int(recrow['fission_enabled'])
if crashReason != None:
crashReason = crashReason.strip('\n')
# Ignore crashes older than 7 days
if not checkCrashAge(crashDate):
totalCrashesProcessed += 1
continue
# check if the crash id is processed, if so continue
found = False
signature = ""
for sig in reports:
for report in reports[sig]['reportList']:
if report['crashid'] == crashId:
found = True
signature = sig
# if you add a new value to the sql queries, you can update
# the local json cache we have in memory here. Saves having
# to delete the file and symbolicate everything again.
report['fission'] = fissionEnabled
break
if found:
totalCrashesProcessed += 1
progress(totalCrashesProcessed, crashesToProcess)
continue
# symbolicate and return payload result
payload = symbolicate(props)
signature = generateSignature(payload)
if skipProcessSignature(signature):
totalCrashesProcessed += 1
continue
# pull stack information for the crashing thread
try:
crashingThreadIndex = payload['crashing_thread']
except KeyError:
#print("KeyError on crashing_thread for report");
continue
threads = payload['threads']
try:
frames = threads[crashingThreadIndex]['frames']
except IndexError:
print("IndexError while indexing crashing thread");
continue
except TypeError:
print("TypeError while indexing crashing thread");
continue
# build up a pretty stack
stack = processStack(frames)
# generate a tracking hash
hash = generateSignatureHash(signature, operatingSystem, operatingSystemVer, arch, firefoxVer)
if hash not in reports.keys():
# Set up this signature's meta data we track in the signature header.
reports[hash] = {
'signature': signature,
'operatingsystem': [operatingSystem],
'osversion': [operatingSystemVer],
'firefoxver': [firefoxVer],
'arch': [arch],
'reportList': list()
}
# Update meta data we track in the report header.
if operatingSystem not in reports[hash]['operatingsystem']:
reports[hash]['operatingsystem'].append(operatingSystem)
if operatingSystemVer not in reports[hash]['osversion']:
reports[hash]['osversion'].append(operatingSystemVer)
if firefoxVer not in reports[hash]['firefoxver']:
reports[hash]['firefoxver'].append(firefoxVer)
if arch not in reports[hash]['arch']:
reports[hash]['arch'].append(arch)
# create our report with per crash meta data
report = {
'clientid': clientId,
'crashid': crashId,
'crashdate': crashDate,
'compositor': compositor,
'stack': stack,
'oomsize': oomSize,
'type': crashInfo['type'],
'devvendor': devVendor,
'devgen': devGen,
'devchipset': devChipset,
'devdevice': devDevice,
'devdescription': devDesc,
'driverversion' : drvVer,
'driverdate': drvDate,
'minidumphash': minidumpHash,
'crashreason': crashReason,
'startup': startupCrash,
'fission': fissionEnabled,
# Duplicated but useful if we decide to change the hashing algo
# and need to reprocess reports.
'operatingsystem': operatingSystem,
'osversion': operatingSystemVer,
'firefoxver': firefoxVer,
'arch': arch
}
# save this crash in our report list
reports[hash]['reportList'].append(report)
if hash not in stats.keys():
stats[hash] = {
'signature': signature,
'crashdata': {}
}
# check to see if stats has a date entry that matches crashDate
if crashDate not in stats[hash]['crashdata']:
stats[hash]['crashdata'][crashDate] = { 'crashids': [], 'clientids':[] }
if operatingSystem not in stats[hash]['crashdata'][crashDate]:
stats[hash]['crashdata'][crashDate][operatingSystem] = {}
if operatingSystemVer not in stats[hash]['crashdata'][crashDate][operatingSystem]:
stats[hash]['crashdata'][crashDate][operatingSystem][operatingSystemVer] = {}
if arch not in stats[hash]['crashdata'][crashDate][operatingSystem][operatingSystemVer]:
stats[hash]['crashdata'][crashDate][operatingSystem][operatingSystemVer][arch] = {}
if firefoxVer not in stats[hash]['crashdata'][crashDate][operatingSystem][operatingSystemVer][arch]:
stats[hash]['crashdata'][crashDate][operatingSystem][operatingSystemVer][arch][firefoxVer] = { 'clientcount': 0, 'crashcount': 0 }
if crashId not in stats[hash]['crashdata'][crashDate]['crashids']:
stats[hash]['crashdata'][crashDate]['crashids'].append(crashId)
stats[hash]['crashdata'][crashDate][operatingSystem][operatingSystemVer][arch][firefoxVer]['crashcount'] += 1
if clientId not in stats[hash]['crashdata'][crashDate]['clientids']:
stats[hash]['crashdata'][crashDate][operatingSystem][operatingSystemVer][arch][firefoxVer]['clientcount'] += 1
stats[hash]['crashdata'][crashDate]['clientids'].append(clientId)
totalCrashesProcessed += 1
progress(totalCrashesProcessed, crashesToProcess)
print('\n')
if totalCrashesProcessed == 0:
print('No reports processed.')
exit()
# Post processing steps
# Purge signatures from our reports list that are outdated (based
# on crash date and version). This keeps our crash lists current,
# especially after a merge. Note this doesn't clear stats, just reports.
queryFxVersion = parameters['version']
purgeOldReports(reports, queryFxVersion)
# calculate unique client id counts for each signature. These are client counts
# associated with the current redash query, and apply only to a seven day time
# window. They are stored in the reports database and displayed in the top crash
# reports.
clientCounts = dict()
needsUpdate = False
for hash in reports:
clientCounts[hash] = list()
for report in reports[hash]['reportList']:
clientId = report['clientid']
if clientId not in clientCounts[hash]:
clientCounts[hash].append(clientId)
reports[hash]['clientcount'] = len(clientCounts[hash])
return reports, stats, totalCrashesProcessed
def checkCrashAge(dateStr):
try:
date = datetime.fromisoformat(dateStr)
except:
return False
oldestDate = datetime.today() - timedelta(days=7)
return (date >= oldestDate)
def purgeOldReports(reports, fxVersion):
# Purge obsolete reports.
    # Version strings look like: 89.0b7, 89.0, 90.0.1
totalReportsDropped = 0
for hash in reports:
keepRepList = list()
origRepLen = len(reports[hash]['reportList'])
for report in reports[hash]['reportList']:
reportVer = ''
try:
reportVer = report['firefoxver']
reportVer = reportVer[0:2]
except:
pass
if fxVersion == reportVer:
keepRepList.append(report)
totalReportsDropped += (origRepLen - len(keepRepList))
reports[hash]['reportList'] = keepRepList
print("Removed %d older reports." % totalReportsDropped)
# Purge signatures that have no reports
delSigList = list()
for hash in reports:
newRepList = list()
for report in reports[hash]['reportList']:
# "crash_date":"2021-03-22"
dateStr = report['crashdate']
if checkCrashAge(dateStr):
newRepList.append(report)
reports[hash]['reportList'] = newRepList
if len(newRepList) == 0:
# add this signature to our purge list
delSigList.append(hash)
for hash in reports:
if len(reports[hash]['reportList']) == 0:
if hash not in delSigList:
delSigList.append(hash)
# purge old signatures that no longer have reports
# associated with them.
for hash in delSigList:
del reports[hash]
print("Removed %d older signatures from our reports database." % len(delSigList))
# return true if we should skip processing this signature
def skipProcessSignature(signature):
if len(signature) == 0:
return True
elif signature == 'EMPTY: no crashing thread identified':
return True
elif signature == 'EMPTY: no frame data available':
return True
elif signature == "<T>":
print("sig <T>")
return True
return False
def isFissionRelated(reports):
isFission = True
for report in reports:
try:
if report['fission'] == 0:
isFission = False
except:
pass
return isFission
def generateTopReportsList(reports):
# For certain types of reasons like RustMozCrash, organize
# the most common for a report list. Otherwise just dump the
# first MaxReportCount.
reasonCounter = Counter()
for report in reports:
crashReason = report['crashreason']
reasonCounter[crashReason] += 1
reportCol = reasonCounter.most_common(MaxReportCount)
if len(reportCol) < MaxReportCount:
return reports
colCount = len(reportCol)
maxReasonCount = int(math.ceil(MaxReportCount / colCount))
    reportList = list()
    for reason, _ in reportCol:
        reasonReportCount = 0
        for report in reports:
            if report['crashreason'] == reason:
                reportList.append(report)
                reasonReportCount += 1
                if reasonReportCount > maxReasonCount:
                    break # next reason
return reportList
def dumpDatabase(reports, annoFilename):
print("= Reports =======================================================================================")
pp.pprint(reports)
print("= Annotations ===================================================================================")
    annotations = loadAnnotations(annoFilename)
    pp.pprint(annotations)
def doMaintenance(dbFilename):
exit()
# load up our database of processed crash ids
reports, stats = loadReports(dbFilename)
for hash in reports:
signature = reports[hash]['signature']
clientcount = reports[hash]['clientcount']
operatingSystem = reports[hash]['operatingsystem']
del reports[hash]['operatingsystem']
reports[hash]['operatingsystem'] = [operatingSystem]
operatingSystemVer = reports[hash]['osversion']
del reports[hash]['osversion']
reports[hash]['osversion'] = [operatingSystemVer]
firefoxVer = reports[hash]['firefoxver']
del reports[hash]['firefoxver']
reports[hash]['firefoxver'] = [firefoxVer]
arch = reports[hash]['arch']
del reports[hash]['arch']
reports[hash]['arch'] = [arch]
#dumpDatabase(reports)
# Caching of reports
#cacheReports(reports, stats, dbFilename)
###########################################################
# File utilities
###########################################################
# Load the local report database
def loadReports(dbFilename):
reportsFile = ("%s-reports.json" % dbFilename)
statsFile = ("%s-stats.json" % dbFilename)
reports = dict()
stats = dict()
try:
with open(reportsFile) as database:
reports = json.load(database)
except FileNotFoundError:
pass
try:
with open(statsFile) as database:
stats = json.load(database)
except FileNotFoundError:
pass
sigCount, reportCount = getDatasetStats(reports)
print("Existing database stats: %d signatures, %d reports." % (sigCount, reportCount))
return reports, stats
# Cache the reports database to a local json file. Speeds
# up symbolication runs across days by avoid re-symbolicating
# reports.
def cacheReports(reports, stats, dbFilename):
reportsFile = ("%s-reports.json" % dbFilename)
statsFile = ("%s-stats.json" % dbFilename)
with open(reportsFile, "w") as database:
database.write(json.dumps(reports))
with open(statsFile, "w") as database:
database.write(json.dumps(stats))
sigCount, reportCount = getDatasetStats(reports)
print("Cache database stats: %d signatures, %d reports." % (sigCount, reportCount))
def loadAnnotations(filename):
file = "%s.json" % filename
try:
with open(file) as database:
annotations = json.load(database)
print("Loading %s annotations file." % file)
except FileNotFoundError:
print("Could not find %s file." % file)
return dict()
except json.decoder.JSONDecodeError:
print("Json error parsing %s" % file)
return dict()
return annotations
###########################################################
# HTML Template Utilities
###########################################################
def extractTemplate(token, srcTemplate):
# This returns the inner template from srcTemplate, minus any
# identifying tag data.
# token would be something like 'signature' used
# in identifying tags like:
# <!-- start of signature template -->
# <!-- end of signature template -->
start = '<!-- start of ' + token + ' template -->'
end = '<!-- end of ' + token + ' template -->'
    sIndex = srcTemplate.find(start)
    eIndex = srcTemplate.find(end)
if sIndex == -1 or eIndex == -1:
raise Exception("Bad HTML template tokens!")
template = srcTemplate[sIndex + len(start) : eIndex + len(end)]
return template
def extractAndTokenizeTemplate(token, srcTemplate, insertToken):
# This returns the inner template from srcTemplate, minus any
# identifying tag data, and we also return srcTemplate with
# $insertToken replacing the block we clipped out.
start = '<!-- start of ' + token + ' template -->'
end = '<!-- end of ' + token + ' template -->'
    sIndex = srcTemplate.find(start)
    eIndex = srcTemplate.find(end)
if sIndex == -1 or eIndex == -1:
raise Exception("Bad HTML template tokens!")
header = srcTemplate[0:sIndex]
footer = srcTemplate[eIndex + len(end):]
template = srcTemplate[sIndex + len(start) : eIndex]
return template, (header + '$' + insertToken + footer)
def dumpTemplates():
print('mainPage -----')
print(mainPage)
print('outerSigTemplate-----')
print(outerSigTemplate)
print('outerSigMetaTemplate-----')
print(outerSigMetaTemplate)
print('outerReportTemplate-----')
print(outerReportTemplate)
print('outerStackTemplate-----')
print(outerStackTemplate)
print('innerStackTemplate-----')
print(innerStackTemplate)
exit()
###########################################################
### Report generation
###########################################################
def generateSignatureReport(signature, dbFilename="crashreports"):
    reports, stats = loadReports(dbFilename)
    reports = reports[signature]
if len(reports) == 0:
print("signature not found in database.")
exit()
#for report in reports:
exit()
def generateSparklineJS(sigStats, operatingSystems, operatingSystemVers, firefoxVers, archs, className):
# generate stats data for crash rate over time graphs
# data = [ {name: "Bitcoin", date: "2017-01-01", value: 967.6}, ]
#"Windows": {
# "6.1": {
# "x86": {
# "91.0a1": {
# "clientcount": 1,
# "crashcount": 3
# }
# }
# }
#}
rawData = dict()
for dateStr in sigStats['crashdata']:
for os in operatingSystems:
for osver in operatingSystemVers:
for arch in archs:
for fxver in firefoxVers:
try:
stats = sigStats['crashdata'][dateStr][os][osver][arch][fxver]
rawData[dateStr] = { 'os': os, 'crashcount': stats['crashcount'] }
except:
pass # some dates may not apply to a particular combination
# average data for each os to smooth out the graph
# {name: "Windows", date: "2021-06-24", value: 84}
# generate a list of dates
avgData = dict()
dates = list(rawData.keys())
dates.sort()
# generate an os list [not used]
osList = list()
for targetDate in dates:
os = rawData[targetDate]['os']
if os not in osList:
osList.append(os)
# generate plot data
plotData = '['
template = '{name: "$name", date: "$date", value: $value},'
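    # Each plotted point is a centered moving average: for every date, sum the
    # crash counts of all dates within a window from 3 days before to 4 days
    # after and divide by the number of data points found in that window.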
for targetDate in dates:
pd = date.fromisoformat(targetDate)
minDate = pd - timedelta(3)
maxDate = pd + timedelta(4)
crashCount = 0
dataPoints = 0
for tmpDateStr in dates:
tmpDate = date.fromisoformat(tmpDateStr)
if tmpDate >= minDate and tmpDate <= maxDate:
                crashCount += rawData[tmpDateStr]['crashcount']
dataPoints += 1
if dataPoints == 0:
avgData[targetDate] = 0
else:
avgData[targetDate] = crashCount / dataPoints
#print("date:%s cc=%d dp=%d avg=%f" % (targetDate, crashCount, dataPoints, avgData[targetDate]))
plotData += Template(template).substitute(name='All', date=targetDate, value=avgData[targetDate])
plotData += ']'
#print(plotData)
template = 'sparkline(document.querySelector("$cname"), $data, sloptions);' ## sloptions defined in template.html
return Template(template).substitute(data=plotData, cname='.' + className)
# from list of strings, return a comma separated pretty list
def getItemizedHeaderList(theList):
result = ''
    theList.sort()
for s in theList:
result += s + ', '
return result.strip(' ,')
def generateTopCrashReport(reports, stats, totalCrashesProcessed, processType,
channel, queryFxVersion, outputFilename, annoFilename):
templateFile = open("template.html", "r")
template = templateFile.read()
templateFile.close()
# <!-- start of crash template -->
# <!-- end of crash template -->
innerTemplate, mainPage = extractAndTokenizeTemplate('crash', template, 'main')
annotationTemplate, mainPage = extractAndTokenizeTemplate('annotation', mainPage, 'annotations')
annotationReport, annotationTemplate = extractAndTokenizeTemplate('annotation report', annotationTemplate, 'annreports')
# <!-- start of signature template -->
# <!-- end of signature template -->
innerSigTemplate, outerSigTemplate = extractAndTokenizeTemplate('signature', innerTemplate, 'signature')
# Main inner block
# <!-- start of signature meta template -->
# <!-- end of signature meta template -->
innerSigMetaTemplate, outerSigMetaTemplate = extractAndTokenizeTemplate('signature meta', innerSigTemplate, 'reports')
# Report meta plus stack info
# <!-- start of report template -->
# <!-- end of report template -->
innerReportTemplate, outerReportTemplate = extractAndTokenizeTemplate('report', innerSigMetaTemplate, 'report')
# <!-- start of stackline template -->
# <!-- end of stackline template -->
innerStackTemplate, outerStackTemplate = extractAndTokenizeTemplate('stackline', innerReportTemplate, 'stackline')
outerStackTemplate = stripWhitespace(outerStackTemplate)
innerStackTemplate = stripWhitespace(innerStackTemplate)
outerReportTemplate = stripWhitespace(outerReportTemplate)
outerSigMetaTemplate = stripWhitespace(outerSigMetaTemplate)
outerSigTemplate = stripWhitespace(outerSigTemplate)
annotationTemplate = stripWhitespace(annotationTemplate)
annotationReport = stripWhitespace(annotationReport)
# mainPage = stripWhitespace(mainPage) # mucks with js
annDb = loadAnnotations(annoFilename)
#resultFile = open(("%s.html" % outputFilename), "w", encoding="utf-8")
resultFile = open(("%s.html" % outputFilename), "w", errors="replace")
signatureHtml = str()
sigMetaHtml = str()
annotationsHtml = str()
signatureIndex = 0
sigCount, reportCount = getDatasetStats(reports)
# generate a top crash list
sigCounter = Counter()
for hash in reports:
if reports[hash]['clientcount'] < ReportLowerClientLimit:
continue
sigCounter[hash] = len(reports[hash]['reportList'])
collection = sigCounter.most_common(MostCommonLength)
sparklineJS = ''
for hash, crashcount in collection:
try:
sigRecord = reports[hash]
except KeyError:
continue
signature = sigRecord['signature']
prettyOperatingSystems = getItemizedHeaderList(sigRecord['operatingsystem'])
prettyOperatingSystemVers = getItemizedHeaderList(sigRecord['osversion'])
prettyFirefoxVers = getItemizedHeaderList(sigRecord['firefoxver'])
prettyArchs = getItemizedHeaderList(sigRecord['arch'])
operatingSystemsList = sigRecord['operatingsystem']
operatingSystemVersList = sigRecord['osversion']
firefoxVersList = sigRecord['firefoxver']
archsList = sigRecord['arch']
crashcount = len(sigRecord['reportList'])
percent = (crashcount / totalCrashesProcessed)*100.0
if crashcount < MinCrashCount: # Skip small crash count reports
continue
signatureIndex += 1
crashStatsHashQuery = 'https://crash-stats.mozilla.org/search/?'
crashStatsQuery = 'https://crash-stats.mozilla.org/search/?signature=~%s&product=Firefox&_facets=signature&process_type=%s' % (signature, processType)
# sort reports in this signature based on common crash reasons, so the most common
# is at the top of the list.
reportsToReport = generateTopReportsList(reports[hash]['reportList'])
fissionIcon = 'noicon'
if isFissionRelated(reports[hash]['reportList']):
fissionIcon = 'icon'
if crashcount < 10 and fissionIcon == 'icon':
fissionIcon = 'grayicon'
reportHtml = str()
idx = 0
hashTotal= 0
oomIcon = 'noicon'
for report in reportsToReport:
idx = idx + 1
if idx > MaxReportCount:
break
            oombytes = report['oomsize'] if report['oomsize'] is not None else '0'
            if report['oomsize'] is not None:
                oomIcon = 'icon'
            crashReason = report['crashreason']
            if crashReason is None:
                crashReason = ''
            crashType = report['type']
            # strip the 'EXCEPTION_' prefix (lstrip would strip a character set, not a prefix)
            if crashType.startswith('EXCEPTION_'):
                crashType = crashType[len('EXCEPTION_'):]
appendAmp = False
            if hashTotal < 30: # This is all crash stats can handle (414 Request-URI Too Large)
try:
crashStatsHashQuery += 'minidump_sha256_hash=~' + report['minidumphash']
hashTotal += 1
appendAmp = True
except:
pass
# Redash meta data dump for a particular crash id
infoLink = 'https://sql.telemetry.mozilla.org/queries/79462?p_channel=%s&p_process_type=%s&p_version=%s&p_crash_id=%s' % (channel, processType, queryFxVersion, report['crashid'])
startupStyle = 'noicon'
if report['startup'] != 0:
startupStyle = 'icon'
stackHtml = str()
for frameData in report['stack']:
# [idx] = { 'index': n, 'frame': '(frame)', 'srcUrl': '(url)', 'module': '(module)' }
frameIndex = frameData['index']
frame = frameData['frame']
srcUrl = frameData['srcUrl']
moduleName = frameData['module']
linkStyle = 'inline-block'
srcLink = srcUrl
if len(srcUrl) == 0:
linkStyle = 'none'
srcLink = ''
stackHtml += Template(innerStackTemplate).substitute(frameindex=frameIndex,
frame=escape(frame),
srcurl=srcLink,
module=moduleName,
style=linkStyle)
reportHtml += Template(outerStackTemplate).substitute(expandostack=('st'+str(signatureIndex)+'-'+str(idx)),
rindex=idx,
type=crashType,
oomsize=oombytes,
devvendor=report['devvendor'],
devgen=report['devgen'],
devchipset=report['devchipset'],
description=report['devdescription'],
drvver=report['driverversion'],
drvdate=report['driverdate'],
compositor=report['compositor'],
reason=crashReason,
infolink=infoLink,
startupiconclass=startupStyle,
stackline=stackHtml)
if appendAmp:
crashStatsHashQuery += '&'
# class="svg-$expandosig"
sparklineJS += generateSparklineJS(stats[hash], operatingSystemsList, operatingSystemVersList, firefoxVersList, archsList, 'svg-'+stringToHtmlId(hash)) + '\n'
# svg element
sigHtml = Template(outerReportTemplate).substitute(expandosig=stringToHtmlId(hash),
os=prettyOperatingSystems,
fxver=prettyFirefoxVers,
osver=prettyOperatingSystemVers,
arch=prettyArchs,
report=reportHtml)
crashStatsHashQuery = crashStatsHashQuery.rstrip('&')
searchIconClass = 'icon'
if hashTotal == 0:
crashStatsHashQuery = ''
searchIconClass = 'lticon'
# ann$expandosig - view signature meta parameter
annIconClass = 'lticon'
if signature in annDb:
            record = annDb[signature]
            # record['annotations'] (list)
            sigAnnotations = str()
            # record['fixedby'] (list of dicts, e.g. { 'version': 87, 'bug': 1234567 })
for fb in record['fixedby']:
sigAnnotations += Template(annotationReport).substitute(annotations=escape(fb['annotation']),
fixedbybug=createBugLink(str(fb['bug'])),
fixedbyversion=fb['version'])
for annotation in record['annotations']:
annotation = escape(annotation)
annotation = escapeBugLinks(annotation)
sigAnnotations += Template(annotationReport).substitute(annotations=annotation, fixedbybug='', fixedbyversion='')
annotationsHtml += Template(annotationTemplate).substitute(expandosig=('sig'+str(signatureIndex)),
annreports=sigAnnotations)
annIconClass = 'icon'
sigMetaHtml += Template(outerSigMetaTemplate).substitute(rank=signatureIndex,
percent=("%.00f%%" % percent),
# expandosig=('sig'+str(signatureIndex)),
expandosig=stringToHtmlId(hash),
annexpandosig=('sig'+str(signatureIndex)),
signature=(html.escape(signature)),
fissionicon=fissionIcon,
oomicon=oomIcon,
iconclass=searchIconClass,
anniconclass=annIconClass,
cslink=crashStatsHashQuery,
cssearchlink=crashStatsQuery,
clientcount=sigRecord['clientcount'],
count=crashcount,
reports=sigHtml)
signatureHtml += Template(outerSigTemplate).substitute(channel=channel,
# version=queryFxVersion,
process=processType,
sigcount=sigCount,
repcount=reportCount,
sparkline=sparklineJS,
signature=sigMetaHtml)
# Add processed date to the footer
dateTime = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
resultFile.write(Template(mainPage).substitute(main=signatureHtml,
annotations=annotationsHtml,
processeddate=dateTime))
resultFile.close()
###########################################################
# Process crashes and stacks
###########################################################
def main():
global CrashProcessMax
queryId = ''
userKey = ''
targetSignature = ''
dbFilename = "crashreports" #.json
annoFilename = "annotations"
parameters = dict()
options, remainder = getopt.getopt(sys.argv[1:], 'u:n:d:c:k:q:p:s:zm')
for o, a in options:
if o == '-u':
jsonUrl = a
print("data source url: %s" % jsonUrl)
elif o == '-n':
outputFilename = a
print("output filename: %s.html" % outputFilename)
elif o == '-d':
dbFilename = a
print("local cache file: %s.json" % dbFilename)
elif o == '-c':
CrashProcessMax = int(a)
elif o == '-q':
queryId = a
print("query id: %s" % queryId)
elif o == '-k':
userKey = a
print("user key: %s" % userKey)
elif o == '-s':
targetSignature = a
print("target signature: %s" % targetSignature)
elif o == '-m':
print("calling maintenance function.")
doMaintenance(dbFilename)
exit()
elif o == '-p':
param = a.split('=')
parameters[param[0]] = param[1]
elif o == '-z':
reports, stats = loadReports(dbFilename)
            dumpDatabase(reports, annoFilename)
exit()
if len(userKey) == 0:
print("missing user api key.")
exit()
elif len(queryId) == 0:
print("missing query id.")
exit()
parameters['crashcount'] = str(CrashProcessMax)
if len(targetSignature) > 0:
print("analyzing '%s'" % targetSignature)
generateSignatureReport(targetSignature)
exit()
# Pull fresh data from redash and process it
reports, stats, totalCrashesProcessed = processRedashDataset(dbFilename, jsonUrl, queryId, userKey, parameters)
# Caching of reports
cacheReports(reports, stats, dbFilename)
processType = parameters['process_type']
channel = parameters['channel']
queryFxVersion = parameters['version']
generateTopCrashReport(reports, stats, totalCrashesProcessed, processType, channel,
queryFxVersion, outputFilename, annoFilename)
exit()
if __name__ == "__main__":
main()
``` |
{
"source": "JMathiszig-Lee/gmcapi",
"score": 2
} |
#### File: gmcapi/app/config.py
```python
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
@staticmethod
def init_app(app):
pass
``` |
{
"source": "JMathiszig-Lee/pEEG",
"score": 3
} |
#### File: pEEG/pEEG/audio.py
```python
from time import sleep
import numpy as np
from scipy.fft import fft
from scipy.integrate import simps
NUM_SAMPLES = 1024
SAMPLING_RATE = 44100.
MAX_FREQ = SAMPLING_RATE / 2
FREQ_SAMPLES = NUM_SAMPLES / 8
TIMESLICE = 100 # ms
NUM_BINS = 16
data = {'values': None}
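# Each timeslice is reduced to three views: the normalised signal, the magnitude
# spectrum of the first NUM_SAMPLES/2 FFT bins, and NUM_BINS integrated power
# bands (Simpson's rule over equal slices of the power spectrum).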
try:
import pyaudio
def update_audio_data():
pa = pyaudio.PyAudio()
stream = pa.open(
format=pyaudio.paInt16,
channels=1,
rate=int(SAMPLING_RATE),
input=True,
frames_per_buffer=NUM_SAMPLES
)
while True:
try:
                raw_data = np.frombuffer(stream.read(NUM_SAMPLES), dtype=np.int16)
signal = raw_data / 32768.0
spectrum = fft(signal)
spectrum = abs(spectrum)[:int(NUM_SAMPLES/2)]
power = spectrum**2
bins = simps(np.split(power, NUM_BINS))
data['values'] = signal, spectrum, bins
except:
continue
except ImportError:
print()
print(" *** Pyaudio package not installed, using synthesized audio data ***")
print()
def fm_modulation(x, f_carrier = 220, f_mod =220, Ind_mod = 1):
y = np.sin(2*np.pi*f_carrier*x + Ind_mod*np.sin(2*np.pi*f_mod*x))
return y
# These are basically picked out of a hat to show something vaguely interesting
_t = np.arange(0, NUM_SAMPLES/SAMPLING_RATE, 1.0/SAMPLING_RATE)
_f_carrier = 2000
_f_mod = 1000
_ind_mod = 1
def update_audio_data():
while True:
# Generate FM signal with drifting carrier and mod frequencies
global _f_carrier, _f_mod, _ind_mod
_f_carrier = max([_f_carrier+np.random.randn()*50, 0])
_f_mod = max([_f_mod+np.random.randn()*20, 0])
_ind_mod = max([_ind_mod+np.random.randn()*0.1, 0])
A = 0.4 + 0.05 * np.random.random()
signal = A * fm_modulation(_t, _f_carrier, _f_mod, _ind_mod)
spectrum = fft(signal)
spectrum = abs(spectrum)[:int(NUM_SAMPLES/2)]
power = spectrum**2
bins = simps(np.split(power, NUM_BINS))
data['values'] = signal, spectrum, bins
sleep(1.0/12)
``` |
{
"source": "JMathiszig-Lee/Propofol",
"score": 3
} |
#### File: JMathiszig-Lee/Propofol/csvreader.py
```python
import csv
from patient_state import PatientState
csvfile = "propofol.csv"
def read_patient_csv():
patients = []
read = open(csvfile, 'r')
# Read header line
read.readline()
pid = None
current_patient = __build_new_patient()
for row in csv.reader(read):
newid = row[0]
if newid != pid:
pid = newid
current_patient = __build_new_patient()
patients.append(current_patient)
current_patient['id'] = pid
current_patient['age'] = parse_age(row[6], pid)
current_patient['weight'] = parse_weight(row[7], pid)
current_patient['height'] = parse_height(row[8], pid)
current_patient['sex'] = __patient_sex(int(row[9]))
cp = float(row[2])
is_measurement = cp != 0
if is_measurement:
event = {
"type": "measurement",
"time_mins": float(row[1]),
"cp": float(row[2])
}
current_patient["events"].append(event)
else:
propofol_mg = float(row[3])
if propofol_mg == 0:
raise ValueError("Found row with no CP measurement or propofol amount for patient %s" % pid)
event = {
"type": "start_infusion",
"time_mins": float(row[1]),
"propofol_mg": propofol_mg,
"rate_mg_per_min": float(row[4])
}
current_patient["events"].append(event)
return patients
def parse_age(raw_age, pid):
age = float(raw_age)
if age < 0 or age > 150:
raise ValueError("Invalid patient age '%s'for patient %s" % (raw_age, pid))
return age
def parse_weight(raw_weight, pid):
weight = float(raw_weight)
if weight < 1:
raise ValueError("Invalid patient weight '%s' for patient %s" % (raw_weight, pid))
return weight
def parse_height(raw_height, pid):
height = float(raw_height)
if height < 1:
raise ValueError("Invalid patient weight '%s'for patient %s" % (raw_height, pid))
return height
def __patient_sex(code):
if code == 1:
return "m"
elif code == 2:
return "f"
else:
raise ValueError("Unknown value for patient sex '%s'. Expected '1' or '2'" % sex)
def __build_new_patient():
return {
"events": []
}
```
#### File: JMathiszig-Lee/Propofol/patient_state2.py
```python
class PatientState2:
# age: years
# weight: kilos
# height: cm
# sex: 'm' or 'f'
def __init__(self, age, weight, height, sex, params):
self.params = params
lean_body_mass = self.__lean_body_mass(weight, height, sex)
self.v1 = (params['v1a'] - params['v1b']*(age - params['age_offset'])) * (params['v1c'] * (lean_body_mass - params['lbm_offset']))
self.v2 = params['v2a'] * lean_body_mass
self.v3 = params['v3a'] * weight
# Initial concentration is zero in all components
self.x1 = 0.0
self.x2 = 0.0
self.x3 = 0.0
self.k10 = (params['k10a'] * self.v1) / 60
self.k12 = params['k12'] /60
self.k13 = params['k13'] / 60
self.k21 = (params['k12'] * (self.v1/self.v2)) / 60
self.k31 = (params['k13'] * (self.v1/self.v3)) / 60
self.keo = 0.456 / 60
self.xeo = 0.0
def give_drug(self, drug_milligrams):
self.x1 = self.x1 + drug_milligrams / self.v1
def wait_time(self, time_seconds):
x1k10 = self.x1 * self.k10
x1k12 = self.x1 * self.k12
x1k13 = self.x1 * self.k13
x2k21 = self.x2 * self.k21
x3k31 = self.x3 * self.k31
self.x1 = self.x1 + (x2k21 - x1k12 + x3k31 - x1k13 - x1k10) * time_seconds
self.x2 = self.x2 + (x1k12 - x2k21) * time_seconds
self.x3 = self.x3 + (x1k13 - x3k31) * time_seconds
@staticmethod
def with_schnider_params(age, weight, height, sex):
        # NOTE: schnider_params() below returns the classic rate-constant keys
        # (k10a, v1, ...), which do not match the volume-based keys (v1a, v1b, ...)
        # that this class's __init__ expects; matching parameters must be supplied.
        params = PatientState2.schnider_params()
        return PatientState2(age, weight, height, sex, params)
@staticmethod
def schnider_params():
params = {
'k10a': 0.443,
'k10b': 0.0107,
'k10c': -0.0159,
'k10d': 0.0062,
'k12a': 0.302,
'k12b': -0.0056,
'k13': 0.196,
'k21a': 1.29,
'k21b': -0.024,
'k21c': 18.9,
'k21d': -0.391,
'k31': 0.0035,
'v1': 4.27,
'v3': 238,
'age_offset': 53,
'weight_offset': 77,
'lbm_offset': 59,
'height_offset': 177
}
return params
def __lean_body_mass(self, weight, height, sex):
if sex != "m" and sex != "f":
raise ValueError("Unknown sex '%s'. This algorithm can only handle 'm' and 'f'. :(" % sex)
if sex == "m":
return (0.32819 * weight) + (0.33929 * height) - 29.5336
else:
return (0.29569 * weight) + (0.41813 * height) - 43.2933
def __repr__(self):
return "PatientState(x1=%f, x2=%f, x3=%f, xeo=%f)" % (self.x1, self.x2, self.x3, self.xeo)
if __name__ == '__main__':
    patient = PatientState2.with_schnider_params(50, 70, 180, "m")
    print("Initial state: " + str(patient))
    patient.give_drug(92.60001)
    print("After giving drug: " + str(patient))
    for t in range(130):
        patient.wait_time(1)
        print("After 1 sec: " + str(patient))
```
#### File: JMathiszig-Lee/Propofol/patient_state.py
```python
class PatientState:
# age: years
# weight: kilos
# height: cm
# sex: 'm' or 'f'
def __init__(self, age, weight, height, sex, params):
self.params = params
lean_body_mass = self.__lean_body_mass(weight, height, sex)
self.v1 = params['v1']
# TODO: Work out why v2 and v3 are not used in the algorithm
v2 = params['k21c'] + params['k21d'] * (age - params['age_offset'])
v3 = params['v3']
# Initial concentration is zero in all components
self.x1 = 0.0
self.x2 = 0.0
self.x3 = 0.0
self.k10 = (params['k10a'] + params['k10b'] * (weight - params['weight_offset']) + params['k10c'] * (lean_body_mass - params['lbm_offset']) + params['k10d'] * (height - params['height_offset'])) / 60
self.k12 = (params['k12a'] + params['k12b'] * (age - params['age_offset'])) / 60
self.k13 = params['k13'] / 60
self.k21 = ((params['k21a'] + params['k21b'] * (age - params['age_offset'])) / v2) / 60
self.k31 = params['k31'] / 60
self.keo = 0.456 / 60
self.xeo = 0.0
def give_drug(self, drug_milligrams):
self.x1 = self.x1 + drug_milligrams / self.v1
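    # Advance the three-compartment model by one forward-Euler step: drug moves
    # between the central compartment x1 and the peripheral compartments x2/x3 at
    # rates k12/k21 and k13/k31, is eliminated from x1 at k10, and equilibrates
    # with the effect site xeo at keo (all constants are per second).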
def wait_time(self, time_seconds):
x1k10 = self.x1 * self.k10
x1k12 = self.x1 * self.k12
x1k13 = self.x1 * self.k13
x2k21 = self.x2 * self.k21
x3k31 = self.x3 * self.k31
xk1e = self.x1 * self.keo
xke1 = self.xeo * self.keo
self.x1 = self.x1 + (x2k21 - x1k12 + x3k31 - x1k13 - x1k10) * time_seconds
self.x2 = self.x2 + (x1k12 - x2k21) * time_seconds
self.x3 = self.x3 + (x1k13 - x3k31) * time_seconds
self.xeo = self.xeo + (xk1e - xke1) * time_seconds
@staticmethod
def with_schnider_params(age, weight, height, sex):
params = PatientState.schnider_params()
return PatientState(age, weight, height, sex, params)
@staticmethod
def schnider_params():
params = {
'k10a': 0.443,
'k10b': 0.0107,
'k10c': -0.0159,
'k10d': 0.0062,
'k12a': 0.302,
'k12b': -0.0056,
'k13': 0.196,
'k21a': 1.29,
'k21b': -0.024,
'k21c': 18.9,
'k21d': -0.391,
'k31': 0.0035,
'v1': 4.27,
'v3': 238,
'age_offset': 53,
'weight_offset': 77,
'lbm_offset': 59,
'height_offset': 177
}
return params
def __lean_body_mass(self, weight, height, sex):
if sex != "m" and sex != "f":
raise ValueError("Unknown sex '%s'. This algorithm can only handle 'm' and 'f'. :(" % sex)
# TODO: Use better equation to calculate lean body mass
if sex == "m":
return 1.1 * weight - self.params['weight_offset'] * ((weight/height) * (weight/height))
else:
return 1.07 * weight - self.params['weight_offset'] * ((weight/height) * (weight/height))
def __repr__(self):
return "PatientState(x1=%f, x2=%f, x3=%f, xeo=%f)" % (self.x1, self.x2, self.x3, self.xeo)
class PatientState2:
# age: years
# weight: kilos
# height: cm
# sex: 'm' or 'f'
def __init__(self, age, weight, height, sex, params):
self.params = params
lean_body_mass = self.__lean_body_mass(weight, height, sex)
self.v1 = ((params['v1a'] * 50) - params['v1b']*(age - (params['age_offset']) * 100)) * (params['v1c'] * (lean_body_mass - (params['lbm_offset'] * 100)))
self.v2 = params['v2a'] * lean_body_mass * 2
self.v3 = params['v3a'] * weight * 5
# Initial concentration is zero in all components
self.x1 = 0.0
self.x2 = 0.0
self.x3 = 0.0
self.k10 = (params['k10a'] * self.v1) / 60
self.k12 = params['k12'] /60
self.k13 = params['k13'] / 60
self.k21 = (params['k12'] * (self.v1/self.v2)) / 60
self.k31 = (params['k13'] * (self.v1/self.v3)) / 60
self.keo = 0.456 / 60
self.xeo = 0.0
def give_drug(self, drug_milligrams):
self.x1 = self.x1 + drug_milligrams / self.v1
def wait_time(self, time_seconds):
x1k10 = self.x1 * self.k10
x1k12 = self.x1 * self.k12
x1k13 = self.x1 * self.k13
x2k21 = self.x2 * self.k21
x3k31 = self.x3 * self.k31
self.x1 = self.x1 + (x2k21 - x1k12 + x3k31 - x1k13 - x1k10) * time_seconds
self.x2 = self.x2 + (x1k12 - x2k21) * time_seconds
self.x3 = self.x3 + (x1k13 - x3k31) * time_seconds
def __lean_body_mass(self, weight, height, sex):
if sex != "m" and sex != "f":
raise ValueError("Unknown sex '%s'. This algorithm can only handle 'm' and 'f'. :(" % sex)
if sex == "m":
return (0.32819 * weight) + (0.33929 * height) - 29.5336
else:
return (0.29569 * weight) + (0.41813 * height) - 43.2933
def __repr__(self):
return "PatientState(x1=%f, x2=%f, x3=%f, xeo=%f)" % (self.x1, self.x2, self.x3, self.xeo)
class MarshState:
def __init__(self, age, weight, height, sex, params):
self.params = params
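        # Marsh model: compartment volumes scale linearly with total body weight;
        # age, height and sex are accepted only for interface compatibility.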
self.v1 = params['v1a'] * weight
self.v2 = params['v2a'] * weight
self.v3 = params['v3a'] * weight
# Initial concentration is zero in all components
self.x1 = 0.0
self.x2 = 0.0
self.x3 = 0.0
self.k10 = params['k10a'] / 60
self.k12 = params['k12'] /60
self.k13 = params['k13'] / 60
        self.k21 = params['k21'] / 60
        self.k31 = params['k31'] / 60
def give_drug(self, drug_milligrams):
self.x1 = self.x1 + drug_milligrams / self.v1
def wait_time(self, time_seconds):
x1k10 = self.x1 * self.k10
x1k12 = self.x1 * self.k12
x1k13 = self.x1 * self.k13
x2k21 = self.x2 * self.k21
x3k31 = self.x3 * self.k31
self.x1 = self.x1 + (x2k21 - x1k12 + x3k31 - x1k13 - x1k10) * time_seconds
self.x2 = self.x2 + (x1k12 - x2k21) * time_seconds
self.x3 = self.x3 + (x1k13 - x3k31) * time_seconds
@staticmethod
def with_marsh_params(age, weight, height, sex):
params = MarshState.marsh_params()
return MarshState(age, weight, height, sex, params)
@staticmethod
def marsh_params():
params = {
'v1a': 0.228,
'v2a': 0.463,
'v3a': 2.893,
'k10a': 0.119,
'k12': 0.112,
'k13': 0.042,
'k21': 0.055,
'k31': 0.0033,
'keo': 0.26,
}
return params
def __repr__(self):
return "PatientState(x1=%f, x2=%f, x3=%f)" % (self.x1, self.x2, self.x3)
if __name__ == '__main__':
patient = PatientState.with_schnider_params(34, 46.3, 157.5, "f")
print ("Initial state: " + str(patient))
patient.give_drug(90)
print ("After giving drug: " + str(patient))
times = [126, 240, 483, 962, 1803, 3583]
patient2 = MarshState.with_marsh_params(34, 46.3, 157.5, "f")
print ("Initial state: " + str(patient2))
patient2.give_drug(90)
print ("After giving drug: " + str(patient2))
times = [126, 240, 483, 962, 1803, 3583]
for t in range(961):
patient.wait_time(1)
patient2.wait_time(1)
#print str(t) + str(patient)
mod = t % 30
if mod == 0:
print (str(t) + str(patient) + str(patient2))
```
#### File: JMathiszig-Lee/Propofol/test.py
```python
from genetic_solver import create_new_population
from crosscorrelate import test_against_real_data
from multiprocessing import Pool
pool = Pool(5)
param = create_new_population(2)
def multi_core_test(cores, max, params_vector):
#TODO change this so params can be any size
params = {
'v1a': params_vector[0],
'v1b': params_vector[1],
'age_offset': params_vector[2],
'v1c': params_vector[3],
'lbm_offset': params_vector[4],
'v2a': params_vector[5],
'v3a': params_vector[6],
'k10a': params_vector[7],
'k12': params_vector[8],
'k13': params_vector[9],
}
step_size = max / cores
step_size = int(step_size)
jobs = []
for idx in range(cores):
a = step_size * idx + 1
b = step_size * (idx + 1)
if idx == (cores-1):
b = max
thing = (a, b, params)
jobs.append(thing)
results = pool.map(test_against_real_data, jobs)
#make this dynamic, cast to float?
rms = sum([thing[0] for thing in results]) / cores
meds = sum([thing[1] for thing in results]) / cores
# "%-15s %-15s" % (rms, meds)
return meds
print(param[0])
params_vector = [19.0316, 0.8329, 60.0775, 0.3416, 30.6775, 1.0755, 4.3761, 0.635, 0.3794, 0.3031]
params = {
'v1a': params_vector[0],
'v1b': params_vector[1],
'age_offset': params_vector[2],
'v1c': params_vector[3],
'lbm_offset': params_vector[4],
'v2a': params_vector[5],
'v3a': params_vector[6],
'k10a': params_vector[7],
'k12': params_vector[8],
'k13': params_vector[9],
}
stuff =(0,2,params)
single = test_against_real_data(stuff)
print(single)
multi = multi_core_test(4, 12, params_vector)
print(multi)
``` |
{
"source": "jmathus/lit",
"score": 2
} |
#### File: lit/test/bcnode.py
```python
import json
import logging
import os
import random
import subprocess
import time
import requests # `pip install requests`
logger = logging.getLogger("TestFramework.bcnode")
class BCNode():
"""A class representing a bitcoind node"""
bin_name = "bitcoind"
short_name = "bc"
min_version = 140000
index = 0
def __init__(self, tmd_dir):
self.index = self.__class__.index
self.__class__.index += 1
self.data_dir = tmd_dir + "/%snode%d" % (self.__class__.short_name, self.index)
os.makedirs(self.data_dir)
self.args = ["-regtest", "-datadir=%s" % self.data_dir, "-rpcuser=regtestuser", "-rpcpassword=<PASSWORD>", "-rpcport=18332", "-logtimemicros"]
self.msg_id = random.randint(0, 9999)
self.rpc_url = "http://regtestuser:[email protected]:18332"
def start_node(self):
logger.debug("Starting %s%d" % (self.__class__.bin_name, self.index))
try:
self.process = subprocess.Popen([self.__class__.bin_name] + self.args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
except FileNotFoundError:
raise Exception("%s not found on path. Please install %s" % (self.__class__.bin_name, self.__class__.bin_name))
# Wait for process to start
while True:
if self.process.poll() is not None:
raise Exception('%s exited with status %i during initialization' % (self.__class__.bin_name, self.process.returncode))
try:
resp = self.getinfo()
if resp.json()['error'] and resp.json()['error']['code'] == -28:
# RPC is still in warmup. Sleep some more.
continue
# Check that we're running at least the minimum version
assert resp.json()['result']['version'] >= self.__class__.min_version
logger.debug("bcnode %d started" % self.index)
break # break out of loop on success
except requests.exceptions.ConnectionError as e:
time.sleep(0.25)
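    # Sends a JSON-RPC 2.0 request over HTTP, e.g.
    #   {"method": "getblockcount", "params": [], "jsonrpc": "2.0", "id": "42"}
    # Positional and named RPC arguments are mutually exclusive.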
def send_message(self, method, pos_args, named_args):
if pos_args and named_args:
raise AssertionError("RPCs must not use a mix of positional and named arguments")
elif named_args:
params = named_args
else:
params = list(pos_args)
logger.debug("Sending message %s, params: %s" % (method, str(params)))
self.msg_id += 1
rpcCmd = {
"method": method,
"params": params,
"jsonrpc": "2.0",
"id": str(self.msg_id)
}
payload = json.dumps(rpcCmd)
resp = requests.post(self.rpc_url, headers={"Content-type": "application/json"}, data=payload)
logger.debug("Response received for %s, %s" % (method, resp.text))
return resp
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the websocket connection"""
def dispatcher(*args, **kwargs):
return self.send_message(name, args, kwargs)
return dispatcher
class LCNode(BCNode):
"""A class representing a litecoind node"""
bin_name = "litecoind"
short_name = "lc"
min_version = 130200
```
#### File: lit/test/test_break.py
```python
from lit_test_framework import wait_until
from test_basic import TestBasic
class TestBreak(TestBasic):
def run_test(self):
self._ready_coinnode()
self._ready_litnodes()
self._ready_litnode_for_channel()
self._open_channel()
self._push_funds_through_channel()
self._break_channel()
def _break_channel(self):
self.log.info("Break channel")
self.litnodes[0].BreakChannel(ChanIdx=1)
self.confirm_transactions(self.coinnodes[0], self.litnodes[0], 1)
# Make sure balances are as expected
wait_until(lambda: abs(self.litnodes[1].get_balance(self.coins[0]['code'])['TxoTotal'] - 50000000) < self.coins[0]["feerate"] * 2000)
litnode1_balance = self.litnodes[1].get_balance(self.coins[0]['code'])
assert litnode1_balance['TxoTotal'] == litnode1_balance['MatureWitty']
litnode0_balance = self.litnodes[0].get_balance(self.coins[0]['code'])
assert abs(self.balance + 950000000 - litnode0_balance['TxoTotal']) < self.coins[0]["feerate"] * 2000
self.log.info("Verify that channel breaker cannot spend funds immediately")
assert abs(litnode0_balance['TxoTotal'] - litnode0_balance['MatureWitty'] - 950000000) < self.coins[0]["feerate"] * 2000
self.log_balances(self.coins[0]['code'])
self.log.info("Advance chain 5 blocks. Verify that channel breaker can now spend funds")
self.coinnodes[0].generate(5)
self.chain_height += 5
wait_until(lambda: self.litnodes[0].get_balance(self.coins[0]['code'])["SyncHeight"] == self.chain_height)
litnode0_balance = self.litnodes[0].get_balance(self.coins[0]['code'])
assert litnode0_balance['TxoTotal'] == litnode0_balance['MatureWitty']
self.log_balances(self.coins[0]['code'])
if __name__ == "__main__":
exit(TestBreak().main())
``` |
{
"source": "JMatiasLeppanen/hki_tests",
"score": 2
} |
#### File: JMatiasLeppanen/hki_tests/calibration.py
```python
from psychopy import visual, core, event, sound
from pyee import EventEmitter
import feedparser
import random
import os
import time
from ldrop import Ldrop
import glib
class Experiment(EventEmitter):
def __init__(self):
# run the superclass constructor
EventEmitter.__init__(self)
# create constructor
self.win = None
self.draw_queue = []
self.active_aois=[]
def start_experiment(self):
self.paused = False
self.continued=False
self.next=[]
# parameters
#AOIs
self.xaois1=[-0.7,0.7,-0.7,0.7,0.0]
self.yaois1=[-0.7,0.7,0.7,-0.7,0.0]
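        # Five calibration target positions (the four corners plus the centre) in
        # PsychoPy normalised coordinates; event1/event2 step through them by round.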
#random.shuffle(self.aois)
#Images and a start value
self.imagedir = "images"
#alternatively get information of image files (can also randomize the listed images)
self.images = os.listdir(self.imagedir)
#random.shuffle(self.images)
self.image_number = 0
#number of rounds and a start value
self.rounds = 4
self.round = 0
#presentation times
#self.stimulus_display_time = 2000
waittime = 1.5 #s
# window object
self.res = [1024,768]
win = visual.Window(self.res, monitor="testMonitor", units="norm",color=(-1, -1, -1))
self.win = win
print("Look at the screen")
#active AOI
self.active_aois=[]
self.emit("start_collecting_data")
glib.idle_add(self.intro)
glib.idle_add(self.draw)
def intro(self):
self.draw_queue = []
movieobject = self.load_movie(os.path.join(self.imagedir,"Intro.mkv"))
print("intro")
self.play_movie(movieobject, 0, 0, 1.8, 1.8)
movieobject.draw()
self.stimulus_display_time=50000
self.play_movie(movieobject, 0, 0, 1.8, 1.8)
movieobject.draw()
self.draw_queue.append(movieobject)
self.next= self.event1
if self.continued:
return
else:
glib.timeout_add(self.stimulus_display_time, self.event1) #defines onset asynchrony or delay
def event1(self):
self.emit('tag', {"tag":""})
self.clear_draw_que()
self.continued=False
movieobject = self.load_movie(os.path.join(self.imagedir,"Lucky_straw.avi"))
print("event1")
x = self.xaois1[self.round]
y = self.yaois1[self.round]
self.play_movie(movieobject, x, y, 0.2, 0.2)
movieobject.draw()
self.draw_queue.append(movieobject)
self.stimulus_display_time=2000
self.next= self.event2
        # assumed fix: on_data() iterates self.active_aois and indexes a[0]/a[1],
        # so store the AOI as a list of [x, y] thresholds ("0,4"/"0,6" read as typos for 0.4/0.6)
        self.active_aois = [[0.4, 0.6]]
if self.continued:
return
else:
glib.timeout_add(self.stimulus_display_time, self.event2) #defines onset asynchrony or delay
def event2(self):
#self.continued=False
#self.draw_queue = []
self.continued=False
if self.paused:
            glib.timeout_add(100, self.event2) # poll every 100 ms until unpaused (self.trial_start is not defined in this class)
return
self.draw_queue = []
self.emit('tag', {"tag":"calib"})
x = self.xaois1[self.round]
y = self.yaois1[self.round]
stm = visual.ImageStim(win=self.win,
image=os.path.join(self.imagedir,
"Target_straw.png"),
pos=(x,y), size=0.2)
stm.draw()
# find out image dimensions
self.draw_queue.append(stm)
#self.tag_callback({"tag":"trial_event2"}) #will be updated
# maps the limits to aoi
lims_normalized = stm.size
aoi = [x-lims_normalized[0]/2, x+lims_normalized[0]/2,
y-lims_normalized[1]/2, y+lims_normalized[1]/2]
print("event2")
#print "aoi is " + str(aoi)
self.stimulus_display_time=2000
self.round += 1
#self.image_number += 1
if self.round <= self.rounds:
glib.timeout_add(self.stimulus_display_time, self.event1) #defines onset asynchrony or delay
else:
glib.timeout_add(self.stimulus_display_time, self.end) #defines onset asynchrony or delay
# self.experiment_cleanup()
#here we could have a head positio checker that checks whether we continue...
def end(self):
self.emit("stop_collecting_data")
self.experiment_cleanup()
#####
def on_data(self, dp):
if self.win is not None:
eye = visual.Circle(self.win, pos=(dp["right_gaze_point_on_display_area_x"]*2-1, -(dp["right_gaze_point_on_display_area_y"]*2-1)),
fillColor=[0.5,0.5,0.5], size=0.05, lineWidth=1.5)
eye.draw()
#active aoi check
for a in self.active_aois:
if dp["right_gaze_point_on_display_area_x"]<a[0] and dp["right_gaze_point_on_display_area_y"]>a[1]: #...is inside
self.continued=True
self.active_aois = []
                glib.idle_add(self.next)  # assumed: advance to the queued next step (original called the undefined self.trial_event1)
def draw(self):
# draw screen
for i in self.draw_queue:
i.draw()
self.win.flip()
glib.timeout_add(50, self.draw)
# self.win.flip()
def on_stop(self):
self.paused = True
def on_continue(self):
self.continued = True
#self.draw_queue=[]
glib.idle_add(self.next)
def clear_draw_que(self):
for k in self.draw_queue:
if k.__class__.__name__ == 'MovieStim3':
k.seek(0) # go start # errors some times
k.pause()
# k.autoDraw = False
elif k.__class__.__name__ == 'SoundStim':
k.stop()
else:
# imageobject
k.autoDraw = False
self.draw_queue = []
def experiment_cleanup(self):
# cleanup
self.win.close()
core.quit()
def load_movie(self, filepath):
#Load a moviefile to RAM tied to specified window.
movieobject = visual.MovieStim3(self.win, filepath, loop=True)
movieobject.units = "norm"
return movieobject
def play_movie(self, movieobject, x, y, width, height):
#Start playing movie.
# Transfer to psychopy coordinates (-1->1, 1->-1)
# from normalized (0->1, 0->1)
#p_x, p_y, width, height = utils.aoi_from_experiment_to_psychopy(aoi)
movieobject.play()
movieobject.pos = (x, y)
movieobject.size = [width, height]
movieobject.draw()
# stm.autoDraw = True
# start running here
exp = Experiment()
# create ldrop controller-instance
ldrop = Ldrop.Controller()
# use setter-functions to set details of the experiment
ldrop.set_experiment_id("test")
ldrop.set_callbacks(exp.start_experiment, exp.on_stop,
exp.on_continue, exp.on_data)
# make a subscription to experiment instance on ldrop to receive tags
#exp.tag_callback = ldrop.on_tag
ldrop.add_model(exp)
# autoadd mouse sensor if you have the sensor-module available
#ldrop.add_sensor('mouse')
ldrop.set_participant_id("jl")
ldrop.add_sensor('tobii')
# enable sensor-gui (optional)
ldrop.enable_gui()
# starts blocking
ldrop.run()
``` |
{
"source": "JMatica/apitestframework",
"score": 2
} |
#### File: apitestframework/utils/api_test_utils.py
```python
import logging
import traceback
from typing import Any, Dict, List
# local imports
from apitestframework.utils.misc import build_keys_list, get_inner_key_value
logger = logging.getLogger(__name__)
def check_result_content(result: Dict[str, Any], expected: Dict[str, Any], expected_result_file: str = None, exceptions: List[Dict[str, str]] = None) -> bool:
    '''
    Check the API call result content against the expected content
    :param result: The actual API call result
    :type result: Dict[str, Any]
    :param expected: The expected API call result
    :type expected: Dict[str, Any]
    :param expected_result_file: Name of the expected result file, used only for log messages
    :type expected_result_file: str
    :param exceptions: Keys to treat specially; each entry is a dict with 'key' and 'type' ('ignore' or 'exist')
    :type exceptions: List[Dict[str, str]]
    :return: The resulting status of the test
    :rtype: bool
    '''
if expected_result_file is None:
expected_result_file = 'N/A'
if exceptions is None:
exceptions = []
logger.debug('Checking test result content...')
test_status = True
expected_keys_list = build_keys_list(expected)
# check all keys in expected
for k in expected_keys_list:
# do not check for content if in ignore list
exc = next((e for e in exceptions if e['key'] == k), None)
if exc is None:
# get actual and expected values
result_value = get_inner_key_value(result, k)
expected_value = get_inner_key_value(expected, k)
# check
if result_value != expected_value:
logger.error('Check Result failed for key {} :: expected: {} - actual: {}'.format(k, expected_value, result_value))
test_status = False
else:
logger.debug('Key {} found in exceptions list'.format(k))
exc_type = exc['type']
if exc_type == 'ignore':
continue
elif exc_type == 'exist':
result_keys_list = build_keys_list(result)
if k not in result_keys_list:
test_status = False
# check final result
if not test_status:
logger.error('Content check failed')
logger.error('Expected result (file {}) was :: {}'.format(expected_result_file, expected))
logger.error('Actual result was :: {}'.format(result))
else:
logger.debug('Content check successful.')
# return final test status
return test_status
def check_result_code(result_code: int, expected_code: int) -> bool:
'''
Check the API call result status code against the expected status code
:param result_code: The actual API call result status code
:type result_code: int
:param expected_code: The expected API call result status code
:type expected_code: int
:return: The result of the test
:rtype: bool
'''
logger.debug('Checking test result code...')
# check final result
test_code_status = (result_code == expected_code)
if not test_code_status:
logger.error('Code check failed')
logger.error('Expected code was :: {}'.format(expected_code))
logger.error('Actual result was :: {}'.format(result_code))
else:
logger.debug('Code check successful.')
# return final test status
return test_code_status
```
#### File: apitestframework/utils/test_status.py
```python
from enum import IntEnum, unique
@unique
class TestStatus(IntEnum):
'''
Status of a Test
'''
# test not executed yet
PENDING = 0
# test in execution
RUNNING = 1
# test executed successfully
SUCCESS = 2
# test executed with failures
FAILURE = 3
# test skipped
SKIPPED = 4
# unknown
UNKNOWN = 39
def icon(self) -> str:
'''
Return a unicode symbol representing the status
:return: A code representing the status
:rtype: str
'''
if self == TestStatus.PENDING:
return '\N{White Hourglass}'
elif self == TestStatus.RUNNING:
return '\N{Black Right-Pointing Triangle}'
elif self == TestStatus.SUCCESS:
return '\N{Heavy Check Mark}'
elif self == TestStatus.FAILURE:
return '\N{Heavy Ballot X}'
elif self == TestStatus.SKIPPED:
return '\N{Fisheye}'
else:
return '\N{Question Mark}'
```
#### File: test/utils/test_api_test_utils.py
```python
from apitestframework.utils.api_test_utils import check_result_code, check_result_content
class TestApiTestUtils(object):
'''
Test utils.api_test_utils module
'''
def test_check_result_code(self):
'''
Test check_result_code method
'''
assert check_result_code(500, 200) == False
assert check_result_code(200, 200) == True
def test_check_result_content_01(self):
'''
Test check_result_content method
'''
expected = {
'id': 'some_id',
'solutions': [
{
'solutionId': 'VCC-2001-09400-59700-1567116000'
}
],
'money': {
'currency': 'EUR'
}
}
result = {
'id': 'some_id',
'solutions': [
{
'solutionId': 'VCC-2001-09400-59700-1567116000'
}
],
'money': {
'currency': 'EUR'
}
}
assert check_result_content(result, expected) == True
def test_check_result_content_02(self):
'''
Test check_result_content method
'''
expected = {
'id': 'some_id',
'solutions': [
{
'solutionId': 'VCC-2001-09400-59700-1567116000'
}
],
'money': {
'currency': 'EUR'
}
}
expected_2 = {
'id': 'some_id',
'solutions': [
{
'solutionId': 'VCC-2001-09400-59700-1567116000'
}
],
'money': {
'currency': 'EUR',
'price': 200
}
}
result = {
'solutions': [
{
'solutionId': 'VCC-2001-09400-59700-1567116000'
}
],
'money': {
'currency': 'USD'
}
}
assert check_result_content(result, expected) == False
assert check_result_content(result, expected, None, [{ 'key': 'id', 'type': 'ignore' }, { 'key': 'money.currency', 'type': 'exist' }]) == True
assert check_result_content(result, expected_2, None, [{ 'key': 'money.price', 'type': 'exist' }]) == False
```
#### File: test/utils/test_config.py
```python
import os
# library imports
import pytest
# local imports
from apitestframework.utils.config import get_conf_value, load_config
class TestConfig(object):
'''
Test utils.config module
'''
def test_missing_load_config(self):
'''
Test load_config method on error
'''
with pytest.raises(SystemExit) as pytest_wrapped_e:
load_config('nono.json')
assert pytest_wrapped_e.type == SystemExit
assert 'missing configuration file' in pytest_wrapped_e.value.code.lower()
def test_load_config(self):
'''
Test load_config method
'''
with open('tmp.json', 'w', encoding='utf-8') as f:
f.write('{"key":"value"}')
conf = load_config('tmp.json')
assert conf['key'] == 'value'
os.remove('tmp.json')
def test_get_conf_value(self):
'''
Test get_conf_value method
'''
data = {
'key': 42
}
def_val = ''
assert get_conf_value(data, 'key') == 42
assert get_conf_value(data, 'nokey') is None
assert get_conf_value(data, 'nokey', def_val) == def_val
assert get_conf_value(None, 'key') is None
assert get_conf_value(None, 'key', def_val) == def_val
assert get_conf_value(None, None) is None
assert get_conf_value(None, None, def_val) == def_val
``` |
{
"source": "jmatt/cloaked-sombrero",
"score": 3
} |
#### File: cloaked-sombrero/visuals/stacked_bar_node_by_flavor.py
```python
from collections import OrderedDict
from itertools import chain, repeat
import os
import numpy as np
import matplotlib.pyplot as plt
DATA_PATH = "~/dev/bio5/cloaked-sombrero/data/nodes_by_flavor.txt"
def get_data():
return open(os.path.expanduser(DATA_PATH)).readlines()[1:]
def by_node(data):
"""
Split lines from OpenStack mysql nova database by node, flavor
and the count of that flavor per node into a dictionary of
dictionaries.
"""
coll = {}
for line in data:
val, flavor, node = line.split()
if not node in coll.keys():
coll[node] = {}
coll[node][flavor] = int(val)
return coll
def by_flavor(nodes):
"""
Transform nodes collection to group by flavors for visualization.
"""
flavor_names = set(
chain(
*[fd.keys() for node, fd in nodes.iteritems()]))
coll = OrderedDict({f: [] for f in flavor_names})
node_names = nodes.keys()
node_names.sort()
for node_name in node_names:
for flavor_name in flavor_names:
if flavor_name in nodes[node_name].keys():
coll[flavor_name].append(nodes[node_name][flavor_name])
else:
coll[flavor_name].append(0)
return coll
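# Each flavor contributes one bar segment per node; the running `btm` list keeps
# the cumulative height so successive flavors stack on top of one another.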
def build_visual(data):
nodes = by_node(data)
flavors = by_flavor(nodes)
flavor_size = len(flavors.keys())
node_names = sorted(nodes.keys())
node_size = len(nodes.keys())
btm = [0] * node_size
ind = np.arange(0, node_size * 1.5, 1.5)
width = 0.86
colors= ["r", "b", "g", "dimgray", "darkcyan", "lime",
"navy", "teal", "indigo", "y", "skyblue", "sage"]
color_labels = []
c = 0
plt.xticks(ind + width/2., node_names, rotation=70)
plt.title("Node resource usage by flavor")
for f,v in flavors.iteritems():
b = plt.bar(ind,
v,
width,
color=colors[c % len(colors)],
bottom=btm)
color_labels.append(b[0])
c += 1
btm = [f + b for f, b in zip(btm, v)]
plt.legend(color_labels, flavors.keys())
if __name__ == "__main__":
build_visual(get_data())
plt.show()
``` |
{
"source": "jmattfong/smart-house",
"score": 3
} |
#### File: lib/account/account.py
```python
import sys
import traceback
from abc import ABCMeta, abstractmethod
from lib.util import auto_str
from lib.log import logger
@logger
@auto_str
class Account(metaclass=ABCMeta) :
def __init__(self, account_name, secret_reader) :
self.account_name = account_name
self.secret_reader = secret_reader
self.is_connected = False
def connect(self) :
credentials = self.secret_reader.read(self.account_name)
if credentials == None :
self.log.warn(f"No credentials found for account type '{self.account_name}'.")
else :
self.log.info(f"Account '{self.account_name}' connecting...")
try :
self.connect_with_credentials(credentials)
self.is_connected = True
except :
self.log.error(f"Account '{self.account_name}' failed to connect.")
traceback.print_exc()
@abstractmethod
def connect_with_credentials(self, credentials) :
pass
def disconnect(self) :
self.log.info(f"Disconnecting from account '{self.account_name}'")
self.is_connected = False
self.disconnect_and_forget()
@abstractmethod
def disconnect_and_forget(self) :
pass
```
#### File: src/lib/secret.py
```python
import json
import os
class SecretReader :
def __init__(self, secretDirectory='./secrets', secretExtension='.secret') :
self.secretDirectory = secretDirectory
self.secretExtension = secretExtension
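    # Reads JSON credentials from '<secretDirectory>/<secretName><secretExtension>'
    # and returns the parsed dict, or None when no such secret file exists.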
def read(self, secretName) :
try :
secretFile = open(os.path.join(self.secretDirectory, secretName + self.secretExtension), 'r')
return json.loads(secretFile.read())
except FileNotFoundError :
return None
``` |
{
"source": "jmattfong/weakest-link",
"score": 3
} |
#### File: weakest-link/weakest_link/game.py
```python
from weakest_link.util import wait_for_choice, green, red, dollars, get_random_mean_word, starts_with_vowel, format_time
class WeakestLinkGame :
def __init__(self, players, rounds, final_round) :
self.players = players
self.rounds = rounds
self.final_round = final_round
self.total_bank = 0
self.maximum_bank = 0
self.current_round = 0
# For the API
def get_current_round(self) :
return self.rounds[self.current_round] if self.current_round < len(self.rounds) else self.final_round
def get_current_round_name(self) :
return self.get_current_round().get_name()
def get_players(self) :
return self.players
def get_current_bank(self, color=True) :
if self.current_round >= len(self.rounds) :
return 0
return dollars(self.get_current_round().round_bank, color=color)
def get_total_bank(self, color=True) :
return dollars(self.total_bank, color=False)
def get_bank_links(self) :
if self.current_round >= len(self.rounds) :
return []
return [dollars(link, color=False) for link in self.get_current_round().bank_links]
def get_current_link(self) :
if self.current_round >= len(self.rounds) :
return 0
return self.get_current_round().current_link
def get_current_player_num(self) :
if self.current_round >= len(self.rounds) :
return 0
return self.get_current_round().get_current_player_num()
def get_time_remaining(self) :
if self.current_round >= len(self.rounds) :
return 0
time = self.get_current_round().seconds_remaining
time = time if time > 0 else 0
return format_time(time)
# For the CLI
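    # CLI game flow: play each regular round, then vote players off (no vote in
    # the first two rounds, three votes after the third, one per round after
    # that), keep voting until two players remain, and finish with the final round.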
def run(self) :
first_player = self.players[0]
for i in range(len(self.rounds)) :
self.current_round = i
if len(self.players) == 2 :
print("Not running all rounds since we don't have enough players")
print()
break
if i != 0 :
print('As the strongest link last round,', green(first_player), 'will go first')
print()
round = self.rounds[i]
self.try_to_start_round(i+1, round, first_player)
first_player = self.handle_finished_round_results(round)
if self.current_round < 2 :
print('Not voting off weakest link since we are on round', self.current_round+1)
weakest_link = None
elif self.current_round == 2 :
print(red('Time to vote off multiple players!'))
weakest_link = self.vote_for_weakest_link()
weakest_link = self.vote_for_weakest_link()
weakest_link = self.vote_for_weakest_link()
else :
weakest_link = self.vote_for_weakest_link()
if first_player == weakest_link :
first_player = round.get_strongest_link(first_player)
self.current_round = len(self.rounds)
while len(self.players) > 2 :
weakest_link = self.vote_for_weakest_link()
if first_player == weakest_link :
first_player = round.get_strongest_link(first_player)
first_player = wait_for_choice('As the strongest link last round, ' + green(first_player) + ' chooses who will go first in the ' +\
red('final round') + '. Choices: ' + ", ".join(self.players) + ' > ', self.players)
self.try_to_start_round('Final', self.final_round, first_player)
print(green(str(self.final_round.winner) + ' is the winner! They win ' + dollars(self.total_bank)))
print()
print("Game over, goodnight!")
# Helpers
def try_to_start_round(self, round_num, round, first_player) :
wait_for_choice("Enter 'S' to start round " + str(round_num) + " > ", 'S')
print('Starting round', round_num)
print()
round.start_round(self.players, first_player)
print('Finished round', round_num)
print()
def handle_finished_round_results(self, round) :
# TODO determine next first player and total bank
self.total_bank += round.round_bank
self.maximum_bank += round.bank_links[-1]
strongest_link = round.get_strongest_link()
print('That round the team banked', dollars(round.round_bank))
adjective = get_random_mean_word()
print('Out of a possible', dollars(self.maximum_bank), "the team banked", 'an' if starts_with_vowel(adjective) else 'a', adjective, dollars(self.total_bank))
print('Statistically, the', green('strongest link'), 'was', green(strongest_link))
print('Statistically, the', red('weakest link'), 'was', red(round.get_weakest_link()))
print()
return strongest_link
def vote_for_weakest_link(self) :
weakest_link = wait_for_choice("Who is the weakest link? Choices: " + ', '.join(self.players) + " > ", self.players)
self.players.remove(weakest_link)
return weakest_link
```
#### File: weakest-link/weakest_link/round.py
```python
import threading
from datetime import datetime
from weakest_link.util import debug, green, red, wait_for_choice, append_to_file, dollars
class Round :
def __init__(self, name, players) :
self.name = name
def get_name(self) :
return self.name
class WeakestLinkRound(Round) :
def __init__(self, name, round_time_s, bank_links, questions, answer_file='./answers.txt') :
        super().__init__(name, players)
self.name = name
self.round_time_s = round_time_s
self.bank_links = bank_links
self.questions = questions
self.round_bank = 0
self.current_link = 0
self.started = False
self.spaces = ' '
self.players = []
self.current_question = 0
self.current_player_banked = 0
self.first_player_offset = 0
self.seconds_remaining = self.round_time_s
self.answer_file = answer_file
def start_round(self, players, first_player) :
if self.started :
print('Round is already started!')
return
self.players = players
self.player_answers = dict([(player, []) for player in players])
self.start_time = datetime.now()
self.question_start_time = self.start_time
self.started = True
append_to_file(self.answer_file, 'Start round ' + self.get_name())
self.first_player_offset = 0
for player in players :
if first_player == player :
break
else :
self.first_player_offset += 1
self.run()
def start_timer(self) :
self.seconds_remaining = self.round_time_s
self.done = False
self.timer()
def timer(self) :
if self.done :
self.stop_round()
return
if self.seconds_remaining < 1200 :
seconds = str(self.seconds_remaining)
while len(seconds) < 3 :
seconds = ' ' + seconds
print('\r', seconds, 'seconds remaining', end ="")
self.seconds_remaining -= 1
if self.seconds_remaining >= 0:
threading.Timer(1, self.timer).start()
else :
self.stop_round()
def run(self) :
self.start_timer()
print('\r')
get_next_question = False
while not self.done :
if self.current_link == len(self.bank_links) :
self.bank()
print(green('The team ran the chain!'))
print()
self.stop_round()
return
print(self.spaces, self.get_question(get_next_question), end ="")
choice = wait_for_choice("\r" + self.spaces[:-7] + "[Z,X,C] ", ['c', 'z', 'x']).lower()
if self.done :
print('Round is over!')
break
elif choice == 'c' :
self.answer_question(True)
get_next_question = True
elif choice == 'z':
self.answer_question(False)
get_next_question = True
else:
self.current_player_banked = self.bank()
get_next_question = False
self.done = True
print()
def get_current_player_num(self) :
return (self.current_question + self.first_player_offset) % len(self.players) if len(self.players) > 0 else 0
def get_current_player(self) :
return self.players[self.get_current_player_num()]
def get_question(self, get_next) :
current_player = self.get_current_player()
if get_next :
(question, answer) = self.questions.get_next_question()
else :
(question, answer) = self.questions.get_current_question()
return green('"' + current_player + ': ' + question + '"') + ' (Answer: ' + red(answer) + ')'
def bank(self) :
if not self.started :
print('Round is over!')
return 0
if self.current_link == 0 :
print(self.spaces, 'No BANK. Total this round:', self.round_bank)
return 0
# TODO handle win condition or else get IndexError because we ran off the end of the chain
amount_banked = self.bank_links[self.current_link - 1]
self.round_bank += amount_banked
self.current_link = 0
print(self.spaces, 'Recorded BANK. Total this round:', self.round_bank)
return amount_banked
def answer_question(self, correct) :
if not self.started :
print('Round is over!')
return
question_stop_time = datetime.now()
# TODO why was it like this? current_player = self.players[self.current_question % len(self.players)]
current_player = self.get_current_player()
answer = (correct, question_stop_time - self.question_start_time, self.current_player_banked)
append_to_file(self.answer_file, (current_player, answer, self.questions.get_current_question()))
self.player_answers[current_player].append(answer)
# Setup the next question
self.current_player_banked = 0
if correct :
self.current_link += 1
else :
self.current_link = 0
self.question_start_time = question_stop_time
self.current_question += 1
print(self.spaces, 'Recorded', 'CORRECT' if correct else 'INCORRECT', 'answer')
debug(current_player, 'answered', answer)
def stop_round(self) :
self.started = False
self.done = True
print()
append_to_file(self.answer_file, ('End round ' + self.get_name(), dollars(self.round_bank, color=False)))
def get_strongest_link(self, eliminated=None) :
strongest_link = None
for (player, answers) in self.player_answers.items() :
if player == eliminated :
continue
total_correct = 0
total_time = 0
total_banked = 0
for (correct, time_taken, money_banked) in answers :
total_correct += correct
total_time = total_time + time_taken.total_seconds()
total_banked += money_banked
percent_correct = float(total_correct) / len(answers) if len(answers) > 0 else 0
if strongest_link is None :
strongest_link = (player, percent_correct, total_time, total_banked)
else :
(strongest_player, strongest_percent, strongest_time, strongest_banked) = strongest_link
if percent_correct > strongest_percent :
strongest_link = (player, percent_correct, total_time, total_banked)
debug(player, 'has a better percent correct with', percent_correct, 'than', strongest_player, 'with', strongest_percent)
elif percent_correct == strongest_percent :
if total_banked > strongest_banked :
strongest_link = (player, percent_correct, total_time, total_banked)
debug(player, 'has banked more with', total_banked, 'than', strongest_player, 'with', strongest_banked)
elif total_banked == strongest_banked :
if total_time < strongest_time :
strongest_link = (player, percent_correct, total_time, total_banked)
debug(player, 'took less time', total_time, 'than', strongest_player, 'with', strongest_time)
elif total_time == strongest_time :
print('Holy crap, a strongest link tie between', player, 'and', strongest_player)
(strongest_player, strongest_percent, strongest_time, strongest_banked) = strongest_link
return strongest_player
def get_weakest_link(self) :
weakest_link = None
for (player, answers) in self.player_answers.items() :
total_correct = 0
total_time = 0
total_banked = 0
for (correct, time_taken, money_banked) in answers :
total_correct += correct
total_time = total_time + time_taken.total_seconds()
total_banked += money_banked
percent_correct = float(total_correct) / len(answers) if len(answers) > 0 else 0
if weakest_link is None :
weakest_link = (player, percent_correct, total_time, total_banked)
else :
(weakest_player, weakest_percent, weakest_time, weakest_banked) = weakest_link
if percent_correct < weakest_percent :
weakest_link = (player, percent_correct, total_time, total_banked)
debug(player, 'had a worse percent correct with', percent_correct, 'than', weakest_player, 'with', weakest_percent)
elif percent_correct == weakest_percent :
if total_banked < weakest_banked :
weakest_link = (player, percent_correct, total_time, total_banked)
debug(player, 'has banked less with', total_banked, 'than', weakest_player, 'with', weakest_banked)
elif total_banked == weakest_banked :
if total_time > weakest_time :
weakest_link = (player, percent_correct, total_time, total_banked)
debug(player, 'took more time', total_time, 'than', weakest_player, 'with', weakest_time)
elif total_time == weakest_time :
print('Holy crap, a weakest link tie between', player, 'and', weakest_player)
(weakest_player, weakest_percent, weakest_time, weakest_banked) = weakest_link
return weakest_player
``` |
{
"source": "j-matus/anketa",
"score": 3
} |
#### File: scripts/phpdoc/dependency_clusters.py
```python
import sys
root_dict = {}
x = sys.stdin.read()
for file in x.rstrip().split(' '):
path_components = file.split('/')
current_dict = root_dict
for path_segment in path_components:
path_segment = path_segment.replace(".", "_").replace("-", "_")
if path_segment not in current_dict:
current_dict[path_segment] = {}
current_dict = current_dict[path_segment]
def print_dict(level, dictionary, path):
for key in dictionary.keys():
if dictionary[key].keys():
print("".rjust(level*4), "subgraph", "cluster"+path+"_"+key, "{")
print("".rjust(level*4), " color=blue")
print("".rjust(level*4), " label=\"", key, "\"")
print_dict(level+1, dictionary[key], path+"_"+key)
print("".rjust(level*4), "}")
else:
print("".rjust(level*4), key.replace("_php", ""))
print_dict(0, root_dict, "")
```
#### File: AnketaBundle/Integration/votr_runner.py
```python
import os
import sys
import json
from os.path import join, dirname, abspath
anketa_root = join(dirname(abspath(__file__)), '../../..')
votr_root = join(anketa_root, 'vendor/svt/votr')
sys.path.insert(0, votr_root)
# --------------------------------------------------
def main():
from fladgejt.login import create_client
json_input = json.load(sys.stdin)
fakulta = json_input['fakulta']
semestre = json_input['semestre']
relevantne_roky = [ak_rok for ak_rok, sem in semestre]
client = create_client(json_input['server'], json_input['params'])
# full_name = client.get_full_name() # TODO
is_student = False
subjects = []
ostatne_studia = []
if client.get_som_student():
studia = client.get_studia()
for studium in studia:
if studium.sp_skratka == 'ZEkP' and not studium.organizacna_jednotka: studium = studium._replace(organizacna_jednotka='PriF')
if fakulta and studium.organizacna_jednotka != fakulta:
for zapisny_list in client.get_zapisne_listy(studium.studium_key):
# If this is a study programme relevant to us, return it
# Important when detecting a missing org. unit in AIS
if zapisny_list.akademicky_rok in relevantne_roky:
toto_studium = {}
toto_studium['skratka'] = studium.sp_skratka or "Neznamy program"
toto_studium['oj'] = studium.organizacna_jednotka or "bez fakulty"
ostatne_studia.append(toto_studium)
continue # TODO: use zapisny_list.organizacna_jednotka once it is available in the REST API
for zapisny_list in client.get_zapisne_listy(studium.studium_key):
if zapisny_list.akademicky_rok not in relevantne_roky: continue
is_student = True
for predmet in client.get_hodnotenia(zapisny_list.zapisny_list_key)[0]:
if [zapisny_list.akademicky_rok, predmet.semester] not in semestre: continue
subjects.append(dict(
skratka=predmet.skratka,
nazov=predmet.nazov,
semester=predmet.semester,
akRok=zapisny_list.akademicky_rok,
rokStudia=zapisny_list.rocnik,
studijnyProgram=dict(skratka=studium.sp_skratka, nazov=studium.sp_popis),
))
client.logout()
result = {}
# result['full_name'] = full_name
result['is_student'] = is_student
result['subjects'] = subjects
result['ostatne_studia'] = ostatne_studia
print(json.dumps(result))
if __name__ == '__main__':
main()
``` |
{
"source": "jmatuskey/exoctk",
"score": 2
} |
#### File: exoctk/tests/test_modelgrid.py
```python
import os
from pkg_resources import resource_filename
import numpy as np
from exoctk import modelgrid as mg
def test_modelgrid_object():
"""Test to see that ModelGrid object can be created"""
print('Testing ModelGrid object creation...')
# Load model grid
mgrid = mg.ModelGrid(resource_filename('exoctk', 'data/core/modelgrid/'))
assert isinstance(mgrid, mg.ModelGrid)
def test_model_getter_on_grid():
"""Test to see that an on-grid model can be pulled from a ModelGrid
object"""
print('Testing on-grid model getter from ModelGrid object...')
# Load model grid
mgrid = mg.ModelGrid(resource_filename('exoctk', 'data/core/modelgrid/'))
# Fetch model
model = mgrid.get(4000, 4.5, 0)
assert isinstance(model.get('flux'), np.ndarray)
def test_model_getter_off_grid():
"""Test to see that an off-grid model can be pulled from a ModelGrid
object"""
print('Testing off-grid model getter from ModelGrid object...')
# Load model grid
mgrid = mg.ModelGrid(resource_filename('exoctk', 'data/core/modelgrid/'))
# Fetch model
model = mgrid.get(4023, 4.1, -0.1)
assert isinstance(model.get('flux'), np.ndarray)
``` |
{
"source": "jmatuskey/jupyterhub-deploy",
"score": 2
} |
#### File: jupyterhub-deploy/tools/check_cluster.py
```python
import sys
import os
import subprocess
import argparse
import re
import json
from collections import defaultdict
import builtins
import functools
import traceback
import yaml
CLUSTER_CHECKS = """
Globals:
environment:
- DEPLOYMENT_NAME
- ENVIRONMENT
- JH_HOSTNAME
- ADMIN_ARN
- ACCOUNT_ID
constants:
V_K8S: "1.21"
MAX_NODE_AGE: 10d
MAX_EFS_FILE_SYSTEM_SIZE: 50000000000000
CORE_NODES: 3
NOTEBOOK_EC2_TYPE: r5.xlarge
MAX_RESTARTS: 0
LOG_REACH: 30m
Groups:
- group: Kubernetes Pods
command: kubectl get pods -A
parser: named_columns
assertions:
- name: All pods
all: STATUS=='Running' and int(RESTARTS)<=MAX_RESTARTS
- name: EFS provisioner
ok_rows==1: NAMESPACE=='support' and 'efs-provisioner' in NAME
- name: Kube Proxy
ok_rows>=4: NAMESPACE=='kube-system' and 'kube-proxy' in NAME
- name: Autoscaler
ok_rows==1: NAMESPACE=='kube-system' and 'cluster-autoscaler' in NAME
- name: AWS Pods
ok_rows>=4: NAMESPACE=='kube-system' and 'aws-node' in NAME
- name: Core DNS
ok_rows==2: NAMESPACE=='kube-system' and 'coredns' in NAME
- group: JupyterHub Pods
command: kubectl get pods -A
parser: named_columns
assertions:
- name: Image puller
ok_rows>=1: NAMESPACE=='default' and 'continuous-image-puller' in NAME
- name: Hub
ok_rows==1: NAMESPACE=='default' and 'hub' in NAME
- name: Proxy
ok_rows>=1: NAMESPACE=='default' and 'proxy' in NAME
- name: User-scheduler
ok_rows==2: NAMESPACE=='default' and 'user-scheduler' in NAME
- name: User-placeholder
ok_rows>=1: NAMESPACE=='default' and 'user-placeholder' in NAME
- group: JupyterHub Nodes
command: kubectl get nodes -A --show-labels=true
parser: named_columns
assertions:
- name: At least 4 STATUS Ready new Hub AMI ID
ok_rows>=4: STATUS=="Ready" # and HUB_AMI_ID in LABELS
- name: All Nodes Ready Status
all: STATUS=="Ready" or STATUS=="Ready,SchedulingDisabled"
- name: Kubernetes Version
all: V_K8S in VERSION
- name: Node Age
all: convert_age(AGE) < convert_age(MAX_NODE_AGE)
- name: Core us-east-1a
ok_rows==1: "DEPLOYMENT_NAME+'-core' in LABELS and 't3.small' in LABELS and 'zone=us-east-1a' in LABELS"
- name: Core us-east-1b
ok_rows==1: "DEPLOYMENT_NAME+'-core' in LABELS and 't3.small' in LABELS and 'zone=us-east-1b' in LABELS"
- name: Core us-east-1c
ok_rows==1: "DEPLOYMENT_NAME+'-core' in LABELS and 't3.small' in LABELS and 'zone=us-east-1c' in LABELS"
- name: Notebook nodes
ok_rows>=1: "DEPLOYMENT_NAME+'-notebook' in LABELS and NOTEBOOK_EC2_TYPE in LABELS and 'region=us-east-1' in LABELS"
- group: EKS Services
command: kubectl get services -A
parser: named_columns
assertions:
- name: Datadog Cluster Agent Service
ok_rows==1: NAMESPACE=='datadog' and NAME=='datadog-cluster-agent' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='5005/TCP'
- name: Datadog Kube State Metrics Service
ok_rows==1: NAMESPACE=='datadog' and NAME=='datadog-kube-state-metrics' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8080/TCP'
- name: Hub Service
ok_rows==1: NAMESPACE=='default' and NAME=='hub' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8081/TCP'
- name: Kubernetes Service
ok_rows==1: NAMESPACE=='default' and NAME=='kubernetes' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='443/TCP'
- name: Proxy API Service
ok_rows==1: NAMESPACE=='default' and NAME=='proxy-api' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8001/TCP'
- name: Proxy Public Service
ok_rows==1: NAMESPACE=='default' and NAME=='proxy-public' and TYPE=='LoadBalancer' and '.elb.amazonaws.com' in _['EXTERNAL-IP'] and '443:' in _['PORT(S)'] and '80:' in _['PORT(S)'] and 'TCP' in _['PORT(S)'] and 'UDP' not in _['PORT(S)']
- name: Cluster Autoscaler Service
ok_rows==1: NAMESPACE=='kube-system' and NAME=='cluster-autoscaler-aws-cluster-autoscaler' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8085/TCP'
- name: Kube DNS Service
ok_rows==1: NAMESPACE=='kube-system' and NAME=='kube-dns' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='53/UDP,53/TCP'
- group: EKS Deployments
command: kubectl get deployments -A
parser: named_columns
assertions:
- name: Hub Deployment
ok_rows==1: NAMESPACE=='default' and NAME=='hub' and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- name: Proxy Deployment
ok_rows==1: NAMESPACE=='default' and NAME=='proxy' and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- name: User Scheduler Deployment
ok_rows==1: NAMESPACE=='default' and NAME=='user-scheduler' and READY=='2/2' and _['UP-TO-DATE']=='2' and AVAILABLE=='2'
- name: Cluster Autoscaler Deployment
ok_rows==1: NAMESPACE=='kube-system' and 'cluster-autoscaler' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- name: Core DNS Deployment
ok_rows==1: NAMESPACE=='kube-system' and 'coredns' in NAME and READY=='2/2' and _['UP-TO-DATE']=='2' and AVAILABLE=='2'
- name: EFS Provisioner Deployment
ok_rows==1: NAMESPACE=='support' and 'efs-provisioner' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- name: Datadog Cluster Agent Deployment
ok_rows==1: NAMESPACE=='datadog' and 'datadog-cluster-agent' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- name: Datadog Kube Metrics Deployment
ok_rows==1: NAMESPACE=='datadog' and 'datadog-kube-state-metrics' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- group: Route-53 Host
command: "host {JH_HOSTNAME}"
parser: raw
assertions:
- name: DNS Mapping
simple: "f'{JH_HOSTNAME} is an alias for' in _"
- group: JupyterHub Index Page
command: "wget --no-check-certificate -O- {JH_HOSTNAME}"
parser: raw
assertions:
- name: Server Index Page
simple: "'HTTP request sent, awaiting response... 200 OK' in _"
- group: EFS File Systems
command: awsudo {ADMIN_ARN} aws efs describe-file-systems --output yaml --query FileSystems
parser: yaml
assertions:
- name: EFS Home Dirs
ok_rows==1: Name==DEPLOYMENT_NAME+'-home-dirs' and LifeCycleState=='available' and Encrypted==True and NumberOfMountTargets==3 and OwnerId==ACCOUNT_ID and aws_kv_dict(Tags)['stsci-backup']=='dmd-2w-sat'
- name: EFS Max Size
all: int(SizeInBytes['Value']) < MAX_EFS_FILE_SYSTEM_SIZE
- group: Daemonsets named rows
command: kubectl get daemonsets -A
parser: named_rows
assertions:
- name: datadog - proxy - aws-nodes READY
simple: _['datadog']['READY'] == _['kube-proxy']['READY'] == _['aws-node']['READY']
- name: datadog - proxy - aws-nodes DESIRED
simple: _['datadog']['DESIRED'] == _['kube-proxy']['DESIRED'] == _['aws-node']['DESIRED']
- name: datadog - proxy - aws-nodes CURRENT
simple: _['datadog']['CURRENT'] == _['kube-proxy']['CURRENT'] == _['aws-node']['CURRENT']
- name: datadog - proxy - aws-nodes UP-TO-DATE
simple: _['datadog']['UP-TO-DATE'] == _['kube-proxy']['UP-TO-DATE'] == _['aws-node']['UP-TO-DATE']
- name: datadog - proxy - aws-nodes AVAILABLE
simple: _['datadog']['AVAILABLE'] == _['kube-proxy']['AVAILABLE'] == _['aws-node']['AVAILABLE']
- name: continuous image puller notebook nodes only
simple: int(_['continuous-image-puller']['READY']) == int(_['aws-node']['READY']) - CORE_NODES
- group: Daemonsets named columns
command: kubectl get daemonsets -A
parser: named_columns
assertions:
- name: continuous-image-puller
ok_rows==1: NAMESPACE=='default' and NAME=='continuous-image-puller'
- name: datadog
ok_rows==1: NAMESPACE=='datadog' and NAME=='datadog'
- name: kube-proxy
ok_rows==1: NAMESPACE=='kube-system' and NAME=='kube-proxy'
- name: aws-node
ok_rows==1: NAMESPACE=='kube-system' and NAME=='aws-node'
- name: matching daemonset states
all: READY==DESIRED==CURRENT==AVAILABLE==_['UP-TO-DATE']
- group: EKS AMI Rotation
command: awsudo {ADMIN_ARN} aws eks list-nodegroups --cluster-name {DEPLOYMENT_NAME} --query nodegroups --output text
parser: raw
assertions:
- name: Only rotated nodegroup names
simple: "functools.reduce(lambda a, b: a and b, [x.count('-')!=1 for x in _.split()])"
- group: Log Error Check
function: pod_logs(LOG_REACH)
parser: yaml
assertions:
- name: No errors in logs
simple: ERRORS==0
- group: Pod to Node Map
command: kubectl get pods -A -o wide
replace_output:
input: NOMINATED NODE
output: NOMINATED_NODE
parser: node_map
print_parsing: true
""" # noqa: E501
def convert_age(age_str):
"""Convert k8s abbreviated-style datetime str e.g. 14d2h to an integer."""
# age_str_org = age_str
def age_subst(age_str, letter, factor):
parts = age_str.split(letter)
if len(parts) == 2:
age_str = parts[0] + "*" + factor + "+" + parts[1]
return age_str
age_str = age_subst(age_str, "d", "60*60*24")
age_str = age_subst(age_str, "h", "60*60")
age_str = age_subst(age_str, "m", "60")
age_str = age_subst(age_str, "s", "1")
age_str = age_str[:-1]
# print(
# f"convert_age({repr(age_str_org)}) --> {repr(age_str)} --> {eval(age_str)}" # nosec
# ) # nosec
return eval(age_str) # nosec
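# Illustrative examples (derived from the substitution logic above): the k8s
# age string is rewritten into an arithmetic expression and then eval'ed,
# e.g. "14d2h" -> "14*60*60*24+2*60*60+" -> (trailing '+' stripped) -> 1216800.
#
#   convert_age("45m")    # -> 2700
#   convert_age("10d")    # -> 864000
#   convert_age("14d2h")  # -> 1216800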
def aws_kv_dict(key_value_dict_list):
"""Convert AWS dict representation [{ 'Key':k, 'Value':v}, ...] to a Python dict."""
return {item["Key"]: item["Value"] for item in key_value_dict_list}
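# Illustrative example: this turns the AWS tag representation used in the spec's
# EFS assertions into a directly indexable dict, e.g.
#   aws_kv_dict([{"Key": "stsci-backup", "Value": "dmd-2w-sat"}])
#   # -> {"stsci-backup": "dmd-2w-sat"}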
def run(cmd, cwd=".", timeout=10):
"""Run subprocess `cmd` in dir `cwd` failing if not completed within `timeout` seconds
of if `cmd` returns a non-zero exit status.
Returns both stdout+stderr from `cmd`. (untested, verify manually if in doubt)
"""
print(cmd)
result = subprocess.run(
cmd.split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
check=True,
cwd=cwd,
timeout=timeout,
) # maybe succeeds
return result.stdout
def parse_node_map(output):
namespaces = parse_named_columns(output)
node_map = defaultdict(list)
for namespace in namespaces:
node_map[namespace["NODE"]].append(
namespace["NAMESPACE"] + ":" + namespace["NAME"]
)
output = ["Mapping from Node to Pod", "-" * 80, yaml.dump(dict(node_map))]
return "\n".join(output)
def parse_named_columns(output):
"""Return rows from a table string `output` as a sequence of dicts.
The first row should contain whitespace delimited column names.
Each subsequent row should contain whitespace delimited column values.
Given tabular `output` as found in many k8s commands:
col1_name col2_name ...
col1_row1_val col2_row1_val ...
col1_row2_val col1_row2_val ...
...
Returns [ {col1_name: col1_row1_val, col2_name: col2_row1_val, ...},
{col1_name: col1_row2_val, col2_name: col2_row2_val, ...},
... ]
Each dict in the returned sequence is suitable as a namespace for eval()
"""
lines = output.splitlines()
columns = lines[0].split()
rows = []
for line in lines[1:]:
d = dict(zip(columns, line.split()))
d["_"] = d
rows.append(d)
return rows
def parse_named_rows(output, key="NAME"):
return {"_": {row[key]: row for row in parse_named_columns(output)}}
def parse_raw(output):
"""Just return `output` as a single string assigned to dict key '_'
for reference in assertion expressions.
Returns {'_': output}
"""
return dict(_=output)
def parse_yaml(output):
"""Return the YAML parsing of `output` string. aws commands can
be filtered using the --query parameter to produce more manageable
output before YAML parsing.
"""
return yaml.safe_load(output)
def parse_json(output):
"""Return the JSON parsing of `output` string. aws commands can
be filtered using the --query parameter to produce more manageable
output before JSON parsing.
"""
return json.loads(output)
def parse_none(output):
"""Return the input as the output, i.e. no changes."""
return output
def test_function(parameters):
return yaml.dump(parameters)
class Checker:
"""The Checker class runs a number of tests defined in a `test_spec` string.
Commands
--------
Each Group includes a subprocess CLI command from which the output is captured,
parsed, and checked against various assertions.
Output Parsing
--------------
The command output is parsed using a parser which can be one of
named_columns, named_rows, raw, yaml, json, node_map, or none.
named_columns is ideal for parsing kubectl output in which each row
defines a set of variables as a dict. named_columns requires that
column names and values do not contain spaces; generally this is not
a problem, but not all kubectl output modes satisfy it.
raw simply returns { "_": cmd_output } so _ is used as a variable
in assertions to refer to the entire output string.
yaml and json return parsed command output using their respective
loaders. The --query parameter of the 'aws' commands can be
useful for pre-filtering command output so that a simple direct
parsing is usable in assertions.
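For example, the spec's EFS group runs
aws efs describe-file-systems --output yaml --query FileSystems
so that only the FileSystems list reaches parse_yaml, and each file system
dict can then be used directly as a row namespace in the group's assertions.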
Test Assertions
---------------
A series of assertions are evaluated on the parsed output from each group's command.
Assertions take the form:
simple: <python expression using parsed outputs to define variables, eval must pass.>
ok_rows_expr: <python expression using parsed outputs to define row variables, ok_rows_expr must be True.>
all: <python expression using parsed outputs to define row variables, each row must pass.>
Examples of ok_rows expressions might be:
ok_rows==1
ok_rows>=3
Pseudo code for 'all' is:
ok_rows==len(total output rows)
ok_rows is assigned the number of times the assertion evaluates to True when run
against each of the row namespace dicts. Hence overall test success does not
require every row to pass the assertion.
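As a worked example, suppose a group's parsed output contains five rows and
an assertion reads
ok_rows>=3: STATUS=='Ready'
The condition is evaluated once per row; if four rows satisfy it, ok_rows is 4,
ok_rows>=3 is True, and the check passes even though one row did not.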
The `test_spec` specifies a string of YAML which defines:
Globals:
environment:
- env var1 needed in assertion expressions imported from os.environ
...
constants:
- VAR: VAL a VAR needed in assertion expressions with the spec'd VAL
...
Groups:
- group: <Command Group Name>
command: <UNIX subprocess command string>
parser: <named_columns|named_rows|raw|yaml|json|node_map|none>
assertions:
- name: <Name defining check>
<simple|all|ok_rows_expr>: <python expression>
- name: <Name defining check>
<simple|all|ok_rows_expr>: <python expression>
...
...
NOTE: In the spec, substitutions for output vars, env vars, constants,
variables, and built-in functions occur in two basic ways:
- Using Python's f-string {} formatting. (commands)
- Treated as a variable name to be eval'ed. (assertions)
This is because commands are "".format()'ed but assertions are eval'ed,
each against similar namespaces with the caveat that the command formatting
includes no variables derived from its own output.
if `output_file` is specified, commands are run and outputs are
stored at the spec'ed path, the checker exits w/o running tests.
if `input_file` is specified, it is presumed to be the path to command
output YAML stored by `output_file` and replaces running commands,
checks are run using the stored outputs.
input_file and output_file are mutually exclusive.
if `verbose` is specified then additional assertion-by-assertion,
row-by-row output is generated.
if `groups_regex` is specified, only the group names which can be
searched by the regex are checked. (case insensitive substrings
of group names work).
if `variables` is specified, it should be a comma-separated string
of VAR=VAL pairs, i.e. VAR1=VAL1,VAR2=VAL2,...
These variables are added to the namespace used for running/eval'ing
commands and assertions and override values already defined in Globals.
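For illustration, one might capture command outputs once and replay the checks
offline later (the file name here is hypothetical):
python tools/check_cluster.py --output-file saved_outputs.yaml
python tools/check_cluster.py --input-file saved_outputs.yaml --groups-regex pods --verbose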
""" # noqa: E501
def __init__(
self,
test_spec=CLUSTER_CHECKS,
output_file=None,
input_file=None,
verbose=False,
groups_regex=".+",
exclude_regex="^$",
variables=None,
):
self._output_file = output_file
self._input_file = input_file
self._verbose = verbose
self._groups_regex = groups_regex
self._exclude_regex = exclude_regex
print("===> Loading test spec")
self.loaded_spec = yaml.safe_load(test_spec)
self.variables = (
dict([var.split("=") for var in variables.split(",")]) if variables else []
)
self._outputs = {}
self._errors = 0
self._error_msgs = []
@property
def groups(self):
return self.loaded_spec["Groups"]
@property
def spec_environment(self):
return {
var: os.environ[var]
for var in self.loaded_spec.get("Globals", {}).get("environment", [])
}
@property
def spec_constants(self):
return self.loaded_spec.get("Globals", {}).get("constants", {})
@property
def builtins(self):
result = {
key: getattr(builtins, key) for key in dir(builtins)
} # Python builtins
result.update(
dict(
convert_age=convert_age,
aws_kv_dict=aws_kv_dict,
test_function=test_function,
functools=functools,
pod_logs=self.pod_logs,
)
)
return result
@property
def combined_environment(self):
env = dict()
env.update(self.builtins)
env.update(self.spec_constants)
env.update(self.spec_environment)
env.update(self.variables)
return env
def main(self):
self.setup_outputs()
for check in self.groups:
if re.search(
self._groups_regex, check["group"], re.IGNORECASE
) and not re.search(self._exclude_regex, check["group"], re.IGNORECASE):
self.run_check(check)
if self._output_file:
self.store_outputs()
return self._errors
def setup_outputs(self):
"""Fetch saved commands ouputs from file rather than running commands."""
if self._input_file:
with open(self._input_file) as file:
self._outputs = yaml.safe_load(file)
else:
self._outputs = {}
def store_outputs(self):
"""Store command outputs to file for running offline later."""
print("=" * 80)
print("Saving", repr(self._output_file))
with open(self._output_file, "w+") as file:
yaml.dump(self._outputs, file)
def replace_output(self, check, output):
if check.get("replace_output"):
input_patt = check.get("replace_output").get("input")
output_patt = check.get("replace_output").get("output")
output = re.sub(input_patt, output_patt, output, flags=re.MULTILINE)
return output
def run_check(self, check):
print("=" * 80)
try:
output = self.get_command_output(check)
except Exception as exc:
self.error(
"Failed obtaining command output for group",
repr(check.get("group")),
":",
str(exc),
)
print("=" * 80)
return
if self._output_file:
return
if not output.startswith("FAILED"):
print("-" * 80)
print(output)
print("=" * 80)
self.process_output(check, output)
def process_output(self, check, output):
try:
output = self.replace_output(check, output)
parser = globals()[f"parse_{check['parser']}"]
namespaces = parser(output)
except Exception as exc:
self.error("PARSER failed for", repr(check["group"]), ":", str(exc))
return
if check.get("print_parsing"):
print(namespaces)
for assertion in check.get("assertions", []):
try:
self.check_assertion(check["group"], assertion, namespaces)
except Exception as exc:
self.error(
"EXECUTION failed for",
repr(check["group"]),
":",
repr(assertion["name"]),
":",
str(exc),
)
def get_command_output(self, check):
group = check["group"]
if not self._input_file:
self._outputs[group] = self.compute_outputs(group, check)
return self._outputs[group]
def compute_outputs(self, group, check):
if check.get("command"):
command = check.get("command").format(**self.combined_environment)
elif check.get("function"):
command = check.get("function").format(**self.combined_environment)
else:
raise RuntimeError(f"Group {group} doesn't define an input command.")
print("===> Fetching", repr(group))
print("=" * 80)
try:
if check.get("command"):
outputs = run(command).strip()
else:
outputs = eval( # nosec
command, self.combined_environment, self.combined_environment
)
except Exception as exc:
traceback.print_exc()
outputs = f"FAILED for '{group}': '{command}' : '{str(exc)}'"
self.error(outputs)
return outputs
def check_assertion(self, group_name, assertion, namespaces):
assertion = dict(assertion)
assertion_name = assertion.pop("name")
requirement, condition = list(assertion.items())[0]
# condition = condition.format(**self.combined_environment)
print(f"Checking assertion '{assertion_name}': {requirement} : {condition}")
if requirement == "simple":
self.verify_simple(group_name, assertion_name, namespaces, condition)
elif requirement.startswith(("ok_rows", "all")):
self.verify_rows(
group_name, assertion_name, namespaces, requirement, condition
)
else:
raise ValueError(
f"Unhandled requirement: {requirement} for assertion: {assertion}"
)
print()
def verify_rows(self, group_name, name, namespaces, requirement, condition):
rows = []
for i, namespace in enumerate(namespaces):
self.verbose(f"Checking '{name}' #{i} : {condition} ... ", end="")
if self.eval_condition(namespace, condition):
rows.append(namespace)
self.verbose("OK")
else:
self.verbose("FAILED on row:", namespace)
if requirement == "all":
requirement = f"ok_rows=={len(namespaces)}"
if self.eval_condition(dict(ok_rows=len(rows)), requirement): # nosec
print(f"===> OK '{group_name}' : '{name}'")
else:
self.error(f"FAILED '{group_name}' : '{name}' : {condition}")
def verify_simple(self, group_name, name, namespace, condition):
if self.eval_condition(namespace, condition):
print(f"===> OK '{group_name}' : '{name}'")
else:
self.error(f"FAILED '{group_name}' : '{name}' : {condition}")
self.verbose("Namespace:", namespace)
def eval_condition(self, namespace, condition):
namespace = dict(namespace) # local no-side-effects copy
namespace.update(self.combined_environment)
return eval(condition, {}, namespace) # nosec
def verbose(self, *args, **keys):
if self._verbose:
print(*args, **keys)
def error(self, *args):
self._errors += 1
self._error_msgs.append(" ".join(str(arg) for arg in args))
print("===> ERROR: ", *args)
def show_error_status(self):
print("=" * 80)
print("Overall", self._errors, "errors occurred:")
for msg in self._error_msgs:
print(msg)
def pod_logs(self, log_reach="30m"):
loaded = yaml.safe_load(run("kubectl get pods -A --output yaml"))
pods = [
(pod["metadata"]["namespace"], pod["metadata"]["name"])
for pod in loaded["items"]
]
print("=" * 80)
print("Fetching", len(loaded["items"]), "pod logs")
pod_errors = dict()
for i, (namespace, name) in enumerate(pods):
pod = f"{namespace}:{name}"
print()
output = run(
f"kubectl logs -n {namespace} {name} --since {log_reach} --all-containers --timestamps=True"
)
for line in output.splitlines():
if "error" in line.lower() and "| INFO |" not in line:
self.error(f"FAILED Pod {pod} log:", line)
if pod not in pod_errors:
pod_errors[pod] = []
pod_errors[pod].append(line)
print()
print("-" * 80)
return yaml.dump(
{
"ERRORS": len(pod_errors),
"FAILING_PODS": sorted(list(pod_errors.keys())),
"POD_ERRORS": pod_errors,
}
)
def parse_args():
parser = argparse.ArgumentParser(
description="Perform various cluster and hub checks to automatically detect basic anomalies."
)
parser.add_argument(
"--test-spec",
dest="test_spec",
action="store",
default=None,
help="Custom test specification. Defaults to None meaning use built-in spec.",
)
parser.add_argument(
"--output-file",
dest="output_file",
action="store",
default=None,
help="Filepath to store outputs of test commands.",
)
parser.add_argument(
"--input-file",
dest="input_file",
action="store",
default=None,
help="Filepath to load previously stored test command results.",
)
parser.add_argument(
"--verbose",
dest="verbose",
action="store_true",
help="Include additional output.",
)
parser.add_argument(
"--groups-regex",
dest="groups_regex",
action="store",
default=".+",
help="Select groups to execute based on the specified regex, defaulting to all groups."
" Unique group substrings are valid, |-or patterns together. Case is irrelevant.",
)
parser.add_argument(
"--exclude-regex",
dest="exclude_regex",
action="store",
default="^$",
help="Select groups to skip based on the specified regex, defaulting to no groups."
" Unique group substrings are valid, |-or patterns together. Case is irrelevant.",
)
parser.add_argument(
"--variables",
dest="variables",
action="store",
default=None,
help="Custom override variables which can be used in commands, assertions, etc."
" --variables var1=val1,var2=val2,...",
)
return parser.parse_args()
def main():
"""Parse command line arguments and run the test spec.
Return the number of failing tests or 0 if all tests pass.
"""
args = parse_args()
test_spec = (
open(args.test_spec).read().strip() if args.test_spec else CLUSTER_CHECKS
)
checker = Checker(
test_spec=test_spec,
output_file=args.output_file,
input_file=args.input_file,
verbose=args.verbose,
groups_regex=args.groups_regex,
exclude_regex=args.exclude_regex,
variables=args.variables,
)
errors = checker.main()
checker.show_error_status()
return errors
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jmatuskey/notebook-data-redirector",
"score": 2
} |
#### File: notebook-data-redirector/redirector/sync.py
```python
import logging
import common
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
def lambda_handler(event, context):
ddb_table = common.get_ddb_table()
box_client, _ = common.get_box_client()
root_folder = box_client.folder(common.BOX_FOLDER_ID)
LOGGER.info("Checking files in Box")
shared_file_ids = set()
count = 0
for file in common.iterate_files(root_folder):
count += 1
if common.is_box_file_public(file):
shared_file_ids.add(file.id)
common.put_file_item(ddb_table, file)
else:
common.delete_file_item(ddb_table, file)
LOGGER.info("Processed %s files", count)
LOGGER.info("Checking items in DynamoDB")
count = 0
scan_response = ddb_table.scan()
delete_keys = set()
while True:
for item in scan_response["Items"]:
count += 1
if item["box_file_id"] not in shared_file_ids:
delete_keys.add(item["filename"])
# If the data returned by a scan would exceed 1MB, DynamoDB will begin paging.
# The LastEvaluatedKey field is the placeholder used to request the next page.
if scan_response.get("LastEvaluatedKey"):
scan_response = ddb_table.scan(
ExclusiveStartKey=scan_response["LastEvaluatedKey"]
)
else:
break
for key in delete_keys:
ddb_table.delete_item(Key={"filename": key})
LOGGER.info("Processed %s items", count)
``` |
{
"source": "jmatute/HondurasEducationDashboard",
"score": 2
} |
#### File: jmatute/HondurasEducationDashboard/models.py
```python
from django.db import models
from memoize import memoize, delete_memoized, delete_memoized_verhash
from django.apps import apps
import os
class Indicator(models.Model):
name = models.CharField(max_length=100)
en_name = models.CharField(max_length=100)
sp_name = models.CharField(max_length=100)
en_description = models.CharField(max_length=500)
sp_description = models.CharField(max_length=500)
source = models.CharField(max_length=200, default='')
inner_destination = models.CharField(max_length=100)
def __str__(self):
return self.name
def getSpanishInfo(self):
return self.sp_description
def getEnglishInfo(self):
return self.en_description
def getDataSource(self):
return self.source
def defaultDefinedType(self):
loweredDescription = self.en_description.lower()
wordCombo = [("expenditure","I"), ("primary","P"), ("secondary","S"),("tertiary","T")]
for combo in wordCombo:
if combo[0] in loweredDescription:
return combo[1]
return "G"
class BooleanMapIndicator(models.Model):
indicator = models.ForeignKey(Indicator, on_delete=models.CASCADE)
year = models.IntegerField()
department_name = models.CharField(max_length=100,default='')
num_cases = models.IntegerField()
porcentual = models.DecimalField(max_digits=10,decimal_places=5)
eng_name = models.CharField(max_length=100, default='')
answer = models.CharField(max_length=100, default='')
eng_answer = models.CharField(max_length=100, default='')
def __str__(self):
return self.indicator.name + ": sp: "+ str(self.answer) + " , eng : " + str(self.eng_answer) + "."
class SchoolingIndicator(models.Model):
indicator = models.ForeignKey(Indicator, on_delete=models.CASCADE)
indicator_options = [ ('P','Primary'),('S','Secondary'),('T','Tertiary'), ('I','Investment'),
('G','General')]
type_schooling = models.CharField(max_length=2, choices=indicator_options, blank=False)
year = models.IntegerField()
value = models.DecimalField(max_digits=20,decimal_places=10)
def __str__(self):
return self.indicator.name + ":"+ str(self.year) + " , " + str(self.value)
class ProgressIndicator(models.Model):
indicator = models.ForeignKey(Indicator, on_delete=models.CASCADE)
region_options = [ ('C','Country'),('R','Region'),('D','Development Stage')]
region_type = models.CharField(max_length=2, choices=region_options, blank=False)
year = models.IntegerField()
current_ranking = models.IntegerField(default=-1)
region_name = models.CharField(max_length=150)
value = models.DecimalField(max_digits=20,decimal_places=10)
``` |
{
"source": "jmaunsell/hail",
"score": 2
} |
#### File: batch/test/test_batch.py
```python
import random
import math
import collections
import os
import base64
import secrets
import time
import unittest
import aiohttp
import requests
from hailtop.config import get_deploy_config
from hailtop.auth import service_auth_headers
from hailtop.utils import retry_response_returning_functions
from hailtop.batch_client.client import BatchClient, Job
from .utils import legacy_batch_status
from .failure_injecting_client_session import FailureInjectingClientSession
deploy_config = get_deploy_config()
def poll_until(p, max_polls=None):
i = 0
while max_polls is None or i < max_polls:
x = p()
if x:
return x
# max 4.5s
j = random.randrange(math.floor(1.1 ** min(i, 40)))
time.sleep(0.100 * j)
i = i + 1
raise ValueError(f'poll_until: exceeded max polls: {i} {max_polls}')
class Test(unittest.TestCase):
def setUp(self):
self.client = BatchClient('test')
def tearDown(self):
self.client.close()
def test_job(self):
builder = self.client.create_batch()
j = builder.create_job('ubuntu:18.04', ['echo', 'test'])
b = builder.submit()
status = j.wait()
self.assertTrue('attributes' not in status, (status, j.log()))
self.assertEqual(status['state'], 'Success', (status, j.log()))
self.assertEqual(status['exit_code'], 0, status)
self.assertEqual(j._get_exit_code(status, 'main'), 0, (status, j.log()))
self.assertEqual(j.log()['main'], 'test\n', status)
def test_exit_code_duration(self):
builder = self.client.create_batch()
j = builder.create_job('ubuntu:18.04', ['bash', '-c', 'exit 7'])
b = builder.submit()
status = j.wait()
self.assertEqual(status['exit_code'], 7, status)
assert isinstance(status['duration'], int)
self.assertEqual(j._get_exit_code(status, 'main'), 7, status)
def test_msec_mcpu(self):
builder = self.client.create_batch()
resources = {
'cpu': '100m',
'memory': '375M'
}
# two jobs so the batch msec_mcpu computation is non-trivial
builder.create_job('ubuntu:18.04', ['echo', 'foo'], resources=resources)
builder.create_job('ubuntu:18.04', ['echo', 'bar'], resources=resources)
b = builder.submit()
batch = b.wait()
assert batch['state'] == 'success', batch
batch_msec_mcpu2 = 0
for job in b.jobs():
# I'm dying
job = self.client.get_job(job['batch_id'], job['job_id'])
job = job.status()
# runs at 250mcpu
job_msec_mcpu2 = 250 * max(job['status']['end_time'] - job['status']['start_time'], 0)
# greater than in case there are multiple attempts
assert job['msec_mcpu'] >= job_msec_mcpu2, batch
batch_msec_mcpu2 += job_msec_mcpu2
assert batch['msec_mcpu'] == batch_msec_mcpu2, batch
def test_attributes(self):
a = {
'name': 'test_attributes',
'foo': 'bar'
}
builder = self.client.create_batch()
j = builder.create_job('ubuntu:18.04', ['true'], attributes=a)
builder.submit()
assert(j.attributes() == a)
def test_garbage_image(self):
builder = self.client.create_batch()
j = builder.create_job('dsafaaadsf', ['echo', 'test'])
builder.submit()
status = j.wait()
assert j._get_exit_codes(status) == {'main': None}, status
assert j._get_error(status, 'main') is not None
assert status['state'] == 'Error', status
def test_bad_command(self):
builder = self.client.create_batch()
j = builder.create_job('ubuntu:18.04', ['sleep 5'])
builder.submit()
status = j.wait()
assert j._get_exit_codes(status) == {'main': None}, status
assert j._get_error(status, 'main') is not None
assert status['state'] == 'Error', status
def test_invalid_resource_requests(self):
builder = self.client.create_batch()
resources = {'cpu': '1', 'memory': '250Gi'}
builder.create_job('ubuntu:18.04', ['true'], resources=resources)
with self.assertRaisesRegex(aiohttp.client.ClientResponseError, 'resource requests.*unsatisfiable'):
builder.submit()
builder = self.client.create_batch()
resources = {'cpu': '0', 'memory': '1Gi'}
builder.create_job('ubuntu:18.04', ['true'], resources=resources)
with self.assertRaisesRegex(aiohttp.client.ClientResponseError, 'bad resource request.*cpu cannot be 0'):
builder.submit()
def test_out_of_memory(self):
builder = self.client.create_batch()
resources = {'cpu': '0.1', 'memory': '10M'}
j = builder.create_job('python:3.6-slim-stretch',
['python', '-c', 'x = "a" * 1000**3'],
resources=resources)
builder.submit()
status = j.wait()
assert j._get_out_of_memory(status, 'main')
def test_unsubmitted_state(self):
builder = self.client.create_batch()
j = builder.create_job('ubuntu:18.04', ['echo', 'test'])
with self.assertRaises(ValueError):
j.batch_id
with self.assertRaises(ValueError):
j.id
with self.assertRaises(ValueError):
j.status()
with self.assertRaises(ValueError):
j.is_complete()
with self.assertRaises(ValueError):
j.log()
with self.assertRaises(ValueError):
j.wait()
builder.submit()
with self.assertRaises(ValueError):
builder.create_job('ubuntu:18.04', ['echo', 'test'])
def test_list_batches(self):
tag = secrets.token_urlsafe(64)
b1 = self.client.create_batch(attributes={'tag': tag, 'name': 'b1'})
b1.create_job('ubuntu:18.04', ['sleep', '3600'])
b1 = b1.submit()
b2 = self.client.create_batch(attributes={'tag': tag, 'name': 'b2'})
b2.create_job('ubuntu:18.04', ['echo', 'test'])
b2 = b2.submit()
def assert_batch_ids(expected, q=None):
batches = self.client.list_batches(q)
# list_batches returns all batches for all prev run tests
actual = set([b.id for b in batches]).intersection({b1.id, b2.id})
self.assertEqual(actual, expected)
assert_batch_ids({b1.id, b2.id})
assert_batch_ids({b1.id, b2.id}, f'tag={tag}')
b2.wait()
assert_batch_ids({b1.id}, f'!complete tag={tag}')
assert_batch_ids({b2.id}, f'complete tag={tag}')
assert_batch_ids({b1.id}, f'!success tag={tag}')
assert_batch_ids({b2.id}, f'success tag={tag}')
b1.cancel()
b1.wait()
assert_batch_ids({b1.id}, f'!success tag={tag}')
assert_batch_ids({b2.id}, f'success tag={tag}')
assert_batch_ids(set(), f'!complete tag={tag}')
assert_batch_ids({b1.id, b2.id}, f'complete tag={tag}')
assert_batch_ids({b2.id}, f'tag={tag} name=b2')
def test_list_jobs(self):
b = self.client.create_batch()
j_success = b.create_job('ubuntu:18.04', ['true'])
j_failure = b.create_job('ubuntu:18.04', ['false'])
j_error = b.create_job('ubuntu:18.04', ['sleep 5'], attributes={'tag': 'bar'})
j_running = b.create_job('ubuntu:18.04', ['sleep', '1800'], attributes={'tag': 'foo'})
b = b.submit()
j_success.wait()
j_failure.wait()
j_error.wait()
def assert_job_ids(expected, q=None):
actual = set([j['job_id'] for j in b.jobs(q=q)])
assert actual == expected
assert_job_ids({j_success.job_id}, 'success')
assert_job_ids({j_success.job_id, j_failure.job_id, j_error.job_id}, 'done')
assert_job_ids({j_running.job_id}, '!done')
assert_job_ids({j_running.job_id}, 'tag=foo')
assert_job_ids({j_error.job_id, j_running.job_id}, 'has:tag')
assert_job_ids({j_success.job_id, j_failure.job_id, j_error.job_id, j_running.job_id}, None)
b.cancel()
def test_include_jobs(self):
b1 = self.client.create_batch()
for i in range(2):
b1.create_job('ubuntu:18.04', ['true'])
b1 = b1.submit()
s = b1.status()
assert 'jobs' not in s
def test_fail(self):
b = self.client.create_batch()
j = b.create_job('ubuntu:18.04', ['false'])
b.submit()
status = j.wait()
self.assertEqual(j._get_exit_code(status, 'main'), 1)
def test_running_job_log_and_status(self):
b = self.client.create_batch()
j = b.create_job('ubuntu:18.04', ['sleep', '300'])
b = b.submit()
while True:
if j.status()['state'] == 'Running' or j.is_complete():
break
j.log()
# FIXME after batch1 goes away, check running status
b.cancel()
def test_deleted_job_log(self):
b = self.client.create_batch()
j = b.create_job('ubuntu:18.04', ['echo', 'test'])
b = b.submit()
j.wait()
b.delete()
try:
j.log()
except aiohttp.ClientResponseError as e:
if e.status == 404:
pass
else:
self.assertTrue(False, f"batch should have deleted log {e}")
def test_delete_batch(self):
b = self.client.create_batch()
j = b.create_job('ubuntu:18.04', ['sleep', '30'])
b = b.submit()
b.delete()
# verify doesn't exist
try:
self.client.get_job(*j.id)
except aiohttp.ClientResponseError as e:
if e.status == 404:
pass
else:
raise
def test_cancel_batch(self):
b = self.client.create_batch()
j = b.create_job('ubuntu:18.04', ['sleep', '30'])
b = b.submit()
status = j.status()
assert status['state'] in ('Ready', 'Running'), status
b.cancel()
status = j.wait()
assert status['state'] == 'Cancelled', status
assert 'log' not in status, status
# cancelled job has no log
try:
j.log()
except aiohttp.ClientResponseError as e:
if e.status == 404:
pass
else:
raise
def test_get_nonexistent_job(self):
try:
self.client.get_job(1, 666)
except aiohttp.ClientResponseError as e:
if e.status == 404:
pass
else:
raise
def test_get_job(self):
b = self.client.create_batch()
j = b.create_job('ubuntu:18.04', ['true'])
b.submit()
j2 = self.client.get_job(*j.id)
status2 = j2.status()
assert (status2['batch_id'], status2['job_id']) == j.id
def test_batch(self):
b = self.client.create_batch()
j1 = b.create_job('ubuntu:18.04', ['false'])
j2 = b.create_job('ubuntu:18.04', ['sleep', '1'])
j3 = b.create_job('ubuntu:18.04', ['sleep', '30'])
b = b.submit()
j1.wait()
j2.wait()
b.cancel()
b.wait()
bstatus = legacy_batch_status(b)
assert len(bstatus['jobs']) == 3, bstatus
state_count = collections.Counter([j['state'] for j in bstatus['jobs']])
n_cancelled = state_count['Cancelled']
n_complete = state_count['Error'] + state_count['Failed'] + state_count['Success']
assert n_cancelled <= 1, bstatus
assert n_cancelled + n_complete == 3, bstatus
n_failed = sum([j['exit_code'] > 0 for j in bstatus['jobs'] if j['state'] in ('Failed', 'Error')])
assert n_failed == 1, bstatus
def test_batch_status(self):
b1 = self.client.create_batch()
b1.create_job('ubuntu:18.04', ['true'])
b1 = b1.submit()
b1.wait()
b1s = b1.status()
assert b1s['complete'] and b1s['state'] == 'success', b1s
b2 = self.client.create_batch()
b2.create_job('ubuntu:18.04', ['false'])
b2.create_job('ubuntu:18.04', ['true'])
b2 = b2.submit()
b2.wait()
b2s = b2.status()
assert b2s['complete'] and b2s['state'] == 'failure', b2s
b3 = self.client.create_batch()
b3.create_job('ubuntu:18.04', ['sleep', '30'])
b3 = b3.submit()
b3s = b3.status()
assert not b3s['complete'] and b3s['state'] == 'running', b3s
b3.cancel()
b4 = self.client.create_batch()
b4.create_job('ubuntu:18.04', ['sleep', '30'])
b4 = b4.submit()
b4.cancel()
b4.wait()
b4s = b4.status()
assert b4s['complete'] and b4s['state'] == 'cancelled', b4s
def test_log_after_failing_job(self):
b = self.client.create_batch()
j = b.create_job('ubuntu:18.04', ['/bin/sh', '-c', 'echo test; exit 127'])
b.submit()
status = j.wait()
self.assertTrue('attributes' not in status)
self.assertEqual(status['state'], 'Failed')
self.assertEqual(j._get_exit_code(status, 'main'), 127)
self.assertEqual(j.log()['main'], 'test\n')
self.assertTrue(j.is_complete())
def test_authorized_users_only(self):
endpoints = [
(requests.get, '/api/v1alpha/batches/0/jobs/0', 401),
(requests.get, '/api/v1alpha/batches/0/jobs/0/log', 401),
(requests.get, '/api/v1alpha/batches', 401),
(requests.post, '/api/v1alpha/batches/create', 401),
(requests.post, '/api/v1alpha/batches/0/jobs/create', 401),
(requests.get, '/api/v1alpha/batches/0', 401),
(requests.delete, '/api/v1alpha/batches/0', 401),
(requests.patch, '/api/v1alpha/batches/0/close', 401),
# redirect to auth/login
(requests.get, '/batches', 302),
(requests.get, '/batches/0', 302),
(requests.post, '/batches/0/cancel', 401),
(requests.get, '/batches/0/jobs/0', 302)]
for method, url, expected in endpoints:
full_url = deploy_config.url('batch', url)
r = retry_response_returning_functions(
method, full_url, allow_redirects=False)
assert r.status_code == expected, (full_url, r, expected)
def test_bad_token(self):
token = base64.urlsafe_b64encode(secrets.token_bytes(32)).decode('ascii')
bc = BatchClient('test', _token=token)
try:
b = bc.create_batch()
j = b.create_job('ubuntu:18.04', ['false'])
b.submit()
assert False, j
except aiohttp.ClientResponseError as e:
assert e.status == 401, e
finally:
bc.close()
def test_gcr_image(self):
builder = self.client.create_batch()
j = builder.create_job(os.environ['HAIL_BASE_IMAGE'], ['echo', 'test'])
b = builder.submit()
status = j.wait()
self.assertEqual(status['state'], 'Success', (status, j.log()))
def test_service_account(self):
b = self.client.create_batch()
j = b.create_job(
os.environ['CI_UTILS_IMAGE'],
['/bin/sh', '-c', 'kubectl version'],
service_account={
'namespace': os.environ['HAIL_BATCH_PODS_NAMESPACE'],
'name': 'test-batch-sa'
})
b.submit()
status = j.wait()
assert j._get_exit_code(status, 'main') == 0, status
def test_port(self):
builder = self.client.create_batch()
j = builder.create_job('ubuntu:18.04', ['bash', '-c', '''
echo $HAIL_BATCH_WORKER_PORT
echo $HAIL_BATCH_WORKER_IP
'''], port=5000)
b = builder.submit()
batch = b.wait()
print(j.log())
assert batch['state'] == 'success', batch
def test_timeout(self):
builder = self.client.create_batch()
j = builder.create_job('ubuntu:18.04', ['sleep', '30'], timeout=5)
b = builder.submit()
status = j.wait()
self.assertEqual(status['state'], 'Error', (status, j.log()))
error_msg = j._get_error(status, 'main')
assert error_msg and 'JobTimeoutError' in error_msg
assert j.exit_code(status) is None, status
def test_client_max_size(self):
builder = self.client.create_batch()
for i in range(4):
builder.create_job('ubuntu:18.04',
['echo', 'a' * (900 * 1024)])
builder.submit()
def test_restartable_insert(self):
i = 0
def every_third_time():
nonlocal i
i += 1
if i % 3 == 0:
return True
return False
with FailureInjectingClientSession(every_third_time) as session:
client = BatchClient('test', session=session)
builder = client.create_batch()
for _ in range(9):
builder.create_job('ubuntu:18.04', ['echo', 'a'])
b = builder.submit(max_bunch_size=1)
b = self.client.get_batch(b.id) # get a batch untainted by the FailureInjectingClientSession
batch = b.wait()
assert batch['state'] == 'success', batch
assert len(list(b.jobs())) == 9
def test_create_idempotence(self):
builder = self.client.create_batch()
builder.create_job('ubuntu:18.04', ['/bin/true'])
batch_token = secrets.token_urlsafe(32)
b = builder._create(batch_token=batch_token)
b2 = builder._create(batch_token=batch_token)
assert b.id == b2.id
def test_batch_create_validation(self):
bad_configs = [
# unexpected field fleep
{'billing_project': 'foo', 'n_jobs': 5, 'token': 'baz', 'fleep': 'quam'},
# billing project None/missing
{'billing_project': None, 'n_jobs': 5, 'token': 'baz'},
{'n_jobs': 5, 'token': 'baz'},
# n_jobs None/missing
{'billing_project': 'foo', 'n_jobs': None, 'token': 'baz'},
{'billing_project': 'foo', 'token': 'baz'},
# n_jobs wrong type
{'billing_project': 'foo', 'n_jobs': '5', 'token': 'baz'},
# token None/missing
{'billing_project': 'foo', 'n_jobs': 5, 'token': None},
{'billing_project': 'foo', 'n_jobs': 5},
# attribute key/value None
{'attributes': {'k': None}, 'billing_project': 'foo', 'n_jobs': 5, 'token': 'baz'},
]
url = deploy_config.url('batch', '/api/v1alpha/batches/create')
headers = service_auth_headers(deploy_config, 'batch')
for config in bad_configs:
r = retry_response_returning_functions(
requests.post,
url,
json=config,
allow_redirects=True,
headers=headers)
assert r.status_code == 400, (config, r)
def test_duplicate_parents(self):
batch = self.client.create_batch()
head = batch.create_job('ubuntu:18.04', command=['echo', 'head'])
batch.create_job('ubuntu:18.04', command=['echo', 'tail'], parents=[head, head])
try:
batch = batch.submit()
except aiohttp.ClientResponseError as e:
assert e.status == 400
else:
assert False, f'should receive a 400 Bad Request {batch.id}'
```
#### File: hailtop/batch/backend.py
```python
import abc
import os
import subprocess as sp
import uuid
import time
import copy
from shlex import quote as shq
import webbrowser
from hailtop.config import get_deploy_config, get_user_config
from hailtop.batch_client.client import BatchClient
from .resource import InputResourceFile, JobResourceFile, ResourceGroup
class Backend:
"""
Abstract class for backends.
"""
@abc.abstractmethod
def _run(self, batch, dry_run, verbose, delete_scratch_on_exit, **backend_kwargs):
"""
Execute a batch.
Warning
-------
This method should not be called directly. Instead, use :meth:`.Batch.run`.
"""
return
class LocalBackend(Backend):
"""
Backend that executes batches on a local computer.
Examples
--------
>>> local_backend = LocalBackend(tmp_dir='/tmp/user/')
>>> b = Batch(backend=local_backend)
Parameters
----------
tmp_dir: :obj:`str`, optional
Temporary directory to use.
gsa_key_file: :obj:`str`, optional
Mount a file with a gsa key to `/gsa-key/key.json`. Only used if a
job specifies a docker image. This option will override the value set by
the environment variable `HAIL_BATCH_GSA_KEY_FILE`.
extra_docker_run_flags: :obj:`str`, optional
Additional flags to pass to `docker run`. Only used if a job specifies
a docker image. This option will override the value set by the environment
variable `HAIL_BATCH_EXTRA_DOCKER_RUN_FLAGS`.
"""
def __init__(self, tmp_dir='/tmp/', gsa_key_file=None, extra_docker_run_flags=None):
self._tmp_dir = tmp_dir.rstrip('/')
flags = ''
if extra_docker_run_flags is not None:
flags += extra_docker_run_flags
elif os.environ.get('HAIL_BATCH_EXTRA_DOCKER_RUN_FLAGS') is not None:
flags += os.environ['HAIL_BATCH_EXTRA_DOCKER_RUN_FLAGS']
if gsa_key_file is None:
gsa_key_file = os.environ.get('HAIL_BATCH_GSA_KEY_FILE')
if gsa_key_file is not None:
flags += f' -v {gsa_key_file}:/gsa-key/key.json'
self._extra_docker_run_flags = flags
def _run(self, batch, dry_run, verbose, delete_scratch_on_exit): # pylint: disable=R0915
"""
Execute a batch.
Warning
-------
This method should not be called directly. Instead, use :meth:`.Batch.run`.
Parameters
----------
batch: :class:`.Batch`
Batch to execute.
dry_run: :obj:`bool`
If `True`, don't execute code.
verbose: :obj:`bool`
If `True`, print debugging output.
delete_scratch_on_exit: :obj:`bool`
If `True`, delete temporary directories with intermediate files.
"""
tmpdir = self._get_scratch_dir()
script = ['#!/bin/bash',
'set -e' + ('x' if verbose else ''),  # always fail fast; add -x tracing when verbose
'\n',
'# change to the tmp directory',
f"cd {tmpdir}",
'\n']
copied_input_resource_files = set()
os.makedirs(tmpdir + '/inputs/', exist_ok=True)
if batch.requester_pays_project:
requester_pays_project = f'-u {batch.requester_pays_project}'
else:
requester_pays_project = ''
def copy_input(job, r):
if isinstance(r, InputResourceFile):
if r not in copied_input_resource_files:
copied_input_resource_files.add(r)
if r._input_path.startswith('gs://'):
return [f'gsutil {requester_pays_project} cp {shq(r._input_path)} {shq(r._get_path(tmpdir))}']
absolute_input_path = os.path.realpath(r._input_path)
dest = r._get_path(tmpdir)
dir = os.path.dirname(dest)
os.makedirs(dir, exist_ok=True)
if job._image is not None: # pylint: disable-msg=W0640
return [f'cp {shq(absolute_input_path)} {shq(dest)}']
return [f'ln -sf {shq(absolute_input_path)} {shq(dest)}']
return []
assert isinstance(r, JobResourceFile)
return []
def copy_external_output(r):
def _cp(dest):
if not dest.startswith('gs://'):
dest = os.path.abspath(dest)
directory = os.path.dirname(dest)
os.makedirs(directory, exist_ok=True)
return 'cp'
return f'gsutil {requester_pays_project} cp'
if isinstance(r, InputResourceFile):
return [f'{_cp(dest)} {shq(r._input_path)} {shq(dest)}'
for dest in r._output_paths]
assert isinstance(r, JobResourceFile)
return [f'{_cp(dest)} {r._get_path(tmpdir)} {shq(dest)}'
for dest in r._output_paths]
def symlink_input_resource_group(r):
symlinks = []
if isinstance(r, ResourceGroup) and r._source is None:
for name, irf in r._resources.items():
src = irf._get_path(tmpdir)
dest = f'{r._get_path(tmpdir)}.{name}'
symlinks.append(f'ln -sf {shq(src)} {shq(dest)}')
return symlinks
write_inputs = [x for r in batch._input_resources for x in copy_external_output(r)]
if write_inputs:
script += ["# Write input resources to output destinations"]
script += write_inputs
script += ['\n']
for job in batch._jobs:
os.makedirs(f'{tmpdir}/{job._job_id}/', exist_ok=True)
script.append(f"# {job._job_id}: {job.name if job.name else ''}")
script += [x for r in job._inputs for x in copy_input(job, r)]
script += [x for r in job._mentioned for x in symlink_input_resource_group(r)]
resource_defs = [r._declare(tmpdir) for r in job._mentioned]
env = [f'export {k}={v}' for k, v in job._env.items()]
if job._image:
defs = '; '.join(resource_defs) + '; ' if resource_defs else ''
env = '; '.join(env) + '; ' if env else ''
cmd = " && ".join(f'{{\n{x}\n}}' for x in job._command)
memory = f'-m {job._memory}' if job._memory else ''
cpu = f'--cpus={job._cpu}' if job._cpu else ''
script += [f"docker run "
f"{self._extra_docker_run_flags} "
f"-v {tmpdir}:{tmpdir} "
f"-w {tmpdir} "
f"{memory} "
f"{cpu} "
f"{job._image} /bin/bash "
f"-c {shq(env + defs + cmd)}",
'\n']
else:
script += env
script += resource_defs
script += job._command
script += [x for r in job._external_outputs for x in copy_external_output(r)]
script += ['\n']
script = "\n".join(script)
if dry_run:
print(script)
else:
try:
sp.check_call(script, shell=True)
except sp.CalledProcessError as e:
print(e)
print(e.output)
raise
finally:
if delete_scratch_on_exit:
sp.run(f'rm -rf {tmpdir}', shell=True)
print('Batch completed successfully!')
def _get_scratch_dir(self):
def _get_random_name():
dir = f'{self._tmp_dir}/batch/{uuid.uuid4().hex[:6]}'
if os.path.isdir(dir):
return _get_random_name()
os.makedirs(dir, exist_ok=True)
return dir
return _get_random_name()
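# A minimal usage sketch (not part of this module): assuming the front-end
# `Batch` class and its `new_job`/`command` API referenced by the docstrings
# above, a LocalBackend would typically be driven like this:
#
#     import hailtop.batch as hb
#     backend = LocalBackend(tmp_dir='/tmp/user/')
#     b = hb.Batch(backend=backend)
#     j = b.new_job(name='hello')
#     j.command('echo "hello world"')
#     b.run()
#
# Only the LocalBackend constructor arguments are defined in this file; the
# `hb.Batch`/`new_job` names above are assumptions based on the surrounding code.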
class ServiceBackend(Backend):
"""Backend that executes batches on Hail's Batch Service on Google Cloud.
Examples
--------
>>> service_backend = ServiceBackend('my-billing-account', 'my-bucket') # doctest: +SKIP
>>> b = Batch(backend=service_backend) # doctest: +SKIP
>>> b.run() # doctest: +SKIP
>>> service_backend.close() # doctest: +SKIP
If the Hail configuration parameters batch/billing_project and
batch/bucket were previously set with ``hailctl config set``, then
one may elide the `billing_project` and `bucket` parameters.
>>> service_backend = ServiceBackend()
>>> b = Batch(backend=service_backend)
>>> b.run() # doctest: +SKIP
>>> service_backend.close()
Parameters
----------
billing_project: :obj:`str`, optional
Name of billing project to use.
bucket: :obj:`str`, optional
Name of bucket to use. Should not include the ``gs://``
prefix.
"""
def __init__(self, billing_project: str = None, bucket: str = None):
if billing_project is None:
billing_project = get_user_config().get('batch', 'billing_project', fallback=None)
if billing_project is None:
raise ValueError(
f'the billing_project parameter of ServiceBackend must be set '
f'or run `hailctl config set batch/billing_project '
f'MY_BILLING_PROJECT`')
self._batch_client = BatchClient(billing_project)
if bucket is None:
bucket = get_user_config().get('batch', 'bucket', fallback=None)
if bucket is None:
raise ValueError(
f'the bucket parameter of ServiceBackend must be set '
f'or run `hailctl config set batch/bucket '
f'MY_BUCKET`')
self._bucket_name = bucket
def close(self):
"""
Close the connection with the Batch Service.
Notes
-----
This method should be called after executing your batches at the
end of your script.
"""
self._batch_client.close()
def _run(self,
batch,
dry_run,
verbose,
delete_scratch_on_exit,
wait=True,
open=False,
disable_progress_bar=False,
callback=None): # pylint: disable-msg=too-many-statements
"""Execute a batch.
Warning
-------
This method should not be called directly. Instead, use :meth:`.Batch.run`
and pass :class:`.ServiceBackend` specific arguments as key-word arguments.
Parameters
----------
batch: :class:`.Batch`
Batch to execute.
dry_run: :obj:`bool`
If `True`, don't execute code.
verbose: :obj:`bool`
If `True`, print debugging output.
delete_scratch_on_exit: :obj:`bool`
If `True`, delete temporary directories with intermediate files.
wait: :obj:`bool`, optional
If `True`, wait for the batch to finish executing before returning.
open: :obj:`bool`, optional
If `True`, open the UI page for the batch.
disable_progress_bar: :obj:`bool`, optional
If `True`, disable the progress bar.
callback: :obj:`str`, optional
If not `None`, a URL that will receive at most one POST request
after the entire batch completes.
"""
build_dag_start = time.time()
token = uuid.uuid4().hex[:6]
remote_tmpdir = f'gs://{self._bucket_name}/batch/{token}'
local_tmpdir = f'/io/batch/{token}'
default_image = 'ubuntu:latest'
attributes = copy.deepcopy(batch.attributes)
if batch.name is not None:
attributes['name'] = batch.name
bc_batch = self._batch_client.create_batch(attributes=attributes, callback=callback)
n_jobs_submitted = 0
used_remote_tmpdir = False
job_to_client_job_mapping = {}
jobs_to_command = {}
commands = []
bash_flags = 'set -e' + ('x' if verbose else '')
activate_service_account = 'gcloud -q auth activate-service-account ' \
'--key-file=/gsa-key/key.json'
def copy_input(r):
if isinstance(r, InputResourceFile):
return [(r._input_path, r._get_path(local_tmpdir))]
assert isinstance(r, JobResourceFile)
return [(r._get_path(remote_tmpdir), r._get_path(local_tmpdir))]
def copy_internal_output(r):
assert isinstance(r, JobResourceFile)
return [(r._get_path(local_tmpdir), r._get_path(remote_tmpdir))]
def copy_external_output(r):
if isinstance(r, InputResourceFile):
return [(r._input_path, dest) for dest in r._output_paths]
assert isinstance(r, JobResourceFile)
return [(r._get_path(local_tmpdir), dest) for dest in r._output_paths]
def symlink_input_resource_group(r):
symlinks = []
if isinstance(r, ResourceGroup) and r._source is None:
for name, irf in r._resources.items():
src = irf._get_path(local_tmpdir)
dest = f'{r._get_path(local_tmpdir)}.{name}'
symlinks.append(f'ln -sf {shq(src)} {shq(dest)}')
return symlinks
write_external_inputs = [x for r in batch._input_resources for x in copy_external_output(r)]
if write_external_inputs:
def _cp(src, dst):
return f'gsutil -m cp -R {shq(src)} {shq(dst)}'
write_cmd = f'''
{bash_flags}
{activate_service_account}
{' && '.join([_cp(*files) for files in write_external_inputs])}
'''
if dry_run:
commands.append(write_cmd)
else:
j = bc_batch.create_job(image='google/cloud-sdk:237.0.0-alpine',
command=['/bin/bash', '-c', write_cmd],
attributes={'name': 'write_external_inputs'})
jobs_to_command[j] = write_cmd
n_jobs_submitted += 1
for job in batch._jobs:
inputs = [x for r in job._inputs for x in copy_input(r)]
outputs = [x for r in job._internal_outputs for x in copy_internal_output(r)]
if outputs:
used_remote_tmpdir = True
outputs += [x for r in job._external_outputs for x in copy_external_output(r)]
symlinks = [x for r in job._mentioned for x in symlink_input_resource_group(r)]
env_vars = {
**job._env,
**{r._uid: r._get_path(local_tmpdir) for r in job._mentioned}}
if job._image is None:
if verbose:
print(f"Using image '{default_image}' since no image was specified.")
make_local_tmpdir = f'mkdir -p {local_tmpdir}/{job._job_id}'
job_command = [cmd.strip() for cmd in job._command]
prepared_job_command = (f'{{\n{x}\n}}' for x in job_command)
cmd = f'''
{bash_flags}
{make_local_tmpdir}
{"; ".join(symlinks)}
{" && ".join(prepared_job_command)}
'''
if dry_run:
commands.append(cmd)
continue
parents = [job_to_client_job_mapping[j] for j in job._dependencies]
attributes = copy.deepcopy(job.attributes)
if job.name:
attributes['name'] = job.name
resources = {}
if job._cpu:
resources['cpu'] = job._cpu
if job._memory:
resources['memory'] = job._memory
j = bc_batch.create_job(image=job._image if job._image else default_image,
command=['/bin/bash', '-c', cmd],
parents=parents,
attributes=attributes,
resources=resources,
input_files=inputs if len(inputs) > 0 else None,
output_files=outputs if len(outputs) > 0 else None,
pvc_size=job._storage,
always_run=job._always_run,
timeout=job._timeout,
gcsfuse=job._gcsfuse if len(job._gcsfuse) > 0 else None,
env=env_vars,
requester_pays_project=batch.requester_pays_project)
n_jobs_submitted += 1
job_to_client_job_mapping[job] = j
jobs_to_command[j] = cmd
if dry_run:
print("\n\n".join(commands))
return None
if delete_scratch_on_exit and used_remote_tmpdir:
parents = list(jobs_to_command.keys())
rm_cmd = f'gsutil -m rm -r {remote_tmpdir}'
cmd = f'''
{bash_flags}
{activate_service_account}
{rm_cmd}
'''
j = bc_batch.create_job(
image='google/cloud-sdk:237.0.0-alpine',
command=['/bin/bash', '-c', cmd],
parents=parents,
attributes={'name': 'remove_tmpdir'},
always_run=True)
jobs_to_command[j] = cmd
n_jobs_submitted += 1
if verbose:
print(f'Built DAG with {n_jobs_submitted} jobs in {round(time.time() - build_dag_start, 3)} seconds.')
submit_batch_start = time.time()
bc_batch = bc_batch.submit(disable_progress_bar=disable_progress_bar)
jobs_to_command = {j.id: cmd for j, cmd in jobs_to_command.items()}
if verbose:
print(f'Submitted batch {bc_batch.id} with {n_jobs_submitted} jobs in {round(time.time() - submit_batch_start, 3)} seconds:')
for jid, cmd in jobs_to_command.items():
print(f'{jid}: {cmd}')
print('')
deploy_config = get_deploy_config()
url = deploy_config.url('batch', f'/batches/{bc_batch.id}')
print(f'Submitted batch {bc_batch.id}, see {url}')
if open:
webbrowser.open(url)
if wait:
print(f'Waiting for batch {bc_batch.id}...')
status = bc_batch.wait()
print(f'batch {bc_batch.id} complete: {status["state"]}')
return bc_batch
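# Usage sketch: as the _run docstring above states, ServiceBackend-specific options
# (`wait`, `open`, `disable_progress_bar`, `callback`) are not passed to _run directly
# but forwarded through Batch.run as keyword arguments, roughly:
#
#     backend = ServiceBackend('my-billing-account', 'my-bucket')
#     b = Batch(backend=backend)
#     b.run(wait=False, disable_progress_bar=True)   # kwargs reach ServiceBackend._run
#     backend.close()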
``` |
{
"source": "jmauriciowebdev/imgFromX",
"score": 3
} |
#### File: jmauriciowebdev/imgFromX/imgFromX.py
```python
from tkinter.filedialog import *
from tkinter import *
from pdf2image import *
import zipfile
import filetype
import fitz
Tk().withdraw()
filename = askopenfilename()
def getFiles(path):
EmbeddedFiles = zipfile.ZipFile(path).namelist()
ImageFiles = [F for F in EmbeddedFiles if F.count('.jpg') or F.count('.jpeg') or F.count('.png') ]
for Image in ImageFiles:
zipfile.ZipFile(path).extract(Image)
def fromPdf(path):
i = 0
doc = fitz.open(path)
while i < doc.pageCount:
try:
page = doc.loadPage(i) #number of page
pix = page.getPixmap()
output = "outfile"+str(i)+".png"
pix.writePNG(output)
i += 1
except:
break
#getFiles(filename)
try:
if filetype.guess(filename).extension == 'pdf':
fromPdf(filename)
else:
getFiles(filename)
except AttributeError:
print("You selected an unsupported file type")
``` |
{
"source": "jmaurodev/reserva_material",
"score": 2
} |
#### File: reserva_material/views/pdfgen.py
```python
from django.http import HttpResponse
from django.db.models import Count
from django.contrib.auth.decorators import login_required
from reserva_material.models import Material, Cautela, Pessoa
from reportlab.pdfgen import canvas
from reportlab.platypus import Table, TableStyle
from reportlab.lib.pagesizes import A4
from reportlab.lib import colors
from datetime import datetime, timedelta
width, height = A4
estilo_tabela = TableStyle([
('FONTNAME', (0,0), (-1,1), 'Times-Bold'),
('FONTNAME', (0,1), (-1,-1), 'Times-Roman'),
('BOX', (0,0), (-1,-1), 2, colors.black),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,0), 2, colors.black),
('INNERGRID', (0,0), (-1,0), 2, colors.black),
('ALIGN', (0,0), (-1,-1), 'CENTER'),
('VALIGN', (0,0), (-1,-1), 'MIDDLE')
])
def gerar_pdf(request, titulo):
datahora = datetime.now()
response = HttpResponse(content_type='application/pdf')
filename = '%s_%s' % (titulo, datahora.strftime("%d-%m-%Y %H:%M:%S"))
response['Content-Disposition'] = 'attachment; filename="%s.pdf"' % (filename)
c = canvas.Canvas(response, pagesize=A4)
c.setFont('Times-Roman', 12)
return response, c, datahora
def gerar_cabecalho(canvas):
canvas.setFont('Times-Bold', 12)
canvas.drawImage('reserva_material/static/config/selo.png', width/2-50, height-100, 100, 100)
posicao_y = height-120
try:
subordinacao = open('reserva_material/static/config/cabecalho.txt', 'r')
except:
return canvas, posicao_y
for linha in subordinacao:
canvas.drawCentredString(width/2, posicao_y, linha[:-1])
posicao_y -= 10
posicao_y -= 10
return canvas, posicao_y
def gerar_texto_cautela(canvas, posicao_y):
canvas.setFont('Times-Roman', 12)
texto = open('reserva_material/static/config/texto_cautela.txt', 'r')
for linha in texto:
canvas.drawString(30, posicao_y, linha[:-1])
posicao_y -= 10
posicao_y -= 10
return canvas, posicao_y
def gerar_tabela(canvas, posicao_y):
header = ['Nome do Material', 'Total', 'Rsv?', 'Cautelado?', 'Mnt?', 'Indisp?']
data = []
data.append(header)
material = Material.objects.values('nome_material').annotate(total=Count('nome_material'))
for item in material:
nome = item.get('nome_material')
qtd_total = item.get('total')
qtd_em_reserva = len(Material.objects.filter(nome_material=nome, em_reserva=True))
qtd_em_cautela = len(Material.objects.filter(nome_material=nome, em_cautela=True))
qtd_em_manutencao = len(Material.objects.filter(nome_material=nome, em_manutencao=True))
qtd_indisponivel = len(Material.objects.filter(nome_material=nome, indisponivel=True))
data.append([nome, qtd_total, qtd_em_reserva, qtd_em_cautela, qtd_em_manutencao, qtd_indisponivel])
table = Table(data)
table.setStyle(estilo_tabela)
table.wrapOn(canvas, width, height)
table.drawOn(canvas, 20, posicao_y-20*len(data))
return canvas
def gerar_rodape(canvas, texto):
canvas.drawRightString(width-10, 10, texto)
return canvas
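# Note: this gerar_rodape(canvas, texto) variant is shadowed by the
# gerar_rodape(canvas, datahora) definition further below, so only the
# later one is actually used by estruturar_pdf.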
def gerar_tabela_cautela(canvas, posicao_y, request):
header = ['Quantidade', 'Nome do Material', 'Número de Série', 'Alterações']
data = []
data.append(header)
pessoa = Pessoa.objects.get(identidade_militar=request.POST.get('identidade_retira'))
lista_materiais = request.POST.getlist('material_cautelado')
for item in lista_materiais:
item = Material.objects.get(id=item)
quantidade = 1
nome = item.nome_material
numero_serie = item.numero_serie
alteracoes = item.descricao
if not alteracoes:
alteracoes = '-'
data.append([quantidade, nome, numero_serie, alteracoes])
table = Table(data)
table.setStyle(estilo_tabela)
table.wrapOn(canvas, width, height)
table.drawOn(canvas, 20, posicao_y-20*len(data))
return canvas
def gerar_assinatura(canvas, request):
posicao_y = 50
pessoa = Pessoa.objects.get(identidade_militar=request.POST.get('identidade_retira'))
canvas.setFont('Times-Bold', 12)
texto = (
'_________________________________________________',
'%s - %s' % (pessoa.nome_completo, pessoa.posto_graduacao),
'',
)
for linha in texto:
canvas.drawCentredString(width/2, posicao_y, linha)
posicao_y -= 20
posicao_y -= 20
canvas.setFont('Times-Roman', 12)
return canvas
def gerar_rodape(canvas, datahora):
canvas.drawString(10, 10, 'INÍCIO: ' + datahora.strftime("%d-%m-%Y %H:%M:%S"))
datahora = datahora + timedelta(days=30)
canvas.drawRightString(width-10, 10, 'VENCIMENTO: ' + datahora.strftime("%d-%m-%Y %H:%M:%S"))
return canvas
def estruturar_pdf(canvas, tipo, datahora, request):
canvas, posicao_y = gerar_cabecalho(canvas)
if tipo == 'CAUTELA':
canvas, posicao_y = gerar_texto_cautela(canvas, posicao_y)
canvas = gerar_tabela_cautela(canvas, posicao_y, request)
canvas = gerar_rodape(canvas, datahora)
canvas = gerar_assinatura(canvas, request)
else:
canvas.drawCentredString(width/2, posicao_y, 'Pronto da reserva de material do Pelotão Posto de Comando')
canvas = gerar_tabela(canvas, posicao_y)
canvas.drawRightString(width-10, 10, 'DATA: ' + datahora.strftime("%d-%m-%Y %H:%M:%S"))
return canvas
@login_required
def imprimir_pronto(request):
tipo = 'PRONTO'
response, c, datahora = gerar_pdf(request, tipo)
c = estruturar_pdf(c, tipo, datahora, request)
c.showPage()
c.save()
return response
@login_required
def imprimir_cautela(request):
tipo = 'CAUTELA'
response, c, datahora = gerar_pdf(request, tipo)
c = estruturar_pdf(c, tipo, datahora, request)
c.showPage()
c.save()
return response
``` |
{
"source": "jmausolf/White_House_Speeches",
"score": 3
} |
#### File: White_House_Speeches/Python_Scripts/getparentURL.py
```python
def getURL(year, month):
base_url = "http://www.whitehouse.gov/briefing-room/Speeches-and-Remarks/"+year+"/"+month
print base_url
def getyrURL(year):
base_url = "http://www.whitehouse.gov/briefing-room/Speeches-and-Remarks/"+year+"/"
return base_url
def getparentURLs(yr1=2009, yr2=2016, month1=1, month2=12):
import itertools
years = ['2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016']
months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
index_yr1 = yr1-2009
index_yr2 = yr2-2009
index_mo1 = month1-1
index_mo2 = month2-1
req_years = years[index_yr1:index_yr2+1]
req_months = months[index_mo1:index_mo2+1]
x = (list(itertools.product(req_years, req_months)))
f = open('parentURLs.csv', 'w')
try:
f.write(u'PARENT_URLS\n')
for year in req_years:
urls = getyrURL(year)
for month in req_months:
full_url = urls+month
f.write(u'%s\n' % (full_url))
finally:
f.close()
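# Example call (illustration only, not in the original script): write the parent
# URLs for every Speeches-and-Remarks listing page from January 2009 through
# December 2016 to parentURLs.csv:
#
#     getparentURLs(2009, 2016, 1, 12)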
```
#### File: White_House_Speeches/Python_Scripts/Length_Speech_Parser.py
```python
from Speech_Parser_Quality_Check import *
def WHT(url):
"""Prints Text Output for a given URL from Whitehouse Speeches and Remarks"""
import urllib2,sys, random
import os
from bs4 import BeautifulSoup
soup = BeautifulSoup(urllib2.urlopen(url).read())
# Get URL
url2 = "Cite: \n"+url
# Get Release
Release = soup.find("div", {"class":"panel-pane pane-custom pane-1"})
raw_release = Release.get_text()
release = raw_release
# Get Title
Title = soup.find("div", {"class":"panel-pane pane-node-title"})
title = Title.get_text()
# Get Paragraph Body
content = soup.find("div", {"class":"field-items"})
paragraph = ["".join(x.findAll(text=True)) for x in content.findAll("p")]
paragraph_body = "\n\n%s" % ("\n\n".join(paragraph))
# Perform Quality Check on Parsed Speech
speech_parser_QC(paragraph_body, release, url)
#Get File ID - Date & Time
#Date
year_id = url[43:47]
month_id = url[48:50]
day_id = url[51:53]
date_id = year_id+'-'+month_id+'-'+day_id
#Random ID
randID1 = str(random.randrange(6, 10000, 1))
randID2 = str(random.randrange(6, 10000, 1))
try:
path1 = date_id+"_"+"ID1"+".txt"
path2 = date_id+"_"+"ID2"+".txt"
path3 = date_id+"_"+"ID3"+".txt"
path4 = date_id+"_"+"ID4"+".txt"
path5 = date_id+"_"+"ID5"+".txt"
if os.path.isfile(path1) == False:
#print "no file ID1 found, create ID1"
f = open(date_id+"_"+"ID1"+".txt", 'w')
f.write(url2.encode('utf-8'))
f.write(release.encode('utf-8'))
f.write(title.encode('utf-8'))
f.write(paragraph_body.encode('utf-8'))
f.close()
return
elif os.path.isfile(path1) == True:
#print "found file ID1, check for ID2"
if os.path.isfile(path2) == False:
print "found ID1, no file ID2 found, make ID2"
f = open(date_id+"_"+"ID2"+".txt", 'w')
f.write(url2.encode('utf-8'))
f.write(release.encode('utf-8'))
f.write(title.encode('utf-8'))
f.write(paragraph_body.encode('utf-8'))
f.close()
return
elif os.path.isfile(path2) == True:
#print "found file ID2, check for ID3"
if os.path.isfile(path3) == False:
print "found IDs 1-2, no file ID3 found, make ID3"
f = open(date_id+"_"+"ID3"+".txt", 'w')
f.write(url2.encode('utf-8'))
f.write(release.encode('utf-8'))
f.write(title.encode('utf-8'))
f.write(paragraph_body.encode('utf-8'))
f.close()
return
elif os.path.isfile(path3) == True:
#print "found file ID3, check for ID4"
if os.path.isfile(path4) == False:
print "found IDs 1-3, no file ID4 found, make ID4"
f = open(date_id+"_"+"ID4"+".txt", 'w')
f.write(url2.encode('utf-8'))
f.write(release.encode('utf-8'))
f.write(title.encode('utf-8'))
f.write(paragraph_body.encode('utf-8'))
f.close()
return
elif os.path.isfile(path4) == True:
#print "found file ID4, check for ID5"
if os.path.isfile(path5) == False:
print "found IDs 1-4, no file ID5 found, make ID5"
f = open(date_id+"_"+"ID5"+".txt", 'w')
f.write(url2.encode('utf-8'))
f.write(release.encode('utf-8'))
f.write(title.encode('utf-8'))
f.write(paragraph_body.encode('utf-8'))
f.close()
return
elif os.path.isfile(path5) == True:
print "found IDs 1-5, create random ID"
f = open(date_id+"_"+"ID"+randID1+"-"+randID2+".txt", 'w')
f.write(url2.encode('utf-8'))
f.write(release.encode('utf-8'))
f.write(title.encode('utf-8'))
f.write(paragraph_body.encode('utf-8'))
f.close()
return
finally:
pass
##Test URLS
#2014
#url = "http://www.whitehouse.gov/the-press-office/2014/01/22/remarks-president-meeting-presidential-commission-election-administratio"
url = "http://www.whitehouse.gov/the-press-office/remarks-president-qa-session-closing-fiscal-responsibility-summit-2-23-09"
#url = "http://www.whitehouse.gov/the-press-office/remarks-president-senate-passage-health-insurance-reform"
#url = "http://www.whitehouse.gov/the-press-office/press-availability-president-obama-and-prime-minister-rudd-australia"
WHT(url)
```
#### File: White_House_Speeches/Python_Scripts/month.py
```python
def month(string):
"""Convert month string to numeric string."""
#Uppercase
if string == "January":
out = "01"
if string == "February":
out = "02"
if string == "March":
out = "03"
if string == "April":
out = "04"
if string == "May":
out = "05"
if string == "June":
out = "06"
if string == "July":
out = "07"
if string == "August":
out = "08"
if string == "September":
out = "09"
if string == "October":
out = "10"
if string == "November":
out = "11"
if string == "December":
out = "12"
#Lowercase
if string == "january":
out = "01"
if string == "february":
out = "02"
if string == "march":
out = "03"
if string == "april":
out = "04"
if string == "may":
out = "05"
if string == "june":
out = "06"
if string == "july":
out = "07"
if string == "august":
out = "08"
if string == "september":
out = "09"
if string == "october":
out = "10"
if string == "november":
out = "11"
if string == "december":
out = "12"
return out
``` |
{
"source": "JMAV14/data-2022-1",
"score": 3
} |
#### File: data-2022-1/source/etl.py
```python
import os
import requests
import pandas as pd
from source.plot import GeneratePlot
from tqdm import tqdm
class ETL:
@staticmethod
def extract(update=False)->pd.DataFrame:
response_positivos = requests.get('https://cloud.minsa.gob.pe/s/AC2adyLkHCKjmfm/download')
if response_positivos.status_code == 200: # 200 means OK
with open('datos_minsa.csv', 'wb') as csv_file:
csv_file.write(response_positivos.content)
else:
print("fail total, no debio llegar aqui")
df = pd.read_csv('datos_minsa.csv', sep=';')
os.remove('datos_minsa.csv')
return df
@staticmethod
def transform(df:pd.DataFrame)->pd.DataFrame:
pivot = pd.pivot_table(df,
index=["FECHA_RESULTADO"],
columns=['DEPARTAMENTO'],
aggfunc=['size'],
fill_value=0)
departamentos = ['AMAZONAS','ANCASH','APURIMAC','AREQUIPA','AYACUCHO','CAJAMARCA','CALLAO','CUSCO','HUANCAVELICA','HUANUCO','ICA','JUNIN','LA LIBERTAD','LAMBAYEQUE','LIMA','LORETO','MADRE DE DIOS','MOQUEGUA','PASCO','PIURA','PUNO','SAN MARTIN','TACNA','TUMBES','UCAYALI']
#departamentos = load_form_yaml("departamentos")
for departamento in tqdm(departamentos, ncols=50):
series = pivot["size"][departamento]
GeneratePlot.compute(series, departamento)
"""
pivot["size"]["LIMA"]
pivot["size"]["AREQUIPA"]
...
...
...
"""
@staticmethod
def load():
return True
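# Usage sketch (the actual entry point is not part of this file): download the
# MINSA positives CSV, pivot case counts by date and department, and render one
# plot per department through GeneratePlot.compute:
#
#     df = ETL.extract()
#     ETL.transform(df)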
``` |
{
"source": "JMax45/JMax-Encryption-Tools",
"score": 2
} |
#### File: design/popup/about_jet.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_popup_about_jet(object):
def setupUi(self, popup_about_jet):
popup_about_jet.setObjectName("popup_about_jet")
popup_about_jet.resize(534, 251)
self.centralwidget = QtWidgets.QWidget(popup_about_jet)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser.setObjectName("textBrowser")
self.verticalLayout.addWidget(self.textBrowser)
popup_about_jet.setCentralWidget(self.centralwidget)
self.retranslateUi(popup_about_jet)
QtCore.QMetaObject.connectSlotsByName(popup_about_jet)
def retranslateUi(self, popup_about_jet):
_translate = QtCore.QCoreApplication.translate
popup_about_jet.setWindowTitle(_translate("popup_about_jet", "About JET"))
self.textBrowser.setHtml(_translate("popup_about_jet", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Ubuntu\'; font-size:11pt; font-weight:400; font-style:normal;\">\n"
"<p align=\"center\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:20pt; font-weight:600;\">JET</span></p>\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">This program was developed by one of the founders of JZ-Software.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">It was initially created for personal purposes but then we wanted to improve it by adding new encryption methods and new features.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Now it\'s one of our best projects and we hope you enjoy the user experience as we have enjoyed programming this software.</p>\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">Of course this program is still under development and will be improved over time, this process is a bit long as we have other personal things to do and we take care of the programming only in the free time.</p></body></html>"))
```
#### File: JMax45/JMax-Encryption-Tools/main.py
```python
import sys
import json
from PyQt5 import QtWidgets, QtTest
from PyQt5.Qt import QApplication, QClipboard, QFileDialog
from design import design
from design.popup.about_jet import Ui_popup_about_jet
from design.popup.encryptTXT import Ui_encryptTXT
from design.popup.pgen import Ui_pgen
from methods.morse import *
from methods.caesar import *
from methods.vigenere import *
from methods.substitution import *
to_encrypt = ("")
to_decrypt = ("")
encryption_key = ("")
save_encryption_method = ("")
class ExampleApp(QtWidgets.QMainWindow, design.Ui_MainWindow):
def crypt(self):
caesar_radio = self.radioButton_2.isChecked()
morse_radio = self.radioButton.isChecked()
vigenere_radio = self.radioButton_3.isChecked()
substitution_radio = self.radioButton_4.isChecked()
if caesar_radio + morse_radio + vigenere_radio + substitution_radio == 0:
self.textEdit_2.setText("Choose an encryption metod")
QtTest.QTest.qWait(1000)
self.textEdit_2.setText("")
empty_check = self.textEdit.toPlainText()
def empty_check_true():
self.textEdit_2.setText("The text field is empty")
QtTest.QTest.qWait(1000)
self.textEdit_2.setText("")
if caesar_radio == True:
if empty_check == "":
empty_check_true()
else:
global to_encrypt
to_encrypt = self.textEdit.toPlainText()
caesar_crypt()
from methods.caesar import encrypted_text
self.textEdit_2.setText(encrypted_text)
if morse_radio == True:
if empty_check == "":
empty_check_true()
else:
to_encrypt = self.textEdit.toPlainText()
morse_crypt()
from methods.morse import encrypted_text
self.textEdit_2.setText(encrypted_text)
if vigenere_radio == True:
if empty_check == "":
empty_check_true()
else:
to_encrypt = self.textEdit.toPlainText()
vigenere_crypt()
from methods.vigenere import encrypted_text,encryption_key
self.textEdit_2.setText(encrypted_text)
self.lineEdit.setText(encryption_key)
if substitution_radio == True:
if empty_check == "":
empty_check_true()
else:
to_encrypt = self.textEdit.toPlainText().upper()
substitution_crypt()
from methods.substitution import encrypted_text
self.textEdit_2.setText(encrypted_text)
self.textEdit.setText("")
def decrypt(self):
caesar_radio = self.radioButton_2.isChecked()
morse_radio = self.radioButton.isChecked()
vigenere_radio = self.radioButton_3.isChecked()
substitution_radio = self.radioButton_4.isChecked()
if caesar_radio + morse_radio + vigenere_radio + substitution_radio == 0:
self.textEdit_2.setText("Choose an encryption metod")
QtTest.QTest.qWait(1000)
self.textEdit_2.setText("")
empty_check = self.textEdit.toPlainText()
def empty_check_true():
self.textEdit_2.setText("The text field is empty")
QtTest.QTest.qWait(1000)
self.textEdit_2.setText("")
if caesar_radio == True:
if empty_check == "":
empty_check_true()
else:
global to_decrypt
to_decrypt = self.textEdit.toPlainText()
caesar_decrypt()
from methods.caesar import decrypted_text
self.textEdit_2.setText(decrypted_text)
if morse_radio == True:
if empty_check == "":
empty_check_true()
else:
to_decrypt = self.textEdit.toPlainText()
morse_decrypt()
from methods.morse import decrypted_text
self.textEdit_2.setText(decrypted_text)
if vigenere_radio == True:
if empty_check == "":
empty_check_true()
else:
to_decrypt = self.textEdit.toPlainText()
global encryption_key
encryption_key = self.lineEdit.text()
vigenere_decrypt()
from methods.vigenere import decrypted_text
self.textEdit_2.setText(str(decrypted_text))
if substitution_radio == True:
if empty_check == "":
empty_check_true()
else:
to_decrypt = self.textEdit.toPlainText().upper()
substitution_decrypt()
from methods.substitution import decrypted_text
self.textEdit_2.setText(decrypted_text)
self.textEdit.setText("")
self.lineEdit.setText("")
def clear_encryption_key(self):
self.lineEdit.setText("")
def copy_encryption_key(self):
copy_key = self.lineEdit.text()
QApplication.clipboard().setText(copy_key)
self.pushButton_3.setStyleSheet("background-color:#E75917;")
self.pushButton_3.setText("COPIED")
QtTest.QTest.qWait(1000)
self.pushButton_3.setStyleSheet("background-color:#5858FA;")
self.pushButton_3.setText("COPY")
def show_vigenere_keys(self):
self.lineEdit.show()
self.pushButton_3.show()
self.label.show()
def hide_vigenere_keys(self):
self.lineEdit.hide()
self.pushButton_3.hide()
self.label.hide()
def on_click_radioButton(self):
caesar_radio = self.radioButton_2.isChecked()
morse_radio = self.radioButton.isChecked()
vigenere_radio = self.radioButton_3.isChecked()
if self.radioButton.isChecked() == True:
self.hide_vigenere_keys()
global save_encryption_method
save_encryption_method = "morse"
if self.radioButton_2.isChecked() == True:
self.hide_vigenere_keys()
save_encryption_method = "caesar"
if self.radioButton_3.isChecked() == True:
save_encryption_method = "vigenere"
self.show_vigenere_keys()
if self.radioButton_4.isChecked() == True:
save_encryption_method = "substitution"
self.hide_vigenere_keys()
def open_about_jet(self):
self.window = QtWidgets.QMainWindow()
self.ui = Ui_popup_about_jet()
self.ui.setupUi(self.window)
self.window.show()
def save_message2(self):
file_name = self.lineEdit_2.text()
file_name2 = ("saves/"+file_name+".txt")
print(file_name2)
with open(file_name2, 'w') as outfile:
to_save = self.textEdit_2.toPlainText()
encryption_key_save = self.lineEdit.text()
data = {}
data['encrypted_message'] = []
if save_encryption_method == 'vigenere':
data['encrypted_message'].append({
'message': to_save,
'encryption_method': save_encryption_method,
'encryption_key': encryption_key_save
})
else:
data['encrypted_message'].append({
'message': to_save,
'encryption_method': save_encryption_method
})
json.dump(data, outfile)
self.check_save_message1 = "False"
self.label_2.hide()
self.lineEdit_2.hide()
self.pushButton_4.hide()
self.pushButton_5.setStyleSheet("")
self.label_4.show()
QtTest.QTest.qWait(3000)
self.label_4.hide()
def save_message(self):
check_save_message2 = self.check_save_message1
check_save_message3 = self.check_save_message4
if check_save_message2 == "False":
if check_save_message3 == "True":
self.check_save_message4 = "False"
self.pushButton_6.setStyleSheet("")
self.toolButton.hide()
self.lineEdit_3.hide()
self.pushButton_7.hide()
self.check_save_message1 = "True"
self.label_2.show()
self.lineEdit_2.show()
self.pushButton_4.show()
self.pushButton_5.setStyleSheet("background-color:#38A1CB")
if check_save_message2 == "True":
self.check_save_message1 = "False"
self.label_2.hide()
self.lineEdit_2.hide()
self.pushButton_4.hide()
self.pushButton_5.setStyleSheet("")
def load_message(self):
check_save_message3 = self.check_save_message4
check_save_message2 = self.check_save_message1
if check_save_message3 == "False":
if check_save_message2 == "True":
self.check_save_message1 = "False"
self.label_2.hide()
self.lineEdit_2.hide()
self.pushButton_4.hide()
self.pushButton_5.setStyleSheet("")
self.check_save_message4 = "True"
self.pushButton_6.setStyleSheet("background-color:#38A1CB")
self.toolButton.show()
self.lineEdit_3.show()
self.pushButton_7.show()
if check_save_message3 == "True":
self.check_save_message4 = "False"
self.pushButton_6.setStyleSheet("")
self.toolButton.hide()
self.lineEdit_3.hide()
self.pushButton_7.hide()
def choose_a_file_to_load(self):
file_to_load1 = QFileDialog.getOpenFileName()[0]
file_to_load2 = str(file_to_load1)
self.lineEdit_3.setText(file_to_load2)
def load_the_file(self):
file_to_load = self.lineEdit_3.text()
with open(file_to_load) as json_file:
data = json.load(json_file)
for p in data['encrypted_message']:
print('Message: ' + p['message'])
print('Encryption Method: ' + p['encryption_method'])
print('')
global to_decrypt
to_decrypt = (p['message'])
if p['encryption_method'] == 'caesar':
caesar_decrypt()
from methods.caesar import decrypted_text
if p['encryption_method'] == 'morse':
morse_decrypt()
from methods.morse import decrypted_text
if p['encryption_method'] == 'vigenere':
global encryption_key
encryption_key = p['encryption_key']
vigenere_decrypt()
from methods.vigenere import decrypted_text
if p['encryption_method'] == 'substitution':
substitution_decrypt()
from methods.substitution import decrypted_text
self.textEdit_2.setText(decrypted_text)
self.check_save_message4 = "False"
self.pushButton_6.setStyleSheet("")
self.toolButton.hide()
self.lineEdit_3.hide()
self.pushButton_7.hide()
self.label_3.show()
QtTest.QTest.qWait(3000)
self.label_3.hide()
def open_encrypt_txt(self):
window1.hide()
height = self.geometry().y()
height2 = (height-30)
self.window4 = QtWidgets.QMainWindow()
global window4_global
window4_global = self.window4
self.ui = Ui_encryptTXT()
self.ui.setupUi(self.window4)
self.window4.show()
self.window4.move(self.geometry().x(), self.geometry().y())
def open_pgen(self):
window1.hide()
height = self.geometry().y()
height2 = (height-30)
self.window5 = QtWidgets.QMainWindow()
global window5_global
window5_global = self.window5
self.ui = Ui_pgen()
self.ui.setupUi(self.window5)
self.window5.show()
self.window5.move(self.geometry().x(), self.geometry().y())
def __init__(self):
# This is needed here to access the variables, methods,
# etc. in the design.py file
super().__init__()
self.setupUi(self) # This is needed to initialize our design
self.pushButton.clicked.connect(self.crypt)
self.pushButton_2.clicked.connect(self.decrypt)
self.pushButton_3.clicked.connect(self.copy_encryption_key)
self.pushButton_4.clicked.connect(self.save_message2)
self.pushButton_5.clicked.connect(self.save_message)
self.pushButton_6.clicked.connect(self.load_message)
self.radioButton.toggled.connect(self.on_click_radioButton)
self.radioButton_2.toggled.connect(self.on_click_radioButton)
self.radioButton_3.toggled.connect(self.on_click_radioButton)
self.radioButton_4.toggled.connect(self.on_click_radioButton)
self.toolButton.clicked.connect(self.choose_a_file_to_load)
self.pushButton_7.clicked.connect(self.load_the_file)
self.actionAbout_JET.triggered.connect(self.open_about_jet)
self.actionEncryptTXT.triggered.connect(self.open_encrypt_txt)
self.actionPGEN.triggered.connect(self.open_pgen)
#hide and show stuff
self.lineEdit.hide()
self.lineEdit_2.hide()
self.pushButton_3.hide()
self.pushButton_7.hide()
self.lineEdit_3.hide()
self.label.hide()
self.label_2.hide()
self.label_3.hide()
self.label_3.setStyleSheet("color:#0B610B;")
self.label_4.hide()
self.label_4.setStyleSheet("color:#0B610B;")
self.toolButton.hide()
self.pushButton_3.setStyleSheet("background-color:#5858FA;")
self.pushButton_4.setStyleSheet("background-color:#5858FA;")
self.pushButton_4.hide()
self.lineEdit.setStyleSheet("background-color:#EFF2FB;")
self.lineEdit_2.setStyleSheet("background-color:#EFF2FB;")
self.textEdit.setStyleSheet("background-color:#EFF2FB;")
self.textEdit_2.setStyleSheet("background-color:#EFF2FB;")
self.check_save_message1 = ("False")
self.check_save_message4 = ("False")
def main():
app = QtWidgets.QApplication(sys.argv) # A new QApplication instance
global window1
global window4
global window5
window4 = ("null")
window1 = ExampleApp() # Create an instance of the ExampleApp class
window1.setStyleSheet("background-color:#CED8F6;")
window1.show() # Show the window
app.exec_() # and start the application
if __name__ == '__main__': # If the file is run directly rather than imported
main() # then run the main() function
```
#### File: JMax-Encryption-Tools/methods/caesar.py
```python
caesar_dict1 = [("A","C"),("B","D"),("C","E"),("D","F"),("E","G"),("F","H"),
("G","I"),("H","J"),("I","K"),("J","L"),("K","M"),("L","N"),
("M","O"),("N","P"),("O","Q"),("P","R"),("Q","S"),("R","T"),
("S","U"),("T","V"),("U","W"),("V","X"),("W","Y"),("X","Z"),
("Y","A"),("Z","B"),
("a","c"),("b","d"),("c","e"),("d","f"),("e","g"),("f","h"),
("g","i"),("h","j"),("i","k"),("j","l"),("k","m"),("l","n"),
("m","o"),("n","p"),("o","q"),("p","r"),("q","s"),("r","t"),
("s","u"),("t","v"),("u","w"),("v","x"),("w","y"),("x","z"),
("y","a"),("z","b"),
("1","2"),("2","3"),("3","4"),("4","5"),("5","6"),("6","7"),
("7","8"),("8","9")]
caesar_dict2 = [("C","A"),("D","B"),("E","C"),("F","D"),("H","F"),("I","G"),
("J","H"),("K","I"),("L","J"),("M","K"),("N","L"),("O","M"),
("P","N"),("Q","O"),("R","P"),("S","Q"),("T","R"),("U","S"),
("V","T"),("W","U"),("X","V"),("Y","W"),("Z","X"),("A","Y"),
("B","Z"),("G","E"),
("c","a"),("d","b"),("e","c"),("f","d"),("h","f"),("i","g"),
("j","h"),("k","i"),("l","j"),("m","k"),("n","l"),("o","m"),
("p","n"),("q","o"),("r","p"),("s","q"),("t","r"),("u","s"),
("v","t"),("w","u"),("x","v"),("y","w"),("z","x"),("a","y"),
("b","z"),("g","e"),
("2","1"),("3","2"),("4","3"),("5","4"),("6","5"),("7","6"),
("8","7"),("9","8")]
encrypted_text = ("null")
decrypted_text = ("null")
def caesar_crypt():
from __main__ import to_encrypt
mytext = to_encrypt
crypted_text = mytext.translate(str.maketrans(dict(caesar_dict1)))
global encrypted_text
encrypted_text = crypted_text
def caesar_decrypt():
from __main__ import to_decrypt
crypted_text = to_decrypt.translate(str.maketrans(dict(caesar_dict2)))
global decrypted_text
decrypted_text = crypted_text
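# Usage sketch: this module deliberately reads its inputs from the importing
# __main__ module (main.py in this project), so a hypothetical stand-alone driver
# would look like:
#
#     to_encrypt = "Attack at dawn"        # module-level name read by caesar_crypt()
#     from methods import caesar
#     caesar.caesar_crypt()
#     print(caesar.encrypted_text)         # -> "Cvvcem cv fcyp" (letters shifted by two)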
``` |
{
"source": "j-maxi/pulumi-grafana",
"score": 2
} |
#### File: python/pulumi_grafana/data_source.py
```python
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class DataSource(pulumi.CustomResource):
access_mode: pulumi.Output[str]
"""
The method by which the browser-based Grafana
application will access the data source. The default is "proxy", which means
that the application will make requests via a proxy endpoint on the Grafana
server.
"""
basic_auth_enabled: pulumi.Output[bool]
"""
- If true, HTTP basic authentication will
be used to make requests.
"""
basic_auth_password: pulumi.Output[str]
"""
The
password to use for basic auth.
"""
basic_auth_username: pulumi.Output[str]
"""
The
username to use for basic auth.
"""
database_name: pulumi.Output[str]
"""
The name of the
database to use on the selected data source server.
"""
is_default: pulumi.Output[bool]
"""
If true, the data source will be the default
source used by the Grafana server. Only one data source on a server can be
the default.
"""
json_data: pulumi.Output[dict]
"""
The default region
and authentication type to access the data source. `json_data` is documented
in more detail below.
"""
name: pulumi.Output[str]
"""
A unique name for the data source within the Grafana
server.
"""
password: pulumi.Output[str]
"""
The password to use to
authenticate to the data source.
"""
secure_json_datas: pulumi.Output[list]
"""
The access and
secret keys required to access the data source. `secure_json_data` is
documented in more detail below.
* `accessKey` (`str`) - The access key required
to access the data source.
* `secretKey` (`str`) - The secret key required
to access the data source.
"""
type: pulumi.Output[str]
"""
The data source type. Must be one of the data source
keywords supported by the Grafana server.
"""
url: pulumi.Output[str]
"""
The URL for the data source. The type of URL required
varies depending on the chosen data source type.
"""
username: pulumi.Output[str]
"""
The username to use to
authenticate to the data source.
"""
def __init__(__self__, resource_name, opts=None, access_mode=None, basic_auth_enabled=None, basic_auth_password=None, basic_auth_username=None, database_name=None, is_default=None, json_data=None, name=None, password=None, secure_json_datas=None, type=None, url=None, username=None, __props__=None, __name__=None, __opts__=None):
"""
The data source resource allows a data source to be created on a Grafana server.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_mode: The method by which the browser-based Grafana
application will access the data source. The default is "proxy", which means
that the application will make requests via a proxy endpoint on the Grafana
server.
:param pulumi.Input[bool] basic_auth_enabled: - If true, HTTP basic authentication will
be used to make requests.
:param pulumi.Input[str] basic_auth_password: The
password to use for basic auth.
:param pulumi.Input[str] basic_auth_username: The
username to use for basic auth.
:param pulumi.Input[str] database_name: The name of the
database to use on the selected data source server.
:param pulumi.Input[bool] is_default: If true, the data source will be the default
source used by the Grafana server. Only one data source on a server can be
the default.
:param pulumi.Input[dict] json_data: The default region
and authentication type to access the data source. `json_data` is documented
in more detail below.
:param pulumi.Input[str] name: A unique name for the data source within the Grafana
server.
:param pulumi.Input[str] password: The password to use to
authenticate to the data source.
:param pulumi.Input[list] secure_json_datas: The access and
secret keys required to access the data source. `secure_json_data` is
documented in more detail below.
:param pulumi.Input[str] type: The data source type. Must be one of the data source
keywords supported by the Grafana server.
:param pulumi.Input[str] url: The URL for the data source. The type of URL required
varies depending on the chosen data source type.
:param pulumi.Input[str] username: The username to use to
authenticate to the data source.
The **secure_json_datas** object supports the following:
* `accessKey` (`pulumi.Input[str]`) - The access key required
to access the data source.
* `secretKey` (`pulumi.Input[str]`) - The secret key required
to access the data source.
> This content is derived from https://github.com/terraform-providers/terraform-provider-grafana/blob/master/website/docs/r/data_source.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['access_mode'] = access_mode
__props__['basic_auth_enabled'] = basic_auth_enabled
__props__['basic_auth_password'] = basic_auth_password
__props__['basic_auth_username'] = basic_auth_username
__props__['database_name'] = database_name
__props__['is_default'] = is_default
__props__['json_data'] = json_data
__props__['name'] = name
__props__['password'] = password
__props__['secure_json_datas'] = secure_json_datas
if type is None:
raise TypeError("Missing required property 'type'")
__props__['type'] = type
__props__['url'] = url
__props__['username'] = username
super(DataSource, __self__).__init__(
'grafana:index/dataSource:DataSource',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, access_mode=None, basic_auth_enabled=None, basic_auth_password=None, basic_auth_username=None, database_name=None, is_default=None, json_data=None, name=None, password=None, secure_json_datas=None, type=None, url=None, username=None):
"""
Get an existing DataSource resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] access_mode: The method by which the browser-based Grafana
application will access the data source. The default is "proxy", which means
that the application will make requests via a proxy endpoint on the Grafana
server.
:param pulumi.Input[bool] basic_auth_enabled: - If true, HTTP basic authentication will
be used to make requests.
:param pulumi.Input[str] basic_auth_password: The
password to use for basic auth.
:param pulumi.Input[str] basic_auth_username: The
username to use for basic auth.
:param pulumi.Input[str] database_name: The name of the
database to use on the selected data source server.
:param pulumi.Input[bool] is_default: If true, the data source will be the default
source used by the Grafana server. Only one data source on a server can be
the default.
:param pulumi.Input[dict] json_data: The default region
and authentication type to access the data source. `json_data` is documented
in more detail below.
:param pulumi.Input[str] name: A unique name for the data source within the Grafana
server.
:param pulumi.Input[str] password: The password to use to
authenticate to the data source.
:param pulumi.Input[list] secure_json_datas: The access and
secret keys required to access the data source. `secure_json_data` is
documented in more detail below.
:param pulumi.Input[str] type: The data source type. Must be one of the data source
keywords supported by the Grafana server.
:param pulumi.Input[str] url: The URL for the data source. The type of URL required
varies depending on the chosen data source type.
:param pulumi.Input[str] username: The username to use to
authenticate to the data source.
The **secure_json_datas** object supports the following:
* `accessKey` (`pulumi.Input[str]`) - The access key required
to access the data source.
* `secretKey` (`pulumi.Input[str]`) - The secret key required
to access the data source.
> This content is derived from https://github.com/terraform-providers/terraform-provider-grafana/blob/master/website/docs/r/data_source.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["access_mode"] = access_mode
__props__["basic_auth_enabled"] = basic_auth_enabled
__props__["basic_auth_password"] = <PASSWORD>_auth_password
__props__["basic_auth_username"] = basic_auth_username
__props__["database_name"] = database_name
__props__["is_default"] = is_default
__props__["json_data"] = json_data
__props__["name"] = name
__props__["password"] = password
__props__["secure_json_datas"] = secure_json_datas
__props__["type"] = type
__props__["url"] = url
__props__["username"] = username
return DataSource(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
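# Hedged usage sketch (not part of the generated provider code; values are examples):
#
#     import pulumi_grafana as grafana
#
#     metrics = grafana.DataSource(
#         "metrics",
#         type="influxdb",
#         name="myapp-metrics",
#         url="http://influxdb.example.net:8086/",
#         username="myapp",
#         password="changeme",
#         database_name="myapp")
#
# The exact `json_data`/`secure_json_datas` keys depend on the chosen data source
# type and are documented in the property docstrings above.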
``` |
{
"source": "jmay0504/globalhack6",
"score": 3
} |
#### File: jmay0504/globalhack6/postgre.py
```python
import psycopg2
from flask import Flask, jsonify, request
import re
from flask_cors import CORS, cross_origin
import json
app = Flask(__name__)
CORS(app)
@app.route("/select", methods=['POST'])
def query_data():
# create connection
conn = psycopg2.connect("dbname=postgres user=jgmayc")
conn.autocommit = True
cur = conn.cursor()
# get POST data
content = request.get_json()
statement = content['SQL']
column_list = re.match('SELECT(.*)FROM', statement)
column_list = ''.join(column_list.groups())
column_list = list(column_list.split(','))
# send SQL to database and catch results
cur.execute(statement)
result_list = cur.fetchall()
cur.close()
data = {}
row_list = []
for row in result_list:
results = {}
for property, value in zip(column_list, row):
property = property.replace(' ', '')
results[property] = str(value)
row_list.append(results)
data['data'] = row_list
# return json results
return jsonify(data)
@app.route("/dml", methods=['POST'])
def update_data():
# create connection
conn = psycopg2.connect("dbname=postgres user=jgmayc")
conn.autocommit = True
cur = conn.cursor()
# get POST data
content = request.get_json()
statement = content['SQL']
# send SQL to database
cur.execute(statement)
cur.close()
return json.dumps({'success': True}), 200, {'ContentType': 'application/json'}
if __name__ == "__main__":
app.run()
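# Example requests (a sketch; host and port assume Flask's 127.0.0.1:5000 defaults):
#
#     curl -X POST http://127.0.0.1:5000/select \
#          -H "Content-Type: application/json" \
#          -d '{"SQL": "SELECT id, name FROM users"}'
#
#     curl -X POST http://127.0.0.1:5000/dml \
#          -H "Content-Type: application/json" \
#          -d '{"SQL": "DELETE FROM users WHERE id = 1"}'
#
# Note that /select expects a statement of the form "SELECT <columns> FROM ...",
# because the column names are recovered from the SQL text with a regular expression.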
``` |
{
"source": "jmayapr/APNS-using-Python",
"score": 3
} |
#### File: jmayapr/APNS-using-Python/notifications.py
```python
import ssl
import json
import socket
import struct
import binascii
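# The function below targets APNS's legacy binary ("simple notification") interface:
# the frame packed by struct is a 1-byte command (0), a 2-byte token length, the
# 32-byte device token, a 2-byte payload length, and the JSON payload itself, all in
# network byte order, which is what the "!cH32sH{len}s" format string describes.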
def send_push_message(token, payload):
# the certificate file generated from Provisioning Portal
certfile = 'pushcert.pem'
#print(len(token))
# APNS server address (use 'gateway.push.apple.com' for production server)
apns_address = ('gateway.sandbox.push.apple.com', 2195)
# create socket and connect to APNS server using SSL
s = socket.socket()
sock = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_SSLv23 , certfile=certfile)
sock.connect(apns_address)
# generate APNS notification packet
token = binascii.unhexlify(token)
fmt = "!cH32sH{0:d}s".format(len(payload))
cmd = '\x00'
cmd = bytes(cmd, "utf-8")
payload = bytes(payload, "utf-8")
msg = struct.pack(fmt, cmd, len(token), token, len(payload), payload)
sock.write(msg)
sock.close()
if __name__ == '__main__':
payload = {"aps": {"alert": "Hellow World Push Notification","sound": "default"}
send_push_message("800000a46dd7ef0c48ba7431868edefafb604e9eb33d5ef92fb38c5f83212", json.dumps(payload))
``` |
{
"source": "jmayer16/sv_tb_scripts",
"score": 3
} |
#### File: jmayer16/sv_tb_scripts/create_tb.py
```python
import sys
import re
from config_parse import *
from verilog_parse import *
def writeTestbench(cp, vp, tbfile):
#write the timescale info
tbfile.write("`timescale 1"+cp.timescale+"/1"+cp.timescale+"\n\n")
#write the tb module name
tbfile.write("module "+cp.uut_name+"_tb;\n\n")
#write port input signal declarations
for var in vp.inputs:
if var[1] == '1':
tbfile.write("logic "+var[0]+";\n")
else:
tbfile.write("logic ["+var[1]+":0] "+var[0]+";\n")
tbfile.write("\n")
#write port output signal declarations
for var in vp.outputs:
if var[1] == '1':
tbfile.write("wire "+var[0]+";\n")
else:
tbfile.write("wire ["+var[1]+":0] "+var[0]+";\n")
tbfile.write("\n")
#write local clock params
for clock in cp.clocks:
tbfile.write("parameter half"+clock[0]+" = "+clock[1]+";\n")
tbfile.write("\n")
#write reset init statement
for rst in cp.resets:
tbfile.write("initial begin\n\t"+rst+" = 1'b0;\n\t#(1*half"+cp.clocks[0][0]+")\n")
tbfile.write("\t"+rst+" = 1'b1;\n\t#(8*half"+cp.clocks[0][0]+")\n")
tbfile.write("\t"+rst+" = 1'b0;\nend\n")
#write init statements for clocks
for clock in cp.clocks:
tbfile.write("\ninitial begin\n\t"+clock[0]+" = 1'b0;\n\t")
tbfile.write("forever #(half"+clock[0]+") "+clock[0]+" = ~"+clock[0]+";\nend\n")
#write uut instantiation
tbfile.write("\n"+cp.uut_name+" uut(\n")
for var in vp.inputs:
tbfile.write("\t."+var[0]+"("+var[0]+"),\n")
for i in range(len(vp.outputs)):
if i == (len(vp.outputs)-1):
tbfile.write("\t."+vp.outputs[i][0]+"("+vp.outputs[i][0]+")\n")
else:
tbfile.write("\t."+vp.outputs[i][0]+"("+vp.outputs[i][0]+"),\n")
tbfile.write(");\n")
#write endmodule
tbfile.write("\nTODO: STIMULUS GEN\n")
tbfile.write("\nendmodule")
return
def writeDoFile(cp, vp, dofile):
#write the work lib
dofile.write("vlib work\n\n")
#write the compile statments
dofile.write("vlog -work work "+cp.uut_path+"\n")
dofile.write("vlog -work work "+cp.uut_name+"_tb.sv\n")
#write vsim statement
dofile.write("\nvsim -t 1"+cp.timescale+" novopt "+cp.uut_name+"_tb")
if len(cp.libraries):
dofile.write(" -L")
for lib in cp.libraries:
dofile.write(" "+lib)
dofile.write("\n\n")
#write waves and signals
dofile.write("view signals\nview wave\n\n")
#write wave declarations
for var in vp.inputs:
label = re.sub('_',' ',var[0]).upper()
if (int(var[1])+1) > 7 or (int(var[1])+1)%4 == 0:
value = 'hexadecimal'
else:
value = 'binary'
dofile.write("add wave -color Green -label {"+label+"} -radix "+value)
dofile.write(" /"+cp.uut_name+"_tb/uut/"+var[0]+"\n")
dofile.write("\n")
for var in vp.internals:
label = re.sub('_',' ',var[0]).upper()
dofile.write("add wave -color Blue -label {"+label+"} -radix "+var[1])
dofile.write(" /"+cp.uut_name+"_tb/uut/"+var[0]+"\n")
dofile.write("\n")
for var in vp.outputs:
label = re.sub('_',' ',var[0]).upper()
if (int(var[1])+1) > 7 or (int(var[1])+1)%4 == 0:
value = 'hexadecimal'
else:
value = 'binary'
dofile.write("add wave -color Yellow -label {"+label+"} -radix "+value)
dofile.write(" /"+cp.uut_name+"_tb/uut/"+var[0]+"\n")
#write run statement
dofile.write("\nrun "+cp.runtime)
return
#Main function
cp = ConfigParse()
cp.parseConfig(sys.argv[1])
vp = VerilogParse(cp.submodules)
vp.parseVerilog(cp.uut_path)
tbname = cp.uut_name + "_tb.sv"
tbfile = open(tbname, "w")
writeTestbench(cp, vp, tbfile)
tbfile.close()
doname = cp.uut_name + ".do"
dofile = open(doname, "w")
writeDoFile(cp, vp, dofile)
dofile.close()
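# Invocation sketch: the script takes the path of a configuration file as its only
# argument (consumed by ConfigParse.parseConfig via sys.argv[1]), for example:
#
#     python create_tb.py my_uut.cfg
#
# The file name and extension above are assumptions; the config format itself is
# defined by config_parse.py, which is not shown here.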
```
#### File: jmayer16/sv_tb_scripts/verilog_parse.py
```python
import re
class VerilogParse:
'Hold all information about the uut verilog file'
def __init__(self, submodules):
self.parameters = []
self.inputs = []
self.outputs = []
self.internals = []
self.submodules = submodules
return
def parseVerilog(self, filename):
verilogfile = open(filename, "r")
for line in verilogfile:
line.strip('\r')
if re.search('P(?i)arameter\s+\S+', line):
line = re.sub(re.compile('//.*?\n'),"",line) #Removes comments
param_tup = re.findall('P(?i)arameter (\S+) = ([0-9.]+)', line)
self.parameters.append(param_tup[0])
#Still need to fix comment on same line problem
elif re.search('^\s+I(?i)nput\s+\S+', line):
line = re.sub(re.compile('//.*?\n'),"",line) #Removes comments
name = re.findall('([a-zA-Z0-9_.]+)[,\n]', line)
value = re.findall('\[([a-zA-Z0-9]\S*):[a-zA-Z0-9]\S*\]', line)
if len(value): #Resolve params to defaults
for i in range(len(self.parameters)):
if value[0] == (self.parameters[i][0] + '-1'):
value[0] = self.parameters[i][1]
else:
value = '1'
for var in name:
self.inputs.append((var,value[0]))
elif re.search('^\s+O(?i)utput\s+\S+', line):
line = re.sub(re.compile('//.*?\n'),"",line) #Removes comments
name = re.findall('([a-zA-Z0-9_.]+)[,\n]', line)
value = re.findall('\[([a-zA-Z0-9]\S*):[a-zA-Z0-9]\S*\]', line)
if len(value): #Resolve params to defaults
for i in range(len(self.parameters)):
if value[0] == (self.parameters[i][0] + '-1'):
value[0] = self.parameters[i][1]
else:
value = '1'
for var in name:
self.outputs.append((var,value[0]))
elif re.search('^\s+reg\s+\S+',line):
line = re.sub(re.compile('//.*?\n'),"",line) #Removes comments
name = re.findall('([a-zA-Z0-9_.]+)[,;]', line)
value = re.findall('\[([a-zA-Z0-9]\S*):[a-zA-Z0-9]\S*\]', line)
if len(value): #Resolve params to defaults
for i in range(len(self.parameters)):
if value[0] == (self.parameters[i][0] + '-1'):
value[0] = self.parameters[i][1]
else:
value = '1'
for var in name:
if len(value):
if (int(value[0])+1) > 7 or (int(value[0])+1)%4 == 0:
self.internals.append((var,'hexadecimal'))
else:
self.internals.append((var,'binary'))
else:
self.internals.append((var,'binary'))
elif re.search('^\s+logic\s+\S+',line):
line = re.sub(re.compile('//.*?\n'),"",line) #Removes comments
name = re.findall('([a-zA-Z0-9_.]+)[,;]', line)
value = re.findall('\[([a-zA-Z0-9]\S*):[a-zA-Z0-9]\S*\]', line)
if len(value): #Resolve params to defaults
for i in range(len(self.parameters)):
if value[0] == (self.parameters[i][0] + '-1'):
value[0] = self.parameters[i][1]
else:
value = '1'
for var in name:
if len(value):
if (int(value[0])+1) > 7 or (int(value[0])+1)%4 == 0:
self.internals.append((var,'hexadecimal'))
else:
self.internals.append((var,'binary'))
else:
self.internals.append((var,'binary'))
elif re.search('^\s+wire\s+\S+',line):
line = re.sub(re.compile('//.*?\n'),"",line) #Removes comments
name = re.findall('([a-zA-Z0-9_.]+)[,;]', line)
value = re.findall('\[([a-zA-Z0-9]\S*):([a-zA-Z0-9]\S*)\]', line)
if len(value): #Resolve params to defaults
for i in range(len(self.parameters)):
if value[0] == (self.parameters[i][0] + '-1'):
value[0] = self.parameters[i][1]
else:
value = '1'
for var in name:
if len(value):
if (int(value[0])+1) > 7 or (int(value[0])+1)%4 == 0:
self.internals.append((var,'hexadecimal'))
else:
self.internals.append((var,'binary'))
else:
self.internals.append((var,'binary'))
elif re.search('//[A-Z_.]+?\n',line):
name = re.findall('([A-Z_.]+?)\n',line)
name = name[0].lower()
for sub in self.submodules:
if sub[0] == name:
vp = VerilogParse(self.submodules)
vp.parseVerilog(sub[1])
for signal in vp.internals:
self.internals.append(signal)
else:
pass
else:
pass
verilogfile.close()
return
``` |
{
"source": "j-maynard/advent-of-code-2020",
"score": 4
} |
#### File: advent-of-code-2020/Day 3/day3.1.py
```python
f = open("day3-input.txt", "r")
finput = f.read().split("\n")
finput.pop()
trees_hit = 0
y_cord = 0
x_cord = 0
def get_line(finput, y_cord, x_cord):
line = finput[y_cord]
while len(line) <= x_cord:
line = line + finput[y_cord]
return line
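# The map repeats horizontally, so extending the row like this is equivalent to
# indexing with x_cord % len(finput[y_cord]).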
for x in range(len(finput)-1):
x_cord = x_cord + 3
y_cord = y_cord + 1
line = get_line(finput, y_cord, x_cord)
print("y_cord = %d, x_cord = %d, line_length = %d" % (y_cord, x_cord, len(line)))
if line[x_cord] == "#":
trees_hit = trees_hit + 1
print("Trees hit = ", trees_hit)
```
#### File: advent-of-code-2020/Day 3/day3.2.py
```python
f = open("day3-input.txt", "r")
finput = f.read().split("\n")
finput.pop()
routes = [
{"right": 1, "down": 1, "result": 0},
{"right": 3, "down": 1, "result": 0},
{"right": 5, "down": 1, "result": 0},
{"right": 7, "down": 1, "result": 0},
{"right": 1, "down": 2, "result": 0}
]
def get_line(finput, y_cord, x_cord):
line = finput[y_cord]
while len(line) <= x_cord:
line = line + finput[y_cord]
return line
def get_trees_hit(finput, right, down):
trees_hit = 0
y_cord = 0
x_cord = 0
for x in range(len(finput)-1):
x_cord = x_cord + right
y_cord = y_cord + down
if y_cord >= len(finput):
return trees_hit
line = get_line(finput, y_cord, x_cord)
if line[x_cord] == "#":
trees_hit = trees_hit + 1
return trees_hit
#trees_hit = get_trees_hit(finput, 3, 1)
for route in routes:
route["result"] = get_trees_hit(finput, route["right"], route["down"])
total_trees_hit = 1
for route in routes:
total_trees_hit = total_trees_hit * route["result"]
print("Trees hit = ", total_trees_hit)
```
#### File: advent-of-code-2020/Day 7/day7.1.py
```python
f = open("day7-input.txt", "r")
finput = f.read().split("\n")
finput.pop()
bags = []
for b in finput:
b = b.replace('bags', '').replace('bag', '').replace('.', '').strip()
bag = b.split("contain")[0].strip()
con = b.split("contain")[1].split(',')
contents = []
for c in con:
c = c.split(' ')
c.pop(0)
qty = c.pop(0)
type = str(c).strip('[]').replace(',','').replace("'", '').strip()
contents.append({ "qty": qty, "type": type })
bags.append({"type":bag,"contents":contents})
def find_parents(bags, h, search):
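# Recursively collect into the set h every bag colour that can, directly or
# indirectly, contain the bag colour given by `search`.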
for b in bags:
contents = []
for c in b['contents']:
contents.append(c['type'])
if contents.count(search) > 0:
h.add(b['type'])
find_parents(bags, h, b['type'])
p = set()
find_parents(bags, p, 'shiny gold')
print(len(p))
```
#### File: advent-of-code-2020/Day 8/day8.2.py
```python
f = open("day8-input.txt", "r")
finput = f.read().split("\n")
finput.pop()
def get_instruction_set():
instruction_set = []
for i in finput:
instruction_set.append({
"instruction" : i.split(' ')[0],
"value": int(i.split(' ')[1]),
"order": []
})
return instruction_set
def run_instruction_set(change, change_line):
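# Re-run the program with the instruction at change_line swapped to `change`.
# Returns (True, accumulator) if execution reaches the end of the instruction
# list, or (False, 0) if an instruction is revisited enough times to indicate a loop.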
order = 0
acc = 0
index = 0
i_set = get_instruction_set()
i_set[change_line]['instruction'] = change
run = True
while run:
i_set[index]['order'].append(order)
order += 1
i = i_set[index]
if i['instruction'] == "acc":
acc += i['value']
index += 1
elif i['instruction'] == "jmp":
index += i['value']
else:
index += 1
if (len(i_set) - 1) == index:
run = False
return True, acc
if len(i['order']) > 10:
run = False
return False, 0
def find_change():
instruction_set = get_instruction_set()
for i in range(len(instruction_set)):
suc = False
acc = 0
if instruction_set[i]['instruction'] == 'jmp':
suc, acc = run_instruction_set('nop', i)
elif instruction_set[i]['instruction'] == 'nop':
suc, acc = run_instruction_set('jmp', i)
if suc:
print("Run ", i, "Success, Accumulator = ", acc)
break
find_change()
``` |
{
"source": "jmaynard/oec",
"score": 2
} |
#### File: oec/oec/terminal.py
```python
import os
import time
import logging
from textwrap import dedent
from coax import poll, read_terminal_id, read_extended_id, get_features, \
load_control_register, TerminalType, Feature, PollAction, Control, \
ReceiveError, ProtocolError
from .display import Dimensions, BufferedDisplay
from .keyboard import Keyboard
logger = logging.getLogger(__name__)
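# Screen dimensions (rows, columns) for terminal models 2 through 5.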
MODEL_DIMENSIONS = {
2: Dimensions(24, 80),
3: Dimensions(32, 80),
4: Dimensions(43, 80),
5: Dimensions(27, 132)
}
class Terminal:
"""The terminal."""
def __init__(self, interface, terminal_id, extended_id, dimensions, features,
keymap, jumbo_write_strategy=None):
self.interface = interface
self.terminal_id = terminal_id
self.extended_id = extended_id
self.features = features
self.control = Control(step_inhibit=False, display_inhibit=False,
cursor_inhibit=False, cursor_reverse=False,
cursor_blink=False)
self.display = BufferedDisplay(self, dimensions, features.get(Feature.EAB),
jumbo_write_strategy=jumbo_write_strategy)
self.keyboard = Keyboard(keymap)
self.alarm = False
self.last_poll_keyboard_clicker = None
def setup(self):
"""Load registers and clear the display."""
self.load_control_register()
if self.display.has_eab:
self.display.load_eab_mask(0xff)
self.display.clear(clear_status_line=True)
def poll(self, **kwargs):
"""Execute a POLL command with queued actions."""
poll_action = PollAction.NONE
# Convert a queued alarm or keyboard clicker change to POLL action.
if self.alarm:
poll_action = PollAction.ALARM
elif self.keyboard.clicker != self.last_poll_keyboard_clicker:
if self.keyboard.clicker:
poll_action = PollAction.ENABLE_KEYBOARD_CLICKER
else:
poll_action = PollAction.DISABLE_KEYBOARD_CLICKER
poll_response = poll(self.interface, poll_action, **kwargs)
# Clear the queued alarm and keyboard clicker change if the POLL was
# successful.
if poll_action == PollAction.ALARM:
self.alarm = False
elif poll_action in [PollAction.ENABLE_KEYBOARD_CLICKER,
PollAction.DISABLE_KEYBOARD_CLICKER]:
self.last_poll_keyboard_clicker = self.keyboard.clicker
return poll_response
def sound_alarm(self):
"""Queue an alarm on next POLL command."""
self.alarm = True
def load_control_register(self):
"""Execute a LOAD_CONTROL_REGISTER command."""
load_control_register(self.interface, self.control)
class UnsupportedTerminalError(Exception):
"""Unsupported terminal."""
def create_terminal(interface, poll_response, get_keymap):
"""Terminal factory."""
jumbo_write_strategy = _get_jumbo_write_strategy()
# Read the terminal identifiers.
(terminal_id, extended_id) = _read_terminal_ids(interface)
logger.info(f'Terminal ID = {terminal_id}, Extended ID = {extended_id}')
if terminal_id.type != TerminalType.CUT:
raise UnsupportedTerminalError('Only CUT type terminals are supported')
# Get the terminal dimensions.
dimensions = MODEL_DIMENSIONS.get(terminal_id.model)
if dimensions is None:
raise UnsupportedTerminalError(f'Model {terminal_id.model} is not supported')
logger.info(f'Rows = {dimensions.rows}, Columns = {dimensions.columns}')
# Get the terminal features.
features = get_features(interface)
logger.info(f'Features = {features}')
if Feature.EAB in features:
if interface.legacy_firmware_detected and jumbo_write_strategy is None:
del features[Feature.EAB]
_print_no_i1_eab_notice()
# Get the keymap.
keymap = get_keymap(terminal_id, extended_id)
logger.info(f'Keymap = {keymap.name}')
# Create the terminal.
terminal = Terminal(interface, terminal_id, extended_id, dimensions, features,
keymap, jumbo_write_strategy=jumbo_write_strategy)
return terminal
def _read_terminal_ids(interface, extended_id_retry_attempts=3):
terminal_id = None
extended_id = None
try:
terminal_id = read_terminal_id(interface)
except ReceiveError as error:
logger.warning(f'READ_TERMINAL_ID receive error: {error}', exc_info=error)
except ProtocolError as error:
logger.warning(f'READ_TERMINAL_ID protocol error: {error}', exc_info=error)
# Retry the READ_EXTENDED_ID command as it appears to fail frequently on the
# first request, unlike the READ_TERMINAL_ID command.
extended_id = None
for attempt in range(extended_id_retry_attempts):
try:
extended_id = read_extended_id(interface)
break
except ReceiveError as error:
logger.warning(f'READ_EXTENDED_ID receive error: {error}', exc_info=error)
except ProtocolError as error:
logger.warning(f'READ_EXTENDED_ID protocol error: {error}', exc_info=error)
time.sleep(0.25)
return (terminal_id, extended_id.hex() if extended_id is not None else None)
def _get_jumbo_write_strategy():
value = os.environ.get('COAX_JUMBO')
if value is None:
return None
if value in ['split', 'ignore']:
return value
logger.warning(f'Unsupported COAX_JUMBO option: {value}')
return None
def _print_no_i1_eab_notice():
notice = '''
**** **** **** **** **** **** **** **** **** **** **** **** **** **** **** ****
Your terminal is reporting the existence of an EAB feature that allows extended
colors and formatting, however...
I think you are using an older firmware on the 1st generation, Arduino Mega
based, interface which does not support the "jumbo write" required to write a
full screen to the regen and EAB buffers.
I'm going to continue as if the EAB feature did not exist...
If you want to override this behavior, you can set the COAX_JUMBO environment
variable as follows:
- COAX_JUMBO=split - split large writes into multiple smaller 32-byte writes
before sending to the interface, this will result in
additional round trips to the interface which may
manifest as visible incremental changes being applied
to the screen
- COAX_JUMBO=ignore - try a jumbo write, anyway, use this option if you
believe you are seeing this behavior in error
**** **** **** **** **** **** **** **** **** **** **** **** **** **** **** ****
'''
print(dedent(notice))
```
#### File: oec/oec/tn3270.py
```python
import logging
from tn3270 import Telnet, Emulator, AttributeCell, CharacterCell, AID, Color, Highlight, \
OperatorError, ProtectedCellOperatorError, FieldOverflowOperatorError
from tn3270.ebcdic import DUP, FM
from .session import Session, SessionDisconnectedError
from .display import encode_ascii_character, encode_ebcdic_character, encode_string
from .keyboard import Key, get_ebcdic_character_for_key
AID_KEY_MAP = {
Key.CLEAR: AID.CLEAR,
Key.ENTER: AID.ENTER,
Key.PA1: AID.PA1,
Key.PA2: AID.PA2,
Key.PA3: AID.PA3,
Key.PF1: AID.PF1,
Key.PF2: AID.PF2,
Key.PF3: AID.PF3,
Key.PF4: AID.PF4,
Key.PF5: AID.PF5,
Key.PF6: AID.PF6,
Key.PF7: AID.PF7,
Key.PF8: AID.PF8,
Key.PF9: AID.PF9,
Key.PF10: AID.PF10,
Key.PF11: AID.PF11,
Key.PF12: AID.PF12,
Key.PF13: AID.PF13,
Key.PF14: AID.PF14,
Key.PF15: AID.PF15,
Key.PF16: AID.PF16,
Key.PF17: AID.PF17,
Key.PF18: AID.PF18,
Key.PF19: AID.PF19,
Key.PF20: AID.PF20,
Key.PF21: AID.PF21,
Key.PF22: AID.PF22,
Key.PF23: AID.PF23,
Key.PF24: AID.PF24
}
class TN3270Session(Session):
"""TN3270 session."""
def __init__(self, terminal, host, port):
self.logger = logging.getLogger(__name__)
self.terminal = terminal
self.host = host
self.port = port
self.telnet = None
self.emulator = None
self.keyboard_insert = False
self.waiting_on_host = False
self.operator_error = None
# TODO: Should the message area be initialized here?
self.message_area = None
self.last_message_area = None
def start(self):
self._connect_host()
(rows, columns) = self.terminal.display.dimensions
if self.terminal.display.has_eab:
supported_colors = 8
supported_highlights = [Highlight.BLINK, Highlight.REVERSE, Highlight.UNDERSCORE]
else:
supported_colors = 1
supported_highlights = []
self.emulator = Emulator(self.telnet, rows, columns, supported_colors, supported_highlights)
self.emulator.alarm = lambda: self.terminal.sound_alarm()
def terminate(self):
if self.telnet:
self._disconnect_host()
self.emulator = None
def fileno(self):
return self.emulator.stream.socket.fileno()
def handle_host(self):
try:
if not self.emulator.update(timeout=0):
return False
except (EOFError, ConnectionResetError):
self._disconnect_host()
raise SessionDisconnectedError
self.waiting_on_host = False
return True
def handle_key(self, key, keyboard_modifiers, scan_code):
aid = AID_KEY_MAP.get(key)
try:
if aid is not None:
self.emulator.aid(aid)
self.waiting_on_host = True
#elif key == Key.RESET:
elif key == Key.BACKSPACE:
self.emulator.backspace()
elif key == Key.TAB:
self.emulator.tab()
elif key == Key.BACKTAB:
self.emulator.tab(direction=-1)
elif key == Key.NEWLINE:
self.emulator.newline()
elif key == Key.HOME:
self.emulator.home()
elif key == Key.UP:
self.emulator.cursor_up()
elif key == Key.DOWN:
self.emulator.cursor_down()
elif key == Key.LEFT:
self.emulator.cursor_left()
elif key == Key.LEFT_2:
self.emulator.cursor_left(rate=2)
elif key == Key.RIGHT:
self.emulator.cursor_right()
elif key == Key.RIGHT_2:
self.emulator.cursor_right(rate=2)
elif key == Key.INSERT:
self._handle_insert_key()
elif key == Key.DELETE:
self.emulator.delete()
elif key == Key.DUP:
self.emulator.dup()
elif key == Key.FIELD_MARK:
self.emulator.field_mark()
else:
byte = get_ebcdic_character_for_key(key)
if byte:
self.emulator.input(byte, self.keyboard_insert)
except OperatorError as error:
self.operator_error = error
def render(self):
self._apply()
self._flush()
def _handle_insert_key(self):
self.keyboard_insert = not self.keyboard_insert
self.terminal.display.status_line.write_keyboard_insert(self.keyboard_insert)
def _connect_host(self):
# We will pretend a 3279 without EAB is a 3278.
if self.terminal.display.has_eab:
type = '3279'
else:
type = '3278'
# Although a IBM 3278 does not support the formatting enabled by the extended
# data stream, the capabilities will be reported in the query reply.
terminal_type = f'IBM-{type}-{self.terminal.terminal_id.model}-E'
self.logger.info(f'Terminal Type = {terminal_type}')
self.telnet = Telnet(terminal_type)
self.telnet.open(self.host, self.port)
if self.telnet.is_tn3270e_negotiated:
self.logger.info(f'TN3270E mode negotiated: Device Type = {self.telnet.device_type}, Device Name = {self.telnet.device_name}')
else:
self.logger.debug('Unable to negotiate TN3270E mode')
def _disconnect_host(self):
self.telnet.close()
self.telnet = None
def _apply(self):
has_eab = self.terminal.display.has_eab
for address in self.emulator.dirty:
cell = self.emulator.cells[address]
(regen_byte, eab_byte) = _map_cell(cell, has_eab)
self.terminal.display.buffered_write_byte(regen_byte, eab_byte, index=address)
self.emulator.dirty.clear()
# Update the message area.
self.message_area = self._format_message_area()
def _flush(self):
self.terminal.display.flush()
# TODO: hmm we need a buffered status line...
if self.message_area != self.last_message_area:
self.terminal.display.status_line.write(8, self.message_area)
self.last_message_area = self.message_area
self.terminal.display.move_cursor(index=self.emulator.cursor_address)
# TODO: This needs to be moved.
self.operator_error = None
def _format_message_area(self):
message_area = b''
if self.waiting_on_host:
# X SPACE CLOCK_LEFT CLOCK_RIGHT
message_area = b'\xf6\x00\xf4\xf5'
elif isinstance(self.operator_error, ProtectedCellOperatorError):
# X SPACE ARROW_LEFT OPERATOR ARROW_RIGHT
message_area = b'\xf6\x00\xf8\xdb\xd8'
elif isinstance(self.operator_error, FieldOverflowOperatorError):
# X SPACE OPERATOR >
message_area = b'\xf6\x00\xdb' + encode_string('>')
elif self.emulator.keyboard_locked:
# X SPACE SYSTEM
message_area = b'\xf6\x00' + encode_string('SYSTEM')
return message_area.ljust(9, b'\x00')
def _map_cell(cell, has_eab):
regen_byte = 0x00
if isinstance(cell, AttributeCell):
# Only map the protected and display bits - ignore numeric, skip and modified.
regen_byte = 0xc0 | (cell.attribute.value & 0x2c)
elif isinstance(cell, CharacterCell):
byte = cell.byte
if cell.character_set is not None:
# TODO: Temporary workaround until character set support is added.
regen_byte = encode_ascii_character(ord('ß'))
elif byte == DUP:
regen_byte = encode_ascii_character(ord('*'))
elif byte == FM:
regen_byte = encode_ascii_character(ord(';'))
else:
regen_byte = encode_ebcdic_character(byte)
if not has_eab:
return (regen_byte, None)
eab_byte = _map_formatting(cell.formatting)
return (regen_byte, eab_byte)
def _map_formatting(formatting):
if formatting is None:
return 0x00
byte = 0x00
# Map the 3270 color to EAB color.
if formatting.color == Color.BLUE:
byte |= 0x08
elif formatting.color == Color.RED:
byte |= 0x10
elif formatting.color == Color.PINK:
byte |= 0x18
elif formatting.color == Color.GREEN:
byte |= 0x20
elif formatting.color == Color.TURQUOISE:
byte |= 0x28
elif formatting.color == Color.YELLOW:
byte |= 0x30
elif formatting.color == Color.WHITE:
byte |= 0x38
# Map the 3270 highlight to EAB highlight.
if formatting.blink:
byte |= 0x40
elif formatting.reverse:
byte |= 0x80
elif formatting.underscore:
byte |= 0xc0
return byte
``` |
{
"source": "jmayorgas/Blockchain",
"score": 3
} |
#### File: Blockchain/criptomoneda/mayorcoin_node_5003.py
```python
from crypt import methods
import datetime
import hashlib
import json
from flask import Flask, jsonify, request
import requests
from uuid import uuid4
from urllib.parse import urlparse
# CREATING A BLOCKCHAIN
class Blockchain:
def __init__(self) -> None:
self.chain = []
self.transactions = []
self.create_block(proof=1, previous_hash='0')
# The list of nodes where our blockchain will operate; there will be no
# ordering, which is why a set is used instead of a list
self.nodes = set()
def create_block(self, proof: int, previous_hash: str):
block = {
'index': len(self.chain)+1,
'timestamp': str(datetime.datetime.now()),
'proof': proof,
'previous_hash': previous_hash,
'transactions': self.transactions
}
self.transactions = []
self.chain.append(block)
return block
def get_previous_block(self):
return self.chain[-1]
def proof_of_work(self, previous_proof):
new_proof = 1
check_proof = False
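# Brute-force search: keep incrementing new_proof until the SHA-256 hash of the
# arithmetic expression combining it with previous_proof starts with four zeros.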
while check_proof is False:
hash_operation = hashlib.sha256(str(new_proof**3-previous_proof**2+(new_proof**(1/2)-previous_proof**(1/2))).encode()).hexdigest()
if hash_operation[:4] == '0000':
check_proof = True
else:
new_proof += 1
return new_proof
def hash(self, block):
"""
Given a block, return its corresponding hash
"""
encoded_block = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(encoded_block).hexdigest()
def is_chain_valid(self, chain):
"""
Starting from block 0 (the genesis block), check that
every subsequent block is correct
"""
previous_block = chain[0]
block_index = 1
# While loop to iterate from the first block to the last
while block_index < len(chain):
block = chain[block_index]
# Compare whether the previous_hash of the current block matches
# the hash of the previous block
if block.get('previous_hash') != self.hash(previous_block):
return False
previous_proof = previous_block.get('proof')
proof = block.get('proof')
hash_operation = hashlib.sha256(str(proof**3-previous_proof**2+(proof**(1/2)-previous_proof**(1/2))).encode()).hexdigest()
# Check that the hash linking the current block and the
# previous one is correct
if hash_operation[:4] != '0000':
return False
# Update the previous block to the current one and advance the
# index by 1 to check the next block
previous_block = block
block_index += 1
return True
def add_transaction(self, sender, receiver, amount):
"""
sender: Sender of the transaction
receiver: Receiver of the transaction
amount: Amount of the transaction
Returns the index of the block that will collect
these transactions
"""
self.transactions.append({
'sender': sender,
'receiver': receiver,
'amount': amount
})
return self.get_previous_block()['index'] + 1
def add_node (self, address):
"""
Adds a node to the list of nodes, given its address
address: address of the new node
"""
# Create a urlparse result object, which exposes several attributes
# of the URL
parsed_url = urlparse(address)
# Keep only the network location; the http scheme and any arguments
# the URL may have are dropped
self.nodes.add(parsed_url.netloc)
def replace_chain(self):
"""
Used when a miner has mined a block, making the current chain
longer than the previous one. All other miners must then
update their chain to the new resulting chain
"""
network = self.nodes
longest_chain = None
max_length = len(self.chain)
# Go through the whole network, asking each miner for the
# length of its chain
for node in network:
response = requests.get(f'http://{node}/get_chain')
if response.status_code == 200:
length = response.json()['length']
chain = response.json()['chain']
# If the length of a chain exceeds the current maximum
# and the chain is valid, update it
if length > max_length and self.is_chain_valid(chain):
max_length = length
longest_chain = chain
# If, by the end of the loop, a chain longer than the current one
# was found, replace ours and return True since the chain has
# been replaced
if longest_chain:
self.chain = longest_chain
return True
# Otherwise return False since the chain has not been
# replaced
return False
# MINING BLOCKS ON THE CHAIN
# Create the web application
app = Flask(__name__)
# Create the address for this node
node_address = str(uuid4()).replace('-','')
# Create an instance of the Blockchain class
blockchain = Blockchain()
# Mine a new block
@app.route('/mine_block', methods=['GET'])
def mine_block():
previous_block = blockchain.get_previous_block()
previous_proof = previous_block.get('proof')
proof = blockchain.proof_of_work(previous_proof)
previous_hash = blockchain.hash(previous_block)
blockchain.add_transaction(sender=node_address,
receiver='Mama',
amount=25)
block = blockchain.create_block(proof, previous_hash)
response = {'message': 'Congrats. You have mined a new block',
'index': block.get('index'),
'timestamp': block.get('timestamp'),
'proof': block.get('proof'),
'previous_hash': block.get('previous_hash'),
'transactions': block.get('transactions')
}
return jsonify(response), 200
# Get the full blockchain
@app.route('/get_chain', methods=['GET'])
def get_chain():
response = {'chain': blockchain.chain,
'length': len(blockchain.chain)
}
return jsonify(response), 200
# Check whether the blockchain is valid
@app.route('/is_valid', methods=['GET'])
def is_valid():
valid = blockchain.is_chain_valid(blockchain.chain)
if valid:
message = 'The blockchain is valid'
else:
message = 'Ups. This blockchain is not valid'
response = {'message': message}
return jsonify(response), 200
@app.route('/add_transaction', methods=['POST'])
def add_transaction():
json = request.get_json()
transaction_keys = ['sender', 'receiver', 'amount']
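# Expected JSON body, e.g. (illustrative values): {"sender": "node_a", "receiver": "node_b", "amount": 5}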
if not all(key in json for key in transaction_keys):
return 'Transacción incompleta. Faltan elementos', 400
index = blockchain.add_transaction(sender=json['sender'],
receiver=json['receiver'],
amount=json['amount']
)
response = {'message': f'La transacción será añadida al bloque {index}'}
return jsonify(response), 201
# DECENTRALIZING THE BLOCKCHAIN
# To turn the blockchain into a cryptocurrency we need to add:
# - A field for transactions
# - A field for consensus
# Connect new nodes
@app.route('/connect_node', methods=['POST'])
def connect_node():
"""
A list of one or more nodes to register is passed via POST
"""
json = request.get_json()
nodes = json.get('nodes')
if nodes is None:
return 'No se ha añadido ningún nodo', 400
# Register each of the nodes that were provided
for node in nodes:
blockchain.add_node(address=node)
response = {'message': 'Nodes connected successfully',
'total_nodes': list(blockchain.nodes)}
return jsonify(response), 201
# Replace the chain if a longer chain exists
@app.route('/replace_chain', methods=['GET'])
def replace_chain():
is_chain_replaced = blockchain.replace_chain()
if is_chain_replaced:
message = 'The chain has been updated'
else:
message = 'The chain is okay, it is not necessary to update it'
response = {'message': message,
'chain': blockchain.chain}
return jsonify(response), 200
# Run the app
app.run(host='0.0.0.0', port=5003)
``` |
{
"source": "jmazala/interview_questions",
"score": 4
} |
#### File: interview_questions/coin-change/main.py
```python
def numCoins(target, coins=[2, 10, 5, 25]):
coins = sorted(coins)
dp = [None for _ in range(target + 1)]
dp[0] = 0 #it takes 0 coins to make 0 cents
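#dp[i] = minimum number of coins needed to make i cents (None if unreachable)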
for i in range(1, target + 1):
for coin in coins:
if coin > i:
break
if dp[i - coin] is None:
continue
candidate = dp[i - coin] + 1
if dp[i] is None or candidate < dp[i]:
dp[i] = candidate #keep the minimum over all usable coins
return dp[target] if dp[target] is not None else -1
print(numCoins(1)) # -1
print(numCoins(3)) # -1
print(numCoins(6)) # 3
print(numCoins(60)) # 3
print(numCoins(65)) # 4
print(numCoins(66)) # 6
print(numCoins(90)) # 5
``` |
{
"source": "jmazanec15/opensearch-knn-perf-tool",
"score": 2
} |
#### File: config/parsers/opensearch.py
```python
from dataclasses import dataclass
from io import TextIOWrapper
from typing import Any, Dict
from okpt.io.config.parsers import base
from okpt.io.utils import reader
@dataclass
class OpenSearchConfig:
endpoint: str
index_spec: Dict[str, Any]
max_num_segments: int
index_thread_qty: int
bulk_size: int
k: int
class OpenSearchParser(base.BaseParser):
"""Parser for OpenSearch config.
Methods:
parse: Parse and validate the OpenSearch config.
"""
def __init__(self):
super().__init__('opensearch')
def parse(self, file_obj: TextIOWrapper) -> OpenSearchConfig:
"""See base class."""
config_obj = super().parse(file_obj)
index_spec_path = config_obj['index_spec']
index_spec_obj = reader.parse_json_from_path(index_spec_path)
opensearch_config = OpenSearchConfig(
endpoint=config_obj['endpoint'],
index_spec=index_spec_obj,
max_num_segments=config_obj['max_num_segments'],
index_thread_qty=config_obj['index_thread_qty'],
bulk_size=config_obj['bulk_size'],
k=config_obj['k'])
return opensearch_config
``` |
{
"source": "jmazanec15/sagemaker-tensorflow-containers",
"score": 2
} |
#### File: src/tf_container/proxy_client.py
```python
import numpy as np
from google.protobuf import json_format
from grpc.beta import implementations
from tf_container.run import logger as _logger
from tensorflow import make_tensor_proto
from tensorflow.core.example import example_pb2, feature_pb2
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.saved_model.signature_constants import DEFAULT_SERVING_SIGNATURE_DEF_KEY, PREDICT_INPUTS
from tensorflow_serving.apis import get_model_metadata_pb2
from tensorflow_serving.apis import predict_pb2, classification_pb2, inference_pb2, regression_pb2
from tensorflow_serving.apis import prediction_service_pb2
REGRESSION = 'tensorflow/serving/regression'
CLASSIFY = 'tensorflow/serving/classify'
INFERENCE = 'tensorflow/serving/inference'
PREDICT = 'tensorflow/serving/predict'
GENERIC_MODEL_NAME = "generic_model"
class GRPCProxyClient(object):
def __init__(self, tf_serving_port, host='localhost', request_timeout=10.0, model_name=GENERIC_MODEL_NAME,
input_tensor_name=PREDICT_INPUTS, signature_name=DEFAULT_SERVING_SIGNATURE_DEF_KEY):
self.tf_serving_port = tf_serving_port
self.host = host
self.request_timeout = request_timeout
self.model_name = model_name
self.input_tensor_name = input_tensor_name
self.signature_name = signature_name
self.request_fn_map = {PREDICT: self.predict,
CLASSIFY: self.classification,
# TODO: implement inference and regression tf serving apis
INFERENCE: self._raise_not_implemented_exception,
REGRESSION: self._raise_not_implemented_exception,
}
self.prediction_type = None
self.input_type_map = {}
def parse_request(self, serialized_data):
request_fn_map = {
PREDICT: lambda: predict_pb2.PredictRequest(),
INFERENCE: lambda: inference_pb2.MultiInferenceRequest(),
CLASSIFY: lambda: classification_pb2.ClassificationRequest(),
REGRESSION: lambda: regression_pb2.RegressionRequest()
}
request = request_fn_map[self.prediction_type]()
request.ParseFromString(serialized_data)
return request
def request(self, data):
request_fn = self.request_fn_map[self.prediction_type]
return request_fn(data)
def cache_prediction_metadata(self):
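# Fetch the model's signature_def from TF Serving and cache the input tensor
# dtypes and the prediction method (predict/classify/...) for later requests.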
channel = implementations.insecure_channel(self.host, self.tf_serving_port)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
request = get_model_metadata_pb2.GetModelMetadataRequest()
request.model_spec.name = self.model_name
request.metadata_field.append('signature_def')
result = stub.GetModelMetadata(request, self.request_timeout)
_logger.info('---------------------------Model Spec---------------------------')
_logger.info(json_format.MessageToJson(result))
_logger.info('----------------------------------------------------------------')
signature_def = result.metadata['signature_def']
signature_map = get_model_metadata_pb2.SignatureDefMap()
signature_map.ParseFromString(signature_def.value)
serving_default = signature_map.ListFields()[0][1]['serving_default']
serving_inputs = serving_default.inputs
self.input_type_map = {key: serving_inputs[key].dtype for key in serving_inputs.keys()}
self.prediction_type = serving_default.method_name
def predict(self, data):
channel = implementations.insecure_channel(self.host, self.tf_serving_port)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
request = self._create_predict_request(data)
result = stub.Predict(request, self.request_timeout)
return result
def _create_predict_request(self, data):
# Send request
# See prediction_service.proto for gRPC request/response details.
if isinstance(data, predict_pb2.PredictRequest):
return data
request = predict_pb2.PredictRequest()
request.model_spec.name = self.model_name
request.model_spec.signature_name = self.signature_name
input_map = self._create_input_map(data)
for k, v in input_map.items():
try:
request.inputs[k].CopyFrom(v)
except:
raise ValueError("""Unsupported request data format: {}.
Valid formats: tensor_pb2.TensorProto and predict_pb2.PredictRequest""".format(type(data)))
return request
def classification(self, data):
channel = implementations.insecure_channel(self.host, self.tf_serving_port)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
request = self._create_classification_request(data)
result = stub.Classify(request, self.request_timeout)
return result
def _create_classification_request(self, data):
if isinstance(data, classification_pb2.ClassificationRequest):
return data
request = classification_pb2.ClassificationRequest()
request.model_spec.name = self.model_name
request.model_spec.signature_name = self.signature_name
feature_dict_list = self._create_feature_dict_list(data)
examples = [_create_tf_example(feature_dict) for feature_dict in feature_dict_list]
request.input.example_list.examples.extend(examples)
return request
def _create_feature_dict_list(self, data):
"""
Parses the input data and returns a [dict<string, iterable>] which will be used to create the tf examples.
If the input data is not a dict, a dictionary will be created with the default predict key PREDICT_INPUTS
Examples:
input => output
{'age': 39., 'workclass': 'Private'} => [{'age': 39., 'workclass': 'Private'}]
[{'age': 39., 'workclass': 'Private'}] => [{'age': 39., 'workclass': 'Private'}]
[{'age': 39., 'workclass': 'Private'}, {'age': 39., 'workclass':'Public'}]
=> [{'age': 39., 'workclass': 'Private'},
{'age': 39., 'workclass': 'Public'}]
[1, 2, 'string'] => [{PREDICT_INPUTS: [1, 2, 'string']}]
42 => [{PREDICT_INPUTS: [42]}]
Args:
data: request data. Can be an instance of float, int, str, map, or any iterable object.
Returns: a dict[string, iterable] that will be used to create the tf example
"""
if isinstance(data, dict):
return [data]
if hasattr(data, '__iter__'):
if all(isinstance(x, dict) for x in data):
return data
return [{self.input_tensor_name: data}]
return [{self.input_tensor_name: [data]}]
def _raise_not_implemented_exception(self, data):
raise NotImplementedError('This prediction service type is not supported by SageMaker yet')
def _create_input_map(self, data):
"""
Parses the input data and returns a dict<string, TensorProto> which will be used to create the predict request.
If the input data is not a dict, a dictionary will be created with the default predict key PREDICT_INPUTS
input.
Examples:
input => output
{'inputs': tensor_proto} => {'inputs': tensor_proto}
tensor_proto => {PREDICT_INPUTS: tensor_proto}
[1,2,3] => {PREDICT_INPUTS: tensor_proto(1,2,3)}
Args:
data: request data. Can be any instance of dict<string, tensor_proto>, tensor_proto or any array like data.
Returns:
dict<string, tensor_proto>
"""
msg = """Unsupported request data format: {}.
Valid formats: tensor_pb2.TensorProto, dict<string, tensor_pb2.TensorProto> and predict_pb2.PredictRequest"""
if isinstance(data, dict):
if all(isinstance(v, tensor_pb2.TensorProto) for k, v in data.items()):
return data
raise ValueError(msg.format(data))
if isinstance(data, tensor_pb2.TensorProto):
return {self.input_tensor_name: data}
try:
# TODO: tensorflow container supports prediction requests with ONLY one tensor as input
input_type = self.input_type_map.values()[0]
ndarray = np.asarray(data)
tensor_proto = make_tensor_proto(values=ndarray, dtype=input_type, shape=ndarray.shape)
return {self.input_tensor_name: tensor_proto}
except:
raise ValueError(msg.format(data))
def _create_tf_example(feature_dict):
"""
Creates a tf example protobuf message given a feature dict. The protobuf message is defined here
https://github.com/tensorflow/serving/blob/master/tensorflow_serving/apis/input.proto#L19
Args:
feature_dict (dict of str -> feature): feature can be any of the following:
int, strings, unicode object, float, or list of any of the previous types.
Returns:
a tf.train.Example including the features
"""
def _create_feature(feature):
feature_list = feature if isinstance(feature, list) else [feature]
# Each feature can be exactly one kind:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto#L76
feature_type = type(feature_list[0])
if feature_type == int:
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=feature_list))
elif feature_type == str:
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=feature_list))
elif feature_type == unicode:
return feature_pb2.Feature(bytes_list=feature_pb2.BytesList(value=map(lambda x: str(x), feature_list)))
elif feature_type == float:
return feature_pb2.Feature(float_list=feature_pb2.FloatList(value=feature_list))
else:
message = """Unsupported request data format: {}, {}.
Valid formats: float, int, str any object that implements __iter__
or classification_pb2.ClassificationRequest"""
raise ValueError(message.format(feature, type(feature)))
features = {k: _create_feature(v) for k, v in feature_dict.items()}
return example_pb2.Example(features=feature_pb2.Features(feature=features))
```
#### File: test/integ/utils.py
```python
import json
import logging
import os
import shutil
def serialize_hyperparameters(hp):
return {str(k): json.dumps(v) for (k, v) in hp.items()}
def save_as_json(data, filename):
with open(filename, "wt") as f:
json.dump(data, f)
def file_exists(resource_folder, file_name):
return os.path.exists(os.path.join(resource_folder, file_name))
def create_config_files(program, s3_source_archive, path, additional_hp={}):
rc = {
"current_host": "algo-1",
"hosts": ["algo-1"]
}
hp = {'sagemaker_region': 'us-west-2',
'sagemaker_program': program,
'sagemaker_submit_directory': s3_source_archive,
'sagemaker_container_log_level': logging.INFO}
hp.update(additional_hp)
ic = {
"training": {"ContentType": "trainingContentType"},
"evaluation": {"ContentType": "evalContentType"},
"Validation": {}
}
write_conf_files(rc, hp, ic, path)
def write_conf_files(rc, hp, ic, path):
os.makedirs('{}/input/config'.format(path))
rc_file = os.path.join(path, 'input/config/resourceconfig.json')
hp_file = os.path.join(path, 'input/config/hyperparameters.json')
ic_file = os.path.join(path, 'input/config/inputdataconfig.json')
hp = serialize_hyperparameters(hp)
save_as_json(rc, rc_file)
save_as_json(hp, hp_file)
save_as_json(ic, ic_file)
def copy_resource(resource_path, opt_ml_path, relative_src_path, relative_dst_path=None):
if not relative_dst_path:
relative_dst_path = relative_src_path
shutil.copytree(os.path.join(resource_path, relative_src_path),
os.path.join(opt_ml_path, relative_dst_path))
```
#### File: wide_deep/code/wide_deep.py
```python
import os
import tensorflow as tf
INPUT_TENSOR_NAME = 'inputs'
_CSV_COLUMNS = [
'age', 'workclass', 'fnlwgt', 'education', 'education_num',
'marital_status', 'occupation', 'relationship', 'race', 'gender',
'capital_gain', 'capital_loss', 'hours_per_week', 'native_country',
'income_bracket'
]
_NUM_EXAMPLES = {
'train': 32561,
'validation': 16281,
}
_CSV_COLUMN_DEFAULTS = [[0], [''], [0], [''], [0], [''], [''], [''], [''], [''],
[0], [0], [0], [''], ['']]
def estimator_fn(run_config, params):
deep_columns, wide_columns = _build_columns()
hidden_units = [100, 75, 50, 25]
return tf.estimator.DNNLinearCombinedClassifier(
linear_feature_columns=wide_columns,
dnn_feature_columns=deep_columns,
dnn_hidden_units=hidden_units,
config=run_config)
def _build_columns():
"""Builds a set of wide and deep feature columns."""
# Continuous columns
age = tf.feature_column.numeric_column('age')
education_num = tf.feature_column.numeric_column('education_num')
capital_gain = tf.feature_column.numeric_column('capital_gain')
capital_loss = tf.feature_column.numeric_column('capital_loss')
hours_per_week = tf.feature_column.numeric_column('hours_per_week')
education = tf.feature_column.categorical_column_with_vocabulary_list(
'education', [
'Bachelors', 'HS-grad', '11th', 'Masters', '9th', 'Some-college',
'Assoc-acdm', 'Assoc-voc', '7th-8th', 'Doctorate', 'Prof-school',
'5th-6th', '10th', '1st-4th', 'Preschool', '12th'])
marital_status = tf.feature_column.categorical_column_with_vocabulary_list(
'marital_status', [
'Married-civ-spouse', 'Divorced', 'Married-spouse-absent',
'Never-married', 'Separated', 'Married-AF-spouse', 'Widowed'])
relationship = tf.feature_column.categorical_column_with_vocabulary_list(
'relationship', [
'Husband', 'Not-in-family', 'Wife', 'Own-child', 'Unmarried',
'Other-relative'])
workclass = tf.feature_column.categorical_column_with_vocabulary_list(
'workclass', [
'Self-emp-not-inc', 'Private', 'State-gov', 'Federal-gov',
'Local-gov', '?', 'Self-emp-inc', 'Without-pay', 'Never-worked'])
# To show an example of hashing:
occupation = tf.feature_column.categorical_column_with_hash_bucket(
'occupation', hash_bucket_size=1000)
# Transformations.
age_buckets = tf.feature_column.bucketized_column(
age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
# Wide columns and deep columns.
base_columns = [
education, marital_status, relationship, workclass, occupation,
age_buckets,
]
crossed_columns = [
tf.feature_column.crossed_column(
['education', 'occupation'], hash_bucket_size=1000),
tf.feature_column.crossed_column(
[age_buckets, 'education', 'occupation'], hash_bucket_size=1000),
]
wide_columns = base_columns + crossed_columns
deep_columns = [
age,
education_num,
capital_gain,
capital_loss,
hours_per_week,
tf.feature_column.indicator_column(workclass),
tf.feature_column.indicator_column(education),
tf.feature_column.indicator_column(marital_status),
tf.feature_column.indicator_column(relationship),
# To show an example of embedding
tf.feature_column.embedding_column(occupation, dimension=8),
]
return deep_columns, wide_columns
def serving_input_fn(params):
deep_columns, wide_columns = _build_columns()
example_spec = tf.feature_column.make_parse_example_spec(deep_columns)
return tf.estimator.export.build_parsing_serving_input_receiver_fn(example_spec)()
def train_input_fn(training_dir, params):
"""Returns input function that would feed the model during training"""
data_file = os.path.join(training_dir, 'wide_deep_test.csv')
return _generate_input_fn(data_file)
def eval_input_fn(training_dir, params):
"""Returns input function that would feed the model during evaluation"""
data_file = os.path.join(training_dir, 'wide_deep_test.csv')
return _generate_input_fn(data_file)
def _generate_input_fn(data_file, num_epochs=1, shuffle=True, batch_size=40):
"""Generate an input function for the Estimator."""
assert tf.gfile.Exists(data_file), (
'%s not found. Please make sure you have either run data_download.py or '
'set both arguments --train_data and --test_data.' % data_file)
def parse_csv(value):
print('Parsing', data_file)
columns = tf.decode_csv(value, record_defaults=_CSV_COLUMN_DEFAULTS)
_features = dict(zip(_CSV_COLUMNS, columns))
_labels = _features.pop('income_bracket')
return _features, tf.equal(_labels, '>50K')
# Extract lines from input files using the Dataset API.
dataset = tf.data.TextLineDataset(data_file)
if shuffle:
dataset = dataset.shuffle(buffer_size=_NUM_EXAMPLES['train'])
dataset = dataset.map(parse_csv, num_parallel_calls=5)
# We call repeat after shuffling, rather than before, to prevent separate
# epochs from blending together.
dataset = dataset.repeat(num_epochs)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
features, labels = iterator.get_next()
return features, labels
```
#### File: test/unit/utils.py
```python
from mock import MagicMock
def mock_import_modules(modules):
'''Given a list of modules,it will create a dictionary of mocked modules,
including all submodules.
Examples:
_mock_import_modules(['tensorflow']) => mock, {'tensorflow': mock}
_mock_import_modules(['a', 'b.c.d') =>
mock, {'a': mock, 'b': mock, 'c': mock, 'd': mock}
Args:
modules: list (str) list of modules to be imported
Returns:
mock: containing all imports
imports: dictionary containing all imports
'''
imports = {}
_mock = MagicMock()
for _module in modules:
namespaces = _module.split('.')
full_module_name = namespaces.pop(0)
imports[full_module_name] = _mock
for namespace in namespaces:
full_module_name = '{}.{}'.format(full_module_name, namespace)
imports[full_module_name] = _mock
return _mock, imports
``` |
{
"source": "jmazumder/s3-controller",
"score": 2
} |
#### File: e2e/tests/test_bucket.py
```python
import pytest
from e2e import SERVICE_NAME
class TestBucket:
def test_bucket(self):
pytest.skip(f"No tests for {SERVICE_NAME}")
``` |
{
"source": "JMazurkiewicz/BD2-catering",
"score": 3
} |
#### File: src/model/authorizationmodel.py
```python
from model import Model
import pyodbc
class AuthorizationModel(Model):
def __init__(self):
self.login = ''
self.password = ''
self.user_type = -1
self.server = 'bd2-catering.database.windows.net'
self.database = 'Catering'
self.driver = '{SQL Server}'
def set_login(self, login):
self.login = login
def set_password(self, password):
self.password = password
def authorize(self):
print('Verification started...')
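# Build an ODBC connection string for the SQL Server instance (port 1433) and
# open the connection; pyodbc raises an exception if the credentials are rejected.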
conn_str = 'DRIVER={};SERVER={};PORT=1433;DATABASE={};UID={};PWD={}'.format(self.driver, self.server, self.database, self.login, self.password)
self.connection = pyodbc.connect(conn_str)
def get_user_type(self):
return self.user_type
```
#### File: src/model/magazinemodel.py
```python
from model import Model
class MagazineModel(Model):
def __init__(self):
pass
def get_inventory_description(self):
sql = """
SELECT catalog_number, batch_number, products.name, available_amount, expiration_date, storage.name
FROM
(product RIGHT JOIN stored_products ON product.batch_number = )
LEFT JOIN storage ON
ORDER BY expiration_date DESCENDING
"""
cursor = self.execute_sql(sql)
description = ''
row = cursor.fetchone()
while row:
part = '[{}:{}] {}, amount: {}, expiration date: {} (in {})\n'.format(row[0], row[1], row[2], row[3], row[4], row[5])
description += part
row = cursor.fetchone()
return description
```
#### File: src/model/newproductmodel.py
```python
from model import Model
from tkcalendar import DateEntry
import datetime
class NewProductModel(Model):
def __init__(self):
Model.__init__(self)
def insert_new_product(self, catalog, name, price):
sql = 'INSERT INTO product VALUES (\'{}\',\'{}\',{},null,\'A\')'
self.execute_sql(sql.format(catalog, name, price)).commit()
print('Product added!')
```
#### File: src/view/addextracostsview.py
```python
from view.calendarview import CalendarView
from model.addextracostsmodel import ExtraCostsModel
import tkinter as tk
from model.newproductmodel import NewProductModel
from view.formview import FormView
class ExtraCostsView(FormView):
def __init__(self, parent, controller):
FormView.__init__(self, parent, controller)
self.add_entry('cost').set_description('Cost')
self.add_entry('cause').set_description('Cause')
self.save_button = tk.Button(self, text='Save')
self.save_button.configure(command=self.on_save_click)
self.go_back_button = tk.Button(self, text='Main menu')
self.go_back_button.configure(command=self.controller.display_control_panel)
self.__build_grid()
self.set_model(ExtraCostsModel())
def __build_grid(self):
self.entry_frame.grid(column=0, row=0)
self.save_button.grid(column=1, row=1)
self.go_back_button.grid(column=0, row=1)
def on_save_click(self):
cost = self.get_input('cost')
cause = self.get_input('cause')
self.get_model().add_extra_cost(cost, cause)
```
#### File: src/view/controlpanelview.py
```python
from view.addclientview import NewClientView
from view.newemployeeview import NewEmployeeView
from view.menuview import MenuView
from view.magazineview import MagazineView
from view.employeesscheduleview import EmployeesScheduleView
from view.orderscheduleview import OrderScheduleView
from view.newmealview import NewMealView
from view.neworderview import NewOrderView
from view.newproductview import NewProductView
import tkinter as tk
from view import View
class ControlPanelView(View):
def __init__(self, parent, controller):
View.__init__(self, parent, controller)
self.button_frame = tk.Frame(self)
self.button_frame.grid(row=0, column=0)
self.buttons = []
self.__build_buttons()
self.grid_columnconfigure(0, weight=1)
def __build_buttons(self):
button = tk.Button(self.button_frame, text='Products')
button.grid(row=0, column=0)
button.configure(command=self.on_product_button)
self.buttons.append(button)
button = tk.Button(self.button_frame, text='Vehicles')
button.grid(row=0, column=1)
self.buttons.append(button)
button = tk.Button(self.button_frame, text='Magazine')
button.configure(command=self.on_magazine_button_click)
button.grid(row=0, column=2)
self.buttons.append(button)
button = tk.Button(self.button_frame, text='Menu')
button.grid(row=0, column=3)
button.configure(command=self.on_menu_button_click)
self.buttons.append(button)
button = tk.Button(self.button_frame, text='Edit employees')
button.grid(row=1, column=0)
self.buttons.append(button)
button = tk.Button(self.button_frame, text='Add employee')
button.grid(row=1, column=1)
button.configure(command=self.on_add_employee_button_click)
self.buttons.append(button)
button = tk.Button(self.button_frame, text='Order calendar')
button.grid(row=1, column=2)
button.configure(command=self.on_order_button)
self.buttons.append(button)
button = tk.Button(self.button_frame, text='Employee calendar')
button.grid(row=1, column=3)
button.configure(command=self.on_employee_button)
self.buttons.append(button)
button = tk.Button(self.button_frame, text='Add Client')
button.grid(row=2, column=0)
button.configure(command=self.on_client_button)
self.buttons.append(button)
def on_order_button(self):
self.controller.display_view(OrderScheduleView)
def on_vehicles_button(self):
self.controller.display_view(NewProductView)
def on_meal_button(self):
self.controller.display_view(NewMealView)
def on_employee_button(self):
self.controller.display_view(EmployeesScheduleView)
def on_product_button(self):
self.controller.display_view(NewProductView)
def on_magazine_button_click(self):
self.controller.display_view(MagazineView)
def on_menu_button_click(self):
self.controller.display_view(MenuView)
def on_add_employee_button_click(self):
self.controller.display_view(NewEmployeeView)
def on_client_button(self):
self.controller.display_view(NewClientView)
```
#### File: src/view/inventoryview.py
```python
import tkinter as tk
from view import View
class InventoryView(View):
def __init__(self, parent, controller):
View.__init__(self, parent, controller)
self.scroll_frame = tk.Scrollbar(self)
self.description = tk.Label(self)
self.button_frame = tk.Frame(self)
self.buttons = []
self.__build_grid()
def __build_grid(self):
self.description.grid(row=0, column=0, padx=(0, 50))
self.button_frame.grid(row=0, column=1)
```
#### File: src/view/newemployeeview.py
```python
from model.newemployeemodel import NewEmployeeModel
import tkinter as tk
from view.formview import FormView
class NewEmployeeView(FormView):
def __init__(self, parent, controller):
FormView.__init__(self, parent, controller)
self.add_entry('name').set_description('Name')
self.add_entry('surname').set_description('Surname')
self.add_entry('pesel').set_description('Pesel (optional)')
self.add_entry('phone_number').set_description('Phone number')
self.add_entry('bank_account_number').set_description('Bank account number')
self.add_entry('postal_code').set_description('Postal code')
self.add_entry('street_name').set_description('Street name')
self.add_entry('building_number').set_description('Building number')
self.add_entry('apartment_number').set_description('Apartment number (optional)')
self.add_entry('city').set_description('City')
self.add_entry('disctrict').set_description('District (optional)')
self.add_button = tk.Button(self, text='Add', command=self.on_add_button_click)
self.cancel_button = tk.Button(self, text='Cancel', command=self.on_cancel_button_click)
self.__build_grid()
self.set_model(NewEmployeeModel())
def __build_grid(self):
self.entry_frame.grid(column=0, row=0, columnspan=2)
self.add_button.grid(column=0, row=1)
self.cancel_button.grid(column=1, row=1)
def on_add_button_click(self):
name = self.get_input('name')
surname = self.get_input('surname')
pesel = self.get_input('pesel')
phone_number = self.get_input('phone_number')
bank_account_number = self.get_input('bank_account_number')
postal_code = self.get_input('postal_code')
street_name = self.get_input('street_name')
building_number = self.get_input('building_number')
apartment_number = self.get_input('apartment_number')
city = self.get_input('city')
disctrict = self.get_input('disctrict')
self.get_model().insert_new_employee(name, surname, pesel, phone_number, bank_account_number, postal_code, street_name, building_number, apartment_number, city, disctrict)
#self.controller.display_control_panel()
def on_cancel_button_click(self):
self.controller.display_control_panel()
```
#### File: src/view/neworderview.py
```python
from model.newordermodel import NewOrderModel
import tkinter as tk
from view.formview import FormView
class NewOrderView(FormView):
def __init__(self, parent, controller):
FormView.__init__(self, parent, controller)
self.type = ""
self.add_entry('date').set_description('Date')
self.add_entry('number_of_ppl').set_description('Number of people')
self.add_entry('base_price').set_description('Base price')
self.add_entry('waiters_needed').set_description('Waiters needed')
self.save_button = tk.Button(self, text='Save')
self.save_button.configure(command=self.on_save_button)
self.person = tk.Button(self, text='Add person')
self.person.configure(command=self.on_person_button)
self.business = tk.Button(self, text='Add business')
self.business.configure(command=self.on_business_button)
self.go_back_button = tk.Button(self, text='Main menu')
self.go_back_button.configure(command=self.controller.display_control_panel)
self.__build_grid()
self.set_model(NewOrderModel())
def __build_grid(self):
self.entry_frame.grid(column=0, row=0)
self.save_button.grid(column=0, row=1)
self.person.grid(column=1, row=1)
self.business.grid(column=2, row=1)
self.go_back_button.grid(column=3, row=1)
def on_save_button(self):
date = self.get_input('date')
number_of_ppl = self.get_input('number_of_ppl')
base_price = self.get_input('base_price')
waiters_needed = self.get_input('waiters_needed')
if(self.type == "Person"):
client_id = self.get_model().find_person(self.get_input('name'), self.get_input('surname'))
elif(self.type == "Business"):
client_id = self.get_model().find_business(self.get_input('business'))
self.get_model().insert_new_order(date, number_of_ppl, base_price, waiters_needed, client_id.client_id)
def on_person_button(self):
if(self.check_entry('business')):
self.remove_entry('business')
if(not self.check_entry('name')):
self.add_entry('name').set_description('Name')
self.add_entry('surname').set_description('Surname')
self.type = "Person"
self.entry_frame.grid(column=0, row=0)
def on_business_button(self):
if(self.check_entry('name')):
self.remove_entry('name')
self.remove_entry('surname')
if(not self.check_entry('business')):
self.add_entry('business').set_description('NIP')
self.type = "Business"
self.entry_frame.grid(column=0, row=0)
```
#### File: app/tests/simple_test.py
```python
import unittest
class Test(unittest.TestCase):
def test_true(self):
self.assertTrue(True)
def test_string(self):
self.assertEqual('hello', 'HeLLo'.lower())
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jmbaker94/quantumcircuitbenchmarks",
"score": 2
} |
#### File: quantumcircuitbenchmarks/cirq/cnu_halfborrowed_gate.py
```python
from cirq.ops import gate_features
from cirq import ops
import cirq
from .util import reduce_circuit_or_op
class CnUHalfBorrowedGate(ops.Gate):
def __init__(self, register_size):
self.reg_size = register_size
self._num_qubits = self.reg_size + self.reg_size - 3
def num_qubits(self):
return self._num_qubits
def __pow__(self, exponent):
return CnUHalfBorrowedGate(register_size=self.reg_size)
def _circuit_diagram_info_(self, args):
return cirq.protocols.CircuitDiagramInfo(
('@',) * (self.reg_size-1) + ('X',) + ('D',) * (args.known_qubit_count-self.reg_size))
def _decompose_(self, qubits):
qubits = list(qubits)
controls = qubits[:self.reg_size-1]
target = qubits[self.reg_size-1]
bbits = qubits[self.reg_size:]
if len(controls) == 2:
yield ops.CCX(controls[0], controls[1], target)
elif len(controls) == 1:
yield ops.CNOT(controls[0], target)
else:
# Build the list
bits = []
bits.append(controls[0])
for i in range(1, len(controls)-1):
bits.append(controls[i])
bits.append(bbits[i-1])
bits.append(controls[-1])
bits.append(target)
for i in range(len(bits) - 1, 0, -2):
yield ops.CCX(bits[i-2], bits[i-1], bits[i])
for i in range(4, len(bits) - 2, 2):
yield ops.CCX(bits[i-2], bits[i-1], bits[i])
for i in range(len(bits) - 1, 0, -2):
yield ops.CCX(bits[i-2], bits[i-1], bits[i])
for i in range(4, len(bits) - 2, 2):
yield ops.CCX(bits[i-2], bits[i-1], bits[i])
def generate_cnu_halfborrowed(n, to_toffoli=False):
'''
n: total number of qubits, including target and ancilla
'''
gate = CnUHalfBorrowedGate(register_size=n)
qubits = cirq.LineQubit.range(gate.num_qubits())
return reduce_circuit_or_op(gate(*qubits), to_toffoli=to_toffoli)
```
#### File: quantumcircuitbenchmarks/cirq/util.py
```python
import cirq
def reduce_circuit_or_op(circuit, to_toffoli=False):
if to_toffoli:
def keep(op):
return (len(op.qubits) == 2 or len(op.qubits) == 1
or (op.gate == cirq.CCX or op.gate == cirq.CCZ))
else:
def keep(op):
return len(op.qubits) == 2 or len(op.qubits) == 1
return cirq.Circuit(cirq.decompose(circuit, keep=keep),
strategy=cirq.InsertStrategy.EARLIEST)
```
#### File: quantumcircuitbenchmarks/qiskit/cuccaro_adder.py
```python
import qiskit
def cuccaro_adder(c, cin, a, b, cout):
def _maj(reg):
c.cx(reg[2], reg[1])
c.cx(reg[2], reg[0])
c.ccx(reg[0], reg[1], reg[2])
def _uma_parallel(reg):
c.x(reg[1])
c.cx(reg[0], reg[1])
c.toffoli(reg[0], reg[1], reg[2])
c.x(reg[1])
c.cx(reg[2], reg[0])
c.cx(reg[2], reg[1])
_maj([cin, b[0], a[0]])
for i in range(1, len(b)):
_maj([a[i-1], b[i], a[i]])
c.cx(a[-1], cout)
for i in reversed(range(1, len(b))):
_uma_parallel([a[i-1], b[i], a[i]])
_uma_parallel([cin, b[0], a[0]])
def generate_cuccaro_adder(n):
'''
n: total size of circuit (each register is (n-2) / 2 sized)
'''
if n % 2 != 0:
raise ValueError('Odd number of qubits')
c = qiskit.circuit.QuantumCircuit(n)
qs = list(range(n))
cin = qs[0]
cout = qs[-1]
a = qs[1:int(n / 2)]
b = qs[int(n / 2):-1]
cuccaro_adder(c, cin, a, b, cout)
return c
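# Minimal usage sketch (illustrative only; assumes qiskit is installed):
#   adder = generate_cuccaro_adder(8)   # two 3-qubit registers plus carry-in/out
#   print(adder)                        # text drawing of the ripple-carry adder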
``` |
{
"source": "jmball/compress_images",
"score": 4
} |
#### File: jmball/compress_images/compress_images.py
```python
import argparse
import pathlib
import PIL.Image
def get_args():
"""Get CLI arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--path", default="", help="File or folder path.")
parser.add_argument(
"--quality", type=int, default=30, help="Compressed image quality percentage."
)
return parser.parse_args()
def compress_images(path, quality):
"""Compress an image file or all image files in a folder.
Parameters
----------
path : str
File or folder path.
quality : int
Compressed image quality percentage passed to PIL's save().
"""
path = pathlib.Path(path)
if path.exists() is False:
raise ValueError(f"Invalid path: {path}")
if path.is_dir():
folder = path
# only consider regular files; sub-directories are skipped
files = [f for f in path.iterdir() if f.is_file()]
elif path.is_file():
folder = path.parent
files = [path]
for f in files:
try:
im = PIL.Image.open(f)
new_filename = f"compressed-{f.parts[-1]}"
im.save(folder.joinpath(new_filename), optimize=True, quality=quality)
except PIL.Image.UnidentifiedImageError:
print(f"Invalid image file: {f}")
if __name__ == "__main__":
args = get_args()
compress_images(args.path, args.quality)
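# Example invocation (paths and values are hypothetical):
#   python compress_images.py --path ./photos --quality 40
# Each valid image is written next to the original as "compressed-<name>".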
``` |
{
"source": "jmball/simple_solar_simulator",
"score": 3
} |
#### File: simple_solar_simulator/Python/keithley-2450_jt.py
```python
import argparse
import time
import numpy as np
import visa
# Parse folder path, file name, and measurement parameters from command line
# arguments. Remember to include the "python" keyword before the call to the
# python file from the command line, e.g. python example.py "arg1" "arg2".
# Folder paths must use forward slashes to separate subfolders.
parser = argparse.ArgumentParser(
description='Measure and save max power point tracking data')
parser.add_argument(
'folder_path',
metavar='folder_path',
type=str,
help='Absolute path to the folder containing max P stabilisation data')
parser.add_argument(
'file_name',
metavar='file_name',
type=str,
help='Name of the file to save the data to')
parser.add_argument(
'V_start', metavar='V_start', type=float, help='Start voltage (V)')
parser.add_argument(
'V_stop', metavar='V_stop', type=float, help='Stop voltage (V)')
parser.add_argument(
'V_step', metavar='V_step', type=float, help='Step voltage (V)')
parser.add_argument(
't_step',
metavar='t_step',
type=float,
help='Time to hold for each voltage (s)')
parser.add_argument(
'dual', metavar='dual', type=bool, help='Dual sweep (True or False)?')
parser.add_argument(
'inverted',
metavar='inverted',
type=bool,
help='Inverted device structure (True or False)?')
parser.add_argument(
't_settling', metavar='t_settling', type=float, help='Settling delay (ms)')
parser.add_argument(
'nplc',
metavar='nplc',
type=float,
help='Integration filter in number of power line cycles (NPLC)')
parser.add_argument(
'condition',
metavar='condition',
type=str,
help='Illumination conditions (light or dark)')
parser.add_argument('A', metavar='A', type=float, help='Device area (cm^2)')
parser.add_argument(
'num_of_suns',
metavar='num_of_suns',
type=float,
help='Number of suns equivalent illumination intensity')
args = parser.parse_args()
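# Example invocation with the positional arguments in the order defined above
# (all values are hypothetical):
#   python keithley-2450_jt.py "C:/data/" "cell1_jt.txt" 0 1.2 0.05 5 True "" 50 1 light 0.09 1
# Note that argparse's type=bool treats any non-empty string (including
# "False") as True, so pass an empty string "" to get False for dual/inverted.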
# Assign argparse arguments to variables
folderpath = args.folder_path
filename = args.file_name
V_start = args.V_start
V_stop = args.V_stop
V_step = args.V_step
t_step = args.t_step
dual = args.dual
inverted = args.inverted
t_settling = args.t_settling
nplc = args.nplc
condition = args.condition
condition = condition.lower()
suns = args.num_of_suns
A = args.A
points = int(round(1 + (np.absolute(V_start - V_stop) / V_step)))
V_range = np.max([np.absolute(V_start), np.absolute(V_stop)])
# Make voltage array
V_arr = np.linspace(V_start, V_stop, points)
if dual:
V_arr_rev = np.flip(V_arr, axis=0)
V_arr = np.concatenate((V_arr, V_arr_rev))
# Set current measurement range to 100 times the SQ-limit current for a 0.5 eV
# bandgap for the given area
I_range = 100 * 0.065 * A
if I_range > 1:
I_range = 1
# Assign the VISA resource to a variable and reset Keithley 2450
rm = visa.ResourceManager()
keithley2450 = rm.open_resource('USB0::0x05E6::0x2450::04049675::INSTR')
keithley2450.query('*IDN?')
keithley2450.write('*RST')
keithley2450.encoding = 'latin-1'
# Turn off output
keithley2450.write('OUTP OFF')
# Enable 4-wire sense measurement
keithley2450.write(':SYST:RSEN ON')
# Set digital I/O line 1 as output, and close shutter
keithley2450.write(':DIG:LINE1:MODE DIG, OUT')
keithley2450.write(':DIG:LINE1:STAT 1')
# Set source function to voltage
keithley2450.write(':SOUR:FUNC VOLT')
# Set output-off mode to high impedance
keithley2450.write(':OUTP:SMOD HIMP')
# Set source readback to on (measure the source voltage when measuring the
# source current)
keithley2450.write(':SOUR:VOLT:READ:BACK ON')
# Set the voltage source range
keithley2450.write(':SOUR:VOLT:RANG {}'.format(V_range))
keithley2450.write(':SOUR:VOLT:ILIM {}'.format(I_range))
# Set settling delay for sourcing voltage
keithley2450.write(':SOUR:VOLT:DEL {}'.format(t_settling))
# Set measurement function to current
keithley2450.write(':SENS:FUNC "CURR"')
# Set current measurement range
keithley2450.write(':SENS:CURR:RANG {}'.format(I_range))
# Set the integration filter
keithley2450.write(':SENS:CURR:NPLC {}'.format(nplc))
# Disable current and voltage autozero
keithley2450.write(':CURR:AZER OFF')
keithley2450.write(':VOLT:AZER OFF')
def jt_scan(V_arr, t_step, condition):
"""Measure current as a function of time for a voltage sweep.
Parameters
----------
V_arr : array of float
Voltages to stabilise at
t_step : float
Time to hold each voltage at (s)
condition : str
Illumination condition (light or dark)
Returns
-------
ts : list of float
Timestamps for every measurement (UTC)
Vs : list of float
Measured Vs (V)
Is : list of float
Measured Is (A)
Js : list of float
Current densities (mA / cm^2)
"""
# Initialise empty lists for storing data
ts = []
Vs = []
Is = []
Js = []
if condition == 'light':
# Open the shutter of the solar simulator
keithley2450.write(':DIG:LINE1:STAT 0')
# Turn on the Keithley output at zero volts
keithley2450.write(':SOUR:VOLT {}'.format(V_arr[0]))
keithley2450.write('OUTP ON')
for V in V_arr:
# Set voltage
keithley2450.write(':SOUR:VOLT {}'.format(V))
# Reset step timer
t_step_start = time.time()
# Take readings continuously for t_step
while time.time() - t_step_start < t_step:
data = keithley2450.query(
':MEAS:CURR? "defbuffer1", REL, SOUR, READ')
data = data.split(',')
data = [float(item) for item in data]
ts.append(data[0])
Vs.append(data[1])
Is.append(data[2])
Js.append(data[2] * 1000 / A)
return ts, Vs, Is, Js
# Manually reset zero reference values
keithley2450.write(':AZER:ONCE')
# Run the voltage-stepped current-time (J-t) scan
jt_data = jt_scan(V_arr, t_step, condition)
# Disable the output
keithley2450.write('OUTP OFF')
if condition == 'light':
# Close the shutter
keithley2450.write(':DIG:LINE1:STAT 1')
# Clear measurement buffer
keithley2450.write(':TRAC:CLE "defbuffer1"')
# Convert to numpy array
jt_data_arr = np.array(jt_data).T
half_len = int(round((len(jt_data_arr[:, 0]) / 2)))
# Split scan directions
if V_start < V_stop:
jt_data_LH = jt_data_arr[:half_len, :]
jt_data_HL = jt_data_arr[half_len - 1:, :]
else:
jt_data_HL = jt_data_arr[:half_len, :]
jt_data_LH = jt_data_arr[half_len - 1:, :]
# Format and save the results
np.savetxt(
(folderpath + filename).replace('.txt', '_LH.txt'),
jt_data_LH,
fmt='%.6e',
delimiter='\t',
newline='\r\n',
header='Time (s)\tV\tI (A)\tJ (mA/cm^2)',
comments='')
np.savetxt(
(folderpath + filename).replace('.txt', '_HL.txt'),
jt_data_HL,
fmt='%.6e',
delimiter='\t',
newline='\r\n',
header='Time (s)\tV\tI (A)\tJ (mA/cm^2)',
comments='')
# Close the visa resource manager
keithley2450.close()
``` |
{
"source": "jmball/srs_sr830",
"score": 3
} |
#### File: src/sr830/test_sr830.py
```python
import argparse
import math
import random
import time
import numpy as np
import pytest
import visa
import sr830
parser = argparse.ArgumentParser()
parser.add_argument(
"-r", "--resource-name", help="VISA resource name",
)
parser.add_argument(
"-o",
"--output-interface",
type=int,
help=(
"Output communication interface for reading instrument responses: 0 (RS232) "
+ "or 1 (GPIB)"
),
)
args = parser.parse_args()
lia = sr830.sr830()
def _int_property_test(settings, lia_property):
"""Test read/write of a property that has integer settings."""
# check read of current setting
old_setting = lia_property
assert old_setting in settings
# check write/read of all valid settings
for new_setting in settings:
lia_property = new_setting
assert lia_property == new_setting
# change back to old setting
lia_property = old_setting
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_setting = max(settings) + 1
lia_property = new_setting
def _float_property_test(min_value, max_value, lia_property):
"""Test read/write of a float property."""
# read current value
old_value = lia_property
assert (old_value >= min_value) and (old_value <= max_value)
# try a new valid value
while True:
new_value = np.random.uniform(min_value, max_value, 1)
if new_value != old_value:
break
lia_property = new_value
assert lia_property == new_value
# change back
lia_property = old_value
# try an invalid value
with pytest.raises(ValueError):
new_value = max_value + 1
lia_property = new_value
def test_connect():
"""Test for successful connection with minimal setup."""
lia.connect(
args.resource_name,
output_interface=args.output_interface,
reset=False,
local_lockout=False,
)
assert lia.instr.session is not None
def test_reset():
"""Check reset command issued without errors."""
lia.reset()
def test_get_enable_register():
"""Test query of all status register enables.
All should be disabled after a reset.
"""
assert lia.get_enable_register("standard_event") == 0
assert lia.get_enable_register("serial_poll") == 0
assert lia.get_enable_register("error") == 0
assert lia.get_enable_register("lia_status") == 0
def test_set_enable_register():
"""Test set enable register function."""
registers = list(lia._enable_register_cmd_dict.keys())
dec_value = range(256)
for reg in registers:
old_val = lia.get_enable_register(reg)
new_val = random.choice(dec_value)
lia.set_enable_register(reg, new_val)
new_val_set = lia.get_enable_register(reg)
assert new_val_set == new_val
lia.set_enable_register(reg, old_val)
with pytest.raises(ValueError):
new_val = max(dec_value) + 1
reg = random.choice(registers)
lia.set_enable_register(reg, new_val)
with pytest.raises(ValueError):
new_val = random.choice(dec_value)
reg = "hello"
lia.set_enable_register(reg, new_val)
def test_enable_all_status_bytes():
"""Check command can be issued without errors."""
lia.enable_all_status_bytes()
# check all are enabled
assert lia.get_enable_register("standard_event") == 255
assert lia.get_enable_register("serial_poll") == 255
assert lia.get_enable_register("error") == 255
assert lia.get_enable_register("lia_status") == 255
def test_get_status_byte():
"""Test get status byte function."""
assert type(lia.get_status_byte("standard_event")) is int
assert type(lia.get_status_byte("serial_poll")) is int
assert type(lia.get_status_byte("error")) is int
assert type(lia.get_status_byte("lia_status")) is int
def test_errors():
"""Check errors property."""
lia.errors
def test_reference_phase_shift():
"""Test read/write of reference phase shift."""
_float_property_test(-360, 720, lia.reference_phase_shift)
def test_reference_source():
"""Test read/write of reference source."""
settings = range(2)
_int_property_test(settings, lia.reference_source)
def test_reference_frequency():
"""Test read/write of reference frequency."""
_float_property_test(0.001, 102000, lia.reference_frequency)
def test_reference_trigger():
"""Test read/write of reference trigger."""
settings = range(3)
_int_property_test(settings, lia.reference_trigger)
def test_harmonic():
"""Test read/write of harmonic."""
settings = range(1, 20000)
_int_property_test(settings, lia.harmonic)
def test_sine_amplitude():
"""Test read/write of sine_amplitude."""
_float_property_test(0.004, 5.000, lia.sine_amplitude)
def test_input_configuration():
"""Test read/write of input configuration."""
settings = range(4)
_int_property_test(settings, lia.input_configuration)
def test_input_shield_grounding():
"""Test read/write of input shield grounding."""
settings = range(2)
_int_property_test(settings, lia.input_shield_grounding)
def test_input_coupling():
"""Test read/write of input coupling."""
settings = range(2)
_int_property_test(settings, lia.input_coupling)
def test_line_notch_filter_status():
"""Test read/write of line notch filter status."""
settings = range(4)
_int_property_test(settings, lia.line_notch_filter_status)
def test_sensitivity():
"""Test read/write of sensitivity."""
settings = range(27)
_int_property_test(settings, lia.sensitivity)
def test_reserve_mode():
"""Test read/write of reserve mode."""
settings = range(3)
_int_property_test(settings, lia.reserve_mode)
def test_time_constant():
"""Test read/write of time constant."""
settings = range(20)
_int_property_test(settings, lia.time_constant)
def test_lowpass_filter_slope():
"""Test read/write of low pass filter slope."""
settings = range(4)
_int_property_test(settings, lia.lowpass_filter_slope)
def test_sync_filter_status():
"""Test read/write of synchronous filter status."""
settings = range(2)
_int_property_test(settings, lia.sync_filter_status)
def test_output_interface():
"""Test read/write of output interface."""
settings = range(2)
_int_property_test(settings, lia.output_interface)
def test_get_display():
"""Test reading both displays."""
channel_settings = range(1, 3)
display_settings = range(5)
ratio_settings = range(3)
# check all valid settings
for channel in channel_settings:
old_display, old_ratio = lia.get_display(channel)
assert old_display in display_settings
assert old_ratio in ratio_settings
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_channel = max(channel_settings) + 1
lia.get_display(new_channel)
def test_set_display():
"""Test setting display settings."""
channel_settings = range(1, 3)
display_settings = range(5)
ratio_settings = range(3)
for channel in channel_settings:
old_display, old_ratio = lia.get_display(channel)
# check write/read of all valid settings
for new_display in display_settings:
for new_ratio in ratio_settings:
lia.set_display(channel, new_display, new_ratio)
new_display_set, new_ratio_set = lia.get_display(channel)
assert new_display == new_display_set
assert new_ratio == new_ratio_set
# change back to old setting
lia.set_display(channel, old_display, old_ratio)
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_channel = max(channel_settings) + 1
new_display = max(display_settings) + 1
new_ratio = max(display_settings) + 1
lia.set_display(new_channel, new_display, new_ratio)
def test_get_front_output():
"""Test reading front ouptut."""
channel_settings = range(1, 3)
output_settings = range(2)
# check all valid settings
for channel in channel_settings:
old_output = lia.get_front_output(channel)
assert old_output in output_settings
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_channel = max(channel_settings) + 1
lia.get_front_output(new_channel)
def test_set_front_output():
"""Test setting front output."""
channel_settings = range(1, 3)
output_settings = range(2)
for channel in channel_settings:
old_output = lia.get_front_output(channel)
# check write/read of all valid settings
for new_output in output_settings:
lia.set_front_output(channel, new_output)
new_output_set = lia.get_front_output(channel)
assert new_output == new_output_set
# change back to old setting
lia.set_front_output(channel, old_output)
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_channel = max(channel_settings) + 1
new_output = max(output_settings) + 1
lia.set_front_output(new_channel, new_output)
def test_get_output_offset_expand():
"""Test reading front ouptut."""
parameter_settings = range(1, 4)
min_offset = -105.00
max_offset = 105.00
expand_settings = range(3)
# check all valid settings
for parameter in parameter_settings:
old_offset, old_expand = lia.get_output_offset_expand(parameter)
assert (old_offset >= min_offset) and (old_offset <= max_offset)
assert old_expand in expand_settings
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_parameter = max(parameter_settings) + 1
lia.get_output_offset_expand(new_parameter)
def test_set_output_offset_expand():
"""Test setting front output."""
parameter_settings = range(1, 4)
min_offset = -105.00
max_offset = 105.00
expand_settings = range(3)
for parameter in parameter_settings:
old_offset, old_expand = lia.get_output_offset_expand(parameter)
# check write/read of all valid settings
for new_expand in expand_settings:
while True:
new_offset = np.random.uniform(min_offset, max_offset, 1)
if new_offset != old_offset:
break
lia.set_output_offset_expand(parameter, new_offset, new_expand)
new_offset_set, new_expand_set = lia.get_output_offset_expand(parameter)
assert new_offset == new_offset_set
assert new_expand == new_expand_set
# change back to old setting
lia.set_output_offset_expand(parameter, old_offset, old_expand)
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_parameter = max(parameter_settings) + 1
new_offset = max_offset + 1
new_expand = max(expand_settings) + 1
lia.set_output_offset_expand(new_parameter, new_offset, new_expand)
def test_auto_offset():
"""Test auto offset function."""
parameter_settings = range(1, 4)
# check command issued without error
for parameter in parameter_settings:
lia.auto_offset(parameter)
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_parameter = max(parameter_settings) + 1
lia.auto_offset(new_parameter)
def test_get_aux_in():
"""Test reading aux input function."""
aux_in_settings = range(1, 5)
min_voltage = -10.0
max_voltage = 10.0
# check command issued without error
for aux_in in aux_in_settings:
voltage = lia.get_aux_in(aux_in)
assert (voltage >= min_voltage) and (voltage <= max_voltage)
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_aux_in = max(aux_in_settings) + 1
lia.get_aux_in(new_aux_in)
def test_get_aux_out():
"""Test reading aux output function."""
aux_out_settings = range(1, 5)
min_voltage = -10.0
max_voltage = 10.0
# check command issued without error
for aux_out in aux_out_settings:
voltage = lia.get_aux_out(aux_out)
assert (voltage >= min_voltage) and (voltage <= max_voltage)
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_aux_out = max(aux_out_settings) + 1
lia.get_aux_out(new_aux_out)
def test_set_aux_out():
"""Test reading aux output function."""
aux_out_settings = range(1, 5)
min_voltage = -10.0
max_voltage = 10.0
for aux_out in aux_out_settings:
old_voltage = lia.get_aux_out(aux_out)
while True:
new_voltage = np.random.uniform(min_voltage, max_voltage, 1)
if new_voltage != old_voltage:
break
lia.set_aux_out(aux_out, new_voltage)
new_voltage_set = lia.get_aux_out(aux_out)
assert math.isclose(new_voltage, new_voltage_set, rel_tol=0.05)
# set back to old value
lia.set_aux_out(aux_out, old_voltage)
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_aux_out = max(aux_out_settings) + 1
new_voltage = max_voltage + 1
lia.set_aux_out(new_aux_out, new_voltage)
def test_key_click_state():
"""Test read/write key click state property."""
settings = range(2)
_int_property_test(settings, lia.key_click_state)
def test_alarm_state():
"""Test read/write alarm property."""
settings = range(2)
_int_property_test(settings, lia.alarm_status)
def test_recall_setup():
"""Test recall setup function."""
settings = range(1, 10)
# check command processed without errors
for setting in settings:
lia.recall_setup(setting)
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_setting = max(settings) + 1
lia.recall_setup(new_setting)
def test_save_setup():
"""Test save setup function."""
settings = range(1, 10)
# check commands processed without errors
for setting in settings:
# overwrite save setting with existing setting to avoid loss
lia.recall_setup(setting)
lia.save_setup(setting)
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_setting = max(settings) + 1
lia.save_setup(new_setting)
def test_auto_gain():
"""Test auto gain function."""
lia.auto_gain()
def test_auto_reserve():
"""Test auto reserve function."""
lia.auto_reserve()
def test_auto_phase():
"""Test auto reserve function."""
lia.auto_phase()
def test_sample_rate():
"""Test read/write sample rate property."""
settings = range(15)
_int_property_test(settings, lia.sample_rate)
def test_end_of_buffer_mode():
"""Test read/write end of buffer mode property."""
settings = range(2)
_int_property_test(settings, lia.end_of_buffer_mode)
def test_trigger():
"""Test software trigger."""
lia.trigger()
def test_trigger_start_mode():
"""Test read/write trigger start mode property."""
settings = range(2)
_int_property_test(settings, lia.trigger_start_mode)
def test_data_transfer_mode():
"""Test data transfer mode property."""
settings = range(3)
_int_property_test(settings, lia.data_transfer_mode)
def test_reset_data_buffers():
"""Test reset data buffers function."""
lia.reset_data_buffers()
def test_start_pause_reset_cycle():
"""Test start, pause, reset cycle."""
old_mode = lia.data_transfer_mode
# cycle with fast transfer mode off
lia.data_transfer_mode = 0
lia.start()
lia.pause()
lia.reset_data_buffers()
# TODO: complete test for fast data transfer
# cycle with fast data transfer on if available
# if args.resource_name.startswith("GPIB"):
# lia.data_transfer_mode = 0
# lia.start()
# lia.pause()
# lia.reset_data_buffers()
def test_measure():
"""Test single measurement function."""
parameters = range(1, 5)
for parameter in parameters:
value = lia.measure(parameter)
assert type(value) == float
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_setting = max(parameters) + 1
lia.measure(new_setting)
def test_read_display():
"""Test read display function."""
channels = range(1, 3)
for channel in channels:
value = lia.read_display(channel)
assert type(value) == float
# make sure invalid setting raises an error
with pytest.raises(ValueError):
new_setting = max(channels) + 1
lia.read_display(new_setting)
def test_measure_multiple():
"""Test multiple measurement function."""
parameters = range(1, 12)
for i in range(2, 7):
# take a random sample of parameters of length i to measure
test_parameters = random.sample(parameters, i)
values = lia.measure_multiple(test_parameters)
assert len(values) == len(test_parameters)
for value in values:
assert type(value) == float
# make sure invalid setting in list raises an error
with pytest.raises(ValueError):
new_setting = [max(parameters) + 1, 1, 2]
lia.measure_multiple(new_setting)
# make sure invalid list lengths raise errors
with pytest.raises(ValueError):
new_setting = range(2, 8)
lia.measure_multiple(new_setting)
with pytest.raises(ValueError):
new_setting = [1]
lia.measure_multiple(new_setting)
def test_buffer_size():
"""Test buffer size property."""
buffer_sizes = range(16384)
# add some data to the buffer
lia.trigger_start_mode = 0
lia.end_of_buffer_mode = 0
lia.sample_rate = 13
lia.data_transfer_mode = 0
lia.reset_data_buffers()
lia.start()
time.sleep(0.5)
lia.pause()
size = lia.buffer_size
assert (size >= min(buffer_sizes)) and (size <= max(buffer_sizes))
assert type(size) is int
lia.reset_data_buffers()
def _get_buffer_data(function):
"""Get buffer data using specified function."""
channels = range(1, 3)
start_bins = range(16383)
bins = range(1, 16384)
# add some data to the buffer
lia.trigger_start_mode = 0
lia.end_of_buffer_mode = 0
lia.sample_rate = 13
lia.data_transfer_mode = 0
lia.reset_data_buffers()
lia.start()
time.sleep(0.5)
lia.pause()
buffer_size = lia.buffer_size
for channel in channels:
# read a random sample of data from the buffer
buffer_start_bin = random.choice(range(buffer_size))
assert buffer_start_bin in start_bins
buffer_bins = buffer_size - buffer_start_bin
assert buffer_bins in bins
buffer = function(channel, buffer_start_bin, buffer_bins)
assert len(buffer) == buffer_bins
for datum in buffer:
assert type(datum) is float
# make sure invalid settings raise errors
with pytest.raises(ValueError):
new_channel = max(channels) + 1
buffer_start_bin = random.choice(range(buffer_size))
assert buffer_start_bin in start_bins
buffer_bins = buffer_size - buffer_start_bin
assert buffer_bins in bins
buffer = function(new_channel, buffer_start_bin, buffer_bins)
with pytest.raises(ValueError):
new_channel = random.choice(channels)
buffer_start_bin = max(start_bins) + 1
buffer_bins = buffer_size - buffer_start_bin
buffer = function(new_channel, buffer_start_bin, buffer_bins)
with pytest.raises(ValueError):
new_channel = random.choice(channels)
buffer_start_bin = random.choice(range(buffer_size))
assert buffer_start_bin in start_bins
buffer_bins = max(bins) + 1
buffer = function(new_channel, buffer_start_bin, buffer_bins)
lia.reset_data_buffers()
def test_get_ascii_buffer_data():
"""Test get ascii buffer data function."""
_get_buffer_data(lia.get_ascii_buffer_data)
def test_get_binary_buffer_data():
"""Test get ascii buffer data function."""
_get_buffer_data(lia.get_binary_buffer_data)
def test_get_non_norm_buffer_data():
"""Test get ascii buffer data function."""
_get_buffer_data(lia.get_non_norm_buffer_data)
def test_idn():
"""Test identity property."""
idn_format = "Stanford_Research_Systems,SR830,s/n00111,ver1.000"
idn = lia.idn
assert type(idn) is str
assert len(idn) == len(idn_format)
assert len(idn.split(",")) == len(idn_format.split(","))
assert idn.split(",")[0] == idn_format.split(",")[0]
assert idn.split(",")[1] == idn_format.split(",")[1]
def test_local_mode():
"""Test read/write local mode property."""
settings = range(3)
_int_property_test(settings, lia.local_mode)
def test_gpib_override_remote():
"""Test read/write gpib override property."""
settings = range(2)
_int_property_test(settings, lia.gpib_override_remote)
def test_clear_status_registers():
"""Test clear status registers."""
lia.clear_status_registers()
def test_power_on_status_clear_bit():
"""Test power on status clear bit property."""
settings = range(2)
_int_property_test(settings, lia.power_on_status_clear_bit)
def test_disconnect():
"""Test for successful disconnection."""
lia.disconnect()
with pytest.raises(visa.InvalidSession):
lia.instr.session
``` |
{
"source": "jmbanda/snorkelBioMed",
"score": 2
} |
#### File: snorkel/features/context_features.py
```python
from collections import defaultdict
from functools import partial
from snorkel.models import Span
def get_token_count_feats(candidate, context, attr, ngram, stopwords):
"""Base function for n-gram count features
candidate: @Candidate to extract features for
context: @Context over which to count n-grams
attr: @Sentence attribute to retrieve n-grams
ngram: maximum n-gram length
stopwords: @set of stopwords to filter out from counts
"""
args = candidate.get_contexts()
if not isinstance(args[0], Span):
raise ValueError("Accepts Span-type arguments, %s-type found." % type(args[0]))
counter = defaultdict(int)
# Count n-gram instances
for tokens in (sent[attr] for sent in context.get_sentence_generator()):
for i in xrange(len(tokens)):
for j in range(i+1, min(len(tokens), i + ngram) + 1):
counter[' '.join(tokens[i:j])] += 1
# Yield counts if n-gram is not in stopwords
for gram in counter:
if (not stopwords) or not all([t in stopwords for t in gram.split()]):
yield 'TOKEN_FEATS[' + gram + ']', counter[gram]
def get_document_token_count_feats_base(candidate, attr, ngram, stopwords):
"""Apply @get_token_count_feats over the parent @Document of @candidate"""
doc = candidate.get_parent().get_parent()
return get_token_count_feats(candidate, doc, attr, ngram, stopwords)
def get_sentence_token_count_feats_base(candidate, attr, ngram, stopwords):
"""Apply @get_token_count_feats over the parent @Sentence of @candidate"""
sentence = candidate.get_parent()
return get_token_count_feats(candidate, sentence, attr, ngram, stopwords)
def get_document_token_count_feats(stopwords=None, ngram=1, attr='lemmas'):
"""Get a document token count unary function"""
return partial(get_document_token_count_feats_base, attr=attr, ngram=ngram,
stopwords=stopwords)
def get_sentence_token_count_feats(stopwords=None, ngram=1, attr='lemmas'):
"""Get a sentence token count unary function"""
return partial(get_sentence_token_count_feats_base, attr=attr, ngram=ngram,
stopwords=stopwords)
```
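For illustration, the factories above return unary functions that are applied to candidates (a minimal sketch; `cand` stands in for a Snorkel candidate whose contexts are `Span` objects, extracted upstream):
```python
# `cand` is a hypothetical Snorkel candidate
feature_fn = get_document_token_count_feats(stopwords={'the', 'of'}, ngram=2)
for name, count in feature_fn(cand):
    print(name, count)
```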
#### File: snorkel/learning/fastmulticontext.py
```python
from __future__ import print_function
import concurrent.futures
import numba
import numpy as np
import sys
import time
from collections import defaultdict
from disc_learning import NoiseAwareModel
from math import copysign, exp, log
MIN_LR = 1e-6
class FMCT(NoiseAwareModel):
"""fastmulticontext"""
def __init__(self, preprocess_function=None):
self.fmct = None
self.w = None
self.X_train = None
self.preprocess_f = preprocess_function
def train(self, training_marginals, embed_matrices, **hyperparams):
"""
Train method for fastmulticontext
training_marginals: marginal probabilities for training examples
embed_matrices: list of matrices to embed
hyperparams: fmct hyperparams, including raw_xs
"""
self.fmct = fastmulticontext(self.preprocess_f)
self.fmct.train(training_marginals, embed_matrices, **hyperparams)
def marginals(self, embed_matrices, raw_xs=None):
return self.fmct.predict(embed_matrices, raw_xs)
def get_matrix_keys(matrices):
n, m = matrices[0].shape[0], len(matrices)
embed_xs = [[[] for _ in xrange(m)] for _ in xrange(n)]
for k, matrix in enumerate(matrices):
matrix_coo = matrix.tocoo(copy=True)
for i, j, ct in zip(matrix_coo.row, matrix_coo.col, matrix_coo.data):
embed_xs[i][k].extend(
'FEATURE_{0}_{1}'.format(k, j) for _ in xrange(int(ct))
)
print("Processed {0} matrices".format(k))
return embed_xs
@numba.jit(nopython=True, nogil=True)
def fmct_activation(z, hidden_embed, wo, wo_raw, wi_sub, x_ct, x_type, x_raw):
"""
JIT function for computing activation for fmct
"""
n_classes, embed_size = wo.shape
raw_size = wo_raw.shape[1]
x_size, dim = wi_sub.shape
# Embedded features
for i in xrange(x_size):
if x_ct[i] == 0:
continue
for j in xrange(dim):
hidden_embed[j + x_type[i]*dim] += wi_sub[i][j] / x_ct[i]
# Compute activations
for k in xrange(n_classes):
for j in xrange(embed_size):
z[k] += wo[k][j] * hidden_embed[j]
for r in xrange(raw_size):
z[k] += wo_raw[k][r] * x_raw[r]
@numba.jit(nopython=True, nogil=True)
def fmct_update(wo, wo_raw, wi_sub, x_ct, x_type, x_raw, p, lr, l2_n, l1_n):
"""
JIT function for issuing SGD step of fmct
"""
n_classes, embed_size = wo.shape
raw_size = wo_raw.shape[1]
x_size, dim = wi_sub.shape
# Get activations
z = np.zeros(n_classes)
hidden_embed = np.zeros(embed_size)
fmct_activation(z, hidden_embed, wo, wo_raw, wi_sub, x_ct, x_type, x_raw)
# Compute softmax
mz = z[0]
for i in xrange(n_classes):
mz = max(mz, z[i])
s = 0
for k in xrange(n_classes):
z[k] = exp(z[k] - mz)
s += z[k]
for k in xrange(n_classes):
z[k] /= s
# Update embedding gradient and linear layer
grad = np.zeros(embed_size)
for k in xrange(n_classes):
# Noise-aware gradient calculation
# g(x) = [(1-p)\hat{p} - p(1-\hat{p})]x
alpha = lr * ((1.0-p[k])*z[k] - p[k]*(1.0-z[k]))
# Updates for embedded features
for j in xrange(embed_size):
grad[j] += alpha * wo[k][j]
# Proximal l1 gradient step for embedding weights
wo[k][j] -= alpha * hidden_embed[j]
w_abs = abs(wo[k][j])
wo[k][j] = copysign(w_abs - lr*l1_n, wo[k][j]) * (w_abs > lr*l1_n)
# Updates for raw features
for r in xrange(raw_size):
# Proximal l2 gradient step for raw weights
wo_raw[k][r] -= alpha * x_raw[r]
wo_raw[k][r] *= (1.0 - lr * l2_n)
# Update embeddings
for i in xrange(x_size):
for j in xrange(dim):
if x_ct[i] == 0:
continue
# Do not regularize embeddings
wi_sub[i][j] -= (grad[j + x_type[i]*dim] / x_ct[i])
# Return loss
pmx, lmx = 0.0, None
for k in xrange(n_classes):
if p[k] > pmx:
pmx, lmx = p[k], -log(z[k])
return lmx
@numba.jit(nopython=True, nogil=True)
def print_status(progress, loss, n_examples, lr):
""" Print training progress and loss """
print('-------------------')
print(100. * progress)
print(loss / n_examples)
print('-------------------')
@numba.jit(nopython=True, nogil=True)
def fmct_sgd_thread(thread_n, wo, wo_raw, wi, marginals, lambda_l2_n,
lambda_l1_n, epoch, n, lr, raw_xs, n_print, feat_start, feat_end,
f_cache, f_ct_cache, f_t_cache):
loss, n_examples, lr_orig = 0, 0, lr
### Run SGD ###
for kt in xrange(epoch * n):
# Update status and learning rate
k = kt % n
n_examples += 1
progress = float(kt) / (epoch * n)
lr = max(MIN_LR, lr_orig * (1.0 - progress))
# Retrieve features and probabilities
feats = f_cache[feat_start[k] : feat_end[k]]
feats_ct = f_ct_cache[feat_start[k] : feat_end[k]]
feats_type = f_t_cache[feat_start[k] : feat_end[k]]
raw_feats = raw_xs[k]
if len(feats) + len(raw_feats) == 0:
continue
# Gradient step
wi_sub = wi[feats]
loss += fmct_update(
wo, wo_raw, wi_sub, feats_ct, feats_type, raw_feats,
marginals[k], lr, lambda_l2_n, lambda_l1_n,
)
wi[feats, :] = wi_sub
# Update learning rate and print status
if thread_n == 0 and kt % n_print == 0:
print_status(progress, loss, n_examples, lr)
if thread_n == 0:
print_status(1, loss, n_examples, lr)
print('\n')
def fmct_sgd(n_threads, *args):
if n_threads == 1:
fmct_sgd_thread(0, *args)
else:
threadpool = concurrent.futures.ThreadPoolExecutor(n_threads)
threads = [
threadpool.submit(fmct_sgd_thread, i, *args)
for i in xrange(n_threads)
]
concurrent.futures.wait(threads)
for thread in threads:
if thread.exception() is not None:
raise thread.exception()
class fastmulticontext(object):
def __init__(self, preprocess_function=get_matrix_keys):
"""
Initialize fastmulticontext model
preprocess_function: function returning features for embedding seq
"""
self.vocabs = []
self.n_classes = None
self.n_embed = None
self.vocab_slice = None
self.wo = None
self.wo_raw = None
self.wi = None
self.preprocess = preprocess_function
def train(self, marginals, embed_xs, raw_xs=None, dim=100, lr=0.05,
lambda_l2=1e-7, lambda_l1=1e-7, epoch=10, min_ct=1, n_print=10000,
n_threads=4, seed=1701):
"""
Train FMCT model
marginals: marginal probabilities for training examples (array)
embed_xs: embedded features for training examples (passed to feat_f)
raw_xs: raw features for training examples (2d numpy array)
dim: dimensionality of embeddings
lr: initial learning rate
epoch: number of learning epochs
min_ct: minimum feature count for modeling
n_print: how frequently to print updates
"""
if seed is not None:
np.random.seed(seed=seed)
print("Processing data", end='\t\t')
embed_xs = self.preprocess(embed_xs) if self.preprocess else embed_xs
self.n_classes = 2 # Hardcode binary classification for now
n = len(embed_xs)
### Init feature indices ###
self.n_embed = len(embed_xs[0])
# If no raw features, add a bias term
if raw_xs is None:
raw_xs = np.ones((n, 1))
### Build vocab ###
print("Building vocab", end='\t\t')
self._build_vocabs(embed_xs, min_ct)
all_vocab_size = self.vocab_slice[-1]
feat_cache = []
feat_ct_cache = []
feat_type_cache = []
feat_start, feat_end = np.zeros(n, dtype=int), np.zeros(n, dtype=int)
s = 0
for k in xrange(n):
feats, feats_ct, feats_type = self._get_vocab_index(embed_xs[k])
feat_cache.extend(feats)
feat_ct_cache.extend(feats_ct)
feat_type_cache.extend(feats_type)
feat_start[k] = s
feat_end[k] = s + len(feats)
s += len(feats)
feat_cache = np.ravel(feat_cache).astype(int)
feat_ct_cache = np.ravel(feat_ct_cache)
feat_type_cache = np.ravel(feat_type_cache).astype(int)
### Init model ###
print("Training")
self.wo = np.zeros((self.n_classes, dim * self.n_embed))
self.wo_raw = np.zeros((self.n_classes, raw_xs.shape[1]))
self.wi = np.random.uniform(-1.0/dim, 1.0/dim, (all_vocab_size, dim))
marginals = np.array([[1.0 - float(p), float(p)] for p in marginals])
lambda_l2_n = float(lambda_l2) / n
lambda_l1_n = float(lambda_l1) / n
s = time.time()
fmct_sgd(
n_threads, self.wo, self.wo_raw, self.wi, marginals, lambda_l2_n,
lambda_l1_n, epoch, n, lr, raw_xs, n_print, feat_start, feat_end,
feat_cache, feat_ct_cache, feat_type_cache,
)
print("Training time: {0:.3f} seconds".format(time.time() - s))
def predict(self, embed_xs, raw_xs=None):
"""
Predict marginals for new examples
embed_xs: embedded features
raw_xs: raw features
"""
embed_xs = self.preprocess(embed_xs) if self.preprocess else embed_xs
n = len(embed_xs)
log_odds = np.zeros(n)
n_skipped = 0
# If no raw features, add a bias term
if raw_xs is None:
raw_xs = np.ones((n, 1))
for k in xrange(n):
x, x_raw = embed_xs[k], raw_xs[k, :]
feats, feats_ct, feats_type = self._get_vocab_index(x)
if len(feats) + np.sum(x_raw) == 0:
n_skipped += 1
log_odds[k] = 0.0
continue
wi_sub = self.wi[feats, :]
z = np.zeros(self.n_classes)
hidden_embed = np.zeros(self.wo.shape[1])
fmct_activation(
z, hidden_embed, self.wo, self.wo_raw, wi_sub,
feats_ct, feats_type, x_raw
)
log_odds[k] = z[1]
print("Skipped {0} because no feats".format(n_skipped))
return 1.0 / (1.0 + np.exp(-log_odds))
def _build_vocabs(self, embed_xs, min_ct):
"""
Build vocabulary
embed_xs: features to embed
min_ct: minimum count of feature to include in modeling
"""
if not hasattr(min_ct, '__iter__'):
min_ct = [min_ct for _ in xrange(self.n_embed)]
count_dicts = [defaultdict(int) for _ in xrange(self.n_embed)]
# Count instances of feats in corpus
for x in embed_xs:
for d, feats in enumerate(x):
for feat in feats:
count_dicts[d][feat] += 1
# Build vocab from feats with sufficient counts
self.vocabs = [{} for _ in xrange(self.n_embed)]
for d, count_dict in enumerate(count_dicts):
for feat, ct in count_dict.iteritems():
if ct >= min_ct[d]:
self.vocabs[d][feat] = len(self.vocabs[d])
print("Built vocab {0} (size={1})".format(d, len(self.vocabs[d]))),
self.vocab_slice = [0]
for vocab in self.vocabs:
self.vocab_slice.append(self.vocab_slice[-1] + len(vocab))
def _get_vocab_index(self, x):
"""
Retrieve feat indices of x
x: feature to embed
"""
# Get feature indices in each vocab
vocab_idxs = []
for d, feats in enumerate(x):
indices = []
for feat in feats:
if feat in self.vocabs[d]:
indices.append(self.vocabs[d][feat])
vocab_idxs.append(np.ravel(sorted(indices)))
# Aggregate to global index
m, s = np.sum([len(vc) for vc in vocab_idxs]), 0
feat_idxs, feat_cts, feat_type = np.zeros(m), np.zeros(m), np.zeros(m)
for i, vc in enumerate(vocab_idxs):
feat_idxs[s : s+len(vc)] = (vc + self.vocab_slice[i])
feat_cts[s : s+len(vc)] = len(vc)
feat_type[s : s+len(vc)] = i
s += len(vc)
return (
feat_idxs.astype(int), feat_cts.astype(int), feat_type.astype(int)
)
def _print_status(self, progress, loss, n_examples, lr):
""" Print training progress and loss """
sys.stdout.write(
"\rProgress: {0:06.3f}%\tLoss: {1:.6f}\tLR={2:.6f}".format(
100. * progress, loss / n_examples, lr
)
)
sys.stdout.flush()
```
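A minimal training sketch with toy inputs (illustrative only; the feature matrix, marginals and hyperparameters are placeholders, and the module's Python 2 / numba requirements still apply):
```python
import numpy as np
import scipy.sparse as sparse

marginals = np.array([0.9, 0.2, 0.8, 0.1])      # noisy training marginals
F = sparse.csr_matrix(np.array([[1., 0., 1.],
                                [0., 2., 0.],
                                [1., 1., 0.],
                                [0., 0., 1.]]))  # one sparse feature matrix

model = fastmulticontext()  # uses get_matrix_keys preprocessing by default
model.train(marginals, [F], dim=10, epoch=2, n_threads=1)
predictions = model.predict([F])
```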
#### File: snorkel/learning/logistic_regression.py
```python
import cPickle
import numpy as np
import tensorflow as tf
from disc_learning import TFNoiseAwareModel
from scipy.sparse import issparse
from time import time
from utils import get_train_idxs
class LogisticRegression(TFNoiseAwareModel):
def __init__(self, save_file=None, name='LR'):
"""Noise-aware logistic regression in TensorFlow"""
self.d = None
self.X = None
self.lr = None
self.l1_penalty = None
self.l2_penalty = None
super(LogisticRegression, self).__init__(save_file=save_file, name=name)
def _build(self):
# TODO: switch to sparse variables
self.X = tf.placeholder(tf.float32, (None, self.d))
self.Y = tf.placeholder(tf.float32, (None, 1))
w = tf.Variable(tf.random_normal((self.d, 1), mean=0, stddev=0.01))
b = tf.Variable(tf.random_normal((1, 1), mean=0, stddev=0.01))
h = tf.add(tf.matmul(self.X, w), b)
# Build model
self.loss = tf.reduce_sum(
tf.nn.sigmoid_cross_entropy_with_logits(h, self.Y)
)
self.train_fn = tf.train.ProximalGradientDescentOptimizer(
learning_rate=tf.cast(self.lr, dtype=tf.float32),
l1_regularization_strength=tf.cast(self.l1_penalty, tf.float32),
l2_regularization_strength=tf.cast(self.l2_penalty, tf.float32),
).minimize(self.loss)
self.prediction = tf.nn.sigmoid(h)
self.save_dict = {'w': w, 'b': b}
def train(self, X, training_marginals, n_epochs=10, lr=0.01,
batch_size=100, l1_penalty=0.0, l2_penalty=0.0, print_freq=5,
rebalance=False):
"""Train elastic net logistic regression model using TensorFlow
@X: SciPy or NumPy feature matrix
@training_marginals: array of marginals for examples in X
@n_epochs: number of training epochs
@lr: learning rate
@batch_size: batch size for mini-batch SGD
@l1_penalty: l1 regularization strength
@l2_penalty: l2 regularization strength
@print_freq: number of epochs after which to print status
@rebalance: rebalance training examples?
"""
# Build model
verbose = print_freq > 0
if verbose:
print("[{0}] lr={1} l1={2} l2={3}".format(
self.name, lr, l1_penalty, l2_penalty
))
print("[{0}] Building model".format(self.name))
self.d = X.shape[1]
self.lr = lr
self.l1_penalty = l1_penalty
self.l2_penalty = l2_penalty
self._build()
# Get training indices
train_idxs = get_train_idxs(training_marginals, rebalance=rebalance)
X_train = X[train_idxs, :]
y_train = np.ravel(training_marginals)[train_idxs]
# Run mini-batch SGD
n = X_train.shape[0]
batch_size = min(batch_size, n)
if verbose:
st = time()
print("[{0}] Training model #epochs={1} batch={2}".format(
self.name, n_epochs, batch_size
))
self.session.run(tf.global_variables_initializer())
for t in xrange(n_epochs):
epoch_loss = 0.0
for i in range(0, n, batch_size):
# Get batch tensors
r = min(n, i+batch_size)
x_batch = X_train[i:r, :].todense()
y_batch = y_train[i:r]
y_batch = y_batch.reshape((len(y_batch), 1))
# Run training step and evaluate loss function
epoch_loss += self.session.run([self.loss, self.train_fn], {
self.X: x_batch,
self.Y: y_batch,
})[0]
# Print training stats
if verbose and (t % print_freq == 0 or t in [0, (n_epochs-1)]):
print("[{0}] Epoch {1} ({2:.2f}s)\tAverage loss={3:.6f}".format(
self.name, t, time() - st, epoch_loss / n
))
if verbose:
print("[{0}] Training done ({1:.2f}s)".format(self.name, time()-st))
def marginals(self, X_test):
X = X_test.todense() if issparse(X_test) else X_test
return np.ravel(self.session.run([self.prediction], {self.X: X}))
def save_info(self, model_name):
with open('{0}.info'.format(model_name), 'wb') as f:
cPickle.dump((self.d, self.lr, self.l1_penalty, self.l2_penalty), f)
def load_info(self, model_name):
with open('{0}.info'.format(model_name), 'rb') as f:
self.d, self.lr, self.l1_penalty, self.l2_penalty = cPickle.load(f)
```
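Usage mirrors the other Snorkel discriminative models (a rough sketch; it assumes the surrounding Snorkel / TensorFlow 1.x environment, and the feature matrix and marginals are toy placeholders):
```python
import numpy as np
import scipy.sparse as sparse

X_train = sparse.csr_matrix(np.random.rand(50, 10))
train_marginals = np.random.rand(50)

disc_model = LogisticRegression(name='demo_LR')
disc_model.train(X_train, train_marginals, n_epochs=5, batch_size=25, lr=0.01)
test_marginals = disc_model.marginals(X_train)
```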
#### File: learning/structure/gen_learning.py
```python
from ..constants import *
from numba import jit
import numpy as np
import random
class DependencySelector(object):
"""
Heuristic for identifying dependencies among labeling functions.
:param seed: seed for the random number generator used to initialize the weights
"""
def __init__(self, seed=271828):
self.rng = random.Random()
self.rng.seed(seed)
def select(self, L, propensity=False, threshold=0.05, truncation=10):
try:
L = L.todense()
except AttributeError:
pass
m, n = L.shape
# Initializes data structures
deps = set()
weights = np.zeros((5 * n + 1,)) if propensity else np.zeros((5 * n,))
joint = np.zeros((6,))
# joint[0] = P(Y = -1, L_j = -1)
# joint[1] = P(Y = -1, L_j = 0)
# joint[2] = P(Y = -1, L_j = 1)
# joint[3] = P(Y = 1, L_j = -1)
# joint[4] = P(Y = 1, L_j = 0)
# joint[5] = P(Y = 1, L_j = 1)
for j in range(n):
# Initializes weights
for k in range(n):
weights[k] = 1.1 - .2 * self.rng.random()
for k in range(n, len(weights)):
weights[k] = 0.0
if propensity:
weights[5 * n] = -2.0
_fit_deps(m, n, j, L, weights, joint, propensity, threshold, truncation)
for k in range(n):
if abs(weights[n + k]) > threshold:
deps.add((j, k, DEP_REINFORCING))
if abs(weights[2 * n + k]) > threshold:
deps.add((k, j, DEP_REINFORCING))
if abs(weights[3 * n + k]) > threshold:
deps.add((j, k, DEP_FIXING))
if abs(weights[4 * n + k]) > threshold:
deps.add((k, j, DEP_FIXING))
return deps
@jit(nopython=True, cache=True, nogil=True)
def _fit_deps(m, n, j, L, weights, joint, propensity, regularization, truncation):
step_size = 1.0 / m
epochs = 10
p_truncation = 1.0 / truncation
l1delta = regularization * step_size * truncation
for _ in range(epochs):
for i in range(m):
# Processes a training example
# First, computes joint and conditional distributions
joint[:] = 0, 0, 0, 0, 0, 0
for k in range(n):
if j == k:
# Accuracy
joint[0] += weights[j]
joint[5] += weights[j]
joint[2] -= weights[j]
joint[3] -= weights[j]
else:
if L[i, k] == 1:
# Accuracy
joint[0] -= weights[k]
joint[1] -= weights[k]
joint[2] -= weights[k]
joint[3] += weights[k]
joint[4] += weights[k]
joint[5] += weights[k]
# Reinforcement
joint[5] += weights[n + k] + weights[2 * n + k]
joint[1] -= weights[n + k]
joint[4] -= weights[n + k]
# Fixing
joint[3] += weights[3 * n + k]
joint[1] -= weights[3 * n + k]
joint[4] -= weights[3 * n + k]
joint[0] += weights[4 * n + k]
elif L[i, k] == -1:
# Accuracy
joint[0] += weights[k]
joint[1] += weights[k]
joint[2] += weights[k]
joint[3] -= weights[k]
joint[4] -= weights[k]
joint[5] -= weights[k]
# Reinforcement
joint[0] += weights[n + k] + weights[2 * n + k]
joint[1] -= weights[n + k]
joint[4] -= weights[n + k]
# Fixing
joint[2] += weights[3 * n + k]
joint[1] -= weights[3 * n + k]
joint[4] -= weights[3 * n + k]
joint[5] += weights[4 * n + k]
else:
# Reinforcement
joint[0] -= weights[2 * n + k]
joint[2] -= weights[2 * n + k]
joint[3] -= weights[2 * n + k]
joint[5] -= weights[2 * n + k]
# Fixing
joint[0] -= weights[4 * n + k]
joint[2] -= weights[4 * n + k]
joint[3] -= weights[4 * n + k]
joint[5] -= weights[4 * n + k]
if propensity:
joint[0] += weights[5 * n]
joint[2] += weights[5 * n]
joint[3] += weights[5 * n]
joint[5] += weights[5 * n]
joint = np.exp(joint)
joint /= np.sum(joint)
marginal_pos = np.sum(joint[3:6])
marginal_neg = np.sum(joint[0:3])
if L[i, j] == 1:
conditional_pos = joint[5] / (joint[2] + joint[5])
conditional_neg = joint[2] / (joint[2] + joint[5])
elif L[i, j] == -1:
conditional_pos = joint[3] / (joint[0] + joint[3])
conditional_neg = joint[0] / (joint[0] + joint[3])
else:
conditional_pos = joint[4] / (joint[1] + joint[4])
conditional_neg = joint[1] / (joint[1] + joint[4])
# Second, takes likelihood gradient step
for k in range(n):
if j == k:
# Accuracy
weights[j] -= step_size * (joint[5] + joint[0] - joint[2] - joint[3])
if L[i, j] == 1:
weights[j] += step_size * (conditional_pos - conditional_neg)
elif L[i, j] == -1:
weights[j] += step_size * (conditional_neg - conditional_pos)
else:
if L[i, k] == 1:
# Accuracy
weights[k] -= step_size * (marginal_pos - marginal_neg - conditional_pos + conditional_neg)
# Incoming reinforcement
weights[n + k] -= step_size * (joint[5] - joint[1] - joint[4])
if L[i, j] == 1:
weights[n + k] += step_size * conditional_pos
elif L[i, j] == 0:
weights[n + k] += step_size * -1
# Outgoing reinforcement
weights[2 * n + k] -= step_size * joint[5]
if L[i, j] == 1:
weights[2 * n + k] += step_size * conditional_pos
# Incoming fixing
weights[3 * n + k] -= step_size * (joint[3] - joint[1] - joint[4])
if L[i, j] == -1:
weights[3 * n + k] += step_size * conditional_pos
elif L[i, j] == 0:
weights[3 * n + k] += step_size * -1
# Outgoing fixing
weights[4 * n + k] -= step_size * joint[0]
if L[i, j] == -1:
weights[4 * n + k] += step_size * conditional_neg
elif L[i, k] == -1:
# Accuracy
weights[k] -= step_size * (marginal_neg - marginal_pos - conditional_neg + conditional_pos)
# Incoming reinforcement
weights[n + k] -= step_size * (joint[0] - joint[1] - joint[4])
if L[i, j] == -1:
weights[n + k] += step_size * conditional_neg
elif L[i, j] == 0:
weights[n + k] += step_size * -1
# Outgoing reinforcement
weights[2 * n + k] -= step_size * joint[0]
if L[i, j] == -1:
weights[2 * n + k] += step_size * conditional_neg
# Incoming fixing
weights[3 * n + k] -= step_size * (joint[2] - joint[1] - joint[4])
if L[i, j] == 1:
weights[3 * n + k] += step_size * conditional_neg
elif L[i, j] == 0:
weights[3 * n + k] += step_size * -1
# Outgoing fixing
weights[4 * n + k] -= step_size * joint[5]
if L[i, j] == 1:
weights[4 * n + k] += step_size * conditional_pos
else:
# No effect of incoming reinforcement
# Outgoing reinforcement
weights[2 * n + k] -= step_size * (-1 * joint[0] - joint[2] - joint[3] - joint[5])
if L[i, j] != 0:
weights[2 * n + k] += step_size * -1
# No effect of incoming fixing
# Outgoing fixing
weights[4 * n + k] -= step_size * (-1 * joint[0] - joint[2] - joint[3] - joint[5])
if L[i, j] != 0:
weights[4 * n + k] += step_size * -1
if propensity:
weights[5 * n] -= step_size * (joint[0] + joint[2] + joint[3] + joint[5])
if L[i, j] != 0:
weights[5 * n] += step_size
# Third, takes regularization gradient step
if random.random() < p_truncation:
for k in range(5 * n + 1 if propensity else 5 * n):
weights[k] = max(0, weights[k] - l1delta) if weights[k] > 0 else min(0, weights[k] + l1delta)
```
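A small sketch of how the selector is typically invoked (illustrative; `L` is a toy label matrix with one row per candidate and one column per labeling function):
```python
import numpy as np

L = np.array([[ 1,  1,  0],
              [-1, -1,  0],
              [ 1,  0,  1],
              [ 0, -1, -1]])

deps = DependencySelector().select(L, threshold=0.1)
print(deps)  # set of (LF_i, LF_j, dependency_type) tuples
```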
#### File: snorkelBioMed/snorkel/utils.py
```python
import re
import sys
import numpy as np
import scipy.sparse as sparse
class ProgressBar(object):
def __init__(self, N, length=40):
# Protect against division by zero (N = 0 results in full bar being printed)
self.N = max(1, N)
self.nf = float(self.N)
self.length = length
# Precalculate the i values that should trigger a write operation
self.ticks = set([round(i/100.0 * N) for i in range(101)])
self.ticks.add(N-1)
self.bar(0)
def bar(self, i):
"""Assumes i ranges through [0, N-1]"""
if i in self.ticks:
b = int(np.ceil(((i+1) / self.nf) * self.length))
sys.stdout.write("\r[%s%s] %d%%" % ("="*b, " "*(self.length-b), int(100*((i+1) / self.nf))))
sys.stdout.flush()
def close(self):
# Move the bar to 100% before closing
self.bar(self.N-1)
sys.stdout.write("\n\n")
sys.stdout.flush()
def get_ORM_instance(ORM_class, session, instance):
"""
Given an ORM class and *either an instance of this class, or the name attribute of an instance
of this class*, return the instance
"""
if isinstance(instance, str):
return session.query(ORM_class).filter(ORM_class.name == instance).one()
else:
return instance
def camel_to_under(name):
"""
Converts camel-case string to lowercase string separated by underscores.
Written by epost
(http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case).
:param name: String to be converted
:return: new String with camel-case converted to lowercase, underscored
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def sparse_abs(X):
"""Element-wise absolute value of sparse matrix- avoids casting to dense matrix!"""
X_abs = X.copy()
if not sparse.issparse(X):
return abs(X_abs)
if sparse.isspmatrix_csr(X) or sparse.isspmatrix_csc(X):
X_abs.data = np.abs(X_abs.data)
elif sparse.isspmatrix_lil(X):
X_abs.data = np.array([np.abs(L) for L in X_abs.data])
else:
raise ValueError("Only supports CSR/CSC and LIL matrices")
return X_abs
def matrix_coverage(L):
"""
Given an N x M matrix where L_{i,j} is the label given by the jth LF to the ith candidate:
Return the **fraction of candidates that each LF labels.**
"""
return np.ravel(sparse_abs(L).sum(axis=0) / float(L.shape[0]))
def matrix_overlaps(L):
"""
Given an N x M matrix where L_{i,j} is the label given by the jth LF to the ith candidate:
Return the **fraction of candidates that each LF _overlaps with other LFs on_.**
"""
L_abs = sparse_abs(L)
return np.ravel(np.where(L_abs.sum(axis=1) > 1, 1, 0).T * L_abs / float(L.shape[0]))
def matrix_conflicts(L):
"""
Given an N x M matrix where L_{i,j} is the label given by the jth LF to the ith candidate:
Return the **fraction of candidates that each LF _conflicts with other LFs on_.**
"""
L_abs = sparse_abs(L)
return np.ravel(np.where(L_abs.sum(axis=1) != sparse_abs(L.sum(axis=1)), 1, 0).T * L_abs / float(L.shape[0]))
def matrix_accuracy(L, labels, label_class=None):
"""
Given an N x M matrix where L_{i,j} is the label given by the jth LF to the ith candidate
and an N x 1 vector where v_{i} is the gold label given to the ith candidate:
Return the **fraction of candidates that each LF covered and agreed with the gold labels**
"""
accs = []
for j in xrange(L.shape[1]):
cov = np.ravel((L[:, j] != 0).todense())
cov *= (labels != 0)
if label_class is not None:
cov *= (labels == label_class)
accs.append(np.mean(L[cov, j] == labels[cov]))
return np.ravel(accs)
def matrix_tp(L, labels):
return np.ravel([
np.sum(np.ravel((L[:, j] == 1).todense()) * (labels == 1)) for j in xrange(L.shape[1])
])
def matrix_fp(L, labels):
return np.ravel([
np.sum(np.ravel((L[:, j] == 1).todense()) * (labels == -1)) for j in xrange(L.shape[1])
])
def matrix_tn(L, labels):
return np.ravel([
np.sum(np.ravel((L[:, j] == -1).todense()) * (labels == -1)) for j in xrange(L.shape[1])
])
def matrix_fn(L, labels):
return np.ravel([
np.sum(np.ravel((L[:, j] == -1).todense()) * (labels == 1)) for j in xrange(L.shape[1])
])
def get_as_dict(x):
"""Return an object as a dictionary of its attributes"""
if isinstance(x, dict):
return x
else:
try:
return x._asdict()
except AttributeError:
return x.__dict__
def sort_X_on_Y(X, Y):
return [x for (y,x) in sorted(zip(Y,X), key=lambda t : t[0])]
def corenlp_cleaner(words):
d = {'-RRB-': ')', '-LRB-': '(', '-RCB-': '}', '-LCB-': '{',
'-RSB-': ']', '-LSB-': '['}
return map(lambda w: d[w] if w in d else w, words)
def tokens_to_ngrams(tokens, n_max=3, delim=' '):
N = len(tokens)
for root in range(N):
for n in range(min(n_max, N - root)):
yield delim.join(tokens[root:root+n+1])
```
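For example (a small sketch with toy inputs), the n-gram and label-matrix helpers behave as follows:
```python
import numpy as np
import scipy.sparse as sparse

# n-gram generation over a token sequence
print(list(tokens_to_ngrams(['the', 'quick', 'fox'], n_max=2)))
# ['the', 'the quick', 'quick', 'quick fox', 'fox']

# per-LF coverage over a toy 3-candidate x 2-LF label matrix
L = sparse.csr_matrix(np.array([[1, -1], [0, 1], [0, 0]]))
print(matrix_coverage(L))  # approx. [0.333, 0.667]
```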
#### File: treedlib/treedlib/templates.py
```python
from itertools import chain
import re
import lxml.etree as et
from collections import defaultdict
# NODESET:
# ===========
class NodeSet:
"""
NodeSet objects are functions f : 2^T -> 2^T
---------------
They are applied compositionally and lazily, by constructing an xpath query
We use these to get the *subtree* or set of nodes that our indicator features will
operate over
"""
def __init__(self, label='NODESET', xpath='//*', psort=None):
self.label = label
self.xpath = xpath
self.psort = psort # Attribute to sort on post-xpath execution
def __repr__(self):
return '<%s, xpath="%s">' % (self.label, self.xpath)
class Mention(NodeSet):
"""Gets candidate mention nodes"""
def __init__(self, cid=0):
self.label = 'MENTION'
self.xpath = "//*[{%s}]" % str(cid)
class LeftSiblings(NodeSet):
"""Gets preceding siblings"""
def __init__(self, ns, w=1):
self.__dict__.update(ns.__dict__) # inherit child object's attributes
self.label = 'LEFT-OF-%s' % ns.label
self.xpath = '%s[1]/preceding-sibling::*[position() <= %s]' % (ns.xpath, w)
class RightSiblings(NodeSet):
"""Gets following siblings"""
def __init__(self, ns, w=1):
self.__dict__.update(ns.__dict__) # inherit child object's attributes
self.label = 'RIGHT-OF-%s' % ns.label
self.xpath = '%s[1]/following-sibling::*[position() <= %s]' % (ns.xpath, w)
# TODO: These should be "Descendants" / "Ancestors"...
class Children(NodeSet):
"""Gets children of the node set"""
def __init__(self, ns):
self.__dict__.update(ns.__dict__) # inherit child object's attributes
self.label = 'CHILDREN-OF-%s' % ns.label
self.xpath = ns.xpath + '[1]/*'
class Parents(NodeSet):
"""Gets parents of the node set"""
def __init__(self, ns, num_parents=1):
self.__dict__.update(ns.__dict__) # inherit child object's attributes
self.label = 'PARENTS-OF-%s' % ns.label
self.xpath = ns.xpath + '[1]/ancestor::*[position()<%s]' % (num_parents + 1)
class Between(NodeSet):
"""
Gets the nodes between two node sets
Note: this requires some ugly xpath... could change this to non-xpath method
"""
def __init__(self, ns1, ns2):
self.__dict__.update(ns1.__dict__) # inherit *FIRST* child object's attributes
self.label = 'BETWEEN-%s-and-%s' % (ns1.label, ns2.label)
self.xpath = "{0}[1]/ancestor-or-self::*[count(. | {1}[1]/ancestor-or-self::*) = count({1}[1]/ancestor-or-self::*)][1]/descendant-or-self::*[((count(.{0}) = count({0})) or (count(.{1}) = count({1})))]".format(ns1.xpath, ns2.xpath)
class SeqBetween(NodeSet):
"""
Gets the sequence of nodes in between, according to *sentence* (not dep tree) order
"""
def __init__(self, seq_attrib='word_idx'):
# TODO: Extend to take in pair of NodeSets?
self.xpath = '//*'
self.label = 'SEQ-BETWEEN'
self.seq_attrib = seq_attrib # Logic gets pushed to Indicator...
self.psort = seq_attrib # Specify that post-xpath sorting needs to be done
class Filter(NodeSet):
"""
Gets a subset of the nodes filtered by some node attribute
Note the option to do exact match or starts with (could be expanded; useful for POS now...)
"""
def __init__(self, ns, filter_attr, filter_by, starts_with=True):
self.__dict__.update(ns.__dict__) # inherit child object's attributes
self.label = 'FILTER-BY(%s=%s):%s' % (filter_attr, filter_by, ns.label)
temp = "[starts-with(@%s, '%s')]" if starts_with else "[@%s='%s']"
self.xpath = ns.xpath + temp % (filter_attr, filter_by)
# INDICATOR:
# ===========
def compile_dict_sub(brown_clusters_path=None, user_dicts=[]):
"""
Takes in a list of tuples of form (DICT_LABEL, set_of_words)
AND/OR a file path to a tsv file list of (word, brown cluster id) lines
And returns a single dictionary mapping from word -> DICT_LABEL, based on priority ordering
Assume user dicts take priority over brown clusters...
"""
dict_sub = {}
# User dictionaries
for dict_label, words in user_dicts:
for word in words:
if word not in dict_sub:
dict_sub[word] = dict_label
# Brown clusters
if brown_clusters_path is not None:
with open(brown_clusters_path, 'rb') as f:
for line in f:
word, cluster_id = line.rstrip().split('\t')
dict_sub[word] = 'BC-%s' % cluster_id
return dict_sub
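# Illustrative example (not part of the original source): with only user
# dictionaries, compile_dict_sub maps each word to its dictionary label, e.g.
#   compile_dict_sub(user_dicts=[('COLOR', {'red', 'blue'})])
#   -> {'red': 'COLOR', 'blue': 'COLOR'}
# A word keeps the label of the first dictionary that lists it.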
class Indicator:
"""
Indicator objects are functions f : 2^T -> {0,1}^F
---------------
Indicator objects take a NodeSet, an attribute or attributes, and apply some indicator
function to the specified attributes of the NodeSet
"""
def __init__(self, ns, attribs):
self.ns = ns
self.attribs = attribs
def apply(self, root, cids, cid_attrib='word_idx', feat_label=True, inv_tag=True, stopwords=None, dict_sub={}):
"""
Apply the feature template to the xml tree provided
A list of lists of candidate mention ids are passed in, as well as a cid_attrib
These identify the candidate mentions referred to by index in Mention
For example, cids=[[1,2]], cid_attrib='word_idx' will have mention 0 as the set of nodes
that have word index 1 and 2
"""
# Sub in the candidate mention identifiers provided
m = [" or ".join("@%s='%s'" % (cid_attrib, c) for c in cid) for cid in cids]
xpath = self.ns.xpath.format(*m)
# INV tag if binary relation
inv = 'INV_' if inv_tag and len(cids) == 2 and cids[0][0] > cids[1][0] else ''
# Get nodes
nodes = root.xpath(xpath)
# Filter stopwords
if stopwords is not None and len(stopwords) > 0:
nodes = filter(lambda n : n.get('word') not in stopwords and n.get('lemma') not in stopwords, nodes)
# Perform seq filter here
if hasattr(self.ns, 'seq_attrib') and self.ns.seq_attrib is not None:
seqa = self.ns.seq_attrib
b = (cids[0][-1], cids[-1][0]) if cids[0][-1] < cids[-1][0] else (cids[-1][-1], cids[0][0])
nodes = filter(lambda n : n.get(seqa) is not None and int(n.get(seqa)) > b[0] and int(n.get(seqa)) < b[1], nodes)
# If sort specified, perform here
if hasattr(self.ns, 'psort') and self.ns.psort is not None:
nodes.sort(key=lambda n : int(n.get(self.ns.psort)))
# Specifically handle single attrib or multiple attribs per node here
try:
attribs = re.split(r'\s*,\s*', self.attribs)
res = ['|'.join(str(node.get(a)) for a in attribs) for node in nodes]
label = '%s%s:%s' % (inv, '|'.join(attribs).upper(), self.ns.label)
# Check each result value against a dictionary which maps string -> DICT_NAME,
# and replace with the value "DICT_NAME"
# NOTE: Only apply to word/lemma indicators for now
if len(attribs) == 1 and attribs[0] in ('word', 'lemma') and len(dict_sub) > 0:
res = [dict_sub.get(a, a) for a in res]
except AttributeError:
res = nodes
label = '%s%s' % (inv, self.ns.label)
# Only yield if non-zero result set; process through _get_features fn
if len(res) > 0:
for feat in self._get_features(res):
if feat_label:
yield '%s[%s]' % (label, feat)
else:
yield feat
def _get_features(self, res):
"""
Given a result set of attribute values, return a set of strings representing the features
This should be the default method to replace for Indicator objects
"""
return [' '.join(res)]
def print_apply(self, root, cids, cid_attrib='word_idx', feat_label=True, dict_sub={}, stopwords=None):
for feat in self.apply(root, cids, cid_attrib, feat_label=feat_label, dict_sub=dict_sub, stopwords=stopwords):
print feat
def result_set(self, root, cids, cid_attrib='word_idx', feat_label=False, dict_sub={}, stopwords=None):
"""Get results as a set- mostly for use in DSR applications"""
res = set()
for feat in self.apply(root, cids, cid_attrib=cid_attrib, feat_label=feat_label, dict_sub=dict_sub, stopwords=stopwords):
res.add(feat)
return res
def __repr__(self):
return '<%s:%s:%s, xpath="%s">' % (self.__class__.__name__, self.attribs, self.ns.label, self.ns.xpath)
class Ngrams(Indicator):
"""
Return indicator features over the ngrams of a result set
If ng arg is an int, will get ngrams of *exactly* this length
If ng arg is a list/tuple, will get all ngrams of this range, *inclusive*
"""
def __init__(self, ns, attribs, ng):
self.ns = ns
self.attribs = attribs
if (type(ng) == int and ng > 0) or (type(ng) in [list, tuple] and ng[0] > 0):
self.ng = ng
else:
raise ValueError("Improper ngram range: %s" % ng)
def _get_features(self, res):
if type(self.ng) == int:
r = [self.ng - 1]
else:
r = range(self.ng[0] - 1, min(len(res), self.ng[1]))
return chain.from_iterable([' '.join(res[s:s+l+1]) for s in range(len(res)-l)] for l in r)
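# Illustrative check (not in the original source): for res = ['a', 'b', 'c'],
# an Ngrams template with ng=2 yields 'a b' and 'b c', while ng=(1, 2) additionally
# yields the unigrams 'a', 'b' and 'c'.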
class RightNgrams(Indicator):
"""Return all the ngrams which start at position 0"""
def _get_features(self, res):
return [' '.join(res[:l]) for l in range(1, len(res)+1)]
class LeftNgrams(Indicator):
"""Return all the ngrams which start at position 0"""
def _get_features(self, res):
return [' '.join(res[l:]) for l in range(len(res))]
class Regexp(Indicator):
"""
Return indicator features if the regular expression applied to the
concatenation of the result set strings is not None
"""
def __init__(self, ns, attribs, rgx, rgx_label, sep=' '):
self.ns = ns
self.attribs = attribs
self.rgx = rgx
self.rgx_label = rgx_label
self.sep = sep
self.psort = 'word_idx' # Sort by word order...
def _get_features(self, res):
match = re.search(self.rgx, self.sep.join(res))
if match is not None:
yield 'RGX:%s' % self.rgx_label
class LengthBin(Indicator):
"""
Return indicator features for the length (size) of the node set
binned according to provided values
bins should be a list of INTS
"""
def __init__(self, ns, bin_divs):
self.ns = ns
self.bins = []
for i,d in enumerate(bin_divs):
if i == 0:
self.bins.append((0,d-1))
else:
self.bins.append((bin_divs[i-1],d-1))
def _get_features(self, res):
lbin = None
l = len(res)
for b in self.bins:
if l >= b[0] and l <= b[1]:
lbin = b
break
if lbin is None:
lbin = (self.bins[-1][1]+1, 'inf')
yield 'LEN:%s-%s' % lbin
# TODO: Make this way more efficient...?
class DictionaryIntersect(Indicator):
"""
Return an indicator feature for whether the input nodeset intersects with any phrase in
the given dictionary
"""
def __init__(self, ns, d_name, d, d_attrib='word', caseless=True):
self.ns = ns
self.d_name = d_name
self.d_attrib = d_attrib
self.caseless = caseless
# Split the dictionary up by phrase length (i.e. # of tokens)
self.dl = defaultdict(lambda : set())
for phrase in d:
if caseless:
phrase = phrase.lower()
self.dl[len(phrase.split())].add(phrase)
self.dl.update((k, frozenset(v)) for k,v in self.dl.iteritems())
# Get the ngram range for this dictionary
self.ng_range = range(max(1, min(self.dl.keys())), max(self.dl.keys())+1)
def apply(self, root, cids, cid_attrib='word_idx', feat_label=True):
"""
We replace the default apply method because we first need to get the full sequence,
match against ngrams of this, then match via cid_attrib against the input NodeSet
We do this because we need to catch e.g. when a multi-word phrase in the dictionary
only partially-overlaps with the NodeSet (this should count as a match!)
"""
# First get full sequence
fs = map(lambda x : x.get(self.d_attrib), sorted(root.xpath("//*[@word_idx]"), key=lambda x : int(x.get('word_idx'))))
# Next do sequence n-gram matching
dcids = set()
for l in self.ng_range:
for i in range(0, len(fs)-l+1):
phrase = ' '.join(fs[i:i+l]).lower() if self.caseless else ' '.join(fs[i:i+l])
if phrase in self.dl[l]:
dcids.update(range(i, i+l))
# Finally, just look for intersect via XPATH + using the super method
# TODO: How to call parent method here!?
if len(dcids) > 0:
self.ns.xpath += '[' + " or ".join("@word_idx='%s'" % i for i in dcids) + ']'
m = [" or ".join("@%s='%s'" % (cid_attrib, c) for c in cid) for cid in cids]
xpath = self.ns.xpath.format(*m)
if len(root.xpath(xpath)) > 0:
yield "DICTIONARY-MATCH:%s:%s" % (self.d_name, self.ns.label)
# COMBINATOR:
# ===========
class Combinator:
"""
Combinator objects are functions f : {0,1}^F x {0,1}^F -> {0,1}^F
---------------
Combinator objects take two (or more?) Indicator objects and map to feature space
"""
def __init__(self, ind1, ind2):
self.ind1 = ind1
self.ind2 = ind2
def apply(self, root, cids, cid_attrib='word_idx', dict_sub={}, stopwords=None):
return self.ind1.apply(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords)
def print_apply(self, root, cids, cid_attrib='word_idx', dict_sub={}, stopwords=None):
return self.apply(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords)
class Combinations(Combinator):
"""Generates all *pairs* of features"""
def apply(self, root, cids, cid_attrib='word_idx', dict_sub={}, stopwords=None):
for f1 in self.ind1.apply(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords):
for f2 in self.ind2.apply(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords):
yield '%s+%s' % (f1, f2)
# Compile Operator: Compiles a set of feature templates
# =====================================================
class Compile:
"""
Compiles a set of functions f_i : 2^T -> {0,1}^F_i to a single function 2^T -> {0,1}^F
where F <= \sum_i F_i
i.e. we can do filtering and/or merging at this point (?)
"""
def __init__(self, op_list):
self.op_list = op_list
def _iterops(self):
"""Iterate over the operators provided, accepting list of single or list elements"""
for ops in self.op_list:
if type(ops) == list:
for op in ops:
yield op
# Guard against e.g. generators, which would be exhausted after one iteration
# and thus, after being applied to one data item, would yield nothing!
elif hasattr(ops, '__iter__'):
raise ValueError("Iterables of operators in Compile must be list type.")
else:
yield ops
def apply(self, root, cids, cid_attrib='word_idx', dict_sub={}, stopwords=None):
# Ensure that root is parsed
if type(root) == str:
root = et.fromstring(root)
# Apply the feature templates
for op in self._iterops():
for f in op.apply(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords):
yield f
def result_set(self, root, cids, cid_attrib='word_idx', dict_sub={}, stopwords=None):
"""Takes the union of the result sets"""
# Ensure that root is parsed
if type(root) == str:
root = et.fromstring(root)
# Apply the feature templates
res = set()
for op in self._iterops():
res.update(op.result_set(root, cids, cid_attrib, dict_sub=dict_sub, stopwords=stopwords))
return res
def apply_mention(self, root, mention_idxs, dict_sub={}, stopwords=None):
return self.apply(root, [mention_idxs], dict_sub=dict_sub, stopwords=stopwords)
def apply_relation(self, root, mention1_idxs, mention2_idxs, dict_sub={}, stopwords=None):
return self.apply(root, [mention1_idxs, mention2_idxs], dict_sub=dict_sub, stopwords=stopwords)
def __repr__(self):
return '\n'.join(str(op) for op in self._iterops())
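# Minimal usage sketch (not from the original repository; the XML snippet and the
# attribute values below are assumptions, based only on the attribute names the
# classes above reference, e.g. 'word', 'lemma' and 'word_idx'):
#
#   xml = "<node word='Obama' lemma='obama' word_idx='0'/>"  # hypothetical dep-tree XML
#   templates = Compile([
#       Ngrams(Mention(0), 'word', (1, 2)),            # n-grams over the mention itself
#       Ngrams(LeftSiblings(Mention(0)), 'lemma', 1),  # unigrams of the left sibling
#   ])
#   for feature in templates.apply_mention(xml, [0]):
#       print feature
#
# Compile.apply parses the XML when given a string and delegates to each template,
# which substitutes the candidate word_idx values into its xpath and yields string features.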
``` |
{
"source": "jmbarbone/pyjordan",
"score": 4
} |
#### File: pyjordan/pyjordan/tools.py
```python
def is_none(x):
""" Is none
True/False is value is None
Parameters
----------
x : An object
Returns
-------
True if x is None, otherwise False
"""
return x is None
def exists(x, where="local"):
""" Exists
Description
-----------
Checks if an object exists by the name of the object
Parameters
----------
x : str
The name of an object as a string
where : str, "local" (default), "global", or "builtin"
Where to search for the object. If not one of the three above, raises
an exception
Returns
-------
If object is found, True, otherwise False
References
----------
Adapted from: https://stackoverflow.com/a/6386015/12126576
"""
import builtins
import sys
if where == "local":
res = x in sys._getframe(1).f_locals
elif where == "global":
res = x in sys._getframe(1).f_globals
elif where == "builtin":
res = x in vars(builtins)
else:
raise Warning("`where` should be one of: 'local', 'global', 'builtin'")
return(res)
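# Hypothetical usage (not in the original source):
#   foo = 1
#   exists("foo")               # True  -- found in the caller's frame
#   exists("bar")               # False
#   exists("print", "builtin")  # True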
def print_time(x):
""" Print the current
Description
-----------
Prepends the current time to the printed message
Parameters
----------
x : String
A message to be printed
"""
from datetime import datetime
ts = datetime.now().strftime("%y-%m-%d %H:%M:%S")
print(f"[{ts}] {x}", flush=True)
return None
def round_by(x, by, method="round"):
""" Round by
Description
-----------
Rounds a number by another
Parameters
----------
x : numeric
A number or list to round
by : numeric
The number or list by which to round
method : string
The method of rounding to use: round, ceiling, or floor
Returns
-------
A list of rounded numbers
"""
from math import floor, ceil
x = as_list(x)
by = as_list(by)
FUN = {
"round": round,
"ceiling": ceil,
"floor": floor,
}
if method not in ["round", "ceiling", "floor"]:
raise Exception('`method` must be one of: "round", "ceiling", "floor"')
try:
return [FUN[method](i / b) * b for b in by for i in x]
except KeyError:
raise Exception('`method` must be one of: "round", "ceiling", "floor"')
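# Illustrative examples (not in the original source):
#   round_by(7, 5)                    # [5]
#   round_by(7, 5, method="ceiling")  # [10]
#   round_by(12, [5, 10])             # [10, 10] -- one result per (by, x) pair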
def unnest(x):
""" Unnest
Description
-----------
Unnests a list of lists
Parameters
----------
x : list
A list to be unnested
Returns
-------
The values of `x` as separate elements
References
----------
Adapted from flatten() but with improvements to continue unnesting with
multiple levels of nesting. This can be seen with the second example below.
Examples
--------
x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
unnest(x) # [1, 2, 3, 4, 5, 6, 7, 8, 9]
x = [[1], [2, [3, 4]]]
unnest(x) # [1, 2, 3, 4]
"""
res = [j for i in as_list(x) for j in as_list(i)]
while any([isinstance(i, list) for i in res]):
res = unnest(res)
return res
def as_list(x):
if not isinstance(x, list):
x = [x]
return x
def flatten(x):
"""
Flatten a list
Performs a single unnesting of a list.
Parameters
----------
x : list
A list to be flattened
Returns
-------
The values of `x` but with a single unnesting
References
----------
https://stackoverflow.com/a/952952/12126576
Examples
--------
x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
flatten(x) # [1, 2, 3, 4, 5, 6, 7, 8, 9]
# Not compatible with different levels of nesting
# For this, use unnest()
x = [[1], [2, [3, 4]]]
flatten(x) # [1, 2, [3, 4]]
"""
return [item for sublist in x for item in sublist]
def which(x):
""" Which (is True)
Description
-----------
Returns a numeric list of which elements are True
Parameters
----------
x : list, bool
A list of bools
Returns
-------
A list with the indices of the elements that are True
Examples
--------
which(True) # [0]
which([True, False, None, True, False]) # [0, 3]
"""
x = as_list(x)
out = []
if not is_boolean(x):
raise ValueError("x must be boolean")
for i in range(0, len(x)):
if x[i]:
out.append(i)
return out
def is_boolean(x):
""" Is boolean?
Description
-----------
Evaluates an object or list as boolean
Parameters
----------
x : Object
An object to be evaluated as boolean.
Returns
-------
True or False
Examples
--------
is_boolean(True) # True
is_boolean([1, 2, True, False]) # False
is_boolean([True, None, False, True]) # True
"""
if isinstance(x, list):
return all([is_boolean(i) for i in x])
return is_none(x) or isinstance(x, bool)
def limit(x, lower=None, upper=None):
"""Limit a list
Description
-----------
Limits a list of numbers by a lower and/or upper limit
Parameters
----------
x : numeric list
A list of numeric elements which to compare to lower and upper
lower : numeric
A lower limit for `x`. If `None` (default) will use `min(x)`
upper : numeric
An upper limit for `x`. If `None` (default) will use `max(x)`
Returns
-------
A numeric list
Examples
--------
x = [3, 2, 1, 4, 5, -1, 4]
limit(x) # same as x
limit(x, lower=0)
limit(x, lower=1, upper=3)
limit(x, upper=4)
"""
x = as_list(x)
if is_none(lower):
lower = min(x)
if is_none(upper):
upper = max(x)
if lower > upper:
raise Exception("`lower` cannot be greater than `upper`")
return [lower if i < lower else upper if i > upper else i for i in x]
``` |
{
"source": "jmbarrie/nth-puzzle-py",
"score": 3
} |
#### File: nth-puzzle-py/tests/test_node.py
```python
import pytest
from node import Node
from puzzle import Puzzle
@pytest.fixture
def default_node():
default_puzzle = Puzzle()
default_puzzle.create_default_puzzle()
return Node(default_puzzle, default_puzzle, 0, 'uniform')
def test_move_up(default_node):
expected = [[1, 0, 3],
[4, 2, 6],
[7, 5, 8]
]
default_node.print_puzzle()
new_node = default_node.shift_value_up()
new_node.print_puzzle()
assert expected == new_node.get_puzzle()
def test_move_down(default_node):
expected = [[1, 2, 3],
[4, 5, 6],
[7, 0, 8]
]
default_node.print_puzzle()
new_node = default_node.shift_value_down()
new_node.print_puzzle()
assert expected == new_node.get_puzzle()
def test_move_left(default_node):
expected = [[1, 2, 3],
[0, 4, 6],
[7, 5, 8]
]
default_node.print_puzzle()
new_node = default_node.shift_value_left()
new_node.print_puzzle()
assert expected == new_node.get_puzzle()
def test_move_right(default_node):
expected = [[1, 2, 3],
[4, 6, 0],
[7, 5, 8]
]
default_node.print_puzzle()
new_node = default_node.shift_value_right()
new_node.print_puzzle()
assert expected == new_node.get_puzzle()
def test_legal_moves(default_node):
moves = default_node.generate_legal_moves()
expected_0 = [[1, 0, 3],
[4, 2, 6],
[7, 5, 8]
]
expected_1 = [[1, 2, 3],
[4, 5, 6],
[7, 0, 8]
]
expected_2 = [[1, 2, 3],
[4, 6, 0],
[7, 5, 8]
]
expected_3 = [[1, 2, 3],
[0, 4, 6],
[7, 5, 8]
]
assert moves[0].get_puzzle() == expected_0
assert moves[1].get_puzzle() == expected_1
assert moves[2].get_puzzle() == expected_2
assert moves[3].get_puzzle() == expected_3
```
#### File: nth-puzzle-py/tests/test_solver.py
```python
from solver import Solver
from puzzle import Puzzle
import pytest
@pytest.fixture
def default_solver_uc():
default_puzzle = Puzzle()
default_puzzle.create_default_puzzle()
return Solver("1", default_puzzle)
def test_generate_goal_state(default_solver_uc):
expected = [[1, 2, 3],
[4, 5, 6],
[7, 8, 0]]
default_solver_uc.generate_goal_state()
print(default_solver_uc.goal_state)
assert expected == default_solver_uc.get_goal_state()
``` |
{
"source": "jmbarrios/covid-mexico-19",
"score": 2
} |
#### File: covid_api/filters/caso.py
```python
import django_filters
from covid_data import models
VALORES_BOOLEANOS = (
('si', True),
('no', False)
)
class CasoFilter(django_filters.FilterSet):
id_registro = django_filters.CharFilter(
help_text='Identifica el id del registro. Búsqueda exacta.')
fecha_actualizacion = django_filters.DateFilter(
help_text=(
'Fecha de última actualización de la base '
'de datos publicados. Búsqueda exacta.'))
fecha_actualizacion_gt = django_filters.DateFilter(
help_text=(
'Fecha de última actualización de la base '
'de datos publicados. Mayor que.'),
field_name='fecha_actualizacion',
lookup_expr='gt')
fecha_actualizacion_lt = django_filters.DateFilter(
help_text=(
'Fecha de última actualización de la base '
'de datos publicados. Menor que.'),
field_name='fecha_actualizacion',
lookup_expr='lt')
fecha_actualizacion_gte = django_filters.DateFilter(
help_text=(
'Fecha de última actualización de la base '
'de datos publicados. Mayor o igual que.'),
field_name='fecha_actualizacion',
lookup_expr='gte')
fecha_actualizacion_lte = django_filters.DateFilter(
help_text=(
'Fecha de última actualización de la base '
'de datos publicados. Menor o igual que.'),
field_name='fecha_actualizacion',
lookup_expr='lte')
fecha_ingreso = django_filters.DateFilter(
help_text=(
'Identifica la fecha de ingreso del paciente a la unidad de '
'atención. Búsqueda exacta.'))
fecha_ingreso_gt = django_filters.DateFilter(
help_text=(
'Identifica la fecha de ingreso del paciente a la unidad de '
'atención. Mayor que.'),
field_name='fecha_ingreso',
lookup_expr='gt')
fecha_ingreso_lt = django_filters.DateFilter(
help_text=(
'Identifica la fecha de ingreso del paciente a la unidad de '
'atención. Menor que.'),
field_name='fecha_ingreso',
lookup_expr='lt')
fecha_ingreso_gte = django_filters.DateFilter(
help_text=(
'Identifica la fecha de ingreso del paciente a la unidad de '
'atención. Mayor o igual que.'),
field_name='fecha_ingreso',
lookup_expr='gte')
fecha_ingreso_lte = django_filters.DateFilter(
help_text=(
'Identifica la fecha de ingreso del paciente a la unidad de '
'atención. Menor o igual que.'),
field_name='fecha_ingreso',
lookup_expr='lte')
fecha_sintomas = django_filters.DateFilter(
help_text=(
'Idenitifica la fecha en que inició la sintomatología del '
'paciente. Búsqueda exacta.'))
fecha_sintomas_gt = django_filters.DateFilter(
help_text=(
'Idenitifica la fecha en que inició la sintomatología del '
'paciente. Mayor que.'),
field_name='fecha_sintomas',
lookup_expr='gt')
fecha_sintomas_lt = django_filters.DateFilter(
help_text=(
'Idenitifica la fecha en que inició la sintomatología del '
'paciente. Menor que.'),
field_name='fecha_sintomas',
lookup_expr='lt')
fecha_sintomas_gte = django_filters.DateFilter(
help_text=(
'Idenitifica la fecha en que inició la sintomatología del '
'paciente. Mayor o igual que.'),
field_name='fecha_sintomas',
lookup_expr='gte')
fecha_sintomas_lte = django_filters.DateFilter(
help_text=(
'Idenitifica la fecha en que inició la sintomatología del '
'paciente. Menor o igual que.'),
field_name='fecha_sintomas',
lookup_expr='lte')
fecha_defuncion = django_filters.DateFilter(
help_text=(
'Identifica la fecha en que el paciente falleció. '
'Búsqueda exacta.'))
fecha_defuncion_gt = django_filters.DateFilter(
help_text=(
'Identifica la fecha en que el paciente falleció. '
'Mayor que.'),
field_name='fecha_defuncion',
lookup_expr='gt')
fecha_defuncion_lt = django_filters.DateFilter(
help_text=(
'Identifica la fecha en que el paciente falleció. '
'Menor que.'),
field_name='fecha_defuncion',
lookup_expr='lt')
fecha_defuncion_gte = django_filters.DateFilter(
help_text=(
'Identifica la fecha en que el paciente falleció. '
'Mayor o igual que.'),
field_name='fecha_defuncion',
lookup_expr='gte')
fecha_defuncion_lte = django_filters.DateFilter(
help_text=(
'Identifica la fecha en que el paciente falleció. '
'Menor o igual que.'),
field_name='fecha_defuncion',
lookup_expr='lte')
defuncion = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
label='Defunción',
help_text='Indentifica si el paciente falleció. si/no.',
method='caso_defuncion')
origen = django_filters.ModelChoiceFilter(
queryset=models.Origen.objects.all(),
help_text=(
'Origen del reporte (USMER o fuera USMER). Búsqueda '
'por ID.'))
origen_clave = django_filters.NumberFilter(
field_name='origen__clave',
help_text=(
'Origen del reporte (USMER o fuera USMER). Búsqueda '
'por clave.'))
origen_descripcion = django_filters.CharFilter(
field_name='origen__descripcion',
help_text=(
'Origen del reporte (USMER o fuera USMER). Búsqueda '
'exacta por descripción'))
sector = django_filters.ModelChoiceFilter(
queryset=models.Sector.objects.all(),
help_text=(
'Identifica el tipo de institución del Sistema Nacional de '
'Salud que brindó la atención. Búsqueda por ID.'))
sector_clave = django_filters.NumberFilter(
field_name='sector__clave',
help_text=(
'Identifica el tipo de institución del Sistema Nacional de '
'Salud que brindó la atención. Búsqueda por clave.'))
sector_descripcion = django_filters.CharFilter(
field_name='sector__descripcion',
help_text=(
'Identifica el tipo de institución del Sistema Nacional de '
'Salud que brindó la atención. Búsqueda exacta por descripción.'))
sexo = django_filters.ModelChoiceFilter(
queryset=models.Sexo.objects.all(),
help_text='Identifica al sexo del paciente. Búsqueda por ID.')
sexo_clave = django_filters.NumberFilter(
field_name='sexo__clave',
help_text='Identifica al sexo del paciente. Búsqueda por clave.')
sexo_descripcion = django_filters.CharFilter(
field_name='sexo__descripcion',
help_text=(
'Identifica al sexo del paciente. Búsqueda exacta por '
'descripción.'))
tipo_paciente = django_filters.ModelChoiceFilter(
queryset=models.TipoPaciente.objects.all(),
help_text=(
'Identifica el tipo de atención que recibió el paciente en '
'la unidad. Búsqueda por ID.'))
tipo_paciente_clave = django_filters.NumberFilter(
field_name='tipo_paciente__clave',
help_text=(
'Identifica el tipo de atención que recibió el paciente en '
'la unidad. Búsqueda por clave.'))
tipo_paciente_descripcion = django_filters.CharFilter(
field_name='tipo_paciente__descripcion',
help_text=(
'Identifica el tipo de atención que recibió el paciente en '
'la unidad. Búsqueda exacta por descripción.'))
nacionalidad = django_filters.ModelChoiceFilter(
queryset=models.Nacionalidad.objects.all(),
help_text=(
'Identifica si el paciente es mexicano o extranjero. '
'Búsqueda por ID.'))
nacionalidad_clave = django_filters.NumberFilter(
field_name='nacionalidad__clave',
help_text=(
'Identifica si el paciente es mexicano o extranjero. '
'Búsqueda por clave.'))
nacionalidad_descripcion = django_filters.CharFilter(
field_name='nacionalidad__descripcion',
help_text=(
'Identifica si el paciente es mexicano o extranjero. '
'Búsqueda exacta por descripción.'))
edad = django_filters.NumberFilter(
help_text='Identifica la edad del paciente. Búsqueda exacta.')
edad_lt = django_filters.NumberFilter(
field_name='edad',
lookup_expr='lt',
help_text='Identifica la edad del paciente. Menor que.')
edad_gt = django_filters.NumberFilter(
field_name='edad',
lookup_expr='gt',
help_text='Identifica la edad del paciente. Mayor que.')
edad_lte = django_filters.NumberFilter(
field_name='edad',
lookup_expr='lte',
help_text='Identifica la edad del paciente. Menor o igual que.')
edad_gte = django_filters.NumberFilter(
field_name='edad',
lookup_expr='gte',
help_text='Identifica la edad del paciente. Mayor o igual que.')
positivo = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el resultado del análisis fue positivo. '
'si/no.'),
method='caso_positivo')
negativo = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el resultado del análisis fue negativo. '
'si/no.'),
method='caso_negativo')
pendiente = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el resultado del análisis está pendiente. '
'si/no.'),
method='caso_pendiente')
intubado = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente requirió de intubación. '
'si/no'),
method='caso_intubado')
intubado_clave = django_filters.NumberFilter(
field_name='intubado__clave',
help_text=(
'Identifica si el paciente requirió de intubación. '
'Búsqueda por clave.'))
intubado_descripcion = django_filters.CharFilter(
field_name='intubado__descripcion',
help_text=(
'Identifica si el paciente requirió de intubación. '
'Búsqueda exacta por descripción.'))
neumonia = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si al paciente se le diagnosticó con neumonía. '
'si/no'),
method='caso_neumonia')
neumonia_clave = django_filters.NumberFilter(
field_name='neumonia__clave',
help_text=(
'Identifica si al paciente se le diagnosticó con neumonía. '
'Búsqueda por clave.'))
neumonia_descripcion = django_filters.CharFilter(
field_name='neumonia__descripcion',
help_text=(
'Identifica si al paciente se le diagnosticó con neumonía. '
'Búsqueda exacta por descripción.'))
embarazo = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text='Identifica si la paciente está embarazada. si/no.',
method='caso_embarazo')
embarazo_clave = django_filters.NumberFilter(
field_name='embarazo__clave',
help_text=(
'Identifica si la paciente está embarazada. '
'Búsqueda por clave.'))
embarazo_descripcion = django_filters.CharFilter(
field_name='embarazo__descripcion',
help_text=(
'Identifica si la paciente está embarazada. '
'Búsqueda exacta por descripción.'))
habla_lengua_indigena = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text='Identifica si el paciente habla lengua índigena. si/no.',
method='caso_habla_lengua_indigena')
habla_lengua_indigena_clave = django_filters.NumberFilter(
field_name='habla_lengua_indigena__clave',
help_text=(
'Identifica si el paciente habla lengua índigena. '
'Búsqueda por clave.'))
habla_lengua_indigena_descripcion = django_filters.CharFilter(
field_name='habla_lengua_indigena__descripcion',
help_text=(
'Identifica si el paciente habla lengua índigena. '
'Búsqueda exacta por descripción.'))
diabetes = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente tiene un diagnóstico de diabetes. '
'si/no.'),
method='caso_diabetes')
diabetes_clave = django_filters.NumberFilter(
field_name='diabetes__clave',
help_text=(
'Identifica si el paciente tiene un diagnóstico de diabetes. '
'Búsqueda por clave.'))
diabetes_descripcion = django_filters.CharFilter(
field_name='diabetes__descripcion',
help_text=(
'Identifica si el paciente tiene un diagnóstico de diabetes. '
'Búsqueda exacta por descripción.'))
epoc = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente tiene un diagnóstico de EPOC. '
'si/no.'),
method='caso_epoc')
epoc_clave = django_filters.NumberFilter(
field_name='epoc__clave',
help_text=(
'Identifica si el paciente tiene un diagnóstico de EPOC. '
'Búsqueda por clave.'))
epoc_descripcion = django_filters.CharFilter(
field_name='epoc__descripcion',
help_text=(
'Identifica si el paciente tiene un diagnóstico de EPOC. '
'Búsqueda exacta por descripción.'))
asma = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente tiene un diagnóstico de asma. '
'si/no.'),
method='caso_asma')
asma_clave = django_filters.NumberFilter(
field_name='asma__clave',
help_text=(
'Identifica si el paciente tiene un diagnóstico de asma. '
'Búsqueda por clave.'))
asma_descripcion = django_filters.CharFilter(
field_name='asma__descripcion',
help_text=(
'Identifica si el paciente tiene un diagnóstico de asma. '
'Búsqueda exacta por descripción.'))
inmusupr = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente presenta inmunosupresión. '
'si/no.'),
method='caso_inmusupr')
inmusupr_clave = django_filters.NumberFilter(
field_name='inmusupr__clave',
help_text=(
'Identifica si el paciente presenta inmunosupresión. '
'Búsqueda por clave.'))
inmusupr_descripcion = django_filters.CharFilter(
field_name='inmusupr__descripcion',
help_text=(
'Identifica si el paciente presenta inmunosupresión. '
'Búsqueda exacta por descripción.'))
hipertension = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente tiene un diagnóstico de hipertensión. '
'si/no.'),
method='caso_hipertension')
hipertension_clave = django_filters.NumberFilter(
field_name='hipertension__clave',
help_text=(
'Identifica si el paciente tiene un diagnóstico de hipertensión. '
'Búsqueda por clave.'))
hipertension_descripcion = django_filters.CharFilter(
field_name='hipertension__descripcion',
help_text=(
'Identifica si el paciente tiene un diagnóstico de hipertensión. '
'Búsqueda exacta por descripción.'))
otras_com = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente tiene diagnóstico de otras '
'enfermedades. si/no.'),
method='caso_otras_com')
otras_com_clave = django_filters.NumberFilter(
field_name='otras_com__clave',
help_text=(
'Identifica si el paciente tiene diagnóstico de otras '
'enfermedades. Búsqueda por clave.'))
otras_com_descripcion = django_filters.CharFilter(
field_name='otras_com__descripcion',
help_text=(
'Identifica si el paciente tiene diagnóstico de otras '
'enfermedades. Búsqueda exacta por descripción.'))
cardiovascular = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente tiene un diagnóstico de '
'enfermedades cardiovasculares. si/no.'),
method='caso_cardiovascular')
cardiovascular_clave = django_filters.NumberFilter(
field_name='cardiovascular__clave',
help_text=(
'Identifica si el paciente tiene un diagnóstico de '
'enfermedades cardiovasculares. Búsqueda por clave.'))
cardiovascular_descripcion = django_filters.CharFilter(
field_name='cardiovascular__descripcion',
help_text=(
'Identifica si el paciente tiene un diagnóstico de '
'enfermedades cardiovasculares. Búsqueda exacta por '
'descripción.'))
obesidad = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente tiene diagnóstico de obesidad.'
' si/no.'),
method='caso_obesidad')
obesidad_clave = django_filters.NumberFilter(
field_name='obesidad__clave',
help_text=(
'Identifica si el paciente tiene diagnóstico de obesidad. '
'Búsqueda por clave.'))
obesidad_descripcion = django_filters.CharFilter(
field_name='obesidad__descripcion',
help_text=(
'Identifica si el paciente tiene diagnóstico de obesidad. '
'Búsqueda exacta por descripción.'))
renal_cronica = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente tiene diagnóstico de insuficiencia '
'renal crónica. si/no.'),
method='caso_renal_cronica')
renal_cronica_clave = django_filters.NumberFilter(
field_name='renal_cronica__clave',
help_text=(
'Identifica si el paciente tiene diagnóstico de insuficiencia '
'renal crónica. Búsqueda por clave.'))
renal_cronica_descripcion = django_filters.CharFilter(
field_name='renal_cronica__descripcion',
help_text=(
'Identifica si el paciente tiene diagnóstico de insuficiencia '
'renal crónica. Búsqueda exacta por descripción.'))
tabaquismo = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente tiene hábito de tabaquismo. '
'si/no.'),
method='caso_tabaquismo')
tabaquismo_clave = django_filters.NumberFilter(
field_name='tabaquismo__clave',
help_text=(
'Identifica si el paciente tiene hábito de tabaquismo. '
'Búsqueda por clave.'))
tabaquismo_descripcion = django_filters.CharFilter(
field_name='tabaquismo__descripcion',
help_text=(
'Identifica si el paciente tiene hábito de tabaquismo. '
'Búsqueda exacta por descripción.'))
otro_caso = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente tuvo contacto con algún otro caso '
'diagnósticado con SARS CoV-2. si/no.'),
method='caso_otro_caso')
otro_caso_clave = django_filters.NumberFilter(
field_name='otro_caso__clave',
help_text=(
'Identifica si el paciente tuvo contacto con algún otro caso '
'diagnósticado con SARS CoV-2. Búsqueda por clave.'))
otro_caso_descripcion = django_filters.CharFilter(
field_name='otro_caso__descripcion',
help_text=(
'Identifica si el paciente tuvo contacto con algún otro caso '
'diagnósticado con SARS CoV-2. Búsqueda exacta por descripción.'))
migrante = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente es una persona migrante. '
'si/no.'),
method='caso_migrante')
migrante_clave = django_filters.NumberFilter(
field_name='migrante__clave',
help_text=(
'Identifica si el paciente es una persona migrante. '
'Búsqueda por clave.'))
migrante_descripcion = django_filters.CharFilter(
field_name='migrante__descripcion',
help_text=(
'Identifica si el paciente es una persona migrante. '
'Búsqueda exacta por descripción.'))
uci = django_filters.ChoiceFilter(
choices=VALORES_BOOLEANOS,
help_text=(
'Identifica si el paciente requirió ingresar a una Unidad '
'de Cuidados Intensivos. si/no.'),
method='caso_uci')
uci_clave = django_filters.NumberFilter(
field_name='uci__clave',
help_text=(
'Identifica si el paciente requirió ingresar a una Unidad '
'de Cuidados Intensivos. Búsqueda por clave.'))
uci_descripcion = django_filters.CharFilter(
field_name='uci__descripcion',
help_text=(
'Identifica si el paciente requirió ingresar a una Unidad '
'de Cuidados Intensivos. Búsqueda exacta por descripción.'))
entidades = models.Entidad.objects.all()
entidad_um = django_filters.ModelChoiceFilter(
queryset=entidades,
help_text=(
'Identifica la entidad donde se ubica la unidad medica que '
'brindó la atención. Búsqueda por ID.'))
entidad_um_clave = django_filters.NumberFilter(
field_name='entidad_um__clave',
help_text=(
'Identifica la entidad donde se ubica la unidad medica que '
'brindó la atención. Búsqueda por clave.'))
entidad_um_descripcion = django_filters.CharFilter(
field_name='entidad_um__descripcion',
help_text=(
'Identifica la entidad donde se ubica la unidad medica que '
'brindó la atención. Búsqueda exacta por descripción.'))
entidad_um_descripcion_contiene = django_filters.CharFilter(
field_name='entidad_um__descripcion',
lookup_expr='icontains',
help_text=(
'Identifica la entidad donde se ubica la unidad medica que '
'brindó la atención. Búsqueda por descripción.'))
entidad_nacimiento = django_filters.ModelChoiceFilter(
queryset=entidades,
help_text=(
'Identifica la entidad de nacimiento del paciente.'
' Búsqueda por ID.'))
entidad_nacimiento_clave = django_filters.NumberFilter(
field_name='entidad_nacimiento__clave',
help_text=(
'Identifica la entidad de nacimiento del paciente.'
' Búsqueda por clave.'))
entidad_nacimiento_descripcion = django_filters.CharFilter(
field_name='entidad_nacimiento__descripcion',
help_text=(
'Identifica la entidad de nacimiento del paciente.'
' Búsqueda exacta por descripción.'))
entidad_nacimiento_descripcion_contiene = django_filters.CharFilter(
field_name='entidad_nacimiento__descripcion',
lookup_expr='icontains',
help_text=(
'Identifica la entidad de nacimiento del paciente. '
'Búsqueda por descripción.'))
entidad_residencia = django_filters.ModelChoiceFilter(
queryset=entidades,
help_text=(
'Identifica la entidad de residencia del paciente. '
'Búsqueda por ID.'))
entidad_residencia_clave = django_filters.NumberFilter(
field_name='entidad_residencia__clave',
help_text=(
'Identifica la entidad de residencia del paciente. '
'Búsqueda por clave.'))
entidad_residencia_descripcion = django_filters.CharFilter(
field_name='entidad_residencia__descripcion',
help_text=(
'Identifica la entidad de residencia del paciente. '
'Búsqueda exacta por descripción.'))
entidad_residencia_descripcion_contiene = django_filters.CharFilter(
field_name='entidad_residencia__descripcion',
lookup_expr='icontains',
help_text=(
'Identifica la entidad de residencia del paciente. '
'Búsqueda por descripción.'))
municipio_residencia_clave = django_filters.NumberFilter(
field_name='municipio_residencia__clave',
help_text=(
'Identifica el municipio de residencia del paciente. '
'Búsqueda por clave.'))
municipio_residencia_descripcion = django_filters.CharFilter(
field_name='municipio_residencia__descripcion',
help_text=(
'Identifica el municipio de residencia del paciente. '
'Búsqueda exacta por descripción.'))
municipio_residencia_descripcion_contiene = django_filters.CharFilter(
field_name='municipio_residencia__descripcion',
lookup_expr='icontains',
help_text=(
'Identifica el municipio de residencia del paciente. '
'Búsqueda por descripción.'))
pais_origen_clave = django_filters.NumberFilter(
field_name='pais_origen__clave',
help_text=(
'Identifica el país del que partió el paciente rumbo a México. '
'Búsqueda por clave.'))
pais_origen_descripcion = django_filters.CharFilter(
field_name='pais_origen__descripcion',
help_text=(
'Identifica el país del que partió el paciente rumbo a México. '
'Búsqueda exacta por descripción.'))
pais_origen_descripcion_contiene = django_filters.CharFilter(
field_name='pais_origen__descripcion',
lookup_expr='icontains',
help_text=(
'Identifica el país del que partió el paciente rumbo a México. '
'Búsqueda por descripción.'))
pais_origen_region = django_filters.CharFilter(
field_name='pais_origen__region',
lookup_expr='icontains',
help_text=(
'Identifica el país del que partió el paciente rumbo a México. '
'Búsqueda por región. Se usó la regionalización del banco '
'mundial.'))
pais_nacionalidad_clave = django_filters.NumberFilter(
field_name='pais_nacionalidad__clave',
help_text=(
'Identifica la nacionalidad del paciente. '
'Búsqueda por clave.'))
pais_nacionalidad_descripcion = django_filters.CharFilter(
field_name='pais_nacionalidad__descripcion',
help_text=(
'Identifica la nacionalidad del paciente. '
'Búsqueda exacta por descripción.'))
pais_nacionalidad_descripcion_contiene = django_filters.CharFilter(
field_name='pais_nacionalidad__descripcion',
lookup_expr='icontains',
help_text=(
'Identifica la nacionalidad del paciente. '
'Búsqueda por descripción.'))
pais_nacionalidad_region = django_filters.CharFilter(
field_name='pais_nacionalidad__region',
lookup_expr='icontains',
help_text=(
'Identifica la nacionalidad del paciente. '
'Búsqueda por región. Se usó la regionalización del banco '
'mundial.'))
class Meta:
model = models.Caso
fields = []
def caso_defuncion(self, queryset, name, value):
return queryset.filter(fecha_defuncion__isnull=(value == 'no'))
def caso_positivo(self, queryset, name, value):
if value == 'si':
return queryset.filter(resultado__clave=1)
return queryset.exclude(resultado__clave=1)
def caso_negativo(self, queryset, name, value):
if value == 'si':
return queryset.filter(resultado__clave=2)
return queryset.exclude(resultado__clave=2)
def caso_pendiente(self, queryset, name, value):
if value == 'si':
return queryset.filter(resultado__clave=3)
return queryset.exclude(resultado__clave=3)
def caso_intubado(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(intubado__clave=clave)
def caso_neumonia(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(neumonia__clave=clave)
def caso_embarazo(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(embarazo__clave=clave)
def caso_habla_lengua_indigena(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(habla_lengua_indigena__clave=clave)
def caso_diabetes(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(diabetes__clave=clave)
def caso_epoc(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(epoc__clave=clave)
def caso_asma(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(asma__clave=clave)
def caso_inmusupr(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(inmusupr__clave=clave)
def caso_hipertension(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(hipertension__clave=clave)
def caso_otras_com(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(otras_com__clave=clave)
def caso_cardiovascular(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(cardiovascular__clave=clave)
def caso_obesidad(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(obesidad__clave=clave)
def caso_renal_cronica(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(renal_cronica__clave=clave)
def caso_tabaquismo(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(tabaquismo__clave=clave)
def caso_otro_caso(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(otro_caso__clave=clave)
def caso_migrante(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(migrante__clave=clave)
def caso_uci(self, queryset, name, value):
clave = 1 if value == 'si' else 2
return queryset.filter(uci__clave=clave)
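# Illustrative usage (not part of the original source): the filters above are
# combined as query-string parameters, e.g.
#   /api/caso/?positivo=si&defuncion=si&entidad_um_clave=9&edad_gte=60
# The exact endpoint path depends on the project's URL configuration and is
# only an assumption here.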
```
#### File: covid_api/serializers/entidad.py
```python
import json
from rest_framework import serializers
from covid_data import models
class EntidadSimpleSerializer(serializers.ModelSerializer):
class Meta:
model = models.Entidad
fields = [
'url',
'clave',
'descripcion']
extra_kwargs = {
'url': {'view_name': 'entidad-detail', 'lookup_field': 'clave'}
}
class EntidadSerializer(serializers.ModelSerializer):
class Meta:
model = models.Entidad
fields = [
'url',
'clave',
'descripcion',
]
extra_kwargs = {
'url': {'view_name': 'entidad-detail', 'lookup_field': 'clave'}
}
class EntidadGeoSerializer(serializers.ModelSerializer):
type = serializers.CharField(
read_only=True,
default='Feature')
geometry = serializers.SerializerMethodField()
properties = EntidadSerializer(source='*')
class Meta:
model = models.Entidad
fields = [
'type',
'geometry',
'properties'
]
def get_geometry(self, obj):
return json.loads(obj.geometria_simplificada.geojson)
class EntidadCentroideSerializer(serializers.ModelSerializer):
type = serializers.CharField(
read_only=True,
default='Feature')
geometry = serializers.SerializerMethodField()
properties = EntidadSerializer(source='*')
class Meta:
model = models.Entidad
fields = [
'type',
'geometry',
'properties'
]
def get_geometry(self, obj):
return json.loads(obj.centroide.geojson)
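# Illustrative output shape (an assumption based on the fields above, not taken
# from the original repository): EntidadGeoSerializer renders each entity as a
# GeoJSON-style Feature, e.g.
#   {
#       "type": "Feature",
#       "geometry": {"type": "MultiPolygon", "coordinates": [...]},
#       "properties": {"url": "...", "clave": 9, "descripcion": "Ciudad de México"}
#   }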
```
#### File: views/catalogos/nacionalidad.py
```python
from covid_data import models
from covid_api.serializers import otros
from covid_api.views.base import CatalogoVista
class CatalogoNacionalidadVista(CatalogoVista):
queryset = models.Nacionalidad.objects.all()
serializer_class = otros.NacionalidadSerializer
def list(self, *args, **kwargs):
"""
Nacionalidad - Valores posibles.
Regresa la lista de valores posibles para *nacionalidad*, según el
formato de la información liberada. No requiere parámetros. Ejemplo:
<host:port>/api/catalogos/nacionalidad/
"""
return super().list(*args, **kwargs)
```
#### File: covid_data/models/resultado.py
```python
from django.db import models
from covid_data.models.base import ModeloBase
class Resultado(ModeloBase):
"""Identifica el resultado del análisis de la muestra reportado por
el laboratorio de la Red Nacional de Laboratorios de Vigilancia
Epidemiológica (INDRE, LESP y LAVE). (Catálogo de resultados diagnósticos
anexo).
"""
clave = models.IntegerField(unique=True)
descripcion = models.CharField(max_length=63)
def __repr__(self):
return self.descripcion
def __str__(self):
return self.descripcion
```
#### File: covid_update/actualizar/catalogos.py
```python
import os
import logging
import pandas as pd
from django.conf import settings
from django.db import transaction
from covid_data import models
from covid_update.constantes import COL_DESCRIPCION
RESULTADO = 'RESULTADO'
TIPO_PACIENTE = 'TIPO_PACIENTE'
ORIGEN = 'ORIGEN'
SECTOR = 'SECTOR'
SI_NO = 'SI_NO'
NACIONALIDAD = 'NACIONALIDAD'
SEXO = 'SEXO'
CATALOGOS = {
RESULTADO: models.Resultado,
TIPO_PACIENTE: models.TipoPaciente,
ORIGEN: models.Origen,
SECTOR: models.Sector,
SI_NO: models.SiNo,
NACIONALIDAD: models.Nacionalidad,
SEXO: models.Sexo
}
logging.basicConfig(level=logging.INFO)
@transaction.atomic
def actualizar_catalogos():
for catalogo in CATALOGOS:
logging.info('Actualizando el catalogo: %s', catalogo)
actualizar_catalogo(catalogo)
def actualizar_catalogo(catalogo):
df = cargar_catalogo(catalogo)
modelo = CATALOGOS[catalogo]
for clave, renglon in df.iterrows():
descripcion = renglon[COL_DESCRIPCION]
_, creado = modelo.objects.get_or_create(
clave=clave,
descripcion=descripcion.strip())
if creado:
logging.info(
'[%s] Entrada registrada: clave=%s, descripcion=%s',
catalogo,
clave,
descripcion)
def cargar_catalogo(nombre):
directorio = os.path.join(
settings.BASE_DIR,
settings.DATOS_BASE_DIR,
settings.CATALOGOS_DIR)
nombre_archivo = os.path.join(directorio, f'{nombre}.csv')
return pd.read_csv(nombre_archivo, index_col=0)
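# Illustrative invocation (not from the original source; how this module is wired
# into a management command is not shown here):
#   from covid_update.actualizar.catalogos import actualizar_catalogos
#   actualizar_catalogos()  # upserts every catalogue inside a single transaction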
```
#### File: covid-mexico-19/covid_update/catalogos.py
```python
import os
import glob
import pandas as pd
from django.conf import settings
from covid_update.constantes import COL_CATALOGO
from covid_update.constantes import COL_FORMATO
from covid_update.constantes import COL_DESCRIPCION
from covid_update.constantes import ENTIDADES
from covid_update.constantes import RESULTADO
from covid_update.constantes import MUNICIPIO
def procesar_catalogos():
ruta_catalogos, ruta_descriptores = obtener_rutas()
directorio = os.path.join(
settings.BASE_DIR,
settings.DATOS_BASE_DIR)
nombre_descriptores = os.path.join(
directorio,
'descriptores.csv')
if not os.path.exists(nombre_descriptores):
descriptores = cargar_descriptores(ruta_descriptores)
descriptores.to_csv(nombre_descriptores)
else:
descriptores = pd.read_csv(nombre_descriptores)
directorio = os.path.join(directorio, settings.CATALOGOS_DIR)
if not os.path.exists(directorio):
os.makedirs(directorio)
catalogos = descriptores[COL_CATALOGO].dropna().unique()
for catalogo in catalogos:
nombre_catalogo = os.path.join(
directorio,
f'{catalogo}.csv')
if not os.path.exists(nombre_catalogo):
catalogo_df = cargar_catalogo(ruta_catalogos, catalogo)
catalogo_df.to_csv(nombre_catalogo)
def obtener_rutas():
directorio = os.path.join(
settings.BASE_DIR,
settings.DATOS_BASE_DIR,
settings.DESCARGAS_DIR,
'diccionario_datos_covid19')
archivos = glob.glob(os.path.join(directorio, '*'))
catalogos = [ruta for ruta in archivos if 'Catalogos' in ruta][0]
descriptores = [ruta for ruta in archivos if 'Descriptores' in ruta][0]
return catalogos, descriptores
def obtener_pagina(nombre_catalogo):
if nombre_catalogo == ENTIDADES:
return f'Catálogo de {nombre_catalogo}'
return f'Catálogo {nombre_catalogo}'
def obtener_primer_renglon(nombre_catalogo):
if nombre_catalogo == RESULTADO:
return 1
return 0
def cargar_descriptores(ruta):
descriptores = pd.read_excel(ruta, index_col=0)
def obtener_catalogo(nombre):
try:
return nombre.split(':')[1].strip().replace(' ', '')
except (AttributeError, IndexError):
return None
descriptores[COL_CATALOGO] = descriptores[COL_FORMATO].apply(obtener_catalogo)
return descriptores
def cargar_catalogo(ruta, nombre):
pagina = obtener_pagina(nombre)
renglon = obtener_primer_renglon(nombre)
catalogo = pd.read_excel(
ruta,
sheet_name=pagina,
index_col=0,
skiprows=renglon)
# La tabla de catalogos de Resultado no tiene nombrada la columna
# de descripción
if nombre == RESULTADO:
catalogo = catalogo.rename(columns=lambda x: COL_DESCRIPCION)
# La tabla de municipios tiene varios renglones duplicados.
# Se los quitamos
if nombre == MUNICIPIO:
catalogo = catalogo[~catalogo.index.duplicated(keep='first')]
# Eliminar los espacios en blanco
if COL_DESCRIPCION in catalogo.columns:
catalogo[COL_DESCRIPCION] = catalogo[COL_DESCRIPCION].apply(lambda x: x.strip())
return catalogo
```
#### File: covid_update/migrations/0003_municipios.py
```python
import os
from django.db import migrations
from django.db import transaction
from django.contrib.gis.geos import MultiPolygon
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.gdal import DataSource
RUTA_MUNICIPIOS_SHP = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'data',
'marco_geoestadistico',
'mun',
'00mun.shp')
@transaction.atomic
def cargar_municipios(apps, schema_editor):
Entidad = apps.get_model("covid_data", "Entidad")
Municipio = apps.get_model("covid_data", "Municipio")
fuente = DataSource(RUTA_MUNICIPIOS_SHP)
capa = fuente[0]
for municipio in capa:
descripcion = municipio.get('NOMGEO')
clave_municipio = municipio.get('CVE_MUN')
clave_entidad = municipio.get('CVE_ENT')
clave = municipio.get('CVEGEO')
entidad = Entidad.objects.get(clave=clave_entidad)
geometria = municipio.geom
geometria = GEOSGeometry(geometria.wkt, srid=6372)
geometria_web = geometria.transform(3857, clone=True)
geometria.transform(4326)
centroide = geometria.centroid
centroide_web = geometria_web.centroid
geometria_simplificada = geometria.simplify(
tolerance=0.0,
preserve_topology=True)
geometria_web_simplificada = geometria_web.simplify(
tolerance=0.0,
preserve_topology=True)
if geometria.geom_type == 'Polygon':
geometria = MultiPolygon(geometria, srid=4326)
geometria_web = MultiPolygon(geometria_web, srid=3857)
geometria_simplificada = MultiPolygon(geometria_simplificada, srid=4326)
geometria_web_simplificada = MultiPolygon(geometria_web_simplificada, srid=3857)
municipio, creado = Municipio.objects.get_or_create(
clave=clave,
descripcion=descripcion,
defaults=dict(
clave_municipio=clave_municipio,
entidad=entidad,
geometria=geometria,
geometria_web=geometria_web,
centroide=centroide,
centroide_web=centroide_web,
geometria_simplificada=geometria_simplificada,
geometria_web_simplificada=geometria_web_simplificada))
if creado:
print(f'Municipio creado {municipio}')
class Migration(migrations.Migration):
dependencies = [
('covid_update', '0002_entidades'),
]
operations = [
migrations.RunPython(cargar_municipios)
]
```
#### File: covid_update/migrations/0004_paises.py
```python
import os
import pandas as pd
from django.db import transaction
from django.db import migrations
RUTA_PAISES = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'data',
'paises.csv')
@transaction.atomic
def cargar_paises(apps, schema_editor):
"""Datos de paises obtenidos del banco mundial.
"""
Pais = apps.get_model("covid_data", "Pais")
paises = pd.read_csv(RUTA_PAISES, index_col=0, encoding='latin-1')
for _, pais in paises.iterrows():
descripcion = pais['Economy']
codigo = pais['Code']
region = pais['Region']
pais, creado = Pais.objects.get_or_create(
clave=codigo,
descripcion=descripcion,
codigo=codigo,
region=region)
if creado:
print(f'Pais creado {pais}')
class Migration(migrations.Migration):
dependencies = [
('covid_update', '0003_municipios'),
]
operations = [
migrations.RunPython(cargar_paises)
]
```
#### File: covid-mexico-19/covid_update/models.py
```python
import os
from django.db import models
from django.conf import settings
def obtener_directorio_casos():
return os.path.join(
settings.BASE_DIR,
settings.DATOS_BASE_DIR,
settings.CASOS_DIR)
def obtener_directorio_logs():
return os.path.join(
settings.BASE_DIR,
settings.DATOS_BASE_DIR,
settings.LOGS_DIR)
class Actualizacion(models.Model):
fecha = models.DateField(auto_now_add=True)
archivo = models.FilePathField(path=obtener_directorio_casos, unique=True)
log = models.FilePathField(path=obtener_directorio_logs, null=True)
``` |
{
"source": "jmbarrios/dotfiles",
"score": 2
} |
#### File: dotfiles/qtile/config.py
```python
import os
import subprocess
from typing import List # noqa: F401
from libqtile import bar, layout, widget, hook
from libqtile.config import Click, Drag, Group, Key, Screen, Match
from libqtile.lazy import lazy
# from libqtile.utils import guess_terminal
# Defining variables
MOD = "mod4"
MYTERM = 'alacritty'
MYAPPLAUNCHER = 'rofi'
MYFONT = 'Hack'
# terminal = guess_terminal()
MYCOLORS = [
'#282828',
'#cc241d',
'#98971a',
'#d79921',
'#458588',
'#b16286',
'#689d6a',
'#a89984',
'#504945',
'#d65d0e'
]
BLACK = MYCOLORS[0]
RED = MYCOLORS[1]
GREEN = MYCOLORS[2]
YELLOW = MYCOLORS[3]
BLUE = MYCOLORS[4]
PURPLE = MYCOLORS[5]
AQUA = MYCOLORS[6]
GRAY = MYCOLORS[7]
DGRAY = MYCOLORS[8]
ORANGE = MYCOLORS[9]
# Keymaps
keys = [
# Monad tall keybindings
# Navigation
Key([MOD], "h", lazy.layout.left()),
Key([MOD], "l", lazy.layout.right()),
Key([MOD], "j", lazy.layout.down()),
Key([MOD], "k", lazy.layout.up()),
# Move windows
Key([MOD, "shift"], "h", lazy.layout.swap_left()),
Key([MOD, "shift"], "l", lazy.layout.swap_right()),
Key([MOD, "shift"], "j", lazy.layout.shuffle_down()),
Key([MOD, "shift"], "k", lazy.layout.shuffle_up()),
# Change windows sizes
Key([MOD], "i", lazy.layout.grow()),
Key([MOD], "d", lazy.layout.shrink()),
Key([MOD], "n", lazy.layout.normalize()),
Key([MOD], "o", lazy.layout.maximize()),
Key([MOD, "shift"], "space", lazy.layout.flip()),
# Toggle between different layouts as defined below
Key([MOD], "Tab", lazy.next_layout(), desc="Toggle between layouts"),
Key([MOD], "x", lazy.window.kill(), desc="Kill focused window"),
Key([MOD, "control"], "r", lazy.restart(), desc="Restart qtile"),
Key([MOD, "control"], "q", lazy.shutdown(), desc="Shutdown qtile"),
# Key([MOD], "r", lazy.spawncmd(),
# desc="Spawn a command using a prompt widget"),
# Sound
Key([], "XF86AudioMute", lazy.spawn("pulsemixer --toggle-mute")),
Key([], "XF86AudioLowerVolume", lazy.spawn("pulsemixer --change-volume -5")),
Key([], "XF86AudioRaiseVolume", lazy.spawn("pulsemixer --change-volume 5")),
# Keyboard layout (bound to mod+space; mod+shift+space is already used above to flip the layout)
Key([MOD], "space", lazy.widget["keyboardlayout"].next_keyboard()),
# Custom applications
Key([MOD], "f", lazy.spawn("firefox"), desc="Launch Firefox"),
Key([MOD], "Return", lazy.spawn(MYTERM), desc="Launch terminal"),
Key([MOD], "r", lazy.spawn("rofi -show combi"), desc="Launch rofi"),
]
# Keybiding for float
# for key, x, y in [("Left", -10, 0),
# ("Right", 10, 0),
# ("Up", 0, -10),
# ("Down", 0, 10)]:
# keys.append(Key([MOD, "control"], key, lazy.window.move_floating(x, y)))
# keys.append(Key([MOD, "shift"], key, lazy.window.resize_floating(x, y)))
# keys.append(Key([MOD, "mod1"], key, lazy.window.move_to_screen_edge(key)))
# Groups
activities = {
'm': Group('main'),
'w': Group('web', matches=[Match(wm_class=["firefox"])]),
'e': Group('editor'),
'u': Group('music', matches=[Match(wm_class=["Spotify"])]),
'c': Group('chat', matches=[Match(wm_instance_class=["Microsoft Teams - Preview"])])
}
groups = list(activities.values())
for k, g in activities.items():
keys.extend([
# mod1 + letter of group = switch to group
Key([MOD], k, lazy.group[g.name].toscreen(),
desc="Switch to group {}".format(g.name)),
# mod1 + shift + letter of group = switch to & move focused window to group
Key([MOD, "shift"], k, lazy.window.togroup(g.name, switch_group=True),
desc="Switch to & move focused window to group {}".format(g.name)),
# Or, use below if you prefer not to switch to that group.
# # mod1 + shift + letter of group = move focused window to group
# Key([mod, "shift"], i.name, lazy.window.togroup(i.name),
# desc="move focused window to group {}".format(i.name)),
])
# Theme
layout_theme = {
'border_width': 3,
'margin': 7,
'border_focus': GREEN,
'border_normal': BLACK
}
layouts = [
layout.Max(**layout_theme),
# layout.Stack(num_stacks=2, **layout_theme),
# Try more layouts by unleashing below layouts.
# layout.Bsp(),
# layout.Columns(),
# layout.Matrix(),
layout.MonadTall(name='monad', **layout_theme),
# layout.MonadWide(),
# layout.RatioTile(),
# layout.Tile(),
# layout.TreeTab(),
# layout.VerticalTile(),
# layout.Zoomy(),
layout.Floating(border_width=3, border_focus=GREEN, border_normal=BLACK),
]
widget_defaults = dict(
font=MYFONT,
fontsize=12,
padding=5,
background=BLACK,
foreground=GRAY,
)
extension_defaults = widget_defaults.copy()
# screens = [
# Screen(
# top=bar.Gap(5),
# right=bar.Gap(5),
# left=bar.Gap(5),
# bottom=bar.Bar(
# [
# widget.GroupBox(
# active=GRAY,
# inactive=DGRAY,
# block_highlight_text_color=BLACK,
# highlight_method='block',
# this_current_screen_border=GREEN,
# this_screen_border=BLUE,
# ),
# widget.WindowName(),
# widget.Chord(
# chords_colors={
# 'launch': ("#ff0000", "#ffffff"),
# },
# name_transform=lambda name: name.upper(),
# ),
# # widget.TextBox("Press <M-r> to spawn", foreground="#d75f5f"),
# widget.Systray(),
# widget.Net(interface=["wlp2s0", "enp3s0"]),
# widget.Clock(format='%Y-%m-%d %a %I:%M %p'),
# widget.Battery(format='Batt {char} {percent:2.0%}'),
# widget.PulseVolume(),
# widget.KeyboardLayout(
# configured_keyboards=['us', 'es'],
# background=YELLOW,
# foreground=BLACK,
# ),
# widget.QuickExit(
# default_text='[ Bye ]',
# background=ORANGE,
# foreground=BLACK,
# ),
# widget.CurrentLayout(
# background=RED,
# foreground=BLACK,
# ),
# ],
# 24,
# ),
# wallpaper='~/Pictures/destiny-2-2019-game-wallpapers.jpg',
# wallpaper_mode='fill',
# ),
# Screen(
# top=bar.Gap(5),
# right=bar.Gap(5),
# left=bar.Gap(5),
# bottom=bar.Bar([
# widget.GroupBox(
# active=GRAY,
# inactive=DGRAY,
# block_highlight_text_color=BLACK,
# highlight_method='block',
# this_current_screen_border=GREEN,
# this_screen_border=BLUE,
# ),
# widget.WindowName(),
# widget.CurrentLayout(
# background=RED,
# foreground=BLACK,
# ),
# ], 24),
# wallpaper='~/Pictures/destiny-2-2019-game-wallpapers.jpg',
# wallpaper_mode='fill',
# )
# ]
#
# # Drag floating layouts.
# mouse = [
# Drag([MOD], "Button1", lazy.window.set_position_floating(),
# start=lazy.window.get_position()),
# Drag([MOD], "Button3", lazy.window.set_size_floating(),
# start=lazy.window.get_size()),
# Click([MOD], "Button2", lazy.window.bring_to_front())
# ]
#
# # Autostart
# @hook.subscribe.startup_once
# def autostart():
# home = os.path.expanduser('~/.config/qtile/autostart.sh')
# subprocess.call([home])
dgroups_key_binder = None
dgroups_app_rules = [] # type: List
main = None # WARNING: this is deprecated and will be removed soon
follow_mouse_focus = True
bring_front_click = False
cursor_warp = False
floating_layout = layout.Floating(float_rules=[
# Run the utility of `xprop` to see the wm class and name of an X client.
{'wmclass': 'confirm'},
{'wmclass': 'dialog'},
{'wmclass': 'download'},
{'wmclass': 'error'},
{'wmclass': 'file_progress'},
{'wmclass': 'notification'},
{'wmclass': 'splash'},
{'wmclass': 'toolbar'},
{'wmclass': 'confirmreset'}, # gitk
{'wmclass': 'makebranch'}, # gitk
{'wmclass': 'maketag'}, # gitk
{'wname': 'branchdialog'}, # gitk
{'wname': 'pinentry'}, # GPG key password entry
{'wmclass': 'ssh-askpass'}, # ssh-askpass
])
auto_fullscreen = True
focus_on_window_activation = "smart"
# XXX: Gasp! We're lying here. In fact, nobody really uses or cares about this
# string besides java UI toolkits; you can see several discussions on the
# mailing lists, GitHub issues, and other WM documentation that suggest setting
# this string if your java app doesn't work correctly. We may as well just lie
# and say that we're a working one by default.
#
# We choose LG3D to maximize irony: it is a 3D non-reparenting WM written in
# java that happens to be on java's whitelist.
wmname = "LG3D"
``` |
{
"source": "jmbarrios/hw-conversion",
"score": 3
} |
#### File: src/hw_conversion/cli.py
```python
import argparse
import toml
import logging
from hw_conversion import convert_hw
def _parse_config_file(f):
parsed = toml.load(f)
pattern = parsed.get('pattern', None)
hw_file = parsed.get('hw_file', 'Homework.ipynb')
return pattern, hw_file
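# The config file consumed above is plain TOML despite the CLI's ``config.ini``
# default name; a minimal example (illustrative values for the two keys read) is:
#
#     pattern = '^# HOMEWORK'
#     hw_file = 'Homework.ipynb'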
def main(args=None):
'''Process command-line arguments and run the program'''
project = argparse.ArgumentParser(description='Convert notebook to script.')
project.add_argument('-c', '--config', default='config.ini', type=open,
help='Config file location.')
parsed_args = project.parse_args(args)
pattern, hw_file = _parse_config_file(parsed_args.config)
convert_hw(pattern, hw_file)
if __name__ == '__main__':
main()
```
#### File: src/hw_conversion/HWPreprocessor.py
```python
import re
from typing import Pattern
from traitlets import Unicode
from nbconvert.preprocessors import Preprocessor
class HomeworkPreproccessor(Preprocessor):
    '''Keeps cells from a notebook that match a regular expression'''
pattern = Unicode().tag(config=True)
def check_conditions(self, cell):
'''Checks that a cell matches the pattern.
Returns: Boolean.
True means cell should be kept.
'''
regexp_compiled = re.compile(self.pattern)
return regexp_compiled.match(cell.source)
def preprocess(self, nb, resources):
'''Preprocessing to apply to each notebook.'''
if not self.pattern:
return nb, resources
nb.cells = [cell for cell in nb.cells if self.check_conditions(cell)]
return nb, resources
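# Minimal wiring sketch (illustrative, not part of the original module): the
# preprocessor can be registered on a standard nbconvert exporter, passing the
# ``pattern`` trait as a keyword; the regular expression shown is hypothetical.
#
#     from nbconvert import PythonExporter
#     exporter = PythonExporter()
#     exporter.register_preprocessor(
#         HomeworkPreproccessor(pattern=r'^# HOMEWORK'), enabled=True)
#     body, resources = exporter.from_filename('Homework.ipynb')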
``` |
{
"source": "jmbattle/pyKEITHLEY",
"score": 2
} |
#### File: jmbattle/pyKEITHLEY/KEITHLEY.py
```python
import visa
class M2308():
def __init__(self, address=16):
self._instr = visa.ResourceManager().open_resource('GPIB0::%s' % address) # Default GPIB address is 16
self._result = 0
self.reset()
def reset(self):
self._instr.write('*RST') # Reset to power-on defaults
self._instr.write('DISPlay:TEXT:STATe 0') # LCD display must be separately reset
def vset(self, vset):
self._instr.write('SOURce1:VOLTage %.3f' % vset) # 1mV resolution, 0 ~ 15V range
def ilim(self, ilim):
self._instr.write('SOURce1:CURRent:LIMit:VALue %.4f' % ilim) # 100uV resolution, 6mA ~ 5A range
def vlim(self, vlim):
self._instr.write('SOURce1:VOLTage:PROTection %i' % vlim) # 1V resolution, 0 ~ 8V range
self._instr.write('SOURce1:VOLTage:PROTection:CLAMp 0') # Enable clamp
def enable(self):
self._instr.write('OUTPut1:STATe 1') # Enable Ch1 output
def disable(self):
self._instr.write('OUTPut1:STATe 0') # Disable Ch1 output
def vmeas(self, smp_avgcount=5, smp_nplc=0.5, noise_floor=1e-3):
        if smp_avgcount < 1: # 1 ~ 10 sample averaging range
smp_avgcount = 1
elif smp_avgcount > 10:
smp_avgcount = 10
if smp_nplc < 0.002: # 0.002 ~ 10 NPLC sampling (33 us ~ 167 ms)
smp_nplc = 0.002
elif smp_nplc > 10:
smp_nplc = 10
self._instr.write('SENSe:FUNCtion "VOLTage"') # Set voltage sensing mode
self._instr.write('SENSe:AVERage %s' % smp_avgcount) # Set sample averaging
self._instr.write('SENSe:NPLCycles %s' % smp_nplc) # Set sampling frequency
self._result = round(float(self._instr.query('READ?').strip('\n')), 3) # Read and format response
if self._result < noise_floor: # Zero sub mV values
self._result = float(0)
return self._result
def imeas(self, smp_avgcount=5, smp_nplc=0.5, noise_floor=100e-6):
if smp_avgcount < 1: # 1 ~ 10 sample averaging range
smp_avgcount = 1
elif smp_avgcount > 10:
smp_avgcount = 10
if smp_nplc < 0.002: # 0.002 ~ 10 NPLC sampling (33 us ~ 167 ms)
smp_nplc = 0.002
elif smp_nplc > 10:
smp_nplc = 10
self._instr.write('SENSe:FUNCtion "CURRent"') # Set current sensing mode
self._instr.write('SENSe:AVERage %s' % smp_avgcount) # Set sample averaging
self._instr.write('SENSe:NPLCycles %s' % smp_nplc) # Set sampling frequency
self._instr.write('SENSe:CURRent:RANGe:AUTO 1') # Enable auto range-finding
self._result = round(float(self._instr.query('READ?').strip('\n')), 4) # Read and format response
if (self._result < noise_floor) and (self._result > -noise_floor): # Zero sub 100uA values
self._result = float(0)
return self._result
def msgon(self, msg='TEST IN PROGRESS!!!!!!!!!!!!!!!!'):
try:
msg.isalnum() # Check for proper string entry
except:
print 'Input message is not a string. Please try again.'
else:
self._instr.write('DISPlay:TEXT:DATA "%s"' % msg) # Write string
self._instr.write('DISPlay:TEXT:STATe 1') # Enable text display mode
def msgoff(self, msg=' '*32):
self._instr.write('DISPlay:TEXT:DATA "%s"' % msg) # Restore default text
self._instr.write('DISPlay:TEXT:STATe 0') # Disable text display mode
def dispon(self):
self._instr.write('DISPlay:ENABle 1') # Enable LCD
def dispoff(self):
self._instr.write('DISPlay:ENABle 0') # Disable LCD
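# Typical session sketch (illustrative only; requires a connected Keithley 2308 and a
# working VISA/GPIB backend):
#
#     supply = M2308(address=16)
#     supply.vset(3.3)       # 3.3 V output
#     supply.ilim(0.5)       # 500 mA current limit
#     supply.enable()
#     volts = supply.vmeas()
#     amps = supply.imeas()
#     supply.disable()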
``` |
{
"source": "jmbeach/KEP.py",
"score": 3
} |
#### File: src/keppy/regular_register.py
```python
from keppy.register import Register
class RegularRegister(Register):
"""Represents a normal register
Between K0000 - K9999.
Each address is a byte on an 8-bit device and a word on a 16-bit
device.
To get a bit, you specify KXXXX.Y on an 8-bit device
and KXXXX.YY on a 16-bit device.
Can be data type Word, Short, BCD,
DWord, Long, LBCD, Float, LLong, QWord,
Double, Date, Boolean"""
def __init__(self, is_16bit, initial_address="K0000"):
Register.__init__(self, is_16bit, initial_address)
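# Usage sketch (illustrative; ``Register`` and its address handling live in
# keppy.register, which is not shown here):
#
#     reg_8bit = RegularRegister(is_16bit=False)                           # bits: KXXXX.Y
#     reg_16bit = RegularRegister(is_16bit=True, initial_address="K0100")  # bits: KXXXX.YY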
``` |
{
"source": "jmbeach/kep-simgen",
"score": 3
} |
#### File: src/kep_simgen/kepware_real_to_simulation.py
```python
import getopt
import json
import sys
from collections import OrderedDict
from keppy.simulator_device import SimulatorDevice
from keppy.project import Project
DOC = """
Kepware JSON file to Simulation Server converter.
Prints conversion to std-out.
Usage:
python kepware_real_to_simulation.py [-h]
python kepware_real_to_simulation.py [-s (8 || 16)] <file-name>
Example:
python kepware_real_to_simulation.py -s 16 tags.json > tags-sim.json
Options:
-h, --help: prints this documentation
-i, --ignore: file name of a list of tag group names to ignore
-s, --size (optional): Specifies the device register size.
"8" or "16" for 8 bit or 16 bit. Default is 8 bit.
"""
def process_groups(groups, simulator):
for group in groups:
for tag in group.tags:
simulator.process_tag(tag)
if (len(group.sub_groups) > 0):
process_groups(group.sub_groups, simulator)
def process_devices(devices):
"""Process all tags in all devices"""
for device in devices:
simulator = SimulatorDevice(device.is_sixteen_bit)
for tag in device.tags:
simulator.process_tag(tag)
process_groups(device.tag_groups, simulator)
def main():
"""MAIN"""
opts, args = getopt.getopt(
sys.argv[1: ],
'h?s:i:',
[
'help',
'size',
'ignore'])
is_sixteen_bit = False
ignore_file = None
for opt, arg in opts:
if opt in ('-h', '-?', '--help'):
print DOC
sys.exit(0)
if opt in ('-i', '--ignore'):
ignore_file = arg
if opt in ('-s', '--size'):
is_sixteen_bit = arg == '16'
if len(args) < 1:
print """You must at least pass in the filename of the Kepware JSON file.
Use -h for help"""
sys.exit(1)
if len(args) > 1:
print 'Too many arguments passed in. Use -h for help.'
sys.exit(1)
with open(args[0]) as f_tags:
to_ignore = []
if ignore_file is not None:
with open(ignore_file) as f_ignore:
to_ignore = f_ignore.read().split('\n')
text = f_tags.read()
# remove first three bytes and encode ascii
text = text[3:].encode('utf_8')
kepware_dict = json.JSONDecoder(object_pairs_hook=OrderedDict).decode(text)
project = Project(kepware_dict, is_sixteen_bit, to_ignore)
for channel in project.channels:
channel.set_driver_simulated()
process_devices(channel.devices)
project.update()
print project.as_json()
if __name__ == "__main__":
main()
``` |
{
"source": "jm-begon/cianfanelli",
"score": 3
} |
#### File: cianfanelli/cianfanelli/draft.py
```python
import numpy as np
from skimage.io import imread
from skimage import img_as_float
from skimage.filters import threshold_otsu
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
def load(fpath):
img = img_as_float(rgb2gray(imread(fpath)))
threshold = threshold_otsu(img)
return img < threshold # inverse color
def img_to_points(bin_img, n_points=1000, max_depth=1.):
height = bin_img.shape[0]
points_2d = np.argwhere(bin_img)
indices = np.arange(len(points_2d))
np.random.shuffle(indices)
points_2d = points_2d[indices[:n_points]]
verticals = points_2d[:, 0]
horizontals = points_2d[:, 1]
    depths = np.random.rand(len(points_2d))  # one depth per retained point
zs = -np.array(verticals) + height
ys = np.array(horizontals)
xs = np.array(depths)
return xs, ys, zs
def plot_scatter3D(xs, ys, zs, elevation=45, azimut=45):
# elevation=0 and azimut=0 is front-view
# elevation=0, and azimut=90 is side-view
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.view_init(elevation, azimut)
ax.set_title("Azimut: {:.2f}".format(azimut))
ax.set_xlabel('X axis')
ax.set_ylabel('Y axis')
ax.set_zlabel('Z axis')
ax.axis("off")
ax.scatter(xs, ys, zs)
return ax.get_proj()
def plot_save_close(xs, ys, zs, elevation=0, azimut=0,
path="../examples/remove_me.png"):
plot_scatter3D(xs, ys, zs, elevation, azimut)
try:
plt.savefig(path)
finally:
plt.close()
if __name__ == '__main__':
np.random.seed(42)
bin_img = load("../resources/test.png")
xs, ys, zs = img_to_points(bin_img, n_points=5000)
azimuts = np.linspace(0, 360, 100)
num_len = len(str(len(azimuts)))
for i, azimut in enumerate(azimuts):
fname = "../examples/test/frame_{}".format(str(i).zfill(num_len))
plot_save_close(xs, ys, zs, azimut=azimut-90, path=fname)
# convert -delay 20 frame_*.png -loop 0 test.gif
``` |
{
"source": "jm-begon/clustertools-analytics",
"score": 2
} |
#### File: clustertools_analytics/array/deprecated.py
```python
import numpy as np
class TableAndLatexFormater(object):
def print_(self, *args):
size = len(args)
for i, arg in enumerate(args):
print(arg, end="")
if i < size - 1:
self.newcol()
def newcol(self):
print(" & ", end="")
def newline(self):
print("\\\\")
def print_line(self, *args):
self.print_(*args)
self.newline()
def print_cell(self, arg, new_col=True):
print(arg, end="")
if new_col:
self.newcol()
def print_means_line(self, means, stds, factor=1):
format = (lambda s: ("%.2f" % s))
args = ["%s $\pm$ %s" % (format(m * factor), format(s * factor)) for
m, s in zip(means, stds)]
# format = (lambda m, s:("%f +- %f"%(m,s)).replace(".", ","))
# args = [format(m,s) for m,s in zip(means, stds)]
self.print_line(*args)
def flush(self):
pass
class ExcelTableAndFormater(object):
def print_(self, *args):
for arg in args:
print(arg, "\t", end="")
def newcol(self):
print("\t", end="")
def newline(self):
print("")
def print_cell(self, arg, new_col=True):
print(arg, end="")
if new_col:
self.newcol()
def print_line(self, *args):
self.print_(*args)
self.newline()
def print_means_line(self, means, stds, factor=1):
format = (lambda s: ("%f" % s).replace(".", ","))
args = [format(m * factor) for m in means]
# format = (lambda m, s:("%f +- %f"%(m,s)).replace(".", ","))
# args = [format(m,s) for m,s in zip(means, stds)]
self.print_line(*args)
def flush(self):
pass
class TableAndLatexShaderFormater(TableAndLatexFormater):
def __init__(self, gray_min=0.4, gray_max=1):
self.buffer = [[]]
self.max = float("-inf")
self.min = float("inf")
self.gray_min = gray_min
self.gray_max = gray_max
def print_(self, *args):
        self.buffer[-1].extend((lambda a=arg: str(a)) for arg in args)  # bind arg at definition time
def newcol(self):
#self.buffer[-1].append([])
pass
def newline(self):
self.buffer.append([])
def print_means_line(self, means, stds, factor=1):
def prepare_for_print(value):
def print_it():
                gray_value = float(np.interp(value, [self.min, self.max],
                                             [self.gray_max, self.gray_min]))
return "\\cellcolor[gray]{%.2f} %.2f" % (gray_value, value*factor)
return print_it
self.min = min(self.min, *means)
self.max = max(self.max, *means)
self.buffer[-1].extend(prepare_for_print(mean) for mean in means)
self.newline()
def flush(self):
for row in self.buffer:
size = len(row)
for i, print_me in enumerate(row):
print(print_me(), end=" ")
if i < size-1:
print("&", end=" ")
print("\\\\")
self.buffer = [[]]
self.max = float("-inf")
self.min = float("inf")
``` |
{
"source": "jm-begon/clustertools",
"score": 2
} |
#### File: clustertools/test/test_experiment.py
```python
from functools import partial
from nose.tools import assert_equal, assert_in, assert_less, assert_raises, \
with_setup, assert_true
from nose.tools import assert_false
from clustertools import ParameterSet, Result, Experiment
from clustertools.state import RunningState, CompletedState, AbortedState, \
CriticalState, PartialState, LaunchableState
from clustertools.storage import PickleStorage
from .util_test import purge, prep, __EXP_NAME__, IntrospectStorage, \
TestComputation, InterruptedComputation, pickle_prep, pickle_purge, \
with_setup_
__author__ = "<NAME> <<EMAIL>>"
__copyright__ = "3-clause BSD License"
# ----------------------------------------------------------------------- Result
def test_result():
expected = {"m"+str(x): x for x in range(1, 5)}
result = Result("m1", m2=2, m3=6)
result.m1 = 1
result.m3 = 3
result["m4"] = 4
assert_equal(len(expected), len(result))
for name, value in expected.items():
assert_equal(result[name], value)
for name, value in result.items():
# redundant
assert_equal(expected[name], value)
dict(result)
repr(result)
# ------------------------------------------------------------------ Computation
@with_setup(prep, purge)
def test_correct_computation():
computation = TestComputation()
intro_storage = computation.storage
result1 = computation(x1=5, x2=2, x3=50)
result2 = intro_storage.load_result(computation.comp_name)
for result in result1, result2:
assert_equal(len(result), 2) # One real metric + repr
assert_equal(result["mult"], 2 * 5)
assert_equal(len(intro_storage.result_history), 1) # Only one computation
assert_equal(len(intro_storage.state_history), 1) # Only one computation
states = list(intro_storage.state_history.values())[0]
# If correct, state should have followed the sequence:
# Running (p=0), Running (p=1), Critical, Partial, Completed
assert_equal(len(states), 5)
assert_true(isinstance(states[0], RunningState))
assert_true(isinstance(states[1], RunningState))
assert_true(isinstance(states[2], CriticalState))
assert_true(isinstance(states[3], PartialState))
assert_true(isinstance(states[4], CompletedState))
assert_equal(states[0].progress, 0.)
assert_equal(states[1].progress, 1.)
@with_setup(prep, purge)
def test_error_computation():
computation = TestComputation()
intro_storage = computation.storage
computation = computation.lazyfy(x1=5, x2=None, x3=50)
assert_raises(TypeError, computation) # 5*None
assert_equal(len(intro_storage.result_history), 0) # Computation not saved
assert_equal(len(intro_storage.state_history), 1) # Only one computation
states = list(intro_storage.state_history.values())[0]
# If correct (i.e. error occurs), state should have evolved as:
# Running, Aborted
assert_equal(len(states), 2)
assert_true(isinstance(states[0], RunningState))
assert_true(isinstance(states[1], AbortedState))
@with_setup(prep, purge)
def test_interrupted_computation():
computation = InterruptedComputation()
intro_storage = computation.storage
assert_raises(KeyboardInterrupt, computation)
assert_equal(len(intro_storage.result_history[computation.comp_name]), 0)
state_history = intro_storage.state_history[computation.comp_name]
# Running -> Launchable
assert_equal(len(state_history), 2)
assert_true(isinstance(state_history[0], RunningState))
assert_true(isinstance(state_history[1], LaunchableState))
@with_setup(prep, purge)
def test_has_parameters():
computation = TestComputation()
computation.lazyfy(p1="1", p2=2)
assert_true(computation.has_parameters(p1="1", p2=2))
assert_true(computation.has_parameters(p1="1"))
assert_true(computation.has_parameters(p2=2))
assert_false(computation.has_parameters(p3=""))
assert_false(computation.has_parameters(p1="1", p3=""))
assert_false(computation.has_parameters(p1="1", p2=2, p3=""))
# ------------------------------------------------------------------- Experiment
@with_setup(prep, purge)
def test_experiment():
parameter_set = ParameterSet()
parameter_set.add_parameters(x1=range(3), x2=range(3))
experiment = Experiment(__EXP_NAME__, parameter_set, TestComputation,
IntrospectStorage)
assert_equal(len(list(experiment.yield_computations())), 9)
# start=3 : skip 0,1,2
assert_equal(len(list(experiment.yield_computations(start=3))), 6)
# capacity=6 : skip 6, 7, 8
assert_equal(len(list(experiment.yield_computations(capacity=6))), 6)
@with_setup_(partial(pickle_prep, exp_name="{}_1".format(__EXP_NAME__)),
partial(pickle_purge, exp_name="{}_1".format(__EXP_NAME__)))
def do_auto_refresh(auto_refresh):
parameter_set = ParameterSet()
parameter_set.add_parameters(x1=range(3), x2=range(3))
experiment = Experiment("{}_1".format(__EXP_NAME__), parameter_set,
TestComputation)
# There should be 9 computations
assert_equal(len(experiment), 9)
count = 0
for i, _ in enumerate(experiment.yield_computations(auto_refresh=auto_refresh)):
if i == 0:
state = CompletedState(
Experiment.name_computation(experiment.exp_name, 6)
)
PickleStorage(experiment.exp_name).update_state(state)
count += 1
print("Auto refresh?", auto_refresh, "--", count)
assert_equal(count, 8 if auto_refresh else 9)
def test_auto_refresh():
do_auto_refresh(True)
do_auto_refresh(False)
``` |
{
"source": "jm-begon/code-reviewer",
"score": 3
} |
#### File: code-reviewer/reviewer/annotations.py
```python
import json
from collections import OrderedDict
from pygments import highlight
from pygments.lexers import get_lexer_by_name, guess_lexer_for_filename
from pygments.lexers.special import TextLexer
from pygments.formatters import HtmlFormatter
from pygments.util import ClassNotFound
from .comment import CSVCommentParser, Severity, Comment
class ReviewedCode:
"""
ReviewedCode is the class containing the annotations and the content of the
code being reviewed. This is a view class. It knows about highlighted lines
and texts but does not know the concept of comment.
"""
@classmethod
def from_file(cls, fpath):
with open(fpath, 'r') as f:
raw_lines = f.readlines()
return cls(raw_lines, fpath).load()
def __init__(self, raw_lines, fpath, comment_parser=None):
self.raw_lines = raw_lines
self.fpath = fpath
self.comment_parser = CSVCommentParser() if comment_parser is None \
else comment_parser
self.texts = OrderedDict()
self.markups = OrderedDict()
self.saved = True
self.severity2color = {
Severity.GOOD: "green",
Severity.NEUTRAL: "blue",
Severity.MILD: "orange",
Severity.SEVERE: "red",
}
self.color2severity = {v:k for k, v in self.severity2color.items()}
self.color2severity[None] = Severity.NEUTRAL
def add_mark(self, line, color_code):
self.markups[line] = color_code
self.saved = False
def remove_mark(self, line):
if line not in self.markups:
return
del self.markups[line]
self.saved = False
def add_text(self, line, text):
self.texts[line] = text
self.saved = False
def remove_text(self, line):
if line not in self.texts:
return
del self.texts[line]
print("after remove:")
print(self.texts)
self.saved = False
def get_formatted_lines(self, filename, lexer=None):
"""
Given the raw_lines, markups and comments, generate the html code to
display the code and comment.
"""
raw_code = ''.join(self.raw_lines)
try:
if lexer is not None:
# TODO: add possibility in review_page to select a lexer
lexer = get_lexer_by_name(lexer)
else:
lexer = guess_lexer_for_filename(filename, raw_code,
stripnl=False, ensurenl=False)
except ClassNotFound as e:
lexer = TextLexer(stripnl=False, ensurenl=False)
formatter = HtmlFormatter(style='xcode', nowrap=True)
style = formatter.get_style_defs()
code = highlight(raw_code, lexer, formatter)
lines = code.split('\n')
return lines, style
def get_texts(self):
return self.texts
def get_markups(self):
return self.markups
def get_saved(self):
return self.saved
def save(self):
# Infer comment
comments = []
mark_lines = set()
for line, text in self.texts.items():
line = int(line)
color_code = self.markups.get(line)
severity = Severity(self.color2severity[color_code])
color_code = self.severity2color[severity] # if color_code was None
prev_color_code = color_code
end_line = line
while color_code is not None and color_code == prev_color_code:
mark_lines.add(end_line)
prev_color_code = color_code
end_line += 1
color_code = self.markups.get(end_line)
comments.append(Comment(self.fpath, line, end_line-1, severity, text))
for line, color_code in self.markups.items():
line = int(line)
if line not in mark_lines:
comments.append(Comment(self.fpath, line, line,
self.color2severity[color_code], ""))
self.comment_parser.save_from_same_file(comments)
self.saved = True
def load(self):
comments = self.comment_parser.load_for_source_file(self.fpath)
for comment in comments:
if len(comment.text) > 0:
# Do not show empty comment
self.add_text(comment.start_line, comment.text)
color = self.severity2color[comment.severity]
for line in range(comment.start_line, comment.end_line+1):
self.add_mark(line, color)
self.saved = True
return self
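# Round-trip sketch (illustrative; 'example.py' is a hypothetical reviewed file):
#
#     reviewed = ReviewedCode.from_file('example.py')
#     reviewed.add_mark(3, 'orange')                            # MILD severity color
#     reviewed.add_text(3, 'Consider renaming this variable.')
#     reviewed.save()   # persisted through CSVCommentParser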
```
#### File: jm-begon/code-reviewer/setup.py
```python
from setuptools import setup
import os
import reviewer
# from https://stackoverflow.com/questions/27664504/how-to-add-package-data-recursively-in-python-setup-py
def package_files(directory):
paths = []
for (path, directories, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
extra_files = package_files('reviewer/html')
setup(
name='reviewer',
version=reviewer.__version__,
description='Package providing the `review` command in order to review codes and share comments',
author='<NAME> <<EMAIL>>, <NAME> <<EMAIL>>',
packages=['reviewer'],
install_requires=['jinja2', 'Pygments'],
scripts=['review'],
package_data={'reviewer': extra_files},
include_package_data=True,
)
``` |
{
"source": "jm-begon/dnd_generators",
"score": 4
} |
#### File: dnd_generators/dnd_generators/dice.py
```python
from random import Random
class Intable:
def __int__(self):
return 0
def __radd__(self, other):
return Add(other, self)
def __add__(self, other):
return Add(self, other)
def __rmul__(self, other):
return Multiply(other, self)
def __mul__(self, other):
return Multiply(self, other)
class RandomNumber(Intable):
def __init__(self, seed=None):
self.rng = seed if isinstance(seed, Random) else Random(seed)
class Dice(RandomNumber):
def __init__(self, sides, seed=None):
super().__init__(seed)
self.sides = sides
def __int__(self):
return self.rng.randint(1, self.sides)
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.sides)}, " \
f"{repr(self.rng)})"
def __str__(self):
return f"d{self.sides}"
class D20(Dice):
def __init__(self, seed=None):
super(D20, self).__init__(sides=20, seed=seed)
        # TODO advantage/disadvantage
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.rng)})"
class Operator(Intable):
pass
class Add(Operator):
def __init__(self, *operands):
self.operands = operands
def __int__(self):
return sum(int(x) for x in self.operands)
def __repr__(self):
return f"{self.__class__.__name__}(*{repr(self.operands)})"
def __str__(self):
return "+".join(str(x) for x in self.operands)
class Multiply(Operator):
def __init__(self, scalar, intable):
self.scalar = scalar
self.intable = intable
def __int__(self):
return self.scalar * int(self.intable)
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.scalar)}, {repr(self.intable)})"
def __str__(self):
return f"{self.scalar}{str(self.intable)}"
``` |