{
"source": "jiksaa/odoons",
"score": 2
} |
#### File: odoons/commands/addons.py
```python
import os
from .command import Command
class Addons(Command):
def _addons_path(self, args):
"""
Action method responsible for the ACTION_LS sub command
:return: None
"""
if not self._odoo or not self._addons:
self.load_config(args.file)
odoo_path = os.path.abspath(self._odoo["path"])
paths = [
os.path.join(odoo_path, "odoo/addons"),
os.path.join(odoo_path, "addons"),
]
for name, conf in self._addons.items():
abspath = os.path.abspath(conf["path"])
paths.append(abspath)
return ",".join(paths)
def run(self, args):
print(self._addons_path(args))
```
#### File: odoons/commands/config.py
```python
import os
import shutil
from configparser import ConfigParser
from .command import Command, commands_registry
from odoons.utils import printing
from odoons.utils.config import OPT_CONF_DIR, OPT_CONF_TEMPLATE, DEFAULT_OPTIONS
class Config(Command):
def _get_config_path(self, options=None):
options = self._options or options
conf_dir = options.get(OPT_CONF_DIR, DEFAULT_OPTIONS[OPT_CONF_DIR])
if not os.path.exists(conf_dir):
os.makedirs(conf_dir, exist_ok=True)
return os.path.join(os.path.abspath(conf_dir), "odoo.cfg")
def _get_template_path(self):
template_file = self._options.get(OPT_CONF_TEMPLATE, DEFAULT_OPTIONS[OPT_CONF_TEMPLATE])
return os.path.abspath(template_file)
def run(self, args):
printing.info("Generating Odoo configuration file...")
self.load_config(args.file)
template_path = self._get_template_path()
config_path = self._get_config_path()
shutil.copyfile(template_path, config_path)
new_options = {}
options = self._odoo.get("options", {})
data_dir = options.get("data_dir", False)
if data_dir:
options.pop("data_dir")
new_options.update({"data_dir": os.path.abspath(data_dir)})
addons_path = commands_registry["addons"]()._addons_path(args)
options.update({"addons_path": addons_path})
new_options.update({k: v for k, v in options.items()})
parser = ConfigParser()
parser.read(config_path)
for k, v in new_options.items():
parser.set("options", k, v)
with open(config_path, "w+") as configfile:
parser.write(configfile)
```
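The `run()` method above leans on `ConfigParser` to patch the `[options]` section of the copied template. A self-contained sketch of just that step, with invented paths and values for illustration:

```python
from configparser import ConfigParser

# Hypothetical template already copied into the configuration directory.
config_path = "conf/odoo.cfg"

parser = ConfigParser()
parser.read(config_path)
if not parser.has_section("options"):
    parser.add_section("options")
# Values of the kind Config.run() injects: an absolute data_dir and the joined addons_path.
parser.set("options", "data_dir", "/var/lib/odoo")
parser.set("options", "addons_path", "/opt/odoo/odoo/addons,/opt/odoo/addons,/opt/project/addons")
with open(config_path, "w") as configfile:
    parser.write(configfile)
```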
#### File: odoons/commands/install.py
```python
import os
import subprocess
from .command import Command
from odoons.utils import printing
from odoons.utils.config import ADDONS_REQ_INSTALL_CONFIG, get_git_addons_path
class Install(Command):
def run(self, args):
"""
Action method responsible for the ACTION_INSTALL sub command
:return: None
"""
printing.info("Installing python dependencies...")
self.load_config(args.file)
def pip_install(path, addons_name):
req_file_path = os.path.join(path, "requirements.txt")
if os.path.exists(req_file_path) and os.path.isfile(req_file_path):
subprocess.run(["pip", "install", "-r", req_file_path], check=True)
else:
printing.warning("No requirements file for {}".format(addons_name))
for name, conf in self._addons.items():
if conf.get(ADDONS_REQ_INSTALL_CONFIG, True):
abspath = get_git_addons_path(conf)
pip_install(abspath, name)
pip_install(self._odoo["path"], "odoo")
pip_install(".", "project root")
```
#### File: odoons/commands/pull.py
```python
import os
import subprocess
from .command import Command
from odoons.utils import printing
from odoons.utils.git import Git
from odoons.utils.config import OPT_INSTALL_ODOO, OPT_APPLY_REQS, get_git_addons_path
DEFAULT_ODOO_URL = "https://github.com/odoo/odoo"
class Pull(Command):
def _init_odoo(self):
printing.info("Cloning Odoo core...")
abspath = os.path.abspath(self._odoo["path"])
url = self._odoo.get("url", DEFAULT_ODOO_URL)
branch = self._odoo["version"]
commit = self._odoo.get("commit", None)
Git(abspath, url, branch, commit).clone()
if self._options.get(OPT_INSTALL_ODOO, False):
printing.info("Installing odoo command...")
subprocess.run(["pip", "install", "-e", abspath, "--no-deps"], check=True)
def _init_addons(self):
printing.info("Cloning addons...")
potential_errors = []
for name, conf in self._addons.items():
printing.info("Initializing {}...".format(name))
conf_type = conf["type"]
if conf_type == "git":
abspath = get_git_addons_path(conf)
git = Git(
abspath,
conf["url"],
conf.get("branch", None),
conf.get("commit", None),
)
returncode = git.clone()
if returncode != 0:
potential_errors.append((name, conf))
if potential_errors:
printing.warning("Some addons repository cloning seems to have issues")
printing.warning("Check execution logs for the following:")
for name, conf in potential_errors:
printing.warning(name)
def run(self, args):
"""
Action method responsible for the ACTION_INIT sub command
:return: None
"""
printing.info("Initializing project...")
self.load_config(args.file)
self._init_odoo()
self._init_addons()
```
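The `Git` helper imported from `odoons.utils.git` is not shown in this excerpt. A rough, purely illustrative stand-in for the interface `Pull` relies on (a constructor plus `clone()` returning a process return code) might look like the sketch below; the real implementation may differ.

```python
import subprocess

class GitSketch:
    """Illustrative stand-in for odoons.utils.git.Git, inferred from how Pull uses it."""
    def __init__(self, path, url, branch=None, commit=None):
        self.path, self.url, self.branch, self.commit = path, url, branch, commit

    def clone(self):
        cmd = ["git", "clone", self.url, self.path]
        if self.branch:
            cmd += ["--branch", self.branch]
        returncode = subprocess.run(cmd).returncode
        if returncode == 0 and self.commit:
            # Pin the checkout to a specific commit when one is configured.
            returncode = subprocess.run(["git", "-C", self.path, "checkout", self.commit]).returncode
        return returncode
```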
{
"source": "jikutlenova/pyreservoir",
"score": 3
} |
#### File: pyreservoir/matbal/aquifer.py
```python
class schilthuis():
def calculate_aquifer(self, pressure, Bw, Wp, Np, Bo, Nfoi, cf, cw, swi, Boi):
"""Calculate Material Balance parameters of Undersaturated Oil Reservoir for Schilthuis Method"""
# in case of undersaturated (above bubblepoint), Rp = Rs = Rsi, Gfgi = Bgi = Eg = 0
import numpy as np
F = Np * Bo
Eo = Bo - Boi
delta_pressure = pressure - pressure[0]
delta_pressure = np.abs(delta_pressure)
Efw = ((cf + (cw * swi)) / (1 - swi)) * delta_pressure
We_schilthuis = (Bw * Wp) + F - (Nfoi * Eo) - ((Nfoi * Boi) * Efw)
return We_schilthuis
class fetkovich():
def initial_encroachable_water(self, pi, ct, r_R, r_aq, h_aq, poro, theta):
"calculate initial encroachable water"
import numpy as np
# r_R: reservoir size (radius of cylindrical-assumed reservoir), in ft
# r_aq: aquifer size, in ft
# theta: for full circle cylindrical, theta=360. if half-circle, theta=180
Wei = (pi * ct * np.pi * ((r_aq ** 2) - (r_R ** 2)) * h_aq * poro * theta) / (5.61458 * 360)
return Wei
def productivity_index(self, perm, h_aq, mu_w, r_aq, r_R, theta, flow='constant'):
"calculate productivity index"
import numpy as np
if flow == 'constant':
# mu_w: water viscosity
J = (0.007082 * perm * h_aq * theta) / ((mu_w * (np.log(r_aq / r_R)) * 360))
return J
if flow == 'no flow':
# mu_w: water viscosity
J = (0.007082 * perm * h_aq * theta) / ((mu_w * (np.log(r_aq / r_R) - 0.75) * 360))
return J
def calculate_aquifer(self, datetime, pressure, Wei, J):
"""
Calculate aquifer influx (We) using Fetkovich Pseudo-steady Method
"""
import numpy as np
"Subtracting datetimes to get time differences (how many days) IN INTEGER"
diff = [j - i for i, j in zip(datetime[:-1], datetime[1:])]
diff = np.array(diff)
# convert datetime format to integer
diffr_arr = []
for k in range(len(diff)):
diffr = diff[k] / np.timedelta64(1, 'D')
diffr_arr.append(float(diffr))
# append 0 to the first index of numpy
diffr_arr = np.append(0, diffr_arr) # now diff has same dimension with time data (a)
delta_time = diffr_arr
"Initial conditions"
We = 0 # We at initial production date (NOTE: different from Wei, initial encroachable water)
pi = pressure[0]
pRn_min_one = pn_min_one = pi
"Calculate aquifer influx"
We_fetkovich = []
for i in range(len(datetime)):
# calculate p_Rn average, Eq 8.29
p_Rn = 0.5 * (pRn_min_one + pressure[i])
# update value of pRn-1 equals to current pressure
pRn_min_one = pressure[i]
# calculate (p_n-1 - p_Rn average), Eq 8.30
pn_min_prn = pn_min_one - p_Rn
# calculate delta Wen, Eq 8.30
delta_We = (Wei / pi) * pn_min_prn * (1 - np.exp(-(J * pi * delta_time[i]) / (Wei)))
# calculate We, Eq 8.31
We = We + delta_We
# update p_n-1 for the next timestep, Eq 8.32
pn_min_one = pi * (1 - (We / Wei))
We_fetkovich.append(We)
return We_fetkovich
class veh():
def calculate_aquifer_constant(self, r_R, h, cf, cw, poro):
"""
Calculate theoretical aquifer constant for VEH (assuming cylindrical reservoir)
Input:
r_R = reservoir radius
"""
import numpy as np
ct = cf + cw # total compressibility, in aquifer sw=1
theta = 360 # full circle cylindrical
B_star = 1.119 * poro * ct * h * (r_R ** 2) * (theta / 360)
return B_star
def calculate_aquifer(self, datetime, pressure, cf, cw, perm, poro, mu_w, r_R, B_star):
import numpy as np
def time_pressure_difference(datetime):
"""Calculate time and pressure differences"""
# Subtracting datetimes to get time differences from initial production date to date[i] (how many days) IN INTEGER
diff = datetime - datetime[0]
# convert datetime format to integer
time_array = []
for k in range(len(diff)):
diffr = diff[k] / np.timedelta64(1, 'D')
time_array.append(float(diffr))
# convert time difference from day to hour
time_array = np.array(time_array) * 24
# create j index for dataframe
j_index = np.arange(0, (len(datetime)), 1)
# calculate delta_pressure for each date
# append an array consists of two initial pressures [pi, pi] (as dummy) to the pressure data
pi = pressure[0]
p_dummy = np.append(np.array([pi, pi]), pressure)
delta_p_j = [b - a for a, b in zip(p_dummy[:-2], p_dummy[2:])]
delta_p_j = 0.5 * np.array(np.abs(delta_p_j))
# pre-processing
j_array = np.arange(1, (len(time_array) + 1), 1)
delta_p_j_array = delta_p_j[1:]
array_j = []
array_time = []
delta_pressure = []
array_time_repeat = []
for i in range(len(time_array)):
new_j = j_array[:i]
new_time = time_array[:i]
new_delta_p_j = delta_p_j_array[:i]
array_j.append(new_j)
array_time.append(new_time)
delta_pressure.append(new_delta_p_j)
# make arrays of repeated times
new_time_repeat = np.repeat((time_array[i]), i)
array_time_repeat.append(new_time_repeat)
# To calculate delta_time, SUBTRACT arrr_time TO arrr_time_repeat
delta_time = np.subtract(array_time_repeat, array_time) # numpy subtract array to array
return delta_time, delta_pressure
def calculate_parameter_VEH(index, delta_time, cf, cw, perm, poro, mu_w, r_R):
"""Calculate dimensionless time (t_DR) and dimensionless aquifer influx (W_eD)"""
# Calculate t_DR and W_eD
ct = cf + cw
t_DR_factor = (0.0002637 * perm) / (poro * mu_w * ct * (r_R ** 2))
t_DR_arr = []
W_eD_arr = []
for i in range(len(delta_time[index])):
t_DR = t_DR_factor * (delta_time[index])[i]
"calculate W_eD using Eq 6.36 and 6.37 for infinite reservoir (See: 6_examples_part2.ipynb)"
if t_DR > 0.01 and t_DR <= 200:
# use Eq 6.36
W_eD = ((1.12838 * np.sqrt(t_DR)) + (1.19328 * t_DR) + (0.269872 * t_DR * np.sqrt(t_DR)) + (
0.00855294 * (t_DR ** 2))) / (1 + (0.616599 * np.sqrt(t_DR) + (0.0413008 * t_DR)))
if t_DR > 200:
# use Eq 6.37
W_eD = ((2.02566 * t_DR) - 4.29881) / np.log(t_DR)
W_eD_arr.append(float(W_eD))
t_DR_arr.append(float(t_DR))
return (t_DR_arr, W_eD_arr)
# Calculate time differences
delta_time, delta_pressure = time_pressure_difference(datetime)
# Calculate aquifer influx
We_veh = []
for x in range(len(datetime)): # range from j index 1 to 9
t_DR_arr, W_eD_arr = calculate_parameter_VEH(x, delta_time, cf, cw, perm, poro, mu_w, r_R) # call function
# calculate We, Equation 8.7
W_eD_multipy_delta_p_j = delta_pressure[x] * W_eD_arr
sigma_We = np.sum(W_eD_multipy_delta_p_j)
We = B_star * sigma_We
We_veh.append(float(We))
return We_veh
```
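A minimal usage sketch of the `fetkovich` class above, with made-up reservoir and aquifer properties (field units, as implied by the 5.61458 and 0.007082 constants):

```python
import numpy as np

dates = np.array(['2020-01-01', '2020-02-01', '2020-03-01'], dtype='datetime64[D]')
pressure = np.array([3000.0, 2950.0, 2910.0])  # reservoir pressure history, psia

aq = fetkovich()
Wei = aq.initial_encroachable_water(pi=3000.0, ct=8e-6, r_R=2000.0, r_aq=10000.0,
                                    h_aq=50.0, poro=0.25, theta=360.0)
J = aq.productivity_index(perm=100.0, h_aq=50.0, mu_w=0.5, r_aq=10000.0, r_R=2000.0,
                          theta=360.0, flow='no flow')
We = aq.calculate_aquifer(dates, pressure, Wei, J)  # cumulative influx per time step, bbl
print(We)
```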
{
"source": "jikyo/romaji4p",
"score": 4
} |
#### File: romaji4p/romaji/substring.py
```python
from romaji.mapping import Mapping
class Substring:
def __init__(self, src, index):
self.index = index
self.window = len(src)
self.src = src
self.romaji = Mapping.get(src)
self.has_romaji = (self.romaji is not None)
def get_systems(self):
if self.has_romaji:
return set(self.romaji.keys())
return set()
def get_romaji(self, system):
if self.has_romaji:
return self.romaji.get(system)
return self.src
@staticmethod
def lookahead(lookahead, s, begin):
end = begin + lookahead + 1
if len(s) < end:
return None
sub = Substring(s[begin:end], begin)
if not sub.has_romaji:
return None
return sub
__all__ = ['Substring']
```
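A small usage sketch, assuming `Mapping.get` returns a dict of romanization systems (for example `{'hepburn': 'sha', ...}`) for kana it knows and `None` otherwise; the actual mapping table is not shown in this excerpt.

```python
# Try the two-character window starting at index 0 of the source string.
sub = Substring.lookahead(1, 'しゃしん', 0)
if sub is not None:
    print(sub.get_systems())           # e.g. {'hepburn', 'kunrei'}
    print(sub.get_romaji('hepburn'))   # e.g. 'sha'
else:
    print('no romaji entry for this window')
```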
{
"source": "jikyo/suji4p",
"score": 3
} |
#### File: suji/tests/test_kansuji.py
```python
import inspect
import unittest
from suji.kansuji import Kansuji, kansujis, kansuji
class TestKansuji(unittest.TestCase):
def setUp(self):
pass
def test_empty_0(self):
self.assertEqual(kansujis(''), [])
def test_empty_1(self):
self.assertEqual(kansujis('ใใใซใกใฏ'), [])
def test_Kansuji_value_000(self):
self.assertEqual(Kansuji.value(30), 'ไธๅ')
def test_Kansuji_value_001(self):
self.assertEqual(Kansuji.value(56), 'ไบๅๅ
ญ')
def test_Kansuji_value_002(self):
self.assertEqual(Kansuji.value(100, False), '็พ')
def test_Kansuji_value_003(self):
self.assertEqual(Kansuji.value(100, True), 'ไธ็พ')
def test_Kansuji_value_004(self):
self.assertEqual(Kansuji.value(111, False), '็พๅไธ')
def test_Kansuji_value_005(self):
self.assertEqual(Kansuji.value(1004), 'ไธๅๅ')
def test_Kansuji_value_006(self):
self.assertEqual(Kansuji.value(1004, False), 'ๅๅ')
def test_Kansuji_value_007(self):
self.assertEqual(Kansuji.value(10005), 'ไธไธไบ')
def test_Kansuji_value_008(self):
self.assertEqual(Kansuji.value(10005, False), 'ไธไบ')
def test_Kansuji_value_009(self):
self.assertEqual(Kansuji.value(20000005), 'ไบๅไธไบ')
def test_Kansuji_value_010(self):
self.assertEqual(Kansuji.value(10000000), 'ไธๅไธ')
def test_Kansuji_value_011(self):
self.assertEqual(Kansuji.value(10000000, False), 'ๅไธ')
def test_Kansuji_value_012(self):
self.assertEqual(Kansuji.value(20010300), 'ไบๅไธไธไธ็พ')
def test_Kansuji_value_013(self):
self.assertEqual(Kansuji.value(2000000607), 'ไบๅๅๅ
ญ็พไธ')
def test_Kansuji_value_014(self):
self.assertEqual(Kansuji.value(32.001), 'ไธๅไบ')
def test_Kansuji_value_minus_000(self):
self.assertEqual(Kansuji.value(-20010300), 'ใใคใในไบๅไธไธไธ็พ')
def test_Kansuji_value_minus_001(self):
self.assertEqual(Kansuji.value(-0), '้ถ')
def test_Kansuji_value_minus_002(self):
self.assertEqual(Kansuji.value(-1), 'ใใคใในไธ')
def test_Kansuji_value_minus_003(self):
self.assertEqual(Kansuji.value(-100000000, True), 'ใใคใในไธๅ')
def test_Kansuji_value_decimal_000(self):
self.assertEqual(Kansuji.value(-1.1), 'ใใคใในไธ')
def test_Kansuji_value_decimal_001(self):
self.assertEqual(Kansuji.value(-100.234, False), 'ใใคใใน็พ')
def test_kansujis_000(self):
expect = [
{'val': '้ถ', 'beg': 0, 'end': 1},
]
self.assertListEqual(kansujis('0'), expect)
def test_kansujis_001(self):
expect = [
{'val': 'ไธ', 'beg': 0, 'end': 1},
]
self.assertListEqual(kansujis('1'), expect)
def test_kansujis_002(self):
expect = [
{'val': 'ๅ', 'beg': 0, 'end': 2},
]
self.assertListEqual(kansujis('10', False), expect)
def test_kansujis_003(self):
expect = [
{'val': 'ไธๅ', 'beg': 0, 'end': 2},
]
self.assertListEqual(kansujis('10', True), expect)
def test_kansujis_004(self):
expect = [
{'val': 'ไนๅไนไธไนๅไน็พไนๅไน', 'beg': 0, 'end': 6},
]
self.assertListEqual(kansujis('999999'), expect)
def test_kansujis_005(self):
expect = [
{'val': 'ไนๅไนไธไนๅไธ็พ', 'beg': 0, 'end': 6},
]
self.assertListEqual(kansujis('999100'), expect)
def test_kansujis_006(self):
expect = [
{'val': 'ไนๅไนไธไนๅ็พ', 'beg': 0, 'end': 6},
]
self.assertListEqual(kansujis('999100', False), expect)
def test_kansujis_007(self):
expect = [
{'val': 'ไธๅไธ', 'beg': 0, 'end': 6},
]
self.assertListEqual(kansujis('1,000ไธ'), expect)
def test_kansujis_008(self):
expect = [
{'val': 'ๅไธ', 'beg': 0, 'end': 6},
]
self.assertListEqual(kansujis('1,000ไธ', False), expect)
def test_kansujis_009(self):
expect = [
{'val': 'ไธๅไธไบๅๅ
ญ', 'beg': 4, 'end': 14},
]
self.assertListEqual(kansujis('ไพกๆ ผใฏ๏ฟฅ10,000,056ใงใใ'), expect)
def test_kansujis_010(self):
expect = [
{'val': 'ๅไธไบๅๅ
ญ', 'beg': 4, 'end': 14},
]
self.assertListEqual(kansujis('ไพกๆ ผใฏ๏ฟฅ10,000,056ใงใใ', False), expect)
def test_kansujis_011(self):
expect = [
{'val': 'ไธ', 'beg': 0, 'end': 1},
{'val': 'ไบๅ
ไธๅไธไบๅๅไธ', 'beg': 6, 'end': 15},
]
self.assertEqual(kansujis('๏ผใคใฎไพกๆ ผใไบๅ
30ไธไบๅๅ7ๅใซใชใใพใใ', False), expect)
def test_kansujis_012(self):
expect = [
{'val': 'ไธ', 'beg': 0, 'end': 1},
{'val': 'ไบๅ
ไธๅไธไบๅไธๅไธ', 'beg': 6, 'end': 15},
]
self.assertEqual(kansujis('๏ผใคใฎไพกๆ ผใไบๅ
30ไธไบๅๅ7ๅใซใชใใพใใ'), expect)
def test_kansuji_000(self):
self.assertEqual(kansuji('0'), '้ถ')
def test_kansuji_001(self):
self.assertEqual(kansuji('ใใใฏ0'), 'ใใใฏ้ถ')
def test_kansuji_002(self):
self.assertEqual(kansuji('0ใฏ้ถ'), '้ถใฏ้ถ')
def test_kansuji_003(self):
self.assertEqual(kansuji('1'), 'ไธ')
def test_kansuji_004(self):
self.assertEqual(kansuji('10'), 'ไธๅ')
def test_kansuji_005(self):
self.assertEqual(kansuji('10', False), 'ๅ')
def test_kansuji_006(self):
self.assertEqual(kansuji('11', False), 'ๅไธ')
def test_kansuji_007(self):
self.assertEqual(kansuji('11'), 'ไธๅไธ')
def test_kansuji_008(self):
self.assertEqual(kansuji('ใใใฏ999999ใงใใ'), 'ใใใฏไนๅไนไธไนๅไน็พไนๅไนใงใใ')
def test_kansuji_009(self):
self.assertEqual(kansuji('ใใใฏ999100ใงใใ'), 'ใใใฏไนๅไนไธไนๅไธ็พใงใใ')
def test_kansuji_010(self):
self.assertEqual(kansuji('ใใใฏ999100ใงใใ', False), 'ใใใฏไนๅไนไธไนๅ็พใงใใ')
def test_kansuji_011(self):
self.assertEqual(kansuji('ไพกๆ ผใฏ๏ฟฅ10,000,056ใงใใ'), 'ไพกๆ ผใฏ๏ฟฅไธๅไธไบๅๅ
ญใงใใ')
def test_kansuji_012(self):
self.assertEqual(kansuji('ไพกๆ ผใฏ๏ฟฅ10,000,056ใงใใ', False), 'ไพกๆ ผใฏ๏ฟฅๅไธไบๅๅ
ญใงใใ')
def test_kansuji_013(self):
self.assertEqual(kansuji('๏ผใคใฎไพกๆ ผใไบๅ
30ไธไบๅๅ7ๅใซใชใใพใใ'), 'ไธใคใฎไพกๆ ผใไบๅ
ไธๅไธไบๅไธๅไธๅใซใชใใพใใ')
def test_kansuji_014(self):
self.assertEqual(kansuji('๏ผใคใฎไพกๆ ผใไบๅ
30ไธไบๅๅ7ๅใซใชใใพใใ', False), 'ไธใคใฎไพกๆ ผใไบๅ
ไธๅไธไบๅๅไธๅใซใชใใพใใ')
```
{
"source": "jil8885/django_app_hyuabot",
"score": 3
} |
#### File: django_app_hyuabot/answer/answer_shuttle.py
```python
from datetime import datetime
from transport.shuttle.get_info import get_departure_info, get_first_last_departure
from kakao_i_hanyang.common.sender import *
def make_answer_shuttle_depart_info(user_answer) -> str:
if '์ ์
ํ๋ฒ์ค ๋์ฐฉ ์ ๋ณด์
๋๋ค' in user_answer:
dest_stop = user_answer.split('์ ์
ํ๋ฒ์ค ๋์ฐฉ ์ ๋ณด์
๋๋ค')[0].strip()
else:
dest_stop = user_answer[2:].strip()
depart_info = get_departure_info(dest_stop)
# When the shuttle is not running that day,
if depart_info == '์ค๋ ์
ํ ์ดํ์ ํ์ง ์์ต๋๋ค.':
server_answer = insert_text(depart_info)
else:
emoji = {"์
ํ์ฝ": '๐ซ ', "ํ๋์์ญ": '๐ ', "์์ ์ธA": '๐ ', "๊ธฐ์์ฌ": '๐๏ธ ', "์
ํ์ฝ ๊ฑด๋ํธ": '๐ซ '}
block_id = '5cc3dc8ee82127558b7e6eba'
bus_to_come_dh, bus_to_come_dy, bus_to_come_c, now = depart_info
# Convert the arrival info into a response
if dest_stop == '๊ธฐ์์ฌ':
result = '๊ธฐ์์ฌโ์
ํ์ฝ,ํ๋์(์งํ)\n'
if bus_to_come_dh:
for depart_time in bus_to_come_dh:
result += f'{depart_time.strftime("%H์ %M๋ถ")} ์ถ๋ฐ({(depart_time - now).seconds // 60}๋ถ ํ)\n'
else:
result += '๋์ฐฉ ์์ ์ธ ๋ฒ์ค๊ฐ ์์ต๋๋ค.\n'
result += '\n๊ธฐ์์ฌโ์
ํ์ฝ,์์ ์ธ(์งํ)\n'
if bus_to_come_dy:
for depart_time in bus_to_come_dy:
result += f'{depart_time.strftime("%H์ %M๋ถ")} ์ถ๋ฐ({(depart_time - now).seconds // 60}๋ถ ํ)\n'
else:
result += '๋์ฐฉ ์์ ์ธ ๋ฒ์ค๊ฐ ์์ต๋๋ค.\n'
result += '\n๊ธฐ์์ฌโ์
ํ์ฝ,ํ๋์,์์ ์ธ(์ํ)\n'
if bus_to_come_c:
for depart_time in bus_to_come_c:
result += f'{depart_time.strftime("%H์ %M๋ถ")} ์ถ๋ฐ({(depart_time - now).seconds // 60}๋ถ ํ)\n'
else:
result += '๋์ฐฉ ์์ ์ธ ๋ฒ์ค๊ฐ ์์ต๋๋ค.\n'
elif dest_stop == '์
ํ์ฝ':
result = '์
ํ์ฝโํ๋์(์งํ)\n'
if bus_to_come_dh:
for depart_time in bus_to_come_dh:
result += f'{depart_time.strftime("%H์ %M๋ถ")} ์ถ๋ฐ({(depart_time - now).seconds // 60}๋ถ ํ)\n'
else:
result += '๋์ฐฉ ์์ ์ธ ๋ฒ์ค๊ฐ ์์ต๋๋ค.\n'
result += '\n์
ํ์ฝโ์์ ์ธA(์งํ)\n'
if bus_to_come_dy:
for depart_time in bus_to_come_dy:
result += f'{depart_time.strftime("%H์ %M๋ถ")} ์ถ๋ฐ({(depart_time - now).seconds // 60}๋ถ ํ)\n'
else:
result += '๋์ฐฉ ์์ ์ธ ๋ฒ์ค๊ฐ ์์ต๋๋ค.\n'
result += '\n์
ํ์ฝโํ๋์,์์ ์ธ(์ํ)\n'
if bus_to_come_c:
for depart_time in bus_to_come_c:
result += f'{depart_time.strftime("%H์ %M๋ถ")} ์ถ๋ฐ({(depart_time - now).seconds // 60}๋ถ ํ)\n'
else:
result += '๋์ฐฉ ์์ ์ธ ๋ฒ์ค๊ฐ ์์ต๋๋ค.\n'
elif dest_stop == 'ํ๋์์ญ':
result = 'ํ๋์โ์
ํ์ฝ(์งํ)\n'
if bus_to_come_dh:
for depart_time in bus_to_come_dh:
result += f'{depart_time.strftime("%H์ %M๋ถ")} ์ถ๋ฐ({(depart_time - now).seconds // 60}๋ถ ํ)\n'
else:
result += '๋์ฐฉ ์์ ์ธ ๋ฒ์ค๊ฐ ์์ต๋๋ค.\n'
result += '\nํ๋์โ์์ ์ธ,์
ํ์ฝ(์ํ)\n'
if bus_to_come_c:
for depart_time in bus_to_come_c:
result += f'{depart_time.strftime("%H์ %M๋ถ")} ์ถ๋ฐ({(depart_time - now).seconds // 60}๋ถ ํ)\n'
else:
result += '๋์ฐฉ ์์ ์ธ ๋ฒ์ค๊ฐ ์์ต๋๋ค.\n'
elif dest_stop == '์์ ์ธA':
result = '์์ ์ธโ์
ํ์ฝ\n'
if bus_to_come_c:
for depart_time in bus_to_come_c:
result += f'{depart_time.strftime("%H์ %M๋ถ")} ์ถ๋ฐ({(depart_time - now).seconds // 60}๋ถ ํ)\n'
else:
result += '๋์ฐฉ ์์ ์ธ ๋ฒ์ค๊ฐ ์์ต๋๋ค.\n'
elif dest_stop == '์
ํ์ฝ ๊ฑด๋ํธ':
result = '์
ํ์ฝ ๊ฑด๋ํธโ๊ธฐ์์ฌ\n'
if bus_to_come_c:
for depart_time in bus_to_come_c:
result += f'{depart_time.strftime("%H์ %M๋ถ")} ์ถ๋ฐ({(depart_time - now).seconds // 60}๋ถ ํ)\n'
else:
result += '๋์ฐฉ ์์ ์ธ ๋ฒ์ค๊ฐ ์์ต๋๋ค.\n'
else:
result = '์๋ชป๋ ์ ๋ฅ์ฅ ์ ๋ณด์
๋๋ค.'
server_answer = insert_text(result.strip())
# Add quick-reply buttons at the bottom
reply = make_reply('๐ ์ ๋ฅ์ฅ', f'{dest_stop} ์ ๋ฅ์ฅ ์ ๋ณด์
๋๋ค.', '5ebf702e7a9c4b000105fb25')
response = insert_replies(server_answer, reply)
reply = make_reply('๐ซ ์ค๋ฅ์ ๋ณด', '์
ํ ์ค๋ฅ ์ ๋ณดํ๊ธฐ', '5cc3fced384c5508fceec5bb')
response = insert_replies(response, reply)
for stop_name in emoji.keys():
if stop_name != dest_stop:
message = f"{stop_name}์ ์
ํ๋ฒ์ค ๋์ฐฉ ์ ๋ณด์
๋๋ค"
reply = make_reply(f'{emoji[stop_name]}{stop_name}', message, block_id)
response = insert_replies(response, reply)
return response
def make_answer_shuttle_stop_detail(user_answer):
stop_list = {"์
ํ์ฝ": "shuttle", "์
ํ์ฝ ๊ฑด๋ํธ": "shuttle", "ํ๋์์ญ": "station", "์์ ์ธA": "terminal", "๊ธฐ์์ฌ": "dormitory"}
stop_view = {"shuttle": "http://kko.to/Kf-ZqboYH", "station": "http://kko.to/IyyXgzPDo",
"dormitory": "http://kko.to/vClEubBDj", "terminal": "http://kko.to/guG2uboYB"}
stop_name = user_answer.split('์ ๋ฅ์ฅ ์ ๋ณด์
๋๋ค')[0].strip()
stop_key = stop_list[stop_name]
bool_semester, bool_weekend, bus_to_come_dh, bus_to_come_dy, bus_to_come_c = get_first_last_departure(stop_name)
if bool_semester == 'halt':
result_str = '๋น์ผ์ ์ดํํ์ง ์์ต๋๋ค.'
else:
result_str = '์ฒซ,๋ง์ฐจ ์ ๋ณด์
๋๋ค.\n'
if stop_name == '๊ธฐ์์ฌ' or stop_name == '์
ํ์ฝ':
if bus_to_come_dh:
result_str += f'ํ๋์ ์งํ {bus_to_come_dh[0].strftime("%H:%M")}/{bus_to_come_dh[-1].strftime("%H:%M")}\n'
if bus_to_come_dy:
result_str += f'์์ ์ธA ์งํ {bus_to_come_dy[0].strftime("%H:%M")}/{bus_to_come_dy[-1].strftime("%H:%M")}\n'
if bus_to_come_c:
result_str += f'์ํ๋ฒ์ค {bus_to_come_c[0].strftime("%H:%M")}/{bus_to_come_c[-1].strftime("%H:%M")}\n'
elif stop_name == 'ํ๋์์ญ':
if bus_to_come_dh:
result_str += f'์
ํ์ฝ ์งํ {bus_to_come_dh[0].strftime("%H:%M")}/{bus_to_come_dh[-1].strftime("%H:%M")}\n'
if bus_to_come_c:
result_str += f'์ํ๋ฒ์ค {bus_to_come_c[0].strftime("%H:%M")}/{bus_to_come_c[-1].strftime("%H:%M")}\n'
elif stop_name == '์์ ์ธA':
if bus_to_come_c:
result_str += f'์
ํ์ฝ ์งํ {bus_to_come_c[0].strftime("%H:%M")}/{bus_to_come_c[-1].strftime("%H:%M")}\n'
elif stop_name == '์
ํ์ฝ ๊ฑด๋ํธ':
if bus_to_come_c:
result_str += f'๊ธฐ์์ฌ ์งํ {bus_to_come_c[0].strftime("%H:%M")}/{bus_to_come_c[-1].strftime("%H:%M")}\n'
else:
result_str = '์๋ชป๋ ์ ๋ฅ์ฅ ์ ๋ณด์
๋๋ค.\nํด๋น ํ๋ฉด์ ๋ณด์ค ๊ฒฝ์ฐ ๊ด๋ฆฌ์์๊ฒ ์๋ ค์ฃผ์ญ์์ค.'
response = insert_card(f'{stop_name} ์ ๋ฅ์ฅ ์ ๋ณด', result_str.strip())
# response = insert_button(response, '๐บ๏ธ ์นด์นด์ค๋งต์์ ๋ณด๊ธฐ', stop_map[stop_key])
response = insert_button(response, '๐ ๋ก๋๋ทฐ๋ก ๋ณด๊ธฐ', stop_view[stop_key])
return response
```
#### File: django_app_hyuabot/common/sender.py
```python
from copy import deepcopy
# Base response skeleton
base_response = {'version': '2.0', 'template': {'outputs': [], 'quickReplies': []}}
# KakaoTalk channel - simple text response
def insert_text(text):
new_response = deepcopy(base_response)
new_response['template']['outputs'] = [{"simpleText": {"text": text}}]
return new_response
# KakaoTalk channel - simple image response
def insert_image(image_url, alt_text):
new_response = deepcopy(base_response)
new_response['template']['outputs'] = [{"simpleImage": {"imageUrl": image_url, "altText": alt_text}}]
return new_response
# KakaoTalk channel - basic card response
def insert_card(title, description, image_url=None, width=None, height=None):
new_response = deepcopy(base_response)
if image_url != None:
if width != None and height != None:
new_response['template']['outputs'] = [{'basicCard': {
'title': title,
'description': description,
'thumbnail': {"imageUrl": image_url, 'fixedRatio': True, 'width': width, 'height': height},
'buttons': []
}}]
else:
new_response['template']['outputs'] = [{'basicCard': {
'title': title,
'description': description,
'thumbnail': {"imageUrl": image_url},
'buttons': []
}}]
else:
new_response['template']['outputs'] = [{'basicCard': {
'title': title,
'description': description,
'buttons': []
}}]
return new_response
# KakaoTalk channel - add a web-link button to the card
def insert_button(new_response, label, webUrl):
new_response['template']['outputs'][0]['basicCard']['buttons'].append({
"action": "webLink",
"label": label,
"webLinkUrl": webUrl
})
return new_response
# KakaoTalk channel - append a quick-reply button
def insert_replies(new_response, reply):
new_response['template']['quickReplies'].append(reply)
return new_response
# KakaoTalk channel - build a quick-reply button
def make_reply(label, message, block_id):
return {'action': 'block', 'label': label, 'messageText': message, 'blockId': block_id}
```
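The helpers above compose what appears to be a Kakao i Open Builder "version 2.0" skill response incrementally. For example, a card with a web-link button and one quick-reply (the label text here is illustrative; the road-view URL and block id are taken from the code above):

```python
response = insert_card('Shuttle stop info', 'First/last departure times for the selected stop')
response = insert_button(response, 'Open road view', 'http://kko.to/Kf-ZqboYH')
reply = make_reply('Report an error', 'Report a shuttle error', '5cc3fced384c5508fceec5bb')
response = insert_replies(response, reply)
# response is now a dict following the skill payload schema and can be returned as JSON.
```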
#### File: jil8885/django_app_hyuabot/views.py
```python
import json
# Try restful api framework later
# from rest_framework.decorators import api_view, parser_classes
# from rest_framework.response import Response
# from rest_framework.parsers import JSONParser
# Use Normal django framework
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from kakao_i_hanyang.answer.answer_food import make_answer_food_menu
from kakao_i_hanyang.answer.answer_library import make_answer_reading_room_info
from kakao_i_hanyang.answer.answer_shuttle import make_answer_shuttle_depart_info, make_answer_shuttle_stop_detail
from kakao_i_hanyang.common.receiver import get_user_data
from kakao_i_hanyang.common.sender import insert_text, make_reply, insert_replies
from kakao_i_hanyang.common.user import get_user, find_is_new_user, update_user
@csrf_exempt
def get_shuttle_departure_info(request):
_, user_answer = get_user_data(request)
response = make_answer_shuttle_depart_info(user_answer)
return JsonResponse(response)
@csrf_exempt
def get_shuttle_stop_info(request):
_, user_answer = get_user_data(request)
response = make_answer_shuttle_stop_detail(user_answer)
return JsonResponse(response)
@csrf_exempt
def get_food_menu(request):
user_id, user_answer = get_user_data(request)
user_info = get_user(user_id)
if not user_info:
response = find_is_new_user(user_id, user_answer)
return JsonResponse(response)
if '์ ์๋จ์
๋๋ค.' in user_answer:
response = make_answer_food_menu(user_info['campus'], user_answer.split('์ ์๋จ์
๋๋ค.')[0].strip())
else:
response = insert_text('์ํ๋ ์๋น์ ์ ํํด์ฃผ์ธ์.')
if user_info['campus']:
rest_list = ['ํ์์๋น', '์ ํ์์๋น', '๊ต์ง์์๋น', '์ ๊ต์ง์์๋น', '์ 1์ํ๊ด์๋น', '์ 2์ํ๊ด์๋น', '์ฌ๋๋ฐฉ', 'ํ์ํํฌ']
else:
rest_list = ['ํ์์๋น', '๊ต์ง์์๋น', '์ฐฝ์์ธ์ฌ์์๋น', 'ํธ๋์ฝํธ', '์ฐฝ์
๋ณด์ก์ผํฐ']
for restaurant in rest_list:
reply = make_reply(restaurant, f'{restaurant}์ ์๋จ์
๋๋ค.', '5eaa9b11cdbc3a00015a23fb')
response = insert_replies(response, reply)
return JsonResponse(response)
@csrf_exempt
def get_reading_room_seat_info(request):
user_id, user_answer = get_user_data(request)
user_info = get_user(user_id)
if not user_info:
response = find_is_new_user(user_id, user_answer)
return JsonResponse(response)
if '์ด๋์ค ์ ๋ณด' in user_answer:
response = make_answer_reading_room_info(user_info['campus'])
else:
response = make_answer_reading_room_info(user_info['campus'], user_answer.split('์ ์ข์์ ๋ณด์
๋๋ค.')[0].strip())
return JsonResponse(response)
@csrf_exempt
def update_campus(request):
"""์ฌ์ฉ์ ID๋ฅผ ๊ธฐ๋ฐ์ผ๋ก ์์ธ์บ ํผ์ค โ ERICA ์บ ํผ์ค ์ํธ ์ ํ์ด ๊ฐ๋ฅํ๊ฒ ํฉ๋๋ค."""
user_id, answer = get_user_data(request)
user_info = get_user(user_id)
block_id = '5eaa9bf741559f000197775d'
if user_info:
if user_info['campus']:
update_user(user_id, 0)
response = insert_text('ERICA ์บ ํผ์ค๋ก ๋ณ๊ฒฝ๋์์ต๋๋ค.')
else:
update_user(user_id, 1)
response = insert_text('์์ธ ์บ ํผ์ค๋ก ๋ณ๊ฒฝ๋์์ต๋๋ค.')
else:
response = find_is_new_user(user_id, answer)
return JsonResponse(response)
```
{
"source": "jil8885/hyuabot-mainline",
"score": 3
} |
#### File: hyuabot-mainline/food/menu.py
```python
from enum import Enum
import requests
from lxml.cssselect import CSSSelector
from lxml.html import fromstring
class CafeteriaSeoul(Enum):
student_seoul_1 = "1"
teacher_seoul_1 = "2"
sarang_seoul = "3"
teacher_seoul_2 = "4"
student_seoul_2 = "5"
dorm_seoul_1 = "6"
dorm_seoul_2 = "7"
hangwon_seoul = "8"
class CafeteriaERICA(Enum):
teacher_erica = "11"
student_erica = "12"
dorm_erica = "13"
foodcoart_erica = "14"
changbo_erica = "15"
def get_recipe(cafeteria, url="https://www.hanyang.ac.kr/web/www/re"):
cafeteria_info = {"restaurant": cafeteria.name}
# get
try:
res = requests.get(f"{url}{cafeteria.value}")
except requests.exceptions.RequestException as _:
cafeteria_info["restaurant"] = "-1"
return cafeteria_info
tree = fromstring(res.text)
inboxes = CSSSelector("div.tab-pane")
td = CSSSelector("td")
cafeteria_info['time'] = ''
for inbox in inboxes(tree):
for content in td(inbox):
txt = content.text_content().strip()
if '์กฐ์' in txt:
cafeteria_info['time'] += f'{txt}\n'
elif '์ค์' in txt:
cafeteria_info['time'] += f'{txt}\n'
elif '์์' in txt:
cafeteria_info['time'] += f'{txt}\n'
inboxes = CSSSelector("div.in-box")
h4 = CSSSelector("h4") # meal-time headings: breakfast / lunch / dinner
h3 = CSSSelector("h3") # menu
li = CSSSelector("li")
price = CSSSelector("p.price")
for inbox in inboxes(tree):
title = h4(inbox)[0].text_content()
cafeteria_info[title] = []
for l in li(inbox):
menu = h3(l)[0].text_content().replace("\t", "").replace("\r\n", "")
p = price(l)[0].text_content()
cafeteria_info[title].append({"menu": menu, "price": p})
return cafeteria_info
```
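A minimal usage sketch (requires network access and assumes the page layout still matches the CSS selectors above):

```python
info = get_recipe(CafeteriaSeoul.student_seoul_1)
if info.get("restaurant") == "-1":
    print("request failed")
else:
    print(info.get("time", ""))
    for section, menus in info.items():
        if isinstance(menus, list):
            for entry in menus:
                print(section, entry["menu"], entry["price"])
```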
{
"source": "jiladahe1997/jiladahe1997s-website-ml",
"score": 3
} |
#### File: jiladahe1997s_website_ml/jiladahe1997s_website_ml/views.py
```python
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import tensorflow as tf
import os
'''
Protocol: HTTP
Request: a base64-encoded image sent from the frontend
Response: the age predicted by the model
'''
# Note: the model is reloaded on every request. Loading it only once when views.py is imported
# would save memory, but doing so raised an unexplained error the author could not resolve
# after several hours of searching.
# model = tf.keras.models.load_model(os.path.abspath('./my_model.h5'))
# The frontend request comes from a different origin, so this view is exempted from CSRF checks.
@csrf_exempt
def age_predict(request):
import tensorflow as tf
import os
import numpy as np
import base64
import json
model = tf.keras.models.load_model(os.path.abspath('./my_model.h5'))
model.compile(optimizer=tf.train.AdamOptimizer(),
loss="mean_absolute_error",
metrics=["mean_absolute_error"])
# * Strip the data-URL header from the base64 string sent by the frontend,
# * then convert it to the URL-safe base64 alphabet, as tf.decode_base64 expects; see
#   https://www.tensorflow.org/versions/r1.9/api_docs/python/tf/decode_base64?hl=en
img_base64_header, img_base64 = json.loads(request.body)['base64_img'].split(",",1)
raw = base64.decodebytes(img_base64.encode('utf-8'))
img_base64_websafe = base64.urlsafe_b64encode(raw)
# * Decode the base64 string into an image tensor and feed it to the model
img_raw = tf.decode_base64(img_base64_websafe)
image_tensor = tf.cast(tf.image.resize_images(tf.io.decode_jpeg(img_raw, channels=3), [192,192]),tf.float32)
image = tf.expand_dims(image_tensor, axis=0)
age = model.predict(image, steps=1)
return HttpResponse(age)
```
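A hypothetical client-side call, assuming the view is routed at `/age_predict/` (the URL pattern is not shown in this excerpt), posting the same JSON body the view parses:

```python
import base64
import json
import requests

with open('face.jpg', 'rb') as f:
    b64 = base64.b64encode(f.read()).decode('ascii')
payload = {'base64_img': 'data:image/jpeg;base64,' + b64}
r = requests.post('http://localhost:8000/age_predict/', data=json.dumps(payload))
print(r.text)  # the predicted age returned by the model
```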
{
"source": "jilanglois-su/cobs10-dengai",
"score": 3
} |
#### File: src/d04_modeling/abstract_model.py
```python
import pandas as pd
BIAS_COL = 'const'
class AbstractModel:
def __init__(self, x_train, y_train, bias=True):
self._w_map = None
self._bias = bias
self.add_constant(x_train)
self._x_train = x_train
self._y_train = y_train
def add_constant(self, x_data):
if self._bias:
if BIAS_COL not in x_data.columns:
x_data[BIAS_COL] = 1.
return None
def get_x_train(self, values=True):
is_df = isinstance(self._x_train, pd.DataFrame)
if values:
if is_df:
return self._x_train.values
else:
return self._x_train
else:
if is_df:
return self._x_train
else:
raise Exception("x Data is not available as DataFrane")
def get_y_train(self, values=True):
is_sr = isinstance(self._y_train, pd.Series)
if values:
if is_sr:
return self._y_train.values.reshape((-1, 1))
else:
return self._y_train
else:
if is_sr:
return self._y_train
else:
raise Exception("y Data is not available as Series")
def fit(self):
raise NotImplementedError
def predict(self, city, x_data):
raise NotImplementedError
def forecast(self, city, x_data, y_data, m):
raise NotImplementedError
```
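`AbstractModel` only fixes the data-handling plumbing; `fit`, `predict` and `forecast` are left to subclasses (`PoissonGLM` and `PoissonHMM` in this repo). An illustrative toy subclass, not part of the repo, showing how the hooks are meant to be filled in:

```python
import numpy as np

class MeanCountModel(AbstractModel):
    """Toy model that predicts the average observed count for every input row."""
    def fit(self):
        self._w_map = self.get_y_train().mean()

    def predict(self, city, x_data):
        self.add_constant(x_data)
        return np.full(len(x_data), self._w_map)
```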
#### File: src/d04_modeling/poisson_hmm.py
```python
import numpy as np
import pandas as pd
from scipy.special import logsumexp, gammaln
from scipy.stats import poisson
from src.d04_modeling.poisson_glm import PoissonGLM
import multiprocessing as mp
cpu_count = mp.cpu_count()
eps = 1.e-6
class PoissonHMM:
def __init__(self, num_states, alpha=1., sigma2=1., seed=None):
if seed is not None:
np.random.seed(seed)
self.num_states = num_states
if isinstance(alpha, float):
alpha = np.ones(self.num_states) * alpha
self.alpha = alpha
self.sigma2 = sigma2
@staticmethod
def initial_transition_matrix(K, e=0.05):
transition_matrix = np.ones((K, K)) * e / (K - 1)
np.fill_diagonal(transition_matrix, 1. - e)
return transition_matrix
def initialization(self, p):
parameters = dict()
parameters['mu'] = np.random.normal(loc=0.0, scale=np.sqrt(self.sigma2), size=(self.num_states, p))
transition_matrix = self.initial_transition_matrix(self.num_states)
parameters['transition_matrix'] = transition_matrix
parameters['initial_dist'] = np.ones(self.num_states) / self.num_states
return parameters
@staticmethod
def forward_pass(initial_dist, transition_matrix, log_likelihoods):
"""Perform the forward pass and return the forward messages for
a single "event".
In the descriptions below, let K denote the number of discrete states
and T the number of time steps.
transition_matrix as defined by STATS 271 lecture: P_{ij}=P(z_t=j \mid z_{t-1}=i)
Parameters
---
initial_dist: (K,) array with initial state probabilities
transition_matrix: (K, K) array where each row is a transition probability
log_likelihoods: (T, K) array with entries log p(x_t | z_t=k)
Returns
---
alphas: (T, K) array of forward messages
"""
T, K = log_likelihoods.shape
log_alphas = np.zeros((T, K))
log_alphas[0, :] = np.log(initial_dist)
for t in range(1, T):
factor = log_alphas[t-1, :] + log_likelihoods[t-1, :]
log_alphas_next = logsumexp(np.log(transition_matrix + eps) + factor[:, np.newaxis], axis=0)
log_alphas[t, :] = log_alphas_next - logsumexp(factor)[np.newaxis]
return log_alphas
@staticmethod
def compute_marginal_ll(log_alphas, log_likelihoods):
"""Compute the marginal likelihood using the forward messages.
Parameters
----------
log_alphas: (T, K) array of forward messages.
log_likelihoods: (T, K) array with entries log p(x_t | z_t=k)
Returns
-------
marginal_ll: real-valued scalar, log p(x_{1:T})
"""
return logsumexp(log_alphas + log_likelihoods, axis=1).sum()
@staticmethod
def backward_pass(transition_matrix, log_likelihoods):
"""Perform the backward pass and return the backward messages for
a single "event".
Parameters
---
transition_matrix: (K, K) array where each row is a transition probability
log_likelihoods: (T, K) array with entries log p(x_t | z_t=k)
Returns
---
log_betas: (T, K) array of backward messages
"""
T, K = log_likelihoods.shape
log_betas = np.zeros((T, K))
for t in range(1, T):
factor = log_betas[T-t, :] + log_likelihoods[T-t, :]
log_betas_next = logsumexp(np.log(transition_matrix + eps) + factor[np.newaxis, :], axis=1)
log_betas[T-1-t, :] = log_betas_next
return log_betas
def compute_log_likelihoods(self, x_data, y_data, mu, num_periods):
"""Compute the log likelihood for a single "event".
Parameters
---
x_data: (T, p) array with features over time for a particular year
y_data: (T, 1) array with counts over time for a particular year
mu: (K, p) array with the Poisson GLM coefficients
num_periods: T
Returns
---
log_likelihoods: (T, K) array with entries log p(y_t | x_t, mu, z_t=k)
"""
log_likelihoods = np.zeros((num_periods, self.num_states))
for k in range(self.num_states):
log_rate_k = np.dot(x_data, mu[k])
log_likelihoods[:, k] = y_data * log_rate_k - np.exp(log_rate_k) - gammaln(y_data+1)
return log_likelihoods
def e_step(self, event_data, parameters):
"""Run the E step for each event First compute the log likelihoods
for each time step and discrete state using the given data and parameters.
Then run the forward and backward passes and use the output to compute the
posterior marginals, and use marginal_ll to compute the marginal likelihood.
Parameters
---
event_data: list of (T, 20) arrays with player positions over time for each event
parameters: a data structure containing the model parameters; i.e. the
initial distribution, transition matrix, and Gaussian means and
covariances.
Returns
---
expectations: list of (T, K) arrays of marginal probabilities
p(z_t = k | x_{1:T}) for each event.
marginal_ll: marginal log probability p(x_{1:T}). This should go up
each iteration!
"""
initial_dist = parameters['initial_dist']
transition_matrix = parameters['transition_matrix']
mu = parameters['mu']
expectations = []
transition_expectations = []
marginal_ll = 0
for i in range(len(event_data['x'])):
x_data = event_data['x'][i]
y_data = event_data['y'][i]
num_periods = x_data.shape[0]
log_likelihoods = self.compute_log_likelihoods(x_data, y_data, mu, num_periods)
ll_check = log_likelihoods.sum(axis=0) > 0
if ll_check.any():
raise Exception("Positive loglikelihoods!")
log_alphas = self.forward_pass(initial_dist, transition_matrix, log_likelihoods)
log_betas = self.backward_pass(transition_matrix, log_likelihoods)
log_expectations_batch = log_alphas + log_likelihoods + log_betas
log_expectations_batch = log_expectations_batch - logsumexp(log_expectations_batch, axis=1)[:, np.newaxis]
log_transition_expectation_batch = np.zeros(shape=[self.num_states, self.num_states, num_periods-1])
for i in range(self.num_states):
for j in range(self.num_states):
log_alphas_i = log_alphas[:-1, i]
log_likelihoods_i = log_likelihoods[:-1, i]
log_likelihoods_j = log_likelihoods[1:, j]
log_betas_j = log_betas[1:, j]
log_transition_expectation_batch[i, j, :] = log_alphas_i + log_likelihoods_i \
+ np.log(transition_matrix[i, j] + eps) \
+ log_likelihoods_j + log_betas_j
log_transition_expectation_batch = log_transition_expectation_batch \
- logsumexp(log_transition_expectation_batch.reshape((-1, num_periods-1)), axis=0)[np.newaxis, np.newaxis, :]
expectations += [np.exp(log_expectations_batch)]
transition_expectations += [np.exp(log_transition_expectation_batch)]
marginal_ll += self.compute_marginal_ll(log_alphas=log_alphas, log_likelihoods=log_likelihoods)
return expectations, marginal_ll, transition_expectations
def m_step(self, event_data, expectations, transition_expectations):
"""Solve for the Gaussian parameters that maximize the expected log
likelihood.
Note: you can assume fixed initial distribution and transition matrix as
described in the markdown above.
Parameters
----------
event_data: list of (T, 20) arrays with player positions over time for each event
expectations: list of (T, K) arrays with marginal state probabilities from
the E step.
transition_expectations: list of (K, K, T) arrays with marginal state transition
probabilities from the E step
Returns
-------
parameters: a data structure containing the model parameters; i.e. the
initial distribution, transition matrix, and Gaussian means and
covariances.
"""
expectations, x_data, y_data = self.glm_inputs_setup(event_data, expectations)
transition_expectations = np.concatenate(transition_expectations, axis=-1)
psudo_counts = expectations.sum(axis=0)
mu = []
for k in range(self.num_states):
poisson_glm = PoissonGLM(x_train=x_data, y_train=y_data, weights=expectations[:, k].reshape((-1, 1)),
sigma2=self.sigma2, bias=False)
poisson_glm.compute_posterior_mode()
mu += [poisson_glm.get_w_map()]
transition_matrix = np.zeros(shape=[self.num_states] * 2)
for i in range(self.num_states):
for j in range(self.num_states):
transition_matrix[i, j] = transition_expectations[i, j, :].sum()
transition_matrix = transition_matrix / transition_matrix.sum(axis=1)[:, np.newaxis]
parameters = {'mu': np.array(mu),
'initial_dist': psudo_counts / psudo_counts.sum(),
'transition_matrix': transition_matrix}
return parameters
def viterbi(self, event_data, parameters):
initial_dist = parameters['initial_dist']
transition_matrix = parameters['transition_matrix']
mu = parameters['mu']
most_likely_states = []
for i in range(len(event_data['x'])):
x_data = event_data['x'][i]
y_data = event_data['y'][i]
num_periods = x_data.shape[0]
log_likelihoods = self.compute_log_likelihoods(x_data, y_data, mu, num_periods)
ll_check = log_likelihoods.sum(axis=0) > 0
if ll_check.any():
raise Exception("Positive loglikelihoods!")
T, K = log_likelihoods.shape
log_mu = np.zeros((T, K))
for t in range(1, T):
factor = log_mu[T-t, :] + log_likelihoods[T-t, :]
log_mu_next = np.max(np.log(transition_matrix + eps) + factor[np.newaxis, :], axis=1)
log_mu[T-1-t, :] = log_mu_next
most_likely_states_batch = [None] * T
factor = log_likelihoods[0, :] + log_mu[0]
most_likely_states_batch[0] = np.argmax(factor + np.log(initial_dist))
for t in range(1, T):
factor = log_likelihoods[t, :] + log_mu[t]
prev_state = most_likely_states_batch[t-1]
log_transition = np.log(transition_matrix[prev_state, :] + eps)
most_likely_states_batch[t] = np.argmax(factor + log_transition)
most_likely_states += [most_likely_states_batch]
return most_likely_states
def fit(self, event_data):
"""Fit an HMM using the EM algorithm above. You'll have to initialize the
parameters somehow; k-means often works well. You'll also need to monitor
the marginal likelihood and check for convergence.
Returns
-------
lls: the marginal log likelihood over EM iterations
parameters: the final parameters
"""
p = event_data['x'][0].shape[1]
parameters = self.initialization(p=p)
lls = []
improvement = 10
c = 0
print("Solving", end="", flush=True)
prev_parameters = None
while improvement > -1e-4:
prev_parameters = parameters
expectations, marginal_ll, transition_expectations = self.e_step(event_data, prev_parameters)
parameters = self.m_step(event_data, expectations, transition_expectations)
if len(lls) > 0:
improvement = marginal_ll - lls[-1]
lls += [marginal_ll]
else:
lls += [marginal_ll]
print(".", end="", flush=True)
c += 1
if c > 50:
break
print("Done")
return lls, prev_parameters
def predict(self, event_data, parameters):
expectations, marginal_ll, _ = self.e_step(event_data, parameters)
most_likely_states = self.viterbi(event_data=event_data, parameters=parameters)
y_viterbi = []
for i in range(len(event_data['x'])):
y_data = event_data['y'][i]
x_data = event_data['x'][i]
y_hat_event = np.zeros((y_data.shape[0], self.num_states))
y_viterbi_event = np.zeros((y_data.shape[0], self.num_states))
for k in range(self.num_states):
mask = np.array(most_likely_states[i]) == k
poisson_glm = PoissonGLM(x_train=x_data, y_train=y_data,
sigma2=self.sigma2, bias=False)
y_hat_event[:, k] = poisson_glm.obs_map(parameters['mu'][k], x_data).reshape(-1,)
y_viterbi_event[mask, k] = 1.
y_viterbi += [np.sum(y_hat_event * y_viterbi_event, axis=1)]
return y_viterbi, most_likely_states, marginal_ll
def forecast(self, train_event_data, test_event_data, parameters, m=8, num_samples=250, alpha=0.05):
initial_dist = parameters['initial_dist']
transition_matrix = parameters['transition_matrix']
mu = parameters['mu']
state_space = list(range(self.num_states))
forecasts = []
states_prob = []
for i in range(len(train_event_data['x'])):
x_train = train_event_data['x'][i]
y_train = train_event_data['y'][i]
poisson_glm = PoissonGLM(x_train=x_train, y_train=y_train,
sigma2=self.sigma2, bias=False)
x_test = test_event_data['x'][i]
y_test = test_event_data['y'][i]
num_periods = x_train.shape[0]
test_periods = x_test.shape[0]
forecasts_event = pd.DataFrame(np.nan, index=np.arange(test_periods), columns=['map', 'median', 'lower', 'upper'])
states_prob_event = pd.DataFrame(np.nan, index=np.arange(test_periods), columns=state_space)
x_data = x_train.copy()
y_data = y_train.copy()
print("Sampling", end="", flush=True)
for t in range(x_test.shape[0]-m):
log_likelihoods = self.compute_log_likelihoods(x_data, y_data, mu, num_periods)
log_alphas = self.forward_pass(initial_dist, transition_matrix, log_likelihoods)
log_filter_prob = log_alphas - logsumexp(log_alphas, axis=1)[:, np.newaxis]
initial_dist = np.exp(log_filter_prob[-1, :])
m_step_dist = np.dot(np.linalg.matrix_power(transition_matrix.T, m), initial_dist)
states_prob_event.at[t+m] = m_step_dist
states_sim = np.random.choice(state_space, size=num_samples, p=m_step_dist)
mu_sim = mu[list(states_sim)]
rate_sim = poisson_glm.rate_map(mu_sim.T, x_test[t + m - 1, :])
obs_sim = poisson.rvs(rate_sim).flatten()
lower_value = np.percentile(obs_sim, q=100*alpha/2)
upper_value = np.percentile(obs_sim, q=100*(1.-alpha/2))
median_value = np.median(obs_sim)
map_value = np.mean(obs_sim)
forecasts_event.at[t+m, 'lower'] = lower_value
forecasts_event.at[t+m, 'upper'] = upper_value
forecasts_event.at[t+m, 'map'] = map_value
forecasts_event.at[t+m, 'median'] = median_value
if t % 10 == 0:
print(".", end="", flush=True)
num_periods += 1
x_data = np.vstack([x_data, x_test[t, :].reshape(1, -1)])
y_data = np.append(y_data, y_test[t])
print("Done")
forecasts += [forecasts_event]
states_prob += [states_prob_event]
return forecasts, states_prob
@staticmethod
def format_event_data(df):
df.sort_index(inplace=True)
event_data = []
for city in df.index.get_level_values('city').unique():
if 'year' in df.index.names:
for year in df.loc[city].index.get_level_values('year').unique():
event_data.append(df.loc[city].loc[year].values)
else:
event_data.append(df.loc[city].values)
return event_data
def validate_model(self, event_data, parameters):
mu = parameters['mu']
expectations, marginal_ll, _ = self.e_step(event_data, parameters)
expectations, x_data, y_data = self.glm_inputs_setup(event_data, expectations)
y_hat = np.zeros(y_data.shape)
for k in range(self.num_states):
poisson_glm = PoissonGLM(x_train=x_data, y_train=y_data, weights=expectations[:, k].reshape((-1, 1)),
sigma2=self.sigma2, bias=False)
y_hat += poisson_glm.obs_map(mu[k], x_data) * expectations[:, k].reshape((-1, 1))
e = np.abs(y_data - y_hat)
mae = e.mean()
return marginal_ll, mae
def glm_inputs_setup(self, event_data, expectations):
x_data = np.vstack([event_data['x'][i] for i in range(len(event_data['x']))])
y_data = np.vstack([event_data['y'][i].reshape((-1, 1)) for i in range(len(event_data['y']))])
expectations = np.vstack(expectations)
return expectations, x_data, y_data
if __name__ == "__main__":
import os
from src.d01_data.dengue_data_api import DengueDataApi
os.chdir('../')
dda = DengueDataApi()
x_train, x_validate, y_train, y_validate = dda.split_data()
num_components = 4
z_train, z_validate, pct_var, _ = dda.get_pca(x_train, x_validate, num_components=num_components)
z_train['bias'] = 1.
z_validate['bias'] = 1.
z_train.drop(columns=z_train.columns[:num_components], inplace=True)
z_validate.drop(columns=z_validate.columns[:num_components], inplace=True)
event_data_train = dict()
model = PoissonHMM(num_states=3)
event_data_train['x'] = model.format_event_data(z_train.droplevel('year'))
event_data_train['y'] = model.format_event_data(y_train.droplevel('year'))
lls_k, parameters_k = model.fit(event_data=event_data_train)
print(lls_k)
print(parameters_k)
event_data_validate = dict()
event_data_validate['x'] = model.format_event_data(z_validate.droplevel('year'))
event_data_validate['y'] = model.format_event_data(y_validate.droplevel('year'))
y_viterbi_train, most_likely_states_train, _ = model.predict(event_data_train, parameters_k)
y_viterbi_validate, most_likely_states_validate, _ = model.predict(event_data_validate, parameters_k)
forecasts = model.forecast(event_data_train, event_data_validate, parameters_k)
# marginal_ll, mae = model.validate_model(event_data=event_data_validate, parameters=parameters_k)
# print(mae)
```
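Because `forward_pass` only needs the per-step log likelihoods, it can be exercised on its own. A small toy check with hand-built Poisson emissions (the numbers are arbitrary):

```python
import numpy as np
from scipy.stats import poisson

model = PoissonHMM(num_states=2)
initial_dist = np.array([0.5, 0.5])
transition_matrix = np.array([[0.9, 0.1],
                              [0.2, 0.8]])
rates = np.array([2.0, 10.0])                  # state-dependent Poisson rates
y = np.array([1, 2, 9, 12, 11])                # observed counts
log_likelihoods = poisson.logpmf(y[:, None], rates[None, :])   # (T, K)
log_alphas = model.forward_pass(initial_dist, transition_matrix, log_likelihoods)
print(model.compute_marginal_ll(log_alphas, log_likelihoods))  # log p(y_{1:T})
```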
#### File: cobs10-dengai/tests/test_hmm_em.py
```python
from unittest import TestCase
import numpy as np
from scipy.special import logsumexp
from src.d04_modeling.poisson_hmm import PoissonHMM
from scipy.stats import poisson, multivariate_normal
class TestHmmEm(TestCase):
@classmethod
def setUpClass(cls) -> None:
print("setUp")
cls.num_states = 2
cls.model = PoissonHMM(num_states=cls.num_states)
cls.p = 3
cls.pi = np.ones(cls.num_states)
np.random.seed(1992)
mu1 = np.random.normal(loc=0.0, scale=1., size=cls.p)
mu2 = np.random.normal(loc=0.0, scale=1., size=cls.p)
cls.mu = np.array([mu1, mu2])
cls.transition_matrix = np.random.dirichlet([1.] * cls.num_states, size=cls.num_states)
# generate data
cls.num_periods = 1000
cls.generate_data(cls.num_periods)
cls.initial_dist = np.ones(cls.num_states) / cls.num_states
@classmethod
def generate_data(cls, num_periods):
x_data = np.ones((num_periods, cls.p))
x_data[:, 1:] = multivariate_normal.rvs(mean=np.zeros(cls.p-1), cov=np.eye(cls.p-1), size=num_periods)
y_data = np.zeros((num_periods,))
latent_states = np.zeros(num_periods)
rate = np.exp(np.dot(x_data[0, :], cls.mu[0]))
y_data[0] = poisson.rvs(mu=rate)
for t in range(1, num_periods):
p = cls.transition_matrix[int(latent_states[t - 1]), :]
z = np.random.choice(np.arange(cls.num_states).astype(int), p=p)
rate = np.exp(np.dot(x_data[t, :], cls.mu[z]))
y_data[t] = poisson.rvs(mu=rate)
latent_states[t] = z
cls.event_data = dict(x=[x_data], y=[y_data])
cls.latent_states = latent_states
def test_compute_log_likelihoods(self):
log_likelihoods = self.model.compute_log_likelihoods(x_data=self.event_data['x'][0],
y_data=self.event_data['y'][0],
mu=self.mu, num_periods=self.num_periods)
rate0 = np.exp(np.dot(self.event_data['x'][0], self.mu[0]))
log_p0 = poisson.logpmf(k=self.event_data['y'][0], mu=rate0)
rate1 = np.exp(np.dot(self.event_data['x'][0], self.mu[1]))
log_p1 = poisson.logpmf(k=self.event_data['y'][0], mu=rate1)
self.assertAlmostEqual(log_likelihoods[:, 0].sum(), log_p0.sum())
self.assertAlmostEqual(log_likelihoods[:, 1].sum(), log_p1.sum())
def test_forward_pass(self):
log_likelihoods = self.model.compute_log_likelihoods(x_data=self.event_data['x'][0],
y_data=self.event_data['y'][0],
mu=self.mu, num_periods=self.num_periods)
log_alphas = self.model.forward_pass(initial_dist=self.initial_dist, transition_matrix=self.transition_matrix,
log_likelihoods=log_likelihoods)
num_periods, num_states = log_likelihoods.shape
expected = np.zeros((num_periods, num_states))
expected[0, :] = np.log(self.initial_dist)
for t in range(1, num_periods):
factor = expected[t-1, :] + log_likelihoods[t-1, :]
for next_state in range(num_states):
log_alphas_next = logsumexp(np.log(self.transition_matrix[:, next_state].flatten()) + factor)
expected[t, next_state] = log_alphas_next
normalizing_factor = expected[t-1, :] + log_likelihoods[t-1, :]
expected[t, :] = expected[t, :] - logsumexp(normalizing_factor, axis=0)[np.newaxis]
self.assertEqual(0, (log_alphas - expected).sum())
def test_backward_pass(self):
log_likelihoods = self.model.compute_log_likelihoods(x_data=self.event_data['x'][0],
y_data=self.event_data['y'][0],
mu=self.mu, num_periods=self.num_periods)
log_betas = self.model.backward_pass(transition_matrix=self.transition_matrix, log_likelihoods=log_likelihoods)
num_periods, num_states = log_likelihoods.shape
expected = np.zeros((num_periods, num_states))
for t in range(1, num_periods):
factor = expected[num_periods-t, :] + log_likelihoods[num_periods-t, :]
for prev_state in range(num_states):
log_betas_prev = logsumexp(np.log(self.transition_matrix[prev_state, :].flatten()) + factor)
expected[num_periods-1-t, prev_state] = log_betas_prev
self.assertEqual(0, (log_betas - expected).sum())
def test_compute_marginal_ll(self):
log_likelihoods = self.model.compute_log_likelihoods(x_data=self.event_data['x'][0],
y_data=self.event_data['y'][0],
mu=self.mu, num_periods=self.num_periods)
log_alphas = self.model.forward_pass(initial_dist=self.initial_dist, transition_matrix=self.transition_matrix,
log_likelihoods=log_likelihoods)
marginal_ll = self.model.compute_marginal_ll(log_alphas, log_likelihoods)
expected = logsumexp(log_likelihoods + log_alphas, axis=1)
self.assertEqual(marginal_ll, expected.sum())
def test_e_step(self):
parameters = {'mu': self.mu,
'initial_dist': self.initial_dist,
'transition_matrix': self.transition_matrix}
expectations, marginal_ll, transition_expectations = self.model.e_step(self.event_data, parameters)
# expectations, transition_expectations = self.view_latent_states()
self.assertAlmostEqual(first=expectations[0].sum(), second=self.num_periods, places=6)
self.assertAlmostEqual(first=transition_expectations[0].sum(axis=0).sum(axis=0)[0], second=1, places=6)
self.assertAlmostEqual(first=transition_expectations[0].sum(),
second=(self.num_periods-1), places=6)
self.assertTrue(isinstance(marginal_ll, np.float64))
def test_m_step(self):
parameters = {'mu': self.mu,
'initial_dist': self.initial_dist,
'transition_matrix': self.transition_matrix}
# expectations, marginal_ll, transition_expectations = self.model.e_step(self.event_data, parameters)
expectations, transition_expectations = self.view_latent_states()
parameters = self.model.m_step(self.event_data, expectations, transition_expectations)
self.assertEqual(parameters['mu'].shape, (self.num_states, self.p))
self.assertEqual(parameters['transition_matrix'].shape, (self.num_states, self.num_states))
def view_latent_states(self):
expectations = np.zeros((self.num_periods, self.num_states))
transition_expectations = np.zeros((self.num_states, self.num_states, self.num_periods - 1))
for k in range(self.num_states):
mask = self.latent_states == k
expectations[mask, k] = 1.
expectations = [expectations]
for i in range(self.num_states):
for j in range(self.num_states):
mask = (self.latent_states[1:] == j) & (self.latent_states[:-1] == i)
transition_expectations[i, j, mask] = 1.
transition_expectations = [transition_expectations]
return expectations, transition_expectations
def test_fit_hmm(self):
lls, parameters = self.model.fit(self.event_data)
lls = np.diff(lls)
self.assertTrue((lls > 0).all())
```
{
"source": "jilaqi-le-gao/AvalonGameHostAnalysis",
"score": 2
} |
#### File: AvalonHostAnalysis/authlink/views.py
```python
from django.shortcuts import render
from django.middleware.csrf import get_token
from django.http import JsonResponse, HttpResponse
from django.contrib.auth.decorators import login_required
# Create your views here.
def get_csrftoken(request):
token = get_token(request)
return JsonResponse({
'csrftoken': token
})
@login_required
def redirect_test_empty(request):
print(request.user)
if request.user.is_authenticated:
# Do something for authenticated users.
return HttpResponse('You are logged in!')
else:
# Do something for anonymous users.
return HttpResponse('You are NOT logged in!')
def not_logged_view(request):
if request.user.is_authenticated:
print(request.user.get_username())
return HttpResponse('You are not logged in !!!')
```
#### File: AvalonHostAnalysis/AvalonPages/views.py
```python
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
# Create your views here.
@login_required(login_url='/sibyl/login/')
def RecordGamePage(request):
return render(request, 'FRONTEND/GameRecordPage.html', )
@login_required(login_url='/sibyl/login/')
def RecordViewPage(request):
return HttpResponse('In progress')
```
{
"source": "jilbertozamorasaa/panda-bigmon-core",
"score": 2
} |
#### File: cachecontroller/schedinstances/DataCarouselMails.py
```python
import threading
import time
import logging
from core.art.artMail import send_mail_art
from core.cachecontroller.BaseTasksProvider import BaseTasksProvider
from core.settings.base import DATA_CAROUSEL_MAIL_DELAY_DAYS, DATA_CARUSEL_MAIL_RECIPIENTS, DATA_CAROUSEL_MAIL_REPEAT
from django.core.cache import cache
mail_template = "templated_email/dataCarouselStagingAlert.html"
max_mail_attempts = 10
class DataCarouselMails(BaseTasksProvider):
lock = threading.RLock()
logger = logging.getLogger(__name__ + ' DataCarouselMails')
def processPayload(self):
self.logger.info("DataCarouselMails started")
try:
query = """SELECT t1.DATASET, t1.STATUS, t1.STAGED_FILES, t1.START_TIME, t1.END_TIME, t1.RSE as RSE, t1.TOTAL_FILES,
t1.UPDATE_TIME, t1.SOURCE_RSE, t2.TASKID, t3.campaign, t3.PR_ID, ROW_NUMBER() OVER(PARTITION BY t1.DATASET_STAGING_ID ORDER BY t1.start_time DESC) AS occurence, (CURRENT_TIMESTAMP-t1.UPDATE_TIME) as UPDATE_TIME, t4.processingtype FROM ATLAS_DEFT.T_DATASET_STAGING t1
INNER join ATLAS_DEFT.T_ACTION_STAGING t2 on t1.DATASET_STAGING_ID=t2.DATASET_STAGING_ID
INNER JOIN ATLAS_DEFT.T_PRODUCTION_TASK t3 on t2.TASKID=t3.TASKID
INNER JOIN ATLAS_PANDA.JEDI_TASKS t4 on t2.TASKID=t4.JEDITASKID where END_TIME is NULL and (t1.STATUS = 'staging') and t1.UPDATE_TIME <= TRUNC(SYSDATE) - {}
""".format(DATA_CAROUSEL_MAIL_DELAY_DAYS)
db = self.pool.acquire()
cursor = db.cursor()
rows = cursor.execute(query)
except Exception as e:
self.logger.error(e)
return -1
for r in rows:
self.logger.debug("DataCarouselMails processes this Rucio Rule: {}".format(r[5]))
data = {"SE":r[8], "RR":r[5], "START_TIME":r[3], "TASKID":r[9], "TOT_FILES": r[6], "STAGED_FILES": r[2], "UPDATE_TIME": r[7]}
self.send_email(data)
self.logger.info("DataCaruselMails finished")
def send_email(self, data):
subject = "Data Carousel Alert for {}".format(data['SE'])
for recipient in DATA_CARUSEL_MAIL_RECIPIENTS:
cache_key = "mail_sent_flag_{RR}_{RECIPIENT}".format(RR=data["RR"], TASKID=data["TASKID"],
RECIPIENT=recipient)
if not cache.get(cache_key, False):
is_sent = False
i = 0
while not is_sent:
i += 1
if i > 1:
time.sleep(10)
is_sent = send_mail_art(mail_template, subject, data, recipient, send_html=True)
self.logger.debug("Email to {} attempted to send with result {}".format(recipient, is_sent))
# put 10 seconds delay to bypass the message rate limit of smtp server
time.sleep(10)
if i >= max_mail_attempts:
break
if is_sent:
cache.set(cache_key, "1", DATA_CAROUSEL_MAIL_REPEAT*24*3600)
```
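send_email above mutes repeated alerts by caching a per-recipient flag for DATA_CAROUSEL_MAIL_REPEAT days after a successful send. A minimal sketch of that de-duplication pattern in isolation, assuming a configured Django cache backend (the helper name and arguments are illustrative, not from the repository):
```python
# Hypothetical illustration of the "send once, then mute" pattern used in send_email.
from django.core.cache import cache

def notify_once(rule_id, recipient, repeat_days, send):
    """Call send() only if no flag is cached for this (rule, recipient) pair."""
    cache_key = "mail_sent_flag_{}_{}".format(rule_id, recipient)
    if cache.get(cache_key, False):
        return False                                    # alert already sent recently
    if send():                                          # send() should return True on success
        cache.set(cache_key, "1", repeat_days * 24 * 3600)
        return True
    return False
```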
#### File: core/dashboards/dtcdboard.py
```python
import random, json, math
from datetime import datetime
from django.http import HttpResponse
from django.shortcuts import render_to_response, redirect
from django.db import connection
from django.utils.cache import patch_response_headers
from core.libs.cache import getCacheEntry, setCacheEntry
from core.libs.exlib import dictfetchall
from core.oauth.utils import login_customrequired
from core.views import initRequest, setupView, DateEncoder, setCacheData
from core.common.models import JediTasksOrdered
from core.schedresource.models import Schedconfig
from core.settings.local import dbaccess
import pandas as pd
import numpy as np
from django.views.decorators.cache import never_cache
from django.utils import timezone
from core.schedresource.utils import getCRICSEs
@never_cache
@login_customrequired
def dataCarouselleDashBoard(request):
initRequest(request)
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=4, limit=9999999, querytype='task', wildCardExt=True)
request.session['viewParams']['selection'] = ''
data = {
'request': request,
'viewParams': request.session['viewParams'] if 'viewParams' in request.session else None,
}
response = render_to_response('DataTapeCarouselle.html', data, content_type='text/html')
#patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 5)
return response
def getStagingInfoForTask(request):
valid, response = initRequest(request)
data = getStagingData(request)
response = HttpResponse(json.dumps(data, cls=DateEncoder), content_type='application/json')
patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 5)
return response
def getBinnedData(listData, additionalList1 = None, additionalList2 = None):
isTimeNotDelta = True
timesadd1 = None
timesadd2 = None
try:
times = pd.to_datetime(listData)
if additionalList1:
timesadd1 = pd.to_datetime(additionalList1)
if additionalList2:
timesadd2 = pd.to_datetime(additionalList2)
    except Exception:  # values are not datetimes; fall back to parsing them as timedeltas
times = pd.to_timedelta(listData)
isTimeNotDelta = False
if additionalList1:
timesadd1 = pd.to_timedelta(additionalList1)
if additionalList2:
timesadd2 = pd.to_timedelta(additionalList2)
#if not timesadd is None:
# mergedIndex = times.union(timesadd)
#else:
# mergedIndex = times
df = pd.DataFrame({
"Count1": [1 for _ in listData]
}, index=times)
    if timesadd1 is not None:
dfadd = pd.DataFrame({
"Count2": [1 for _ in additionalList1]
}, index=timesadd1)
result = pd.concat([df, dfadd])
else:
result = df
    if timesadd2 is not None:
dfadd = pd.DataFrame({
"Count3": [1 for _ in additionalList2]
}, index=timesadd2)
result = pd.concat([result, dfadd])
grp = result.groupby([pd.Grouper(freq="24h")]).count()
values = grp.values.tolist()
if isTimeNotDelta:
index = grp.index.to_pydatetime().tolist()
else:
index = (grp.index / pd.Timedelta(hours=1)).tolist()
    if additionalList1 is not None and len(additionalList1) == 0:
tmpval = []
for item in values:
if additionalList2:
tmpval.append([item[0], 0, item[1]])
else:
tmpval.append([item[0], 0])
values = tmpval
    if additionalList2 is not None and len(additionalList2) == 0:
tmpval = []
if len(values) > 1: # temp fix, to be looked closer
for item in values:
tmpval.append([item[0], item[1], 0])
values = tmpval
data = []
for time, count in zip(index, values):
data.append([time, count])
return data
def substitudeRSEbreakdown(rse):
rses = getCRICSEs().get(rse, [])
final_string = ""
for rse in rses:
final_string += "&var-src_endpoint=" + rse
return final_string
@never_cache
def getDTCSubmissionHist(request):
valid, response = initRequest(request)
staginData = getStagingData(request)
timelistSubmitted = []
progressDistribution = []
summarytableDict = {}
selectCampaign = []
selectSource = []
detailsTable = []
timelistIntervalfin = []
timelistIntervalact = []
timelistIntervalqueued = []
for task, dsdata in staginData.items():
epltime = None
timelistSubmitted.append(dsdata['start_time'])
source_rse_breakdown = substitudeRSEbreakdown(dsdata['source_rse'])
dictSE = summarytableDict.get(dsdata['source_rse'], {"source": dsdata['source_rse'], "ds_active":0, "ds_done":0,
"ds_queued":0, "ds_90pdone":0, "files_rem":0, "files_q":0,
"files_done":0, "source_rse_breakdown": source_rse_breakdown})
if dsdata['occurence'] == 1:
dictSE["files_done"] += dsdata['staged_files']
dictSE["files_rem"] += (dsdata['total_files'] - dsdata['staged_files'])
# Build the summary by SEs and create lists for histograms
if dsdata['end_time'] != None:
dictSE["ds_done"]+=1
epltime = dsdata['end_time'] - dsdata['start_time']
timelistIntervalfin.append(epltime)
elif dsdata['status'] != 'queued':
epltime = timezone.now() - dsdata['start_time']
timelistIntervalact.append(epltime)
dictSE["ds_active"]+=1
if dsdata['staged_files'] >= dsdata['total_files']*0.9:
dictSE["ds_90pdone"] += 1
elif dsdata['status'] == 'queued':
dictSE["ds_queued"] += 1
dictSE["files_q"] += (dsdata['total_files'] - dsdata['staged_files'])
epltime = timezone.now() - dsdata['start_time']
timelistIntervalqueued.append(epltime)
progressDistribution.append(dsdata['staged_files'] / dsdata['total_files'])
summarytableDict[dsdata['source_rse']] = dictSE
selectCampaign.append({"name": dsdata['campaign'], "value": dsdata['campaign'], "selected": "0"})
selectSource.append({"name": dsdata['source_rse'], "value": dsdata['source_rse'], "selected": "0"})
detailsTable.append({'campaign': dsdata['campaign'], 'pr_id': dsdata['pr_id'], 'taskid': dsdata['taskid'],
'status': dsdata['status'], 'total_files': dsdata['total_files'],
'staged_files': dsdata['staged_files'],
'progress':
int(round(dsdata['staged_files'] * 100.0 / dsdata['total_files'])),
'source_rse': dsdata['source_rse'], 'elapsedtime': epltime,
'start_time': dsdata['start_time'], 'rse': dsdata['rse'], 'update_time':
dsdata['update_time'], 'update_time_sort': dsdata['update_time_sort'],
'processingtype': dsdata['processingtype']})
    # For uniqueness
selectSource = list({v['name']: v for v in selectSource}.values())
selectCampaign = list({v['name']: v for v in selectCampaign}.values())
summarytableList = list(summarytableDict.values())
# timedelta = pd.to_timedelta(timelistIntervalfin)
# timedelta = (timedelta / pd.Timedelta(hours=1))
# arr = [["EplTime"]]
# arr.extend([[x] for x in timedelta.tolist()])
#
# timedelta = pd.to_timedelta(timelistIntervalact)
# timedelta = (timedelta / pd.Timedelta(hours=1))
# #arr1 = [["EplTime"]]
# arr.extend([[x] for x in timedelta.tolist()])
binnedActFinData = getBinnedData(timelistIntervalact, additionalList1 = timelistIntervalfin, additionalList2 = timelistIntervalqueued)
eplTime = [['Time', 'Act. staging', 'Fin. staging', 'Q. staging']] + [[time, data[0], data[1], data[2]] for (time, data) in binnedActFinData]
#, 'Queued staging'
finalvalue = {"epltime": eplTime}
arr = [["Progress"]]
arr.extend([[x*100] for x in progressDistribution])
finalvalue["progress"] = arr
binnedSubmData = getBinnedData(timelistSubmitted)
finalvalue["submittime"] = [['Time', 'Count']] + [[time, data[0]] for (time, data) in binnedSubmData]
finalvalue["progresstable"] = summarytableList
selectTime = [
{"name": "Last 1 hour", "value": "hours1", "selected": "0"},
{"name":"Last 12 hours", "value":"hours12", "selected":"0"},
{"name":"Last day", "value":"hours24", "selected":"0"},
{"name":"Last week","value":"hours168", "selected":"0"},
{"name":"Last month","value":"hours720", "selected":"0"},
{"name": "Last 3 months", "value": "hours2160", "selected": "0"},
{"name": "Last 6 months", "value": "hours4320", "selected": "0"}
]
hours = ""
if 'hours' in request.session['requestParams']:
hours = request.session['requestParams']['hours']
for selectTimeItem in selectTime:
if selectTimeItem["value"] == "hours"+str(hours):
selectTimeItem["selected"] = "1"
break
finalvalue["selectsource"] = selectSource
finalvalue["selecttime"] = selectTime
finalvalue["selectcampaign"] = selectCampaign
finalvalue["detailstable"] = detailsTable
response = HttpResponse(json.dumps(finalvalue, cls=DateEncoder), content_type='application/json')
return response
def getStagingData(request):
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, wildCardExt=True)
timewindow = query['modificationtime__castdate__range']
if 'source' in request.GET:
source = request.GET['source']
else:
source = None
if 'destination' in request.GET:
destination = request.GET['destination']
else:
destination = None
if 'campaign' in request.GET:
campaign = request.GET['campaign']
else:
campaign = None
data = {}
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1"
else:
tmpTableName = "TMP_IDS1"
new_cur = connection.cursor()
selection = "where 1=1 "
jeditaskid = None
if 'jeditaskid' in request.session['requestParams']:
jeditaskid = request.session['requestParams']['jeditaskid']
taskl = [int(jeditaskid)] if '|' not in jeditaskid else [int(taskid) for taskid in jeditaskid.split('|')]
new_cur = connection.cursor()
transactionKey = random.randrange(1000000)
executionData = []
for id in taskl:
executionData.append((id, transactionKey))
query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
new_cur.executemany(query, executionData)
connection.commit()
selection += "and t2.taskid in (SELECT tmp.id FROM %s tmp where TRANSACTIONKEY=%i)" % (tmpTableName, transactionKey)
else:
selection += "and t2.TASKID in (select taskid from ATLAS_DEFT.T_ACTION_STAGING)"
if source:
sourcel = [source] if ',' not in source else [SE for SE in source.split(',')]
selection += " AND t1.SOURCE_RSE in (" + ','.join('\''+str(x)+'\'' for x in sourcel) + ")"
if campaign:
campaignl = [campaign] if ',' not in campaign else [camp for camp in campaign.split(',')]
selection += " AND t3.campaign in (" + ','.join('\''+str(x)+'\'' for x in campaignl) + ")"
if not jeditaskid:
selection += " AND not (NVL(t4.ENDTIME, CURRENT_TIMESTAMP) < t1.start_time) AND (END_TIME BETWEEN TO_DATE(\'%s\','YYYY-mm-dd HH24:MI:SS') and TO_DATE(\'%s\','YYYY-mm-dd HH24:MI:SS') or (END_TIME is NULL and not (t1.STATUS = 'done')))" \
% (timewindow[0], timewindow[1])
new_cur.execute(
"""
SELECT t1.DATASET, t1.STATUS, t1.STAGED_FILES, t1.START_TIME, t1.END_TIME, t1.RSE as RSE, t1.TOTAL_FILES,
t1.UPDATE_TIME, t1.SOURCE_RSE, t2.TASKID, t3.campaign, t3.PR_ID, ROW_NUMBER() OVER(PARTITION BY t1.DATASET_STAGING_ID ORDER BY t1.start_time DESC) AS occurence, (CURRENT_TIMESTAMP-t1.UPDATE_TIME) as UPDATE_TIME, t4.processingtype FROM ATLAS_DEFT.T_DATASET_STAGING t1
INNER join ATLAS_DEFT.T_ACTION_STAGING t2 on t1.DATASET_STAGING_ID=t2.DATASET_STAGING_ID
INNER JOIN ATLAS_DEFT.T_PRODUCTION_TASK t3 on t2.TASKID=t3.TASKID
INNER JOIN ATLAS_PANDA.JEDI_TASKS t4 on t2.TASKID=t4.JEDITASKID %s
""" % selection
)
datasets = dictfetchall(new_cur)
for dataset in datasets:
        # Keep only datasets in staging/queued/done status (filtering requested on February 19, 2020)
if dataset['STATUS'] in ('staging', 'queued', 'done'):
dataset = {k.lower(): v for k, v in dataset.items()}
if dataset.get('update_time'):
dataset['update_time_sort'] = int(dataset['update_time'].total_seconds())
else:
dataset['update_time_sort'] = None
data[dataset['taskid']] = dataset
return data
```
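The branching in getBinnedData above obscures its core operation: build a datetime-indexed frame of ones and count them in 24-hour bins with pandas' Grouper. A standalone sketch of just that step, on made-up submission times:
```python
# Minimal reproduction of the 24h binning used in getBinnedData (illustrative data).
import pandas as pd

submitted = ["2020-02-19 01:00", "2020-02-19 13:30", "2020-02-20 08:15"]
times = pd.to_datetime(submitted)
df = pd.DataFrame({"Count1": [1] * len(submitted)}, index=times)

grp = df.groupby(pd.Grouper(freq="24h")).count()
# One row per 24h bin: 2 submissions on Feb 19, 1 on Feb 20.
print(list(zip(grp.index.to_pydatetime().tolist(), grp["Count1"].tolist())))
```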
#### File: core/dashboards/dtctails.py
```python
import pandas as pd
from matplotlib import pyplot as plt
import urllib.request as urllibr
from urllib.error import HTTPError
import json
import datetime
import numpy as np
import os
from sklearn.preprocessing import scale
from core.views import initRequest, setupView, DateEncoder, setCacheData
from django.shortcuts import render_to_response
from django.views.decorators.cache import never_cache
from core.oauth.utils import login_customrequired
from django.db import connection
from core.libs.exlib import dictfetchall
from core.settings.local import dbaccess
import random
import cx_Oracle
from django.http import JsonResponse
from django.core.cache import cache
from django.utils.six.moves import cPickle as pickle
import logging
_logger = logging.getLogger('bigpandamon')
BASE_STAGE_INFO_URL = 'https://bigpanda.cern.ch/staginprogress/?jeditaskid='
#BASE_STAGE_INFO_URL = 'http://aipanda163.cern.ch:8000/staginprogress/?jeditaskid='
@never_cache
@login_customrequired
def dataCarouselTailsDashBoard(request):
initRequest(request)
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=4, limit=9999999, querytype='task', wildCardExt=True)
request.session['viewParams']['selection'] = ''
data = {
'request': request,
'viewParams': request.session['viewParams'] if 'viewParams' in request.session else None,
}
response = render_to_response('DataTapeCaruselTails.html', data, content_type='text/html')
#patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 5)
return response
def getListOfTapeSE():
    # Unfinished helper: the selection query was never filled in, so this returns an empty list.
    listOfSE = []
    selection = """
    """
    return listOfSE
def getStagingDatasets(timewindow, source):
selection = """
select tbig.DATASET, tbig.STATUS, tbig.STAGED_FILES, tbig.START_TIME, tbig.END_TIME, tbig.RRULE, tbig.TOTAL_FILES, tbig.SOURCE_RSE, tbig.TASKID, NULL as PROGRESS_RETRIEVED, NULL as PROGRESS_DATA from (
SELECT t1.DATASET, t1.STATUS, t1.STAGED_FILES, t1.START_TIME, t1.END_TIME, t1.RSE as RRULE, t1.TOTAL_FILES,
t1.SOURCE_RSE, t2.TASKID, ROW_NUMBER() OVER(PARTITION BY t1.DATASET_STAGING_ID ORDER BY t1.start_time DESC) AS occurence FROM ATLAS_DEFT.T_DATASET_STAGING t1
INNER join ATLAS_DEFT.T_ACTION_STAGING t2 on t1.DATASET_STAGING_ID=t2.DATASET_STAGING_ID
INNER JOIN ATLAS_DEFT.T_PRODUCTION_TASK t3 on t2.TASKID=t3.TASKID
order by t1.START_TIME desc
)tbig
LEFT OUTER JOIN ATLAS_PANDABIGMON.DATACAR_ST_PROGRESS_ARCH arch on arch.RRULE=tbig.RRULE
where occurence=1 and tbig.RRULE is not NULL
"""
selection += " AND (tbig.END_TIME BETWEEN TO_DATE(\'%s\','YYYY-mm-dd HH24:MI:SS') and TO_DATE(\'%s\','YYYY-mm-dd HH24:MI:SS') or (tbig.END_TIME is NULL and not (tbig.STATUS in ('done', 'cancelled'))))" \
% (timewindow[0], timewindow[1])
cursor = connection.cursor()
cursor.execute(selection)
datasets = dictfetchall(cursor)
datasets_dict = {}
for ds in datasets:
datasets_dict.setdefault(ds["SOURCE_RSE"], []).append(ds)
return datasets_dict
def OutputTypeHandler(cursor, name, defaultType, size, precision, scale):
if defaultType == cx_Oracle.LOB:
return cursor.var(cx_Oracle.LONG_STRING, arraysize = cursor.arraysize)
if defaultType == cx_Oracle.CLOB:
return cursor.var(cx_Oracle.LONG_STRING, arraysize = cursor.arraysize)
elif defaultType == cx_Oracle.BLOB:
return cursor.var(cx_Oracle.LONG_BINARY, arraysize = cursor.arraysize)
def transform_into_eq_intervals(in_series, name):
df = pd.Series(in_series, name=name)
df = df.resample('15Min').mean()
df.interpolate(method='linear', limit_direction='forward', inplace=True)
df.index = df.index - df.index[0]
return df
def retreiveStagingStatistics(SEs, taskstoingnore):
cursor=connection.cursor()
SEsStr = ','.join('\''+SE+'\'' for SE in SEs)
query = """select * from (
select START_TIME, PROGRESS_DATA, TOTAL_FILES, RRULE, TASKID, SOURCE_RSE, row_number() over (PARTITION BY SOURCE_RSE order by START_TIME desc) as rn from atlas_pandabigmon.DATACAR_ST_PROGRESS_ARCH
where PROGRESS_RETRIEVED=1 and SOURCE_RSE in (%s)) where rn <= 15""" % (SEsStr)
cursor.execute(query)
data = {}
tasks_to_rucio = {}
for row in cursor:
if row[4] not in taskstoingnore:
intermediate_row = patch_start_time(row)
intermediate_row = transform_into_eq_intervals(intermediate_row, str(row[4]))
data.setdefault(row[5], []).append(intermediate_row)
tasks_to_rucio[row[4]] = row[3]
return data, tasks_to_rucio
def getStaginProgress(taskid):
response = None
try:
req = urllibr.Request(BASE_STAGE_INFO_URL + taskid)
response = urllibr.urlopen(req, timeout=180).read()
response = json.loads(response)
    except Exception as e:  # HTTPError is a subclass of Exception, so this covers both
_logger.error(e)
return response
def patch_start_time(dbrow):
dformat = "%Y-%m-%d %H:%M:%S"
start = dbrow[0].strftime(dformat)
if isinstance(dbrow[1], cx_Oracle.LOB):
serie=json.loads(dbrow[1].read())
else:
serie=dbrow[1]
if len(serie) > 0:
serie[0] = [start, 0]
serie_dict = {}
for row in serie:
row[0] = datetime.datetime.strptime(row[0],dformat)
serie_dict[row[0]] = row[1]/100.0*dbrow[2]
return serie_dict
def getCachedProgress(se, taskid):
serialized_progress = cache.get('serialized_staging_progress' + se + "_" + str(taskid))
if serialized_progress:
return pickle.loads(serialized_progress)
else:
return None
def setCachedProgress(se, taskid, stagestatus, progress):
progress = pickle.dumps(progress)
timeout = 3600
if stagestatus == 'done':
timeout = 3600 * 24 * 30 * 6
cache.set('serialized_staging_progress' + se + "_" + str(taskid), progress, timeout)
def getOutliers(datasets_dict, stageStat, tasks_to_rucio):
output = {}
output_table = {}
basicstat = None
for se, datasets in datasets_dict.items():
basicstat = stageStat.get(se, [])
for ds in datasets:
progress_info = getCachedProgress(se, ds['TASKID'])
if not progress_info:
progress_info = getStaginProgress(str(ds['TASKID']))
if progress_info:
setCachedProgress(se, ds['TASKID'], ds['STATUS'], progress_info)
if progress_info:
progress_info = patch_start_time((ds['START_TIME'], progress_info, ds['TOTAL_FILES']))
progress_info = transform_into_eq_intervals(progress_info, str(ds['TASKID']))
basicstat.append(progress_info)
tasks_to_rucio[ds['TASKID']] = ds['RRULE']
if basicstat:
datamerged = pd.concat([s for s in basicstat], axis=1)
zscore = datamerged.copy(deep=True)
zscore = zscore.apply(lambda V: scale(V,axis=0,with_mean=True, with_std=True,copy=False),axis=1)
zscore_df = pd.DataFrame.from_dict(dict(zip(zscore.index, zscore.values))).T
outliers = ((zscore_df< -1.5).any().values)
datamerged = datamerged.fillna("_")
list_of_val = datamerged.values.tolist()
timeticks = (datamerged.index / np.timedelta64(1, 'h')).tolist()
for i in range(len(timeticks)):
list_of_val[i] = [timeticks[i]] + list_of_val[i]
tasksids = datamerged.columns.values.tolist()
report = {}
report['series'] = [["Time"]+tasksids] + list_of_val
report['tasksids'] = tasksids
report['outliers'] = outliers.tolist()
output[se] = report
if len(list(filter(lambda x: x, report['outliers']))) > 0:
outliers_tasks_rucio = [(tasksids[idx], tasks_to_rucio.get(int(tasksids[idx]), None)) for idx, state in enumerate(report['outliers']) if state]
output_table.setdefault(se, []).extend(outliers_tasks_rucio)
return {'plotsdata':output, 'tasks_rucio':output_table}
def extractTasksIds(datasets):
    tasksIDs = []
    for se, se_datasets in datasets.items():  # avoid shadowing the argument inside the loop
        for dataset in se_datasets:
            tasksIDs.append(dataset["TASKID"])
    return tasksIDs
def getStagingTailsData(request):
initRequest(request)
query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, wildCardExt=True)
timewindow = query['modificationtime__castdate__range']
if 'source' in request.GET:
source = request.GET['source']
else:
source = None
datasets = getStagingDatasets(timewindow, source)
tasks = extractTasksIds(datasets)
setOfSEs = datasets.keys()
outliers = None
if len(setOfSEs) > 0:
stageStat, tasks_to_rucio = retreiveStagingStatistics(setOfSEs, taskstoingnore=tasks)
outliers = getOutliers(datasets, stageStat, tasks_to_rucio)
return JsonResponse(outliers, safe=False)
```
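getOutliers above flags a staging task as a tail when, in some time bin, its progress z-score across all tasks staged from the same source RSE drops below -1.5. A compact sketch of that criterion on synthetic progress curves (all numbers are made up; note that at least four tasks are needed before a z-score below -1.5 is even reachable):
```python
# Hypothetical data: rows are time bins, columns are staged-file counts per task.
import pandas as pd
from sklearn.preprocessing import scale

datamerged = pd.DataFrame({
    "task_a": [10.0, 40.0, 80.0, 120.0],
    "task_b": [12.0, 45.0, 85.0, 125.0],
    "task_c": [11.0, 42.0, 82.0, 122.0],
    "task_d": [ 5.0,  8.0, 12.0,  15.0],   # lagging task, the expected outlier
})

# Standardize each time bin (row) across tasks, mirroring getOutliers' axis=1 apply.
zscore = datamerged.apply(lambda v: scale(v, with_mean=True, with_std=True), axis=1)
zscore_df = pd.DataFrame.from_dict(dict(zip(zscore.index, zscore.values))).T
outliers = (zscore_df < -1.5).any().values
print(dict(zip(datamerged.columns, outliers)))     # task_d should be flagged
```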
#### File: panda-bigmon-core/core/ErrorCodes.py
```python
import logging
class ErrorCodes:
errorFields = ('pilot','exe','sup','ddm','brokerage','jobdispatcher','taskbuffer')
errorCodes = {}
errorStages = {}
def __init__(self):
for f in self.errorFields:
self.errorCodes['%serrorcode'%f] = {}
self.errorStages['%serrorcode'%f] = {}
## Panda errors can be found at https://twiki.cern.ch/twiki/bin/view/Atlas/PandaErrorCodes
self.errorCodes['ddmerrorcode'][100] = 'DQ2 server error'
self.errorStages['ddmerrorcode'][100] = 'ddm-start'
self.errorCodes['ddmerrorcode'][200] = 'Could not add output files to dataset'
self.errorStages['ddmerrorcode'][200] = 'ddm-end'
self.errorCodes['ddmerrorcode'][201] = 'Panda server failed to register subscription in DQ2'
self.errorStages['ddmerrorcode'][201] = 'ddm-end'
self.errorCodes['jobdispatchererrorcode'][100] = 'Lost heartbeat'
self.errorStages['jobdispatchererrorcode'][100] = 'time-during'
self.errorCodes['jobdispatchererrorcode'][101] = 'Job recovery failed for three days'
self.errorStages['jobdispatchererrorcode'][101] = 'time-during'
self.errorCodes['jobdispatchererrorcode'][102] = 'No reply to sent job'
self.errorStages['jobdispatchererrorcode'][102] = 'time-during'
self.errorCodes['taskbuffererrorcode'][100] = 'Job expired and killed three days after submission (or killed by user)'
self.errorStages['taskbuffererrorcode'][100] = 'user-during'
        self.errorCodes['taskbuffererrorcode'][101] = 'transfer timeout (2 weeks)'
self.errorStages['taskbuffererrorcode'][101] = 'time-end'
self.errorCodes['taskbuffererrorcode'][102] = 'Expired three days after submission'
self.errorStages['taskbuffererrorcode'][102] = 'time-end'
self.errorCodes['taskbuffererrorcode'][103] = 'Aborted by executor interface'
self.errorStages['taskbuffererrorcode'][103] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][104] = 'Waiting job timed out'
self.errorStages['taskbuffererrorcode'][104] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][105] = 'Reassigned by rebrokeage'
self.errorStages['taskbuffererrorcode'][105] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][106] = 'Reassigned by server-side retry'
self.errorStages['taskbuffererrorcode'][106] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][107] = 'Retried by pilot'
self.errorStages['taskbuffererrorcode'][107] = 'panda-during'
self.errorCodes['taskbuffererrorcode'][110] = 'Input file lost in SE'
self.errorStages['taskbuffererrorcode'][110] = 'panda-during'
self.errorCodes['piloterrorcode'][1008] = 'General pilot error, consult batch log'
self.errorStages['piloterrorcode'][1008] = 'ddm-start'
self.errorCodes['piloterrorcode'][1097] = 'Get function can not be called for staging input file'
self.errorStages['piloterrorcode'][1097] = 'ddm-start'
self.errorCodes['piloterrorcode'][1098] = 'No space left on local disk'
self.errorStages['piloterrorcode'][1098] = 'athena-during'
self.errorCodes['piloterrorcode'][1099] = 'Get error: Staging input file failed'
self.errorStages['piloterrorcode'][1099] = 'ddm-start'
self.errorCodes['piloterrorcode'][1100] = 'Get error: Replica not found'
self.errorStages['piloterrorcode'][1100] = 'ddm-start'
self.errorCodes['piloterrorcode'][1101] = 'LRC registration error: Connection refused'
self.errorStages['piloterrorcode'][1101] = 'ddm-end'
self.errorCodes['piloterrorcode'][1102] = 'Expected output file does not exist'
self.errorStages['piloterrorcode'][1102] = 'athena-end'
self.errorCodes['piloterrorcode'][1103] = 'No such file or directory'
self.errorStages['piloterrorcode'][1103] = 'ddm-start'
self.errorCodes['piloterrorcode'][1104] = 'User work directory too large'
self.errorStages['piloterrorcode'][1104] = 'user-during'
self.errorCodes['piloterrorcode'][1105] = 'Put error: Failed to add file size and checksum to LFC'
self.errorStages['piloterrorcode'][1105] = 'ddm-end'
self.errorCodes['piloterrorcode'][1106] = 'Payload stdout file too big'
self.errorStages['piloterrorcode'][1106] = 'user-during'
self.errorCodes['piloterrorcode'][1107] = 'Get error: Missing DBRelease file'
self.errorStages['piloterrorcode'][1107] = 'ddm-start'
self.errorCodes['piloterrorcode'][1108] = 'Put error: LCG registration failed'
self.errorStages['piloterrorcode'][1108] = 'ddm-end'
self.errorCodes['piloterrorcode'][1109] = 'Required CMTCONFIG incompatible with WN'
self.errorStages['piloterrorcode'][1109] = 'ddm-start'
self.errorCodes['piloterrorcode'][1110] = 'Failed during setup'
self.errorStages['piloterrorcode'][1110] = 'ddm-start'
self.errorCodes['piloterrorcode'][1111] = 'Exception caught by runJob'
self.errorStages['piloterrorcode'][1111] = 'ddm-start'
self.errorCodes['piloterrorcode'][1112] = 'Exception caught by pilot'
self.errorStages['piloterrorcode'][1112] = 'ddm-start'
self.errorCodes['piloterrorcode'][1113] = 'Get error: Failed to import LFC python module'
self.errorStages['piloterrorcode'][1113] = 'ddm-start'
self.errorCodes['piloterrorcode'][1114] = 'Put error: Failed to import LFC python module'
self.errorStages['piloterrorcode'][1114] = 'ddm-end'
self.errorCodes['piloterrorcode'][1115] = 'NFS SQLite locking problems'
self.errorStages['piloterrorcode'][1115] = 'athena-end'
self.errorCodes['piloterrorcode'][1116] = 'Pilot could not download queuedata'
self.errorStages['piloterrorcode'][1116] = 'ddm-start'
self.errorCodes['piloterrorcode'][1117] = 'Pilot found non-valid queuedata'
self.errorStages['piloterrorcode'][1117] = 'ddm-start'
self.errorCodes['piloterrorcode'][1118] = 'Pilot could not curl space report'
self.errorStages['piloterrorcode'][1118] = 'ddm-start'
self.errorCodes['piloterrorcode'][1119] = 'Pilot aborted due to DDM space shortage'
self.errorStages['piloterrorcode'][1119] = 'ddm-start'
self.errorCodes['piloterrorcode'][1120] = 'Space token descriptor does not match destination path'
self.errorStages['piloterrorcode'][1120] = 'ddm-end'
self.errorCodes['piloterrorcode'][1121] = 'Can not read the xml file for registering output files to dispatcher'
self.errorStages['piloterrorcode'][1121] = 'athena-end'
self.errorCodes['piloterrorcode'][1122] = 'Bad replica entry returned by lfc_getreplicas(): SFN not set in LFC for this guid'
self.errorStages['piloterrorcode'][1122] = 'ddm-start'
self.errorCodes['piloterrorcode'][1123] = 'Missing guid in output file list'
self.errorStages['piloterrorcode'][1123] = 'ddm-end'
self.errorCodes['piloterrorcode'][1124] = 'Output file too large'
self.errorStages['piloterrorcode'][1124] = 'athena-during'
self.errorCodes['piloterrorcode'][1130] = 'Get error: Failed to get POOL file catalog'
self.errorStages['piloterrorcode'][1130] = 'ddm-start'
self.errorCodes['piloterrorcode'][1131] = 'Put function can not be called for staging out'
self.errorStages['piloterrorcode'][1131] = 'ddm-end'
self.errorCodes['piloterrorcode'][1132] = 'LRC registration error (consult log file)'
self.errorStages['piloterrorcode'][1132] = 'ddm-end'
self.errorCodes['piloterrorcode'][1133] = 'Put error: Fetching default storage URL failed'
self.errorStages['piloterrorcode'][1133] = 'ddm-end'
self.errorCodes['piloterrorcode'][1134] = 'Put error: Error in mkdir on localSE, not allowed or no available space'
self.errorStages['piloterrorcode'][1134] = 'ddm-end'
self.errorCodes['piloterrorcode'][1135] = 'Could not get file size in job workdir'
self.errorStages['piloterrorcode'][1135] = 'ddm-end'
self.errorCodes['piloterrorcode'][1136] = 'Put error: Error running md5sum to the file in job workdir'
self.errorStages['piloterrorcode'][1136] = 'ddm-end'
self.errorCodes['piloterrorcode'][1137] = 'Put error: Error in copying the file from job workdir to localSE'
self.errorStages['piloterrorcode'][1137] = 'ddm-end'
self.errorCodes['piloterrorcode'][1138] = 'Put error: could not get the file size on localSE'
self.errorStages['piloterrorcode'][1138] = 'ddm-end'
self.errorCodes['piloterrorcode'][1139] = 'Put error: Problem with copying from job workdir to local SE: size mismatch'
self.errorStages['piloterrorcode'][1139] = 'ddm-end'
self.errorCodes['piloterrorcode'][1140] = 'Put error: Error running md5sum to the file on localSE'
self.errorStages['piloterrorcode'][1140] = 'ddm-end'
self.errorCodes['piloterrorcode'][1141] = 'Put error: Problem with copying from job workdir to local SE: md5sum mismatch'
self.errorStages['piloterrorcode'][1141] = 'ddm-end'
self.errorCodes['piloterrorcode'][1142] = 'Put error: failed to register the file on local SE'
self.errorStages['piloterrorcode'][1142] = 'ddm-end'
self.errorCodes['piloterrorcode'][1143] = 'Failed to chmod trf'
self.errorStages['piloterrorcode'][1143] = 'ddm-start'
self.errorCodes['piloterrorcode'][1144] = 'Job killed by panda server'
self.errorStages['piloterrorcode'][1144] = 'user-during'
self.errorCodes['piloterrorcode'][1145] = 'Get error: md5sum mismatch on input file'
self.errorStages['piloterrorcode'][1145] = 'ddm-start'
self.errorCodes['piloterrorcode'][1146] = 'Trf installation dir does not exist and could not be installed'
self.errorStages['piloterrorcode'][1146] = 'ddm-start'
self.errorCodes['piloterrorcode'][1147] = 'Put error: dccp returned readOnly'
self.errorStages['piloterrorcode'][1147] = 'ddm-end'
self.errorCodes['piloterrorcode'][1148] = 'Put error: Failed to remove readOnly file in dCache'
self.errorStages['piloterrorcode'][1148] = 'ddm-end'
self.errorCodes['piloterrorcode'][1149] = 'wget command failed to download trf'
self.errorStages['piloterrorcode'][1149] = 'ddm-start'
self.errorCodes['piloterrorcode'][1150] = 'Looping job killed by pilot'
self.errorStages['piloterrorcode'][1150] = 'athena-end'
self.errorCodes['piloterrorcode'][1151] = 'Get error: Input file staging timed out'
self.errorStages['piloterrorcode'][1151] = 'ddm-start'
self.errorCodes['piloterrorcode'][1152] = 'Put error: File copy timed out'
self.errorStages['piloterrorcode'][1152] = 'ddm-end'
self.errorCodes['piloterrorcode'][1153] = 'Lost job was not finished'
self.errorStages['piloterrorcode'][1153] = 'athena-end'
self.errorCodes['piloterrorcode'][1154] = 'Failed to register log file'
self.errorStages['piloterrorcode'][1154] = 'athena-end'
self.errorCodes['piloterrorcode'][1155] = 'Failed to move output files for lost job'
self.errorStages['piloterrorcode'][1155] = 'athena-end'
self.errorCodes['piloterrorcode'][1156] = 'Pilot could not recover job'
self.errorStages['piloterrorcode'][1156] = 'athena-end'
self.errorCodes['piloterrorcode'][1157] = 'Could not create log file'
self.errorStages['piloterrorcode'][1157] = 'athena-end'
self.errorCodes['piloterrorcode'][1158] = 'Reached maximum number of recovery attempts'
self.errorStages['piloterrorcode'][1158] = 'athena-end'
self.errorCodes['piloterrorcode'][1159] = 'Job recovery could not read PoolFileCatalog.xml file (guids lost)'
self.errorStages['piloterrorcode'][1159] = 'athena-end'
self.errorCodes['piloterrorcode'][1160] = 'LRC registration error: file name string limit exceeded 250'
self.errorStages['piloterrorcode'][1160] = 'ddm-end'
self.errorCodes['piloterrorcode'][1161] = 'Job recovery could not generate xml for remaining output files'
self.errorStages['piloterrorcode'][1161] = 'athena-end'
self.errorCodes['piloterrorcode'][1162] = 'LRC registration error: Non-unique LFN'
self.errorStages['piloterrorcode'][1162] = 'ddm-end'
self.errorCodes['piloterrorcode'][1163] = 'Grid proxy not valid'
self.errorStages['piloterrorcode'][1163] = 'ddm-start'
self.errorCodes['piloterrorcode'][1164] = 'Get error: Local input file missing'
self.errorStages['piloterrorcode'][1164] = 'ddm-start'
self.errorCodes['piloterrorcode'][1165] = 'Put error: Local output file missing'
self.errorStages['piloterrorcode'][1165] = 'ddm-end'
self.errorCodes['piloterrorcode'][1166] = 'Put error: File copy broken by SIGPIPE'
self.errorStages['piloterrorcode'][1166] = 'ddm-end'
self.errorCodes['piloterrorcode'][1167] = 'Get error: Input file missing in PoolFileCatalog.xml'
self.errorStages['piloterrorcode'][1167] = 'ddm-start'
self.errorCodes['piloterrorcode'][1168] = 'Get error: Total file size too large'
self.errorStages['piloterrorcode'][1168] = 'user-start'
self.errorCodes['piloterrorcode'][1169] = 'Put error: LFC registration failed'
self.errorStages['piloterrorcode'][1169] = 'ddm-end'
self.errorCodes['piloterrorcode'][1170] = 'Error running adler32 on the file in job workdir'
self.errorStages['piloterrorcode'][1170] = 'ddm-start'
self.errorCodes['piloterrorcode'][1171] = 'Get error: adler32 mismatch on input file'
self.errorStages['piloterrorcode'][1171] = 'ddm-start'
self.errorCodes['piloterrorcode'][1172] = 'Put error: Problem with copying from job workdir to local SE: adler32 mismatch'
self.errorStages['piloterrorcode'][1172] = 'ddm-end'
self.errorCodes['piloterrorcode'][1173] = 'PandaMover staging error: File is not cached'
self.errorStages['piloterrorcode'][1173] = 'athena-end'
self.errorCodes['piloterrorcode'][1174] = 'PandaMover transfer failure'
self.errorStages['piloterrorcode'][1174] = 'athena-end'
self.errorCodes['piloterrorcode'][1175] = 'Get error: Problem with copying from local SE to job workdir: size mismatch'
self.errorStages['piloterrorcode'][1175] = 'ddm-start'
self.errorCodes['piloterrorcode'][1176] = 'Pilot has no child processes (job wrapper has either crashed or did not send final status)'
self.errorStages['piloterrorcode'][1176] = 'panda-end'
self.errorCodes['piloterrorcode'][1177] = 'Voms proxy not valid'
self.errorStages['piloterrorcode'][1177] = 'ddm-start'
self.errorCodes['piloterrorcode'][1178] = 'Get error: No input files are staged'
self.errorStages['piloterrorcode'][1178] = 'ddm-start'
self.errorCodes['piloterrorcode'][1179] = 'Get error: Failed to get LFC replicas'
self.errorStages['piloterrorcode'][1179] = 'ddm-start'
self.errorCodes['piloterrorcode'][1180] = 'Get error: Globus system error'
self.errorStages['piloterrorcode'][1180] = 'ddm-start'
self.errorCodes['piloterrorcode'][1181] = 'Put error: Globus system error'
self.errorStages['piloterrorcode'][1181] = 'ddm-end'
self.errorCodes['piloterrorcode'][1182] = 'Get error: Failed to get LFC replica'
self.errorStages['piloterrorcode'][1182] = 'ddm-start'
self.errorCodes['piloterrorcode'][1183] = 'LRC registration error: Guid-metadata entry already exists'
self.errorStages['piloterrorcode'][1183] = 'ddm-end'
self.errorCodes['piloterrorcode'][1184] = 'Put error: PoolFileCatalog could not be found in workdir'
self.errorStages['piloterrorcode'][1184] = 'ddm-end'
self.errorCodes['piloterrorcode'][1185] = 'Put error: Error running adler32 on the file in job workdir'
self.errorStages['piloterrorcode'][1185] = 'ddm-end'
self.errorCodes['piloterrorcode'][1186] = 'Software directory does not exist'
self.errorStages['piloterrorcode'][1186] = 'panda-start'
self.errorCodes['piloterrorcode'][1187] = 'Athena metadata is not available'
self.errorStages['piloterrorcode'][1187] = 'athena-end'
self.errorCodes['piloterrorcode'][1188] = 'lcg-getturls failed'
self.errorStages['piloterrorcode'][1188] = 'panda-during'
self.errorCodes['piloterrorcode'][1189] = 'lcg-getturls was timed-out'
self.errorStages['piloterrorcode'][1189] = 'panda-during'
self.errorCodes['piloterrorcode'][1190] = 'LFN too long (exceeding limit of 150 characters)'
self.errorStages['piloterrorcode'][1190] = 'panda-during'
self.errorCodes['piloterrorcode'][1191] = 'Illegal zero file size'
self.errorStages['piloterrorcode'][1191] = 'ddm-end'
self.errorCodes['piloterrorcode'][1192] = 'DBRelease file has not been transferred yet'
self.errorStages['piloterrorcode'][1192] = 'ddm-start'
self.errorCodes['piloterrorcode'][1194] = 'File verification failed'
self.errorStages['piloterrorcode'][1194] = 'panda-during'
self.errorCodes['piloterrorcode'][1195] = 'Command timed out'
self.errorStages['piloterrorcode'][1195] = 'panda-during'
self.errorCodes['piloterrorcode'][1198] = 'Can not check the child process status from the heartbeat process'
self.errorStages['piloterrorcode'][1198] = 'panda-during'
self.errorCodes['piloterrorcode'][1199] = 'Could not create the directory'
self.errorStages['piloterrorcode'][1199] = 'panda-start'
self.errorCodes['piloterrorcode'][1200] = 'Job terminated by unknown kill signal'
self.errorStages['piloterrorcode'][1200] = 'panda-during'
self.errorCodes['piloterrorcode'][1201] = 'Job killed from the batch system: SIGTERM'
self.errorStages['piloterrorcode'][1201] = 'panda-during'
self.errorCodes['piloterrorcode'][1202] = 'Job killed from the batch system: SIGQUIT'
self.errorStages['piloterrorcode'][1202] = 'panda-during'
self.errorCodes['piloterrorcode'][1203] = 'Job killed from the batch system: SIGSEGV'
self.errorStages['piloterrorcode'][1203] = 'panda-during'
self.errorCodes['piloterrorcode'][1204] = 'Job killed from the batch system: SIGXCPU'
self.errorStages['piloterrorcode'][1204] = 'panda-during'
self.errorCodes['piloterrorcode'][1205] = 'Job killed by user'
self.errorStages['piloterrorcode'][1205] = 'user-during'
self.errorCodes['piloterrorcode'][1210] = 'No athena output'
self.errorStages['piloterrorcode'][1210] = 'athena-end'
self.errorCodes['piloterrorcode'][1211] = 'Missing installation'
self.errorStages['piloterrorcode'][1211] = 'panda-end'
self.errorCodes['piloterrorcode'][1212] = 'Athena ran out of memory'
self.errorStages['piloterrorcode'][1212] = 'athena-during'
self.errorCodes['piloterrorcode'][1220] = 'Job failed due to unknown reason (consult log file)'
self.errorStages['piloterrorcode'][1220] = 'athena-end'
self.errorCodes['piloterrorcode'][1187] = 'Payload metadata is not available'
#self.errorStages['piloterrorcode'][1187] = 'athena-end'
self.errorCodes['exeerrorcode'][99] = 'Transformation error code mismatch'
self.errorStages['exeerrorcode'][99] = 'athena-end'
self.errorCodes['exeerrorcode'][100] = 'Transformation not found in run directory'
self.errorStages['exeerrorcode'][100] = 'ddm-start'
for code in range ( 1000, 2000 ):
#try:
if code in self.errorCodes['piloterrorcode']:
self.errorCodes['exeerrorcode'][code] = self.errorCodes['piloterrorcode'][code]
if code in self.errorStages['piloterrorcode']:
self.errorStages['exeerrorcode'][code] = self.errorStages['piloterrorcode'][code]
#except Exception as e:
# print e.__class__, e.__doc__, e.message, 'bigpanda_logstash'
#logging.error(e)
# errors at http://alxr.usatlas.bnl.gov/lxr/source/atlas/Tools/PyJobTransformsCore/share/atlas_error_categories.db?v=current
self.errorCodes['exeerrorcode'][60000] = 'segmentation violation'
self.errorStages['exeerrorcode'][60000] = 'athena-during'
self.errorCodes['exeerrorcode'][60010] = 'segmentation fault'
self.errorStages['exeerrorcode'][60010] = 'athena-during'
self.errorCodes['exeerrorcode'][60100] = 'CBNT_Athena::execute() error'
self.errorStages['exeerrorcode'][60100] = 'athena-during'
self.errorCodes['exeerrorcode'][60101] = 'TRTDigitization::execute() error'
self.errorStages['exeerrorcode'][60101] = 'athena-during'
self.errorCodes['exeerrorcode'][60200] = 'egammaShowerShape: Cluster is neither in Barrel nor in Endcap cannot calculate ShowerShape'
self.errorStages['exeerrorcode'][60200] = 'athena-during'
self.errorCodes['exeerrorcode'][60201] = 'LArEMECEnergyCorrection::CalculateChargeCollection error'
self.errorStages['exeerrorcode'][60201] = 'athena-during'
self.errorCodes['exeerrorcode'][60600] = 'Transform Keyboard interrupt'
self.errorStages['exeerrorcode'][60600] = 'athena-during'
self.errorCodes['exeerrorcode'][60701] = 'CBNT_Audit could not allocate memory'
self.errorStages['exeerrorcode'][60701] = 'athena-during'
self.errorCodes['exeerrorcode'][61000] = 'ApplicationMgr Failed to load modules'
self.errorStages['exeerrorcode'][61000] = 'athena-during'
self.errorCodes['exeerrorcode'][61010] = 'DllClassManager Could not load module'
self.errorStages['exeerrorcode'][61010] = 'athena-during'
self.errorCodes['exeerrorcode'][61020] = 'DllClassManager Entry point failure in module'
self.errorStages['exeerrorcode'][61020] = 'athena-during'
self.errorCodes['exeerrorcode'][61100] = 'EventLoopMgr Unable to initialize Algorithm'
self.errorStages['exeerrorcode'][61100] = 'athena-during'
self.errorCodes['exeerrorcode'][61200] = 'ServiceManager Unable to initialize Service'
self.errorStages['exeerrorcode'][61200] = 'athena-during'
self.errorCodes['exeerrorcode'][62000] = 'ServiceManager Finalization of service AthenaSealSvc failed'
self.errorStages['exeerrorcode'][62000] = 'athena-during'
self.errorCodes['exeerrorcode'][62100] = 'pixelRoI service_i: can not locate service DetectorStore'
self.errorStages['exeerrorcode'][62100] = 'athena-during'
self.errorCodes['exeerrorcode'][62200] = 'pool::PersistencySvc::UserDatabase::connectForRead: PFN is not existing in the catalog'
self.errorStages['exeerrorcode'][62200] = 'athena-during'
self.errorCodes['exeerrorcode'][62300] = 'ServiceManager: unable to initialize Service: EventSelector'
self.errorStages['exeerrorcode'][62300] = 'athena-during'
self.errorCodes['exeerrorcode'][62400] = 'JobOptionsSvc error'
self.errorStages['exeerrorcode'][62400] = 'athena-during'
self.errorCodes['exeerrorcode'][62500] = 'PartPropSvc: could not open PDT file'
self.errorStages['exeerrorcode'][62500] = 'athena-during'
self.errorCodes['exeerrorcode'][62510] = 'PartPropSvc: unable to access any PDT file'
self.errorStages['exeerrorcode'][62510] = 'athena-during'
self.errorCodes['exeerrorcode'][62600] = 'AthenaCrash'
self.errorStages['exeerrorcode'][62600] = 'athena-during'
self.errorCodes['exeerrorcode'][62700] = 'DetectorStore: no valid proxy for default object'
self.errorStages['exeerrorcode'][62700] = 'athena-during'
self.errorCodes['exeerrorcode'][62800] = 'JobOptionsSvc: unable to set property'
self.errorStages['exeerrorcode'][62800] = 'athena-during'
self.errorCodes['exeerrorcode'][62900] = 'DllClassManager: system Error'
self.errorStages['exeerrorcode'][62900] = 'athena-during'
self.errorCodes['exeerrorcode'][62910] = 'ApplicationMgr: failure loading declared DLL\'s'
self.errorStages['exeerrorcode'][62910] = 'athena-during'
self.errorCodes['exeerrorcode'][63000] = 'Transform python errors'
self.errorStages['exeerrorcode'][63000] = 'athena-during'
self.errorCodes['exeerrorcode'][63010] = 'Transform python syntax error'
self.errorStages['exeerrorcode'][63010] = 'athena-during'
self.errorCodes['exeerrorcode'][63020] = 'Transform python import error'
self.errorStages['exeerrorcode'][63020] = 'athena-during'
self.errorCodes['exeerrorcode'][63100] = 'Transform argument errors'
self.errorStages['exeerrorcode'][63100] = 'athena-during'
self.errorCodes['exeerrorcode'][63110] = 'maxEvents argument: Too few events requested'
self.errorStages['exeerrorcode'][63110] = 'user-during'
self.errorCodes['exeerrorcode'][63111] = 'maxEvents argument: Too many events requested'
self.errorStages['exeerrorcode'][63111] = 'user-during'
self.errorCodes['exeerrorcode'][63200] = 'Transform definition errors'
self.errorStages['exeerrorcode'][63200] = 'athena-during'
self.errorCodes['exeerrorcode'][63300] = 'Transform environment errors'
self.errorStages['exeerrorcode'][63300] = 'athena-during'
self.errorCodes['exeerrorcode'][63400] = 'Transform unknown exceptions'
self.errorStages['exeerrorcode'][63400] = 'athena-during'
self.errorCodes['exeerrorcode'][63500] = 'Transform execution timeout'
self.errorStages['exeerrorcode'][63500] = 'athena-during'
self.errorCodes['exeerrorcode'][63600] = 'Transform execution retries exhausted'
self.errorStages['exeerrorcode'][63600] = 'athena-during'
self.errorCodes['exeerrorcode'][63900] = 'Transform file errors'
self.errorStages['exeerrorcode'][63900] = 'athena-during'
self.errorCodes['exeerrorcode'][64000] = 'Transform input file errors'
self.errorStages['exeerrorcode'][64000] = 'athena-during'
self.errorCodes['exeerrorcode'][64010] = 'Transform input file not found'
self.errorStages['exeerrorcode'][64010] = 'athena-during'
self.errorCodes['exeerrorcode'][64020] = 'Transform input file not readable'
self.errorStages['exeerrorcode'][64020] = 'athena-during'
self.errorCodes['exeerrorcode'][64030] = 'Transform input file empty'
self.errorStages['exeerrorcode'][64030] = 'athena-during'
self.errorCodes['exeerrorcode'][64031] = 'Transform input file contains too few events'
self.errorStages['exeerrorcode'][64031] = 'athena-during'
self.errorCodes['exeerrorcode'][64032] = 'Transform input file contains too many events'
self.errorStages['exeerrorcode'][64032] = 'athena-during'
self.errorCodes['exeerrorcode'][64033] = 'Transform input file: Event counting failed'
self.errorStages['exeerrorcode'][64033] = 'athena-during'
self.errorCodes['exeerrorcode'][64040] = 'Transform input file corrupted'
self.errorStages['exeerrorcode'][64040] = 'athena-during'
self.errorCodes['exeerrorcode'][64100] = 'Transform output file errors'
self.errorStages['exeerrorcode'][64100] = 'athena-during'
self.errorCodes['exeerrorcode'][64110] = 'Transform output file not found'
self.errorStages['exeerrorcode'][64110] = 'athena-during'
self.errorCodes['exeerrorcode'][64120] = 'Transform output file not readable'
self.errorStages['exeerrorcode'][64120] = 'athena-during'
self.errorCodes['exeerrorcode'][64130] = 'Transform output file empty'
self.errorStages['exeerrorcode'][64130] = 'athena-during'
self.errorCodes['exeerrorcode'][64131] = 'Transform output file contains too few events'
self.errorStages['exeerrorcode'][64131] = 'athena-during'
self.errorCodes['exeerrorcode'][64132] = 'Transform output file contains too many events'
self.errorStages['exeerrorcode'][64132] = 'athena-during'
self.errorCodes['exeerrorcode'][64133] = 'Transform output file: Event counting failed'
self.errorStages['exeerrorcode'][64133] = 'athena-during'
self.errorCodes['exeerrorcode'][64140] = 'Transform output file corrupted'
self.errorStages['exeerrorcode'][64140] = 'athena-during'
self.errorCodes['exeerrorcode'][64150] = 'Transform output file already exists'
self.errorStages['exeerrorcode'][64150] = 'athena-during'
self.errorCodes['exeerrorcode'][64200] = 'Error in transform configuration file'
self.errorStages['exeerrorcode'][64200] = 'athena-during'
self.errorCodes['exeerrorcode'][65000] = 'Problems with Database'
self.errorStages['exeerrorcode'][65000] = 'athena-during'
self.errorCodes['exeerrorcode'][65100] = 'Problems with DBRelease'
self.errorStages['exeerrorcode'][65100] = 'athena-during'
self.errorCodes['exeerrorcode'][65110] = 'DBRelease not setup'
self.errorStages['exeerrorcode'][65110] = 'athena-during'
self.errorCodes['exeerrorcode'][65120] = 'Wrong version of DBRelease setup'
self.errorStages['exeerrorcode'][65120] = 'athena-during'
self.errorCodes['exeerrorcode'][65130] = 'Problems with the DBRelease tarfile'
self.errorStages['exeerrorcode'][65130] = 'athena-during'
self.errorCodes['exeerrorcode'][65200] = 'Problems with geometry tag'
self.errorStages['exeerrorcode'][65200] = 'athena-during'
self.errorCodes['exeerrorcode'][65210] = 'Mismatch between Geometry Tag in transform argument geometryVersion and in input file'
self.errorStages['exeerrorcode'][65210] = 'athena-during'
self.errorCodes['exeerrorcode'][66000] = 'Bad file descriptor'
self.errorStages['exeerrorcode'][66000] = 'athena-during'
self.errorCodes['exeerrorcode'][69999] = 'Unknown Transform error'
self.errorStages['exeerrorcode'][69999] = 'athena-during'
self.errorCodes['exeerrorcode'][10000] = 'Athena/Transformation error'
self.errorStages['exeerrorcode'][10000] = 'athena-during'
self.errorCodes['exeerrorcode'][10010] = 'Core dump from CoreDumpSvc'
self.errorStages['exeerrorcode'][10010] = 'athena-during'
self.errorCodes['exeerrorcode'][10100] = 'At/Tr connection error'
self.errorStages['exeerrorcode'][10100] = 'athena-during'
self.errorCodes['exeerrorcode'][10102] = 'Nova DB problems'
self.errorStages['exeerrorcode'][10102] = 'athena-during'
self.errorCodes['exeerrorcode'][10103] = 'Calibration DB problems'
self.errorStages['exeerrorcode'][10103] = 'athena-during'
self.errorCodes['exeerrorcode'][10104] = 'Oracle error ORA-03113'
self.errorStages['exeerrorcode'][10104] = 'panda-during'
self.errorCodes['exeerrorcode'][10110] = 'Conditions database problems'
self.errorStages['exeerrorcode'][10110] = 'athena-during'
self.errorCodes['exeerrorcode'][10120] = 'nfs lock problems with sqlite database'
self.errorStages['exeerrorcode'][10120] = 'athena-during'
self.errorCodes['exeerrorcode'][10130] = 'Lost connection to MySQL server'
self.errorStages['exeerrorcode'][10130] = 'panda-during'
self.errorCodes['exeerrorcode'][10140] = 'Oracle error ORA-02391: exceeded simultaneous SESSIONS_PER_USER limit'
self.errorStages['exeerrorcode'][10140] = 'panda-during'
self.errorCodes['exeerrorcode'][10200] = 'Athena crashes'
self.errorStages['exeerrorcode'][10200] = 'athena-during'
self.errorCodes['exeerrorcode'][10210] = 'Athena init failed'
self.errorStages['exeerrorcode'][10210] = 'athena-during'
self.errorCodes['exeerrorcode'][10212] = 'Missing PFN in PoolFileCatalog'
self.errorStages['exeerrorcode'][10212] = 'athena-during'
self.errorCodes['exeerrorcode'][10213] = 'AuditorSvc init failed'
self.errorStages['exeerrorcode'][10213] = 'athena-during'
self.errorCodes['exeerrorcode'][10214] = 'Pythia DLL not loaded'
self.errorStages['exeerrorcode'][10214] = 'athena-during'
self.errorCodes['exeerrorcode'][10220] = 'Input file corrupted (Wrong input)'
self.errorStages['exeerrorcode'][10220] = 'athena-during'
self.errorCodes['exeerrorcode'][10300] = 'ApplicationMgr Failed to load modules'
self.errorStages['exeerrorcode'][10300] = 'athena-during'
self.errorCodes['exeerrorcode'][10310] = 'DllClassManager Could not load module'
self.errorStages['exeerrorcode'][10310] = 'athena-during'
self.errorCodes['exeerrorcode'][10400] = 'Problems loading dynamic libraries'
self.errorStages['exeerrorcode'][10400] = 'athena-during'
self.errorCodes['exeerrorcode'][10410] = 'Problem loading shared library'
self.errorStages['exeerrorcode'][10410] = 'athena-during'
self.errorCodes['exeerrorcode'][10420] = 'ApplicationMgr: failure loading declared DLL\'s'
self.errorStages['exeerrorcode'][10420] = 'athena-during'
self.errorCodes['exeerrorcode'][10430] = 'Problems loading shared libraries in LD_PRELOAD '
self.errorStages['exeerrorcode'][10430] = 'athena-during'
self.errorCodes['exeerrorcode'][10500] = 'JobOptions errors'
self.errorStages['exeerrorcode'][10500] = 'user-during'
self.errorCodes['exeerrorcode'][10510] = 'JobOptions file not found'
self.errorStages['exeerrorcode'][10510] = 'user-during'
self.errorCodes['exeerrorcode'][10520] = 'Error in jobOptions'
self.errorStages['exeerrorcode'][10520] = 'user-during'
self.errorCodes['exeerrorcode'][10600] = 'Athena Keyboard interrupt'
self.errorStages['exeerrorcode'][10600] = 'user-during'
self.errorCodes['exeerrorcode'][10700] = 'Athena StoreGateSvc errors'
self.errorStages['exeerrorcode'][10700] = 'athena-during'
self.errorCodes['exeerrorcode'][10710] = 'StoreGateSvc retrieve errors'
self.errorStages['exeerrorcode'][10710] = 'athena-during'
self.errorCodes['exeerrorcode'][10711] = 'StoreGateSvc retrieve(default): No valid proxy for object'
self.errorStages['exeerrorcode'][10711] = 'athena-during'
self.errorCodes['exeerrorcode'][10712] = 'StoreGateSvc retrieve(non-const): No valid proxy for object'
self.errorStages['exeerrorcode'][10712] = 'athena-during'
self.errorCodes['exeerrorcode'][10713] = 'StoreGateSvc retrieve(const): No valid proxy for object'
self.errorStages['exeerrorcode'][10713] = 'athena-during'
self.errorCodes['exeerrorcode'][10720] = 'StoreGateSvc record: object not added to store'
self.errorStages['exeerrorcode'][10720] = 'athena-during'
self.errorCodes['exeerrorcode'][10800] = 'Athena DetectorStore errors'
self.errorStages['exeerrorcode'][10800] = 'athena-during'
self.errorCodes['exeerrorcode'][10810] = 'DetectorStore retrieve errors'
self.errorStages['exeerrorcode'][10810] = 'athena-during'
self.errorCodes['exeerrorcode'][10811] = 'DetectorStore retrieve(default): No valid proxy for object'
self.errorStages['exeerrorcode'][10811] = 'athena-during'
self.errorCodes['exeerrorcode'][10812] = 'DetectorStore retrieve(non-const): No valid proxy for object'
self.errorStages['exeerrorcode'][10812] = 'athena-during'
self.errorCodes['exeerrorcode'][10813] = 'DetectorStore retrieve(const): No valid proxy for object'
self.errorStages['exeerrorcode'][10813] = 'athena-during'
self.errorCodes['exeerrorcode'][10820] = 'DetectorStore record: object not added to store'
self.errorStages['exeerrorcode'][10820] = 'athena-during'
self.errorCodes['exeerrorcode'][10900] = 'Problems with software installation'
self.errorStages['exeerrorcode'][10900] = 'athena-during'
self.errorCodes['exeerrorcode'][10910] = 'Missing system libraries'
self.errorStages['exeerrorcode'][10910] = 'athena-during'
self.errorCodes['exeerrorcode'][10920] = 'Missing libraries'
self.errorStages['exeerrorcode'][10920] = 'athena-during'
self.errorCodes['exeerrorcode'][11000] = 'Athena non-zero exit'
self.errorStages['exeerrorcode'][11000] = 'athena-during'
self.errorCodes['exeerrorcode'][13400] = 'Athena unknown exception'
self.errorStages['exeerrorcode'][13400] = 'athena-during'
self.errorCodes['exeerrorcode'][13410] = 'Athena python exception'
self.errorStages['exeerrorcode'][13410] = 'athena-during'
self.errorCodes['exeerrorcode'][13420] = 'Athena C++ exception'
self.errorStages['exeerrorcode'][13420] = 'athena-during'
self.errorCodes['exeerrorcode'][14100] = 'Athena output file errors'
self.errorStages['exeerrorcode'][14100] = 'athena-during'
self.errorCodes['exeerrorcode'][14110] = 'Athena pool.root file too large (root opened second file)'
self.errorStages['exeerrorcode'][14110] = 'athena-during'
self.errorCodes['exeerrorcode'][15010] = 'Geant4 got stuck in event'
self.errorStages['exeerrorcode'][15010] = 'athena-during'
self.errorCodes['exeerrorcode'][15000] = 'Problems with ElementLink'
self.errorStages['exeerrorcode'][15000] = 'athena-during'
self.errorCodes['transexitcode'] = {}
self.errorStages['transexitcode'] = {}
self.errorCodes['transexitcode'][1] = 'Unspecified error, consult log file'
self.errorStages['transexitcode'][1] = 'athena-during'
self.errorCodes['transexitcode'][2] = 'Payload core dump'
self.errorStages['transexitcode'][2] = 'athena-during'
self.errorCodes['transexitcode'][3] = 'Transform setup error'
self.errorStages['transexitcode'][3] = 'athena-during'
self.errorCodes['transexitcode'][4] = 'Failure to convert transform arguments to correct type'
self.errorStages['transexitcode'][4] = 'athena-during'
self.errorCodes['transexitcode'][5] = 'Argument out of allowed range'
self.errorStages['transexitcode'][5] = 'athena-during'
self.errorCodes['transexitcode'][6] = 'TRF_SEGVIO - Segmentation violation'
self.errorStages['transexitcode'][6] = 'athena-during'
self.errorCodes['transexitcode'][7] = 'Problem with an argument given to the transform'
self.errorStages['transexitcode'][7] = 'athena-during'
self.errorCodes['transexitcode'][8] = 'Inconsistent dataset value in input file list'
self.errorStages['transexitcode'][8] = 'athena-during'
self.errorCodes['transexitcode'][9] = 'Error found with transform input file'
self.errorStages['transexitcode'][9] = 'athena-during'
self.errorCodes['transexitcode'][10] = 'ATH_FAILURE - Athena non-zero exit'
self.errorStages['transexitcode'][10] = 'athena-during'
self.errorCodes['transexitcode'][11] = 'Error when handling transform output file'
self.errorStages['transexitcode'][11] = 'athena-during'
self.errorCodes['transexitcode'][12] = 'Problem in setting up the substep graph'
self.errorStages['transexitcode'][12] = 'athena-during'
self.errorCodes['transexitcode'][13] = 'Problem trying to generate requested outputs from given inputs - graph give no path to do this'
self.errorStages['transexitcode'][13] = 'athena-during'
self.errorCodes['transexitcode'][14] = 'Key error in a transform internal dictionary'
self.errorStages['transexitcode'][14] = 'athena-during'
self.errorCodes['transexitcode'][15] = 'No events to be processed in the input file - skipEvents is higher than actual event number'
self.errorStages['transexitcode'][15] = 'athena-during'
self.errorCodes['transexitcode'][16] = 'Invalid selection in a choice restricted argument'
self.errorStages['transexitcode'][16] = 'athena-during'
self.errorCodes['transexitcode'][17] = 'Mandatory argument is missing'
self.errorStages['transexitcode'][17] = 'athena-during'
self.errorCodes['transexitcode'][18] = 'Invalid configuration of a reduction job'
self.errorStages['transexitcode'][18] = 'athena-during'
self.errorCodes['transexitcode'][19] = 'Problem when trying to apply steering to the substep graph'
self.errorStages['transexitcode'][19] = 'athena-during'
self.errorCodes['transexitcode'][26] = 'TRF_ATHENACRASH - Athena crash'
self.errorStages['transexitcode'][26] = 'athena-during'
self.errorCodes['transexitcode'][30] = 'TRF_PYT - transformation python error'
self.errorStages['transexitcode'][30] = 'athena-during'
self.errorCodes['transexitcode'][31] = 'TRF_ARG - transformation argument error'
self.errorStages['transexitcode'][31] = 'athena-during'
self.errorCodes['transexitcode'][32] = 'TRF_DEF - transformation definition error'
self.errorStages['transexitcode'][32] = 'athena-during'
self.errorCodes['transexitcode'][33] = 'TRF_ENV - transformation environment error'
self.errorStages['transexitcode'][33] = 'athena-during'
self.errorCodes['transexitcode'][34] = 'TRF_EXC - transformation exception'
self.errorStages['transexitcode'][34] = 'athena-during'
self.errorCodes['transexitcode'][35] = 'Internal error while generating transform reports'
self.errorStages['transexitcode'][35] = 'athena-during'
self.errorCodes['transexitcode'][36] = 'Call to external metadata program failed'
self.errorStages['transexitcode'][36] = 'athena-during'
self.errorCodes['transexitcode'][37] = 'General runtime error'
self.errorStages['transexitcode'][37] = 'athena-during'
self.errorCodes['transexitcode'][38] = 'Event count validation failed'
self.errorStages['transexitcode'][38] = 'athena-during'
self.errorCodes['transexitcode'][39] = 'Failed to setup DBRelease properly'
self.errorStages['transexitcode'][39] = 'athena-during'
self.errorCodes['transexitcode'][40] = 'Athena crash - consult log file'
self.errorStages['transexitcode'][40] = 'athena-during'
self.errorCodes['transexitcode'][41] = 'TRF_OUTFILE - output file error'
self.errorStages['transexitcode'][41] = 'athena-during'
self.errorCodes['transexitcode'][42] = 'TRF_CONFIG - transform config file error'
self.errorStages['transexitcode'][42] = 'athena-during'
self.errorCodes['transexitcode'][50] = 'TRF_DB - problems with database'
self.errorStages['transexitcode'][50] = 'athena-during'
self.errorCodes['transexitcode'][51] = 'TRF_DBREL_TARFILE - Problems with the DBRelease tarfile'
self.errorStages['transexitcode'][51] = 'athena-during'
self.errorCodes['transexitcode'][52] = 'Problem while attempting to merge output files'
self.errorStages['transexitcode'][52] = 'athena-during'
self.errorCodes['transexitcode'][60] = 'TRF_GBB_TIME - GriBB - output limit exceeded (time, memory, CPU)'
self.errorStages['transexitcode'][60] = 'athena-during'
self.errorCodes['transexitcode'][64] = 'General failure in transform substep executor'
self.errorStages['transexitcode'][64] = 'athena-during'
self.errorCodes['transexitcode'][65] = 'Non-zero exit code from transform substep executor'
self.errorStages['transexitcode'][65] = 'athena-during'
self.errorCodes['transexitcode'][66] = 'Validation failure in transform substep executor'
self.errorStages['transexitcode'][66] = 'athena-during'
self.errorCodes['transexitcode'][67] = 'Transform substep executor timed out'
self.errorStages['transexitcode'][67] = 'athena-during'
self.errorCodes['transexitcode'][68] = 'Errors found in substep executor logfile'
self.errorStages['transexitcode'][68] = 'athena-during'
self.errorCodes['transexitcode'][69] = 'Transform substep executor setup failed'
self.errorStages['transexitcode'][69] = 'athena-during'
self.errorCodes['transexitcode'][70] = 'Transform substep executor wrapper script problem'
self.errorStages['transexitcode'][70] = 'athena-during'
self.errorCodes['transexitcode'][71] = 'Problem with substep executor logfile'
self.errorStages['transexitcode'][71] = 'athena-during'
self.errorCodes['transexitcode'][72] = 'Problem getting AMI tag info'
self.errorStages['transexitcode'][72] = 'athena-during'
self.errorCodes['transexitcode'][74] = 'Input file failed validation'
self.errorStages['transexitcode'][74] = 'athena-during'
self.errorCodes['transexitcode'][75] = 'Output file failed validation'
self.errorStages['transexitcode'][75] = 'athena-during'
self.errorCodes['transexitcode'][79] = 'Copying input file failed'
self.errorStages['transexitcode'][79] = 'ddm-start'
self.errorCodes['transexitcode'][80] = 'file in trf definition not found, using the expandable syntax'
self.errorStages['transexitcode'][80] = 'athena-during'
self.errorCodes['transexitcode'][81] = 'file in trf definition not found, using the expandable syntax -- pileup case'
self.errorStages['transexitcode'][81] = 'athena-during'
self.errorCodes['transexitcode'][85] = 'analysis output merge crash - consult log file'
self.errorStages['transexitcode'][85] = 'athena-during'
self.errorCodes['transexitcode'][98] = 'Oracle error - session limit reached'
self.errorStages['transexitcode'][98] = 'panda-during'
self.errorCodes['transexitcode'][99] = 'TRF_UNKNOWN - unknown transformation error'
self.errorStages['transexitcode'][99] = 'athena-during'
self.errorCodes['transexitcode'][102] = 'One of the output files did not get produced by the job'
self.errorStages['transexitcode'][102] = 'athena-end'
self.errorCodes['transexitcode'][104] = 'Copying the output file to local SE failed (md5sum or size mismatch, or LFNnonunique)'
self.errorStages['transexitcode'][104] = 'ddm-end'
self.errorCodes['transexitcode'][126] = 'Transformation not executable - consult log file'
self.errorStages['transexitcode'][126] = 'athena-start'
self.errorCodes['transexitcode'][127] = 'Transformation not installed in CE'
self.errorStages['transexitcode'][127] = 'panda-start'
self.errorCodes['transexitcode'][129] = 'Transform received signal SIGHUP'
self.errorStages['transexitcode'][129] = 'athena-during'
self.errorCodes['transexitcode'][130] = 'Transform received signal SIGINT'
self.errorStages['transexitcode'][130] = 'athena-during'
self.errorCodes['transexitcode'][131] = 'Transform received signal SIGQUIT'
self.errorStages['transexitcode'][131] = 'athena-during'
self.errorCodes['transexitcode'][132] = 'Transform received signal SIGILL'
self.errorStages['transexitcode'][132] = 'athena-during'
self.errorCodes['transexitcode'][133] = 'Transform received signal SIGTRAP'
self.errorStages['transexitcode'][133] = 'athena-during'
self.errorCodes['transexitcode'][134] = 'New trf: Transform received signal SIGABRT; Old trf: Athena core dump or timeout, or conddb DB connect exception'
self.errorStages['transexitcode'][134] = 'athena-during'
self.errorCodes['transexitcode'][135] = 'Transform received signal SIGBUS'
self.errorStages['transexitcode'][135] = 'athena-during'
self.errorCodes['transexitcode'][136] = 'Transform received signal SIGFPE'
self.errorStages['transexitcode'][136] = 'athena-during'
self.errorCodes['transexitcode'][137] = 'Transform received signal SIGKILL'
self.errorStages['transexitcode'][137] = 'athena-during'
self.errorCodes['transexitcode'][138] = 'Transform received signal SIGUSR1'
self.errorStages['transexitcode'][138] = 'athena-during'
self.errorCodes['transexitcode'][139] = 'Transform received signal SIGSEGV'
self.errorStages['transexitcode'][139] = 'athena-during'
self.errorCodes['transexitcode'][140] = 'Transform received signal SIGUSR2'
self.errorStages['transexitcode'][140] = 'athena-during'
self.errorCodes['transexitcode'][141] = "New trf: Transform received signal SIGPIPE; Old trf: No input file available - check availability of input dataset at site"
self.errorStages['transexitcode'][141] = 'ddm-start'
self.errorCodes['transexitcode'][142] = 'Transform received signal SIGALRM'
self.errorStages['transexitcode'][142] = 'athena-during'
self.errorCodes['transexitcode'][143] = 'Transform received signal SIGTERM'
self.errorStages['transexitcode'][143] = 'athena-during'
self.errorCodes['transexitcode'][145] = 'Transform received signal SIGCHLD'
self.errorStages['transexitcode'][145] = 'athena-during'
self.errorCodes['transexitcode'][146] = 'Transform received signal SIGCONT'
self.errorStages['transexitcode'][146] = 'athena-during'
self.errorCodes['transexitcode'][147] = 'Transform received signal SIGSTOP'
self.errorStages['transexitcode'][147] = 'athena-during'
self.errorCodes['transexitcode'][148] = 'Transform received signal SIGTSTP'
self.errorStages['transexitcode'][148] = 'athena-during'
self.errorCodes['transexitcode'][149] = 'Transform received signal SIGTTIN'
self.errorStages['transexitcode'][149] = 'athena-during'
self.errorCodes['transexitcode'][150] = 'Transform received signal SIGTTOU'
self.errorStages['transexitcode'][150] = 'athena-during'
self.errorCodes['transexitcode'][151] = 'Transform received signal SIGURG'
self.errorStages['transexitcode'][151] = 'athena-during'
self.errorCodes['transexitcode'][152] = 'Transform received signal SIGXCPU'
self.errorStages['transexitcode'][152] = 'athena-during'
self.errorCodes['transexitcode'][153] = 'Transform received signal SIGXFSZ'
self.errorStages['transexitcode'][153] = 'athena-during'
self.errorCodes['transexitcode'][154] = 'Transform received signal SIGVTALRM'
self.errorStages['transexitcode'][154] = 'athena-during'
self.errorCodes['transexitcode'][155] = 'Transform received signal SIGPROF'
self.errorStages['transexitcode'][155] = 'athena-during'
self.errorCodes['transexitcode'][156] = 'Transform received signal SIGWINCH'
self.errorStages['transexitcode'][156] = 'athena-during'
self.errorCodes['transexitcode'][157] = 'Transform received signal SIGIO'
self.errorStages['transexitcode'][157] = 'athena-during'
self.errorCodes['transexitcode'][158] = 'Transform received signal SIGPWR'
self.errorStages['transexitcode'][158] = 'athena-during'
self.errorCodes['transexitcode'][159] = 'Transform received signal SIGSYS'
self.errorStages['transexitcode'][159] = 'athena-during'
self.errorCodes['transexitcode'][162] = 'Transform received signal SIGRTMIN'
self.errorStages['transexitcode'][162] = 'athena-during'
self.errorCodes['transexitcode'][166] = 'PanDA Mover: SRM_INVALID_PATH, No such file or directory'
self.errorStages['transexitcode'][166] = 'panda-during'
self.errorCodes['transexitcode'][192] = 'Transform received signal SIGRTMAX'
self.errorStages['transexitcode'][192] = 'athena-during'
self.errorCodes['transexitcode'][200] = 'Log file not transferred to destination'
self.errorStages['transexitcode'][200] = 'ddm-end'
self.errorCodes['transexitcode'][220] = 'Proot: An exception occurred in the user analysis code'
self.errorStages['transexitcode'][220] = 'athena-during'
self.errorCodes['transexitcode'][221] = 'Proot: Framework decided to abort the job due to an internal problem'
self.errorStages['transexitcode'][221] = 'athena-during'
self.errorCodes['transexitcode'][222] = 'Proot: Job completed without reading all input files'
self.errorStages['transexitcode'][222] = 'athena-during'
self.errorCodes['transexitcode'][223] = 'Proot: Input files cannot be opened'
self.errorStages['transexitcode'][223] = 'athena-during'
for code in ( 1008, 1098, 1112, 1116, 1117, 1118, 1119, 1163, 1177, 1178 ):
if code in self.errorCodes['piloterrorcode']:
self.errorCodes['transexitcode'][code] = self.errorCodes['piloterrorcode'][code]
if code in self.errorStages['piloterrorcode']:
self.errorStages['transexitcode'][code] = self.errorStages['piloterrorcode'][code]
self.errorCodes['transexitcode'][1198] = 'Can\'t check the child process status from the heartbeat process'
self.errorStages['transexitcode'][1198] = 'athena-during'
self.errorCodes['transexitcode'][2100] = "MyProxyError 2100: server name not specified"
self.errorStages['transexitcode'][2100] = 'panda-start'
self.errorCodes['transexitcode'][2101] = "MyProxyError 2101: voms attributes not specified"
self.errorStages['transexitcode'][2101] = 'panda-start'
self.errorCodes['transexitcode'][2102] = "MyProxyError 2102: user DN not specified"
self.errorStages['transexitcode'][2102] = 'panda-start'
self.errorCodes['transexitcode'][2103] = "MyProxyError 2103: pilot owner DN not specified"
self.errorStages['transexitcode'][2103] = 'panda-start'
self.errorCodes['transexitcode'][2104] = "MyProxyError 2104: invalid path for the delegated proxy"
self.errorStages['transexitcode'][2104] = 'panda-start'
self.errorCodes['transexitcode'][2105] = "MyProxyError 2105: invalid pilot proxy path"
self.errorStages['transexitcode'][2105] = 'panda-start'
self.errorCodes['transexitcode'][2106] = "MyProxyError 2106: no path to delegated proxy specified"
self.errorStages['transexitcode'][2106] = 'panda-start'
self.errorCodes['transexitcode'][2200] = "MyProxyError 2200: myproxy-init not available in PATH"
self.errorStages['transexitcode'][2200] = 'panda-start'
self.errorCodes['transexitcode'][2201] = "MyProxyError 2201: myproxy-logon not available in PATH"
self.errorStages['transexitcode'][2201] = 'panda-start'
self.errorCodes['transexitcode'][2202] = "MyProxyError 2202: myproxy-init version not valid"
self.errorStages['transexitcode'][2202] = 'panda-start'
self.errorCodes['transexitcode'][2203] = "MyProxyError 2203: myproxy-logon version not valid"
self.errorStages['transexitcode'][2203] = 'panda-start'
self.errorCodes['transexitcode'][2300] = "MyProxyError 2300: proxy delegation failed"
self.errorStages['transexitcode'][2300] = 'panda-start'
self.errorCodes['transexitcode'][2301] = "MyProxyError 2301: proxy retrieval failed"
self.errorStages['transexitcode'][2301] = 'panda-start'
self.errorCodes['transexitcode'][2400] = "MyProxyError 2400: security violation. Logname and DN do not match"
self.errorStages['transexitcode'][2400] = 'panda-start'
self.errorCodes['transexitcode'][2500] = "MyProxyError 2500: there is no a valid proxy"
self.errorStages['transexitcode'][2500] = 'panda-start'
self.errorCodes['transexitcode'][2501] = "MyProxyError 2501: voms-proxy-info not available in PATH"
self.errorStages['transexitcode'][2501] = 'panda-start'
self.errorCodes['transexitcode'][3000] = 'curl failed to download pilot wrapper'
self.errorStages['transexitcode'][3000] = 'panda-start'
self.errorCodes['transexitcode'][3001] = 'Failed to download pilot code'
self.errorStages['transexitcode'][3001] = 'panda-start'
# dq2_cr error codes
self.errorCodes['transexitcode'][10020] = 'dq2_cr environment variables not properly defined'
self.errorStages['transexitcode'][10020] = 'ddm-end'
self.errorCodes['transexitcode'][10030] = 'dq2_cr getVUID error'
self.errorStages['transexitcode'][10030] = 'ddm-end'
self.errorCodes['transexitcode'][10040] = 'dq2_cr queryFilesInDataset error'
self.errorStages['transexitcode'][10040] = 'ddm-end'
self.errorCodes['transexitcode'][10050] = 'dq2_cr getLocation error'
self.errorStages['transexitcode'][10050] = 'ddm-end'
self.errorCodes['transexitcode'][10060] = 'dq2_cr requested protocol is not supported'
self.errorStages['transexitcode'][10060] = 'ddm-end'
self.errorCodes['transexitcode'][10070] = 'dq2_cr EC_MAIN error, check logfile'
self.errorStages['transexitcode'][10070] = 'ddm-end'
self.errorCodes['transexitcode'][10080] = 'dq2_cr PFNfromLFC error'
self.errorStages['transexitcode'][10080] = 'ddm-end'
self.errorCodes['transexitcode'][10090] = 'dq2_cr file size check failed'
self.errorStages['transexitcode'][10090] = 'ddm-end'
self.errorCodes['transexitcode'][10100] = 'dq2_cr could not create LFC directory'
self.errorStages['transexitcode'][10100] = 'ddm-end'
self.errorCodes['transexitcode'][10110] = 'dq2_cr LS error'
self.errorStages['transexitcode'][10110] = 'ddm-end'
self.errorCodes['transexitcode'][10120] = 'dq2_cr could not get dataset state from DQ2 server'
self.errorStages['transexitcode'][10120] = 'ddm-end'
self.errorCodes['transexitcode'][10130] = 'dq2_cr could not load ToA'
self.errorStages['transexitcode'][10130] = 'ddm-end'
self.errorCodes['transexitcode'][10140] = 'dq2_cr could not parse XML'
self.errorStages['transexitcode'][10140] = 'ddm-end'
self.errorCodes['transexitcode'][10150] = 'dq2_cr FileNotFound error'
self.errorStages['transexitcode'][10150] = 'ddm-end'
# ----------------------------------------------------------------------
# D A Y A B A Y E R R O R S
# ----------------------------------------------------------------------
self.errorCodes['transexitcode'][1000001] = 'ERROR message'
        self.errorStages['transexitcode'][1000001] = 'panda-start'
self.errorCodes['transexitcode'][1000002] = 'FATAL message'
self.errorStages['transexitcode'][1000002] = 'panda-start'
self.errorCodes['transexitcode'][1000003] = 'segmentation violation message'
self.errorStages['transexitcode'][1000003] = 'panda-start'
self.errorCodes['transexitcode'][1000004] = 'IOError message'
self.errorStages['transexitcode'][1000004] = 'panda-start'
self.errorCodes['transexitcode'][1000005] = 'ValueError message'
self.errorStages['transexitcode'][1000005] = 'panda-start'
def getErrorCodes(self):
return self.errorFields, self.errorCodes, self.errorStages
```
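The class above keeps parallel dictionaries (`errorCodes`, `errorStages`, and the `errorFields` returned alongside them), keyed first by error field and then by numeric code. A minimal standalone sketch of the lookup pattern follows; the dictionaries in it are tiny hypothetical excerpts, not the real class.

```python
# Minimal sketch of the lookup pattern; the two dictionaries are hypothetical
# excerpts of the much larger ones built in the class above.
errorCodes = {'transexitcode': {65: 'Non-zero exit code from transform substep executor'}}
errorStages = {'transexitcode': {65: 'athena-during'}}

def describe_error(field, code):
    """Return (description, stage) for an error field/code pair."""
    desc = errorCodes.get(field, {}).get(code, 'unknown error code')
    stage = errorStages.get(field, {}).get(code, 'unknown stage')
    return desc, stage

print(describe_error('transexitcode', 65))
# -> ('Non-zero exit code from transform substep executor', 'athena-during')
```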
#### File: core/filebrowser/views.py
```python
import logging
import re
import json
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.template.loader import get_template
from django.conf import settings
from .utils import get_rucio_file, get_rucio_pfns_from_guids, fetch_file, get_filebrowser_vo, \
remove_folder, get_fullpath_filebrowser_directory, list_file_directory
from core.oauth.utils import login_customrequired
from core.common.models import Filestable4, FilestableArch
from core.views import DateTimeEncoder, initSelfMonitor
from datetime import datetime
_logger = logging.getLogger('bigpandamon-filebrowser')
filebrowserDateTimeFormat = "%Y %b %d %H:%M:%S"
hostname = "bigpanda.cern.ch"
@login_customrequired
def index(request):
"""
index -- filebrowser's default page
:param request: Django's HTTP request
:type request: django.http.HttpRequest
"""
try:
initSelfMonitor(request)
except:
_logger.exception('Failed to init self monitor')
errors = {}
_logger.debug("index started - " + datetime.now().strftime("%H:%M:%S") + " ")
### check that all expected parameters are in URL
# 'site' is not mandatory anymore, so removing it from the list
expectedFields = ['guid', 'scope', 'lfn']
for expectedField in expectedFields:
try:
request.GET[expectedField]
except:
msg = 'Missing expected GET parameter %s. ' % expectedField
_logger.error(msg)
if 'missingparameter' not in errors.keys():
errors['missingparameter'] = ''
errors['missingparameter'] += msg
### if all expected GET parameters are present, execute file lookup
pfns = []
scope = ''
lfn = ''
guid = ''
site = ''
    pattern_string = r'^[a-zA-Z0-9.\-_]+$'
    pattern_site = r'^[a-zA-Z0-9.,\-_\/]+$'
    pattern_guid = r'^(\{){0,1}[0-9a-zA-Z]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\}){0,1}$'
try:
guid = request.GET['guid']
if re.match(pattern_guid, guid) is None:
guid = None
if 'improperformat' not in errors.keys():
errors['improperformat'] = ''
errors['improperformat'] += 'guid: %s ' % (request.GET['guid'])
except:
pass
try:
site = request.GET['site']
if re.match(pattern_site, site) is None:
site = None
if 'improperformat' not in errors.keys():
errors['improperformat'] = ''
errors['improperformat'] += 'site: %s ' % (request.GET['site'])
except:
pass
try:
lfn = request.GET['lfn']
if re.match(pattern_string, lfn) is None:
lfn = None
if 'improperformat' not in errors.keys():
errors['improperformat'] = ''
errors['improperformat'] += 'lfn: %s ' % (request.GET['lfn'])
except:
pass
try:
scope = request.GET['scope']
if re.match(pattern_string, scope) is None:
scope = None
if 'improperformat' not in errors.keys():
errors['improperformat'] = ''
errors['improperformat'] += 'scope: %s ' % (request.GET['scope'])
except:
pass
    # if the log file size exceeds the limit, return an error message with the rucio CLI command to download it locally
max_sizemb = 1000
sizemb = -1
fsize = []
try:
fileid = int(request.GET['fileid'])
except:
fileid = -1
lquery = {'type': 'log'}
if lfn and len(lfn) > 0:
lquery['lfn'] = lfn
fsize.extend(Filestable4.objects.filter(**lquery).values('fsize', 'fileid', 'status'))
if len(fsize) == 0:
fsize.extend(FilestableArch.objects.filter(**lquery).values('fsize', 'fileid', 'status'))
if len(fsize) > 0:
try:
if fileid > 0:
sizemb = round(int([f['fsize'] for f in fsize if f['fileid'] == fileid][0])/1000/1000)
else:
sizemb = round(int([f['fsize'] for f in fsize][0])/1000/1000)
except:
_logger.warning("ERROR!!! Failed to calculate log tarball size in MB")
_logger.debug("index step1 - " + datetime.now().strftime("%H:%M:%S") + " ")
### download the file
files = []
dirprefix = ''
tardir = ''
if sizemb > max_sizemb:
_logger.warning('Size of the requested log is {} MB which is more than limit {} MB'.format(sizemb, max_sizemb))
errormessage = """The size of requested log is too big ({}MB).
Please try to download it locally using Rucio CLI by the next command:
rucio download {}:{}""".format(sizemb, scope, lfn)
data = {
'errormessage': errormessage
}
return render_to_response('errorPage.html', data, content_type='text/html')
if not (guid is None or lfn is None or scope is None):
files, errtxt, dirprefix, tardir = get_rucio_file(scope,lfn, guid, 100)
else:
errormessage = ''
if guid is None:
errormessage = 'No guid provided.'
elif lfn is None:
errormessage = 'No lfn provided.'
elif scope is None:
errormessage = 'No scope provided.'
_logger.warning(errormessage)
data = {
'errormessage': errormessage
}
return render_to_response('errorPage.html', data, content_type='text/html')
if not len(files):
        msg = 'Something went wrong while downloading the log file. [guid=%s, site=%s, scope=%s, lfn=%s] \n' % \
              (guid, site, scope, lfn)
_logger.warning(msg)
errors['download'] = msg
if len(errtxt):
if 'download' not in errors:
errors['download'] = ''
errors['download'] += errtxt
_logger.debug("index step2 - " + datetime.now().strftime("%H:%M:%S") + " ")
totalLogSize = 0
if type(files) is list and len(files) > 0:
for file in files:
totalLogSize += file['size'] if 'size' in file and file['size'] > 0 else 0
# from B to MB
if totalLogSize > 0:
totalLogSize = round(totalLogSize*1.0/1024/1024, 2)
### return the file page
### set request response data
data = {
'request': request,
'errors': errors,
'pfns': pfns,
'files': files,
'dirprefix': dirprefix,
'tardir': tardir,
'scope': scope,
'lfn': lfn,
'site': site,
'guid': guid,
'MEDIA_URL': settings.MEDIA_URL,
'viewParams' : {'MON_VO': str(get_filebrowser_vo()).upper()},
'HOSTNAME': hostname,
'totalLogSize': totalLogSize,
'nfiles': len(files),
}
_logger.debug("index step3 - " + datetime.now().strftime("%H:%M:%S") + " ")
if 'json' not in request.GET:
status = 200
# return 500 if most probably there were issue
if 'download' in errors and errors['download'] and len(errors['download']) > 0:
if len(fsize) > 0 and 'status' in fsize[0] and fsize[0]['status'] != 'failed' and sizemb <= 0:
status = 500
return render_to_response('filebrowser/filebrowser_index.html', data, RequestContext(request), status=status)
else:
resp = HttpResponse(json.dumps(data, cls=DateTimeEncoder), content_type='application/json')
_logger.debug("index step4 - " + datetime.now().strftime("%H:%M:%S") + " ")
return resp
def api_single_pandaid(request):
"""
api_single_pandaid -- return log file URL for a single PanDA job
:param request: Django's HTTP request
:type request: django.http.HttpRequest
"""
errors = {}
### check that all expected parameters are in URL
# expectedFields = ['guid', 'site', 'scope', 'lfn']
expectedFields = ['pandaid']
for expectedField in expectedFields:
try:
if len(request.GET[expectedField]) < 1:
msg = 'Missing expected GET parameter %s. ' % expectedField
if 'missingparameter' not in errors.keys():
errors['missingparameter'] = ''
errors['missingparameter'] += msg
except:
msg = 'Missing expected GET parameter %s. ' % expectedField
_logger.error(msg)
if 'missingparameter' not in errors.keys():
errors['missingparameter'] = ''
errors['missingparameter'] += msg
### if all expected GET parameters are present, execute file lookup
pfns = []
scope = ''
lfn = ''
guid = ''
site = ''
pandaid = None
status = ''
query = {}
query['type'] = 'log'
try:
pandaid = int(request.GET['pandaid'])
except:
pass
query['pandaid'] = pandaid
file_properties = []
try:
file_properties = Filestable4.objects.filter(**query).values('pandaid', 'guid', \
'scope', 'lfn', 'destinationse', 'status')
except:
pass
if len(file_properties):
file_properties = file_properties[0]
try:
guid = file_properties['guid']
except:
pass
try:
site = file_properties['destinationse']
except:
pass
try:
lfn = file_properties['lfn']
except:
pass
try:
scope = file_properties['scope']
except:
pass
try:
status = file_properties['status']
except:
pass
if 'missingparameter' not in errors.keys():
pfns, errtxt = get_rucio_pfns_from_guids(guids=[guid], site=[site], \
lfns=[lfn], scopes=[scope])
if len(errtxt):
if 'lookup' not in errors:
errors['lookup'] = ''
errors['lookup'] += errtxt
### download the file
files = []
dirprefix = ''
tardir = ''
if len(pfns):
pfn = pfns[0]
files, errtxt, dirprefix, tardir = fetch_file(pfn, guid, unpack=False, listfiles=False)
        if not len(pfns):
            msg = 'File download failed. [pfns=%s guid=%s, site=%s, scope=%s, lfn=%s]' % \
                  (str(pfns), guid, site, scope, lfn)
            _logger.warning(msg)
            errors['download'] = msg
if len(errtxt):
if 'download' in errors:
errors['download'] += errtxt
else: # file not found in DB
if 'lookup' not in errors:
errors['lookup'] = ''
errors['lookup'] += 'Log file for this job has not been found. '
### return the file page
url = None
data = { \
'pandaid': pandaid, \
'url': url, \
'errors': errors, \
'pfns': pfns, \
'scope': scope, \
'lfn': lfn, \
'site': site, \
'guid': guid, \
'status': status, \
'timestamp': datetime.utcnow().isoformat() \
}
if not len(errors):
url = 'http://' + hostname + \
settings.MEDIA_URL + dirprefix + '/' + lfn
data['url'] = url
### set request response data
return render_to_response('filebrowser/filebrowser_api_single_pandaid.html', {'data': data}, RequestContext(request))
elif 'pandaid' not in request.GET.keys() or pandaid == None:
t = get_template('filebrowser/filebrowser_api_single_pandaid.html')
context = RequestContext(request, {'data':data})
return HttpResponse(t.render(context), status=400)
elif not len(file_properties):
t = get_template('filebrowser/filebrowser_api_single_pandaid.html')
context = RequestContext(request, {'data':data})
return HttpResponse(t.render(context), status=404)
else:
t = get_template('filebrowser/filebrowser_api_single_pandaid.html')
context = RequestContext(request, {'data':data})
return HttpResponse(t.render(context), status=400)
def get_job_log_file_path(pandaid, filename=''):
"""
Download log tarball of a job and return path to a local copy of memory_monitor_output.txt file
:param pandaid:
    :param filename: str, if empty the function returns the path to the tarball folder
:return: file_path: str
"""
file_path = None
files = []
scope = ''
lfn = ''
guid = ''
dirprefix = ''
tardir = ''
query = {}
query['type'] = 'log'
query['pandaid'] = int(pandaid)
values = ['pandaid', 'guid', 'scope', 'lfn']
file_properties = []
file_properties.extend(Filestable4.objects.filter(**query).values(*values))
if len(file_properties) == 0:
file_properties.extend(FilestableArch.objects.filter(**query).values(*values))
if len(file_properties):
file_properties = file_properties[0]
try:
guid = file_properties['guid']
except:
pass
try:
lfn = file_properties['lfn']
except:
pass
try:
scope = file_properties['scope']
except:
pass
if guid and lfn and scope:
# check if files are already available in common CEPH storage
tarball_path = get_fullpath_filebrowser_directory() + '/' + guid.lower() + '/' + scope + '/'
files, err, tardir = list_file_directory(tarball_path, 100)
_logger.debug('tarball path is {} \nError message is {} \nGot tardir: {}'.format(tarball_path, err, tardir))
if len(files) == 0 and len(err) > 0:
# download tarball
_logger.debug('log tarball has not been downloaded, so downloading it now')
files, errtxt, dirprefix, tardir = get_rucio_file(scope, lfn, guid)
_logger.debug('Got files for dir: {} and tardir: {}. Error message: {}'.format(dirprefix, tardir, errtxt))
if type(files) is list and len(files) > 0 and len(filename) > 0:
for f in files:
if f['name'] == filename:
file_path = tarball_path + '/' + tardir + '/' + filename
_logger.debug('Final path of {} file: {}'.format(filename, file_path))
return file_path
def delete_files(request):
"""
Clear subfolder containing log files
:param request:
:return:
"""
### check that path to logs is provided
guid = None
try:
guid = request.GET['guid']
except:
msg = 'Missing guid GET parameter'
_logger.error(msg)
### clean folder if guid provided
if guid is not None:
logdir = remove_folder(guid)
data = {'message':'The folder was cleaned ' + logdir}
return HttpResponse(json.dumps(data), content_type='application/json')
else:
return HttpResponse(status=404)
```
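A hedged usage sketch for `get_job_log_file_path()` defined above: it needs a fully configured bigpandamon Django environment with database and storage access, so the call below is illustrative only and the PanDA job id is made up.

```python
# Illustrative only: requires Django settings, DB and storage access;
# the pandaid below is a hypothetical example value.
path = get_job_log_file_path(1234567890, filename='memory_monitor_output.txt')
if path is not None:
    with open(path) as f:
        print(f.readline())
else:
    print('requested file not found in the log tarball')
```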
#### File: core/iDDS/rawsqlquery.py
```python
from django.db import connection
from core.libs.exlib import dictfetchall
from core.iDDS.useconstants import SubstitleValue
subtitleValue = SubstitleValue()
def getTransforms(requestid):
sqlpar = {"requestid": requestid}
sql = """
select r.request_id, wt.transform_id
from atlas_idds.requests r
full outer join (
select request_id, workprogress_id from atlas_idds.workprogresses
) wp on (r.request_id=wp.request_id)
full outer join atlas_idds.wp2transforms wt on (wp.workprogress_id=wt.workprogress_id)
where r.request_id=:requestid
"""
cur = connection.cursor()
cur.execute(sql, sqlpar)
rows = dictfetchall(cur)
cur.close()
return rows
def getRequests(query_params):
condition = '(1=1)'
sqlpar = {}
if query_params and len(query_params) > 0:
query_params = subtitleValue.replaceInverseKeys('requests', query_params)
if 'reqstatus' in query_params:
sqlpar['rstatus'] = query_params['reqstatus']
condition = 'r.status = :rstatus'
sql = """
select r.request_id, r.scope, r.name, r.status, tr.transform_id, tr.transform_status, tr.in_status, tr.in_total_files, tr.in_processed_files, tr.out_status, tr.out_total_files, tr.out_processed_files
from ATLAS_IDDS.requests r
full outer join (
select t.request_id, t.transform_id, t.status transform_status, in_coll.status in_status, in_coll.total_files in_total_files,
in_coll.processed_files in_processed_files, out_coll.status out_status, out_coll.total_files out_total_files,
out_coll.processed_files out_processed_files
from ATLAS_IDDS.transforms t
full outer join (select coll_id , transform_id, status, total_files, processed_files from ATLAS_IDDS.collections where relation_type = 0) in_coll on (t.transform_id = in_coll.transform_id)
full outer join (select coll_id , transform_id, status, total_files, processed_files from ATLAS_IDDS.collections where relation_type = 1) out_coll on (t.transform_id = out_coll.transform_id)
) tr on (r.request_id=tr.request_id)
where %s
""" % condition
cur = connection.cursor()
cur.execute(sql, sqlpar)
rows = dictfetchall(cur)
cur.close()
return rows
```
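A hedged usage sketch for the two raw-SQL helpers above; both need a Django database connection with access to the ATLAS_IDDS schema, and the status value and request id below are placeholders.

```python
# Illustrative only: requires DB access to the ATLAS_IDDS schema; the request
# status string and the request id are placeholders.
for row in getRequests({'reqstatus': 'transforming'})[:5]:
    print(row)
for row in getTransforms(12345)[:5]:
    print(row)
```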
#### File: core/libs/dropalgorithm.py
```python
import random
import copy
import logging
import time
from django.db import connection
from django.utils import timezone
from core.libs.job import is_event_service
from core.common.models import JediJobRetryHistory
from core.settings.local import dbaccess
_logger = logging.getLogger('bigpandamon')
def drop_job_retries(jobs, jeditaskid, **kwards):
"""
Dropping algorithm for jobs belong to a single task
Mandatory job's attributes:
PANDAID
JOBSTATUS
PROCESSINGTYPE
JOBSETID
SPECIALHANDLING
:param jobs: list
:param jeditaskid: int
:return:
"""
start_time = time.time()
is_return_dropped_jobs = False
if 'is_return_dropped_jobs' in kwards:
is_return_dropped_jobs = True
drop_list = []
droppedIDs = set()
drop_merge_list = set()
# get job retry history for a task
retryquery = {
'jeditaskid': jeditaskid
}
extra = """
OLDPANDAID != NEWPANDAID
AND RELATIONTYPE IN ('', 'retry', 'pmerge', 'merge', 'jobset_retry', 'es_merge', 'originpandaid')
"""
retries = JediJobRetryHistory.objects.filter(**retryquery).extra(where=[extra]).order_by('newpandaid').values()
_logger.info('Got {} retries whereas total number of jobs is {}: {} sec'.format(len(retries), len(jobs),
(time.time() - start_time)))
hashRetries = {}
for retry in retries:
hashRetries[retry['oldpandaid']] = retry
newjobs = []
for job in jobs:
dropJob = 0
pandaid = job['pandaid']
if not is_event_service(job):
if pandaid in hashRetries:
retry = hashRetries[pandaid]
if retry['relationtype'] in ('', 'retry') or (job['processingtype'] == 'pmerge' and job['jobstatus'] in ('failed', 'cancelled') and retry['relationtype'] == 'merge'):
dropJob = retry['newpandaid']
else:
if job['jobsetid'] in hashRetries and hashRetries[job['jobsetid']]['relationtype'] == 'jobset_retry':
dropJob = 1
else:
if job['pandaid'] in hashRetries and job['jobstatus'] not in ('finished', 'merging'):
if hashRetries[job['pandaid']]['relationtype'] == 'retry':
dropJob = 1
# if hashRetries[job['pandaid']]['relationtype'] == 'es_merge' and job['jobsubstatus'] == 'es_merge':
# dropJob = 1
if dropJob == 0:
if job['jobsetid'] in hashRetries and hashRetries[job['jobsetid']]['relationtype'] == 'jobset_retry':
dropJob = 1
if job['jobstatus'] == 'closed' and job['jobsubstatus'] in ('es_unused', 'es_inaction',):
dropJob = 1
if dropJob == 0 and not is_return_dropped_jobs:
# and not (
# 'processingtype' in request.session['requestParams'] and request.session['requestParams'][
# 'processingtype'] == 'pmerge')
if job['processingtype'] != 'pmerge':
newjobs.append(job)
else:
drop_merge_list.add(pandaid)
elif dropJob == 0:
newjobs.append(job)
else:
if pandaid not in droppedIDs:
droppedIDs.add(pandaid)
drop_list.append({'pandaid': pandaid, 'newpandaid': dropJob})
_logger.info('{} jobs dropped: {} sec'.format(len(jobs) - len(newjobs), time.time() - start_time))
drop_list = sorted(drop_list, key=lambda x: -x['pandaid'])
jobs = newjobs
return jobs, drop_list, drop_merge_list
def insert_dropped_jobs_to_tmp_table(query, extra):
"""
:return: extra sql query
"""
newquery = copy.deepcopy(query)
# insert retried pandaids to tmp table
if dbaccess['default']['ENGINE'].find('oracle') >= 0:
tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1DEBUG"
else:
tmpTableName = "TMP_IDS1DEBUG"
transactionKey = random.randrange(1000000)
new_cur = connection.cursor()
jeditaskid = newquery['jeditaskid']
ins_query = """
INSERT INTO {0}
(ID,TRANSACTIONKEY,INS_TIME)
select pandaid, {1}, TO_DATE('{2}', 'YYYY-MM-DD') from (
select unique pandaid from (
select j.pandaid, j.jeditaskid, j.eventservice, j.specialhandling, j.jobstatus, j.jobsetid, j.jobsubstatus, j.processingtype,
h.oldpandaid, h.relationtype, h.newpandaid
from (
select ja4.pandaid, ja4.jeditaskid, ja4.eventservice, ja4.specialhandling, ja4.jobstatus, ja4.jobsetid, ja4.jobsubstatus, ja4.processingtype
from ATLAS_PANDA.JOBSARCHIVED4 ja4 where ja4.jeditaskid = {3}
union
select ja.pandaid, ja.jeditaskid, ja.eventservice, ja.specialhandling, ja.jobstatus, ja.jobsetid, ja.jobsubstatus, ja.processingtype
from ATLAS_PANDAARCH.JOBSARCHIVED ja where ja.jeditaskid = {4}
) j
LEFT JOIN
ATLAS_PANDA.jedi_job_retry_history h
ON (h.jeditaskid = j.jeditaskid AND h.oldpandaid = j.pandaid)
OR (h.oldpandaid=j.jobsetid and h.jeditaskid = j.jeditaskid)
)
where
(oldpandaid is not null
AND oldpandaid != newpandaid
AND relationtype in ('', 'retry', 'pmerge', 'merge', 'jobset_retry', 'es_merge', 'originpandaid')
AND
((
(oldpandaid = pandaid and NOT (eventservice is not NULL and not specialhandling like '%sc:%')
AND (relationtype='' OR relationtype='retry'
or (processingtype='pmerge'
and jobstatus in ('failed','cancelled')
and relationtype='merge')
)
)
OR
(
(oldpandaid = pandaid and eventservice in (1,2,4,5) and specialhandling not like '%sc:%')
AND
(
(jobstatus not IN ('finished', 'merging') AND relationtype='retry')
OR
(jobstatus='closed' and (jobsubstatus in ('es_unused', 'es_inaction')))
)
)
)
OR (oldpandaid=jobsetid and relationtype = 'jobset_retry')
)
)
OR (jobstatus='closed' and (jobsubstatus in ('es_unused', 'es_inaction')))
)
""".format(tmpTableName, transactionKey, timezone.now().strftime("%Y-%m-%d"), jeditaskid, jeditaskid)
new_cur.execute(ins_query)
# form an extra query condition to exclude retried pandaids from selection
extra += " AND pandaid not in ( select id from {0} where TRANSACTIONKEY = {1})".format(tmpTableName, transactionKey)
return extra, transactionKey
```
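A sketch of how `drop_job_retries()` might be invoked; the job dicts below are hypothetical, and the function additionally queries `JediJobRetryHistory` through the Django ORM, so it only runs inside a configured environment.

```python
# Hypothetical input: each job dict carries the attributes named in the
# docstring, plus the event-service fields inspected by is_event_service().
jobs = [
    {'pandaid': 101, 'jobstatus': 'failed', 'processingtype': 'simul',
     'jobsetid': 11, 'specialhandling': '', 'eventservice': None, 'jobsubstatus': ''},
    {'pandaid': 102, 'jobstatus': 'finished', 'processingtype': 'simul',
     'jobsetid': 11, 'specialhandling': '', 'eventservice': None, 'jobsubstatus': ''},
]
kept, drop_list, drop_merge_list = drop_job_retries(jobs, jeditaskid=21000000)
print(len(kept), len(drop_list), len(drop_merge_list))
```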
#### File: core/libs/elasticsearch.py
```python
from core.settings.local import ES
from elasticsearch import Elasticsearch
def create_esatlas_connection(verify_certs=True, timeout=2000, max_retries=10,
retry_on_timeout=True):
"""
Create a connection to ElasticSearch cluster
"""
esHost = None
esUser = None
esPassword = None
if 'esHost' in ES:
#esHost = ES['esHost']
esHost = ES['esHost'][0:8] + '1' + ES['esHost'][8:]
if 'esUser' in ES:
esUser = ES['esUser']
if 'esPassword' in ES:
esPassword = ES['esPassword']
try:
connection = Elasticsearch(
['https://{0}/es'.format(esHost)],
http_auth=(esUser, esPassword),
verify_certs=verify_certs,
timeout=timeout,
max_retries=max_retries,
retry_on_timeout=retry_on_timeout,
ca_certs='/etc/pki/tls/certs/ca-bundle.trust.crt'
)
return connection
except Exception as ex:
print(ex)
return None
```
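A hedged usage sketch for the connection helper above, assuming the `ES` settings dict is filled in and the cluster is reachable; the index pattern is a placeholder.

```python
# Illustrative only: the index name is a placeholder and the query is a
# plain match_all, just to show how the returned client would be used.
es = create_esatlas_connection(verify_certs=False)
if es is not None:
    res = es.search(index='atlas_jobs-*', body={'query': {'match_all': {}}}, size=1)
    print(res['hits']['total'])
```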
#### File: core/libs/tasksPlots.py
```python
import time
from datetime import timedelta, datetime
import json
import numpy as np
from django.http import HttpResponse
from core.libs.CustomJSONSerializer import NpEncoder
from core.libs.cache import getCacheEntry, setCacheEntry
from core.libs.exlib import insert_to_temp_table, get_tmp_table_name
from core.libs.task import drop_duplicates, add_job_category, job_consumption_plots
from core.pandajob.models import Jobsdefined4, Jobsarchived, Jobswaiting4, Jobsactive4, Jobsarchived4
pandaSites = {}
def getJobsData(request):
data = {
'error': '',
'data': [],
}
idList = request.GET.get('idtasks', '')
tasksList = getCacheEntry(request, idList, isData=True)
if len(tasksList) == 0:
return HttpResponse(data, status=500, content_type='application/json')
else:
results = get_jobs_plot_data(tasksList)
if len(results['error']) > 0:
data['error'] = results['error']
else:
data['data'] = results['plot_data']
return HttpResponse(json.dumps(data, cls=NpEncoder), content_type='application/json')
def get_jobs_plot_data(taskid_list):
error = ''
plots_list = []
MAX_JOBS = 1000000
MAX_ENTRIES__IN = 100
extra_str = "(1=1)"
query = {}
if len(taskid_list) < MAX_ENTRIES__IN:
query["jeditaskid__in"] = taskid_list
query["jobstatus__in"] = ['finished', 'failed']
else:
# insert taskids to temp DB table
tmp_table_name = get_tmp_table_name()
tk_taskids = insert_to_temp_table(taskid_list)
extra_str += " AND jeditaskid in (select id from {} where TRANSACTIONKEY={} ) ".format(tmp_table_name, tk_taskids)
values = 'actualcorecount', 'eventservice', 'specialhandling', 'modificationtime', 'jobsubstatus', 'pandaid', \
'jobstatus', 'jeditaskid', 'processingtype', 'maxpss', 'starttime', 'endtime', 'computingsite', \
'jobsetid', 'jobmetrics', 'nevents', 'hs06', 'hs06sec', 'cpuconsumptiontime', 'parentid', 'attemptnr', \
'processingtype', 'transformation', 'creationtime'
jobs = []
jobs.extend(Jobsdefined4.objects.filter(**query).extra(where=[extra_str]).values(*values))
jobs.extend(Jobswaiting4.objects.filter(**query).extra(where=[extra_str]).values(*values))
jobs.extend(Jobsactive4.objects.filter(**query).extra(where=[extra_str]).values(*values))
jobs.extend(Jobsarchived4.objects.filter(**query).extra(where=[extra_str]).values(*values))
jobs.extend(Jobsarchived.objects.filter(**query).extra(where=[extra_str]).values(*values))
print("Number of found jobs: {}".format(len(jobs)))
print("Number of sites: {}".format(len(set([j['computingsite'] for j in jobs]))))
if len(jobs) > MAX_JOBS:
error = 'Too many jobs to prepare plots. Please decrease the selection of tasks and try again.'
else:
# drop duplicate jobs
jobs = drop_duplicates(jobs, id='pandaid')
# determine jobs category (build, run or merge)
jobs = add_job_category(jobs)
# prepare data for job consumption plots
plots_list = job_consumption_plots(jobs)
return {'plot_data': plots_list, 'error': error}
```
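A usage sketch for `get_jobs_plot_data()`: it reads the PanDA job tables through the Django ORM, so the call only works inside a configured bigpandamon deployment, and the task ids below are hypothetical.

```python
# Illustrative only: requires DB access; the jeditaskids are made-up values.
result = get_jobs_plot_data([21000001, 21000002])
if result['error']:
    print(result['error'])
else:
    print('prepared %d plot groups' % len(result['plot_data']))
```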
#### File: core/reports/views.py
```python
import json
from django.views.decorators.cache import never_cache
from django.http import HttpResponse
from django.shortcuts import render_to_response
from core.views import initRequest, DateEncoder
from core.reports import MC16aCPReport, ObsoletedTasksReport, TitanProgressReport
@never_cache
def report(request):
initRequest(request)
step = 0
response = None
if 'requestParams' in request.session and 'campaign' in request.session['requestParams'] and request.session['requestParams']['campaign'].upper() == 'MC16':
reportGen = MC16aCPReport.MC16aCPReport()
response = reportGen.prepareReportJEDI(request)
return response
if 'requestParams' in request.session and 'campaign' in request.session['requestParams'] and request.session['requestParams']['campaign'].upper() == 'MC16C':
reportGen = MC16aCPReport.MC16aCPReport()
response = reportGen.prepareReportJEDIMC16c(request)
return response
if 'requestParams' in request.session and 'campaign' in request.session['requestParams'] and request.session['requestParams']['campaign'].upper() == 'MC16A' and 'type' in request.session['requestParams'] and request.session['requestParams']['type'].upper() == 'DCC':
reportGen = MC16aCPReport.MC16aCPReport()
resp = reportGen.getDKBEventsSummaryRequestedBreakDownHashTag(request)
dump = json.dumps(resp, cls=DateEncoder)
return HttpResponse(dump, content_type='application/json')
if 'requestParams' in request.session and 'obstasks' in request.session['requestParams']:
reportGen = ObsoletedTasksReport.ObsoletedTasksReport()
response = reportGen.prepareReport(request)
return response
if 'requestParams' in request.session and 'titanreport' in request.session['requestParams']:
reportGen = TitanProgressReport.TitanProgressReport()
response = reportGen.prepareReport(request)
return response
if 'requestParams' in request.session and 'step' in request.session['requestParams']:
step = int(request.session['requestParams']['step'])
if step == 0:
response = render_to_response('reportWizard.html', {'nevents': 0}, content_type='text/html')
else:
if 'reporttype' in request.session['requestParams'] and request.session['requestParams']['reporttype'] == 'rep0':
reportGen = MC16aCPReport.MC16aCPReport()
response = reportGen.prepareReport()
return response
``` |
{
"source": "jilccallforcode/callforcode2021",
"score": 4
} |
#### File: callforcode2021/pythonscripts/citiforcasts.py
```python
import pandas as pd
import requests
import json
#df_counties = pd.read_csv('Topcities.csv', encoding="utf-8")
df_counties = pd.read_csv('200nccities.csv', encoding="utf-8")
print(df_counties.head(2))
def getTemp(names, latitudes, longitudes, radius=2000):
dfv= pd.DataFrame(columns = ['city','lat', 'lng','apiurl'])
for name, lat, lng in zip(names, latitudes, longitudes):
url='https://api.weather.gov/points/{},{}'.format(lat, lng )
print(url)
results = requests.get(url).json()['properties']
try:
apiurl=results['forecastHourly']
print(apiurl)
df2 = {'city':name,'lat':lat, 'lng':lng,'apiurl':apiurl}
dfv = dfv.append(df2, ignore_index=True)
print(df2)
except:
print("oops")
return(dfv)
nctemps = getTemp(names=df_counties['city'],
latitudes=df_counties['lat'],
longitudes=df_counties['lng']
)
nctemps.head()
nctemps.to_csv('200nccities_tempapi.csv')
``` |
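The `forecastHourly` URL saved per city can then be queried for the actual hourly temperatures. A hedged follow-up sketch; the field names follow the public api.weather.gov response schema at the time of writing and may change.

```python
# Illustrative follow-up: read the first hourly forecast period for the
# first city stored in the frame built above.
row = nctemps.iloc[0]
periods = requests.get(row['apiurl']).json()['properties']['periods']
print('{} {} {}'.format(row['city'], periods[0]['temperature'], periods[0]['temperatureUnit']))
```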
{
"source": "jileidan/coretabs_django1",
"score": 2
} |
#### File: jileidan/coretabs_django1/views.py
```python
from django.http import HttpResponse
def index(request):
return HttpResponse("Hi there, this is our new home page.")
def hello(request, name):
return HttpResponse("Hello {}".format(name))
``` |
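These two views only respond once they are wired into URL patterns. A minimal sketch of a matching `urls.py` follows; the import path and the Django 2+ style `path()` routes are assumptions (a Django 1.x project would use `url()` with regexes instead).

```python
# Hypothetical urls.py for the two views above; adjust the import to the
# actual module location in the project.
from django.urls import path
import views

urlpatterns = [
    path('', views.index, name='index'),
    path('hello/<str:name>/', views.hello, name='hello'),
]
```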
{
"source": "jileiwang/CJ-Glo",
"score": 3
} |
#### File: CJ-Glo/tools/character_count.py
```python
from collections import defaultdict
import codecs
def count(corpus, output_file):
debug = False
dic = defaultdict(int)
other = set()
fout = codecs.open(output_file, 'w', 'utf8')
for line in open(corpus, 'r'):
words = line.split()
for word in words:
if len(word) % 3 == 0:
for i in xrange(len(word) / 3):
                    dic[word[3 * i:3 * i + 3]] += 1
else:
other.add(word)
fout.write('%i %i\n' % (len(dic), len(other)))
record_list = [(y, x) for x, y in dic.items()]
record_list.sort()
record_list.reverse()
i = 0
for x, y in record_list:
#print y.decode('utf8'), x
try:
            yy = y.decode('utf8')
except:
print y
yy = 'N/A'
fout.write('%s %i\n' % (yy, x))
i += 1
if i > 10 and debug:
break
other_list = list(other)
other_list.sort()
for item in other_list:
#print item.decode('utf8')
item2 = item.decode('utf8')
fout.write(item2)
fout.write('\n')
i += 1
if i > 20 and debug:
break
fout.close()
if __name__ =='__main__':
count('data/train.zh_parsed', 'output/count.zh')
count('data/train.ja_parsed', 'output/count.ja')
```
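A small standalone illustration of the 3-byte slicing assumption used in `count()`: CJK characters occupy exactly three bytes in UTF-8, so a token whose byte length is a multiple of three is split into per-character counts, while anything else goes to the `other` set.

```python
# Standalone illustration of the per-character slicing; the example word is
# hypothetical and not taken from the training corpora.
word = u'中文'.encode('utf8')                        # 6 bytes -> two characters
chars = [word[3 * i:3 * (i + 1)] for i in range(len(word) // 3)]
print([c.decode('utf8') for c in chars])             # two single-character strings
```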
#### File: CJ-Glo/tools/count_sentence_length.py
```python
vocab_file = "output/vocab.cj_001"
def getVocab():
vocab = set()
for line in open(vocab_file):
if len(line) >= 3:
vocab.add(line.split()[0])
return vocab
def count(vocab, filename, lang):
dic = {}
for line in open(filename):
cnt = 0
for w in line.split():
v = lang + w
if v in vocab:
cnt += 1
dic[cnt] = dic.get(cnt, 0) + 1
return dic
if __name__ == "__main__":
vocab = getVocab()
zh = count(vocab, 'data/train.zh_parsed', '1')
ja = count(vocab, 'data/train.ja_parsed', '2')
zhLine = ""
jaLine = ""
for i in xrange(331):
zhLine += "%i," % zh.get(i, 0)
jaLine += "%i," % ja.get(i, 0)
print zhLine[:-1]
print jaLine[:-1]
```
#### File: CJ-Glo/tools/generate_mapping_table.py
```python
from collections import defaultdict
"""
The input file is download from:
http://lotus.kuee.kyoto-u.ac.jp/~chu/pubdb/LREC2012/kanji_mapping_table.txt
Removed the introduction text in the head, only character triples remained
in input file.
"""
def check_kanji_length():
"""
Checked by this function, the first item of each triple is excatly one
Kanji charachter.
"""
fin = open('data/kanji_mapping_table_clean.txt', 'r')
for line in fin:
triple = line.split()
kanji = triple[0]
if len(kanji) != 3:
print kanji, len(kanji)
break
def check_duplicated_kanji():
"""
Checked by this function, No duplicated Kanjis in this mapping table's
first column
"""
fin = open('data/kanji_mapping_table_clean.txt', 'r')
kanji_set = set()
for line in fin:
triple = line.split()
kanji = triple[0]
if kanji in kanji_set:
print kanji
break
kanji_set.add(kanji)
print "There are", len(kanji_set), "Kanjis in the mapping table."
def write_tuple_to_file(tuple, filename):
fout = open(filename, 'w')
for kanji, simple_chinese in tuple:
fout.write("%s %s\n" % (kanji, simple_chinese))
fout.close()
def kanji_2_simple_chinese():
"""
Result:
        There are 569 N/A, 5205 single corresponding Simple Chinese
        characters, 582 multiple corresponding Simple Chinese characters,
        in total 6356 lines.
    A Kanji character maps to X Simple Chinese characters:
{0: 569, 1: 5205, 2: 538, 3: 41, 4: 1, 6: 2}
"""
fin = open('data/kanji_mapping_table_clean.txt', 'r')
single_result = []
mutiple_result = []
single = 0
mutiple = 0
na = 0
dic = {}
for line in fin:
triple = line.split()
kanji = triple[0]
simple_chinese = triple[2]
if simple_chinese == 'N/A':
na += 1
elif len(simple_chinese) <= 3:
single += 1
single_result.append((kanji, simple_chinese))
else:
mutiple += 1
simple_chinese = ''.join(simple_chinese.split(','))
mutiple_result.append((kanji, simple_chinese))
key = len(simple_chinese) / 3
dic[key] = dic.get(key, 0) + 1
# write_tuple_to_file(single_result, "output/mapping_table/single_01.txt")
# write_tuple_to_file(mutiple_result, "output/mapping_table/mutiple_01.txt")
result = single_result + mutiple_result
result.sort()
write_tuple_to_file(result, "output/mapping_table/kanji2simplec.txt")
print "There are", na, "N/A,",
print single, "single corresponding Simple Chinese charachters,",
print mutiple, "mutiple corresponding Simple Chinese charachters,",
print "in total", (na + single + mutiple), "lines."
dic[0] = na
dic[1] = single
print "A Kanji Character is mapping to X Simple Chinese Character:"
print dic
def simple_chinese_2_kanji():
"""
    A Simple Chinese character maps to X Kanji characters:
{1: 3873, 2: 903, 3: 189, 4: 34, 5: 5, 6: 1, 7: 1}
"""
fin = open('data/kanji_mapping_table_clean.txt', 'r')
dic = defaultdict(list)
for line in fin:
triple = line.split()
kanji = triple[0]
simple_chinese = triple[2]
if simple_chinese == 'N/A':
continue
elif len(simple_chinese) <= 3:
dic[simple_chinese].append(kanji)
else:
for character in simple_chinese.split(','):
dic[character].append(kanji)
result = [(x, ''.join(y)) for x, y in dic.items()]
result.sort()
write_tuple_to_file(result, "output/mapping_table/simplec2kanji.txt")
dic2 = defaultdict(int)
for key in dic:
dic2[len(dic[key])] += 1
print "A Simple Chinese Character is mapping to X Kanji Character:"
print dic2
def find_same_shape_character():
"""
    The shape of a common character across the 3 categories is sometimes the same, sometimes not.
Only consider lines without "N/A", there are 5 patterns:
Type Kanji Tranditional Simple
0 A A A
1 A A B
2 A B A
3 A B B
4 A B C
    This function counts these types and saves each to a corresponding file.
"""
fin = open('data/kanji_mapping_table_clean.txt', 'r')
dic = defaultdict(list)
type_set = [[] for _ in xrange(6)] # Type 5 is 'N/A'
for line in fin:
triple = line.split()
if 'N/A' in triple:
type_set[5].append(line)
continue
kanji = triple[0]
tranditional = triple[1]
simple = triple[2]
for c_kanji in kanji.split(','):
for c_tranditional in tranditional.split(','):
for c_simple in simple.split(','):
if c_kanji == c_tranditional and c_kanji == c_simple:
type_set[0].append(line)
elif c_kanji == c_tranditional:
type_set[1].append(line)
elif c_kanji == c_simple:
type_set[2].append(line)
elif c_tranditional == c_simple:
type_set[3].append(line)
else:
type_set[4].append(line)
for i in xrange(6):
fout = open("output/mapping_table/character_shape_type_%i.txt" % i, "w")
for line in type_set[i]:
fout.write(line)
fout.close()
def find_same_shape_character_single():
"""
    Same as find_same_shape_character(), but only handles lines where each
    category contains exactly 1 character.
"""
fin = open('data/kanji_mapping_table_clean.txt', 'r')
dic = defaultdict(list)
type_set = [[] for _ in xrange(6)] # Type 5 is line with ','
for line in fin:
if ',' in line:
type_set[5].append(line)
continue
triple = line.split()
if 'N/A' in triple:
continue
c_kanji = triple[0]
c_tranditional = triple[1]
c_simple = triple[2]
if c_kanji == c_tranditional and c_kanji == c_simple:
type_set[0].append(line)
elif c_kanji == c_tranditional:
type_set[1].append(line)
elif c_kanji == c_simple:
type_set[2].append(line)
elif c_tranditional == c_simple:
type_set[3].append(line)
else:
type_set[4].append(line)
for i in xrange(6):
fout = open("output/mapping_table/character_shape_type_single_%i.txt" % i, "w")
for line in type_set[i]:
fout.write(line)
fout.close()
if __name__ == '__main__':
# check_kanji_length()
# check_duplicated_kanji()
# kanji_2_simple_chinese()
# simple_chinese_2_kanji()
# find_same_shape_character()
find_same_shape_character_single()
```
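The mapping files written by `write_tuple_to_file()` are plain two-column text, so reading one back is straightforward. A small sketch using the output path of `kanji_2_simple_chinese()`:

```python
# Illustrative reader for the mapping produced above; each line holds a Kanji
# and the concatenated Simplified Chinese candidates it maps to.
kanji2simple = {}
with open('output/mapping_table/kanji2simplec.txt') as f:
    for line in f:
        parts = line.split()
        if len(parts) == 2:
            kanji2simple[parts[0]] = parts[1]
print(len(kanji2simple))
```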
#### File: CJ-Glo/tools/WordVecDict.py
```python
import argparse
import numpy as np
import sys
class WordVecDict(object):
def __init__(self, vocab_file, vectors_file):
self.vocab_file = vocab_file
self.vectors_file = vectors_file
self.generate()
def generate(self):
with open(self.vocab_file, 'r') as f:
words = [x.rstrip().split(' ')[0] for x in f.readlines()]
with open(self.vectors_file, 'r') as f:
vectors = {}
for line in f:
vals = line.rstrip().split(' ')
vectors[vals[0]] = [float(x) for x in vals[1:]]
vocab_size = len(words)
vocab = {w: idx for idx, w in enumerate(words)}
ivocab = {idx: w for idx, w in enumerate(words)}
vector_dim = len(vectors[ivocab[0]])
W = np.zeros((vocab_size, vector_dim))
for word, v in vectors.items():
if word == '<unk>' or word == '2</s>':
continue
W[vocab[word], :] = v
# normalize each word vector to unit variance
W_norm = np.zeros(W.shape)
d = (np.sum(W ** 2, 1) ** (0.5))
W_norm = (W.T / d).T
self.W = W_norm
self.vocab = vocab
self.ivocab = ivocab
def distance(self, word1, word2, verbose = False):
if not word1 in self.vocab:
print('Word: %s Out of dictionary!\n' % word1)
return
if not word2 in self.vocab:
print('Word: %s Out of dictionary!\n' % word2)
return
vec_result = np.copy(self.W[self.vocab[word1], :])
vec_norm = np.zeros(vec_result.shape)
d = (np.sum(vec_result ** 2,) ** (0.5))
vec_norm = (vec_result.T / d).T
dist = np.dot(self.W, vec_norm.T)
#for term in input_term.split(' '):
index = self.vocab[word1]
dist[index] = -np.Inf
a = np.argsort(-dist)#[:N]
idx = 0
for x in a:
# in same language
if self.ivocab[x][0] == word2[0]:
idx += 1
if verbose:
print idx, self.ivocab[x], dist[x]
if self.ivocab[x] == word2:
return idx, dist[x]
print("Error: %s -> %s %f" % (word1, word2))
return -1, -1
def analogy(self, words, verbose = False):
"""
words = <w0, w1, w2, w3>
Find distance of (w1 - w0 + w2) and w3
"""
v0 = np.copy(self.W[self.vocab[words[0]], :])
v1 = np.copy(self.W[self.vocab[words[1]], :])
v2 = np.copy(self.W[self.vocab[words[2]], :])
v = np.add(v1, v2)
vec_result = np.subtract(v, v0)
vec_norm = np.zeros(vec_result.shape)
d = (np.sum(vec_result ** 2,) ** (0.5))
vec_norm = (vec_result.T / d).T
dist = np.dot(self.W, vec_norm.T)
#for term in input_term.split(' '):
for i in [0, 1, 2]:
index = self.vocab[words[i]]
dist[index] = -np.Inf
a = np.argsort(-dist)#[:N]
idx = 0
for x in a:
# in same language
if self.ivocab[x][0] == words[3][0]:
idx += 1
if verbose:
print idx, self.ivocab[x], dist[x]
if self.ivocab[x] == words[3]:
return idx, dist[x]
print("Error")
return -1, -1
def sentenceVec(self, sentence):
"""
input sentence = <w0, w1, ...>
        calculate the average of the corresponding word vectors <v0, v1, ...>
"""
sen = []
for w in sentence:
if w in self.vocab:
sen.append(w)
if len(sen) == 0:
return None
tmp = self.W[0, :]
vec = np.zeros(tmp.shape)
for w in sen:
v = np.copy(self.W[self.vocab[w], :])
vec = np.add(vec, v)
vec = vec / (len(sen) * 1.0)
return vec
def readWordPairs():
path = "experiment/experiment2_data.txt"
fin = open(path, 'r')
wordPairList = []
for line in fin.readlines():
if len(line) > 2:
words = line.split()
w1 = "2" + words[0].strip()
w2 = "1" + words[1].strip()
wordPairList.append((w1, w2))
return wordPairList
if __name__ == "__main__":
print "reading word embeddings"
wordVecDict = WordVecDict("output/vocab.cj_001", "output/batch_02/ctrl_s_0.03.vec.txt")
print "reading word pairs"
wordPairList = readWordPairs()
print "calculating distances"
ja_wc = 22748
zh_wc = 25219
result = []
# ja, zh word pair
ja_rate = 0
zh_rate = 0
cnt = 0
for word1, word2 in wordPairList:
cnt += 1
idx1, dist1 = wordVecDict.distance(word1, word2)
ja_rate += 1 - idx1 * 1.0 / ja_wc
idx2, dist2 = wordVecDict.distance(word2, word1)
zh_rate += 1 - idx2 * 1.0 / zh_wc
item = (word1, word2, idx1, dist1, idx2, dist2)
result.append(item)
print word1, word2, idx1, dist1, idx2, dist2
ja_rate /= cnt
zh_rate /= cnt
print "Total", cnt, "pairs, ja_rate", ja_rate, "zh_rate", zh_rate
``` |
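A hedged sketch of `sentenceVec()`: it averages the row-normalized vectors of the in-vocabulary words of a sentence, so two sentences can be compared by cosine similarity. The tokens below are hypothetical and follow the language-prefix convention used above ('1' for Chinese, '2' for Japanese).

```python
# Illustrative only: needs the same vocab/vector files as the __main__ block
# above, and the example tokens may not actually be in the vocabulary.
import numpy as np

wordVecDict = WordVecDict("output/vocab.cj_001", "output/batch_02/ctrl_s_0.03.vec.txt")
v1 = wordVecDict.sentenceVec(['2天気', '2予報'])   # Japanese tokens ('2' prefix)
v2 = wordVecDict.sentenceVec(['1天气', '1预报'])   # Chinese tokens ('1' prefix)
if v1 is not None and v2 is not None:
    sim = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    print(sim)
```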
{
"source": "j-i-l/EndemicPy",
"score": 3
} |
#### File: endemic/nw_spread/Spreading.py
```python
__author__ = '<NAME>'
import random
import pickle
from collections import defaultdict
from numpy import vectorize, array, where, absolute, zeros, count_nonzero
from numpy import random as nrand
from numpy import copy as n_copy
from numpy.ma import divide
from numpy import append as n_append
from numpy import sum as n_sum
from numpy import max as n_max
from Queue import Empty, PriorityQueue
from copy import copy, deepcopy
from collections import Counter
from RateDistribution import Distro
def _pick_rand_el(a_list):
"""
Returns and removes a random element from a list.
"""
rand_index = nrand.randint(0, len(a_list))
# swap element with last one (to use pop on it)
a_list[rand_index], a_list[-1] = a_list[-1], a_list[rand_index]
# remove and return the element
return a_list.pop()
def _get_rand_el(a_list):
"""
Returns random element from a list without removing it.
"""
return a_list[nrand.randint(0, len(a_list))]
# to do: define attribute self._seed,
# then nrand = np.random.RandomState(self._seed) so to produce reproducible
# output
class Scenario():
def __init__(self, contact_structure, pathogen, treatment=None, **params):
"""
Arguments:
- contact_structure: An object of the contact_structure class
(one of its subclasses), holding an ensemble of Host class
objects.
The spreading process will take place on this object.
- pathogen: An object of the pathogen class, holding an
ensemble of Strain class objects.
- treatment: An object of the treatment class, holding an
ensemble of drug class objects.
- **params: Additional parameters for the simulation:
- t_max: gives the maximal time after which the simulation
stops. Defaults=1000
- t_burn: time window for the exclusive spread of the wild type.
- dt: the time step at which the current status should be recorded.
- default_susceptibility: Value [0,1] to be used for any missing
susceptibility.
Default=1.
- single_transmission: Boolean (default: False) indicating whether
on contact between a carrying and a susceptible individual
several transmission events should be possible or not.
Note: Traditional SIS, SIR or similar processes are not
restrained to a single transmission per contact.
TODO: Is this even present?
- ignore_dyn_nodes_in_log: If True, nodes that expand their
lifespan are not set to -2 in self.log but keep their state and
may undergo future state changes (e.g. recovery).
This option only has an effect if the contact_structure is a
Temporal graph with specified node lifespans (arguments
nodes_start and nodes_end of the TemporalGraph instance)
"""
self.contact_structure = contact_structure
self.pathogen = pathogen
# Function does most of the init part. If called, the scenario is set
# back to the beginning and a new simulation can be run.
self.set()
# init of optional and internal arguments
self._default_susceptibility = 1 # by default hosts are susceptible
# by default drugs do not increase mutation/selection rates
self._default_drug_selection_factor = 1
# should a contact lead to multiple transmission events:
self.single_transmission = params.get('single_transmission', False)
# set the default value for the time step used to regularly test if a
# quasi steady state is reached
self._dt = params.get(
'dt', # issue: not sure yet about the default value
5 / float(
min(self.pathogen.trans_rates)
) if float(
min(self.pathogen.trans_rates)
) != 0. else 5 / float(
min(self.pathogen.rec_rates)
)
)
# set arguments that can be provided but don't need to
for opt_arg in [
'default_susceptibility', 'default_drug_selection_factor'
]:
if opt_arg in params:
setattr(self, '_' + opt_arg, params.pop(opt_arg))
print '<{0:s}> was specified as an argument.'.format(opt_arg)
# specify number of decimals to round the log times
self._log_time_rounding = params.get('log_time_rounding', 2)
self._ignore_dyn_nodes_in_log = params.get(
'ignore_dyn_nodes_in_log', False
)
self._time_rounding = params.get('time_rounding', 4)
self._resolve_hots_pathogen_relations()
# by default do not consider selection
self.skip_selection = True
# run through all the pathogens provided
for strain_id in xrange(self.pathogen.n):
# get the rates at which this strain 'mutates' to another strain
a_selection = self.pathogen.select_rates[strain_id]
# if this strain changes with a certain rate into another strain
if any([rate != 0 for rate in a_selection]):
# we need to consider selection in the simulation
self.skip_selection = False
self.treatment = treatment
# if the treatment argument is provided
if treatment is None:
# we can ignore treatment (can always be changed later)
self.skip_treatment = True
else:
# we need to take into account treatment
self.skip_treatment = False
self._resolve_treatment_pathogen_relations()
# this will be a list of booleans (index: strain_id, value:
# treating yes/no)
self.treating = []
self.selecting = []
self._inf_file_o = None
# This will be the stream to an output file to write incremental steps
# into.
def _set_seed(self, seed=None):
"""
Create a new seed, set the seed and return it.
Parameters:
-----------
:param seed: If provided, this seed will be used to set the RNG.
It can be a string, in which case it will be treated as the
path to a pickle file containing the seed.
:return: a numpy random seed
"""
# # save seed
# with open('test_seed.p', 'wb') as f:
# pickle.dump(nrand.get_state(), f, protocol=2)
# load seed
if isinstance(seed, str):
with open(seed, 'rb') as f:
nrand.set_state(pickle.load(f))
elif seed is not None:
nrand.set_state(seed)
else:
# init seed
nrand.seed()
return nrand.get_state()
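# Usage sketch (illustrative): self._set_seed() draws and returns a
# fresh RNG state, self._set_seed('some_seed.p') restores a pickled
# state from that (hypothetical) file, and self._set_seed(a_state)
# restores a previously returned state tuple.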
@staticmethod
def _cut_times(recover_time, start_times, stop_times, inf_times, nn):
"""
Filter the nn list and inf_times for realistic events, i.e. events
within an active connection (dynamic case) and within the duration
the node is infected (both cases).
:param recover_time: time of recovery of the infected node.
:param start_times: start times of the contacts (is just 0 in the
static case)
:param stop_times: stop times of the contacts (is just the recover time
in the static case)
:param inf_times: time to infection for each of the nearest neighbours
:param nn: list of the nearest neighbours (note: a neighbour can
figure several times, as multiple transmissions per contact can
occur).
:return:
"""
# set the infection times to 'from now'
inf_times += start_times
# set the end of possible transmission to either the interaction end or
# the recover time
stop_times = where(
stop_times < recover_time,
stop_times,
recover_time
)
# keep only the neighbours in nn which have an infection time smaller
# than the nodes recover_time
return (
nn[inf_times < stop_times],
inf_times[inf_times < stop_times],
)
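# Worked sketch (illustrative numbers): with recover_time=2.0,
# start_times=0., stop_times=recover_time, nn=array([4, 4, 7]) and
# inf_times=array([0.5, 1.4, 2.6]), the stop times are capped at 2.0
# and only events before that survive, giving nn=array([4, 4]) and
# inf_times=array([0.5, 1.4]).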
def _create_neighbour_events(
self, inf_event, nn, inf_times, node_id, token_id
):
"""
This method will be used in any of the (further below defined)
event_handler_... methods if we are dealing with a static network
:param inf_event:
:param nn:
:param inf_times:
:param node_id:
:param token_id:
:return:
"""
# if this event is a mutation from one Strain to another, update the
# count of the old Strain
if not inf_event:
# set the type of infection to mutation
self.current_infection_type[node_id] = 1
self.simulation_log['mutations'].append(
# to do: if we have a dynamic network the degree is replaced
# with the node_id, ideally we should report the activity of
# this node
# to do: ideally we add self.contact_structure.degree
(
self.t, token_id, len(self.contact_structure.nn[node_id])
) if self.contact_structure.is_static else
(self.t, token_id, node_id)
)
current_status = self.current_view[node_id]
if current_status != -1: # to avoid the 'initial mutation' problem
self._count_per_strains[self.current_view[node_id]] -= 1
else:
# set the type of infection to 'through selection'
self.current_infection_type[node_id] = 0
# set the current status of the node to the Strain he is infected with
self.current_view[node_id] = token_id
# put all the infection events of neighbours into the queue
for x in xrange(inf_times.size):
self.queue.put_nowait(
Event(
self.t + inf_times[x],
nn[x],
token_id,
True,
node_id
)
)
# update the counter of the Strain that is infecting.
self._count_per_strains[token_id] += 1
def _create_transmission_events(
self, node_id, token_id, recover_time, therapy_id=-1
):
"""
This method is used whenever a transition is made, e.g. in
reset_transmission (treatment to no treatment), and the
transmission events need to be redrawn.
Note: this method is not used to handle events. It only creates
new ones.
Note: this is probably only correct for a delay of 0
:param recover_time: must be given in dt from current time
:param node_id:
:param token_id:
:param therapy_id:
:return: List of transmission events to be put in the queue
"""
# select the neighbour getter for the static or the dynamic case
if self.contact_structure.is_static:
get_neighbours = self._get_neighbours_static
else:
get_neighbours = self._get_neighbours_dynamic
# draw infection times for all neighbours
# (use delay + therapy if therapy_id is not none)
nn, _recover_time, inf_times, start_times, stop_time = get_neighbours(
node_id, token_id
)
# we can use self._get_neighbours_static/dynamic here as they don't cut
# the inf_times with the recover time; the _recover_time they return is
# not used, as we keep the one given as argument.
# Get nn and times conditioned on recover_time
if therapy_id != -1:
delay = self.therapy_delays[therapy_id][node_id]
if recover_time > delay: # if recovery is after the delay.
inf_times = where(
start_times + inf_times <= delay,
inf_times,
delay + (inf_times - delay) * self.therapy_trans_facts[
therapy_id
][token_id] ** (-1)
)
nn, inf_times = self._cut_times(
recover_time, start_times, stop_time, inf_times, nn
)
# inf_event = 1 as these are infections only
for x in xrange(inf_times.size):
self.queue.put_nowait(
Event(
self.t + inf_times[x],
nn[x],
token_id,
True,
node_id
)
)
def _get_neighbours_static(self, node_id, token_id):
"""
This method will be used in any of the (further below defined)
event_handler_... methods if we are dealing with a static network
:param node_id: the index (id) of the node that is being infected.
:type node_id: int
:param token_id: the identifier for the token the node gets.
:return: tuple with a list of nearest neighbours, the recover time,
the infection times for each neighbour, the starting time of the
interactions (here this is 0 as we have a static network) and the
stop times of each interaction (here this is just the recover time
of the infected node as we are in the static case)
"""
recover_time = self.pathogen.rec_dists[token_id].get_val()
nn = []
inf_times = []
t_inf = 0
for n_neigh in self.contact_structure.nn[node_id]:
t_inf = self.pathogen.trans_dists[token_id].get_val()
while t_inf <= recover_time:
nn.append(n_neigh)
inf_times.append(t_inf)
t_inf += self.pathogen.trans_dists[token_id].get_val()
# the last 2 returned arguments are the start and stop condition for
# the neighbours, which simply boil down to 0 (i.e. now) and the
# recover_time in the static case
return array(nn), recover_time, array(inf_times), 0., recover_time
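# Sketch of the draw loop in this method (illustrative draws): for one
# neighbour and recover_time=2.0, successive transmission draws of
# 0.4, 0.7 and 1.2 give cumulative t_inf values 0.4, 1.1 and 2.3; the
# neighbour is kept twice with inf_times [0.4, 1.1] and the loop stops
# at 2.3.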
def _get_neighbours_static_single(self, node_id, token_id):
"""
This method will be used in any of the (further below defined)
event_handler_... methods if we are dealing with a static network
and if we constrain a contact between a carrying and a susceptible
individual to only ever transmit once.
Note: You should have a particular reason to use this function
instead of the _get_neighbours_static function. The latter is used
by simply not specifying the single_transmission parameter when
initializing a scenario.
Parameter:
----------
:param node_id: the index (id) of the node that is being infected.
:type node_id: int
:param token_id: the identifier for the token the node gets.
:type token_id: int
:return: tuple with a list of nearest neighbours, the recover time,
the infection times for each neighbour, the starting time of the
interactions (here this is 0 as we have a static network) and the
stop times of each interaction (here this is the recover time
of the infected node as we are in the static case)
"""
nn = copy(self.contact_structure.nn[node_id])
recover_time = self.pathogen.rec_dists[token_id].get_val()
inf_times = self.pathogen.trans_dists[token_id].v_get(nn) if nn.size \
else array([])
return nn, recover_time, inf_times, 0., recover_time
def _get_neighbours_dynamic(self, node_id, token_id):
"""
This method will be used in any of the (further below defined)
event_handler_... methods if we are dealing with a dynamic network
:param node_id:
:param token_id:
:return:
"""
recover_time = self.pathogen.rec_dists[token_id].get_val()
# returns all relevant connections: node_ids, start_times, stop_times
diff_nn, _start_times, _stop_times = self.contact_structure.get_events(
node_id,
self.t,
recover_time
)
# cut the start_times with the current time:
_start_times = where(
_start_times >= self.t,
_start_times - self.t,
0.0
)
_stop_times -= self.t
nn = []
inf_times = []
start_times = []
stop_times = []
# TODO: This is list op on np array, not ideal!
for i in xrange(len(diff_nn)):
stop_time = _stop_times[i]
start_time = _start_times[i]
t_inf = self.pathogen.trans_dists[token_id].get_val()
while t_inf + start_time <= stop_time:
nn.append(diff_nn[i])
inf_times.append(t_inf)
start_times.append(start_time)
stop_times.append(stop_time)
t_inf += self.pathogen.trans_dists[token_id].get_val()
return array(
nn
), recover_time, array(
inf_times
), array(
start_times
), array(
stop_times
)
def _get_neighbours_dynamic_single(self, node_id, token_id):
"""
This method will be used in any of the (further below defined)
event_handler_... methods if we are dealing with a dynamic network
:param node_id:
:param token_id:
:return:
"""
recover_time = self.pathogen.rec_dists[token_id].get_val()
nn, start_times, stop_times = self.contact_structure.get_events(
node_id, self.t, recover_time
)
# this should return id_list, start_array, stop_array
inf_times = self.pathogen.trans_dists[token_id].v_get(nn) if nn.size \
else array([])
# cut the start_times with the current time:
start_times = where(
start_times >= self.t,
start_times - self.t,
0.0
)
stop_times -= self.t
return nn, recover_time, inf_times, start_times, stop_times
def set(self, graph=None):
"""
This method allows to set and reset the entire scenario, i.e. the
scenario is set back to the time t=0 before any initial infections
took place.
It can be called when doing multiple simulations with the same
parameters.
If the parameter graph is provided, then the topology of the contact
network is updated. (see the method update_topology in the
ContactNetwork class for details).
:param graph: Graph object from the nw_construct package
:return:
"""
# this will store self.current_view at various times
# Note: this will be replaced with self.outcome so feel free to ignore
self.log = defaultdict(list)
# holds detailed information about what happened during a simulation
self.simulation_log = {
# holds a dict with all the setup parameters at the starting time
# of a new phase.
'phases': {},
# holds all mutation events.
'mutations': [],
# the adjacency matrix of the contact network
'adjacency': {},
# keeps track of any change of the status (e.g. artificial
# infection event)
'modifications': {},
# keeps track of any parameter alternations during the simulation
'param_alternation': {}
}
self.outcome = {}
self.t = self.contact_structure.t_start if not \
self.contact_structure.is_static else 0
# holds the number of infected individuals for each strain
self._count_per_strains = array([0 for _ in xrange(self.pathogen.n)])
# Note: used to contain the status of the host population over time -
# will be redefined and is not in use.
self._counts_over_time = zeros((1, self.pathogen.n))
# initialize the status (-1 means susceptible): everyone is susceptible
self.current_view = [-1 for _ in xrange(self.contact_structure.n)]
# indicate whether host was infected through infection or mutation.
# 0: infection, 1: mutation
self.current_infection_type = [
-1 for _ in xrange(self.contact_structure.n)
] # -1: not infected
# initialize the status of whether or not a host is under treatment.
# (-1: not treated)
self.current_therapy = [-1 for _ in xrange(self.contact_structure.n)]
# Dict that can be used to pass on values between phases.
self._phase_passon = {}
# initialize the priorityqueue which will hold all the events
# (infection, recovering, mutation, ...)
self.queue = PriorityQueue()
# update the topology of the contact structure if a new topology is
# provided.
if graph:
self.contact_structure.update_topology(graph)
return None
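# Reuse sketch (assuming a `scenario` instance and a `phases` list as
# described in the run() docstring further below):
#   for _ in range(10):
#       scenario.set()        # back to t=0 with an empty event queue
#       scenario.run(phases)  # repeat the same simulation setup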
def _resolve_hots_pathogen_relations(self):
"""
It might be that some hosts are not susceptible, or less susceptible,
to some pathogen strains. This method properly establishes the
relation between each host and the present pathogen strains.
See ContactNetwork for more details.
:return:
"""
# run through all the hosts
for host_id in xrange(len(self.contact_structure._susceptible)):
# get the susceptibility status for this host
a_suscept = self.contact_structure._susceptible[host_id]
# get the default value either from the ContactNetwork object
if 'Default' in a_suscept:
default = a_suscept.pop('Default')
# or from self
else:
default = self._default_susceptibility
# initialize all susceptibilities as the default
self.contact_structure.susceptible[host_id] = [
default for _ in xrange(self.pathogen.n)
]
# if other values are provided (e.g. wild_type: 0.5) convert the
# pathogen strain name to its id and set the susceptibility for
# this strain
for strain_name in a_suscept:
self.contact_structure.susceptible[
host_id
][
self.pathogen.ids[strain_name]
] = a_suscept[strain_name]
return 0
def _resolve_treatment_pathogen_relations(self):
"""
This method defines all treatment related parameters. All relevant
parameters are stored in self.therapy_* arguments which are in the
form of lists. The index of the list is equivalent to the therapy
id which can be mapped to the therapy name with the
scenario.treatment.names dict.
A mapping between pathogen strain and corresponding drug is given by
self.therapy_strain_map
:return:
"""
nbr_of_therapies = self.treatment.n
# initialize the probabilities and therapy delays (node specific)
self.therapy_probas = [[]] * nbr_of_therapies
self.therapy_delays = [[]] * nbr_of_therapies
# initialize the list containing for each therapy a factor that will
# multiply the transmission rate
self.therapy_trans_facts = [{}] * nbr_of_therapies
# same thing for the recovery rate
self.therapy_recover_facts = [{}] * nbr_of_therapies
# and the same thing for the selection rate (selection rate: rate at
# which a strain can mutate to another one)
self.therapy_select_facts = [{}] * nbr_of_therapies
# the two dicts below will contain a mapping between therapy and
# strain id and vice versa
self.therapy_strain_id_map = {}
self.strain_therapy_id_map = {}
for a_therapy in self.treatment.therapies:
# get the therapy id
its_id = self.treatment.ids[a_therapy.name]
# initialize the mapping between therapy and strains
self.therapy_strain_id_map[its_id] = []
# set the treatment probabilities for each node
if type(a_therapy.treatment_proba) is float:
self.therapy_probas[its_id] = [
a_therapy.treatment_proba for _ in range(
self.contact_structure.n
)
]
# to do: implement other cases (not uniform)
else:
raise ValueError('Needs to be implemented')
# self.therapy_probas[its_id] = a_therapy.treatment_proba
# if the therapy comes with a delay, handle the delay properly
if type(a_therapy.delay) is float:
self.therapy_delays[its_id] = [
a_therapy.delay for _ in range(self.contact_structure.n)
]
# to do: implement other cases (not uniform)
else:
raise ValueError('Needs to be implemented')
# self.therapy_delays[its_id] = a_therapy.delay
# self.pathogen.ids is a dict like so: {'wild_type': 0,...} so it
# links the name to an id
for strain_name in self.pathogen.ids:
strain_id = self.pathogen.ids[strain_name]
# the try except statement is to test whether the strain_name
# is present in a_therapy.drug.transmission_factor
try:
self.therapy_trans_facts[its_id][
strain_id
] = a_therapy.drug.transmission_factor[strain_name]
self.therapy_strain_id_map[its_id].append(strain_id)
# if the strain_name is not present use the default value
except KeyError:
self.therapy_trans_facts[its_id][
strain_id
] = a_therapy.drug.transmission_factor['Default']
# same story for the recover factor
try:
self.therapy_recover_facts[its_id][
strain_id
] = a_therapy.drug.recover_factor[strain_name]
self.therapy_strain_id_map[its_id].append(strain_id)
# also here, use default if strain_name is not there
except KeyError:
self.therapy_recover_facts[its_id][
strain_id
] = a_therapy.drug.recover_factor['Default']
# and again same for the selection factor
try:
self.therapy_select_facts[its_id][
strain_id
] = a_therapy.drug.selection_factor[strain_name]
self.therapy_strain_id_map[its_id].append(strain_id)
except KeyError:
self.therapy_select_facts[its_id][
strain_id
] = a_therapy.drug.selection_factor['Default']
if self.therapy_select_facts[its_id][strain_id] != 1:
self.skip_selection = False
# it might be we added a strain_id several times to
# self.therapy_strain_id_map[its_id] if so remove the
# duplicates
self.therapy_strain_id_map[its_id] = list(
set(self.therapy_strain_id_map[its_id])
)
# initialize the strain therapy map
self.strain_therapy_id_map[strain_id] = []
# run through all the therapies
for therapy_id in self.therapy_strain_id_map:
# run through all the strain ids for a given therapy (i.e. all the
# strains that are treated with it)
for strain_id in self.therapy_strain_id_map[therapy_id]:
# try to append the therapy id to the mapping in the other
# direction, i.e. to get all the applied therapies given a
# strain id
try:
self.strain_therapy_id_map[strain_id].append(therapy_id)
# if the therapy id was the first one, create the list.
except KeyError:
self.strain_therapy_id_map[strain_id] = [therapy_id]
return 0
# just some custom errors to get more specific output if something
# goes wrong
class WrongPathogenError(Exception):
pass
class NoneImplementationError(Exception):
pass
class InitiateInfectionError(Exception):
pass
class WrongImplementationError(Exception):
pass
# TODO: This method had major conflicts in the last merge > TO CHECK
def _initiate_infection(self, strain, ):
"""
Parameters:
-----------
:param strain: dict, key is the name of a strain, value is a list of
node id's or the name of another pathogen strain or 'random'.
If the value is 'random' then one random host is infected.
If the value gives another pathogen strain, then a randomly
chosen individual that is currently infected with the indicated
pathogen strain is chosen and will be infected with the strain
provided in the key.
If the value is another dict, it can contain the keys 't_inf',
'host' and 'nbr_infections'. 't_inf' specifies the time of the
infection and 'host' can either be a list of node id's, another
pathogen name or 'random'. The default value of 'host' is
'random'. 'nbr_infections' specifies how many hosts should be
infected, default is 1.
Eg. - strain = {'wild_type':[1,5,10]}
Infects hosts 1,5 and 10 with the wild type strain.
- strain = {'wild_type': 'random'}
Infects a single, randomly chosen node with the
wild-type
- strain = {'mutant_1': 'wild_type'}
Infect a randomly chosen host that is currently infected
with the wild_type strain with the mutant_1 strain.
- strain = {'wild_type': {
't_inf': 10, 'host': 'random',
}}
Infect a random host at time 10.
If t_inf is not provided then the current time of the
scenario, i.e. self.t, is used as infection time.
- strain = {'wild_type': {
't_inf': [0, 10],
'host': [['random'], [0, 1, 2]]
}}
Infect a random host with the wild-type at t = 0 and
the hosts 0, 1, and 2 at t = 10.
"""
for name in strain:
# make sure the strain exists
if name not in self.pathogen.ids.keys():
raise self.WrongPathogenError(
"There is no pathogen strain with the name \
<%s>." % name
)
# if node ids are given as values for the specified strain
# TODO: first if is from other branch
# if a dictionary is given, infect random node at time t_inf
if isinstance(strain[name], dict):
t_inf = strain[name].get(
't_inf', self.t
)
hosts = strain[name].get(
'host', 'random' # default is random
)
# carry out the infection at each t_inf
def _expander(_keys, _values):
if isinstance(_keys, list):
if isinstance(_values, list):
if isinstance(_values[0],list):
return _keys, _values
else:
return _keys, [_values for _ in _keys]
else:
return _keys, [[_values] for _ in _keys]
else:
if isinstance(_values, list):
return [_keys], [_values]
else:
return [_keys], [[_values]]
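# A sketch of what _expander returns (derived from the branches
# above; values are illustrative):
#   _expander(10, 'random')      -> ([10], [['random']])
#   _expander([0, 10], 'random') -> ([0, 10], [['random'], ['random']])
#   _expander([0, 10], [['random'], [0, 1, 2]]) -> unchanged
# i.e. t_inf becomes a list of infection times and hosts a list of
# host specifications, one per infection time.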
t_inf, hosts = _expander(t_inf, hosts)
for i in xrange(len(t_inf)):
a_t_inf = t_inf[i]
if not self.contact_structure.is_static:
candidate_nodes = \
self.contact_structure.get_nodes_by_lifetime(
a_t_inf
)
else:
candidate_nodes = range(self.contact_structure.n)
if len(candidate_nodes) < 1:
raise self.InitiateInfectionError(
"""
No host at time %s to be infected.
""" % a_t_inf
)
# now for all the infection times we have a set of present
# hosts
for a_host in hosts[i]:
if a_host == 'random':
the_host = random.choice(
candidate_nodes
)
self.queue.put_nowait(
Event(
a_t_inf, the_host,
self.pathogen.ids[name], False,
)
)
# if the host is specified by id
elif isinstance(a_host, int):
if a_host not in candidate_nodes:
raise self.InitiateInfectionError(
"""
The host with ID %s does not exist at
the time it should be infected, %s.
""" % (a_host, a_t_inf)
)
self.queue.put_nowait(
Event(
a_t_inf,
a_host,
self.pathogen.ids[name],
False,
)
)
# the host must be a specific strain
else:
if a_t_inf != self.t:
raise self.InitiateInfectionError(
"""
The targeted infection of a host
infected with a specific pathogen
is only possible if the infection
time is the current time of the
simulation.
Current time: %s
Time of infection: %s
""" % (self.t, a_t_inf)
)
# get the ids of name and hosts
target_id = self.pathogen.ids[a_host]
mut_id = self.pathogen.ids[name]
# check if the target pathogen is present in the
# population, if not raise an error.
potentials = filter(
lambda x: x in filter(
lambda _x: self.current_view[
_x] == target_id,
range(self.contact_structure.n)
), candidate_nodes
)
if not potentials:
raise self.InitiateInfectionError(
"""
There are no host infected with %s at
the moment.
""" % a_host
)
# choose at random one of the specified infected
# hosts -> new_mutated.
new_mutated = random.choice(potentials)
# now that we have chosen our host to mutate, we
# need to filter the existing event queue for any
# events associated with this host (e.g. this host
# will not infect its neighbours with its previous
# strain any longer).
# copy the event queue into a list
pending_events = []
while True:
try:
pending_events.append(
self.queue.get_nowait())
except Empty:
break
# remove all entries where new_mutated infects plus
# the one where it recovers, and recreate the queue
for an_event in pending_events:
# filter out infections where new_mutated is
# the source
if an_event[1][3] != new_mutated:
# do not take the recover event for
# new_mutated
if an_event[1][0] != new_mutated:
self.queue.put_nowait(an_event)
# add infection event of new_mutated with hosts
# make sure that the infection overwrites
# new_mutated's old status (use inf_event = False)
self.queue.put_nowait(
Event(a_t_inf, new_mutated, mut_id, False,)
)
# type(strain[name]) is not str:
elif isinstance(strain[name], list):
for node_id in strain[name]:
# create infection events for the specified node.
# See Event class for further details
self.queue.put_nowait(
Event(
self.t,
node_id,
self.pathogen.ids[name],
False,
)
)
# in this case we need to choose at random an individual and create
# an infection event
elif strain[name] == 'random':
random_node_id = nrand.randint(0, self.contact_structure.n)
self.queue.put_nowait(
Event(
self.t,
random_node_id,
self.pathogen.ids[name],
False,
)
)
# if self._count_per_strains[random_node_id] == 0:
# if another strain is the value
elif strain[name] in self.pathogen.ids.keys():
# get the ids of name and strain[name]
target_id = self.pathogen.ids[strain[name]]
mut_id = self.pathogen.ids[name]
# check if strain[name] is present in the population, if not
# raise error.
potentials = [
xx[0] for xx in filter(
lambda x: x[1] == target_id,
zip(range(self.contact_structure.n), self.current_view)
)
]
if not potentials:
raise self.InitiateInfectionError(
'There are no host infected with \
%s.' % strain[name]
)
# choose randomly one of the strain[name] infected
# hosts -> new_mutated.
new_mutated = random.choice(potentials)
# now that we have chosen our host to mutate, we need to filter
# the existing event queue for any events associated with this
# host (e.g. this host will not infect its neighbours with its
# previous strain any longer).
# copy the event queue into a list
pending_events = []
while True:
try:
pending_events.append(self.queue.get_nowait())
except Empty:
break
# remove all entries where new_mutated infects plus the one
# where it recovers, and recreate the queue
for an_event in pending_events:
# filter out infections where new_mutated is the source
if an_event[1][3] != new_mutated:
# do not take the recover event for new_mutated
if an_event[1][0] != new_mutated:
self.queue.put_nowait(an_event)
# add infection event of new_mutated with strain[name]
# make sure that the infection overwrites new_mutated's old
# status (use inf_event = False)
self.queue.put_nowait(
Event(self.t, new_mutated, mut_id, False,)
)
else:
raise self.InitiateInfectionError(
'The new infection event failed. Have a look at how to \
specify an infection event:\n{}'.format(
self._initiate_infection.__doc__
)
)
return 0
# # this method is not used anymore. Could be removed.
# def initiate_infection(self, strain, ):
# """
# Function to set the initial seed for an infection.
#
# Arguments:
# - strains: dict, key the name of a strain, value a list of node
# id's or 'random'. If the value is 'random' then one random
# host is infected.
# Eg. strain = {'wild_type':[1,5,10]}: infects _hosts 1,5 and
# 10 with the wild type strain.
# """
# self.t = 0
# for name in strain:
# if name not in self.pathogen.ids.keys():
# raise self.WrongPathogenError(
# "There is no pathogen strain with the name <%s>." % name)
# if type(strain[name]) is not str:
# for node_id in strain[name]:
# self.current_view[node_id] = self.pathogen.ids[name]
# else:
# self.current_view[
# nrand.randint(0, self.contact_structure.n)
# ] = self.pathogen.ids[name]
# self._init_queue()
# return 0
# # unused method can be removed (along with self.initiate_infection)
# def _init_queue(self, ):
# """
# Initiate the priority queue according to self.current_view
# """
# for node_id in xrange(self.contact_structure.n):
# if self.current_view[node_id] != -1:
# self.queue.put_nowait(
# Event(self.t, node_id, self.current_view[node_id], True,)
# )
# self.current_view[node_id] = -1
# return 0
# here below follow several _handle_event... functions each one of these
# take an event (node id, token id, inf type, source) as an argument
# (see Event class for further details) and digest it. Based on the event
# self.current_view will be updated and new events will be created
# method to handle events if we have both treatment and mutation/selection.
# Maybe best start with the method self._handle_event_simple as this is for
# the most trivial case (no treatment, no selection/mutation)
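# A concrete sketch of such an event argument (values are
# illustrative; the exact Event layout is defined elsewhere in this
# module):
#   (3, 0, True, 7)     -> node 3 gets strain 0 via an infection
#                          event whose source is node 7
#   (3, -1, True, None) -> node 3 recovers (token_id == -1)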
# with selection & treatment
def _handle_event_combined(self, an_event, get_neighbours):
"""
This method handles the events in a spreading process with selection
(mutation + selection) and treatment.
:param an_event:
:return:
"""
node_id, token_id, inf_event, source = an_event
if token_id == -1: # the Event is recovering
old_strain_id = self.current_view[node_id]
self.contact_structure.susceptible[
node_id
][old_strain_id] = self.pathogen.rec_types[old_strain_id]
# set the node status back to susceptible
self.current_view[node_id] = -1
self.current_infection_type[node_id] = -1
# set the treatment status back to not treated
self.current_therapy[node_id] = -1
self._count_per_strains[old_strain_id] -= 1
# the Event is an infection or selection
else:
# infection of infected host: do nothing
if inf_event and self.current_view[node_id] != -1:
# NOTE: if super-infections are possible, here they take place
pass
# infection of susceptible host or mutation
else:
if nrand.rand() < self.contact_structure.susceptible[
node_id
][token_id] or not inf_event:
nn, recover_time, inf_times, start_times, stop_times = \
get_neighbours(node_id, token_id)
# ##
# This is the part devoted to selection and treatment
# ##
if self.selecting[token_id]:
selected_strain_id, selection_times = \
self.pathogen.get_selected(token_id)
if selection_times[selected_strain_id] < recover_time:
recover_time = selection_times[selected_strain_id]
new_token = selected_strain_id
new_inf_event = False
else:
new_token, new_inf_event = -1, True
else:
new_token, new_inf_event = -1, True
if self.treating[token_id]:
therapy_ids = self.strain_therapy_id_map[token_id]
# to do: gather the various times and choose at random
# one, not the smallest as now.
# print self.therapy_select_facts
# issue: this does not work if we have more than one
# therapy.
for therapy_id in therapy_ids:
if nrand.rand() < self.therapy_probas[
therapy_id
][node_id]:
# set the therapy
self.current_therapy[node_id] = therapy_id
delay = self.therapy_delays[
therapy_id][node_id]
# will recover after treatment delay
if recover_time > delay:
recover_time = delay + (
recover_time - delay
) * self.therapy_recover_facts[
therapy_id
][token_id] ** (-1)
if self.selecting[token_id]:
selection_times = [
delay +
(selection_times[x] - delay) *
self.therapy_select_facts[
therapy_id
][x] ** (-1)
for x in xrange(
len(selection_times)
)
] # x is the id of potential mutant
selected_strain_id = \
selection_times.index(
min(selection_times)
)
if recover_time > selection_times[
selected_strain_id
]:
recover_time = selection_times[
selected_strain_id
]
new_token, new_inf_event = \
selected_strain_id, False
inf_times = where(
start_times + inf_times <= delay,
inf_times,
delay + (inf_times - delay)
* self.therapy_trans_facts[
therapy_id
][token_id] ** (-1)
)
# ##
# cutting the infection times is only needed because of the
# treatment
nn, inf_times = self._cut_times(
recover_time,
start_times,
stop_times,
inf_times,
nn
)
self.queue.put_nowait(
Event(
self.t + recover_time,
node_id,
new_token,
new_inf_event,
)
)
self._create_neighbour_events(
inf_event,
nn,
inf_times,
node_id,
token_id
)
return 0
# no selection, no treatment (but might still need to handle selections)
def _handle_event_simple(self, an_event, get_neighbours):
"""
This method handles events in a spreading process without treatment nor
selection.
:param an_event:
:return:
"""
# token_id is the id of the pathogen -1 means recovering/dead
node_id, token_id, inf_event, source = an_event
# the Event is recovering
if token_id == -1:
# what was the old status of that node
old_strain_id = self.current_view[node_id]
# self.pathogen.rec_types: a list indicating how one recovers after
# an infection. The index is the pathogen id and the value is
# either 0 or 1, meaning either back to susceptible or resistant.
self.contact_structure.susceptible[
node_id
][old_strain_id] = self.pathogen.rec_types[old_strain_id]
# set the node back to the uninfected state
self.current_view[node_id] = -1
# set the infection type back
self.current_infection_type[node_id] = -1
# set the treatment status back to not treated
self.current_therapy[node_id] = -1
# update the count of number of infected for that strain
self._count_per_strains[old_strain_id] -= 1
# the Event is an infection
else:
# infection of infected host: do nothing
if inf_event and self.current_view[node_id] != -1:
# NOTE: if super-infections are possible, here they take place
pass
# infection of susceptible host or mutation
else:
# this reads: if the node is susceptible (this is all before
# the 'or not') or it is actually not an infection event
# (a mutation in this case)
if nrand.rand() < self.contact_structure.susceptible[
node_id
][token_id] or not inf_event:
nn, recover_time, inf_times, start_times, stop_times = \
get_neighbours(node_id, token_id)
# ##
# This is the method without selection nor treatment,
# so not much to be done here
# ##
nn, inf_times = self._cut_times(
recover_time,
start_times,
stop_times,
inf_times,
nn
)
self.queue.put_nowait(
Event(
self.t + recover_time,
node_id,
-1,
True,
)
) # put the recover event
self._create_neighbour_events(
inf_event,
nn,
inf_times,
node_id,
token_id
)
# set the treatment status back to untreated in any case
self.current_therapy[node_id] = -1
return 0
# this is the incremental version of the simple event handler. In this
# version each change is written to an output file
def _handle_event_simple_inc(self, an_event, get_neighbours):
"""
This method handles events in a spreading process without treatment nor
selection.
:param an_event:
:param get_neighbours:
(changes are written to the opened incremental output file
self._inc_file_o)
:return:
"""
node_id, token_id, inf_event, source = an_event
# token_id is the id of the pathogen -1 means recovering/dead
if token_id == -1:
# the Event is recovering
old_strain_id = self.current_view[node_id]
# what was the old status of that node.
# self.pathogen.rec_types: a list indicating how one recovers
# after an infection. The index is the pathogen id and the value is
# either 0 or 1, meaning either back to susceptible or resistant.
self.contact_structure.susceptible[
node_id
][old_strain_id] = self.pathogen.rec_types[old_strain_id]
self.current_view[node_id] = -1
# set the node back to the uninfected state
self.current_infection_type[node_id] = -1
# set the infection type back
self._count_per_strains[old_strain_id] -= 1
# update the count of number of infected for that strain
self._inc_file_o.write(
'%s, %s\n' % (
self.t, self.contact_structure.all_nodes[node_id]
)
)
else:
# the Event is an infection
if inf_event and self.current_view[node_id] != -1:
# infection of infected host: do nothing
pass
# NOTE: if super-infections are possible, here they would take
# place
else:
# infection of susceptible host or mutation
if nrand.rand() < self.contact_structure.susceptible[node_id][
token_id
] or not inf_event:
nn, recover_time, inf_times, start_times, stop_times = \
get_neighbours(node_id, token_id)
# this reads: if the node is susceptible (this is all before the
# 'or not') or it is actually not an infection event (a
# mutation in this case).
self._inc_file_o.write(
'%s, %s, %s\n' % (
self.t,
self.contact_structure.all_nodes[node_id],
self.contact_structure.all_nodes[source] if
source is not None else 'seed'
)
)
# This is the method without selection nor treatment, so
# not much to be done here
nn, inf_times = self._cut_times(
recover_time, start_times, stop_times, inf_times,
nn
)
self.queue.put_nowait(
Event(self.t + recover_time, node_id, -1, True,)
)
# put the recover event
self._create_neighbour_events(
inf_event, nn, inf_times, node_id, token_id
)
# create and add the infection events for the neighbours.
return 0
def _handle_event_selection(self, an_event, get_neighbours):
"""
This method handles events in a spreading process with selection
(mutation + selection) but without treatment.
:param an_event:
:return:
"""
node_id, token_id, inf_event, source = an_event
old_strain_id = self.current_view[node_id]
if token_id == -1: # the Event is recovering
self.contact_structure.susceptible[
node_id
][old_strain_id] = self.pathogen.rec_types[old_strain_id]
self.current_view[node_id] = -1
self.current_infection_type[node_id] = -1
# set the treatment status back to not treated
self.current_therapy[node_id] = -1
self._count_per_strains[old_strain_id] -= 1
else: # the event is an infection or selection
# infection of infected host: do nothing
if inf_event and old_strain_id != -1:
# NOTE: if super-infections are possible, here they take place
pass
else: # infection of susceptible host or selection/mutation
if nrand.rand() < self.contact_structure.susceptible[
node_id][token_id] or not inf_event:
(
nn, recover_time, inf_times, start_times,
stop_times
) = get_neighbours(node_id, token_id)
# This is the part devoted to selection
# ##
# determine the strain that is selected for and the time
# at which the mutation will take place (see the
# Pathogen.get_selected method for more details).
# If the mutation arises after recovering, the subsequent
# event is simply: recovered. Infections of the neighbours
# are then as without the selection/mutation, as we assured
# that recover time is either the true recover time or the
# mutation time.
# ##
new_token, new_inf_event = -1, True
if self.selecting[token_id]:
(
selected_strain_id,
selection_times
) = self.pathogen.get_selected(token_id)
if selection_times[
# if the mutation is before the recovering
selected_strain_id
] < recover_time:
recover_time = selection_times[
# adjust the time of "recover" from the
# current infection.
selected_strain_id]
# set the token and infection event status
new_token = selected_strain_id
new_inf_event = False
# for a subsequent event.
nn, inf_times = self._cut_times(
recover_time, start_times, stop_times, inf_times,
nn
)
self.queue.put_nowait(
Event(
self.t + recover_time, node_id, new_token,
new_inf_event,
)
)
# when writing new_token and new_inf_event into the queue,
# it's either just the recover event or the mutation event.
self._create_neighbour_events(
inf_event, nn, inf_times, node_id, token_id
)
# set the treatment status back to not treated
self.current_therapy[node_id] = -1
return 0
# with only treatment
def _handle_event_treatment(self, an_event, get_neighbours):
"""
This method handles the events in a spreading process with treatment
but without selection.
:param an_event:
:return:
"""
node_id, token_id, inf_event, source = an_event
if token_id == -1: # the Event is recovering
old_strain_id = self.current_view[node_id]
# issue: import those into the namespace of spreading:
# eg. self._contact_network__susceptible...
self.contact_structure.susceptible[
node_id
][old_strain_id] = self.pathogen.rec_types[old_strain_id]
self.current_view[node_id] = -1
self.current_infection_type[node_id] = -1
# set the treatment status back to not treated
self.current_therapy[node_id] = -1
self._count_per_strains[old_strain_id] -= 1
else: # the Event is an infection or selection
# infection of infected host: do nothing
if inf_event and self.current_view[node_id] != -1:
# NOTE: if super-infections are possible, here they take place
pass
else: # infection of susceptible host or mutation/selection
if nrand.rand() < self.contact_structure.susceptible[
node_id
][token_id] or not inf_event:
(
nn, recover_time, inf_times, start_times,
stop_times
) = get_neighbours(node_id, token_id)
# This is the part devoted to treatment
# ##
# knowing the id of the pathogen strain, we
therapy_ids = self.strain_therapy_id_map[token_id]
# get the id of the therapy (or therapies) that applies to
# this strain.
# issue: at the moment this approach takes only into
# account one therapy per pathogen strain
for therapy_id in therapy_ids:
# if this pathogen is treated at the moment and in
# this particular case we treat:
# To Do: see combined case: you can move
# self.treating condition outside the therapy_id loop
if self.treating[token_id] \
and nrand.rand() < self.therapy_probas[
therapy_id
][node_id]:
# set the therapy status
self.current_therapy[node_id] = therapy_id
# get the delay until treatment
delay = self.therapy_delays[therapy_id][node_id]
# if node has not recovered during the delay
if recover_time > delay:
# # define a new recover time, that is the
# delay plus a new recover time under treatment
# print 'rec_1', recover_time
recover_time = delay + (
recover_time - delay
) * self.therapy_recover_facts[
therapy_id][token_id] ** (-1)
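# worked sketch of the line above (illustrative): with delay=1.0,
# recover_time=3.0 and a recover factor of 2.0, the node now
# recovers at 1.0 + (3.0 - 1.0) / 2.0 = 2.0 time units from now.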
# print 'rec_2', recover_time
# # define potential new infection times
# new_inf_times = delay + \
# self.pathogen.trans_dists[
# token_id
# ].v_get(nn) if nn.size else array([])
# if the infection time is bigger than the
# delay, take the modified infection time.
# print 'therapy factor',
# self.therapy_trans_facts[
# therapy_id
# ][token_id] print 'inf_1', inf_times
inf_times = where(
# from now until infection is smaller
start_times + inf_times <= delay,
inf_times,
# delay + new_inf_times
delay + (
inf_times - delay
) * self.therapy_trans_facts[
therapy_id][token_id] ** (-1)
)
# get the time from now until start and stop
# ##
nn, inf_times = self._cut_times(
recover_time, start_times, stop_times, inf_times,
nn
)
self.queue.put_nowait(
Event(self.t + recover_time, node_id, -1, True, )
)
self._create_neighbour_events(
inf_event, nn, inf_times, node_id, token_id
)
return 0
# this method makes things move.
def run(self, phases):
"""
This method will work through a list of phases. Each phase is a
dictionary and each entry in this dictionary can be seen as a task.
You might just specify a single task for a phase or several but in
order to keep things tractable, use as few tasks as possible for a
phase.
All possible tasks are listed below.
Possible tasks:
'new_infection': {strain_name: 'random'/IDs/other_strain}
This will introduce a strain into the population.
The strain_name specifies which strain should be
introduced, 'random'/IDs/other_strain will specifies who should
be infected.
(see self._initiate_infection method for details)
'parameter_alternation': {strain_name: {some_rate: value}}}
This will change parameters of the pathogen.
(see the self._parameter_alternation method for details)
Note: if Default values were set for the mutation selection
rate they need to be set again.
't_start': float
't_stop': float
If t_stop is provided the method will try to take out events
from the event queue until it finds an event with t <= t_stop
(or some other halt condition is met - like the
assert_survival task below)
Note: it might be that a phase does not run until t_stop but
ends earlier (e.g. if 'building_up' task if provided -
see below). For such cases we cannot tell at what time the
phase ends and thus at what time the next phase should
start. For this there is the self._phase_passon attribute
which will be used
'dt': float
Can be used to specify the time interval for which the current
status should be reported. This only really is useful if
'explicit': True (see below).
'with_treatment': boolean
'with_selection': boolean
If provided will force the scenario to use the specific event
handler. If not provided both default to False.
'treating': [strain_name, ...]
If only some strains should be treated you can specify them
with the 'treating' task. If 'treating' is not provided but
'with_treatment': True, all strains are treated (if the
scenario has drug for them).
So, best always provide the 'treating' task.
'assert_survival': [strain_name, ...]
If this task is provided, the simulation stops if one of the
specified strain dies out.
'halt_condition': [strain_name, ...]
If provided the halt_condition task will check if the
specified strains are in QSS and if so, stop the simulation.
'building_up': {strain_name: value, ref} the value indicates the
amount of refs that should be infected with the indicated
strain. If during the phase this value is attained
(or exceeded) the phase is stopped and the scenario time
(self.t) is returned. The building up can either be relative or
absolute.
For relative building up, a typical configuration could look as
follows:
'building_up': {
'relative': {mutant_1: (0.9, total_infected)}
}
This reads: the building up stops once the mutant_1 strain
reached 90% of the total number of infected.
Alternatively 'total_infected' can be replaced by 'total'
which would mean the total number of hosts or by any other
name of present strains, e.g. the wild_type.
'explicit': integer.
This task forces the scenario to provide detailed status
reports over the entire simulation. If explicit==1, then on
every self.dt the self.get_outcome is written into
self.simulation_log.
If explicit==2, then on every self.dt the current status is
written into self.log (slowdown!).
'incremental': string.
This task only works if explicit==True. It will write every
event to the specified output file. Note that the file is
opened in append mode so to be able to write several phases
into the same file. Be sure to change the file name for each
run as otherwise several runs might be written into the same
file.
'shuffle': dict. with 'source', 'target', 'substitute' and 'mode':
This task will shuffle in the specified way the infection
status of hosts.
'target' must be a list of pathogen names and/or 'susc' for
susceptible indicating the group of hosts within which a
shuffle should take place.
'source' a list of pathogen names and/or 'susc' for
susceptible indicating the group that should be distributed
within the target group.
'substitute' a list of pathogen names and/or 'susc' if this
list is not empty, all hosts of the source group will get
assigned randomly a state from the substitute list.
'mode': optional argument (default value is 'keep') determines
how the recover times should be handled.
Possible values are 'keep', 'reset', 'source', 'both'.
'keep' will not reset the recover times and 'reset' will
reset all the recover times. 'source' only resets the
recover times of all the source nodes and 'both' will reset
the recover times for both 'source' and 'target'
example: 'shuffle': {
'target' = [wild_type, mutant_1],
'source': [wild_type, mutant_1],
'substitute': []
}
This will redistribute the status of all the hosts infected
with either wild_type or mutant_1 among themselves.
Note that only the current views and the node ids in the
priority queue are updated.
'state_change': dict with either 'node' or 'relative' and an
optional 'mode'. If 'node', for each node id give a new token.
New tokens are either pathogen names or 'susc' for susceptible
(later 'r' for resistant)
If 'relative' a dict needs to be provided, specifying what
token to replace (key) with how much of what e.g.
'wild_type': (0.5, 'mutant_1') reads replace 0.5 of the
wild_types with a mutant_1 token. If 'mode' is present the
event queue is reset with the given mode. If mode is 'keep'
then the newly introduced tokens will keep the recover times of
their previous token, with exception of those that were
initially susceptible. If mode is 'reset' all the recover times
are reset.
Infection times for neighbouring nodes are reset no matter
what.
Example:
- 'state_change': {1: 'mutant_1', 2: 'wild_type', ...}
will simply change the token of node 1 to 'mutant_1'
and of node 2 to 'wild_type'.
Note that the pathogenic parameters are not reset, i.e.
node 1 will keep its previous recover time and continue
to infect with its previous token.
- 'state_change': {'mode': 'reset', 1: 'mutant_1'}
will change the token for node 1 to mutant_1 and draw a
new recovery time for node_1 as well as new infection
events for its neighbours.
'reset_recovery': boolean. If true, all the recover times will be
reset (drawn from the recover rate dists)
Note: reset_recovery will automatically reset all the
transmission times.
'reset_transmission': True or dict. If True then all the
transmission times are reset without resetting the recovery
times. If dict, then the reset mode is taken from the dict.
E.g.
'reset_transmission': {'mode': 'reset'}
will also reset the recover times. Possible values for
'mode' are 'reset' and 'keep'.
Default is 'keep'.
'add_treatment': adds a new treatment to the existing scenario.
Note that the treatment status is not changed, i.e. just
adding a new treatment will not lead to an actual treatment.
You'll need to provide the 'with_treatment': True task to run
the simulation with a treatment.
"""
try:
self.simulation_log['scenario'].append(phases)
except KeyError:
self.simulation_log['scenario'] = [phases]
self.simulation_log['adjacency'][self.t] = self.contact_structure.nn
# issue: should the adjacency be written in each phase?
self._update_phase_in_sim_log()
for _i in xrange(len(phases)):
phase = deepcopy(phases[_i])
# get or set a new seed for this phase
self.seed = self._set_seed(phase.get('seed', None))
# update the seed in the phases log (if no seed was provided, use
# the one generated just before)
self.simulation_log['scenario'][-1][_i]['seed'] = self.seed
with_run = True # will call self._run for this phase
# check if previous phase imposes parameters:
if self._phase_passon:
to_pass = self._phase_passon.keys()
for param in to_pass:
phase[param] = self._phase_passon.pop(param)
if 'parameter_alternation' in phase:
# note: only transmission_rate and recover_rate changes work.
alternations = phase.pop('parameter_alternation')
self.simulation_log['param_alternation'][self.t] = alternations
self._parameter_alternation(alternations)
with_run = False
if 'new_infection' in phase:
infection = phase.pop('new_infection')
self._initiate_infection(infection)
try:
# ToDo: use the 'modifications' key for other events too
self.simulation_log['modifications']['new_infection'] = (
self.t, infection
)
except KeyError:
self.simulation_log['modifications'] = {
'new_infection': (self.t, infection)
}
with_run = False
if 'introducing' in phase:
# to do: introduce with a certain rate
# introduction_scenario = phase.pop('introducing')
with_run = False
# which: which strain is introduced
# rate: at which rate it is introduced
# target: set of potential target hosts
if 'reset_transmission' in phase:
reset_transmission = phase.pop('reset_transmission', None)
to_reset = reset_transmission.get('target_nodes')
# either 'gradual' or 'immediate'
transition_type = reset_transmission.get('transition_type')
if isinstance(to_reset, (bool, int)) and to_reset:
# reset all the transmission events if the value is True
mode = phase.get('mode', 'keep')
self._sync_event_queue(
mode=mode,
targets=None,
transition=transition_type
)
elif isinstance(to_reset, list):
if not any([isinstance(an_el, str) for an_el in to_reset]):
# in this case for each node the reset status is set
self._sync_event_queue(
mode='keep',
targets=to_reset,
transition=transition_type
)
else:
# list of tokens (could be with therapies)
targets = []
for a_token in to_reset:
if ';' in a_token:
the_token, the_therapy = a_token.split(';')
else:
the_token, the_therapy = a_token, None
token_id = self.pathogen.ids[the_token]
adding_targets = filter(
lambda k: self.current_view[k] == token_id,
xrange(len(self.current_view))
)
if the_therapy is not None:
therapy_id = self.treatment.ids[the_therapy]
adding_targets = filter(
lambda l: self.current_therapy[
l] == therapy_id,
adding_targets
)
targets.extend(adding_targets)
mode = phase.get('mode', 'keep')
self._sync_event_queue(
mode=mode,
targets=targets,
transition=transition_type
)
elif isinstance(to_reset, str):
# reset for a certain strain only.
# e.g. mutant, wild_type, wild_type;drug1
if ';' in to_reset:
the_token, the_therapy = to_reset.split(';')
else:
the_token, the_therapy = to_reset, None
token_id = self.pathogen.ids[the_token]
# get all the nodes which currently have that status
targets = filter(lambda j: self.current_view[
j] == token_id, xrange(len(self.current_view)))
# further filter for the applied treatment (if any)
if the_therapy is not None:
therapy_id = self.treatment.ids[the_therapy]
targets = filter(
lambda a_target: self.current_therapy[
a_target] == therapy_id,
targets
)
# get the recovery reset mode
mode = phase.get('mode', 'keep')
self._sync_event_queue(
mode=mode,
targets=targets,
transition=transition_type
)
with_run = False
if 'reset_recovery' in phase:
if phase['reset_recovery']:
self._sync_event_queue(mode='reset')
if 'state_change' in phase:
state_change = phase.pop('state_change')
try:
self.simulation_log['modifications']['state_change'] = (
self.t, state_change
)
except KeyError:
self.simulation_log['modifications'] = {
'state_change': (self.t, state_change)
}
# define the mode that is later passed to sync_event_queue fct
if 'mode' in state_change:
to_mode = state_change.pop('mode')
else:
to_mode = 'changed'
for a_key in state_change:
if type(a_key) is int:
self.current_view[a_key] = self.pathogen.ids[
state_change[a_key]
]
try:
nodes_to_sync[a_key] = 1
except NameError:
nodes_to_sync = [0] * self.contact_structure.n
nodes_to_sync[a_key] = 1
if to_mode == 'changed':
to_mode = nodes_to_sync
elif a_key == 'relative':
rel_replacement = state_change['relative']
# 'wt': (0.5, 'mt')
for to_replace in rel_replacement:
old_token = self.pathogen.ids[
to_replace
] if to_replace in self.pathogen.ids else -1
(
fraction_to_replace,
new_token_name
) = rel_replacement[to_replace]
# new status is either a pathogen or susceptible
new_token = self.pathogen.ids[
new_token_name
] if new_token_name in self.pathogen.ids \
else -1
nodes_old_token = filter(
lambda i: self.current_view[i] == old_token,
range(len(self.current_view))
)
if not len(nodes_old_token):
raise ValueError(
'No nodes are present with the old '
'token'
)
# choose a fraction_to_replace nodes among
# nodes_old_token
length = len(nodes_old_token)
# always round down to the next int
nbr_to_replace = int(fraction_to_replace * length)
# replace the nodes token
for i in xrange(nbr_to_replace):
self.current_view[
nodes_old_token[i]] = new_token
try:
nodes_to_sync[nodes_old_token[i]] = 1
except NameError:
nodes_to_sync = [
0] * self.contact_structure.n
nodes_to_sync[nodes_old_token[i]] = 1
if to_mode == 'changed':
to_mode = nodes_to_sync
else:
raise ValueError(
'The state_change phase is not understood, '
'please either use "relative" or node ids.'
)
# update the strain counts
for strain_id in self.pathogen.ids.values():
self._count_per_strains[
strain_id] = self.current_view.count(strain_id)
# reset the event queue if the mode has not been adjusted yet
# (this should not happen)
if to_mode == 'changed':
to_mode = 'keep'
self._sync_event_queue(mode=to_mode)
with_run = False
if 'shuffle' in phase:
shuffle = phase.pop('shuffle')
# write it to the simulation log
# print self.current_view
# print self.current_view.count(0), self.current_view.count(1)
# print self._count_per_strains
try:
self.simulation_log[
'modifications']['shuffle'] = (self.t, shuffle)
except KeyError:
self.simulation_log['modifications'] = {
'shuffle': (self.t, shuffle)
}
# get the id for target and source tokens
target_ids = [
self.pathogen.ids[name] if name != 'susc'
else -1 for name in shuffle['target']
]
source_ids = [
self.pathogen.ids[name] if name != 'susc'
else -1 for name in shuffle['source']
]
# get the substitute token (if any)
if 'substitute' in shuffle and shuffle['substitute']:
substitute_ids = [
self.pathogen.ids[name] if name != 'susc'
else -1 for name in shuffle['substitute']
]
else:
substitute_ids = []
# lists of all the nodes with target resp source tokens
targets = []
# list of all the tokens to redistribute
# (a token is a tuple here (token, therapy_id, infection_type)
sources_tokens = []
# build up the new current view
new_current_view = []
new_current_therapy = []
new_current_infection_type = []
current_view = list(self.current_view)
current_therapy = list(self.current_therapy)
current_infection_type = list(self.current_infection_type)
# this will hold the new node id at the index (the old id)
node_id_map = []
for node in xrange(len(current_view)):
node_id_map.append(node)
# this might change later
new_current_view.append(current_view[node])
new_current_therapy.append(current_therapy[node])
new_current_infection_type.append(
current_infection_type[node]
)
# if the node belongs to target group, add it to targets
if current_view[node] in target_ids:
targets.append(node)
# if the node belongs to the source group add it to sources
if current_view[node] in source_ids:
# add the node's token to token to redistribute list
sources_tokens.append(
(
node,
current_view[node],
current_therapy[node],
current_infection_type[node]
)
)
# if we have substitutes for the source token, choose a
# random substitute. NOTE: substitutes only work for
# tokens not for therapies
if substitute_ids:
new_current_view[-1] = _get_rand_el(substitute_ids)
# ToDo: check if len(targets) is the same as len(source_tokens)
# if not, we cannot simply run through the sources_tokens...
# as long as not all token have been redistributed
while len(sources_tokens):
# pick a node from the targets
a_node = _pick_rand_el(targets)
# get this node a new token
(
old_node, new_token, new_therapy,
new_infection_type
) = _pick_rand_el(sources_tokens)
# give the node the new token
new_current_view[a_node] = new_token
# the old node id maps to the new now.
node_id_map[old_node] = a_node
new_current_therapy[a_node] = new_therapy
new_current_infection_type[a_node] = new_infection_type
# the token are redistributed now
# update the currents view with the new view
self.current_view = new_current_view
self.current_therapy = new_current_therapy
self.current_infection_type = new_current_infection_type
# now we need to sync the event queue:
updated_queue = []
while True:
try:
event = self.queue.get_nowait()
except Empty:
break
else:
                        # map the old node to its new identity
if event[1][3]:
new_source = node_id_map[event[1][3]]
else:
new_source = event[1][3]
event = Event(
event[0],
node_id_map[event[1][0]],
event[1][1],
event[1][2],
new_source
)
updated_queue.append(event)
while len(updated_queue):
self.queue.put_nowait(updated_queue.pop())
with_run = False
if 'add_treatment' in phase:
treatment_to_add = phase.pop('add_treatment')
if self.treatment is not None:
self.treatment.add_new_treatment(treatment_to_add)
else:
self.treatment = treatment_to_add
self.skip_treatment = False
self._resolve_treatment_pathogen_relations()
# most of the specified tasks are taken care of by now. However,
# if t_stop is provided we still need to carry out the actual
# simulation. This is done with the self._run method
if with_run:
# counts_length = len(self._counts_over_time)
# if counts_length < int(t_stop):
# self._counts_over_time.extend(
# [
# None for _ in xrange(
# int(t_stop) - len(self._counts_over_time)
# )
# ])
# for _ in xrange(
# max(0, int(t_stop) - counts_length) + 10
# ): #+10 is just a margin
# self._counts_over_time = n_append(
# self._counts_over_time,
# [[0 for _ in xrange(self.pathogen.n)]],
# 0
# )
t_stop = phase.pop('t_stop', None)
# call self._run method and pass the remaining tasks on to it
self._run(
t_start=phase.pop('t_start', self.t),
t_stop=t_stop,
**phase
)
if self._inf_file_o:
self._inf_file_o.close()
# close the file stream to the incremental output file if
# it exists.
self._inf_file_o = None
self._update_phase_in_sim_log()
# self.log[round(self.t, self._log_time_rounding)].append(
# copy(self.current_view)
# )
            # Note: after_phase_outcome is shared between Scenario.log and
            # Scenario.outcome, so changing one will also change the other.
after_phase_outcome = self.get_outcome
self.log[
round(self.t, self._log_time_rounding)
].append(after_phase_outcome)
# TODO: was an option
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(self.get_current_view)
try:
self.outcome[self.t].append(after_phase_outcome)
except (KeyError, AttributeError):
self.outcome[self.t] = [after_phase_outcome]
# TODO: alternative
# self.outcome[self.t] = [self.get_outcome]
# if 'assert_survival' in phase:
# to_survive = phase.pop('assert_survival')
# break_here = False
# for strain_name in to_survive:
# if strain_name not in self.outcome[
# 'logs'][self.t]['abundance'].keys():
# break_here = True
# print '%s got extinct.'%(strain_name)
# if break_here:
# break
return 0
def _run(
self,
t_stop,
with_selection=False,
with_treatment=False,
halt_condition=False,
assert_survival=False,
t_start=None,
**params
):
"""
Run the scenario from t_start to t_stop with the given conditions
:param assert_survival: If list of strain names, it is checked for
survival of those strains.
:param t_start: Gives the starting time
:param t_stop: Gives the end time.
:param with_selection: If True, the selection/mutation is considered.
:param with_treatment: If True, the specified treatment is applied.
            NOTE: If True, consider passing the <treating> argument to specify
            which strains are treated.
        :param halt_condition: If list of strain names, it is checked for
            quasi-stability for those strains.
        :param params: Collection of optional arguments (remaining tasks):
            - treating: Dict indicating which strain is treated.
                E.g. treating={'wild_type': True, 'Default': False}
If 'Default' is given, this value will be applied to all
strains missing in the dictionary.
- dt: The time interval after which to update self.log
"""
if t_start is not None:
self.t = t_start
# self.t = round(t_start, 4)
# specify the new run in self.simulation_log
# self._update_phase_in_sim_log(
# selection=with_selection,
# therapy=with_treatment,
# t_stop=t_stop,
# assert_survival=assert_survival,
# params=params
# )
# determine if the network is static or dynamic and set the appropriate
# methods.
if self.contact_structure.is_static:
if self.single_transmission:
get_neighbours = self._get_neighbours_static_single
else:
get_neighbours = self._get_neighbours_static
else:
if self.single_transmission:
get_neighbours = self._get_neighbours_dynamic_single
else:
get_neighbours = self._get_neighbours_dynamic
# define the event_handler as the simple method for now.
# This will be adapted if needed in the next lines
if 'incremental' in params:
event_handler = self._handle_event_simple_inc
self._inc_file_o = open(params['incremental'], 'a')
else:
event_handler = self._handle_event_simple
self.treating = []
self.selecting = []
        # if no selection parameters were provided when initializing the
        # scenario, no selection will be attempted, no matter what is specified
        # in the phase we are currently in.
# if self.skip_selection:
# with_selection = False
# same goes for treatment. If no treatment was specified when
# initializing (and the task 'add_treatment' was never provided so far)
# we skip the treatment.
if with_treatment:
# if we have treatment, clarify which strain to treat
if 'treating' in params:
treat_dict = params.get('treating')
def_val = True
if 'Default' in treat_dict:
def_val = treat_dict.pop('Default')
self.treating = [def_val for _ in xrange(self.pathogen.n)]
for strain_name in treat_dict:
self.treating[
self.pathogen.ids[strain_name]
] = treat_dict[strain_name]
else:
                # if 'treating' is missing, all strains are treated
self.treating = [True for _ in xrange(self.pathogen.n)]
# if we have treatment and selection, we need to use the combined
# event handler
if with_selection:
if 'selecting' in params:
selecting_dict = params.get('selecting')
def_val = True
if 'Default' in selecting_dict:
def_val = selecting_dict.pop('Default')
self.selecting = [def_val for _ in xrange(self.pathogen.n)]
for strain_name in selecting_dict:
self.selecting[
self.pathogen.ids[strain_name]
] = selecting_dict[strain_name]
else:
self.selecting = [True for _ in xrange(self.pathogen.n)]
event_handler = self._handle_event_combined
# if it is only treatment, the treatment event handler is the one
# to use
else:
event_handler = self._handle_event_treatment
elif with_selection:
if 'selecting' in params:
selecting_dict = params.get('selecting')
def_val = True
if 'Default' in selecting_dict:
def_val = selecting_dict.pop('Default')
self.selecting = [def_val for _ in xrange(self.pathogen.n)]
for strain_name in selecting_dict:
self.selecting[
self.pathogen.ids[strain_name]
] = selecting_dict[strain_name]
else:
self.selecting = [True for _ in xrange(self.pathogen.n)]
# at this point we know that only selection is on, so use the
# selection event handler.
event_handler = self._handle_event_selection
# check if the time interval for reporting is specified, if not use
# default one.
dt = params.get('dt', self._dt)
# get the next time at which the status of the hosts should be reported
# or halt condition to be checked
t_next_bin = self.t + dt
# check if we have some halt conditions that might lead to a
# termination of the simulation before t_stop.
with_halt_condition = False
if assert_survival:
with_halt_condition = True
targeted_strains = array(
[self.pathogen.ids[strain_name]
for strain_name in assert_survival]
)
break_condition = self._check_survival
if halt_condition:
with_halt_condition = True
targeted_strains = array(
[self.pathogen.ids[strain_name]
for strain_name in halt_condition]
)
break_condition = self._quasistable
# to do: basically get rid of the if statement below (is handled in if
# with_halt_condition)
# stop as soon as one of the specified stains goes extinct
if False: # assert_survival:
surviving_strain_ids = array(
[self.pathogen.ids[strain_name]
for strain_name in assert_survival]
)
# with_logging = params.get('explicit', False)
done = False
        # TO DO: start for new structure. The while loop can be put after the
        # running conditions and each condition defines its proper stepper
        # function.
        # TO DO: problem of combining conditions remains.
def stepper(self):
# get the next event
(time, n_event) = self.queue.get_nowait()
# update the time of the scenario
self.t = round(time, self._time_rounding)
# self._counts_over_time[int(self.t)] = self._count_per_strains
# pass the event to the event handler
event_handler(n_event, get_neighbours)
# the new time is after the checking time
if self.t >= t_next_bin:
# check for the condition
for strain_id in surviving_strain_ids:
if not self.current_view.count(strain_id):
return 1
return 0
while self.t < t_stop:
try:
if stepper(self):
break
except Empty:
# if logger_mode == 1:
self.log[
round(self.t, self._log_time_rounding)
].append(self.get_outcome)
# elif logger_mode == 2:
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(copy(self.current_view))
# TODO: was alternative
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(self.get_current_view)
break
"""
while self.t < t_stop and not done:
try:
# get the next event
(time, n_event) = self.queue.get_nowait()
# update the time of the scenario
self.t = round(time, self._time_rounding)
# self._counts_over_time[
# int(self.t)] = self._count_per_strains
# pass the event to the event handler
event_handler(n_event, get_neighbours)
# the new time is after the checking time
if self.t >= t_next_bin:
# check for the condition
for strain_id in surviving_strain_ids:
if not self.current_view.count(strain_id):
break
except Empty:
self.log[
round(self.t, self._log_time_rounding)
].append(copy(self.current_view))
break
"""
logger_mode = params.get('explicit', 0)
# if we have a halt condition this part will conduct the simulation
# print 'should be logging in mode %s every %s time step' %\
# (logger_mode, dt)
if with_halt_condition:
halt = False
            # work the event queue until we either hit t_stop or the halt
            # condition; explicit logging is handled via logger_mode below
while self.t < t_stop and not halt:
try:
# get the next event
(time, n_event) = self.queue.get_nowait()
# update the time of the scenario
self.t = round(time, self._time_rounding)
# self._counts_over_time[
# int(self.t)] = self._count_per_strains
# pass the event to the event handler
event_handler(n_event, get_neighbours)
# the new time is after the checking time
if self.t >= t_next_bin:
if logger_mode:
if logger_mode == 1:
self.log[
round(self.t, self._log_time_rounding)
].append(self.get_outcome)
elif logger_mode == 2:
self.log[
round(self.t, self._log_time_rounding)
].append(copy(self.current_view))
t_next_bin += dt
# check if we are in _quasistable state (QSS) if yes,
# stop the sim
# if self._quasistable(focus_strain_ids,
# surviving_strain_ids):
if break_condition(targeted_strains):
halt = True
# if we were not logging, write to the log now.
# this should not be needed as we will write in the
# self.outcome as soon as the phase stops
# if not with_logging:
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(copy(self.current_view))
# if no more events are to handle the sim is over (obviously)
except Empty:
                    # we don't write into the log; we'll write into self.outcome
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(copy(self.current_view))
# TODO: Probably something missing
break
# if we are in the case where a strain should build up its prevalence
elif 'building_up' in params:
# handle the stop condition
strains_stop_cond = params.pop('building_up')
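            # Illustrative 'building_up' argument (sketch): stop the phase once
            # a strain reaches an absolute count and/or a relative abundance:
            #   building_up={'absolute': {'mutant': 100},
            #                'relative': {'mutant': (0.1, 'total_infected')}}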
if 'absolute' in strains_stop_cond:
abs_cond = strains_stop_cond.pop('absolute')
else:
abs_cond = None
if 'relative' in strains_stop_cond:
rel_cond = strains_stop_cond.pop('relative')
else:
rel_cond = None
if abs_cond:
abs_id_stop_cond = {
self.pathogen.ids[name]: abs_cond[name]
for name in abs_cond
}
# building_status = {_id: self._count_per_strains[_id] for _id
# in abs_id_stop_cond}
# check if the stop condition is already met
if any(
self._count_per_strains[_id] >= abs_id_stop_cond[_id]
for _id in abs_id_stop_cond
):
return 0
if rel_cond:
rel_id_stop_cond = {}
for name in rel_cond:
_id = self.pathogen.ids[name]
if 'total' in rel_cond[name][1]:
if rel_cond[name][1] == 'total_infected':
# put condition on total set of present pathogens
rel_id_stop_cond[_id] = (
rel_cond[name][0],
self.pathogen.ids.values()
)
# if building up criterion is met, stop the phase
if self._count_per_strains[
_id] >= rel_id_stop_cond[_id][0] * sum(
[self._count_per_strains[path_id]
for path_id in self.pathogen.ids.values()]
):
return 0
else:
rel_id_stop_cond[_id] = (rel_cond[name][0], 'all')
# if building up criterion is met already, stop
# the phase
if self._count_per_strains[
_id] >= rel_id_stop_cond[
_id][0] * self.contact_structure.n:
return 0
else:
rel_id_stop_cond[_id] = (
rel_cond[name][0],
self.pathogen.ids[rel_cond[name][1]]
)
# if the condition is matched already end phase
if self._count_per_strains[
_id
] >= rel_id_stop_cond[
_id][0] * self._count_per_strains[
rel_id_stop_cond[_id][1]]:
return 0
# rel_id_stop_cond = {
# self.pathogen.ids[name]: (
# rel_cond[name][0],
# self.pathogen.ids[rel_cond[name][1]]
# )
# for name in rel_cond
# }
# if any(
# self._count_per_strains[_id] >=
# rel_id_stop_cond[
# _id][0] * self._count_per_strains[
# rel_id_stop_cond[_id][1]]
# for _id in rel_id_stop_cond):
# return 0
# clarify which type of condition is active and define
# appropriate tests:
# to do: revert back to using self._count_per_strains as soon as
# count_per_strains is reliable again
if not abs_cond and rel_cond:
def test_cond(self):
for s_id in rel_id_stop_cond:
condition = rel_id_stop_cond[s_id][1]
ref_val = condition if type(
condition) in [float, int] else (
sum(
# self.current_view.count(strain_id)
# for strain_id in self.pathogen.ids.values()
self._count_per_strains[strain_id]
for strain_id in self.pathogen.ids.values()
)
if type(condition) is list
                        else self.contact_structure.n
)
if self._count_per_strains[s_id] >= rel_id_stop_cond[
s_id
][0] * ref_val:
# TODO: was alternative
# if self.current_view.count(s_id) >=
# rel_id_stop_cond[s_id][0] * ref_val:
print 'reached fract.', self._count_per_strains, [
self.current_view.count(i)
for i in self.pathogen.ids.values()
]
# if we stop, we need to provide a new starting
# time for the next phase
self._phase_passon = {'t_start': self.t}
return 1
# if any(
# self._count_per_strains[_id] >=
# rel_id_stop_cond[
# _id][0] * self._count_per_strains[
# rel_id_stop_cond[_id][1]]
# for _id in rel_id_stop_cond):
# self._phase_passon = {'t_start': self.t}
# return 1
else:
return 0
elif not rel_cond and abs_cond:
def test_cond(self):
if any((
self._count_per_strains[_id] >=
abs_id_stop_cond[_id] for _id in abs_id_stop_cond
)):
self._phase_passon = {'t_start': self.t}
return 1
else:
return 0
else:
def test_cond(self):
if any((
self._count_per_strains[_id] >=
rel_id_stop_cond[_id][0] * self._count_per_strains[
rel_id_stop_cond[_id][1]]
for _id in rel_id_stop_cond
)):
self._phase_passon = {'t_start': self.t}
return 1
if any((
self._count_per_strains[_id] >=
abs_id_stop_cond[_id] for _id in abs_id_stop_cond
)):
self._phase_passon = {'t_start': self.t}
return 1
else:
return 0
# run the phase:
if logger_mode:
while self.t < t_stop:
try:
(time, n_event) = self.queue.get_nowait()
self.t = round(time, self._time_rounding)
event_handler(n_event, get_neighbours)
if self.t >= t_next_bin:
if logger_mode == 1:
self.log[
round(self.t, self._log_time_rounding)
].append(self.get_outcome)
elif logger_mode == 2:
self.log[
round(self.t, self._log_time_rounding)
].append(copy(self.current_view))
self.log[
round(self.t, self._log_time_rounding)
].append(copy(self.current_view))
while self.t >= t_next_bin:
t_next_bin += dt
# TODO: alt
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(self.get_current_view)
if test_cond(self):
return 0
except Empty:
if logger_mode == 1:
self.log[
round(self.t, self._log_time_rounding)
].append(self.get_outcome)
elif logger_mode == 2:
self.log[
round(self.t, self._log_time_rounding)
].append(copy(self.current_view))
# TODO: alternative
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(self.get_current_view)
break
else:
while self.t < t_stop:
try:
(time, n_event) = self.queue.get_nowait()
self.t = round(time, self._time_rounding)
event_handler(n_event, get_neighbours)
if test_cond(self):
return 0
except Empty:
self.log[
round(self.t, self._log_time_rounding)
].append(self.get_outcome)
# TODO: alternative
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(self.get_current_view)
break
# if there was neither a halt condition nor a building_up, this part
# will conduct the simulation
else:
if logger_mode:
# print 'is in logger mode.
# Current time: %s, next_check %s' % (self.t, t_next_bin)
while self.t < t_stop:
try:
(time, n_event) = self.queue.get_nowait()
self.t = round(time, self._time_rounding)
event_handler(n_event, get_neighbours)
if self.t >= t_next_bin:
if logger_mode == 1:
self.log[
round(self.t, self._log_time_rounding)
].append(self.get_outcome)
elif logger_mode == 2:
self.log[
round(self.t, self._log_time_rounding)
].append(copy(self.current_view))
while self.t >= t_next_bin:
t_next_bin += dt
except Empty:
if logger_mode == 1:
self.log[
round(self.t, self._log_time_rounding)
].append(self.get_outcome)
elif logger_mode == 2:
self.log[
round(self.t, self._log_time_rounding)
].append(copy(self.current_view))
# TODO: alternative
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(self.get_current_view)
break
else:
while self.t < t_stop:
try:
(time, n_event) = self.queue.get_nowait()
self.t = round(time, self._time_rounding)
event_handler(n_event, get_neighbours)
except Empty:
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(copy(self.current_view))
self.log[
round(self.t, self._log_time_rounding)
].append(self.get_outcome)
# TODO: alternative
# self.log[
# round(self.t, self._log_time_rounding)
# ].append(self.get_current_view)
break
# print 'treatment', with_treatment
return 0
def _parameter_alternation(self, alternations):
"""
E.g. {wild_type: {'transmission_rate': 1}} will set the transmission
rate of the wild_type to 1.
:param alternations: a dictionary holding stain names as keys and a
dictionary as values. Each of the value-dictionaries can contain
rates as keys and the new desired value as values.
:return:
"""
for strain_name in alternations:
# get its id
its_id = self.pathogen.ids[strain_name]
if 'transmission_rate' in alternations[strain_name]:
new_rate = alternations[strain_name]['transmission_rate']
self.pathogen.trans_rates[its_id] = new_rate
self.pathogen.trans_dists[its_id] = Distro(
new_rate ** (-1), 10000
)
if 'recover_rate' in alternations[strain_name]:
new_rate = alternations[strain_name]['recover_rate']
self.pathogen.rec_rates[its_id] = new_rate
self.pathogen.rec_dists[its_id] = Distro(
new_rate ** (-1), 10000
)
# to do: finish with recover_type
if 'selection_rate' in alternations[strain_name]:
new_rates = alternations[strain_name]['selection_rate']
self.pathogen.update_selection(
concerns=its_id,
new_rates=new_rates
)
# to do: method should create list of instantaneous infection events then
# let the list be digested by appropriate event handlers.
def _sync_event_queue(
self, mode='keep', targets=None, transition='gradual'
):
"""
This method will consume and recreate the event queue based on the
status on self.current_view and the mode specified in the mode arg.
:type mode: str; list; dict
:type targets: None; list
        :param mode: either a string ('keep', 'reset'), a dict
            {'wild_type': 'keep'/'reset', ...} or a list [0, 1, ...]
            indicating whether to reset (1) or to keep (0) the recover
            time for each node.
        :param targets: list of node ids for which the transmission events
            should be reset. The default is None, leading to all transmission
            events for all nodes being reset.
:return:
"""
if transition == 'gradual':
keep_therapy = True
elif transition == 'immediate':
keep_therapy = False
else:
raise ValueError(
'The transition argument must either be "gradual" '
'or "immediate"'
)
# get the reset mode
general, pathogen_specific = False, False
node_specific = False
if type(mode) is list:
node_specific = True
elif type(mode) is dict:
pathogen_specific = True
        else:  # neither a list nor a dict: general mode
general = True
event_queue_to_check = []
kept_events = []
# mutators = []
# recoverors = []
while True:
try:
event = self.queue.get_nowait()
except Empty:
break
else:
if targets and (
event[1][0] not in targets
or event[1][3] not in targets
):
# if the node in question is not in the target list AND an
# infection is not from a node from the target list it'll
# go back to the queue
kept_events.append(event)
# if it is recovery or mutation add it to the events to check
elif event[1][1] == -1 or not event[1][2]:
# in any other case only keep the recover event
event_queue_to_check.append(event)
else:
pass # this is a target node infecting another
# put all the events back that will not be changed
for an_event in kept_events:
self.queue.put_nowait(an_event)
# get all the nodes that have a token at the moment
nodes_to_deal = map(
lambda x: 1 if self.current_view[x] != -1 else 0,
range(len(self.current_view))
)
# if targets is specified, further filter the list for nodes specified
# in targets
if targets is not None:
nodes_to_deal = map(
lambda x: 1 if x in targets and nodes_to_deal[x] else 0,
xrange(len(nodes_to_deal))
)
while len(event_queue_to_check):
event = event_queue_to_check.pop()
node_id = event[1][0]
new_token = event[1][1]
new_inf = event[1][2]
if nodes_to_deal[node_id]:
nodes_to_deal[node_id] = 0
token_id = self.current_view[node_id]
# handle the recover event
recover_time = event[0] - self.t
# ToDo: effects of a therapy on recover_time is not implemented
if keep_therapy:
therapy_id = self.current_therapy[node_id]
else:
therapy_id = -1
# reset the current_therapy status
self.current_therapy[node_id] = therapy_id
if not new_inf:
# if the event was a mutation but we stop treatment,
# we need a recover time
recover_time = self.pathogen.rec_dists[
token_id].get_val()
new_inf = True
new_token = -1 # recover instead of mutation
if general: # all nodes are treated the same
if mode != 'keep': # redraw the recover time
recover_time = self.pathogen.rec_dists[
token_id].get_val()
elif pathogen_specific:
pathogen_name = self.pathogen.names[token_id]
if pathogen_name in mode and mode[
pathogen_name] == 'reset':
recover_time = self.pathogen.rec_dists[
token_id].get_val()
else: # for each node separate. 1 means reset, 0 keep
if mode[node_id]:
recover_time = self.pathogen.rec_dists[
token_id].get_val()
                # now we have the appropriate recover time; create the
                # transmission events and add them to the queue
self._create_transmission_events(
node_id, token_id, recover_time, therapy_id
)
# add the recover event to the queue (or mutation)
self.queue.put_nowait(
Event(
self.t + recover_time,
node_id,
new_token,
new_inf,
)
)
else:
# can happen if a node appears several times in an event
pass
# would this not mean: there are target nodes (with a token) that have
# no recover event?
if nodes_to_deal.count(1):
while 1 in nodes_to_deal:
node_id = nodes_to_deal.index(1)
nodes_to_deal[node_id] = 0
token_id = self.current_view[node_id]
if token_id != -1:
# TODO: create self._create_recover_event to handle
# selection we have no recover time so in any way we need
# to redraw one
recover_time = self.pathogen.rec_dists[token_id].get_val()
if keep_therapy:
therapy_id = self.current_therapy[node_id]
else:
therapy_id = -1
self.current_therapy[node_id] = -1
self._create_transmission_events(
node_id, token_id, recover_time, therapy_id
)
# add the recover event to the queue
self.queue.put_nowait(
Event(self.t + recover_time, node_id, -1, True,)
)
    # to do: this method needs a makeover (it is not and should not be used
    # at the moment)
    # - self._counts_over_time is not properly defined anymore
def _quasistable(
self, quasi_stable_strain_ids=None, surviving_strain_ids=None
):
"""
Stability check.
If stable return True, else return False
"""
if quasi_stable_strain_ids is not None:
i_1 = int(self.t / 3.)
i_2 = 2 * i_1
max_diff = n_max(absolute(
divide(
n_sum(
self._counts_over_time[i_1:i_2], axis=0
),
n_sum(self._counts_over_time[i_2:], axis=0)
)[quasi_stable_strain_ids]
))
if abs(1 - max_diff) >= 0.02:
return False
else:
print '_quasistable at t= ', self.t
return True
if surviving_strain_ids is not None:
if not count_nonzero(
self._counts_over_time[int(self.t)][surviving_strain_ids]
):
print 'protected strain died out at t= ', self.t
return True
else:
return False
return False
def _check_survival(self, surviving_strain_ids):
"""
Check if one of the strains specified in surviving_strain_ids went
extinct.
:param surviving_strain_ids:
:return:
"""
# if not count_nonzero(self._counts_over_time[int(self.t)][
# surviving_strain_ids]):
if not all(
[
s_id in self.current_view
for s_id in surviving_strain_ids
]
):
print 'a protected strain died out at t= ', self.t
return 1
else:
return 0
@property
def get_outcome(self):
"""
This function should be called at the end of each phase.
It computes all the necessary properties and returns them.
Should be strain specific:
{time1:
{
'seed': numpy.random.seed # that was set at the beginning of
the phase
'network': {
'n': size,
'degree_count': {0: how many nodes with deg 0
}
},
'wild_type':
{
'count': total number of infected
'degree_count': {
# will count the nbr of infected indivs per degree
0:...,
},
                    'acquired': how many acquired the type through mutation,
'degree_acquired': {
# how many node of deg 0 acquired through mutation
0: ...,
}
}
'mutant': ...
},
time2: ...
}
"""
_output = {
'seed': self.seed
}
if self.contact_structure.is_static:
# ToDo: the degree should be directly accessible from
# self.contact_structure
degrees = []
for node in xrange(self.contact_structure.n):
degrees.append(
len(
self.contact_structure.nn[node]
)
)
degree_count = {}
observed_degrees = list(set(degrees))
# Get nodes per degree
nodes_per_degree = {deg: [] for deg in observed_degrees}
for node_id in xrange(self.contact_structure.n):
nodes_per_degree[degrees[node_id]].append(node_id)
for a_degree in observed_degrees:
degree_count[a_degree] = len(nodes_per_degree[a_degree])
_output['network'] = {
'n': self.contact_structure.n,
'degree_count': degree_count,
}
for strain_id in self.pathogen.names.keys():
name = self.pathogen.names[strain_id]
_output[name] = {}
count = self.current_view.count(strain_id)
_output[name]['count'] = copy(count)
strain_acquired = 0
strain_degree_count = {}
strain_degree_acquired = {}
for a_degree in observed_degrees:
strain_degree_count[a_degree] = 0
strain_degree_acquired[a_degree] = 0
for node_id in nodes_per_degree[a_degree]:
if self.current_view[node_id] == strain_id:
strain_degree_count[a_degree] += 1
if self.current_infection_type[node_id] == 1:
strain_acquired += 1
strain_degree_acquired[a_degree] += 1
_output[name]['degree_count'] = copy(strain_degree_count)
_output[name]['acquired'] = copy(strain_acquired)
_output[name]['degree_acquired'] = copy(strain_degree_acquired)
else:
# ToDo: report some info about the current network
# _output = {
# 'network': {
# 'n': self.contact_structure.n,
# 'degree_count': degree_count,
# }
# }
for strain_id in self.pathogen.names.keys():
# report active nodes
name = self.pathogen.names[strain_id]
_output[name] = {}
count = self.current_view.count(strain_id)
_output[name]['count'] = copy(count)
strain_acquired = 0
# in active nodes
for node_id in xrange(self.contact_structure.n):
if self.current_view[node_id] == strain_id:
if self.current_infection_type[node_id] == 1:
strain_acquired += 1
_output[name]['acquired'] = copy(strain_acquired)
return _output
def _old_get_outcome(self):
survived = {}
degree_infected_fraction = {}
# acquire_type = {}
degree_count = {} # just a count for each degree
# Keys are the strain names, values are dict{degree: mutant count}
# degree_spec_acquire_count = {}
degrees = []
# degrees = {}
for node in xrange(self.contact_structure.n):
degrees.append(
len(
self.contact_structure.nn[node]
)
)
# degrees[node] = len(
# #to do: makes sense to define degree in contact_structure
# self.contact_structure.nn[node])
# observed_degrees = list(set(degrees.values()))
observed_degrees = list(set(degrees))
# for each degree a list of node ids
_degree_nodes = {deg: [] for deg in observed_degrees}
infection_count_per_degree = {deg: {} for deg in observed_degrees}
# mutant_count_per_degree = {deg: {} for deg in observed_degrees}
# the degree for each host
# note: could also look at the average nearest neighbour degree...
# to do: we need just the last time here, so just use self.t?!
times = self.log.keys()
times.sort()
# fraction of infected hosts
survived[-1] = self.current_view.count(-1) / float(
self.contact_structure.n)
for strain_id in self.pathogen.ids.values():
the_count = self.current_view.count(strain_id)
survived[self.pathogen.names[
strain_id]] = the_count / float(self.contact_structure.n)
# to do: run through the strains rather than the nodes
for node_id in xrange(self.contact_structure.n):
_degree_nodes[degrees[node_id]].append(node_id)
# issue: probably not what causes the deviation...
state = copy(self.current_view[node_id])
# state = last[node_id]
the_strain = state
if the_strain != -1:
the_strain = self.pathogen.names[the_strain]
try:
infection_count_per_degree[degrees[node_id]][the_strain] += 1
except KeyError:
for strain_name in self.pathogen.names.values():
infection_count_per_degree[
degrees[node_id]][strain_name] = 0.0
infection_count_per_degree[degrees[node_id]][the_strain] = 1
# Get degree specific acquire type
        # use a dict keyed by degree; a flat list cannot be indexed by degree
        degree_spec_acquire_type = {deg: [] for deg in observed_degrees}
        for degree in observed_degrees:
            for a_node in _degree_nodes[degree]:
                degree_spec_acquire_type[degree].append(
                    self.current_infection_type[a_node]
                )
degree_count = Counter(degrees)
for degree in infection_count_per_degree:
for strain_id in infection_count_per_degree[degree]:
try:
degree_infected_fraction[degree][
strain_id
] = infection_count_per_degree[
degree
][strain_id] / float(
degree_count[degree]
) if degree_count[degree] != 0. else 0.
except KeyError:
degree_infected_fraction[
degree
] = {
strain_id:
infection_count_per_degree[degree][strain_id]
/ float(degree_count[degree])
} \
if degree_count[degree] != 0. else {strain_id: 0.}
return {
'abundance': survived,
'abundance_degree_specific': degree_infected_fraction
}
def _update_phase_in_sim_log(self, **params):
self.simulation_log['phases'][self.t] = params
self.simulation_log[
'phases'][self.t]['network'] = self.contact_structure.info
self.simulation_log[
'phases'][self.t]['pathogen'] = self.pathogen.info
if self.treatment is not None:
self.simulation_log[
'phases'][self.t]['treatment'] = self.treatment.info
self.simulation_log[
'phases'
][self.t]['acquire_type'] = copy(self.current_infection_type)
return 0
# ToDo: WRITE A METHOD THAT RENDERS SELF.LOG AND SELF.SIMULATION_LOG MORE
# READABLE
@property
def get_current_view(self):
if not self.contact_structure.has_dynamic_nodes or (
self.contact_structure.has_dynamic_nodes
and self._ignore_dyn_nodes_in_log
):
return copy(self.current_view) # default behaviour
else:
# Set all nodes that exceeded their lifespan (given by nodes_end)
# to a special state -2
new_current_view = map(
lambda i: self.current_view[i]
if self.contact_structure.nodes_end[i] > self.t else -2,
xrange(len(self.current_view)))
return copy(new_current_view)
    # to transform the priority queue holding the upcoming events into a
    # picklable list
def __getstate__(self):
d = dict(self.__dict__)
queue = d.pop('queue')
event_queue_list = []
while True:
try:
event_queue_list.append(queue.get_nowait())
except Empty:
break
d['event_queue_list'] = event_queue_list
return d
# to load the pickled event list back into a priority queue
def __setstate__(self, d):
if 'event_queue_list' in d:
event_queue_list = d.pop('event_queue_list')
d['queue'] = PriorityQueue()
while len(event_queue_list):
d['queue'].put_nowait(event_queue_list.pop())
self.__dict__.update(d)
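    # Illustrative round trip enabled by __getstate__/__setstate__ (sketch;
    # 'scenario' stands for an instance of this class):
    #   import pickle
    #   blob = pickle.dumps(scenario)     # the queue is flattened into a list
    #   restored = pickle.loads(blob)     # the list is fed back into a PriorityQueue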
# issue: Make this a static method of Spreading?
def Event(time, node, token, inf_event, source=None):
"""
Arguments:
- time: float, used to order the events in the priority queue
- node: name of the affected host
- token: The putative new infectious status of the host
- inf_event: Whether the Event is an infection (True) or a
mutation(False).
- source: The id of the node where the infection came from
"""
return time, (node, token, inf_event, source)
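# Illustrative use (sketch): an infection event at t = 1.5 that turns node 4
# into token 0 and was caused by source node 2:
#   Event(1.5, 4, 0, True, 2)   # -> (1.5, (4, 0, True, 2))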
``` |
{
"source": "Jilermo/GUI-for-the-control-of-differen-robots",
"score": 3
} |
#### File: Jilermo/GUI-for-the-control-of-differen-robots/guimotoman.py
```python
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import numpy as np
from matplotlib.widgets import Slider,CheckButtons,Button
from robolink import *
fig, ax = plt.subplots()
plt.subplots_adjust(left=0,bottom=0.28,right=1,top=1)
ax = plt.axes(projection = "3d")
def mmatrix(*matrices):
n=0
for m in matrices:
if (n==0):
ma=m
n=n+1
elif (n==1):
r=np.dot(ma,m)
n=n+1
else:
r=np.dot(r,m)
return r
def dibujar():
plt.draw()
plt.pause(0.001)
def sind(t):
res=np.sin(t*np.pi/180)
return res
def cosd(t):
res=np.cos(t*np.pi/180)
return res
def setaxis(lim=2):
x1=-lim
x2=lim
y1=-lim
y2=lim
z1=-lim
z2=lim
ax.set_xlim3d(x1,x2)
ax.set_ylim3d(y1,y2)
ax.set_zlim3d(z1,z2)
ax.view_init(elev=30,azim=40)
ax.grid(True)
def sistemafijo(rango=1):
x=[0,1*rango]
y=[0,1*rango]
z=[0,1*rango]
ax.plot3D(x,[0,0],[0,0],color='red')
ax.plot3D([0,0],y,[0,0],color='green')
ax.plot3D([0,0],[0,0],z,color='blue')
def rotax(t):
Rx=np.array(([1,0,0,0],[0,cosd(t),-sind(t),0],[0,sind(t),cosd(t),0],[0,0,0,1]))
return Rx
def rotay(t):
Ry=np.array(([cosd(t),0,sind(t),0],[0,1,0,0],[-sind(t),0,cosd(t),0],[0,0,0,1]))
return Ry
def rotaz(t):
Rz=np.array(([cosd(t),-sind(t),0,0],[sind(t),cosd(t),0,0],[0,0,1,0],[0,0,0,1]))
return Rz
def rotaxf(t,r):
px=r[0,3]
py=r[1,3]
pz=r[2,3]
Rx=np.array(([1,0,0,0],[0,cosd(t),-sind(t),0],[0,sind(t),cosd(t),0],[0,0,0,1]))
Rx=np.dot(Rx,r)
Rx[0,3]=px
Rx[1,3]=py
Rx[2,3]=pz
return Rx
def rotayf(t,r):
px=r[0,3]
py=r[1,3]
pz=r[2,3]
Ry=np.array(([cosd(t),0,sind(t),0],[0,1,0,0],[-sind(t),0,cosd(t),0],[0,0,0,1]))
Ry=np.dot(Ry,r)
Ry[0,3]=px
Ry[1,3]=py
Ry[2,3]=pz
return Ry
def rotazf(t,r):
px=r[0,3]
py=r[1,3]
pz=r[2,3]
Rz=np.array(([cosd(t),-sind(t),0,0],[sind(t),cosd(t),0,0],[0,0,1,0],[0,0,0,1]))
Rz=np.dot(Rz,r)
Rz[0,3]=px
Rz[1,3]=py
Rz[2,3]=pz
return Rz
def trasx(Dx):
Tx=np.array(([[1,0,0,Dx],[0,1,0,0],[0,0,1,0],[0,0,0,1]]))
return Tx
def trasy(Dy):
Ty=np.array(([[1,0,0,0],[0,1,0,Dy],[0,0,1,0],[0,0,0,1]]))
return Ty
def trasz(Dz):
Tz=np.array(([[1,0,0,0],[0,1,0,0],[0,0,1,Dz],[0,0,0,1]]))
return Tz
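# Illustrative composition of the helpers above (sketch): a frame rotated
# 90 degrees about z and then translated 2 units along x:
#   T = mmatrix(rotaz(90), trasx(2))   # equivalent to rotaz(90) @ trasx(2)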
def minv(R):
r=np.zeros((4,4))
a=np.zeros((3,3))
p=np.zeros((3,1))
a[0,0]=R[0,0]
a[0,1]=R[0,1]
a[0,2]=R[0,2]
a[1,0]=R[1,0]
a[1,1]=R[1,1]
a[1,2]=R[1,2]
a[2,0]=R[2,0]
a[2,1]=R[2,1]
a[2,2]=R[2,2]
a=np.transpose(a)
r[0,0]=a[0,0]
r[0,1]=a[0,1]
r[0,2]=a[0,2]
r[1,0]=a[1,0]
r[1,1]=a[1,1]
r[1,2]=a[1,2]
r[2,0]=a[2,0]
r[2,1]=a[2,1]
r[2,2]=a[2,2]
a=-1*a
p[0,0]=R[0,3]
p[1,0]=R[1,3]
p[2,0]=R[2,3]
p1=np.dot(a,p)
r[0,3]=p1[0,0]
r[1,3]=p1[1,0]
r[2,3]=p1[2,0]
r[3,3]=1
return r
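# Quick sanity check for minv (sketch): the inverse of a homogeneous
# transform satisfies minv(T) @ T == I up to numerical precision:
#   T = rotaz(30) @ trasx(2)
#   print(np.allclose(minv(T) @ T, np.eye(4)))   # expected: True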
def sistemamovil(r,rango=1):
ux=r[0,0]
uy=r[1,0]
uz=r[2,0]
vx=r[0,1]
vy=r[1,1]
vz=r[2,1]
wx=r[0,2]
wy=r[1,2]
wz=r[2,2]
px=r[0,3]
py=r[1,3]
pz=r[2,3]
    ax.plot3D([px,px+ux*rango],[py,py+uy*rango],[pz,pz+uz*rango],color='red')   # draw the moving u axis
    ax.plot3D([px,px+vx*rango],[py,py+vy*rango],[pz,pz+vz*rango],color='green') # draw the moving v axis
    ax.plot3D([px,px+wx*rango],[py,py+wy*rango],[pz,pz+wz*rango],color='blue')  # draw the moving w axis
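# Illustrative use (sketch): draw the fixed frame and a displaced, rotated
# moving frame:
#   setaxis(4); sistemafijo(); sistemamovil(trasx(2) @ rotaz(45)); dibujar()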
def ppp(d1,d2,d3):
t0=np.eye(4)
t01=trasz(d1)@rotax(-90)
t12=trasz(d2)@rotax(-90)@rotay(90)
t23=trasz(d3)@rotaz(180)
t02=t01@t12
t03=t02@t23
sistemafijo()
sistemamovil(t01)
sistemamovil(t02)
sistemamovil(t03)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
def rpp(t1,d2,d3):
t0=np.eye(4)
t01=rotaz(t1)
t12=trasz(d2)
t23=rotay(90)@trasz(d3)
t02=t01@t12
t03=t02@t23
sistemafijo()
sistemamovil(t01)
sistemamovil(t02)
sistemamovil(t03)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
def rrp(t1,t2,d3):
t0=np.eye(4)
t01=rotaz(t1)
t12=trasz(5)@rotay(90)@rotaz(90)@rotaz(t2)
t23=rotay(90)@rotaz(-90)@trasz(d3)
t02=t01@t12
t03=t02@t23
sistemafijo()
sistemamovil(t01)
sistemamovil(t02)
sistemamovil(t03)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
def rrr(t1,t2,t3):
t0=np.eye(4)
t01=rotaz(t1)
t12=trasz(4)@rotax(90)@rotaz(t2)
t23=trasx(4)@rotaz(t3)
t34=trasx(4)@rotay(90)@rotaz(-90)
t02=t01@t12
t03=t02@t23
t04=t03@t34
sistemafijo()
sistemamovil(t01)
sistemamovil(t02)
sistemamovil(t03)
sistemamovil(t04)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
ax.plot3D([t03[0,3],t04[0,3]],[t03[1,3],t04[1,3]],[t03[2,3],t04[2,3]],color='red')
def scara(t1,t2,d3,t4):
t0=np.eye(4)
t01=rotaz(t1)@trasz(4)
t12=trasx(4)
t23=rotaz(t2)@trasz(-1)
t34=trasx(4)@rotax(180)@rotaz(-90)
t45=trasz(d3)
t56=rotaz(t4)@trasz(1)
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
sistemafijo()
sistemamovil(t01)
sistemamovil(t02)
sistemamovil(t03)
sistemamovil(t04)
sistemamovil(t05)
sistemamovil(t06)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
ax.plot3D([t03[0,3],t04[0,3]],[t03[1,3],t04[1,3]],[t03[2,3],t04[2,3]],color='red')
ax.plot3D([t04[0,3],t05[0,3]],[t04[1,3],t05[1,3]],[t04[2,3],t05[2,3]],color='red')
ax.plot3D([t05[0,3],t06[0,3]],[t05[1,3],t06[1,3]],[t05[2,3],t06[2,3]],color='red')
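# Illustrative pose for the SCARA sketch above (angles in degrees, d3 is the
# prismatic offset):
#   setaxis(10); scara(30, -45, 2, 0); dibujar()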
def cobras800(t1,t2,d3,t4):
t0=np.eye(4)
t01=rotaz(t1)@trasz(342)
t12=trasx(425)
t23=rotaz(t2)@trasz(56)
t34=trasx(375)
t45=trasz(-210)@trasz(d3)
t56=rotax(180)@rotaz(-180)@rotaz(t4)
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
sistemafijo(100)
#sistemamovil(t01,100)
#sistemamovil(t02,100)
sistemamovil(t03,100)
#sistemamovil(t04,100)
#sistemamovil(t05,100)
sistemamovil(t06,100)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
ax.plot3D([t03[0,3],t04[0,3]],[t03[1,3],t04[1,3]],[t03[2,3],t04[2,3]],color='red')
ax.plot3D([t04[0,3],t05[0,3]],[t04[1,3],t05[1,3]],[t04[2,3],t05[2,3]],color='red')
ax.plot3D([t05[0,3],t06[0,3]],[t05[1,3],t06[1,3]],[t05[2,3],t06[2,3]],color='red')
def ur5(t1,t2,t3,t4,t5,t6):
t0=np.eye(4)
t01=rotaz(t1)@trasz(89.2)#
t12=trasy(-134.2)@rotax(90)@rotaz(t2)#
t23=trasy(425)
t34=trasz(-118.45)@rotaz(t3)#
t45=trasx(392.25)@rotaz(t4)#
t56=trasz(94.75)@rotax(-90)@rotaz(t5)#
t67=trasz(94.75)
t78=trasx(82.5)@rotay(90)@rotaz(-90)@rotaz(t6)#
t02=t01@t12
t03=t02@t23
t04=t03@t34
t05=t04@t45
t06=t05@t56
t07=t06@t67
t08=t07@t78
sistemafijo(100)
#sistemamovil(t01,100)
sistemamovil(t02,100)
sistemamovil(t03,100)
#sistemamovil(t04,100)
sistemamovil(t05,100)
sistemamovil(t06,100)
sistemamovil(t07,100)
#sistemamovil(t08,100)
ax.plot3D([t0[0,3],t01[0,3]],[t0[1,3],t01[1,3]],[t0[2,3],t01[2,3]],color='red')
ax.plot3D([t01[0,3],t02[0,3]],[t01[1,3],t02[1,3]],[t01[2,3],t02[2,3]],color='red')
ax.plot3D([t02[0,3],t03[0,3]],[t02[1,3],t03[1,3]],[t02[2,3],t03[2,3]],color='red')
ax.plot3D([t03[0,3],t04[0,3]],[t03[1,3],t04[1,3]],[t03[2,3],t04[2,3]],color='red')
ax.plot3D([t04[0,3],t05[0,3]],[t04[1,3],t05[1,3]],[t04[2,3],t05[2,3]],color='red')
ax.plot3D([t05[0,3],t06[0,3]],[t05[1,3],t06[1,3]],[t05[2,3],t06[2,3]],color='red')
ax.plot3D([t06[0,3],t07[0,3]],[t06[1,3],t07[1,3]],[t06[2,3],t07[2,3]],color='red')
ax.plot3D([t07[0,3],t08[0,3]],[t07[1,3],t08[1,3]],[t07[2,3],t08[2,3]],color='red')
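# Illustrative pose for the UR5 sketch above (link lengths are in mm, so a
# larger axis range is used):
#   setaxis(800); ur5(0, -45, 90, -45, 90, 0); dibujar()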
def motoman(tb,t1a,t2a,t3a,t4a,t5a,t6a,t7a,t1b,t2b,t3b,t4b,t5b,t6b,t7b):
T0=np.eye(4)
Ti=rotaz(tb)
Ti1=Ti@trasz(8)
Ti2=Ti1@trasx(1.57)
Ti3=Ti2@trasy(2.5)@rotax(-90)
Tib3=Ti2@trasy(-2.5)@rotax(90)
T01=Ti3@rotaz(270)@rotaz(t1a)@trasz(.3);
T12=trasx(-1.09)
T23=trasz(2.5)
T34=trasx(1.09)@rotay(90)@rotaz(t2a)
T45=rotay(-90)@trasx(1.2)
T56=trasz(1)
T67=trasx(-1.2)
T78=trasz(0.45)@rotaz(t3a)
T89=trasz(2.225)
T910=trasx(1.04)
T1011=trasz(1.225)
T1112=trasx(-1.04)@rotay(90)@rotaz(t4a)
T1213=rotay(-90)@trasx(-0.98)
T1314=trasz(1.4)
T1415=trasx(0.98)
T1516=trasz(0.7)@rotaz(t5a)
T1617=trasz(0.7)
T1718=trasx(-0.86)
T1819=trasz(1.4)
T1920=trasx(0.86)@rotay(90)@rotaz(t6a)
T2021=rotay(-90)@trasx(0.8)
T2122=trasz(0.9)
T2223=trasx(-0.8)
T2324=trasz(0.9)@rotaz(90)@rotaz(t7a)
T02=T01@T12
T03=T02@T23
T04=T03@T34
T05=T04@T45
T06=T05@T56
T07=T06@T67
T08=T07@T78
T09=T08@T89
T10=T09@T910
T11=T10@T1011
T12=T11@T1112
T13=T12@T1213
T14=T13@T1314
T15=T14@T1415
T16=T15@T1516
T17=T16@T1617
T18=T17@T1718
T19=T18@T1819
T20=T19@T1920
T21=T20@T2021
T22=T21@T2122
T23=T22@T2223
T24=T23@T2324
print(T24)
Tb01=Tib3@rotaz(270)@rotaz(t1b)@trasz(0.3)
Tb12=trasx(-1.09)
Tb23=trasz(2.5)
TB34=trasx(1.09)@rotay(90)@rotaz(t2b)
Tb45=rotay(-90)@trasx(1.2)
Tb56=trasz(1)
Tb67=trasx(-1.2)
Tb78=trasz(0.45)@rotaz(t3b)
Tb89=trasz(2.225)
TB910=trasx(1.04)
Tb1011=trasz(1.225)
Tb1112=trasx(-1.04)@rotay(90)@rotaz(t4b)
Tb1213=rotay(-90)@trasx(-0.98)
Tb1314=trasz(1.4)
Tb1415=trasx(0.98)
Tb1516=trasz(0.7)@rotaz(t5b)
Tb1617=trasz(0.7)
Tb1718=trasx(-0.86)
Tb1819=trasz(1.4)
Tb1920=trasx(0.86)@rotay(90)@rotaz(t6b)
Tb2021=rotay(-90)@trasx(0.8)
Tb2122=trasz(0.9)
Tb2223=trasx(-0.8)
Tb2324=trasz(0.9)@rotaz(90)@rotaz(t7b)
Tb02=Tb01@Tb12
Tb03=Tb02@Tb23
Tb04=Tb03@TB34
Tb05=Tb04@Tb45
Tb06=Tb05@Tb56
Tb07=Tb06@Tb67
Tb08=Tb07@Tb78
Tb09=Tb08@Tb89
Tb10=Tb09@TB910
Tb11=Tb10@Tb1011
Tb12=Tb11@Tb1112
Tb13=Tb12@Tb1213
Tb14=Tb13@Tb1314
Tb15=Tb14@Tb1415
Tb16=Tb15@Tb1516
Tb17=Tb16@Tb1617
Tb18=Tb17@Tb1718
Tb19=Tb18@Tb1819
Tb20=Tb19@Tb1920
Tb21=Tb20@Tb2021
Tb22=Tb21@Tb2122
Tb23=Tb22@Tb2223
Tb24=Tb23@Tb2324
sistemafijo()
sistemamovil(T0)
sistemamovil(T01)
sistemamovil(T04)
sistemamovil(T08)
sistemamovil(T12)
sistemamovil(T16)
sistemamovil(T20)
sistemamovil(T24)
sistemamovil(T0)
sistemamovil(Tb01)
sistemamovil(Tb04)
sistemamovil(Tb08)
sistemamovil(Tb12)
sistemamovil(Tb16)
sistemamovil(Tb20)
sistemamovil(Tb24)
ax.plot3D([Ti[0,3], Ti1[0,3]], [Ti[1,3], Ti1[1,3]], [Ti[2,3], Ti1[2,3]],color='red')
ax.plot3D([Ti1[0,3], Ti2[0,3]], [Ti1[1,3], Ti2[1,3]], [Ti1[2,3], Ti2[2,3]],color='red')
ax.plot3D([Ti2[0,3], Ti3[0,3]], [Ti2[1,3], Ti3[1,3]], [Ti2[2,3], Ti3[2,3]],color='red')
ax.plot3D([Ti3[0,3], T01[0,3]], [Ti3[1,3], T01[1,3]], [Ti3[2,3], T01[2,3]],color='red')
ax.plot3D([T01[0,3], T02[0,3]], [T01[1,3], T02[1,3]], [T01[2,3], T02[2,3]],color='red')
ax.plot3D([T02[0,3], T03[0,3]], [T02[1,3], T03[1,3]], [T02[2,3], T03[2,3]],color='red')
ax.plot3D([T03[0,3], T04[0,3]], [T03[1,3], T04[1,3]], [T03[2,3], T04[2,3]],color='red')
ax.plot3D([T04[0,3], T05[0,3]], [T04[1,3], T05[1,3]], [T04[2,3], T05[2,3]],color='red')
ax.plot3D([T05[0,3], T06[0,3]], [T05[1,3], T06[1,3]], [T05[2,3], T06[2,3]],color='red')
ax.plot3D([T06[0,3], T07[0,3]], [T06[1,3], T07[1,3]], [T06[2,3], T07[2,3]],color='red')
ax.plot3D([T07[0,3], T08[0,3]], [T07[1,3], T08[1,3]], [T07[2,3], T08[2,3]],color='red')
ax.plot3D([T08[0,3], T09[0,3]], [T08[1,3], T09[1,3]], [T08[2,3], T09[2,3]],color='red')
ax.plot3D([T09[0,3], T10[0,3]], [T09[1,3], T10[1,3]], [T09[2,3], T10[2,3]],color='red')
ax.plot3D([T10[0,3], T11[0,3]], [T10[1,3], T11[1,3]], [T10[2,3], T11[2,3]],color='red')
ax.plot3D([T11[0,3], T12[0,3]], [T11[1,3], T12[1,3]], [T11[2,3], T12[2,3]],color='red')
ax.plot3D([T12[0,3], T13[0,3]], [T12[1,3], T13[1,3]], [T12[2,3], T13[2,3]],color='red')
ax.plot3D([T13[0,3], T14[0,3]], [T13[1,3], T14[1,3]], [T13[2,3], T14[2,3]],color='red')
ax.plot3D([T14[0,3], T15[0,3]], [T14[1,3], T15[1,3]], [T14[2,3], T15[2,3]],color='red')
ax.plot3D([T15[0,3], T16[0,3]], [T15[1,3], T16[1,3]], [T15[2,3], T16[2,3]],color='red')
ax.plot3D([T16[0,3], T17[0,3]], [T16[1,3], T17[1,3]], [T16[2,3], T17[2,3]],color='red')
ax.plot3D([T17[0,3], T18[0,3]], [T17[1,3], T18[1,3]], [T17[2,3], T18[2,3]],color='red')
ax.plot3D([T18[0,3], T19[0,3]], [T18[1,3], T19[1,3]], [T18[2,3], T19[2,3]],color='red')
ax.plot3D([T19[0,3], T20[0,3]], [T19[1,3], T20[1,3]], [T19[2,3], T20[2,3]],color='red')
ax.plot3D([T20[0,3], T21[0,3]], [T20[1,3], T21[1,3]], [T20[2,3], T21[2,3]],color='red')
ax.plot3D([T21[0,3], T22[0,3]], [T21[1,3], T22[1,3]], [T21[2,3], T22[2,3]],color='red')
ax.plot3D([T22[0,3], T23[0,3]], [T22[1,3], T23[1,3]], [T22[2,3], T23[2,3]],color='red')
ax.plot3D([T23[0,3], T24[0,3]], [T23[1,3], T24[1,3]], [T23[2,3], T24[2,3]],color='red')
ax.plot3D([Ti[0,3], Ti1[0,3]], [Ti[1,3], Ti1[1,3]], [Ti[2,3], Ti1[2,3]],color='red')
ax.plot3D([Ti1[0,3], Ti2[0,3]], [Ti1[1,3], Ti2[1,3]], [Ti1[2,3], Ti2[2,3]],color='red')
ax.plot3D([Ti2[0,3], Tib3[0,3]], [Ti2[1,3], Tib3[1,3]], [Ti2[2,3], Tib3[2,3]],color='red')
ax.plot3D([Tib3[0,3], Tb01[0,3]], [Tib3[1,3], Tb01[1,3]], [Tib3[2,3], Tb01[2,3]],color='red')
ax.plot3D([Tb01[0,3], Tb02[0,3]], [Tb01[1,3], Tb02[1,3]], [Tb01[2,3], Tb02[2,3]],color='red')
ax.plot3D([Tb02[0,3], Tb03[0,3]], [Tb02[1,3], Tb03[1,3]], [Tb02[2,3], Tb03[2,3]],color='red')
ax.plot3D([Tb03[0,3], Tb04[0,3]], [Tb03[1,3], Tb04[1,3]], [Tb03[2,3], Tb04[2,3]],color='red')
ax.plot3D([Tb04[0,3], Tb05[0,3]], [Tb04[1,3], Tb05[1,3]], [Tb04[2,3], Tb05[2,3]],color='red')
ax.plot3D([Tb05[0,3], Tb06[0,3]], [Tb05[1,3], Tb06[1,3]], [Tb05[2,3], Tb06[2,3]],color='red')
ax.plot3D([Tb06[0,3], Tb07[0,3]], [Tb06[1,3], Tb07[1,3]], [Tb06[2,3], Tb07[2,3]],color='red')
ax.plot3D([Tb07[0,3], Tb08[0,3]], [Tb07[1,3], Tb08[1,3]], [Tb07[2,3], Tb08[2,3]],color='red')
ax.plot3D([Tb08[0,3], Tb09[0,3]], [Tb08[1,3], Tb09[1,3]], [Tb08[2,3], Tb09[2,3]],color='red')
ax.plot3D([Tb09[0,3], Tb10[0,3]], [Tb09[1,3], Tb10[1,3]], [Tb09[2,3], Tb10[2,3]],color='red')
ax.plot3D([Tb10[0,3], Tb11[0,3]], [Tb10[1,3], Tb11[1,3]], [Tb10[2,3], Tb11[2,3]],color='red')
ax.plot3D([Tb11[0,3], Tb12[0,3]], [Tb11[1,3], Tb12[1,3]], [Tb11[2,3], Tb12[2,3]],color='red')
ax.plot3D([Tb12[0,3], Tb13[0,3]], [Tb12[1,3], Tb13[1,3]], [Tb12[2,3], Tb13[2,3]],color='red')
ax.plot3D([Tb13[0,3], Tb14[0,3]], [Tb13[1,3], Tb14[1,3]], [Tb13[2,3], Tb14[2,3]],color='red')
ax.plot3D([Tb14[0,3], Tb15[0,3]], [Tb14[1,3], Tb15[1,3]], [Tb14[2,3], Tb15[2,3]],color='red')
ax.plot3D([Tb15[0,3], Tb16[0,3]], [Tb15[1,3], Tb16[1,3]], [Tb15[2,3], Tb16[2,3]],color='red')
ax.plot3D([Tb16[0,3], Tb17[0,3]], [Tb16[1,3], Tb17[1,3]], [Tb16[2,3], Tb17[2,3]],color='red')
ax.plot3D([Tb17[0,3], Tb18[0,3]], [Tb17[1,3], Tb18[1,3]], [Tb17[2,3], Tb18[2,3]],color='red')
ax.plot3D([Tb18[0,3], Tb19[0,3]], [Tb18[1,3], Tb19[1,3]], [Tb18[2,3], Tb19[2,3]],color='red')
ax.plot3D([Tb19[0,3], Tb20[0,3]], [Tb19[1,3], Tb20[1,3]], [Tb19[2,3], Tb20[2,3]],color='red')
ax.plot3D([Tb20[0,3], Tb21[0,3]], [Tb20[1,3], Tb21[1,3]], [Tb20[2,3], Tb21[2,3]],color='red')
ax.plot3D([Tb21[0,3], Tb22[0,3]], [Tb21[1,3], Tb22[1,3]], [Tb21[2,3], Tb22[2,3]],color='red')
ax.plot3D([Tb22[0,3], Tb23[0,3]], [Tb22[1,3], Tb23[1,3]], [Tb22[2,3], Tb23[2,3]],color='red')
ax.plot3D([Tb23[0,3], Tb24[0,3]], [Tb23[1,3], Tb24[1,3]], [Tb23[2,3], Tb24[2,3]],color='red')
def accmotoman(tb,t1a,t2a,t3a,t4a,t5a,t6a,t7a,t1b,t2b,t3b,t4b,t5b,t6b,t7b):
T0=np.eye(4)
Ti=trasz(893.5)
Ti1=Ti@trasx(92.5)@rotaz(tb)
Ti2=Ti1@trasx(100)@trasz(306.5)
Ti3=Ti2@rotax(-90)@rotaz(-180)@rotaz(t1a)@trasz(265)
Tib3=Ti2@rotax(90)@rotaz(-180)@rotaz(t1b)@trasz(265)
T01=Ti3@rotax(-90)@rotaz(t2a)
T12=trasz(-80)
T23=trasy(-90)
T34=trasz(80)
T45=trasy(-90)@rotax(90)@rotaz(t3a)
T56=trasz(90)
T67=trasy(-80)
T78=trasz(90)
T89=trasy(80)@rotax(-90)@rotaz(t4a)
T910=trasz(80)
T1011=trasy(-90)
T1112=trasz(-80)
T1213=trasy(-90)@rotax(90)@rotaz(t5a)
T1314=trasz(90)
T1415=trasy(80)
T1516=trasz(90)
T1617=trasy(-80)@rotax(-90)@rotaz(t6a)
T1718=trasz(-80)
T1819=trasy(-87.5)
T1920=trasz(80)
T2021=trasy(-87.5)@rotax(90)@rotaz(t7a)
T02=T01@T12
T03=T02@T23
T04=T03@T34
T05=T04@T45
T06=T05@T56
T07=T06@T67
T08=T07@T78
T09=T08@T89
T10=T09@T910
T11=T10@T1011
T12=T11@T1112
T13=T12@T1213
T14=T13@T1314
T15=T14@T1415
T16=T15@T1516
T17=T16@T1617
T18=T17@T1718
T19=T18@T1819
T20=T19@T1920
T21=T20@T2021
print("derecho ++++++")
print(T21)
Tb01=Tib3@rotax(-90)@rotaz(t2b)
Tb12=trasz(-80)
Tb23=trasy(-90)
Tb34=trasz(80)
Tb45=trasy(-90)@rotax(90)@rotaz(t3b)
Tb56=trasz(90)
Tb67=trasy(-80)
Tb78=trasz(90)
Tb89=trasy(80)@rotax(-90)@rotaz(t4b)
Tb910=trasz(80)
Tb1011=trasy(-90)
Tb1112=trasz(-80)
Tb1213=trasy(-90)@rotax(90)@rotaz(t5b)
Tb1314=trasz(90)
Tb1415=trasy(80)
Tb1516=trasz(90)
Tb1617=trasy(-80)@rotax(-90)@rotaz(t6b)
Tb1718=trasz(-80)
Tb1819=trasy(-87.5)
Tb1920=trasz(80)
Tb2021=trasy(-87.5)@rotax(90)@rotaz(t7b)
Tb02=Tb01@Tb12
Tb03=Tb02@Tb23
Tb04=Tb03@Tb34
Tb05=Tb04@Tb45
Tb06=Tb05@Tb56
Tb07=Tb06@Tb67
Tb08=Tb07@Tb78
Tb09=Tb08@Tb89
Tb10=Tb09@Tb910
Tb11=Tb10@Tb1011
Tb12=Tb11@Tb1112
Tb13=Tb12@Tb1213
Tb14=Tb13@Tb1314
Tb15=Tb14@Tb1415
Tb16=Tb15@Tb1516
Tb17=Tb16@Tb1617
Tb18=Tb17@Tb1718
Tb19=Tb18@Tb1819
Tb20=Tb19@Tb1920
Tb21=Tb20@Tb2021
print("Izquierdo ++++++")
print(Tb21)
sistemafijo(100)
sistemamovil(T0,100)
sistemamovil(T01,100)
sistemamovil(T05,100)
sistemamovil(T09,100)
sistemamovil(T13,100)
sistemamovil(T17,100)
sistemamovil(T21,100)
sistemamovil(Tb01,100)
sistemamovil(Tb05,100)
sistemamovil(Tb09,100)
sistemamovil(Tb13,100)
sistemamovil(Tb17,100)
sistemamovil(Tb21,100)
ax.plot3D([T0[0,3], Ti[0,3]], [T0[1,3], Ti[1,3]], [T0[2,3], Ti[2,3]],color='red')
ax.plot3D([Ti[0,3], Ti1[0,3]], [Ti[1,3], Ti1[1,3]], [Ti[2,3], Ti1[2,3]],color='red')
ax.plot3D([Ti1[0,3], Ti2[0,3]], [Ti1[1,3], Ti2[1,3]], [Ti1[2,3], Ti2[2,3]],color='red')
ax.plot3D([Ti2[0,3], Ti3[0,3]], [Ti2[1,3], Ti3[1,3]], [Ti2[2,3], Ti3[2,3]],color='red')
ax.plot3D([Ti3[0,3], T01[0,3]], [Ti3[1,3], T01[1,3]], [Ti3[2,3], T01[2,3]],color='red')
ax.plot3D([T01[0,3], T02[0,3]], [T01[1,3], T02[1,3]], [T01[2,3], T02[2,3]],color='red')
ax.plot3D([T02[0,3], T03[0,3]], [T02[1,3], T03[1,3]], [T02[2,3], T03[2,3]],color='red')
ax.plot3D([T03[0,3], T04[0,3]], [T03[1,3], T04[1,3]], [T03[2,3], T04[2,3]],color='red')
ax.plot3D([T04[0,3], T05[0,3]], [T04[1,3], T05[1,3]], [T04[2,3], T05[2,3]],color='red')
ax.plot3D([T05[0,3], T06[0,3]], [T05[1,3], T06[1,3]], [T05[2,3], T06[2,3]],color='red')
ax.plot3D([T06[0,3], T07[0,3]], [T06[1,3], T07[1,3]], [T06[2,3], T07[2,3]],color='red')
ax.plot3D([T07[0,3], T08[0,3]], [T07[1,3], T08[1,3]], [T07[2,3], T08[2,3]],color='red')
ax.plot3D([T08[0,3], T09[0,3]], [T08[1,3], T09[1,3]], [T08[2,3], T09[2,3]],color='red')
ax.plot3D([T09[0,3], T10[0,3]], [T09[1,3], T10[1,3]], [T09[2,3], T10[2,3]],color='red')
ax.plot3D([T10[0,3], T11[0,3]], [T10[1,3], T11[1,3]], [T10[2,3], T11[2,3]],color='red')
ax.plot3D([T11[0,3], T12[0,3]], [T11[1,3], T12[1,3]], [T11[2,3], T12[2,3]],color='red')
ax.plot3D([T12[0,3], T13[0,3]], [T12[1,3], T13[1,3]], [T12[2,3], T13[2,3]],color='red')
ax.plot3D([T13[0,3], T14[0,3]], [T13[1,3], T14[1,3]], [T13[2,3], T14[2,3]],color='red')
ax.plot3D([T14[0,3], T15[0,3]], [T14[1,3], T15[1,3]], [T14[2,3], T15[2,3]],color='red')
ax.plot3D([T15[0,3], T16[0,3]], [T15[1,3], T16[1,3]], [T15[2,3], T16[2,3]],color='red')
ax.plot3D([T16[0,3], T17[0,3]], [T16[1,3], T17[1,3]], [T16[2,3], T17[2,3]],color='red')
ax.plot3D([T17[0,3], T18[0,3]], [T17[1,3], T18[1,3]], [T17[2,3], T18[2,3]],color='red')
ax.plot3D([T18[0,3], T19[0,3]], [T18[1,3], T19[1,3]], [T18[2,3], T19[2,3]],color='red')
ax.plot3D([T19[0,3], T20[0,3]], [T19[1,3], T20[1,3]], [T19[2,3], T20[2,3]],color='red')
ax.plot3D([T20[0,3], T21[0,3]], [T20[1,3], T21[1,3]], [T20[2,3], T21[2,3]],color='red')
ax.plot3D([Ti[0,3], Ti1[0,3]], [Ti[1,3], Ti1[1,3]], [Ti[2,3], Ti1[2,3]],color='red')
ax.plot3D([Ti1[0,3], Ti2[0,3]], [Ti1[1,3], Ti2[1,3]], [Ti1[2,3], Ti2[2,3]],color='red')
ax.plot3D([Ti2[0,3], Tib3[0,3]], [Ti2[1,3], Tib3[1,3]], [Ti2[2,3], Tib3[2,3]],color='red')
ax.plot3D([Tib3[0,3], Tb01[0,3]], [Tib3[1,3], Tb01[1,3]], [Tib3[2,3], Tb01[2,3]],color='red')
ax.plot3D([Tb01[0,3], Tb02[0,3]], [Tb01[1,3], Tb02[1,3]], [Tb01[2,3], Tb02[2,3]],color='red')
ax.plot3D([Tb02[0,3], Tb03[0,3]], [Tb02[1,3], Tb03[1,3]], [Tb02[2,3], Tb03[2,3]],color='red')
ax.plot3D([Tb03[0,3], Tb04[0,3]], [Tb03[1,3], Tb04[1,3]], [Tb03[2,3], Tb04[2,3]],color='red')
ax.plot3D([Tb04[0,3], Tb05[0,3]], [Tb04[1,3], Tb05[1,3]], [Tb04[2,3], Tb05[2,3]],color='red')
ax.plot3D([Tb05[0,3], Tb06[0,3]], [Tb05[1,3], Tb06[1,3]], [Tb05[2,3], Tb06[2,3]],color='red')
ax.plot3D([Tb06[0,3], Tb07[0,3]], [Tb06[1,3], Tb07[1,3]], [Tb06[2,3], Tb07[2,3]],color='red')
ax.plot3D([Tb07[0,3], Tb08[0,3]], [Tb07[1,3], Tb08[1,3]], [Tb07[2,3], Tb08[2,3]],color='red')
ax.plot3D([Tb08[0,3], Tb09[0,3]], [Tb08[1,3], Tb09[1,3]], [Tb08[2,3], Tb09[2,3]],color='red')
ax.plot3D([Tb09[0,3], Tb10[0,3]], [Tb09[1,3], Tb10[1,3]], [Tb09[2,3], Tb10[2,3]],color='red')
ax.plot3D([Tb10[0,3], Tb11[0,3]], [Tb10[1,3], Tb11[1,3]], [Tb10[2,3], Tb11[2,3]],color='red')
ax.plot3D([Tb11[0,3], Tb12[0,3]], [Tb11[1,3], Tb12[1,3]], [Tb11[2,3], Tb12[2,3]],color='red')
ax.plot3D([Tb12[0,3], Tb13[0,3]], [Tb12[1,3], Tb13[1,3]], [Tb12[2,3], Tb13[2,3]],color='red')
ax.plot3D([Tb13[0,3], Tb14[0,3]], [Tb13[1,3], Tb14[1,3]], [Tb13[2,3], Tb14[2,3]],color='red')
ax.plot3D([Tb14[0,3], Tb15[0,3]], [Tb14[1,3], Tb15[1,3]], [Tb14[2,3], Tb15[2,3]],color='red')
ax.plot3D([Tb15[0,3], Tb16[0,3]], [Tb15[1,3], Tb16[1,3]], [Tb15[2,3], Tb16[2,3]],color='red')
ax.plot3D([Tb16[0,3], Tb17[0,3]], [Tb16[1,3], Tb17[1,3]], [Tb16[2,3], Tb17[2,3]],color='red')
ax.plot3D([Tb17[0,3], Tb18[0,3]], [Tb17[1,3], Tb18[1,3]], [Tb17[2,3], Tb18[2,3]],color='red')
ax.plot3D([Tb18[0,3], Tb19[0,3]], [Tb18[1,3], Tb19[1,3]], [Tb18[2,3], Tb19[2,3]],color='red')
ax.plot3D([Tb19[0,3], Tb20[0,3]], [Tb19[1,3], Tb20[1,3]], [Tb19[2,3], Tb20[2,3]],color='red')
ax.plot3D([Tb20[0,3], Tb21[0,3]], [Tb20[1,3], Tb21[1,3]], [Tb20[2,3], Tb21[2,3]],color='red')
def animsistemamovilx(t):
n=0
while n<t:
ax.cla()
        setaxis(1)  # setaxis takes a single symmetric axis limit
r=rotax(n)
sistemafijo()
sistemamovil(r)
n=n+1
dibujar()
def animsistemamovily(t):
n=0
while n<t:
ax.cla()
        setaxis(1)  # setaxis takes a single symmetric axis limit
r=rotay(n)
sistemafijo()
sistemamovil(r)
n=n+1
dibujar()
def animsistemamovilz(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotaz(n)
sistemafijo()
sistemamovil(r)
n=n+1
dibujar()
def muevemoscax(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotax(n)
ax.scatter(0,0.4,0.6,'o')
        Auvw=np.array([[0],[0.4],[0.6],[1]])  # homogeneous coordinate to match the 4x4 rotation
Axyz=np.dot(r,Auvw)
x=Axyz[0,0]
y=Axyz[1,0]
z=Axyz[2,0]
sistemafijo()
sistemamovil(r)
ax.scatter(x,y,z,'o')
n=n+1
dibujar()
def muevemoscay(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotay(n)
ax.scatter(0,0.4,0.6,'o')
        Auvw=np.array([[0],[0.4],[0.6],[1]])  # homogeneous coordinate to match the 4x4 rotation
Axyz=np.dot(r,Auvw)
x=Axyz[0,0]
y=Axyz[1,0]
z=Axyz[2,0]
sistemafijo()
sistemamovil(r)
ax.scatter(x,y,z,'o')
n=n+1
dibujar()
def muevemoscaz(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotaz(n)
ax.scatter(0,0.4,0.6,'o')
        Auvw=np.array([[0],[0.4],[0.6],[1]])  # homogeneous coordinate to match the 4x4 rotation
Axyz=np.dot(r,Auvw)
x=Axyz[0,0]
y=Axyz[1,0]
z=Axyz[2,0]
sistemafijo()
sistemamovil(r)
ax.scatter(x,y,z,'o')
n=n+1
dibujar()
def dibujarcaja(d=1,w=1,l=1,r=0):
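    # Draws a wireframe box with depth d (x), width w (y) and length l (z): the eight
    # corners are homogeneous 4x1 vectors, transformed by r and joined by 12 edges.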
#setaxis()
a1=np.array([[0],[0],[0],[1]], dtype=object)
b1=np.array([[0],[0],[l],[1]], dtype=object)
c1=np.array([[0],[w],[l],[1]], dtype=object)
d1=np.array([[0],[w],[0],[1]], dtype=object)
e1=np.array([[d],[0],[0],[1]], dtype=object)
f1=np.array([[d],[0],[l],[1]], dtype=object)
g1=np.array([[d],[w],[l],[1]], dtype=object)
h1=np.array([[d],[w],[0],[1]], dtype=object)
a=np.dot(r,a1)
b=np.dot(r,b1)
c=np.dot(r,c1)
d=np.dot(r,d1)
e=np.dot(r,e1)
f=np.dot(r,f1)
g=np.dot(r,g1)
h=np.dot(r,h1)
ax.plot3D([a[0,0],b[0,0]],[a[1,0],b[1,0]],[a[2,0],b[2,0]],color='red')
ax.plot3D([a[0,0],d[0,0]],[a[1,0],d[1,0]],[a[2,0],d[2,0]],color='red')
ax.plot3D([a[0,0],e[0,0]],[a[1,0],e[1,0]],[a[2,0],e[2,0]],color='red')
ax.plot3D([b[0,0],c[0,0]],[b[1,0],c[1,0]],[b[2,0],c[2,0]],color='red')
ax.plot3D([b[0,0],f[0,0]],[b[1,0],f[1,0]],[b[2,0],f[2,0]],color='red')
ax.plot3D([c[0,0],d[0,0]],[c[1,0],d[1,0]],[c[2,0],d[2,0]],color='red')
ax.plot3D([c[0,0],g[0,0]],[c[1,0],g[1,0]],[c[2,0],g[2,0]],color='red')
ax.plot3D([d[0,0],h[0,0]],[d[1,0],h[1,0]],[d[2,0],h[2,0]],color='red')
ax.plot3D([e[0,0],h[0,0]],[e[1,0],h[1,0]],[e[2,0],h[2,0]],color='red')
ax.plot3D([e[0,0],f[0,0]],[e[1,0],f[1,0]],[e[2,0],f[2,0]],color='red')
ax.plot3D([g[0,0],f[0,0]],[g[1,0],f[1,0]],[g[2,0],f[2,0]],color='red')
ax.plot3D([g[0,0],h[0,0]],[g[1,0],h[1,0]],[g[2,0],h[2,0]],color='red')
def animcajax(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotax(n)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
def animcajay(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotay(n)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
def animcajaz(t):
n=0
while n<t:
ax.cla()
setaxis()
r=rotaz(n)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
def animcajaxyz(t1,t2,t3,t4):
n=0
while n<t1:
ax.cla()
setaxis()
r=rotaz(n)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
Rc=r
n=0
while n<t2:
ax.cla()
setaxis()
r=rotax(n)
r=np.dot(r,Rc)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
Rc=r
n=0
while n<t3:
ax.cla()
setaxis()
r=rotay(n)
r=np.dot(Rc,r)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
Rc=r
n=0
while n<t4:
ax.cla()
setaxis()
r=rotax(n)
r=np.dot(r,Rc)
dibujarcaja(r=r)
n=n+1
sistemafijo()
dibujar()
# Ryft4 Rzft2 Rxft1 I Rxmt3 Rzmt5
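# animcajaxyz2 drives the five angles one at a time: each while-loop advances a single
# angle while the others keep their current values, and mmatrix (a helper defined
# earlier, presumably multiplying its arguments in order) composes the rotations
# applied to the box.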
def animcajaxyz2(t1,t2,t3,t4,t5):
n1=0
n2=0
n3=0
n4=0
n5=0
while n1<t1:
ax.cla()
setaxis()
r=mmatrix(rotay(n4),rotaz(n2),rotax(n1),rotax(n3),rotaz(n5))
dibujarcaja(r=r)
n1=n1+1
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n2<t2:
ax.cla()
setaxis()
r=mmatrix(rotay(n4),rotaz(n2),rotax(n1),rotax(n3),rotaz(n5))
dibujarcaja(r=r)
n2=n2+1
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n3<t3:
ax.cla()
setaxis()
r=mmatrix(rotay(n4),rotaz(n2),rotax(n1),rotax(n3),rotaz(n5))
dibujarcaja(r=r)
n3=n3+1
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n4<t4:
ax.cla()
setaxis()
r=mmatrix(rotay(n4),rotaz(n2),rotax(n1),rotax(n3),rotaz(n5))
dibujarcaja(r=r)
n4=n4+1
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n5<t5:
ax.cla()
setaxis()
r=mmatrix(rotay(n4),rotaz(n2),rotax(n1),rotax(n3),rotaz(n5))
dibujarcaja(r=r)
n5=n5+1
sistemafijo()
sistemamovil(r)
dibujar()
def animcajaxyzt(Dx,t1,t2):
n=0
while n<Dx+0.01:
ax.cla()
setaxis(4)
r=trasx(n)
print(r)
dibujarcaja(r=r)
n=n+0.2
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n<t1+0.01:
ax.cla()
setaxis(4)
r=rotaz(n)
r=np.dot(Rc,r)
dibujarcaja(r=r)
n=n+5
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n<t2+0.01:
ax.cla()
setaxis(4)
r=rotaxf(n,Rc)
dibujarcaja(r=r)
n=n+5
sistemafijo()
sistemamovil(r)
dibujar()
def animcajaxyzt2(Dx,Dy,t1,t2):
n=0
while n<Dx+0.01:
ax.cla()
setaxis(4)
r=trasx(n)
a=minv(r)
a1=np.linalg.inv(r)
print('incio')
print('r')
print(np.round(r,3))
print('a')
print(np.round(a,3))
print('a1')
print(np.round(a1,3))
print('fin')
dibujarcaja(r=r)
n=n+0.2
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n<Dy+0.01:
ax.cla()
setaxis(4)
r=trasy(n)
r=np.dot(Rc,r)
a=minv(r)
a1=np.linalg.inv(r)
print('incio')
print('r')
print(np.round(r,3))
print('a')
print(np.round(a,3))
print('a1')
print(np.round(a1,3))
print('fin')
dibujarcaja(r=r)
n=n+0.2
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n<t1+0.01:
ax.cla()
setaxis(4)
r=rotaz(n)
r=np.dot(Rc,r)
a=minv(r)
a1=np.linalg.inv(r)
print('incio')
print('r')
print(np.round(r,3))
print('a')
print(np.round(a,3))
print('a1')
print(np.round(a1,3))
print('fin')
dibujarcaja(r=r)
n=n+5
sistemafijo()
sistemamovil(r)
dibujar()
Rc=r
n=0
while n<t2+0.01:
ax.cla()
setaxis(4)
r=rotaxf(n,Rc)
a=minv(r)
a1=np.linalg.inv(r)
print('incio')
print('r')
print(np.round(r,3))
print('a')
print(np.round(a,3))
print('a1')
print(np.round(a1,3))
print('fin')
dibujarcaja(r=r)
n=n+5
sistemafijo()
sistemamovil(r)
dibujar()
def animejeresaotro():
n=0
while n<3+0.01:
ax.cla()
setaxis(10)
tab=trasx(n)
n=n+0.2
sistemafijo()
sistemamovil(tab)
dibujar()
Rtab=tab
n=0
while n<5+0.01:
ax.cla()
setaxis(10)
tab=trasy(n)
tab=np.dot(Rtab,tab)
n=n+0.2
sistemafijo()
sistemamovil(tab)
dibujar()
Rtab=tab
n=0
while n<45+0.01:
ax.cla()
setaxis(10)
tab=rotax(n)
tab=np.dot(Rtab,tab)
n=n+5
sistemafijo()
sistemamovil(tab)
dibujar()
n=0
while n>-5-0.01:
ax.cla()
setaxis(10)
tac=trasx(n)
n=n-0.2
sistemafijo()
sistemamovil(tac)
sistemamovil(tab)
dibujar()
Rtac=tac
n=0
while n>-4-0.01:
ax.cla()
setaxis(10)
tac=trasy(n)
tac=np.dot(Rtac,tac)
n=n-0.2
sistemafijo()
sistemamovil(tac)
sistemamovil(tab)
dibujar()
tba=minv(tab)
tbc=np.dot(tba,tac)
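    # tbc expresses frame {c} relative to frame {b}: invert the {a}->{b} transform and
    # compose it with {a}->{c}. The loop below moves {c} within {b}'s frame and rebuilds
    # tac = np.dot(tab, ntbc) so the scene stays consistent with the fixed frame.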
n=0
while n>-6-0.01:
ax.cla()
setaxis(10)
#ntbc=rotazf(n,tbc)
ntbc=np.dot(trasy(n),tbc)
tac=np.dot(tab,ntbc)
n=n-0.2
sistemafijo()
sistemamovil(tac)
sistemamovil(tab)
dibujar()
def animppp(d1,d2,d3):
n1=0
n2=0
n3=0
while n1<d1+0.01:
ax.cla()
setaxis(10)
ppp(n1,n2,n3)
n1=n1+0.2
dibujar()
while n2<d2+0.01:
ax.cla()
setaxis(10)
ppp(n1,n2,n3)
n2=n2+0.2
dibujar()
while n3<d3+0.01:
ax.cla()
setaxis(10)
ppp(n1,n2,n3)
n3=n3+0.2
dibujar()
def animrpp(t1,d2,d3):
n1=0
n2=2
n3=1
while n1<t1+0.01:
ax.cla()
setaxis(5)
rpp(n1,n2,n3)
n1=n1+5
dibujar()
while n2<d2+0.01:
ax.cla()
setaxis(5)
rpp(n1,n2,n3)
n2=n2+0.2
dibujar()
while n3<d3+0.01:
ax.cla()
setaxis(5)
rpp(n1,n2,n3)
n3=n3+0.2
dibujar()
def animrrp(t1,t2,d3):
n1=0
n2=0
n3=1
while n1<t1+0.01:
ax.cla()
setaxis(5)
rrp(n1,n2,n3)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(5)
rrp(n1,n2,n3)
n2=n2+5
dibujar()
while n3<d3+0.01:
ax.cla()
setaxis(5)
rrp(n1,n2,n3)
n3=n3+0.2
dibujar()
def animrrr(t1,t2,t3):
n1=0
n2=0
n3=0
while n1<t1+0.01:
ax.cla()
setaxis(5)
rrr(n1,n2,n3)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(5)
rrr(n1,n2,n3)
n2=n2+5
dibujar()
while n3<t3+0.01:
ax.cla()
setaxis(5)
rrr(n1,n2,n3)
n3=n3+5
dibujar()
def animscara(t1,t2,d3,t4):
n1=0
n2=0
n3=1
n4=0
while n1<t1+0.01:
ax.cla()
setaxis(5)
scara(n1,n2,n3,n4)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(5)
scara(n1,n2,n3,n4)
n2=n2+5
dibujar()
while n3<d3+0.01:
ax.cla()
setaxis(5)
scara(n1,n2,n3,n4)
n3=n3+0.2
dibujar()
while n4<t4+0.01:
ax.cla()
setaxis(5)
scara(n1,n2,n3,n4)
n4=n4+5
dibujar()
def animcobras800(t1,t2,d3,t4):
n1=0
n2=0
n3=1
n4=0
while n1<t1+0.01:
ax.cla()
setaxis(1000)
cobras800(n1,n2,n3,n4)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(1000)
cobras800(n1,n2,n3,n4)
n2=n2+5
dibujar()
while n3<d3+0.01:
ax.cla()
setaxis(1000)
cobras800(n1,n2,n3,n4)
n3=n3+5
dibujar()
while n4<t4+0.01:
ax.cla()
setaxis(1000)
cobras800(n1,n2,n3,n4)
n4=n4+5
dibujar()
def animur5(t1,t2,t3,t4,t5,t6):
n1=0
n2=0
n3=0
n4=0
n5=0
n6=0
while n1<t1+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n1=n1+5
dibujar()
while n2<t2+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n2=n2+5
dibujar()
while n3<t3+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n3=n3+5
dibujar()
while n4<t4+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n4=n4+5
dibujar()
while n5<t5+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n5=n5+5
dibujar()
while n6<t6+0.01:
ax.cla()
setaxis(1000)
ur5(n1,n2,n3,n4,n5,n6)
n6=n6+5
dibujar()
def animotoman(tb,t1a,t2a,t3a,t4a,t5a,t6a,t7a,t1b,t2b,t3b,t4b,t5b,t6b,t7b):
nb=0
n1a=0
n2a=0
n3a=0
n4a=0
n5a=0
n6a=0
n7a=0
n1b=0
n2b=0
n3b=0
n4b=0
n5b=0
n6b=0
n7b=0
while n1a<t1a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n1a=n1a+5
dibujar()
while n2a<t2a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n2a=n2a+5
dibujar()
while n3a<t3a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n3a=n3a+5
dibujar()
while n4a<t4a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n4a=n4a+5
dibujar()
while n5a<t5a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n5a=n5a+5
dibujar()
while n6a<t6a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n6a=n6a+5
dibujar()
while n7a<t7a+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n7a=n7a+5
dibujar()
while n1b<t1b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n1b=n1b+5
dibujar()
while n2b<t2b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n2b=n2b+5
dibujar()
while n3b<t3b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n3b=n3b+5
dibujar()
while n4b<t4b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n4b=n4b+5
dibujar()
while n5b<t5b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n5b=n5b+5
dibujar()
while n6b<t6b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n6b=n6b+5
dibujar()
while n7b<t7b+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
n7b=n7b+5
dibujar()
while nb<tb+0.01:
ax.cla()
setaxis(15)
motoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
nb=nb+5
dibujar()
nb=0
n1a=0
n2a=0
n3a=0
n4a=0
n5a=0
n6a=0
n7a=0
n1b=0
n2b=0
n3b=0
n4b=0
n5b=0
n6b=0
n7b=0
sbrazo=False
pasar=False
RDK=Robolink()
robotb=RDK.Item('base')
robotd=RDK.Item('der')
roboti=RDK.Item('izq')
axnb=plt.axes([0.1,0.2,0.3,0.03])
axn1=plt.axes([0.1,0.15,0.3,0.03])
axn2=plt.axes([0.1,0.1,0.3,0.03])
axn3=plt.axes([0.1,0.05,0.3,0.03])
axn4=plt.axes([0.55,0.2,0.3,0.03])
axn5=plt.axes([0.55,0.15,0.3,0.03])
axn6=plt.axes([0.55,0.1,0.3,0.03])
axn7=plt.axes([0.55,0.05,0.3,0.03])
axcb1=plt.axes([0.7,0.25,0.23,0.12])
snb=Slider(axnb, 'rot b', -180, 180.0, valinit=0)
sn1=Slider(axn1, 'rot 1', -180, 180.0, valinit=0)
sn2=Slider(axn2, 'rot 2', -180, 180.0, valinit=0)
sn3=Slider(axn3, 'rot 3', -180, 180.0, valinit=0)
sn4=Slider(axn4, 'rot 4', -180, 180.0, valinit=0)
sn5=Slider(axn5, 'rot 5', -180, 180.0, valinit=0)
sn6=Slider(axn6, 'rot 6', -180, 180.0, valinit=0)
sn7=Slider(axn7, 'rot 7', -180, 180.0, valinit=0)
chkbox1=CheckButtons(axcb1,['cambiar brazo'],[False])
def update(val):
global nb
global n1a
global n2a
global n3a
global n4a
global n5a
global n6a
global n7a
global n1b
global n2b
global n3b
global n4b
global n5b
global n6b
global n7b
global sbrazo
if not pasar:
if (not sbrazo):
ax.cla()
setaxis(1500)
nb=snb.val
n1a=sn1.val
n2a=sn2.val
n3a=sn3.val
n4a=sn4.val
n5a=sn5.val
n6a=sn6.val
n7a=sn7.val
accmotoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
valoresb=[nb]
valoresd=[n1a,n2a,n3a,n4a,n5a,n6a,n7a]
valoresi=[n1b,n2b,n3b,n4b,n5b,n6b,n7b]
robotb.MoveJ(valoresb)
robotd.MoveJ(valoresd)
roboti.MoveJ(valoresi)
dibujar()
else:
ax.cla()
setaxis(1500)
nb=snb.val
n1b=sn1.val
n2b=sn2.val
n3b=sn3.val
n4b=sn4.val
n5b=sn5.val
n6b=sn6.val
n7b=sn7.val
accmotoman(nb,n1a,n2a,n3a,n4a,n5a,n6a,n7a,n1b,n2b,n3b,n4b,n5b,n6b,n7b)
valoresb=[nb]
valoresd=[n1a,n2a,n3a,n4a,n5a,n6a,n7a]
valoresi=[n1b,n2b,n3b,n4b,n5b,n6b,n7b]
robotb.MoveJ(valoresb)
robotd.MoveJ(valoresd)
roboti.MoveJ(valoresi)
dibujar()
def seleccionbrazo(label):
global sbrazo
sbrazo=not sbrazo
global pasar
global nb
global n1a
global n2a
global n3a
global n4a
global n5a
global n6a
global n7a
global n1b
global n2b
global n3b
global n4b
global n5b
global n6b
global n7b
pasar=True
if (not sbrazo):
sn1.set_val(n1a)
sn2.set_val(n2a)
sn3.set_val(n3a)
sn4.set_val(n4a)
sn5.set_val(n5a)
sn6.set_val(n6a)
sn7.set_val(n7a)
else:
sn1.set_val(n1b)
sn2.set_val(n2b)
sn3.set_val(n3b)
sn4.set_val(n4b)
sn5.set_val(n5b)
sn6.set_val(n6b)
sn7.set_val(n7b)
pasar=False
ax.cla()
setaxis(1500)
accmotoman(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
snb.on_changed(update)
sn1.on_changed(update)
sn2.on_changed(update)
sn3.on_changed(update)
sn4.on_changed(update)
sn5.on_changed(update)
sn6.on_changed(update)
sn7.on_changed(update)
chkbox1.on_clicked(seleccionbrazo)
dibujar()
#Motoman CSDA10F
``` |
{
"source": "jilgue/spotify-ripper",
"score": 2
} |
#### File: jilgue/spotify-ripper/setup.py
```python
from setuptools import setup, find_packages
import os
def create_default_dir():
default_dir = os.path.normpath(os.path.realpath(
(os.path.join(os.path.expanduser("~"), ".spotify-ripper"))))
if not os.path.exists(default_dir):
print("Creating default settings directory: " +
default_dir)
os.makedirs(default_dir.encode("utf-8"))
def _read(fn):
path = os.path.join(os.path.dirname(__file__), fn)
return open(path).read()
setup(
name='spotify-ripper',
version='2.10.5',
packages=find_packages(exclude=["tests"]),
scripts=['spotify_ripper/main.py'],
include_package_data=True,
zip_safe=False,
# Executable
entry_points={
'console_scripts': [
'spotify-ripper = main:main',
],
},
# Additional data
package_data={
'': ['README.rst', 'LICENCE']
},
# Requirements
install_requires=[
'pyspotify==2.0.5',
'colorama==0.3.3',
'mutagen==1.30',
'requests>=2.3.0',
'schedule>=0.3.1',
],
# Metadata
author='<NAME>',
author_email='<EMAIL>',
description='a small ripper for Spotify that rips Spotify URIs '
'to audio files',
license='MIT',
keywords="spotify ripper mp3 ogg vorbis flac opus acc mp4 m4a",
url='https://github.com/jrnewell/spotify-ripper',
download_url='https://github.com/jrnewell/spotify-ripper/tarball/2.10.5',
classifiers=[
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Capture/Recording',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
"Intended Audience :: Developers",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
long_description=_read('README.rst'),
)
create_default_dir()
``` |
{
"source": "jilimcaoco/MPProjects",
"score": 2
} |
#### File: scripts/analysis/get_docking_statistics.py
```python
import sys, os, os.path
def read_outdock_file_write_extract_all(infilename,outfile1,outfile2,dic_zinc_id,dic_zinc_id_scored,dic_zincid_error_bump,dic_zincid_error_clash,dic_zincid_error_no_match,dic_zincid_error_no_valid,dic_zincid_error_skip,\
dic_zincid_error_miss):
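    """Parse one OUTDOCK file: write scored ZINC ids to outfile1 and all docked ids to
    outfile2, tally error categories into the shared dictionaries passed in, and return
    the run statistics reported in the OUTDOCK footer."""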
#def read_outdock_file_write_extract_all(path,infilename,extractfilename,maxenergy):
infile = open(infilename,'r')
lines = infile.readlines()
infile.close()
# if (os.path.exists(extractfilename)):
# outfile = open(extractfilename,'a')
# else:
# outfile = open(extractfilename,'w')
flag_read = False
flag_stat = False
## close SDIFILE
## total minimization steps = 801219
## total number of hierarchies: 16114
## total number of orients (matches): 20811601
## total number of conformations (sets): 2147921
## total number of nodes (confs): 4984826
## total number of complexes: 2685046286
## end of file encountered
##Date and Time: 20171013 174229.9
##elapsed time (sec): 5484.6802 (hour): 1.5235
minsteps = 0 ; numhier = 0 ; numorient = 0; numconf = 0; numnode = 0; numcomplex = 0 ; secs = 0.0; hours = 0.0
#zincid_dic = {}
#zincid_broken_dic = {}
for line in lines:
splitline = line.split()
if len(splitline) ==0:
continue
if "we" == splitline[0]: # we reached the end of the file, docking results.
flag_read = False
if flag_read:
if len(splitline) == 21:
#just output the ZINCID
zincid = splitline[1]
dic_zinc_id[zincid] = 1
dic_zinc_id_scored[zincid] = 1
outfile1.write("%s\n"%(zincid))
outfile2.write("%s\n"%(zincid))
elif ( len(splitline) > 2 ) and ( "ZINC" in splitline[1] ):
zincid = splitline[1]
dic_zinc_id[zincid] = 1
if "bump" in line:
dic_zincid_error_bump[zincid] = 1
elif "no_match" in line:
dic_zincid_error_no_match[zincid] = 1
elif "No viable poses." in line:
dic_zincid_error_no_valid[zincid] = 1
elif "clash" in line:
dic_zincid_error_clash[zincid] = 1
elif "skip" in line:
dic_zincid_error_skip[zincid] = 1
else:
print line
dic_zincid_error_miss[zincid] = 1
outfile2.write("%s\n"%(zincid))
elif flag_stat:
if "minimization" in line:
minsteps = int(splitline[4])
if "hierarchies" in line:
numhier = int(splitline[4])
if "orients" in line:
numorient = int(splitline[5])
if "conformations" in line:
numconf = int(splitline[5])
if "nodes" in line:
numnode = int(splitline[5])
if "complexes" in line:
numcomplex = int(splitline[4])
if "elapsed time" in line:
secs = float(splitline[3])
hours = float(splitline[5])
if "mol#" == splitline[0]: # start of docking resutls
flag_read = True
if splitline[0] == "close" and splitline[1] == "SDIFILE":
flag_read = False
flag_stat = True
# outfile.close()
return minsteps, numhier, numorient, numconf, numnode, numcomplex, secs, hours
def main():
if len(sys.argv) != 4:
print "error: this program takes 3 argument "
print "(0) path where dock directorys are. "
print "(1) dirlist where outdock file are. "
print "(2) prefix name of the count all files to be written. "
exit()
path = sys.argv[1]
filename1 = sys.argv[2]
output = sys.argv[3]
# stats
totminsteps = 0 ; totnumhier = 0 ; totnumorient = 0; totnumconf = 0; totnumnode = 0; totnumcomplex = 0 ; totsecs = 0.0; tothours = 0.0
tot_dic_zinc_id = {} ; tot_dic_zinc_id_scored = {};
tot_dic_zincid_error_bump = {} ; tot_dic_zincid_error_no_match = {}; tot_dic_zincid_error_clash = {} ; tot_dic_zincid_error_no_valid = {} ; tot_dic_zincid_error_skip = {} ; tot_dic_zincid_error_miss = {}
if (os.path.exists(output)):
print "%s exists. stop. " % output
exit()
print "(1) dirlist = " + filename1
print "(2) output = " + output
fh = open(filename1)
# remove extension.
splitfilename = output.split(".")
if(splitfilename[-1]!="txt"):
print "uhoh. %s should have .txt extension. exiting..."
exit()
filename_prefix = ''
for i in range(len(splitfilename)-1):
filename_prefix = filename_prefix+splitfilename[i]+'.'
outfile1 = open(filename_prefix+"scored.zincid",'w')
outfile2 = open(filename_prefix+"docked.zincid",'w')
for line in fh:
print line
#splitline = line.split()
#pathname = line.split()[0]
filename = line.split()[0]+'/OUTDOCK'
#read_outdock_file_write_extract_all(pathname,filename,output,max_energy)
tempminsteps, tempnumhier, tempnumorient, tempnumconf, tempnumnode, tempnumcomplex, tempsecs, temphours = read_outdock_file_write_extract_all(path+"/"+filename,outfile1,outfile2,tot_dic_zinc_id,tot_dic_zinc_id_scored \
,tot_dic_zincid_error_bump,tot_dic_zincid_error_no_match,tot_dic_zincid_error_clash,tot_dic_zincid_error_no_valid,\
tot_dic_zincid_error_skip,tot_dic_zincid_error_miss)
totminsteps = totminsteps + tempminsteps
totnumhier = totnumhier + tempnumhier
totnumorient = totnumorient + tempnumorient
totnumconf = totnumconf + tempnumconf
totnumnode = totnumnode + tempnumnode
totnumcomplex = totnumcomplex + tempnumcomplex
totsecs = totsecs + tempsecs
tothours = tothours + temphours
#print len(tot_dic_zinc_id.keys())
#print "totsecs =" + str(totsecs)
print "total_min_steps = " + str(totminsteps)
print "total_num_hier = " + str(totnumhier)
print "tot_num_orient = " + str(totnumorient)
print "tot_num_conf = " + str(totnumconf)
print "tot_num_node = " + str(totnumnode)
print "tot_num_complex = " + str(totnumcomplex)
print "total_secs = " + str(totsecs)
print "total_hours = " + str(tothours)
print "==========="
print "total_docked = " + str(len(tot_dic_zinc_id.keys()))
print "total_scored = " + str(len(tot_dic_zinc_id_scored.keys()))
print "=====no pose (some overlap with scored and one another) ======"
print "bump error = " + str(len(tot_dic_zincid_error_bump))
print "no_match error = " + str(len(tot_dic_zincid_error_no_match))
print "clash error = " + str(len(tot_dic_zincid_error_clash))
print "no_valid_pose error = " + str(len(tot_dic_zincid_error_no_valid))
print "miscellaneous error = " + str(len(tot_dic_zincid_error_miss))
print "skip error = " + str(len(tot_dic_zincid_error_skip))
outfile1.close()
outfile2.close()
main()
```
#### File: scripts/analysis/logAUC.py
```python
import os
import sys
import itertools
from optparse import OptionParser
import mmmutils
import enrich
def logAUC(indir):
print 100*enrich.get_roc_own_logAUC(indir=indir)
def main(argv):
description = "Calculate AUC and logAUC."
usage = "%prog [options]"
version = "%prog *version 200801* created by <NAME>"
parser = OptionParser(usage=usage, description=description,
version=version)
parser.set_defaults(indir='.')
parser.add_option("-i", "--indir", dest="indir",
help="input directory (default: %default)")
options, args = parser.parse_args(args=argv[1:])
if len(args) != 0:
parser.error("program takes no positional arguments.\n" +
" Use --help for more information.")
logAUC(options.indir)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
```
#### File: scripts/analysis/mol2extend.py
```python
import string
import sys
import sybyl2dock
import atom_color_table
import collections
import gzip
import operator
import floydwarshall # all pairs shortest paths routine
import shortestpaths # from one point to all others
import geometry # for distance function
import unionfind2
import divisive_clustering
import munkreskuhn
class Mol2(object):
'''reads mol2 files into a bunch of lists (for speed instead of objects).
reads multi-mol2 files containing the same molecule but in diff confs,
only xyzs are read in separately and no checking is done to ensure the
molecule is the same. (garbage in garbage out).'''
def blankNew(self):
'''create and blank everything'''
self.name = "fake" # at some point these need to be capped at 9 characters
self.protName = "fake" # yeah this too, 9 characters only
self.atomNum = []
self.atomName = [] # all this stuff might need to get optimized
self.atomXyz = [] # kept for every conformation
self.inputEnergy = [] # kept for every conformation
self.inputHydrogens = [] # for every conf. 0 means input. 1 means reset.
#2 means rotated. 3 means mix-n-match so who knows
self.atomType = []
self.atomCharge = [] # read in but overriden by solv data
self.atomBonds = []
self.bondNum = []
self.bondStart = []
self.bondEnd = []
self.bondType = []
self.xyzCount = -1
self.origXyzCount = -1
self.smiles = "fake"
self.longname = "fake"
self.bondDists = None
self.rmsdTable = None
#also want to keep copies of the other nums, names, types, charges & bonds
#we can delete this in most cases but sometimes there will be an extra
#hydrogen or something that means we have to keep all this for
#everything
self.atomNumAll = []
self.atomNameAll = []
self.atomTypeAll = []
self.atomChargeAll = []
self.atomBondsAll = []
self.bondNumAll = []
self.bondStartAll = []
self.bondEndAll = []
self.bondTypeAll = []
def __init__(self):
'''makes a totally fake version, for copying into'''
self.blankNew()
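  #note: the second __init__ below replaces the one above when the class is defined,
  #so every Mol2() call (including the no-argument case) goes through that signature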
def __init__(
self, mol2fileName=None, nameFileName=None, mol2text=None,
mol2textList=None):
'''reads in the file, or a bunch of lines, etc.'''
self.blankNew()
#read from files/text
if mol2text is not None:
for line in mol2text:
self.processLine(line)
self.xyzCount += 1
self.origXyzCount = self.xyzCount
while len(self.inputEnergy) < self.xyzCount:
self.inputEnergy.append(9999.99)
while len(self.inputHydrogens) < self.xyzCount:
self.inputHydrogens.append(0)
if mol2textList is not None: # reads in a list of list of lines as well
for oneMol2 in mol2textList:
for line in oneMol2:
self.processLine(line)
self.xyzCount += 1
self.origXyzCount = self.xyzCount
while len(self.inputEnergy) < self.xyzCount:
self.inputEnergy.append(9999.99)
while len(self.inputHydrogens) < self.xyzCount:
self.inputHydrogens.append(0)
#read the name.txt file which is one line and is made by the toolchain
if nameFileName is not None:
try:
#name.txt is grepped out from the dict file which has a random format
namefile = open(nameFileName, 'r')
firstLine = namefile.readlines()[0]
tokens = firstLine.split()
maxsplit = 0
if 9 == len(tokens[2]) and "P" == tokens[2][0]:
maxsplit = 6 # decoys name.txt file
elif 12 == len(tokens[0]) and "T" == tokens[0][0] and \
9 != len(tokens[1]):
maxsplit = 2 # dbgen file
elif tokens[0] == 'name.txt': # new dbstart
maxsplit = 6
else:
maxsplit = 4 # ligands name.txt file
#do the split again, with maxsplit
tokens = firstLine.split(None, maxsplit) # None forces whitespace
if tokens[0] == 'name.txt': # new dbstart
self.name = tokens[2]
self.protName = "none"
self.smiles = string.strip(tokens[3])
self.longname = string.strip(tokens[5])
elif tokens[0] == 'name.cxcalc.txt': # new dbstart
self.name = tokens[2]
self.protName = tokens[3]
self.smiles = string.strip(tokens[4])
self.longname = string.strip(tokens[7])
elif 7 == len(tokens): # decoys name.txt file
self.name = tokens[1]
self.protName = tokens[2]
self.smiles = string.strip(tokens[3])
self.longname = string.strip(tokens[6])
elif 5 == len(tokens): # ligands name.txt file
self.name = tokens[1]
self.protName = "none"
self.smiles = string.strip(tokens[2])
self.longname = string.strip(tokens[4])
elif 3 == len(tokens): # dbgen name.txt file
self.name = tokens[0]
self.protName = "none"
self.smiles = string.strip(tokens[1])
self.longname = string.strip(tokens[2])
#print self.name, self.protName, self.smiles, self.longname #debug
except StopIteration: # end of file
namefile.close()
if mol2fileName is not None:
if mol2fileName.endswith(".gz"):
mol2file = gzip.GzipFile(mol2fileName, 'r')
else:
mol2file = open(mol2fileName, 'r')
self.phase = 0
try:
for line in mol2file:
self.processLine(line)
except StopIteration:
mol2file.close()
finally:
self.xyzCount += 1 # really this needs to be done
self.origXyzCount = self.xyzCount
while len(self.inputEnergy) < self.xyzCount:
self.inputEnergy.append(9999.99)
while len(self.inputHydrogens) < self.xyzCount:
self.inputHydrogens.append(0)
#print self.atomName, self.atomType, self.atomCharge
#print self.bondStart, self.bondEnd, self.bondType
#print self.xyzCount
def processLine(self, line):
'''reads a single line, processes it'''
if line[:17] == "@<TRIPOS>MOLECULE":
self.phase = 1 # header phase
elif line[:13] == "@<TRIPOS>ATOM":
self.phase = 2 # atoms phase
self.xyzCount += 1 # first one is numbered 0
self.atomXyz.append([]) # list of lists
self.atomNumAll.append([])
self.atomNameAll.append([])
self.atomTypeAll.append([])
self.atomChargeAll.append([])
elif line[:13] == "@<TRIPOS>BOND":
self.phase = 3 # bonds phase
self.atomBondsAll.append([])
self.bondNumAll.append([])
self.bondStartAll.append([])
self.bondEndAll.append([])
self.bondTypeAll.append([])
elif line[:9] == "@<TRIPOS>":
self.phase = 0 # fake phase that reads nothing...
elif line[0] == '#': # comment line, ignore
pass
elif len(line) == 1: # comment line, ignore
pass
else:
if 1 == self.phase:
if self.name == "fake":
self.name = string.strip(line)
tokens = string.split(string.strip(line))
if len(line) > 1 and tokens[0][0:7] == "mmff94s":
self.inputEnergy.append(float(tokens[2]))
self.inputHydrogens.append(0)
self.phase = 0 # rest of header ignored
elif 2 == self.phase:
tokens = string.split(line)
if 0 == self.xyzCount: # only read in some stuff for first mol2
self.atomNum.append(int(tokens[0]))
self.atomName.append(tokens[1])
self.atomType.append(tokens[5])
self.atomCharge.append(float(tokens[-1])) # last column is charge
self.atomBonds.append([]) # start new list
#read everything here, can delete for space if not different
self.atomNumAll[self.xyzCount].append(int(tokens[0]))
self.atomNameAll[self.xyzCount].append(tokens[1])
self.atomTypeAll[self.xyzCount].append(tokens[5])
# last column is charge
self.atomChargeAll[self.xyzCount].append(float(tokens[-1]))
#always always always do this, changing coordinates
self.atomXyz[self.xyzCount].append(
(float(tokens[2]), float(tokens[3]), float(tokens[4])))
elif 3 == self.phase:
tokens = string.split(line)
if 0 == self.xyzCount:
#bonds only read in for first molecule, assumed the same after
self.bondNum.append(int(tokens[0]))
self.bondStart.append(int(tokens[1]))
self.bondEnd.append(int(tokens[2]))
self.bondType.append(tokens[3])
self.atomBonds[int(tokens[1]) - 1].append(
(int(tokens[2]) - 1, tokens[3]))
self.atomBonds[int(tokens[2]) - 1].append(
(int(tokens[1]) - 1, tokens[3]))
self.bondNumAll.append(int(tokens[0]))
self.bondStartAll.append(int(tokens[1]))
self.bondEndAll.append(int(tokens[2]))
self.bondTypeAll.append(tokens[3])
self.atomBondsAll.append([int(tokens[1]) - 1].append(
(int(tokens[2]) - 1, tokens[3])))
self.atomBondsAll.append([int(tokens[2]) - 1].append(
(int(tokens[1]) - 1, tokens[3])))
def copy(self):
'''returns new mol2 object'''
newM = Mol2()
newM.name = self.name
newM.protName = self.protName
newM.atomNum = self.atomNum
newM.atomName = self.atomName
newM.atomXyz = self.atomXyz[:] # only this
newM.inputEnergy = self.inputEnergy[:] # and this
newM.xyzCount = self.xyzCount # and this
newM.origXyzCount = self.origXyzCount # and this
newM.inputHydrogens = self.inputHydrogens[:] # and this are to changed
newM.atomType = self.atomType
newM.atomCharge = self.atomCharge
newM.atomBonds = self.atomBonds
newM.bondNum = self.bondNum
newM.bondStart = self.bondStart
newM.bondEnd = self.bondEnd
newM.bondType = self.bondType
newM.smiles = self.smiles
newM.longname = self.longname
try:
newM.colorConverter = self.colorConverter
newM.dockNum = self.dockNum
newM.colorNum = self.colorNum
except AttributeError:
pass # this is fine
newM.bondDists = None # don't copy this, regenerate
newM.atomNumAll = self.atomNumAll[:]
newM.atomNameAll = self.atomNameAll[:]
newM.atomTypeAll = self.atomTypeAll[:]
newM.atomChargeAll = self.atomChargeAll[:]
newM.atomBondsAll = self.atomBondsAll[:]
newM.bondNumAll = self.bondNumAll[:]
newM.bondStartAll = self.bondStartAll[:]
newM.bondEndAll = self.bondEndAll[:]
newM.bondTypeAll = self.bondTypeAll[:]
return newM # new Mol2 object that can be manipulated
def initFromDb2Lines(self, mlines):
'''reads data from lines in the db2 file. start with a blank, init obj.
mlines is lines starting with M'''
tokens = []
for mline in mlines[0:4]: # 1st 4 mlines only
tokens.append(string.split(mline))
self.name = tokens[0][1] # 2d array, 0th line, 1st token
self.protName = tokens[0][2]
self.smiles = tokens[2][1]
self.longname = tokens[3][1]
self.atomNum = []
self.atomName = [] # all this stuff might need to get optimized
self.atomXyz = [] # kept for every conformation
self.inputEnergy = [] # kept for every conformation
self.inputHydrogens = [] # for every conf. 0 means input. 1 means reset.
#2 means rotated. 3 means mix-n-match so who knows
self.atomType = []
self.atomCharge = [] # read in but overriden by solv data
self.atomBonds = []
self.bondNum = []
self.bondStart = []
self.bondEnd = []
self.bondType = []
self.xyzCount = -1
self.origXyzCount = -1
def keepConfsOnly(self, first, last):
''' delete input confs outside of the first, last range'''
if last > self.xyzCount: # can't copy beyond end
last = self.xyzCount
self.xyzCount = last - first
self.atomXyz = self.atomXyz[first:last]
self.inputEnergy = self.inputEnergy[first:last]
self.inputHydrogens = self.inputHydrogens[first:last]
def bondsBetween(self, atomNum, atomOther):
'''returns number of bonds between any 2 atoms (see bondsBetweenActual)
atomNum and otherNum is the mol2 numbering (1-based). '''
actualNum = atomNum - 1 # assume 1-based to 0-based conversion
actualOther = atomOther - 1 # assume 1-based to 0-based conversion
    return self.bondsBetweenActual(actualNum, actualOther)
def bondsBetweenActual(self, actualNum, actualOther):
'''returns the number of bonds between the two atom numbers specified.
directly bonded => 1
one atom in between => 2
two atoms => 3, etc.'''
if self.bondDists is None: # generate them
self.calcBondDists()
row = self.bondDistsOrderKeys[actualNum]
col = self.bondDistsOrderKeys[actualOther]
return self.bondDists[row][col]
def distFromAtoms(self, atoms):
'''using a particular set of atoms (usually rigid component) as 0,
find the bond distance to all other atoms'''
neighbors = {}
for atomNum in xrange(len(self.atomNum)):
neighbors[atomNum] = []
for otherBond in self.atomBonds[atomNum]:
neighbors[atomNum].append((otherBond[0], 1))
dists = shortestpaths.shortestPaths(neighbors.keys(), neighbors, 0, atoms)
return dists
def calcBondDists(self):
'''uses floyd warshall all pairs shortest paths algorithm to generate
the # of bonds between all pairs of atoms. cache results and don't redo'''
neighbors = {}
for atomNum in xrange(len(self.atomNum)):
neighbors[atomNum] = []
for otherBond in self.atomBonds[atomNum]:
neighbors[atomNum].append((otherBond[0], 1))
distances, orderKeys = floydwarshall.floydWarshall(neighbors)
self.bondDists = distances
self.bondDistsOrderKeys = orderKeys
#no return, we're done here
def bondedTo(
self, atomNum, firstName, bondsAway=1, lastBond=None, returnAtom=False):
'''returns true if atomNum has a bond to any other atom with a name starting
with firstName. atomNum is the mol2 numbering (1-based). bondsAway
controls how far the search proceeds before checking. must be exact.'''
actualNum = atomNum - 1 # assume 1-based to 0-based conversion
return self.bondedToActual(
actualNum, firstName, bondsAway, lastBond, returnAtom)
def bondedToAll(self, atomNum, firstName, bondsAway=1, lastBond=None):
'''returns all atoms bonded, for other functions to process. calls
bondedToActualAll, see for more documentation'''
actualNum = atomNum - 1 # assume 1-based to 0-based conversion
return self.bondedToActualAll(actualNum, firstName, bondsAway, lastBond)
def bondedToActual(
self, actualNum, firstName, bondsAway=1, lastBond=None, returnAtom=False):
'''returns true if actualNum has a bond to any other atom with a name
starting with firstName. actualNum is the 0-based numbering. bondsAway
controls how far the search proceeds before checking. must be exact.
lastBond controls the type of the lastBond found (to the one just before
the ones being checked) useful for finding only certain types of bonds.
if returnAtom is True, it returns the atom number of the atom that matched.
'''
bondedAwayNums = self.bondedToActualAll(
actualNum, firstName, bondsAway, lastBond)
for anAtomNum in bondedAwayNums[bondsAway]:
if -1 != string.find(self.atomType[anAtomNum], firstName):
if not returnAtom:
return True # found a matching atom at the other end of a bond
else: # returnAtom is True
return True, self.atomNum[anAtomNum]
if not returnAtom:
return False # no bonded atoms with that first letter
else: # returnAtom is True
return False, False
def bondedToActualAll(
self, actualNum, firstName, bondsAway=1, lastBond=None):
'''returns all atoms a certain number of bonds away, obeying lastBond as
bondedToActual documents'''
bondedAwayNums = collections.defaultdict(list) # defaults to empty list
bondedAwayNums[0].append(actualNum)
checked = 0
while checked < bondsAway: # check up to bondsaway bonds away from start
for startNum in bondedAwayNums[checked]: # for each atom in cur level
for otherBond in self.atomBonds[startNum]:
#check to make sure otherBond[0] not in any previous list
okayToAdd = True
for checkList in bondedAwayNums.itervalues():
if otherBond[0] in checkList:
okayToAdd = False
if okayToAdd and (lastBond is not None) and \
(checked + 1 == bondsAway):
if lastBond != "*": # same as none, basically allow anything
if -1 == otherBond[1].find(lastBond): # -1 means no match
okayToAdd = False # which means this bond isn't correct
if okayToAdd:
bondedAwayNums[checked + 1].append(otherBond[0])
checked += 1 # move 1 more away
return bondedAwayNums
def isAtomBondedOtherThan(self, atomNum, count, otherThan):
'''for each atom, if it is bonded to any of [count] atoms that are not of
type [otherThan], return true'''
bondedAwayNums = self.bondedToAll(atomNum, "")[1] # only want 1 bond away
otherThanCount = 0
for atomNumActual in bondedAwayNums:
if self.atomType[atomNumActual] not in otherThan:
otherThanCount += 1
if otherThanCount not in count:
return True
else:
return False
def convertDockTypes(self, parameterFileName=None):
'''adds self.dockNum to each atom record based on sybyl2dock'''
dockConverter = sybyl2dock.AtomConverter(parameterFileName)
self.dockNum = [] # new data on each dock atom number
for atomNumber in self.atomNum:
self.dockNum.append(dockConverter.convertMol2atomNum(self, atomNumber))
def addColors(self, parameterFileName=None):
'''adds self.colorNum to each atom record based on rules'''
colorConverter = atom_color_table.ColorTable(parameterFileName)
self.colorConverter = colorConverter # save for later use in output
self.colorNum = [] # map from atom to colors
for atomNumber in self.atomNum:
self.colorNum.append(colorConverter.convertMol2color(self, atomNumber))
def countConfs(self):
'''returns the number of conformations in the file'''
return len(self.atomXyz)
def getXyz(self, xyzCount, atomNum):
'''returns the xyz for an atom number and an xyz count (conformation)'''
atomIndex = self.atomNum.index(atomNum)
return self.atomXyz[xyzCount][atomIndex]
def getXyzManyConfs(self, xyzCounts, atomIndex):
'''returns a lits of xyzs for many confs of one atom'''
xyzConfs = []
for xyzCount in xyzCounts:
xyzConfs.append(self.atomXyz[xyzCount][atomIndex])
return xyzConfs
def getCostMatrix(self, xyzOne, xyzTwo, hydrogens=True):
'''helper function that computes cost matrix between 2 sets of atom xyz'''
costMatrix = [] # list of lists
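    #cost between two atoms is their squared distance when the first letter of their
    #sybyl atom types matches and effectively infinite otherwise, so the assignment
    #step only pairs like atoms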
for atomIndex in xrange(len(self.atomXyz[xyzOne])):
if hydrogens or ('H' != self.atomTypeAll[xyzOne][atomIndex][0]):
rowMatrix = []
for otherIndex in xrange(len(self.atomXyz[xyzTwo])):
if hydrogens or ('H' != self.atomTypeAll[xyzTwo][otherIndex][0]):
if self.atomTypeAll[xyzOne][atomIndex][0] == \
self.atomTypeAll[xyzTwo][otherIndex][0]:
distSquared = geometry.distL2Squared3(
self.atomXyz[xyzOne][atomIndex],
self.atomXyz[xyzTwo][otherIndex])
else:
distSquared = sys.float_info.max
rowMatrix.append(distSquared)
costMatrix.append(rowMatrix)
return costMatrix
def rearrangeAccordingToMatches(self, matches, xyzOne):
'''for xyzone, return its atomXyz coordinates but rearrange them
according to matches. matches[x][0] is the right order,
matches[x][1] is the order xyzone is in. some atomXyz coordinates
may be left out (protonation)'''
newList = []
matches.sort(key=operator.itemgetter(0))
for match in matches:
newList.append(self.atomXyz[xyzOne][match[1]])
return newList
def remapAtomXyzDealWithProtonation(self):
'''checks to see if any atomXyz have a different number of points.
if so, we assume it is because of protonation. use munkres-kuhn to compute
the map between 2 arbitrary differing atomXyz sets, then remap atomxyz to a
new list so that it can be passed into divisive clustering routines
without problems due to different lengths of coordinates.
if none are different, return self.atomXyz as this will work fine.'''
initClusters = self.breakIntoClustersByAtomCount()
if 1 == len(initClusters): # everything is fine
return self.atomXyz
else: # there are at least 2 different protonation states
newPointListList = [None for count in xrange(len(self.atomXyz))]
for listMember in initClusters[0]: # add the first list as normal
newPointListList[listMember] = self.atomXyz[listMember]
arbitraryFirst = initClusters[0][0] # arbitrary xyz from first list
for otherList in initClusters[1:]: # all other lists
costMatrix = self.getCostMatrix(arbitraryFirst, otherList[0])
matches = munkreskuhn.assignAndReturnMatches(costMatrix)
for otherXyz in otherList:
newXyz = self.rearrangeAccordingToMatches(matches, otherXyz)
newPointListList[otherXyz] = newXyz
return newPointListList
def getAdvancedRMSD(self, xyzOne, xyzTwo, hydrogens=True):
'''calculates RMSD with/without using hydrogens at all.'''
#first, a quick check to see if they are identical, since sometimes the
#code will run forever if that happens
if len(self.atomXyz[xyzOne]) == len(self.atomXyz[xyzTwo]):
if self.atomXyz[xyzOne] == self.atomXyz[xyzTwo]:
return 0.0 # lists are identical, any rmsd is 0.0
costMatrix = self.getCostMatrix(xyzOne, xyzTwo, hydrogens=hydrogens)
matches = munkreskuhn.assignAndReturnMatches(costMatrix)
sumSquared = 0.0
for oneMatch in matches:
sumSquared += oneMatch[2]
rmsd = (sumSquared / len(matches)) ** 0.5
#following comment encloses debugging in cases where the distances are inf
"""
if rmsd > 100:
print costMatrix
print matches
print self.atomTypeAll[xyzOne]
print self.atomTypeAll[xyzTwo]
sys.exit(1)
"""
return rmsd
def getRMSD(self, xyzOne, xyzTwo):
'''calculates just the rmsd of the two conformations'''
sumSquared = 0.0
for atomIndex in xrange(len(self.atomXyz[xyzOne])):
sumSquared += geometry.distL2Squared3(
self.atomXyz[xyzOne][atomIndex], self.atomXyz[xyzTwo][atomIndex])
rmsd = (sumSquared / len(self.atomXyz[xyzOne])) ** 0.5
return rmsd
def getRMSDtable(
self, forceRedo=False, advanced=False, clusterLimit=None,
startRmsdTable=None, keepRmsdList=False):
'''for each conformation (xyzcount) to all others, find the rmsd. return
as dictionary of dictionaries of rmsds'''
if clusterLimit is not None:
self.getRMSDtableLimited(
forceRedo, advanced, clusterLimit,
startRmsdTable=startRmsdTable, keepRmsdList=keepRmsdList)
elif self.rmsdTable is None or forceRedo:
if startRmsdTable is None:
self.rmsdTable = collections.defaultdict(dict) # make new
for xyzCount in xrange(len(self.atomXyz)): # now set to 0
for otherXyz in xrange(xyzCount + 1, len(self.atomXyz)): # just half
self.rmsdTable[xyzCount][otherXyz] = 0.0
self.rmsdTable[otherXyz][xyzCount] = 0.0
else:
self.rmsdTable = startRmsdTable
self.rmsdList = []
for xyzCount in xrange(len(self.atomXyz)):
for otherXyz in xrange(xyzCount + 1, len(self.atomXyz)): # just half
if not advanced:
rmsd = self.getRMSD(xyzCount, otherXyz)
else:
rmsd = self.getAdvancedRMSD(xyzCount, otherXyz)
self.rmsdTable[xyzCount][otherXyz] += rmsd # add to this, either 0
self.rmsdTable[otherXyz][xyzCount] += rmsd # or already the prot rmsd
if keepRmsdList:
self.rmsdList.append((rmsd, otherXyz, xyzCount))
if keepRmsdList:
self.rmsdList.sort(key=operator.itemgetter(0))
return self.rmsdTable
def breakIntoClustersByAtomCount(self):
countToClusters = collections.defaultdict(list)
for count, atomXyzList in enumerate(self.atomXyz):
countToClusters[len(atomXyzList)].append(count)
startClusters = []
clustKeys = countToClusters.keys()
clustKeys.sort() # this way, returned lists are in order of ascending len
for clusterKey in clustKeys:
clusterList = countToClusters[clusterKey]
startClusters.append(clusterList)
return startClusters
def getRMSDtableLimited(
self, forceRedo=False, advanced=False,
clusterLimit=None, startRmsdTable=None, keepRmsdList=False):
'''for each conformation (xyzcount) to all others, find the rmsd. return
as dictionary of dictionaries of rmsds. but first divisively cluster until
cluster size is below cluster limit, because all pairs is just too slow.'''
if self.rmsdTable is None or forceRedo:
#have to precluster based on atom count, since sometimes hydrogens
#pop into or out of existence (called protonation by some)
#startClusters = self.breakIntoClustersByAtomCount()
#clusts = divisive_clustering.divisiveClustering(self.atomXyz, \
# limit=clusterLimit, numClusters=sys.maxint, \
# startClusters=startClusters, verbose=True, overlap=50)
newXyzList = self.remapAtomXyzDealWithProtonation()
clusts = divisive_clustering.divisiveClustering(
newXyzList, limit=clusterLimit, numClusters=sys.maxint,
verbose=False, overlap=50)
if startRmsdTable is None:
self.rmsdTable = collections.defaultdict(dict) # make new
for xyzCount in xrange(len(self.atomXyz)): # now set to 0
for otherXyz in xrange(xyzCount + 1, len(self.atomXyz)): # just half
self.rmsdTable[xyzCount][otherXyz] = 0.0
self.rmsdTable[otherXyz][xyzCount] = 0.0
else:
self.rmsdTable = startRmsdTable
self.rmsdList = []
for xyzCount in xrange(len(self.atomXyz)):
thisCluster = []
for cluster in clusts:
if xyzCount in cluster:
thisCluster.extend(cluster) # extend to add, since clusters overlap
for otherXyz in xrange(xyzCount + 1, len(self.atomXyz)): # just half
#print xyzCount, otherXyz
if xyzCount < otherXyz:
if otherXyz in thisCluster:
if not advanced:
rmsd = self.getRMSD(xyzCount, otherXyz)
else:
rmsd = self.getAdvancedRMSD(xyzCount, otherXyz)
else:
rmsd = sys.maxsize
self.rmsdTable[xyzCount][otherXyz] += rmsd
self.rmsdTable[otherXyz][xyzCount] += rmsd
if keepRmsdList:
self.rmsdList.append((rmsd, otherXyz, xyzCount))
if keepRmsdList:
self.rmsdList.sort(key=operator.itemgetter(0))
return self.rmsdTable
def getRMSDlist(self):
'''gets the rmsds between all pairs as a list of rmsd, conf, conf tuples'''
self.getRMSDtable(keepRmsdList=True)
return self.rmsdList
def getRMSDclusters(self, rmsdCutoff=None, numClusters=1):
'''uses the rmsdlist to make clusters of conformations based on rmsd.
goes until either the rmsdCutoff is reached or numClusters is reached.
using the numClusters will make this run very slowly.
uses single linkage to make a new cluster.'''
self.getRMSDtable() # make the table, or ensure it is made
#self.rmsdList is a tuple of (rmsd, conf, conf)
clusters = unionfind2.unionFind()
for xyzCount in xrange(len(self.atomXyz)):
clusters.find(xyzCount) # initialize all these to singleton clusters
if rmsdCutoff is None:
rmsdCutoff = self.rmsdList[-1][0] + 1.0 # make it never happen
for rmsdTuple in self.rmsdList:
if rmsdTuple[0] > rmsdCutoff:
break # quit combining things!
clusters.union(rmsdTuple[1], rmsdTuple[2])
return clusters.toLists()
def getRMSDclustersAll(self, rmsdCutoff=None, numClusters=1):
'''uses the rmsdlist to make clusters of conformations based on rmsd.
goes until either the rmsdCutoff is reached or numClusters is reached.
using the numClusters will make this run very slowly. uses ALL linkage not
single linkage.'''
self.getRMSDtable() # make the table, or ensure it is made
#self.rmsdList is a tuple of (rmsd, conf, conf)
#self.rmsdTable is a dict of [conf][conf] -> rmsd
clusters = unionfind2.unionFind()
for xyzCount in xrange(len(self.atomXyz)):
clusters.find(xyzCount) # init
if rmsdCutoff is None:
rmsdCutoff = self.rmsdList[-1][0] + 1.0 # make it never happen
for rmsdTuple in self.rmsdList:
if rmsdTuple[0] > rmsdCutoff:
break # quit combining things!
#have to do all linkage not just single.. oh my
if clusters.different(rmsdTuple[1], rmsdTuple[2]): # else already joined
combine = True
clusterOne = clusters.getList(rmsdTuple[1])
clusterTwo = clusters.getList(rmsdTuple[2])
#print clusterOne, clusterTwo,
for clusterOneRep in clusterOne:
for clusterTwoRep in clusterTwo:
thisRMSD = self.rmsdTable[clusterOneRep][clusterTwoRep]
#print thisRMSD,
if thisRMSD > rmsdTuple[0]: # means we can't combine yet
combine = False
break
if not combine:
break
#print combine
if combine:
clusters.union(rmsdTuple[1], rmsdTuple[2])
return clusters.toLists()
def divisiveClustering(self):
'''takes all conformations. bisects them along the longest dimension
(in N*atoms*3 space). Repeat on the biggest remaining cluster until there
are numClusters left, return the clusters as lists.'''
numClusters = min(20, int(self.origXyzCount/3.))
#print numClusters #debugging, find out target # of clusters
clusters = divisive_clustering.divisiveClustering(self.atomXyz, numClusters)
return clusters
def writeMol2File(self, outFile, whichXyz=None):
'''writes the data to an already open file. don't close it.'''
if whichXyz is None:
whichXyz = range(self.xyzCount)
for oneXyz in whichXyz:
outFile.write("@<TRIPOS>MOLECULE\n")
if self.protName == "fake": # don't write fake
outFile.write(self.name + "\n")
else:
outFile.write(self.name + " " + self.protName + "\n")
outFile.write("%5d %5d %5d %5d %5d\n" % (len(self.atomNum),
len(self.bondNum), 0, 0, 0))
outFile.write("SMALL\nUSER_CHARGES\n\n")
outFile.write("mmff94s_NoEstat = %5.2f\n" % self.inputEnergy[oneXyz])
outFile.write("@<TRIPOS>ATOM\n")
for oneAtom in xrange(len(self.atomNum)):
outFile.write(
"%7d %6s % 8.4f % 8.4f % 8.4f %5s 1 <0> % 8.4f\n"
% (
self.atomNum[oneAtom], string.ljust(self.atomName[oneAtom], 6),
self.atomXyz[oneXyz][oneAtom][0],
self.atomXyz[oneXyz][oneAtom][1],
self.atomXyz[oneXyz][oneAtom][2],
string.ljust(self.atomType[oneAtom], 5),
self.atomCharge[oneAtom]))
outFile.write("@<TRIPOS>BOND\n")
for oneBond in xrange(len(self.bondNum)):
outFile.write(
"%6d%5d%5d %2s\n" % (
self.bondNum[oneBond], self.bondStart[oneBond],
self.bondEnd[oneBond], string.ljust(self.bondType[oneBond], 2)))
def writeMol2(self, outName, whichXyz=None):
'''writes the data to a mol2 file.'''
outFile = open(outName, 'w')
self.writeMol2File(outFile, whichXyz)
outFile.close()
def addSolvDataPartialCharges(self, partialCharges):
'''adds partial charge data from list, must be in correct order'''
for count in xrange(len(self.atomCharge)):
self.atomCharge[count] = partialCharges[count]
#that's it. no return.
def readDockMol2file(
mol2fileName, recdes=False, ligdes=False, charge=False, elec=False):
'''reads a dock output mol2 file, since each ligand has different connectivity
this returns a list of Mol2 data classes instead of just one big one.
if desired, can return the receptor desolvation as a list and/or
the polar ligand desolvation scores as well'''
mol2lines = []
mol2data = []
mol2rd = []
mol2ld = []
mol2charge = []
mol2elec = []
mol2file = open(mol2fileName, 'r')
mol2names = []
for mol2line in mol2file:
if mol2line[:17] == "@<TRIPOS>MOLECULE":
if len(mol2lines) > 0:
mol2data.append(Mol2(mol2text=mol2lines))
mol2lines = []
if mol2line[0] != "#" and len(mol2line) > 1:
mol2lines.append(mol2line)
if mol2line[:32] == '########## Name:':
mol2names.append(string.split(mol2line)[2])
if mol2line[:32] == '########## Ligand Polar Desolv:':
mol2ld.append(float(string.split(mol2line)[4]))
if mol2line[:32] == '########## Receptor Desolvation:':
mol2rd.append(float(string.split(mol2line)[3]))
if mol2line[:32] == '########## Ligand Charge:':
mol2charge.append(float(string.split(mol2line)[3]))
if mol2line[:32] == '########## Electrostatic:':
mol2elec.append(float(string.split(mol2line)[2]))
if len(mol2lines) > 0:
mol2data.append(Mol2(mol2text=mol2lines))
for count, oneMol2 in enumerate(mol2data):
oneMol2.name = mol2names[count]
retList = [mol2data]
if recdes:
retList.append(mol2rd)
if ligdes:
retList.append(mol2ld)
if charge:
retList.append(mol2charge)
if elec:
retList.append(mol2elec)
return tuple(retList)
if 0 == string.find(sys.argv[0], "mol2extend.py"):
#if called from commandline, read in all arguments as mol2 files and print
#out statistics about the molecule or confirm that it was read in who knows
for mol2file in sys.argv[1:]:
mol2data = Mol2(mol2file)
print mol2data.name,
print len(mol2data.atomName), len(mol2data.bondStart) # debugging???
mol2data.convertDockTypes()
print mol2data.dockNum
```
#### File: docking/scripts/check_sample_native.py
```python
import os
import rdkit
from rdkit.Chem.rdmolfiles import MolFromPDBFile
from rdkit.Chem.rdmolfiles import MolFromMol2Block
from rdkit.Chem.AllChem import GetBestRMS
# have we sampled the native conformation?
# adapted from: https://chem-workflows.com/articles/2019/07/18/building-a-multi-molecule-mol2-reader-for-rdkit/
def Mol2MolSupplier(file,sanitize=True):
m = None
line_index=0
start_line=None
with open(file, 'r') as f:
line =f.readline()
line_index += 1
# skip down to the beginning of the first molecule
while not line.startswith("@<TRIPOS>MOLECULE") and not f.tell() == os.fstat(f.fileno()).st_size:
line = f.readline()
line_index += 1
while not f.tell() == os.fstat(f.fileno()).st_size:
if line.startswith("@<TRIPOS>MOLECULE"):
mol = []
mol.append(line)
start_line = line_index
line = f.readline()
line_index += 1
while not line.startswith("@<TRIPOS>MOLECULE"):
mol.append(line)
line = f.readline()
line_index += 1
if f.tell() == os.fstat(f.fileno()).st_size:
mol.append(line)
break
                mol[-1] = mol[-1].rstrip() # strip trailing whitespace from the last line
                block = "".join(mol) + "\n"
                m = MolFromMol2Block(block, sanitize=sanitize)
                yield (start_line, m)
def check_sample_native(xtal_lig_pdb, sample_confs_mol2):
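    """Compute, for every conformer in a multi-molecule mol2 file, the RDKit GetBestRMS
    value against the crystallographic ligand and return a list of
    (start_line, rms) tuples."""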
xtal_lig = MolFromPDBFile(xtal_lig_pdb)
xtal_lig.SetProp("_Name", 'xtal')
rms_scores = []
for start_line, sample_conf in Mol2MolSupplier(sample_confs_mol2, sanitize=False):
rms_score = GetBestRMS(xtal_lig, sample_conf)
print("start_line: {} rms: {}".format(start_line, rms_score))
rms_scores.append((start_line, rms_score))
    return rms_scores
```
#### File: scripts/common/pdbMoveColumns.py
```python
import string
import sys
import os
def pdbMoveColumns(inputPdbName, outputPdbName):
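    # A well-formed PDB ATOM record has its decimal points (x, y, z, occupancy,
    # B-factor) at fixed columns; when they are shifted right, the extra characters are
    # assumed to have crept in after column 22 (the residue-number field) and are
    # deleted. Records with exactly one misplaced decimal point are silently dropped,
    # and non-ATOM lines are never copied to the output.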
dots = (34, 42, 50, 57, 63) # should be the periods in the columns
deleteFrom = 22 # where to delete spaces from
outputPdb = open(outputPdbName, 'w')
inputPdb = open(inputPdbName, 'r')
for line in inputPdb:
if string.find(line, 'ATOM') == 0:
problems = 0
for dot in dots:
try:
if line[dot] != '.': # this is not good
problems += 1
except IndexError: # might not be that long
pass # no problem
if problems == 0: # great
outputPdb.write(line)
elif problems > 1: # find where they should be
extra = 1
while line[dots[0] + extra] != '.' and extra < 20: # at 20, give up
extra += 1
if extra != 20:
newLine = line[:deleteFrom]
newLine += line[deleteFrom + extra:]
outputPdb.write(newLine)
else:
outputPdb.write(line) # write these anyway
outputPdb.close()
inputPdb.close()
if -1 != string.find(sys.argv[0], 'pdbMoveColumns.py'): # main program
inputPdbName = sys.argv[1]
outputPdbName = sys.argv[2]
pdbMoveColumns(inputPdbName, outputPdbName)
```
#### File: scripts/common/phi.py
```python
import struct
import array
import sys
import string
import os
import math
import copy
#import gzip, bz2 #for compressed file reading (not enabled yet)
# format follows
# character*20 toplabel
# character*10 head,character*60 title
# real*4 phi(65,65,65) #or now 193,193,193
# character*16 botlabel
# real*4 scale, oldmid(3)
def grid_size_from_file_size(file_size):
grid_bytes = file_size - 162 # 162 is number of fixed bytes in a grid file
grid_points = grid_bytes / 4.0 # 4 bytes per float
grid_size = grid_points ** (1.0/3.0) # Cube root of grid points is size
grid_size = int(math.ceil(grid_size))
return grid_size
class phi(object):
def __init__(self, phiFileName=False, is64=False, gridSizes=None):
'''reads the phi file from disk'''
if gridSizes is None:
gridSizes = (None,)
self.oldmid = [0., 0., 0.]
self.__minsmaxs = None
self.__boundaries = None
if phiFileName: # otherwise just creating an empty phi map for writing
for gridSize in gridSizes:
if gridSize is None:
gridSize = grid_size_from_file_size(os.stat(phiFileName).st_size)
print "Determined size to be", gridSize
try:
phiFile = open(phiFileName, 'rb') # b is for binary, r is for read
tempArray = array.array('f')
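          #fortran unformatted files wrap each record in length markers; the 'junk'
          #reads below skip those markers (the extra is64 reads handle maps written
          #with 8-byte instead of 4-byte markers)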
junk = struct.unpack('4s', phiFile.read(4))
(check,) = struct.unpack('4s', phiFile.read(4))
if check == "now ": # this changed, but this is now correct
#print "32bit phimap"
pass
else:
#print "64bit phimap"
is64 = True
if not is64:
(temptop,) = struct.unpack('16s', phiFile.read(16))
self.toplabel = check + temptop
else:
(temptop,) = struct.unpack('20s', phiFile.read(20))
self.toplabel = temptop
#print "toplabel:", self.toplabel
junk = struct.unpack('8s', phiFile.read(8))
if is64:
junk = struct.unpack('8s', phiFile.read(8))
(self.head,) = struct.unpack('10s', phiFile.read(10))
#print "head:", self.head
(self.title,) = struct.unpack('60s', phiFile.read(60))
#print "title:", self.title
junk = struct.unpack('8s', phiFile.read(8))
if is64:
junk = struct.unpack('8s', phiFile.read(8))
#next line raises error if grid too big
#GxGxG -> packed into an array xyz order samplePhi = array.array('f')
tempArray.fromfile(phiFile, gridSize**3)
tempArray.byteswap()
#for count in xrange(gridSize**3):
# bats = phiFile.read(4) #raw characters
# blah = struct.unpack('>f', bats)[0] #always big-endian
# tempArray.append(blah)
junk = struct.unpack('8s', phiFile.read(8))
if is64:
junk = struct.unpack('8s', phiFile.read(8))
self.gridDimension = gridSize
self.phiArray = tempArray
break # read successfully, just go on and read the last bits
except EOFError:
phiFile.close()
(self.botlabel,) = struct.unpack('16s', phiFile.read(16))
#print "botlabel:", self.botlabel
junk = struct.unpack('8s', phiFile.read(8))
if is64:
junk = struct.unpack('8s', phiFile.read(8))
#>ffff on next line forces big-endian reading
(self.scale, self.oldmid[0], self.oldmid[1], self.oldmid[2],) = \
struct.unpack('>ffff', phiFile.read(16))
#print "scale, oldmid:", self.scale, self.oldmid
junk = struct.unpack('4s', phiFile.read(4))
phiFile.close()
def copyPhi(self):
'''make a deep copy of the phimap that can be edited without disturbing the
original.'''
newPhi = phi()
newPhi.oldmid = self.oldmid
newPhi.toplabel = self.toplabel
newPhi.head = self.head
newPhi.title = self.title
newPhi.botlabel = self.botlabel
newPhi.scale = self.scale
newPhi.phiArray = self.phiArray
newPhi.gridDimension = self.gridDimension
newPhi.__minsmaxs = None
newPhi.__boundaries = None
return newPhi
def write(self, phiFileName=False):
'''write data to member data structure manually,
then call this to write to file
the pad lines reproduce the binary padding of an original
fortran formatted phi file'''
if phiFileName: # do nothing if no filename given
outArray = copy.deepcopy(self.phiArray)
outArray.byteswap() # switch endianness back, only for writing
phiFile = open(phiFileName, 'wb') # b may be unnecessary, have to check
phiFile.write(struct.pack('4b', 0, 0, 0, 20)) # pad
phiFile.write(struct.pack('20s', self.toplabel))
phiFile.write(struct.pack('8b', 0, 0, 0, 20, 0, 0, 0, 70)) # pad
phiFile.write(struct.pack('10s', self.head))
phiFile.write(struct.pack('60s', self.title))
phiFile.write(struct.pack('4b', 0, 0, 0, 70)) # pad, always same
phiFile.write(struct.pack('>l', len(outArray)*4)) # diff. pad sometimes
#print "writing this many data points in phimap:", len(outArray)
outArray.tofile(phiFile) # array
phiFile.write(struct.pack('>l', len(outArray)*4)) # diff. pad sometimes
phiFile.write(struct.pack('4b', 0, 0, 0, 16)) # pad, always same
phiFile.write(struct.pack('16s', self.botlabel))
phiFile.write(struct.pack('8b', 0, 0, 0, 16, 0, 0, 0, 16)) # pad
phiFile.write(struct.pack(
'>ffff', self.scale, self.oldmid[0], self.oldmid[1], self.oldmid[2]))
#> on previous line forces big-endian writing
phiFile.write(struct.pack('4b', 0, 0, 0, 16)) # pad
phiFile.close()
def trimPhi(self, newmidIndices, newSize):
'''for a new center index and a desired cubic grid size, trim the current
phimap and return the new trimmed phimap'''
plusMinus = (newSize - 1) / 2 # how many to add or subtract from the center
newPhi = phi()
newPhi.oldmid = self.getXYZlist(newmidIndices) # only change of these data
newPhi.toplabel = self.toplabel
newPhi.head = self.head
newPhi.title = self.title
newPhi.botlabel = self.botlabel
newPhi.scale = self.scale
#the phiArray does change
newPhi.phiArray = array.array('f')
for oldIndexZ in xrange(
newmidIndices[2] - plusMinus, newmidIndices[2] + plusMinus + 1):
for oldIndexY in xrange(
newmidIndices[1] - plusMinus, newmidIndices[1] + plusMinus + 1):
for oldIndexX in xrange(
newmidIndices[0] - plusMinus, newmidIndices[0] + plusMinus + 1):
if oldIndexX >= 0 and oldIndexX < self.gridDimension and \
oldIndexY >= 0 and oldIndexY < self.gridDimension and \
oldIndexZ >= 0 and oldIndexZ < self.gridDimension:
newPhi.phiArray.append(
self.getValue(oldIndexX, oldIndexY, oldIndexZ))
else:
newPhi.phiArray.append(0.0) # outside the original grid.
#print "total array size is:", len(newPhi.phiArray)
return newPhi
def findPhiCorners(self, newmidIndices, newSize):
'''for a new center index and a desired cubic grid size, find the new
corners of the phimap'''
plusMinus = (newSize - 1) / 2 # how many to add or subtract from the center
lowerLeft = \
[newmidIndices[0] - plusMinus, newmidIndices[1] - plusMinus,
newmidIndices[2] - plusMinus]
upperRight = \
[newmidIndices[0] + plusMinus + 1, newmidIndices[1] + plusMinus + 1,
newmidIndices[2] + plusMinus + 1]
return lowerLeft, upperRight
def findNewPhiIndices(self, newmidIndices, newSize):
'''for a new center index and a desired cubic grid size, return xyz coords
of each coordinate in the new box'''
coordList = []
plusMinus = (newSize - 1) / 2 # how many to add or subtract from the center
for oldIndexZ in xrange(
newmidIndices[2] - plusMinus, newmidIndices[2] + plusMinus + 1):
for oldIndexY in xrange(
newmidIndices[1] - plusMinus, newmidIndices[1] + plusMinus + 1):
for oldIndexX in xrange(
newmidIndices[0] - plusMinus, newmidIndices[0] + plusMinus + 1):
coordList.append((oldIndexX, oldIndexY, oldIndexZ))
return coordList
def getMinsMaxs(self):
'''finds the positions of the extreme grid corners'''
if self.__minsmaxs is None:
mins, maxs = [], []
for center in self.oldmid:
mins.append(center - ((self.gridDimension - 1.)/(2. * self.scale)))
maxs.append(center + ((self.gridDimension - 1.)/(2. * self.scale)))
self.__minsmaxs = mins, maxs
return self.__minsmaxs
def getMinMaxValues(self):
'''finds the minimum and maximum value'''
return min(self.phiArray), max(self.phiArray)
def getMeanAbsoluteValues(self):
'''takes the abs value of each phi value, then the average'''
sum = 0.0
for value in self.phiArray:
sum += math.fabs(value)
return sum/float(len(self.phiArray))
def getMeanValues(self):
'''mean of all phi values'''
sum = 0.0
for value in self.phiArray:
sum += value
return sum/float(len(self.phiArray))
def getMaxValues(self):
'''just the max'''
return max(self.phiArray)
def countValues(self):
'''counts the occurence of each value'''
counts = {}
for value in self.phiArray:
if value in counts:
counts[value] += 1
else:
counts[value] = 1
return counts
def histogramValues(self, width=1., useMin=None, useMax=None):
'''makes a basic histogram'''
ends = list(self.getMinMaxValues())
if useMin is not None:
ends[0] = useMin
if useMax is not None:
ends[1] = useMax
bars = int(math.ceil((ends[1] - ends[0]) / width) + 1)
counts = [0 for x in xrange(bars)]
for value in self.phiArray:
if value >= ends[0] and value <= ends[1]:
counts[int(math.floor((value - ends[0]) / width))] += 1
return counts
def getXYZlist(self, xyz):
'''changes list to x,y,z calls getXYZ'''
return self.getXYZ(xyz[0], xyz[1], xyz[2])
def getXYZ(self, xInd, yInd, zInd):
'''returns the xyz coordinate of the center of the box'''
mins, maxs = self.getMinsMaxs()
gap = 1./self.scale
return mins[0]+(xInd*gap), mins[1]+(yInd*gap), mins[2]+(zInd*gap)
def getValueList(self, xyz):
'''changes list into x, y, z then calls getValue'''
return self.getValue(xyz[0], xyz[1], xyz[2])
def getValue(self, xInd, yInd, zInd):
'''for a given set of indices, return the value in the array'''
index = int(zInd*(self.gridDimension**2.) + yInd*self.gridDimension + xInd)
return self.phiArray[index]
def getValueListCheckBounds(self, xyzList, retValueIfBad=0):
'''passes to getValueCheckBounds'''
return self.getValueCheckBounds(
xyzList[0], xyzList[1], xyzList[2], retValueIfBad)
def getValueCheckBounds(self, xInd, yInd, zInd, retValueIfBad=0):
'''does grid bounds checking first, returns retValueIfBad if outside grid,
otherwise call getvalue'''
if xInd >= 0 and xInd < self.gridDimension and \
yInd >= 0 and yInd < self.gridDimension and \
zInd >= 0 and zInd < self.gridDimension:
return self.getValue(xInd, yInd, zInd)
else:
return retValueIfBad
def setValueList(self, xyz, value):
'''calls setValue with expanded xyz into items'''
self.setValue(xyz[0], xyz[1], xyz[2], value)
def setValue(self, xInd, yInd, zInd, value):
'''puts the value into the phi array'''
index = int(zInd*(self.gridDimension**2.) + yInd*self.gridDimension + xInd)
self.phiArray[index] = value
def transform(self, threshold=6.0, inside=-2.0, outside=-1.0):
'''for every value in the array, change it to inside or outside,
destructively overwrites old values'''
for index in xrange(len(self.phiArray)):
value = self.phiArray[index]
if value < threshold:
where = outside
else:
where = inside
self.phiArray[index] = where
def subtract(self, other):
'''subtract other from self, destructively write over self'''
self.modify(other, -1)
def add(self, other):
'''add other to self, destructively write over self.'''
self.modify(other, 1)
def modify(self, other, change):
'''modify other to self, destructively write over self. allows +-/etc
presume without checking that grids are compatible (same mid etc)'''
for index in xrange(len(self.phiArray)):
value = other.phiArray[index]
#save = self.phiArray[index]
self.phiArray[index] += (value * change)
#if self.phiArray[index] != 0.0:
# print self.phiArray[index], value, save, index
def findBoundaries(
self, inside=-2.0, border=2, pointXYZ=None, pointList=None):
'''finds the extreme x,y,z positions that enclose all inside positions'''
if self.__boundaries is None: # need to calculate it
if pointXYZ is not None:
self.__boundaries = self.findPointMinsMaxs(pointXYZ, pointList)
else:
self.__boundaries = [self.gridDimension, self.gridDimension,
self.gridDimension], [0, 0, 0]
for x in xrange(self.gridDimension):
for y in xrange(self.gridDimension):
for z in xrange(self.gridDimension):
if x < self.__boundaries[0][0] or x > self.__boundaries[1][0] or \
y < self.__boundaries[0][1] or y > self.__boundaries[1][1] or \
z < self.__boundaries[0][2] or z > self.__boundaries[1][2]:
value = self.getValue(x, y, z)
if value == inside:
indices = (x, y, z)
for coord in xrange(3):
self.__boundaries[0][coord] = min(
self.__boundaries[0][coord], indices[coord])
self.__boundaries[1][coord] = max(
self.__boundaries[1][coord], indices[coord])
for coord in range(3):
self.__boundaries[0][coord] = max(
0, self.__boundaries[0][coord] - border)
self.__boundaries[1][coord] = min(
self.gridDimension, self.__boundaries[1][coord]+border)
return self.__boundaries
def getBoundaryLengths(self, inside=-2.0, border=2):
'''calls findBoundaries if necessary, returns the lengths (max-min)'''
if self.__boundaries is None: # need to calculate it
self.findBoundaries(inside, border)
lengths = [self.__boundaries[1][0] - self.__boundaries[0][0],
self.__boundaries[1][1] - self.__boundaries[0][1],
self.__boundaries[1][2] - self.__boundaries[0][2]]
return lengths
def createFromGrid(
self, grid, gridSize, defaultValue=0.0, toplabel="",
head="", title="", botlabel="", lowestGridSize=65):
'''does grid->phi data structure conversion'''
self.toplabel = toplabel[:20] # easy stuff first
self.head = head[:10]
self.title = title[:60]
self.botlabel = botlabel[:16]
lens = [len(grid), len(grid[0]), len(grid[0][0])]
#have to expand to valid gridSize
newGridSize = 0
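        # NOTE: self.gridSizes (a sequence of allowed cubic grid dimensions, e.g. the
        # 65 or 193 mentioned in the header comment) is never defined in this file;
        # it must be supplied for createFromGrid to pick a size.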
for possibleGridSize in self.gridSizes:
good = True
if possibleGridSize < lowestGridSize:
good = False
for oneLength in lens:
if oneLength > possibleGridSize:
good = False
if good:
newGridSize = possibleGridSize
self.gridDimension = newGridSize
#now take care of the grid
self.phiArray = array.array('f')
for z in xrange(self.gridDimension):
for y in xrange(self.gridDimension):
for x in xrange(self.gridDimension):
if x < lens[0] and y < lens[1] and z < lens[2]:
self.phiArray.append(grid[x][y][z][0])
else: # outside real grid
self.phiArray.append(defaultValue)
#scale and oldmid are all that is left
self.scale = 1./gridSize
for coord in xrange(3):
self.oldmid[coord] = grid[0][0][0][coord + 1] \
- (gridSize / 2.) + (self.gridDimension / self.scale) / 2.
#data should be ready for writing now
def findPointMinsMaxs(self, pointXYZ, pointList):
minsPts = pointXYZ[0][1:]
maxsPts = pointXYZ[0][1:]
for point in pointList:
xyz = pointXYZ[point-1][1:]
for coord in range(3):
minsPts[coord] = min(minsPts[coord], xyz[coord])
maxsPts[coord] = max(maxsPts[coord], xyz[coord])
newMins = list(self.getIndices(minsPts))
newMaxs = list(self.getIndices(maxsPts)) # so they initialize to pts
return newMins, newMaxs
def getIndices(self, pt):
'''helper function to find the box a point is in'''
mins, maxs = self.getMinsMaxs()
gridSize = 1./self.scale
xIndex = int(math.floor((pt[0]-mins[0])/gridSize))
yIndex = int(math.floor((pt[1]-mins[1])/gridSize))
zIndex = int(math.floor((pt[2]-mins[2])/gridSize))
#print xIndex, yIndex, zIndex, mins, pt, maxs
return xIndex, yIndex, zIndex
def trilinear_interpolation(self, point):
'''for a given point, find the box it is in, the trilinearly interpolate
and return the value at that point. this is in kT, as that is what phiMaps
hold. for usual applications, you want to take this times the charge and
0.5924 to put it in kcal/mol'''
ptX, ptY, ptZ = self.getIndices(point)
values = [0. for count in xrange(8)]
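        # values[] are finite-difference coefficients of the trilinear polynomial
        #   f(x,y,z) = v0*x*y*z + v1*x*y + v2*x*z + v3*y*z + v4*x + v5*y + v6*z + v7
        # (x, y, z being the fractional offsets computed below), chosen so that the
        # polynomial reproduces the eight surrounding grid values at the cell corners.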
values[7] = self.getValue(ptX, ptY, ptZ)
values[6] = self.getValue(ptX, ptY, ptZ + 1) - values[7]
values[5] = self.getValue(ptX, ptY + 1, ptZ) - values[7]
values[4] = self.getValue(ptX + 1, ptY, ptZ) - values[7]
values[3] = self.getValue(ptX, ptY + 1, ptZ + 1) - values[7] - \
values[6] - values[5]
values[2] = self.getValue(ptX + 1, ptY, ptZ + 1) - values[7] - \
values[6] - values[4]
values[1] = self.getValue(ptX + 1, ptY + 1, ptZ) - values[7] - \
values[5] - values[4]
values[0] = self.getValue(ptX + 1, ptY + 1, ptZ + 1) - values[7] - \
values[6] - values[5] - values[4] - values[3] - values[2] - values[1]
gridPoint = self.getXYZ(ptX, ptY, ptZ)
fraction = [0. for count in xrange(3)]
for count in xrange(3):
fraction[count] = point[count] - gridPoint[count]
returnPhiValue = values[0] * fraction[0] * fraction[1] * fraction[2] + \
values[1] * fraction[0] * fraction[1] + \
values[2] * fraction[0] * fraction[2] + \
values[3] * fraction[1] * fraction[2] + values[4] * fraction[0] + \
values[5] * fraction[1] + values[6] * fraction[2] + values[7]
#print values, fraction, returnPhiValue
return returnPhiValue
def trimToBoxCenterAndSize(self, corners, center, dimensions):
'''given a box, find the new center and size of a valid phimap based on
this current phimap'''
#print corners, center, dimensions
#print self.scale, self.oldmid
#find the midpoint and corners
centerIndices = self.getIndices(center)
newmid = self.getXYZlist(centerIndices) # becomes the new oldmid
onecorner = self.getIndices(corners[0:3])
twocorner = [coord + 1 for coord in self.getIndices(corners[3:6])]
#phimap grid can only be cubic
biggestDimension = 0
if twocorner[1] - onecorner[1] > twocorner[0] - onecorner[0]:
biggestDimension = 1
if (twocorner[2] - onecorner[2] >
twocorner[biggestDimension] - onecorner[biggestDimension]):
biggestDimension = 2
newSize = twocorner[biggestDimension] - onecorner[biggestDimension]
if 0 == newSize % 2: # if size is even, that's not allowed, so,
newSize += 1 # make it odd
return centerIndices, newSize
def trimToBox(self, corners, center, dimensions):
'''given a box (see box.py) trim so that the box is enclosed but not more.
returns the new trimmed phimap'''
centerIndices, newSize = self.trimToBoxCenterAndSize(
corners, center, dimensions)
return self.trimPhi(centerIndices, newSize), centerIndices, newSize
if -1 != string.find(sys.argv[0], "phi.py"):
#if (len(sys.argv) > 1): #want to test output of phimaps
# phiData.write(sys.argv[2])
if (len(sys.argv) > 1):
phiSize = int(sys.argv[2])
else:
phiSize = None
phiData = phi(sys.argv[1], gridSizes=(phiSize,))
#print phiData.countValues()
#print phiData.getMinMaxValues()
print phiData.getMeanAbsoluteValues()
print phiData.scale
print phiData.oldmid
```
#### File: scripts/common/request-reserved-slot.py
```python
import lockfile
import os
import psutil
import stat
import sys
WORLD_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | \
stat.S_IRGRP | stat.S_IWGRP | \
stat.S_IROTH | stat.S_IWOTH
EXECUTE_PERMISSIONS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
SYSTEM_LOCK_DIR = '/dev/shm/dock-process-reservations'
LOCK_FILE = os.path.join(SYSTEM_LOCK_DIR, 'lock')
RESERVATION_FILE = os.path.join(SYSTEM_LOCK_DIR, 'in-use')
LIMIT_FILE = os.path.join(SYSTEM_LOCK_DIR, 'max-reservations')
def get_processor_count():
import multiprocessing
return multiprocessing.cpu_count()
def setup():
if not os.path.exists(SYSTEM_LOCK_DIR):
os.makedirs(SYSTEM_LOCK_DIR)
os.chmod(SYSTEM_LOCK_DIR, WORLD_PERMISSIONS | EXECUTE_PERMISSIONS)
if not os.path.exists(RESERVATION_FILE):
open(RESERVATION_FILE, 'w').close()
os.chmod(RESERVATION_FILE, WORLD_PERMISSIONS)
if not os.path.exists(LIMIT_FILE) or os.stat(LIMIT_FILE).st_size == 0:
with open(LIMIT_FILE, 'w') as f:
f.write("{0:d}".format(get_processor_count()))
os.chmod(LIMIT_FILE, WORLD_PERMISSIONS)
def get_reservation_number(pid):
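    # Slot protocol: RESERVATION_FILE holds one line per slot, each either a PID or
    # empty.  Under the file lock the slots are loaded, dead-PID slots are reclaimed
    # if none are free, the first free slot is claimed for `pid`, and the file is
    # rewritten in place.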
with open(LIMIT_FILE, 'r') as f:
max_reservations = int(f.read())
slots = [None] * max_reservations
with lockfile.FileLock(LOCK_FILE) as lock:
with open(RESERVATION_FILE, 'r+') as f:
for num, line in enumerate(f):
line = line.strip()
opid = int(line) if line else None
slots[num] = opid
if slots.count(None) == 0:
for num, opid in enumerate(slots):
if opid is not None and not psutil.pid_exists(opid):
slots[num] = None
try:
reservation_number = slots.index(None)
slots[reservation_number] = pid
f.seek(0)
f.truncate()
f.write("\n".join('' if pid is None else str(pid) for pid in slots))
except ValueError:
reservation_number = None
return reservation_number
def main(args, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
setup()
if len(args) > 0:
reservation_pid = int(args[0])
else:
reservation_pid = os.getppid()
reservation_number = get_reservation_number(reservation_pid)
if reservation_number is not None:
stdout.write("{0:d}\n".format(reservation_number))
return 0
else:
stderr.write("No available reservations in {0}\n".format(SYSTEM_LOCK_DIR))
return -1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
```
#### File: docking/scripts/create_VDW_DX.py
```python
import struct
import sys
import array
# written by <NAME>
def write_out_dx_file(file,xn,yn,zn,dx,dy,dz,origin,values):
fileh = open(file,'w')
#object 1 class gridpositions counts 40 40 40
#origin 35.31 27.576 18.265
#delta 0.5 0 0
#delta 0 0.5 0
#delta 0 0 0.5
#object 2 class gridconnections counts 40 40 40
#object 3 class array type float rank 0 items 64000 data follows
fileh.write('object 1 class gridpositions counts %d %d %d\n' % (xn,yn,zn))
#fileh.write('origin %6.2f %6.2f %6.2f\n' % (origin[0],origin[1],origin[2]))
fileh.write('origin %7.4f %7.4f %7.4f\n' % (origin[0],origin[1],origin[2]))
fileh.write('delta %6.6f 0 0\n' % dx)
fileh.write('delta 0 %6.6f 0\n' % dy)
fileh.write('delta 0 0 %6.6f\n' % dz)
fileh.write('object 2 class gridconnections counts %d %d %d\n' % (xn,yn,zn))
fileh.write('object 3 class array type float rank 0 items %d data follows\n' % len(values))
count = 1
for value in values:
if (value == 0.0):
fileh.write('%d' % 0)
else:
fileh.write('%f' % value)
# print newline after 3rd number.
if (count == 3):
fileh.write('\n')
count = 0
# print space after number but not at the end of the line.
else:
fileh.write(' ')
count = count + 1
    # if the last line has fewer than 3 numbers then print a newline.
if (count < 3):
fileh.write('\n')
fileh.close()
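# NOTE: this read_bump is superseded by the fuller definition of the same name
# further down in this file; Python keeps whichever def is executed last.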
def read_bump(bump_file):
bump_open = open(bump_file,'r')
bump_read = bump_open.readlines()
bump_open.close()
x_coords = []
y_coords = []
z_coords = []
for line in bump_read[0:2]:
#print(line)
line = line.strip().split()
spacing = '0.200'
if line[0] == spacing:
print(line)
box_corner_x = float(line[1])
box_corner_y = float(line[2])
box_corner_z = float(line[3])
x_dim = int(line[4])
y_dim = int(line[5])
z_dim = int(line[6])
return(x_dim, y_dim, z_dim, [box_corner_x, box_corner_y, box_corner_z])
def construct_box(dx_file_name, origin, spacing, xn, yn, zn, values):
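    # `values` comes in with x varying fastest (the order read_bump produces);
    # OpenDX expects the last index (z) to vary fastest, so the data is loaded
    # into matrix[x][y][z] and then written back out in x-outer / z-inner order.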
matrix = []
for x in range(xn):
y_list = []
for y in range(yn):
z_list = []
for z in range(zn):
z_list.append(0)
y_list.append(z_list)
matrix.append(y_list)
index = 0
for k in range(zn):
for j in range(yn):
for i in range(xn):
matrix[i][j][k] = values[index]
index += 1
write_out_list = []
for i in range(xn):
for j in range(yn):
for k in range(zn):
write_out_list.append(matrix[i][j][k])
dx = spacing
dy = spacing
dz = spacing
write_out_dx_file(dx_file_name,xn,yn,zn,dx,dy,dz,origin,write_out_list)
def read_bump(bump_file):
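    # vdw.bmp layout: header lines carry the grid spacing, box corner and x/y/z
    # dimensions; the remaining lines form one long string with one 'T'/'F'
    # character per grid point (x varying fastest). 'F' points become 1.0 and
    # 'T' points 0.0 below.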
bump_open = open(bump_file,'r')
bump_read = bump_open.readlines()
bump_open.close()
x_coords = []
y_coords = []
z_coords = []
for line in bump_read[0:2]:
line = line.strip().split()
spacing = '0.200'
if line[0] == spacing:
print(line)
box_corner_x = float(line[1])
box_corner_y = float(line[2])
box_corner_z = float(line[3])
x_dim = int(line[4])
y_dim = int(line[5])
z_dim = int(line[6])
x_coords = [box_corner_x]
y_coords = [box_corner_y]
z_coords = [box_corner_z]
bump_string = ''
for line in bump_read[2:]:
line = line.strip()
bump_string+=line
spacing = float(0.200)
for i in range(x_dim-1):
new_val = box_corner_x+spacing
x_coords.append(new_val)
box_corner_x = new_val
for i in range(y_dim-1):
new_val = box_corner_y+spacing
y_coords.append(new_val)
box_corner_y = new_val
for i in range(z_dim-1):
new_val = box_corner_z+spacing
z_coords.append(new_val)
box_corner_z = new_val
values = []
count = 0
for z in z_coords:
for y in y_coords:
for x in x_coords:
if bump_string[count] == "F":
values.append(float(1))
else:
values.append(float(0))
count += 1
return(values, [x_dim, y_dim, z_dim], [x_coords[0], y_coords[0], z_coords[0]])
def main():
vdw_file = "vdw.vdw"
values, dims, origin = read_bump("vdw.bmp")
x_dim = dims[0]
y_dim = dims[1]
z_dim = dims[2]
spacing = 0.200
vdwFile = open(vdw_file, 'rb') # b is for binary, r is for read
tempArray = array.array('f')
tempArray.fromfile(vdwFile, x_dim * y_dim * z_dim)
tempArray.byteswap()
construct_box("vdw_energies_repulsive.dx", origin, spacing, x_dim, y_dim, z_dim, tempArray)
tempArray = array.array('f')
tempArray.fromfile(vdwFile, x_dim * y_dim * z_dim)
tempArray.byteswap()
construct_box("vdw_energies_attractive.dx", origin, spacing, x_dim, y_dim, z_dim, tempArray)
construct_box("vdw.dx", origin, spacing, x_dim, y_dim, z_dim, values)
main()
```
#### File: docking/scripts/dockovalent_install_warhead.py
```python
# NOTE: the original snippet was truncated (an empty transform(), an undefined
# `options`, and a dangling OEUniMolecularRxn call); what follows is a minimal,
# hedged reconstruction of the apparent intent and may differ from the original.
from openeye import oechem
import os, string, sys

def transform(smirks, mol):
    '''apply a SMIRKS transformation in place to mol'''
    qmol = oechem.OEQMol()
    if not oechem.OEParseSmirks(qmol, smirks):
        oechem.OEThrow.Fatal("Unable to parse SMIRKS: %s" % smirks)
    umr = oechem.OEUniMolecularRxn()
    if not umr.Init(qmol):
        oechem.OEThrow.Fatal("Failed to initialize reaction with %s SMIRKS" % smirks)
    umr.SetClearCoordinates(True)
    return umr(mol)

def main(argv):
    # argv: [smirks, smiles] (hypothetical CLI, see note above)
    smirks, smiles = argv[0], argv[1]
    covalent_mol = oechem.OEGraphMol()
    if not oechem.OEParseSmiles(covalent_mol, smiles):
        oechem.OEThrow.Fatal("Unable to parse SMILES: %s" % smiles)
    transform(smirks, covalent_mol)
    print(oechem.OEMolToSmiles(covalent_mol))

if __name__ == '__main__':
    main(sys.argv[1:])
```
#### File: scripts/dude_scripts/0003b_write_out_ligands_decoys.py
```python
import os, sys
###################################
# written by <NAME>
# 10/2017 - 4/2018
#
####################################
def write_out_lig_decoys(lig_list, dec_list):
output = open("ligands.smi", 'w')
for lig in lig_list:
name = lig[0]
smiles = lig[1]
if "_" in name:
name = name.split("_")[0]
output.write(smiles+" "+name+"\n")
output.close()
output1 = open("decoys.smi", 'w')
output2 = open("decoy_protomers.smi", 'w')
for dec in dec_list:
name = dec[0]
smiles = dec[1]
prot_smiles = dec[2]
if "_" in name:
name = name.split("_")[0]
output1.write(smiles+" "+name+"\n")
output2.write(prot_smiles+" "+name+"\n")
output1.close()
output2.close()
pwd = os.getcwd()+"/"
decoy_dir = pwd+"decoys/"
if not os.path.isdir(decoy_dir):
os.system("mkdir "+decoy_dir)
os.chdir(decoy_dir)
os.system("cp ../decoys.smi .")
os.system("cp ../decoy_protomers.smi .")
def check_int(prot_id):
try:
int(prot_id)
return(True)
except ValueError:
return(False)
def main():
pwd = os.getcwd()+"/"
if len(sys.argv) != 3:
print("Syntax: python 0004b_write_ligands_decoys.py smiles_dir new_dir_name")
sys.exit()
system_dir = sys.argv[1]+"/"
new_dir_name = sys.argv[2]
if not os.path.isdir(system_dir):
print(system_dir+" does not exist")
sys.exit()
decoy_files = [name for name in os.listdir(system_dir) if (os.path.isfile(system_dir+name) and name.endswith("_final_property_matched_decoys.txt"))]
lig_list = []
dec_list = []
repeat_list = []
for decoy_file in decoy_files:
decoy_file_name = system_dir+decoy_file
if os.path.isfile(decoy_file_name):
open_fin = open(decoy_file_name, 'r')
read_fin = open_fin.readlines()
open_fin.close()
for line in read_fin:
splitline = line.strip().split()
if len(splitline) > 0:
if splitline[0] == "LIGAND:":
lig_smiles = splitline[1]
lig_name = splitline[2]
if lig_name not in repeat_list:
repeat_list.append(lig_name)
lig_list.append([lig_name, lig_smiles])
if splitline[0] == "DECOY":
dec_smiles = splitline[2]
dec_name = splitline[3]
dec_prot_id = splitline[10]
int_or_not = check_int(dec_prot_id)
if int_or_not == False: ### make sure it does not have a protomer ID
if dec_name not in repeat_list:
repeat_list.append(dec_name)
dec_list.append([dec_name, dec_smiles, dec_prot_id])
full_new_dir_path = pwd+new_dir_name+"/"
if not os.path.isdir(full_new_dir_path):
os.system("mkdir "+full_new_dir_path)
os.chdir(full_new_dir_path)
write_out_lig_decoys(sorted(lig_list), sorted(dec_list))
main()
```
#### File: scripts/dude_scripts/calc_props.py
```python
from __future__ import print_function, absolute_import
import os, sys
from rdkit import Chem as C
from rdkit.Chem import Descriptors as D
from rdkit.Chem import rdMolDescriptors as CD
def get_stuff(smiles):
mol = C.MolFromSmiles(smiles)
#hac = D.HeavyAtomCount(mol)
mw = CD.CalcExactMolWt(mol)
logp = C.Crippen.MolLogP(mol)
rotB = D.NumRotatableBonds(mol)
HBA = CD.CalcNumHBA(mol)
HBD = CD.CalcNumHBD(mol)
q = C.GetFormalCharge(mol)
print(mw, logp, rotB, HBA, HBD)
print("MW is ",mw)
print("logP is ",logp)
print("Rotatable Bonds is ",rotB)
print("HB Donors is ",HBD)
print("HB Acceptors is ",HBA)
print("Formal Charge is ",q)
return(mw, logp, rotB, HBA, HBD, q)
def main():
pwd = os.getcwd()+"/"
smiles = sys.argv[1]
get_stuff(smiles)
main()
``` |
{
"source": "Jilingwei/LeetCodeLearning",
"score": 4
} |
#### File: code/python/two_sum.py
```python
class two_sum:
    @staticmethod
    def twoSum(nums, target):
        lens = len(nums)
        j = -1
        for i in range(0, lens):
            # look for the complement among the numbers after index i
            temp = nums[(i+1):]
            if (target - nums[i]) in temp:
                j = temp.index(target - nums[i])
                break
        if j >= 0:
            # j is an index into the slice, so the real index is i + 1 + j
            return [i, i + 1 + j]


if __name__ == "__main__":
    Res = two_sum.twoSum([3, 3], 6)
    print(Res)
``` |
{
"source": "jilinskycloud/meta-skycloud",
"score": 2
} |
#### File: gw_web/_autoConfig/autoConfig.py
```python
import subprocess
import json
import os
import os.path
import sqlite3
from os import path
import base64
from Crypto.Cipher import AES
import redis
import time
r = redis.StrictRedis(host='localhost', port=6370, db=0, charset="utf-8", decode_responses=True)
global Xcount
Xcount = 0
class AutoConf:
def __init__(self):
print("This is the class...")
'''
f1 = "/tmp/flask_daemon.log"
f2 = "/tmp/hb_daemon.log"
f3 = "/tmp/http_daemon.log"
f4 = "/tmp/autoC_daemon.log"
f5 = "/tmp/logs_daemon.log"
self.genFile(f1)
self.genFile(f2)
self.genFile(f3)
self.genFile(f4)
self.genFile(f5)
'''
'''
def genFile(self, path):
print("cleating Files")
try:
str = "rm "+path
os.system(str)
#open(path, 'x')
except FileExistsError:
pass
'''
def read_file(self, path_):
file_p = "/mnt/config_t.text"
if path.exists(file_p) == True:
with open(file_p,'r') as data:
dd = data.read()
return dd
else:
return 'null'
def format_data(self, data):
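        # config_t.text carries four comma-separated base64 fields: AES-EAX nonce,
        # ciphertext, MAC tag and key.  Decryption yields a '~'-separated list of
        # gateway settings that is unpacked into the dict below and verified
        # against the MAC before being written to ble_conf.text.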
dd = data.split(',')
print(dd)
nonce = base64.b64decode(dd[0])
ciphertext = base64.b64decode(dd[1])
mac = base64.b64decode(dd[2])
key = base64.b64decode(dd[3])
print("Data Formate Function...")
print(key)
print(ciphertext)
cipher = AES.new(key, AES.MODE_EAX, nonce)
#cipher = AES.new(key, AES.MODE_CCM, nonce)
plaintext = cipher.decrypt(ciphertext)
print(plaintext)
plaintext = plaintext.decode("utf-8")
plaintext = plaintext.split("~")
data = {
"enable_post_data": plaintext[0],
"cache_size": plaintext[1],
"gw_pass": plaintext[2],
"heart_beat": plaintext[3],
"server_socket": plaintext[4],
"data_interval": plaintext[5],
"serial_no": plaintext[6],
"gw_uname": plaintext[7],
"rssi_range": plaintext[8],
"sniffer_type": plaintext[9],
"protoc": plaintext[10]
}
try:
cipher.verify(mac)
with open("/www/web/_netw/conf/ble_conf.text", "w") as f:
json.dump(data, f, indent=4)
return data
except ValueError:
print("Key incorrect or message corrupted")
def insert_sqlite(self, data):
os.system("rm /www/web/gw_FlaskDb.db")
conn = sqlite3.connect('/www/web/gw_FlaskDb.db')
conn.execute('CREATE TABLE login (id INTEGER PRIMARY KEY AUTOINCREMENT, username TEXT, password TEXT)')
print("Table created successfully");
# Insert Data to Login table
print("UNAME:: ",data['gw_uname'])
print("PASS:: ", data['gw_pass'])
conn.execute("INSERT INTO login (username,password) VALUES (?,?)",(data['gw_uname'], data['gw_pass']) )
conn.commit()
msg = "Record successfully added"
def re_write(self, data1):
print("update Usb data...")
dd = data1.split(',')
print(dd)
nonce = base64.b64decode(dd[0])
ciphertext = base64.b64decode(dd[1])
mac = base64.b64decode(dd[2])
key = base64.b64decode(dd[3])
cipher = AES.new(key, AES.MODE_EAX, nonce)
#cipher = AES.new(key, AES.MODE_CCM, nonce)
plaintext = cipher.decrypt(ciphertext)
print(plaintext)
plaintext = plaintext.decode("utf-8")
plt = plaintext.split("~")
#print("--------------------------",plt[6])
srl = plt[6]
srl = srl.split("-")
srl[1] = str(int( srl[1]) + 1).zfill(5)
plt[6] = srl[0]+"-"+srl[1]
print(plt[6])
re_data = plt[0]+"~"+plt[1]+"~"+plt[2]+"~"+plt[3]+"~"+plt[4]+"~"+plt[5]+"~"+plt[6]+"~"+plt[7]+"~"+plt[8]+"~"+plt[9]+"~"+plt[10]
re_data = str.encode(re_data)
cipher12 = AES.new(key, AES.MODE_EAX)
re_data = self.bs64(cipher12.nonce),self.bs64(cipher12.encrypt(re_data)),self.bs64(cipher12.digest()),self.bs64(key)
return re_data
def get_key(self, data):
dd = data.split(',')
key = dd[3]
return key
def bs64(self, vl):
return str(base64.b64encode(vl), 'utf-8')
def read_usb(self):
path1 = "/dev/sda"
for i in range(3):
path_ = path1+str(i)
mm = path.exists(path_)
print("this is the available USB path", mm)
if mm == True:
os.system("mount "+path_ +" /mnt/")
data1 = self.read_file(path_)
if data1 != 'null':
data = self.format_data(data1)
print("Lets do Configuration!!!!!!")
print(data)
self.insert_sqlite(data)
print("thi is done really")
re_data = self.re_write(data1)
with open("/mnt/config_t.text",'w') as f:
f.write(str(re_data))
key_ = self.get_key(data1)
d1 = {
"auto_config" : "yes",
"key" : key_
}
with open("/www/web/_autoConfig/config.txt",'w') as jsonfile:
json.dump(d1, jsonfile, indent=4)
print(d1)
os.system("umount "+path_)
return 1
else:
print("File not exist!!!!")
return 0
def main():
d1 = json.load(open('/www/web/_autoConfig/config.txt','r'))
print(d1['auto_config'])
obj = AutoConf()
if d1['auto_config'] == 'no':
print("Going to config the GateWay...")
r.mset({"blink-green": "0.2|green66|20|autoConf|check"})
print(r.mget("blink-green"))
obj.read_usb()
elif d1['auto_config'] == 'yes':
print("GateWay Already Configured!")
r.mset({"blink-green": "0.009|green66|40|autoConf|check"})
##MAIN FUNCTION
if __name__ == '__main__':
while True:
if os.path.exists("/var/run/ProcLevel.pid") == True:
f = open("/var/run/ProcLevel.pid","r")
pNo = f.read()
f.close()
#print(pNo)
while pNo == "1":
main()
pNo = "2"
f= open("/var/run/ProcLevel.pid","w+")
f.write(pNo)
f.close()
time.sleep(2)
break
```
#### File: files/gw_web/gw_Main.py
```python
from flask import Flask
from flask import escape
from flask import url_for
from flask import request
from flask import render_template
from flask import flash
from flask import redirect
from flask import session
from flask import jsonify
from jinja2 import Template
import psutil
import time
import json
import sqlite3
import os
import redis
import subprocess
r = redis.StrictRedis(host='localhost', port=6370, db=0, charset="utf-8", decode_responses=True)
app = Flask(__name__)
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
global Xcount
Xcount = 0
conn = sqlite3.connect('/www/web/gw_FlaskDb.db')
def log(log_str):
global Xcount
print("in log...........:: ",Xcount)
log_str = str(log_str)+" \n"
Xcount = Xcount+1
with open('/tmp/flask_daemon.log', 'a') as outfile:
outfile.write(log_str)
if Xcount > 10:
os.system("rm /tmp/flask_daemon.log")
Xcount = 0
return
#log("Opened database successfully");
conn.close()
@app.route('/getcmd', methods=['GET', 'POST'])
def getcmd():
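    # WARNING: the posted JSON body is passed straight to os.system below, so any
    # client that can reach this endpoint can execute arbitrary shell commands.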
if request.method == 'POST':
log("Get Command Function.......")
input_json = request.get_json(force=True)
os.system(input_json)
dictToReturn = {'answer':42}
return jsonify(dictToReturn)
@app.route('/resetBle', methods=['GET', 'POST'])
def resetBle():
if 'username' in session:
reset_ble = request.form['reset_ble']
if request.method == 'POST':
log("Switch ON/OFF BLE : "+reset_ble)
reset_ble = request.form['reset_ble']
#stt_ble = int(os.popen('cat /sys/class/leds/rst_ble62/brightness').read())
#if reset_ble == 'off':
# os.system("echo 0 > /sys/class/leds/rst_ble62/brightness")
# return redirect(url_for('settings'))
#elif reset_ble == 'on':
# os.system("echo 1 > /sys/class/leds/rst_ble62/brightness")
os.system("echo 0 > /sys/class/leds/rst_ble62/brightness")
time.sleep(2)
os.system("echo 1 > /sys/class/leds/rst_ble62/brightness")
return redirect(url_for('settings'))
else:
return redirect(url_for('login'))
@app.route('/reboot')
def reboot():
log("System Reboot Function......")
os.system("reboot")
ipis = cm("ifconfig eth0| egrep -o '([[:digit:]]{1,3}\.){3}[[:digit:]]{1,3}'")
ipis = ipis.split("\n")
#print("--------------------------------",ipis[0])
return "<div style='background-color:red; background-color: #e4e0e0; margin: 0px; width: 700px; text-align: center; padding: 15px; color: black; margin-left: auto; margin-right: auto;'>Device Going to Reboot! To Access Web Please <a href='http://"+ipis[0]+":5000/'>Click Here</a> After 2 minutes...</div>"
# ===================MYSQL FUNCTIONS==========================
@app.route('/delProfile/<ids>')
def delProfile(ids=None):
conn = sqlite3.connect('/www/web/gw_FlaskDb.db')
log("Delete Profile ID IS :: "+ids)
f = conn.execute("DELETE FROM login where id=?", (ids,))
conn.commit()
conn.close()
log("Delete Login User Function......")
flash("Deleted successfully")
return redirect(url_for('settings'))
#=============================================================
#=====================WEB-PAGE FUNCTIONS======================
#=============================================================
# ============================================================INDEX
@app.route('/')
@app.route('/index/')
@app.route('/index')
def index():
if 'username' in session:
log("Index Page Function......")
return redirect(url_for('dashboard'))
return redirect(url_for('login'))
# ============================================================DASHBOARD
@app.route('/dashboard')
@app.route('/dashboard/')
def dashboard():
if 'username' in session:
log("Dashboard Page Function......")
u_name = escape(session['username'])
log(session.get('device1'))
#while(1):
gw_serial = json.load(open('/www/web/_netw/conf/ble_conf.text','r'))
data = {}
data['serial'] = gw_serial['serial_no']
data['cpu'] = psutil.cpu_percent()
data['stats'] = psutil.cpu_stats()
data['cpu_freq'] = psutil.cpu_freq()
data['cpu_load'] = psutil.getloadavg()
data['ttl_memo'] = round(psutil.virtual_memory().total/1048576)
data['ttl_memo_used'] = round(psutil.virtual_memory().used/1048576)
data['ttl_memo_avai'] = round(psutil.virtual_memory().available/1048576)
data['swp_memo'] = psutil.swap_memory()
data['hostname'] =cm("hostname")
data['routeM'] = 'TC0981'
data['FirmV'] = 'v3.0.11_sniffer_TainCloud_r864'
data['lTime'] = cm('date')
data['runTime'] = cm('uptime')
data['network'] = cm("ifconfig eth0| egrep -o '([[:digit:]]{1,3}\.){3}[[:digit:]]{1,3}'")
data['mount'] = psutil.disk_partitions(all=False)
data['disk_io_count'] = psutil.disk_io_counters(perdisk=False, nowrap=True)
data['net_io_count'] = psutil.net_io_counters(pernic=False, nowrap=True)
data['nic_addr'] = psutil.net_if_addrs()
data['tmp'] = psutil.sensors_temperatures(fahrenheit=False)
data['boot_time'] = psutil.boot_time()
data['c_user'] = psutil.users()
data['reload'] = time.time()
return render_template('dashboard.html', data=data)
else:
return redirect(url_for('login'))
@app.route('/devices')
def devices():
if 'username' in session:
log("Dashboard Page Function......")
#obj = r.scan_iter()
data = r.lrange("scanned", 0, -1)
whitelisted = r.lrange("white_listed", 0, -1)
ln = len(whitelisted)
#print("----------------------------------------------------------------------",whitelisted)
return render_template('devices.html', data=data, r_obj=r, blk_ble=whitelisted, ln=ln)
else:
return redirect(url_for('login'))
def cm(dt):
log("Inner CMD Function......Dashboard Page")
klog = subprocess.Popen(dt, shell=True, stdout=subprocess.PIPE).stdout
klog1 = klog.read()
pc = klog1.decode()
return pc
# ============================================================MQTT-CONSOLE
@app.route('/console-logs')
@app.route('/console-logs/')
def mqtt_on():
if 'username' in session:
log("Console Logs Function......")
klog = subprocess.Popen("dmesg", shell=True, stdout=subprocess.PIPE).stdout
klog1 = klog.read()
pc = klog1.decode()
flask = subprocess.Popen("cat /tmp/flask_daemon.log", shell=True, stdout=subprocess.PIPE).stdout
flask = flask.read()
flask_log = flask.decode()
hb = subprocess.Popen("cat /tmp/hb_daemon.log", shell=True, stdout=subprocess.PIPE).stdout
hb = hb.read()
hb_log = hb.decode()
_http = subprocess.Popen("cat /tmp/http_daemon.log", shell=True, stdout=subprocess.PIPE).stdout
_http = _http.read()
_http_log = _http.decode()
autoC = subprocess.Popen("cat /tmp/autoC_daemon.log", shell=True, stdout=subprocess.PIPE).stdout
autoC = autoC.read()
autoC_log = autoC.decode()
return render_template('console-logs.html', data=pc, flask_log=flask_log, hb_log=hb_log, _http_log=_http_log, autoC_log=autoC_log)
else:
return redirect(url_for('login'))
# =============================================================BLE CONNECT
@app.route('/network', methods=['GET', 'POST'])
def network():
if 'username' in session:
log("Network Page Function......")
if request.method == 'POST':
if request.form['sniffer_type'] == 'IBeacon':
log("Its Beacon---------------")
result = request.form.to_dict()
log(result)
with open("/www/web/_netw/conf/ble_conf.text", "w") as f:
json.dump(result, f, indent=4)
flash("Network Configuration Updated")
elif request.form['sniffer_type'] == 'Wifi':
log("Its Wifi---------------")
result = request.form.to_dict()
log(result)
with open("/www/web/_netw/conf/wifi_conf.text", "w") as f:
json.dump(result, f, indent=4)
flash(" Network Configuration Updated")
else:
log("form data error")
log("restart hb!")
log(os.system("cat /var/run/heartbeat.pid"))
pi = open("/var/run/heartbeat.pid", 'r')
pid_ = pi.read()
pi.close()
#print(pid_)
os.system('kill -s 10 ' + pid_)
log("restart ble_post!")
if 'a' == 'a':
pi1 = open("/var/run/ble_post.pid", 'r')
pid_1 = pi1.read()
log("This is the post data pid.....")
log(pid_1)
pi1.close()
os.system('kill -s 10 ' + pid_1)
else:
proc = subprocess.Popen(["python3 /www/web/_netw/_httplib.py"], stdout=subprocess.PIPE, shell=True)
log(proc)
d1 = json.load(open('/www/web/_netw/conf/ble_conf.text','r'))
d2 = json.load(open('/www/web/_netw/conf/wifi_conf.text','r'))
return render_template('network.html', d1=d1, d2=d2)
else:
return redirect(url_for('login'))
@app.route('/blk_list', methods=['POST'])
def blk_list():
if 'username' in session:
if request.method == 'POST':
blk_mac = request.form['blacklisted']
tab = request.form['tab']
log("---------------------------------------------"+tab)
r.rpush("white_listed", blk_mac)
flash("Added to White List", 'add mac')
return redirect(url_for('devices'))
else:
return redirect(url_for('login'))
@app.route('/white_list_get/<wht_mac>')
def white_list_get(wht_mac=None):
if 'username' in session:
log("blacklisted Bacons Page!")
log(wht_mac)
obj = r.scan_iter()
blk_ble = r.lrange("white_listed", 0, -1)
log(blk_ble)
if not wht_mac in blk_ble:
r.rpush("white_listed", wht_mac)
flash('Mac Added to White List.', 'scan_ble')
else:
flash('Mac Already Exist is White List.', 'scan_ble')
return redirect(url_for('devices'))
else:
return redirect(url_for('login'))
@app.route('/blk_del/<blk_del_mac>')
def blk_del(blk_del_mac=None):
if 'username' in session:
r.lrem("white_listed", -1, blk_del_mac)
flash("Deleted successfully", 'mac_del')
return redirect(url_for('devices'))
else:
return redirect(url_for('login'))
@app.route('/status', methods=['GET', 'POST'])
def status():
if request.method == 'POST':
tip_top = r.get('hbeat')
return tip_top
return 'ok'
# =============================================================Settings
@app.route('/settings/', methods=['GET', 'POST'])
def settings():
error = None
data = []
rec=[]
if 'username' in session:
if request.method == 'POST':
log("Setting Data Received")
data.append(request.form['name'])
data.append(request.form['pass'])
log(data)
conn = sqlite3.connect('/www/web/gw_FlaskDb.db')
conn.execute("INSERT INTO login (username,password) VALUES (?,?)",(data[0], data[1]) )
conn.commit()
conn.close()
msg = "Record successfully added"
flash("Login Details Added successfully")
conn = sqlite3.connect('/www/web/gw_FlaskDb.db')
f = conn.execute("SELECT * FROM login")
rec = f.fetchall()
#print(rec)
conn.close()
stt_ble = os.popen('cat /sys/class/leds/rst_ble62/brightness').read()
log("This is the BLE Reset State:: "+stt_ble)
if int(stt_ble) == 1 or int(stt_ble) == 255:
stt_ble = "ON"
else:
stt_ble = "OFF"
#print(rec)
autoCon = json.load(open('/www/web/_autoConfig/config.txt','r'))
return render_template('settings.html', error=error, data=data, rec=rec, autoCon=autoCon, stt_ble=stt_ble)
else:
return redirect(url_for('login'))
@app.route('/update_autoCon', methods=['POST'])
def update_autoCon():
if 'username' in session:
if request.method == 'POST':
log("This is the Configuration Status::"+request.form['conf_status'])
conf_status = request.form['conf_status']
with open('/www/web/_autoConfig/config.txt', 'r+') as f:
data = json.load(f)
data['auto_config'] = conf_status # <--- add `id` value.
f.seek(0) # <--- should reset file position to the beginning.
json.dump(data, f, indent=4)
f.truncate()
return redirect(url_for('settings'))
else:
return redirect(url_for('login'))
# ============================================================SCAN BLE PAGE
@app.route('/scan_ble')
def scan_ble():
os.system("python3 /www/web/_netw/scan_ble.py")
log("SCAN BLE FUNCTION")
return redirect(url_for('devices'))
# ============================================================LOGIN PAGE
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
#print(_mysql.initLogin_(mysql))
if request.method == 'POST':
u_name = request.form['username']
u_pass = request.form['password']
flag = 0
conn = sqlite3.connect('/www/web/gw_FlaskDb.db')
f = conn.execute("SELECT * FROM login WHERE username=? and password=?", (u_name, u_pass))
#print(f)
v = f.fetchall()
if(len(v) > 0):
flag = 0
else:
flag = -1
#print(v)
conn.close()
if(flag == -1):
error = 'Invalid Credentials. Please try again.'
else:
session['username'] = request.form['username']
flash('You were successfully logged in')
return redirect(url_for('index'))
return render_template('login.html', error=error)
# ============================================================LOGOUT PAGE
@app.route('/logout')
def logout():
# remove the username from the session if it's there
session.pop('username', None)
return redirect(url_for('index'))
if __name__ == '__main__' :
if os.path.exists("/tmp/flask_daemon.log") == False:
print("File not exist CEATING IT")
open("/tmp/flask_daemon.log", "w").close()
else:
print("log file exists")
while True:
if os.path.exists("/var/run/ProcLevel.pid") == True:
f = open("/var/run/ProcLevel.pid","r")
pNo = f.read()
f.close()
if pNo == "2":
pNo = "3"
f= open("/var/run/ProcLevel.pid","w+")
f.write(pNo)
f.close()
break
app.run(host='0.0.0.0', port=5000) #, debug = True) #, threaded = True, ssl_context='adhoc') #Ssl_context = Context ,
```
#### File: gw_web/_netw/scan_ble.py
```python
import subprocess
import redis
import json
import os
import time
import serial
r = redis.StrictRedis(host='localhost', port=6370, db=0, charset="utf-8", decode_responses=True)
r1 = redis.StrictRedis(host='localhost', port=6370, db=1, charset="utf-8", decode_responses=True)
def data_split(data):
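    # parse one fixed-width hex record from the BLE sniffer: 6-byte MAC, 1-byte RSSI,
    # 9 bytes of ADV data, a 16-byte iBeacon UUID, major, minor and TX power.
    # RSSI/TX are converted assuming the raw byte encodes a negative value (x - 256).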
a = data
mac = a[6:18]
rssi = a[18:20]
rssi = int(rssi, 16) -256
adv = a[20:38]
uuid = a[38:70]
maj = a[70:74]
mina = a[74:78]
tx = a[78:80]
tx = int(tx, 16) -256
received = {'mac':mac, 'rssi':rssi, 'adv':adv, 'uuid':uuid, 'maj':maj, 'mina':mina, 'tx':tx}
return received
def verify_mac(v_mac):
obj = r.scan_iter()
blk_ble = r.lrange("white_listed", 0, -1)
print(blk_ble)
if v_mac in blk_ble:
#r.rpush("white_listed", wht_mac)
return 1
else:
return 0
def insert_r1(data1):
print("This is R1 data insert Function!--------------------------------------------------------------------------00000000000000000000000000000000000000000000000000")
print(data1)
r1.hmset(data1['mac'], {'mac':data1['mac'], 'rssi':data1['rssi'], 'adv':data1['adv'], 'uuid':data1['uuid'], 'maj':data1['maj'], 'mina':data1['mina'], 'tx':data1['tx']})
r1.expire(data1['mac'], 300)
print(r1.hgetall(data1['mac']))
d1 = json.load(open('/www/web/_netw/conf/ble_conf.text','r'))
print(d1['enable_post_data'])
post_ble = d1['enable_post_data']
def kill_proc():
print("Kill other ble process!")
proc = subprocess.Popen(["ps ax | grep 'python3 /www/web/_netw/_httplib.py' | grep -v grep | awk '{print $1}'"], stdout=subprocess.PIPE, shell=True)
(pid, err) = proc.communicate()
pid = str(pid,'utf-8')
#print("PID of Process is :: ", pid)
cmd = "kill -9 " + str(pid)
os.system(cmd)
def scan_it():
a=1
r.mset({"blink-blue":"0.1|blue67|0|scanBle|check"})
# Open Serial Port !!!!!!!
ser = serial.Serial('/dev/ttymxc2')
ser.baudrate = 115200
ser.close()
ser.open()
#!!!!!!!!!!!!!!!!!!!!!!!!!
while(a != 80):
data = ser.read(42).hex()
print(data)
if len(data) > 0:
print("The data length is ", len(data))
print(type(data))
if len(data) == 84:
data1 = data_split(data)
print("fkng RSSI----------------------------------------------------------",data1['rssi'])
r.lpush("scanned", data1)
r.expire("scanned", 400)
print(r1.lrange(data1['mac'],0,-1))
a=a+1
ser.close()
def turn_post(stat):
with open('/www/web/_netw/conf/ble_conf.text', 'r+') as f:
data = json.load(f)
data['enable_post_data'] = stat # <--- add `id` value.
f.seek(0) # <--- should reset file position to the beginning.
json.dump(data, f, indent=4)
f.truncate()
if post_ble == 'on':
stat = 'off'
turn_post(stat)
kill_proc()
scan_it()
stat = 'on'
turn_post(stat)
pNo = "4"
f= open("/var/run/ProcLevel.pid","w+")
f.write(pNo)
f.close()
os.system("/etc/init.d/http_daemon start")
elif post_ble == 'off':
kill_proc()
scan_it()
pNo = "4"
f= open("/var/run/ProcLevel.pid","w+")
f.write(pNo)
f.close()
os.system("/etc/init.d/http_daemon start")
``` |
{
"source": "JIllchen487/StanCode101",
"score": 4
} |
#### File: SC101_Assignments/SC101_Assignment6/boggle.py
```python
import time
# This is the file name of the dictionary txt file
# we will be checking if a word exists by searching through it
FILE = 'dictionary.txt'
dic = []
def main():
"""
TODO:
"""
start = time.time()
####################
global dic
dic = read_dictionary()
# print(dic)
l1 = input_row('1 row of letters: ')
# l1 = ['f', 'y', 'c', 'l']
l2 = input_row('2 row of letters: ')
# l2 = ['i', 'o', 'm', 'g']
l3 = input_row('3 row of letters: ')
# l3 = ['o', 'r', 'i', 'l']
l4 = input_row('4 row of letters: ')
# l4 = ['h', 'j', 'h', 'u']
boggle_board = create_board([l1, l2, l3, l4])
find_words(boggle_board)
####################
end = time.time()
print('----------------------------------')
print(f'The speed of your boggle algorithm: {end - start} seconds.')
def read_dictionary():
"""
This function reads file "dictionary.txt" stored in FILE
and appends words in each line into a Python list
"""
with open(FILE, 'r') as f:
for word in f:
            dic.append(word.strip())
return dic
def input_row(pmp):
a = input(pmp)
lst = a.split()
for i in lst:
if (not i.isalpha()) or len(i) != 1:
print('Illegal Input')
return
return lst
def create_board(list_of_lists):
board = {}
for i in range(4):
lst = list_of_lists[i]
for j in range(4):
board[(j, i)] = lst[j]
return board
def find_words(board):
"""
:param board: (dictionary) A dictionary that is constructed by inputs of the boggle board
:return: does not return anything but print out all the words with count
"""
chosen = []
for x in range(4):
for y in range(4):
coordinates = (x, y)
forming_word = board[coordinates]
visited = [coordinates]
find_words_helper(coordinates, board, chosen, forming_word, visited)
print(len(chosen))
def find_words_helper(coordinates, board, chosen_words, forming_word, visited):
"""
:param coordinates: (tuple) the pivot point to start with when searching for words
:param board: (dictionary) A dictionary that is constructed by inputs of the boggle board
:param chosen_words: (list) contains all the chosen vocab
:param visited: (list) contains all the coordinates of chosen neighbors
:return: does not return anything but print out all the words with count
"""
global dic
neighbors = neighbor(coordinates, board)
# Base Case
if forming_word in dic and len(forming_word) >= 4:
if forming_word not in chosen_words:
print('Found: ', forming_word)
chosen_words.append(forming_word)
# Choose
for neighbor_coordinates in neighbors:
if neighbor_coordinates not in visited:
new_word = forming_word + neighbors[neighbor_coordinates]
if has_prefix(new_word):
visited.append(neighbor_coordinates)
# Explore
find_words_helper(neighbor_coordinates, board, chosen_words, new_word, visited)
# Un-choose
visited.pop()
def neighbor(coordinates, board):
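    # returns a dict mapping each in-bounds neighbouring coordinate (8-connected,
    # excluding the cell itself) to the letter stored there on the 4x4 board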
neighbors = {}
x = coordinates[0]
y = coordinates[1]
for i in range(-1, 2):
neighbor_x = x + i
if 0 <= neighbor_x <= 3:
for j in range(-1, 2):
neighbor_y = y + j
if 0 <= neighbor_y <= 3 and (i, j) != (0, 0):
neighbors[(neighbor_x, neighbor_y)] = board[(neighbor_x, neighbor_y)]
return neighbors
def has_prefix(sub_s):
"""
:param sub_s: (str) A substring that is constructed by neighboring letters on a 4x4 square grid
:return: (bool) If there is any words with prefix stored in sub_s
"""
for ele in dic:
if ele.startswith(sub_s):
return True
return False
if __name__ == '__main__':
main()
``` |
{
"source": "JillCowan/12-Mission_to_Mars",
"score": 3
} |
#### File: JillCowan/12-Mission_to_Mars/scrape_mars.py
```python
from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
import pandas as pd
import numpy as np
import time
def init_browser():
executable_path = {"executable_path": "chromedriver.exe"}
return Browser("chrome", **executable_path, headless=False)
def scrape():
mars = {}
browser= init_browser()
#Scrape NASA Mars News site for News title and paragraph text
url= 'https://mars.nasa.gov/news/'
response= requests.get(url)
soup= bs(response.text, 'html.parser')
#get news title and paragraph text and save as variables
news_title= soup.find("div", class_="content_title").text
news_p= soup.find("div", class_="rollover_description_inner").text
#Scrape JPL Featured Space images. Use splinter to find current feature Mars full-size image and assign to variable
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
html= browser.html
im_soup=bs(html,'html.parser')
featured_image= im_soup.select('li.slide a.fancybox')
#make a list of just the data-fancybox-hrefs
img_list = [i.get('data-fancybox-href') for i in featured_image]
#combine the base url with the first img url
base_url = 'https://www.jpl.nasa.gov'
featured_image_url = base_url + img_list[0]
#Mars weather twitter. Scrape latest Mars weather tweet from the page and save as a variable.
url= 'https://twitter.com/marswxreport?lang=en'
tw_response= requests.get(url)
tw_soup= bs(tw_response.text, 'html.parser')
#create list of weather tweets
weather_list= []
for weather_info in tw_soup.find_all("p", class_="TweetTextSize TweetTextSize--normal js-tweet-text tweet-text"):
weather_list.append(weather_info.text.strip())
    #add conditional to get Sol info
    mars_weather = None
    for tweet in reversed(weather_list):
        if tweet[:3] == "InS":
            mars_weather = tweet
#Mars facts
mars_url= 'https://space-facts.com/mars/'
mars_table= pd.read_html(mars_url)
mars_table
mars_df= mars_table[0]
mars_df.set_index(0, inplace=True)
mars_df.index.names= [None]
mars_df.columns= ['']
    mars_html = mars_df.to_html().replace('\n', '')
#Mars Hemispheres
mars_hem_url= 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
mars_hem_response= requests.get(mars_hem_url)
soup= bs(mars_hem_response.text, 'html.parser')
mars_hem= soup.find_all('a', class_='itemLink product-item')
hem_image_urls=[]
for hem_image in mars_hem:
im_title= hem_image.find('h3').text
link= "https://astrogeology.usgs.gov" + hem_image['href']
im_request= requests.get(link)
soup= bs(im_request.text, "html.parser")
img_tag=soup.find("div", class_="downloads").find('ul').find('li')
img_url= img_tag.a['href']
hem_image_urls.append({"Title":im_title, "Image_url": img_url})
mars= {
"title": news_title,
"text": news_p,
"image": featured_image_url,
"weather": mars_weather,
"facts": mars_html,
"hemispheres": hem_image_urls
}
browser.quit()
return mars
``` |
{
"source": "jilleb/mib1-toolbox",
"score": 3
} |
#### File: mib1-toolbox/Tools/extract-cff.py
```python
import struct
import sys
import os
import zlib
try:
from PIL import Image
except ImportError:
sys.exit(""" You are missing the PIL module!
install it by running:
pip install image""")
try:
from progressbar import ProgressBar, Percentage, Bar
except ImportError:
sys.exit(""" You are missing the progressbar module!
install it by running:
pip install progressbar""")
if len(sys.argv) != 3:
print ("usage: extract-cff.py # <outdir>")
sys.exit(1)
out_dir = sys.argv[2]
if not os.path.exists(out_dir):
os.mkdir(out_dir)
def mkdir_path(path):
if not os.access(path, os.F_OK):
os.mkdir(path)
data = open(sys.argv[1],'rb').read() # Open File with path in sys.argv[1] in mode 'r' reading and 'b' binary mode
offset = 0
counterRGBA = 0
counterL = 0
counterP = 0
offset = 24
(toc_size, ) = struct.unpack_from('<I', data, offset) # unpack '<' little-endian, 'I' unsigned-int; get first entry of the returned tuple
offset = 28
(num_files, ) = struct.unpack_from('<I', data, offset)
print ("Num of files: \t %d"%(num_files))
offset = offset + 4 + (toc_size*4)
offset = offset + (num_files *20)
i = 0
offset_array = []
path_array = []
size_array = []
# id_array will be a list of offsets
# path_array will be a list of paths
#go through the entire table of contents to get all paths and offsets
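# each TOC entry is: a 4-byte path length, 8 bytes that are skipped here, a 4-byte
# file size, a 4-byte absolute data offset, then the path string itself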
while (i < num_files):
(path_len,) = struct.unpack_from('<I', data, offset)
offset = offset + 12
(file_size,) = struct.unpack_from('<I', data, offset)
offset = offset + 4
(file_offset,) = struct.unpack_from('<I', data, offset)
offset = offset + 4
(file_path,) = struct.unpack_from("%ds" % path_len, data, offset)
#file_path = "\\"+ file_path
offset_array.append(file_offset)
path_array.append(file_path)
size_array.append(file_size)
#go on to the next offset
offset = offset+path_len
#print ("%d - %x - %s "%(i,file_offset,file_path))
i = i + 1
j = 0
pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=num_files).start()
print ("Extracting files...")
while (j < num_files):
offset = offset_array[j]
path = path_array[j]
size = size_array[j]
#create path
folder, file = os.path.split(path)
#folder = os.path.split(path)
folder = out_dir + folder
if not os.path.exists(folder):
os.makedirs(folder)
file = folder + "\\" + file
output_file = open(file,"wb+")
#read data at offset
file_data = data[offset:offset+size]
#binary_data = binascii.unhexlify(dataset)
output_file.write(file_data)
output_file.close()
pbar.update(j)
j = j+1
pbar.finish()
print ("Done")
``` |
{
"source": "Jille/darn",
"score": 2
} |
#### File: Jille/darn/main.py
```python
import uuid
from datetime import datetime, timedelta
import time
import random
from networking import *
import re
import sys
import smtplib
from email.mime.text import MIMEText
import signal, os
def split_hostname(node):
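    # split "host[:port]" into (host, port); int(m.group(2)) raises if no port was
    # given, so node names are expected to always carry an explicit port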
m = re.match(r'^(.+?)(?::(\d+))?$', node)
return (m.group(1), int(m.group(2)))
class DARNode:
def __init__(self, name):
self.name = name
self.connection = None
self.expecting_pong = False
self.failed = False
self.config = None
self.config_version = 0
self.testament = None
self.node_key = None
self.maintenance_mode = False
def connect(self):
assert self.connection is None
(hostname, port) = split_hostname(self.name)
self.connection = DARNHost(self._initialize_outbound_connection, self._receive_data, self._report_error)
self.connection.setHost(hostname, port)
self.connection.connect()
def adopt_connection(self, host):
(hostname, port) = split_hostname(self.name)
host.setHost(hostname, port)
if self.connection is None:
host.change_callbacks(self._initialize_outbound_connection, self._receive_data, self._report_error)
self.connection = host
else:
host.merge(self.connection)
def set_config(self, config, version, testament, node_key):
self.config = config
self.config_version = version
self.testament = testament
self.node_key = node_key
def send_ping(self):
ping_packet = {
'type': 'ping',
'ttl': 15,
'config_version': self.config_version,
}
self.expecting_pong = True
darn.debug("Sending ping to friend node %s, config version %d" % (self.name, self.config_version))
self.connection.send(ping_packet)
"""Push my configuration, testament and node key to this node."""
def push_config(self, other):
config_push = {
'type': 'config',
'ttl': '20',
'config': other.config,
'testament': other.testament,
'node_key': other.node_key,
'config_version': other.config_version,
}
darn.debug("Pushing my %s configuration to node %s" % (other.name, self.name))
self.connection.send(config_push)
def _initialize_outbound_connection(self, host):
assert host == self.connection
self.connection.send_priority({'hostname': darn.mynode.name})
def _receive_data(self, host, data):
assert host == self.connection
darn.debug("DARN Host Data from identified host %s: %s" % (self.name, data))
if 'type' not in data:
host.destroy()
return
if data['type'] == "config":
darn.info("Noted configuration for identified host: %s" % self.name)
self.set_config(data['config'], data['config_version'], data['testament'], data['node_key'])
elif data['type'] == "ping":
darn.debug("Received ping from friend node %s" % self.name)
config_version = data['config_version']
if darn.maintenance_shutdown:
if config_version != 0:
maintenance_packet = {
'hostname': darn.mynode.name,
'type': 'maintenance',
}
self.connection.send(maintenance_packet)
return
pong_packet = {
'type': 'pong',
'ttl': 15,
}
if config_version != darn.mynode.config_version:
darn.info("Friend node %s has older config of mine (version %s), pushing new config version %s"
% (self.name, config_version, darn.mynode.config_version))
self.push_config(darn.mynode)
self.connection.send(pong_packet)
elif data['type'] == "pong":
darn.debug("Received pong from friend node %s" % self.name)
self.expecting_pong = False
self.failed = False
elif data['type'] == "error":
darn.info("Received error from friend node %s" % self.name)
darn.receive_error_event(self, data)
elif data['type'] == "signoff":
darn.info("Received signoff event from node %s, success=%s: %s" % (self.name, data['success'], data['message']))
darn.process_error_event_signoff(self.name, data['id'], data['success'])
elif data['type'] == "maintenance":
self.config = None
self.config_version = 0
else:
darn.info("Received unknown packet type %s from node %s" % (data['type'], self.name))
def _report_error(self, host, exctype, error):
assert host == self.connection
darn.info("Error while connecting to node %s: %s" % (self.name, error))
class DARN:
VERSION = "0.1"
SEND_PINGS=1
LOG_DEBUG=0
def log(self, severity, message):
hostname = "unknown"
if hasattr(self, 'mynode'):
hostname = self.mynode.name
print "%s: DARN[%s][%s]: %s" % (datetime.now(), hostname, severity, message)
def info(self, message):
self.log("info", message)
def debug(self, message):
if DARN.LOG_DEBUG:
self.log("debug", message)
"""Create a DARN object. Read config from given file. """
def __init__(self, configfile):
self.info("Initialising DARN version " + DARN.VERSION)
self.configfile = configfile
self.net = DARNetworking()
self.running = False
self.nodes = {}
self.error_seq = 1
self.error_events = []
self.maintenance_shutdown = False
self.reload()
(host, port) = split_hostname(self.mynode.name)
host = ''
if 'bind_host' in self.mynode.config:
host = self.mynode.config['bind_host']
self.debug("Going to listen on host %s port %s" % (host if host != '' else '*', port))
self.net.create_server_socket(host, port, lambda *_: None, self.data_from_unidentified_host)
for node in self.mynode.config['nodes']:
name = node['hostname']
self.nodes[name] = DARNode(name)
self.nodes[name].connect()
def data_from_unidentified_host(self, host, data):
self.debug("DARN Host connected to me: %s and sent: %s" % (host, data))
if 'hostname' not in data:
host.destroy()
return
if data['hostname'] in self.nodes:
node = self.nodes[data['hostname']]
else:
node = DARNode(data['hostname'])
self.nodes[data['hostname']] = node
node.adopt_connection(host)
def stop(self):
self.info("Stopping")
self.running = False
"""Start the DARN daemon. This call blocks until stop() is called. """
def run(self):
if self.running:
return
self.info("Starting")
self.running = True
self.net.add_timer(0, self.check_nodes)
# This method blocks until there are no more timers to run
self.net.run()
"""
Start checking all nodes. This generates a list of 'ping' calls to the
	networking layer. If no successful pong comes back for a given node,
an error event is generated. This is checked asynchronously, so this
call does not block.
"""
def check_nodes(self):
self.debug("About to check friend nodes")
if not self.running:
return
if not DARN.SEND_PINGS:
return
for name in self.mynode.config['nodes']:
node = self.nodes[name['hostname']]
node.send_ping()
self.net.add_timer(10, self.check_timeouts)
self.net.add_timer(15, self.check_nodes)
def handle_error_event(self, event, callback):
victim_config = self.nodes[event['victim']].config
if not 'email' in victim_config:
callback(False, "Cannot send e-mail regarding failure of victim %s: no e-mail address known" % event['victim'])
email = victim_config['email']
if not 'smtp' in self.mynode.config or not 'sender' in self.mynode.config['smtp'] or not 'host' in self.mynode.config['smtp']:
callback(False, "Cannot send e-mail regarding failure of victim %s: no valid smtp configuration" % event['victim'])
body = "Error event report fired!\n"
body += "Event report ID: %s\n" % event['id']
body += "Time: %s\n" % datetime.now()
body += "Victim: %s\n" % event['victim']
body += "Message: %s\n" % event['message']
msg = MIMEText(body)
msg['Subject'] = "DARN! Error event report"
msg['From'] = self.mynode.config['smtp']['sender']
msg['To'] = email
email_succeeded = None
email_error = None
try:
s = smtplib.SMTP(self.mynode.config['smtp']['host'])
recipients_failed = s.sendmail(self.mynode.config['smtp']['sender'], [email], msg.as_string())
s.quit()
if len(recipients_failed) > 0:
email_succeeded = False
email_error = "Failed to send to some recipients: " + str(recipients_failed)
else:
email_succeeded = True
except Exception as e:
email_succeeded = False
email_error = str(e)
callback(email_succeeded, email_error)
"""
Received an error event. Process it by sending an e-mail, and send a
sign-off reply. 'node' is the sender of this error event; the victim
is in event['victim'].
"""
def receive_error_event(self, node, event):
self.debug("Received error event for node %s" % node.name)
if event['victim'] not in self.nodes or self.nodes[event['victim']].config is None:
self.info("Received error event about victim %s, but I don't have its node config, so can't inform it" % event['victim'])
signoff_packet = {
'type': 'signoff',
'id': event['id'],
'message': "Can't signoff, don't have a node config for this node",
'success': False,
}
node.connection.send(signoff_packet)
return
def handle_message(success, message):
signoff_packet = {
'type': 'signoff',
'id': event['id'],
'message': message,
'success': success,
}
node.connection.send(signoff_packet)
self.handle_error_event(event, handle_message)
"""
Check if any of the hosts we checked earlier didn't respond yet.
Generate error events for every host that seems to be down.
"""
def check_timeouts(self):
if not self.running:
return
for victim in self.mynode.config['nodes']:
victim = victim['hostname']
node = self.nodes[victim]
if not node.expecting_pong:
continue
if not node.config:
self.info("Expected pong from friend %s, but did not receive any; however, don't have node configuration, so silent ignore" % victim)
continue
if not node.testament:
self.info("Expected pong from friend %s, but did not receive any; however, we don't know its testament, so silent ignore" % victim)
continue
if node.failed:
self.info("Expected pong from friend %s, but did not receive any, host is probably still down" % victim)
continue
self.info("Expected pong from friend %s, but did not receive any, generating error event" % victim)
node.failed = True
self.error_seq = self.error_seq + 1
error_event = {
'type': 'error',
'id': str(uuid.uuid1(None, self.error_seq)),
'victim': victim,
'ttl': 20,
'message': "%s failed to receive response from %s within 30 seconds" % (self.mynode.name, victim),
}
error_event_status = {
'testament': node.testament,
'current_index': None,
'timeout': datetime.fromtimestamp(0),
'node_failed': False, # XXX reporter_failed?
}
self.error_events.append((error_event, error_event_status))
self.pump_error_events()
"""
For every error event that's still active, check if we need to send it
to the next node in the victim's testament list.
"""
def pump_error_events(self):
self.debug("Pumping %d error events" % len(self.error_events))
for (event, event_status) in self.error_events:
self.debug("Error event has status: %s" % event_status)
if event_status['timeout'] <= datetime.now() or event_status['node_failed']:
if event_status['current_index'] is None:
# this event was never sent anywhere (or all nodes failed and we're trying them all again)
event_status['current_index'] = 0
else:
event_status['current_index'] += 1
if len(event_status['testament']) <= event_status['current_index']:
self.info("All testament nodes for a victim failed. Starting over.")
event_status['current_index'] = None
event_status['timeout'] = datetime.now() + timedelta(minutes=5)
event_status['node_failed'] = False
continue
current_node = event_status['testament'][event_status['current_index']]
event_status['timeout'] = datetime.now() + timedelta(seconds=20)
event_status['node_failed'] = False
if current_node == self.mynode.name:
self.info("Trying to handle error event about victim %s myself" % event['victim'])
def handle_response(success, message):
successstr = "Failed"
if success:
successstr = "Succeeded"
self.info("%s to handle error event about victim %s myself: %s" % (successstr, event['victim'], message))
self.process_error_event_signoff(current_node, event['id'], success)
self.handle_error_event(event, handle_response)
else:
self.info("Sending error event about victim %s to node %s" % (event['victim'], current_node))
if current_node not in self.nodes:
self.nodes[current_node] = DARNode(current_node)
self.nodes[current_node].connect()
self.nodes[current_node].connection.send(event)
"""
Process an error-event sign-off packet from a node. If the sign-off is
	successful, forget about the error event. If it's unsuccessful, immediately
mark the error event so that it is sent to the next testament node.
"""
def process_error_event_signoff(self, node, id, success):
self.debug("Received error event signoff packet from node %s, success %s" % (node, success))
new_error_events = []
for (event, event_status) in self.error_events:
if event['id'] == id:
victim = event['victim']
self.debug("Packet is about victim %s" % victim)
if success:
self.info("Node %s succesfully signed-off error event about victim %s" % (node, victim))
continue
else:
self.info("Node %s failed to handle error event about victim %s" % (node, victim))
event_status['node_failed'] = True
new_error_events.append((event, event_status))
		# TODO: don't rebuild this list on every pass; just use a dict keyed by event id
self.error_events = new_error_events
self.pump_error_events()
"""
"""
def reload(self):
config = self.load_config(self.configfile)
self.mynode = DARNode(config['hostname'])
self.mynode.set_config(config, int(time.time()), self.generate_testament(config), self.generate_node_key())
self.info("Loaded configuration version %s" % self.mynode.config_version)
self.push_config()
"""
Load configuration from the given file.
"""
def load_config(self, configfile):
fh = open(configfile)
cfg = json.load(fh)
fh.close()
return cfg
"""
Generate testament from configuration. See module
documentation for more information about the testament.
"""
def generate_testament(self, config):
nodes = []
for node in config['nodes']:
nodes.append(node['hostname'])
return nodes
"""
Generate a node key. See module documentation for more information
about the testament.
"""
def generate_node_key(self):
return "four"
def enable_maintenance(self):
self.maintenance_shutdown = True
maintenance_packet = {
'hostname': self.mynode.name,
'type': 'maintenance',
}
for name in self.mynode.config['nodes']:
node = self.nodes[name['hostname']]
node.connection.send(maintenance_packet)
self.net.add_timer(10, lambda: sys.exit(0))
"""Push configuration, testament and node key to all nodes."""
def push_config(self):
for hostname in self.nodes:
self.nodes[hostname].push_config(self.mynode)
if __name__ == "__main__":
darn = DARN(sys.argv[1])
def _reload(a, b):
darn.reload()
signal.signal(signal.SIGHUP, _reload)
def _maintenance(a, b):
darn.enable_maintenance()
signal.signal(signal.SIGUSR1, _maintenance)
darn.run()
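# Illustrative configuration file (assumed values; the key names are the ones this
# module reads: 'hostname', 'bind_host', 'nodes', 'email' and 'smtp'):
#   {
#     "hostname": "node1.example.org:1234",
#     "bind_host": "0.0.0.0",
#     "email": "admin@example.org",
#     "smtp": {"sender": "darn@example.org", "host": "localhost"},
#     "nodes": [
#       {"hostname": "node2.example.org:1234"},
#       {"hostname": "node3.example.org:1234"}
#     ]
#   }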
```
#### File: Jille/darn/networking.py
```python
import asyncore
import Queue
import json
import re
import time
import socket
import sys
class DARNMessage:
def __init__(self, type, expire):
self.type = type
		# XXX: enforce the expire timestamp
self.expire = expire
def toString(self):
return json.dumps([self.type, self.getPayload()])
def getPayload(self):
return None
class DARNMessagePing(DARNMessage):
def __init__(self, configversion, expiry):
DARNMessage.__init__(self, "ping", expiry)
self.configversion = configversion
def getPayload(self):
return [self.configversion]
class DARNMessagePong(DARNMessage):
def __init__(self, expiry):
DARNMessage.__init__(self, "pong", expiry)
class DARNHost:
def __init__(self, connect_callback, data_callback, error_callback):
self.host = None
self.port = None
self.socket = None
self.msgqueue = Queue.Queue(0)
self.connect_callback = connect_callback
self.data_callback = data_callback
self.error_callback = error_callback
def setSocket(self, sock):
self.socket = sock
def setHost(self, host, port):
self.host = host
self.port = port
def change_callbacks(self, connect_callback, data_callback, error_callback):
self.connect_callback = connect_callback
self.data_callback = data_callback
self.error_callback = error_callback
def connect(self):
sock = DARNSocket(self)
sock.connect(self.host, self.port)
self.setSocket(sock)
def has_socket(self):
return (self.socket is not None)
def handle_connect(self):
self.connect_callback(self)
def handle_connect_error(self, exctype, value):
self.error_callback(self, exctype, value)
def receive_msg(self, msg):
data = json.loads(msg)
self.data_callback(self, data)
def send(self, message):
self.msgqueue.put_nowait(message)
if not self.has_socket():
self.connect()
def send_priority(self, message):
newq = Queue.Queue(0)
newq.put_nowait(message)
while not self.msgqueue.empty():
newq.put_nowait(self.msgqueue.get_nowait())
self.msgqueue = newq
if not self.has_socket():
self.connect()
def merge(self, other):
assert self.host == other.host
assert self.port == other.port
assert self.socket is not None
self.socket.manager = other
other.setSocket(self.socket)
self.socket = None
while not self.msgqueue.empty():
other.msgqueue.put_nowait(self.msgqueue.get_nowait())
self.destroy()
def lost_socket(self):
self.socket = None
if not self.host:
self.destroy()
def destroy(self):
self.connect_callback = None
self.data_callback = None
self.msgqueue = None
if self.has_socket():
self.socket.close()
self.socket = None
class DARNSocket(asyncore.dispatcher):
def __init__(self, manager, *args):
asyncore.dispatcher.__init__(self, *args)
self.manager = manager
self.outbuf = ''
self.inbuf = ''
def connect(self, host, port):
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.handle_error = self.handle_connect_error
asyncore.dispatcher.connect(self, (host, port))
def handle_connect(self):
self.manager.handle_connect()
self.handle_error = asyncore.dispatcher.handle_error
def handle_connect_error(self):
exctype, value = sys.exc_info()[:2]
self.manager.handle_connect_error(exctype, value)
def handle_close(self):
self.manager.lost_socket()
self.close()
def handle_read(self):
self.inbuf += self.recv(8192)
		# XXX: precompile this regex
m = re.match(r"^(\d+):", self.inbuf)
while m:
datalen = len(m.group(0)) + int(m.group(1)) + 1
if len(self.inbuf) >= datalen:
self.manager.receive_msg(self.inbuf[len(m.group(0)):datalen-1])
self.inbuf = self.inbuf[datalen:]
else:
break
m = re.match(r"^(\d+):", self.inbuf)
if re.match(r"^\D", self.inbuf):
self.close()
def writable(self):
if len(self.outbuf) > 0:
return True
return (not self.manager.msgqueue.empty())
def handle_write(self):
if len(self.outbuf) == 0:
try:
msg = self.manager.msgqueue.get_nowait()
except Queue.Empty:
return
			msg_str = json.dumps(msg)
			self.outbuf = "%d:%s\n" % (len(msg_str), msg_str)
sent = self.send(self.outbuf)
self.outbuf = self.outbuf[sent:]
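# Wire format used by handle_read/handle_write above: each message is a JSON blob
# prefixed with its decimal length and terminated by a newline, "<len>:<json>\n".
# Illustrative frame (assumed message contents):
#   the payload '{"type": "pong", "ttl": 15}' is 27 bytes long, so the bytes on
#   the socket are '27:{"type": "pong", "ttl": 15}\n'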
class DARNServerSocket(asyncore.dispatcher):
def __init__(self, host, port, connect_callback, data_callback):
asyncore.dispatcher.__init__(self)
self.connect_callback = connect_callback
self.data_callback = data_callback
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((host, port))
self.listen(5)
def handle_accept(self):
pair = self.accept()
if pair is not None:
sock, addr = pair
print 'Incoming connection from %s' % repr(addr)
host = DARNHost(self.connect_callback, self.data_callback, lambda *_: None)
host.setSocket(DARNSocket(host, sock))
class DARNetworking:
def __init__(self):
self.timers = []
def create_server_socket(self, host, port, connect_callback, data_callback):
self.server = DARNServerSocket(host, port, connect_callback, data_callback)
def add_timer(self, stamp, what):
self.timers.append((time.time() + stamp, what))
def get_first_timer(self):
if len(self.timers) == 0:
return None
first = (0, self.timers[0][0], self.timers[0][1])
for (idx, (stamp, what)) in enumerate(self.timers):
if stamp < first[1]:
first = (idx, stamp, what)
return first
def run(self):
while True:
now = time.time()
first = self.get_first_timer()
if first:
idx, stamp, what = first
if stamp <= now:
what()
del self.timers[idx]
continue
timeout = stamp - now
else:
timeout = None
if timeout < 0:
timeout = 0
asyncore.loop(timeout=timeout, count=1)
``` |
{
"source": "jillelee/pyclaw",
"score": 2
} |
#### File: pyclaw/classic/solver copy.py
```python
r"""
Module containing the classic Clawpack solvers.
This module contains the pure and wrapped classic clawpack solvers. All
clawpack solvers inherit from the :class:`ClawSolver` superclass which in turn
inherits from the :class:`~pyclaw.solver.Solver` superclass. These
are both pure virtual classes; the only solver classes that should be instantiated
are the dimension-specific ones, :class:`ClawSolver1D` and :class:`ClawSolver2D`.
"""
from clawpack.pyclaw.util import add_parent_doc
from clawpack.pyclaw.solver import Solver
from clawpack.pyclaw.limiters import tvd
# ============================================================================
# Generic Clawpack solver class
# ============================================================================
class ClawSolver(Solver):
r"""
Generic classic Clawpack solver
All Clawpack solvers inherit from this base class.
.. attribute:: mthlim
Limiter(s) to be used. Specified either as one value or a list.
If one value, the specified limiter is used for all wave families.
If a list, the specified values indicate which limiter to apply to
each wave family. Take a look at pyclaw.limiters.tvd for an enumeration.
``Default = limiters.tvd.minmod``
.. attribute:: order
Order of the solver, either 1 for first order (i.e., Godunov's method)
or 2 for second order (Lax-Wendroff-LeVeque).
``Default = 2``
.. attribute:: source_split
Which source splitting method to use: 1 for first
order Godunov splitting and 2 for second order Strang splitting.
``Default = 1``
.. attribute:: fwave
Whether to split the flux jump (rather than the jump in Q) into waves;
requires that the Riemann solver performs the splitting.
``Default = False``
.. attribute:: step_source
Handle for function that evaluates the source term.
The required signature for this function is:
def step_source(solver,state,dt)
.. attribute:: kernel_language
Specifies whether to use wrapped Fortran routines ('Fortran')
or pure Python ('Python'). ``Default = 'Fortran'``.
.. attribute:: verbosity
The level of detail of logged messages from the Fortran solver.
``Default = 0``.
"""
# ========== Generic Init Routine ========================================
def __init__(self,riemann_solver=None,claw_package=None):
r"""
See :class:`ClawSolver` for full documentation.
Output:
- (:class:`ClawSolver`) - Initialized clawpack solver
"""
self.num_ghost = 2
self.limiters = tvd.minmod
self.order = 2
self.source_split = 1
self.fwave = False
self.step_source = None
self.kernel_language = 'Fortran'
self.verbosity = 0
self.cfl_max = 1.0
self.cfl_desired = 0.9
self._mthlim = self.limiters
self._method = None
self.dt_old = None
# Call general initialization function
super(ClawSolver,self).__init__(riemann_solver,claw_package)
# ========== Time stepping routines ======================================
def step(self,solution,take_one_step,tstart,tend):
r"""
Evolve solution one time step
The elements of the algorithm for taking one step are:
1. Pick a step size as specified by the base solver attribute :func:`get_dt`
2. A half step on the source term :func:`step_source` if Strang splitting is
being used (:attr:`source_split` = 2)
3. A step on the homogeneous problem :math:`q_t + f(q)_x = 0` is taken
4. A second half step or a full step is taken on the source term
:func:`step_source` depending on whether Strang splitting was used
(:attr:`source_split` = 2) or Godunov splitting
(:attr:`source_split` = 1)
This routine is called from the method evolve_to_time defined in the
pyclaw.solver.Solver superclass.
:Input:
- *solution* - (:class:`~pyclaw.solution.Solution`) solution to be evolved
:Output:
- (bool) - True if full step succeeded, False otherwise
"""
self.get_dt(solution.t,tstart,tend,take_one_step)
self.cfl.set_global_max(0.)
if self.source_split == 2 and self.step_source is not None:
self.step_source(self,solution.states[0],self.dt/2.0)
self.step_hyperbolic(solution)
# Check here if the CFL condition is satisfied.
# If not, return # immediately to evolve_to_time and let it deal with
# picking a new step size (dt).
if self.cfl.get_cached_max() >= self.cfl_max:
return False
if self.step_source is not None:
# Strang splitting
if self.source_split == 2:
self.step_source(self,solution.states[0],self.dt/2.0)
# Godunov Splitting
if self.source_split == 1:
self.step_source(self,solution.states[0],self.dt)
return True
def _check_cfl_settings(self):
pass
def _allocate_workspace(self,solution):
pass
def step_hyperbolic(self,solution):
r"""
Take one homogeneous step on the solution.
This is a dummy routine and must be overridden.
"""
raise Exception("Dummy routine, please override!")
def _set_mthlim(self):
r"""
Convenience routine to convert users limiter specification to
the format understood by the Fortran code (i.e., a list of length num_waves).
"""
self._mthlim = self.limiters
if not isinstance(self.limiters,list): self._mthlim=[self._mthlim]
if len(self._mthlim)==1: self._mthlim = self._mthlim * self.num_waves
if len(self._mthlim)!=self.num_waves:
raise Exception('Length of solver.limiters is not equal to 1 or to solver.num_waves')
def _set_method(self,state):
r"""
Set values of the solver._method array required by the Fortran code.
These are algorithmic parameters.
"""
import numpy as np
#We ought to put method and many other things in a Fortran
#module and set the fortran variables directly here.
self._method =np.empty(7, dtype=int,order='F')
self._method[0] = self.dt_variable
self._method[1] = self.order
if self.num_dim==1:
self._method[2] = 0 # Not used in 1D
elif self.dimensional_split:
self._method[2] = -1 # First-order dimensional splitting
else:
self._method[2] = self.transverse_waves
self._method[3] = self.verbosity
self._method[4] = 0 # Not used for PyClaw (would be self.source_split)
self._method[5] = state.index_capa + 1
self._method[6] = state.num_aux
def setup(self,solution):
r"""
Perform essential solver setup. This routine must be called before
solver.step() may be called.
"""
# This is a hack to deal with the fact that petsc4py
# doesn't allow us to change the stencil_width (num_ghost)
solution.state.set_num_ghost(self.num_ghost)
# End hack
self._check_cfl_settings()
self._set_mthlim()
if(self.kernel_language == 'Fortran'):
if self.fmod is None:
so_name = 'clawpack.pyclaw.classic.classic'+str(self.num_dim)
self.fmod = __import__(so_name,fromlist=['clawpack.pyclaw.classic'])
self._set_fortran_parameters(solution)
self._allocate_workspace(solution)
elif self.num_dim>1:
raise Exception('Only Fortran kernels are supported in multi-D.')
self._allocate_bc_arrays(solution.states[0])
super(ClawSolver,self).setup(solution)
def _set_fortran_parameters(self,solution):
r"""
Pack parameters into format recognized by Clawpack (Fortran) code.
Sets the solver._method array and the cparam common block for the Riemann solver.
"""
self._set_method(solution.state)
# The reload here is necessary because otherwise the common block
# cparam in the Riemann solver doesn't get flushed between running
# different tests in a single Python session.
reload(self.fmod)
solution.state.set_cparam(self.fmod)
solution.state.set_cparam(self.rp)
def __del__(self):
r"""
Delete Fortran objects, which otherwise tend to persist in Python sessions.
"""
if(self.kernel_language == 'Fortran'):
del self.fmod
super(ClawSolver,self).__del__()
# ============================================================================
# ClawPack 1d Solver Class
# ============================================================================
class ClawSolver1D(ClawSolver):
r"""
Clawpack evolution routine in 1D
This class represents the 1d clawpack solver on a single grid. Note that
there are routines here for interfacing with the fortran time stepping
    routines and the Python time stepping routines. Which ones are used
    depends on the solver's ``kernel_language`` attribute
    (which defaults to 'Fortran' in this module).
"""
__doc__ += add_parent_doc(ClawSolver)
def __init__(self, riemann_solver=None, claw_package=None):
r"""
Create 1d Clawpack solver
Output:
- (:class:`ClawSolver1D`) - Initialized 1d clawpack solver
See :class:`ClawSolver1D` for more info.
"""
self.num_dim = 1
self.reflect_index = [1]
super(ClawSolver1D,self).__init__(riemann_solver, claw_package)
# ========== Homogeneous Step =====================================
def step_hyperbolic(self,solution):
r"""
Take one time step on the homogeneous hyperbolic system.
:Input:
- *solution* - (:class:`~pyclaw.solution.Solution`) Solution that
will be evolved
"""
import numpy as np
state = solution.states[0]
grid = state.grid
self._apply_bcs(state)
num_eqn,num_ghost = state.num_eqn,self.num_ghost
if(self.kernel_language == 'Fortran'):
mx = grid.num_cells[0]
dx,dt = grid.delta[0],self.dt
dtdx = np.zeros( (mx+2*num_ghost) ) + dt/dx
rp1 = self.rp.rp1._cpointer
self.qbc,cfl = self.fmod.step1(num_ghost,mx,self.qbc,self.auxbc,dx,dt,self._method,self._mthlim,self.fwave,rp1)
elif(self.kernel_language == 'Python'):
q = self.qbc
aux = self.auxbc
# Limiter to use in the pth family
limiter = np.array(self._mthlim,ndmin=1)
dtdx = np.zeros( (2*self.num_ghost+grid.num_cells[0]) )
# Find local value for dt/dx
if 'method' not in state.problem_data:
if state.index_capa>=0:
# print("nonuniform")
dtdx = self.dt / (grid.delta[0] * aux[state.index_capa,:])
else:
# print("uniform")
# print("time step: ", self.dt)
dtdx += self.dt / grid.delta[0]
elif state.problem_data['method'] == 'h_box':
# print("hbox")
xpxc = state.problem_data['xpxc']
dtdx = self.dt / (grid.delta[0] * aux[state.index_capa,:])
dtdx_hbox = np.zeros( (2*self.num_ghost+grid.num_cells[0]) )
dtdx_hbox += self.dt / (grid.delta[0] * xpxc)
# dtdx_hbox += self.dt / (grid.delta[0] * grid.num_cells[0] / (grid.num_cells[0] - 1))
elif state.problem_data['method'] == 'h_box_wave':
# print("hbox")
xpxc = state.problem_data['xpxc']
dtdx = self.dt / (grid.delta[0] * aux[state.index_capa,:])
dtdx_hbox = np.zeros( (2*self.num_ghost+grid.num_cells[0]) )
dtdx_hbox += self.dt / (grid.delta[0] * xpxc)
# Solve Riemann problem at each interface
# print("length of q: ", q.shape)
q_l=q[:,:-1].copy()
q_r=q[:,1:].copy()
if state.aux is not None:
aux_l=aux[:,:-1].copy()
aux_r=aux[:,1:].copy()
else:
aux_l = None
aux_r = None
if 'method' not in state.problem_data:
# normal case
wave,s,amdq,apdq = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
# Update loop limits, these are the limits for the Riemann solver
# locations, which then update a grid cell value
# We include the Riemann problem just outside of the grid so we can
# do proper limiting at the grid edges
# LL | | UL
# | LL | | | | ... | | | UL | |
# | |
LL = self.num_ghost - 1
UL = self.num_ghost + grid.num_cells[0] + 1
# Update q for Godunov update
for m in xrange(num_eqn):
q[m,LL:UL] -= dtdx[LL:UL]*apdq[m,LL-1:UL-1]
q[m,LL-1:UL-1] -= dtdx[LL-1:UL-1]*amdq[m,LL-1:UL-1]
elif state.problem_data['method'] == 'h_box':
# # add corrections
wave,s,amdq,apdq,f_corr_l,f_corr_r = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
LL = self.num_ghost - 1
UL = self.num_ghost + grid.num_cells[0] + 1
# Update q for Godunov update
for m in xrange(num_eqn):
q[m,LL:UL] -= dtdx[LL:UL]*(apdq[m,LL-1:UL-1] - f_corr_r[m,LL-1:UL-1])
q[m,LL-1:UL-1] -= dtdx[LL-1:UL-1]*(amdq[m,LL-1:UL-1] + f_corr_l[m,LL-1:UL-1])
elif state.problem_data['method'] == 'h_box_wave':
# # add corrections
state.problem_data['arrival_state'] = False
wave,s,amdq,apdq,q_hbox_initial,aux_hbox = self.rp(q_l,q_r,aux_l,aux_r,state.problem_data)
LL = self.num_ghost - 1
UL = self.num_ghost + grid.num_cells[0] + 1
# Update q for Godunov update
iw = state.problem_data['wall_position'] + self.num_ghost - 1
q_last = q[:,iw:iw+2].copy()
for m in xrange(num_eqn):
q[m,LL:UL] -= dtdx[LL:UL]*apdq[m,LL-1:UL-1]
q[m,LL-1:UL-1] -= dtdx[LL-1:UL-1]*amdq[m,LL-1:UL-1]
# check the arrivals
q[:,iw:iw+2] = q_last[:,:] # reset the wall cells
dt = self.dt
num_waves = self.num_waves
dx = grid.delta[0] * xpxc
alpha = state.problem_data['fraction']
arrival_times = np.array([0.0])
for mw in xrange(num_waves):
if (s[mw,iw-1] > 0 and (s[mw,iw-1] * dt > alpha * dx)):
arrival_times = np.append(arrival_times, alpha * dx / s[mw,iw-1])
if (s[mw,iw+1] < 0 and ( (-s[mw,iw+1]) * dt > (1 - alpha) * dx ) ):
arrival_times = np.append(arrival_times, -(1 - alpha) * dx / s[mw,iw+1])
arrival_times.sort()
n_arrival_times = len(arrival_times)
if n_arrival_times == 1 :
state.problem_data['arrival_state'] = False
else:
state.problem_data['arrival_state'] = True
s_cells = np.zeros((num_waves, 3, n_arrival_times))
s_cells[:,:,0] = s[:, iw-1:iw+2].copy()
wave_cells = np.zeros((num_eqn, num_waves, 3, n_arrival_times))
wave_cells[:,:,:,0] = wave[:,:,iw-1:iw+2].copy()
if state.problem_data['arrival_state'] == False:
q[:,iw] -= dt/(alpha * dx) * apdq[:,iw-1]
q[:,iw+1] -= dt/((1 - alpha)*dx) * amdq[:,iw+1]
for mw in xrange(num_waves):
if (s[mw,iw] < 0):
q[:,iw-1] -= dt/dx * ( max(0, -s[mw,iw] * dt - alpha * dx) / (-s[mw,iw] * dt) * wave[:,mw,iw] )
q[:,iw] -= dt/(alpha * dx) * (min(-s[mw,iw] * dt, alpha * dx) / (-s[mw,iw] * dt) * wave[:,mw,iw] )
elif (s[mw,iw] > 0):
q[:,iw+1] -= dt/((1 - alpha)*dx) * (min(s[mw,iw] * dt, (1 - alpha) * dx) / (s[mw,iw] * dt) * wave[:,mw,iw] )
                            q[:,iw+2] -= dt/dx * ( max(0, s[mw,iw] * dt - (1 - alpha) * dx) / (s[mw,iw] * dt) * wave[:,mw,iw] )
if state.problem_data['arrival_state'] == True:
## update q_hbox
for i in xrange(1, n_arrival_times):
q_hbox = q_hbox_initial.copy()
for mw in xrange(num_waves):
if s[mw,iw-2] > 0:
q_hbox[:,0] -= arrival_times[i] / dx * (max(0, s[mw,iw-2] * arrival_times[i] - alpha * dx) / (s[mw,iw-2] * arrival_times[i]) * wave[:,mw,iw-2])
if s[mw, iw-1] < 0:
q_hbox[:,0] -= arrival_times[i] / dx * (min(-s[mw,iw-1] * arrival_times[i], (1 - alpha) * dx) / (-s[mw,iw-1] * arrival_times[i]) * wave[:,mw,iw-1])
if s_cells[mw,0,i] > 0:
for j in xrange(i):
q_hbox[:,0] -= (arrival_times[j+1] - arrival_times[j]) / dx * (wave_cells[:,mw,0,j])
if s_cells[mw,0,i] * arrival_times[i] > alpha * dx - 1e-14:
# check the arrival wave
wave_cells[:,mw,0,i] = 0.0
if s_cells[mw,1,i] < 0:
for j in xrange(i):
q_hbox[:,0] -= (arrival_times[i] - arrival_times[j]) / dx * (wave_cells[:,mw,1,j])
if s_cells[mw,1,i] > 0:
for j in xrange(i):
q_hbox[:,1] -= (arrival_times[i] - arrival_times[j]) / dx * (wave_cells[:,mw,1,j])
if s_cells[mw,2,i] < 0:
for j in xrange(i):
q_hbox[:,1] -= (arrival_times[j+1] - arrival_times[j]) / dx * (wave_cells[:,mw,2,j])
if (-s_cells[mw,2,i] * arrival_times[i]) > (1 - alpha) * dx - 1e-14:
# check the arrival wave
wave_cells[:,mw,2,i] = 0.0
if s[mw,iw+1] > 0:
q_hbox[:,1] -= arrival_times[i] / dx * (min(s[mw,iw+1] * arrival_times[i], alpha * dx) / (-s[mw,iw+1] * arrival_times[i]) * wave[:,mw,iw+1])
if s[mw,iw+2] < 0:
q_hbox[:,1] -= arrival_times[i] / dx * (max(0, -s[mw,iw+2] * arrival_times[i] - (1 - alpha) * dx) / (-s[mw,iw+2] * arrival_times[i]) * wave[:,mw,iw+2])
wave_cells[:,:,1,i],s_cells[:,1,i],amdq_arr,apdq_arr = self.rp(q_hbox[:,0],q_hbox[:,1],aux_hbox[:,0],aux_hbox[:,1],state.problem_data)
## update q[iw-1], q[iw], q[iw+1] and q[iw+2]
arrival_times = np.append(arrival_times, dt)
n_arrival_times = len(arrival_times)
for mw in xrange(num_waves):
for i in xrange(n_arrival_times-1):
if s_cells[mw,0,i] > 0:
q[:,iw] -= (arrival_times[i+1] - arrival_times[i]) / (alpha * dx) * (wave_cells[:,mw,0,i])
if s_cells[mw,2,i] < 0:
q[:,iw+1] -= (arrival_times[i+1] - arrival_times[i]) / ((1 - alpha) * dx) * (wave_cells[:,mw,2,i])
if s_cells[mw,1,i] < 0:
q[:,iw-1] -= (dt - arrival_times[i]) / dx * ( max(0, -s_cells[mw,1,i] * (dt - arrival_times[i]) - alpha * dx) / (-s_cells[mw,1,i] * (dt - arrival_times[i])) * wave_cells[:,mw,1,i] )
q[:,iw] -= (dt - arrival_times[i]) / (alpha * dx) * ( min(-s_cells[mw,1, i] * (dt - arrival_times[i]), alpha * dx) / (-s_cells[mw,1,i] * (dt - arrival_times[i])) * wave_cells[:,mw,1,i] )
if s_cells[mw,1,i] > 0:
q[:,iw+1] -= (dt - arrival_times[i]) / ((1 - alpha) * dx) * ( min(s_cells[mw,1, i] * (dt - arrival_times[i]), (1 - alpha) * dx) / (s_cells[mw,1,i] * (dt - arrival_times[i])) * wave_cells[:,mw,1,i] )
q[:,iw+2] -= (dt - arrival_times[i]) / dx * ( max(0, s_cells[mw,1,i] * (dt - arrival_times[i]) - (1- alpha) * dx) / (s_cells[mw,1,i] * (dt - arrival_times[i])) * wave_cells[:,mw,1,i] )
# Compute maximum wave speed
# add additional conditions for h-box
cfl = 0.0
if 'method' not in state.problem_data:
for mw in xrange(wave.shape[1]):
smax1 = np.max(dtdx[LL:UL]*s[mw,LL-1:UL-1])
smax2 = np.max(-dtdx[LL-1:UL-1]*s[mw,LL-1:UL-1])
cfl = max(cfl,smax1,smax2)
elif state.problem_data['method'] == 'h_box':
# print("h_box corrected dtdx")
for mw in xrange(wave.shape[1]):
smax1 = np.max(dtdx_hbox[LL:UL]*s[mw,LL-1:UL-1])
smax2 = np.max(-dtdx_hbox[LL-1:UL-1]*s[mw,LL-1:UL-1])
cfl = max(cfl,smax1,smax2)
elif state.problem_data['method'] == 'h_box_wave':
# print("h_box corrected dtdx")
for mw in xrange(wave.shape[1]):
smax1 = np.max(dtdx_hbox[LL:UL]*s[mw,LL-1:UL-1])
smax2 = np.max(-dtdx_hbox[LL-1:UL-1]*s[mw,LL-1:UL-1])
cfl = max(cfl,smax1,smax2)
# If we are doing slope limiting we have more work to do
if self.order == 2:
# Initialize flux corrections
f = np.zeros( (num_eqn,grid.num_cells[0] + 2*self.num_ghost) )
# Apply Limiters to waves
if (limiter > 0).any():
wave = tvd.limit(state.num_eqn,wave,s,limiter,dtdx)
# Compute correction fluxes for second order q_{xx} terms
dtdxave = 0.5 * (dtdx[LL-1:UL-1] + dtdx[LL:UL])
if self.fwave:
for mw in xrange(wave.shape[1]):
sabs = np.abs(s[mw,LL-1:UL-1])
om = 1.0 - sabs*dtdxave[:UL-LL]
ssign = np.sign(s[mw,LL-1:UL-1])
for m in xrange(num_eqn):
f[m,LL:UL] += 0.5 * ssign * om * wave[m,mw,LL-1:UL-1]
else:
for mw in xrange(wave.shape[1]):
sabs = np.abs(s[mw,LL-1:UL-1])
om = 1.0 - sabs*dtdxave[:UL-LL]
for m in xrange(num_eqn):
f[m,LL:UL] += 0.5 * sabs * om * wave[m,mw,LL-1:UL-1]
# Update q by differencing correction fluxes
for m in xrange(num_eqn):
q[m,LL:UL-1] -= dtdx[LL:UL-1] * (f[m,LL+1:UL] - f[m,LL:UL-1])
else: raise Exception("Unrecognized kernel_language; choose 'Fortran' or 'Python'")
self.cfl.update_global_max(cfl)
state.set_q_from_qbc(num_ghost,self.qbc)
if state.num_aux > 0:
state.set_aux_from_auxbc(num_ghost,self.auxbc)
# ============================================================================
# ClawPack 2d Solver Class
# ============================================================================
class ClawSolver2D(ClawSolver):
r"""
2D Classic (Clawpack) solver.
Solve using the wave propagation algorithms of <NAME>'s
Clawpack code (www.clawpack.org).
In addition to the attributes of ClawSolver1D, ClawSolver2D
also has the following options:
.. attribute:: dimensional_split
If True, use dimensional splitting (Godunov splitting).
Dimensional splitting with Strang splitting is not supported
at present but could easily be enabled if necessary.
If False, use unsplit Clawpack algorithms, possibly including
transverse Riemann solves.
.. attribute:: transverse_waves
If dimensional_split is True, this option has no effect. If
dimensional_split is False, then transverse_waves should be one of
the following values:
ClawSolver2D.no_trans: Transverse Riemann solver
not used. The stable CFL for this algorithm is 0.5. Not recommended.
ClawSolver2D.trans_inc: Transverse increment waves are computed
and propagated.
ClawSolver2D.trans_cor: Transverse increment waves and transverse
correction waves are computed and propagated.
Note that only the fortran routines are supported for now in 2D.
"""
__doc__ += add_parent_doc(ClawSolver)
no_trans = 0
trans_inc = 1
trans_cor = 2
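    # Illustrative configuration (a sketch, not taken from this file; the Riemann
    # solver module name is an assumption):
    #   from clawpack import riemann
    #   solver = ClawSolver2D(riemann.acoustics_2D)
    #   solver.dimensional_split = False            # use the unsplit algorithm
    #   solver.transverse_waves = ClawSolver2D.trans_cor
    #   solver.limiters = tvd.MC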
def __init__(self,riemann_solver=None, claw_package=None):
r"""
Create 2d Clawpack solver
See :class:`ClawSolver2D` for more info.
"""
self.dimensional_split = True
self.transverse_waves = self.trans_inc
self.num_dim = 2
self.reflect_index = [1,2]
self.aux1 = None
self.aux2 = None
self.aux3 = None
self.work = None
super(ClawSolver2D,self).__init__(riemann_solver, claw_package)
def _check_cfl_settings(self):
if (not self.dimensional_split) and (self.transverse_waves==0):
cfl_recommended = 0.5
else:
cfl_recommended = 1.0
if self.cfl_max > cfl_recommended:
import warnings
warnings.warn('cfl_max is set higher than the recommended value of %s' % cfl_recommended)
warnings.warn(str(self.cfl_desired))
def _allocate_workspace(self,solution):
r"""
Pack parameters into format recognized by Clawpack (Fortran) code.
Sets the method array and the cparam common block for the Riemann solver.
"""
import numpy as np
state = solution.state
num_eqn,num_aux,num_waves,num_ghost,aux = state.num_eqn,state.num_aux,self.num_waves,self.num_ghost,state.aux
#The following is a hack to work around an issue
#with f2py. It involves wastefully allocating three arrays.
#f2py seems not able to handle multiple zero-size arrays being passed.
# it appears the bug is related to f2py/src/fortranobject.c line 841.
if aux is None: num_aux=1
grid = state.grid
maxmx,maxmy = grid.num_cells[0],grid.num_cells[1]
maxm = max(maxmx, maxmy)
# These work arrays really ought to live inside a fortran module
# as is done for sharpclaw
self.aux1 = np.empty((num_aux,maxm+2*num_ghost),order='F')
self.aux2 = np.empty((num_aux,maxm+2*num_ghost),order='F')
self.aux3 = np.empty((num_aux,maxm+2*num_ghost),order='F')
mwork = (maxm+2*num_ghost) * (5*num_eqn + num_waves + num_eqn*num_waves)
self.work = np.empty((mwork),order='F')
# ========== Hyperbolic Step =====================================
def step_hyperbolic(self,solution):
r"""
Take a step on the homogeneous hyperbolic system using the Clawpack
algorithm.
Clawpack is based on the Lax-Wendroff method, combined with Riemann
solvers and TVD limiters applied to waves.
"""
if(self.kernel_language == 'Fortran'):
state = solution.states[0]
grid = state.grid
dx,dy = grid.delta
mx,my = grid.num_cells
maxm = max(mx,my)
self._apply_bcs(state)
qold = self.qbc.copy('F')
rpn2 = self.rp.rpn2._cpointer
if (self.dimensional_split) or (self.transverse_waves==0):
rpt2 = rpn2 # dummy value; it won't be called
else:
rpt2 = self.rp.rpt2._cpointer
if self.dimensional_split:
#Right now only Godunov-dimensional-splitting is implemented.
#Strang-dimensional-splitting could be added following dimsp2.f in Clawpack.
self.qbc, cfl_x = self.fmod.step2ds(maxm,self.num_ghost,mx,my, \
qold,self.qbc,self.auxbc,dx,dy,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,1,self.fwave,rpn2,rpt2)
self.qbc, cfl_y = self.fmod.step2ds(maxm,self.num_ghost,mx,my, \
self.qbc,self.qbc,self.auxbc,dx,dy,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,2,self.fwave,rpn2,rpt2)
cfl = max(cfl_x,cfl_y)
else:
self.qbc, cfl = self.fmod.step2(maxm,self.num_ghost,mx,my, \
qold,self.qbc,self.auxbc,dx,dy,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,self.fwave,rpn2,rpt2)
self.cfl.update_global_max(cfl)
state.set_q_from_qbc(self.num_ghost,self.qbc)
if state.num_aux > 0:
state.set_aux_from_auxbc(self.num_ghost,self.auxbc)
else:
raise NotImplementedError("No python implementation for step_hyperbolic in 2D.")
# ============================================================================
# ClawPack 3d Solver Class
# ============================================================================
class ClawSolver3D(ClawSolver):
r"""
3D Classic (Clawpack) solver.
Solve using the wave propagation algorithms of <NAME>'s
Clawpack code (www.clawpack.org).
In addition to the attributes of ClawSolver, ClawSolver3D
also has the following options:
.. attribute:: dimensional_split
If True, use dimensional splitting (Godunov splitting).
Dimensional splitting with Strang splitting is not supported
at present but could easily be enabled if necessary.
If False, use unsplit Clawpack algorithms, possibly including
transverse Riemann solves.
.. attribute:: transverse_waves
If dimensional_split is True, this option has no effect. If
        dimensional_split is False, then transverse_waves should be one of
the following values:
ClawSolver3D.no_trans: Transverse Riemann solver
not used. The stable CFL for this algorithm is 0.5. Not recommended.
ClawSolver3D.trans_inc: Transverse increment waves are computed
and propagated.
ClawSolver3D.trans_cor: Transverse increment waves and transverse
correction waves are computed and propagated.
Note that only Fortran routines are supported for now in 3D --
there is no pure-python version.
"""
__doc__ += add_parent_doc(ClawSolver)
no_trans = 0
trans_inc = 11
trans_cor = 22
def __init__(self, riemann_solver=None, claw_package=None):
r"""
Create 3d Clawpack solver
See :class:`ClawSolver3D` for more info.
"""
# Add the functions as required attributes
self.dimensional_split = True
self.transverse_waves = self.trans_cor
self.num_dim = 3
self.reflect_index = [1,2,3]
self.aux1 = None
self.aux2 = None
self.aux3 = None
self.work = None
super(ClawSolver3D,self).__init__(riemann_solver, claw_package)
# ========== Setup routine =============================
def _allocate_workspace(self,solution):
r"""
Allocate auxN and work arrays for use in Fortran subroutines.
"""
import numpy as np
state = solution.states[0]
num_eqn,num_aux,num_waves,num_ghost,aux = state.num_eqn,state.num_aux,self.num_waves,self.num_ghost,state.aux
#The following is a hack to work around an issue
#with f2py. It involves wastefully allocating three arrays.
#f2py seems not able to handle multiple zero-size arrays being passed.
# it appears the bug is related to f2py/src/fortranobject.c line 841.
if(aux is None): num_aux=1
grid = state.grid
maxmx,maxmy,maxmz = grid.num_cells[0],grid.num_cells[1],grid.num_cells[2]
maxm = max(maxmx, maxmy, maxmz)
# These work arrays really ought to live inside a fortran module
# as is done for sharpclaw
self.aux1 = np.empty((num_aux,maxm+2*num_ghost,3),order='F')
self.aux2 = np.empty((num_aux,maxm+2*num_ghost,3),order='F')
self.aux3 = np.empty((num_aux,maxm+2*num_ghost,3),order='F')
mwork = (maxm+2*num_ghost) * (31*num_eqn + num_waves + num_eqn*num_waves)
self.work = np.empty((mwork),order='F')
# ========== Hyperbolic Step =====================================
def step_hyperbolic(self,solution):
r"""
Take a step on the homogeneous hyperbolic system using the Clawpack
algorithm.
Clawpack is based on the Lax-Wendroff method, combined with Riemann
solvers and TVD limiters applied to waves.
"""
if(self.kernel_language == 'Fortran'):
state = solution.states[0]
grid = state.grid
dx,dy,dz = grid.delta
mx,my,mz = grid.num_cells
maxm = max(mx,my,mz)
self._apply_bcs(state)
qnew = self.qbc
qold = qnew.copy('F')
rpn3 = self.rp.rpn3._cpointer
if (self.dimensional_split) or (self.transverse_waves==0):
rpt3 = rpn3 # dummy value; it won't be called
rptt3 = rpn3 # dummy value; it won't be called
else:
rpt3 = self.rp.rpt3._cpointer
rptt3 = self.rp.rptt3._cpointer
if self.dimensional_split:
#Right now only Godunov-dimensional-splitting is implemented.
#Strang-dimensional-splitting could be added following dimsp3.f in Clawpack.
q, cfl_x = self.fmod.step3ds(maxm,self.num_ghost,mx,my,mz, \
qold,qnew,self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,1,self.fwave,rpn3,rpt3,rptt3)
q, cfl_y = self.fmod.step3ds(maxm,self.num_ghost,mx,my,mz, \
q,q,self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,2,self.fwave,rpn3,rpt3,rptt3)
q, cfl_z = self.fmod.step3ds(maxm,self.num_ghost,mx,my,mz, \
q,q,self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,3,self.fwave,rpn3,rpt3,rptt3)
cfl = max(cfl_x,cfl_y,cfl_z)
else:
q, cfl = self.fmod.step3(maxm,self.num_ghost,mx,my,mz, \
qold,qnew,self.auxbc,dx,dy,dz,self.dt,self._method,self._mthlim,\
self.aux1,self.aux2,self.aux3,self.work,self.fwave,rpn3,rpt3,rptt3)
self.cfl.update_global_max(cfl)
state.set_q_from_qbc(self.num_ghost,self.qbc)
if state.num_aux > 0:
state.set_aux_from_auxbc(self.num_ghost,self.auxbc)
else:
raise NotImplementedError("No python implementation for step_hyperbolic in 3D.")
``` |
{
"source": "Jille/salt",
"score": 2
} |
#### File: salt/output/__init__.py
```python
from __future__ import print_function
from __future__ import absolute_import
import re
import os
import sys
import errno
import logging
import traceback
# Import salt libs
import salt.loader
import salt.utils
import salt.ext.six as six
from salt.utils import print_cli
# Are you really sure !!!
# dealing with unicode is not as simple as setting defaultencoding
# which can break other python modules imported by salt in bad ways...
# reloading sys is not either a good idea...
# reload(sys)
# sys.setdefaultencoding('utf-8')
log = logging.getLogger(__name__)
def try_printout(data, out, opts, **kwargs):
'''
Safely get the string to print out, try the configured outputter, then
fall back to nested and then to raw
'''
try:
printout = get_printout(out, opts)(data, **kwargs)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError):
log.debug(traceback.format_exc())
try:
printout = get_printout('nested', opts)(data, **kwargs)
if printout is not None:
return printout.rstrip()
except (KeyError, AttributeError, TypeError):
log.error('Nested output failed: ', exc_info=True)
printout = get_printout('raw', opts)(data, **kwargs)
if printout is not None:
return printout.rstrip()
def get_progress(opts, out, progress):
'''
Get the progress bar from the given outputter
'''
return salt.loader.raw_mod(opts,
out,
'rawmodule',
mod='output')['{0}.progress_iter'.format(out)](progress)
def update_progress(opts, progress, progress_iter, out):
'''
Update the progress iterator for the given outputter
'''
# Look up the outputter
try:
progress_outputter = salt.loader.outputters(opts)[out]
except KeyError: # Outputter is not loaded
log.warning('Progress outputter not available.')
return False
progress_outputter(progress, progress_iter)
def progress_end(progress_iter):
try:
progress_iter.stop()
except Exception:
pass
return None
def display_output(data, out=None, opts=None, **kwargs):
'''
Print the passed data using the desired output
'''
if opts is None:
opts = {}
display_data = try_printout(data, out, opts, **kwargs)
output_filename = opts.get('output_file', None)
log.trace('data = {0}'.format(data))
try:
# output filename can be either '' or None
if output_filename:
if not hasattr(output_filename, 'write'):
ofh = salt.utils.fopen(output_filename, 'a') # pylint: disable=resource-leakage
fh_opened = True
else:
# Filehandle/file-like object
ofh = output_filename
fh_opened = False
try:
fdata = display_data
if isinstance(fdata, six.text_type):
try:
fdata = fdata.encode('utf-8')
except (UnicodeDecodeError, UnicodeEncodeError):
# try to let the stream write
# even if we didn't encode it
pass
if fdata:
if six.PY3:
ofh.write(fdata.decode())
else:
ofh.write(fdata)
ofh.write('\n')
finally:
if fh_opened:
ofh.close()
return
if display_data:
print_cli(display_data)
except IOError as exc:
# Only raise if it's NOT a broken pipe
if exc.errno != errno.EPIPE:
raise exc
def get_printout(out, opts=None, **kwargs):
'''
Return a printer function
'''
if opts is None:
opts = {}
if 'output' in opts and opts['output'] != 'highstate':
# new --out option, but don't choke when using --out=highstate at CLI
# See Issue #29796 for more information.
out = opts['output']
if out == 'text':
out = 'txt'
elif out is None or out == '':
out = 'nested'
if opts.get('progress', False):
out = 'progress'
opts.update(kwargs)
if 'color' not in opts:
def is_pipe():
'''
Check if sys.stdout is a pipe or not
'''
try:
fileno = sys.stdout.fileno()
except AttributeError:
fileno = -1 # sys.stdout is StringIO or fake
return not os.isatty(fileno)
if opts.get('force_color', False):
opts['color'] = True
elif opts.get('no_color', False) or is_pipe() or salt.utils.is_windows():
opts['color'] = False
else:
opts['color'] = True
outputters = salt.loader.outputters(opts)
if out not in outputters:
# Since the grains outputter was removed we don't need to fire this
# error when old minions are asking for it
if out != 'grains':
log.error('Invalid outputter {0} specified, fall back to nested'.format(out))
return outputters['nested']
return outputters[out]
def out_format(data, out, opts=None, **kwargs):
'''
Return the formatted outputter string for the passed data
'''
return try_printout(data, out, opts, **kwargs)
def string_format(data, out, opts=None, **kwargs):
'''
Return the formatted outputter string, removing the ANSI escape sequences.
'''
raw_output = try_printout(data, out, opts, **kwargs)
ansi_escape = re.compile(r'\x1b[^m]*m')
return ansi_escape.sub('', raw_output)
def html_format(data, out, opts=None, **kwargs):
'''
Return the formatted string as HTML.
'''
ansi_escaped_string = string_format(data, out, opts, **kwargs)
return ansi_escaped_string.replace(' ', ' ').replace('\n', '<br />')
def strip_esc_sequence(txt):
'''
Replace ESC (ASCII 27/Oct 33) to prevent unsafe strings
from writing their own terminal manipulation commands
'''
if isinstance(txt, six.string_types):
return txt.replace('\033', '?')
else:
return txt
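# Illustrative usage (a sketch; the data and config path are assumed, not taken from salt):
#   import salt.config
#   import salt.output
#   opts = salt.config.minion_config('/etc/salt/minion')
#   salt.output.display_output({'local': {'ret': True}}, out='nested', opts=opts)
#   text = salt.output.out_format({'local': {'ret': True}}, 'json', opts=opts)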
```
#### File: base/_modules/runtests_helpers.py
```python
from __future__ import absolute_import
import fnmatch
import os
import re
import tempfile
# Import salt libs
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
SYS_TMP_DIR = os.path.realpath(
# Avoid ${TMPDIR} and gettempdir() on MacOS as they yield a base path too long
# for unix sockets: ``error: AF_UNIX path too long``
# Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR}
os.environ.get('TMPDIR', tempfile.gettempdir()) if not salt.utils.is_darwin() else '/tmp'
)
# This tempdir path is defined on tests.integration.__init__
TMP = os.path.join(SYS_TMP_DIR, 'salt-tests-tmpdir')
def get_salt_temp_dir():
return TMP
def get_salt_temp_dir_for_path(*path):
return os.path.join(TMP, *path)
def get_sys_temp_dir_for_path(*path):
return os.path.join(SYS_TMP_DIR, *path)
def get_invalid_docs():
'''
Outputs the functions which do not have valid CLI example, or are missing a
docstring.
'''
allow_failure = (
'cmd.win_runas',
'cp.recv',
'glance.warn_until',
'ipset.long_range',
'libcloud_dns.get_driver',
'log.critical',
'log.debug',
'log.error',
'log.exception',
'log.info',
'log.warning',
'lowpkg.bin_pkg_info',
'lxc.run_cmd',
'nspawn.restart',
'nspawn.stop',
'pkg.expand_repo_def',
'pip.iteritems',
'runtests_decorators.depends',
'runtests_decorators.depends_will_fallback',
'runtests_decorators.missing_depends',
'runtests_decorators.missing_depends_will_fallback',
'state.apply',
'status.list2cmdline',
'swift.head',
'travisci.parse_qs',
'vsphere.clean_kwargs',
'vsphere.disconnect',
'vsphere.get_service_instance_via_proxy',
'vsphere.gets_service_instance_via_proxy',
'vsphere.supports_proxies',
'vsphere.test_vcenter_connection',
'vsphere.wraps',
)
allow_failure_glob = (
'runtests_helpers.*',
'vsphere.*',
)
nodoc = set()
noexample = set()
for fun, docstring in six.iteritems(__salt__['sys.doc']()):
if fun in allow_failure:
continue
else:
for pat in allow_failure_glob:
if fnmatch.fnmatch(fun, pat):
matched_glob = True
break
else:
matched_glob = False
if matched_glob:
continue
if not isinstance(docstring, six.string_types):
nodoc.add(fun)
        elif not re.search(r'([E|e]xample(?:s)?)+(?:.*):?', docstring):
noexample.add(fun)
return {'missing_docstring': sorted(nodoc),
'missing_cli_example': sorted(noexample)}
def modules_available(*names):
'''
Returns a list of modules not available. Empty list if modules are all available
'''
not_found = []
for name in names:
if '.' not in name:
name = name + '.*'
if not fnmatch.filter(list(__salt__), name):
not_found.append(name)
return not_found
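# Illustrative call (assumed): this module is synced to the test minion, so it is
# normally reached through the loader, e.g.
#   __salt__['runtests_helpers.modules_available']('cmd', 'network.ping')
# which returns the subset of the requested names that are NOT present in __salt__.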
```
#### File: unit/modules/test_boto_elb.py
```python
from __future__ import absolute_import
import logging
from copy import deepcopy
# import Python Third Party Libs
# pylint: disable=import-error
try:
import boto
import boto.ec2.elb
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
from moto import mock_ec2, mock_elb
HAS_MOTO = True
except ImportError:
HAS_MOTO = False
    def mock_ec2(self):
        '''
        if the mock_ec2 function is not available due to import failure
        this replaces the decorated function with stub_function.
        Allows boto_elb unit tests to use the @mock_ec2 decorator
        without a "NameError: name 'mock_ec2' is not defined" error.
        '''
        def stub_function(self):
            pass
        return stub_function
    def mock_elb(self):
        '''
        if the mock_elb function is not available due to import failure
        this replaces the decorated function with stub_function.
        Allows boto_elb unit tests to use the @mock_elb decorator
        without a "NameError: name 'mock_elb' is not defined" error.
        '''
        def stub_function(self):
            pass
        return stub_function
# pylint: enable=import-error
# Import Salt Libs
import salt.config
import salt.loader
import salt.modules.boto_elb as boto_elb
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON
log = logging.getLogger(__name__)
region = 'us-east-1'
access_key = '<KEY>'
secret_key = '<KEY>'
conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key,
'profile': {}}
boto_conn_parameters = {'aws_access_key_id': access_key,
'aws_secret_access_key': secret_key}
instance_parameters = {'instance_type': 't1.micro'}
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(HAS_MOTO is False, 'The moto module must be installed.')
class BotoElbTestCase(TestCase, LoaderModuleMockMixin):
'''
TestCase for salt.modules.boto_elb module
'''
def setup_loader_modules(self):
opts = salt.config.DEFAULT_MASTER_OPTS
utils = salt.loader.utils(opts, whitelist=['boto'])
funcs = salt.loader.minion_mods(opts, utils=utils)
return {
boto_elb: {
'__opts__': opts,
'__utils__': utils,
'__salt__': funcs
}
}
def setUp(self):
TestCase.setUp(self)
# __virtual__ must be caller in order for _get_conn to be injected
boto_elb.__virtual__()
@mock_ec2
@mock_elb
def test_register_instances_valid_id_result_true(self):
'''
tests that given a valid instance id and valid ELB that
register_instances returns True.
'''
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region,
**boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestRegisterInstancesValidIdResult'
conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60')
register_result = boto_elb.register_instances(elb_name,
reservations.instances[0].id,
**conn_parameters)
self.assertEqual(True, register_result)
@mock_ec2
@mock_elb
def test_register_instances_valid_id_string(self):
'''
tests that given a string containing a instance id and valid ELB that
register_instances adds the given instance to an ELB
'''
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region,
**boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestRegisterInstancesValidIdResult'
conn_elb.create_load_balancer(elb_name, zones, [(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60')
boto_elb.register_instances(elb_name, reservations.instances[0].id,
**conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
registered_instance_ids = [instance.id for instance in
load_balancer_refreshed.instances]
log.debug(load_balancer_refreshed.instances)
self.assertEqual([reservations.instances[0].id], registered_instance_ids)
@mock_ec2
@mock_elb
def test_deregister_instances_valid_id_result_true(self):
'''
tests that given an valid id the boto_elb deregister_instances method
removes exactly one of a number of ELB registered instances
'''
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region,
**boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestDeregisterInstancesValidIdResult'
load_balancer = conn_elb.create_load_balancer(elb_name, zones,
[(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60')
load_balancer.register_instances(reservations.instances[0].id)
deregister_result = boto_elb.deregister_instances(elb_name,
reservations.instances[0].id,
**conn_parameters)
self.assertEqual(True, deregister_result)
@mock_ec2
@mock_elb
def test_deregister_instances_valid_id_string(self):
'''
        tests that given a valid id, the boto_elb deregister_instances method
        removes exactly one of a number of ELB-registered instances
'''
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region,
**boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestDeregisterInstancesValidIdString'
load_balancer = conn_elb.create_load_balancer(elb_name, zones,
[(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60', min_count=2)
all_instance_ids = [instance.id for instance in reservations.instances]
load_balancer.register_instances(all_instance_ids)
boto_elb.deregister_instances(elb_name, reservations.instances[0].id,
**conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
expected_instances = deepcopy(all_instance_ids)
expected_instances.remove(reservations.instances[0].id)
actual_instances = [instance.id for instance in
load_balancer_refreshed.instances]
self.assertEqual(actual_instances, expected_instances)
@mock_ec2
@mock_elb
def test_deregister_instances_valid_id_list(self):
'''
        tests that given valid ids in the form of a list, boto_elb
        deregister_instances removes all members of the given list
'''
conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)
conn_elb = boto.ec2.elb.connect_to_region(region,
**boto_conn_parameters)
zones = [zone.name for zone in conn_ec2.get_all_zones()]
elb_name = 'TestDeregisterInstancesValidIdList'
load_balancer = conn_elb.create_load_balancer(elb_name, zones,
[(80, 80, 'http')])
reservations = conn_ec2.run_instances('ami-08389d60', min_count=3)
all_instance_ids = [instance.id for instance in reservations.instances]
load_balancer.register_instances(all_instance_ids)
        # reservations.instances[:-1] refers to all instances except the last
        # instance
deregister_instances = [instance.id for instance in
reservations.instances[:-1]]
expected_instances = [reservations.instances[-1].id]
boto_elb.deregister_instances(elb_name, deregister_instances,
**conn_parameters)
load_balancer_refreshed = conn_elb.get_all_load_balancers(elb_name)[0]
actual_instances = [instance.id for instance in
load_balancer_refreshed.instances]
self.assertEqual(actual_instances, expected_instances)
``` |
{
"source": "jillesca/devasc",
"score": 3
} |
#### File: devasc/parsing_api_formats/helper.py
```python
from datetime import date
## User Class
class User:
def __init__(self):
self.id = None
self.first_name = None
self.last_name = None
self.birth_date = None
self.address = None
self.score = None
# Print the object
def __repr__(self):
return str(self.__dict__)
# User object serialization
def serializeUser(obj):
    if isinstance(obj, User):
        return obj.__dict__
    if isinstance(obj, date):
        return str(obj)
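# Hypothetical usage sketch (not part of the original module; the field values
# below are made up). serializeUser is meant to be passed as the `default`
# hook of json.dumps so User and date objects can be serialized:
#
#   import json
#   u = User()
#   u.id, u.first_name, u.birth_date = 1, "Ada", date(1815, 12, 10)
#   print(json.dumps(u, default=serializeUser, indent=2))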
## Used For MiniDom
# Print the tags of a nodeList object
def printTags(nodeList):
for node in nodeList:
if node.nodeName != '#text':
print(node.nodeName)
# Recursively print each child node's name (tag) and value, indenting by depth
def printNodes (nodeList, level=0):
for node in nodeList:
if node.nodeName != '#text':
print( (" ")*level + node.nodeName + ':' + node.firstChild.data)
printNodes(node.childNodes, level+1)
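# Hypothetical MiniDom usage sketch (the XML string below is made up):
#
#   from xml.dom.minidom import parseString
#   doc = parseString("<user><id>1</id><name>Ada</name></user>")
#   printTags(doc.documentElement.childNodes)   # prints: id, name
#   printNodes(doc.documentElement.childNodes)  # prints "id:1" and "name:Ada"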
``` |
{
"source": "jillguardian/wireframe2code",
"score": 2
} |
#### File: wireframe2code/test/test_html.py
```python
import os
from pathlib import Path
import pytest
from cv2 import cv2
import file
from sketch.capture import Capture
from sketch.wireframe import Wireframe, Location
from web.writer import Html
from web.writer import sort
from bs4 import BeautifulSoup
@pytest.fixture(scope="module")
def basedir():
base_path = Path(__file__).parent
return base_path
@pytest.fixture(scope="module")
def tempdir():
with file.tempdir() as tempdir:
yield tempdir
@pytest.fixture(scope="module")
def wireframe():
path = os.path.join(os.path.dirname(__file__), 'resources/clean_wireframe_sketch.jpg')
capture = Capture(cv2.imread(path))
yield Wireframe(capture)
def test_sort_widgets_by_location(wireframe):
widgets = wireframe.widgets()
widgets = sort(widgets)
actual = [widget.location for widget in widgets]
expected = [
Location((0, 0), (1, 1)),
Location((2, 0)),
Location((3, 0), (3, 3)),
Location((2, 1)),
Location((0, 2)),
Location((1, 2), (2, 2)),
Location((0, 3), (2, 3)),
]
assert actual == expected
def test_generate_html_file_from_wireframe(wireframe, basedir, tempdir):
tempdir = Path(tempdir)
html = Html(tempdir)
html.write(wireframe)
apath = (tempdir / "index.html").resolve()
epath = (basedir / "resources/clean_wireframe_sketch.html").resolve()
with open(apath, 'r') as afile, open(epath, 'r') as efile:
        actual = BeautifulSoup(afile, 'html.parser')
        expected = BeautifulSoup(efile, 'html.parser')
assert actual == expected
def test_copy_assets_to_destination_directory(wireframe, tempdir):
html = Html(tempdir)
html.write(wireframe)
files = os.listdir(tempdir)
assert set(files) == {
'index.html',
'style.css',
'favicon.ico',
'bootstrap.min.css',
'bootstrap.min.js'
}
``` |
{
"source": "jillh510/video_story_arcs",
"score": 3
} |
#### File: jillh510/video_story_arcs/webserver.py
```python
from flask import Flask
from flask import render_template
from flask import jsonify
from flask import request
from flask import abort
from playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics
from db import connect_to_db
ALL_DBS = None
app = Flask(__name__)
@app.route('/')
def index():
# return render_template('index.html', greeting='here we are then')
return "index"
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
@app.route('/tools/')
def tools():
return render_template('tools.html')
@app.route('/api/1.0/create_playlists', methods = ['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods = ['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_all_categories', methods = ['POST'])
def get_categories():
retval = get_all_categories(ALL_DBS)
return jsonify({'all_categories': retval})
@app.route('/api/1.0/get_all_topics', methods = ['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
@app.route('/api/1.0/add_category', methods = ['POST'])
def add_category():
retval = add_new_category(request.json, ALL_DBS)
return retval
@app.route('/api/1.0/add_topic', methods = ['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods = ['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1,
'api_call': api_call,
'param_2': param2}
return jsonify(retval)
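# Example request against the generic endpoint (hypothetical host, port and payload):
#   curl -X POST http://localhost:5000/api/1.0/some_call \
#        -H 'Content-Type: application/json' \
#        -d '{"param1": "foo", "param2": "bar"}'
# The JSON response echoes the call name and both parameters.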
if __name__ == '__main__':
# debug = True makes the server restart when the Python files change. TODO: make it
# depend on whether we're running locally or in production.
ALL_DBS = connect_to_db()
# create_playlists(ALL_DBS)
app.run(debug = True)
``` |
{
"source": "jillhubbard/cs261-priority-queue",
"score": 3
} |
#### File: jillhubbard/cs261-priority-queue/test_naive_priority_queue.py
```python
import unittest
import time
from naive_priority_queue import NaivePriorityQueue
from job import Job
class TestNaivePriorityQueue(unittest.TestCase):
"""
Initialization
"""
def test_instantiation(self):
"""
A NaivePriorityQueue exists.
"""
try:
NaivePriorityQueue()
except NameError:
self.fail("Could not instantiate NaivePriorityQueue.")
# def test_internal(self):
# """
# A NaivePriorityQueue uses a list to store its data.
# """
# pq = NaivePriorityQueue()
# self.assertEqual(list, type(pq.data))
# def test_enqueue_one_internal(self):
# """
# Enqueueing a value adds it to the internal list.
# """
# pq = NaivePriorityQueue()
# j = Job(5, 'The')
# pq.enqueue(j)
# self.assertEqual(j, pq.data[0])
# def test_enqueue_two_internal(self):
# """
# Enqueueing two values results in the first enqueued value being the first
# one in the list, and the second value being the last one in the list.
# """
# pq = NaivePriorityQueue()
# first = Job(5, 'new')
# second = Job(6, 'moon')
# pq.enqueue(first)
# pq.enqueue(second)
# self.assertEqual(first, pq.data[0])
# self.assertEqual(second, pq.data[1])
# def test_enqueue_three_internal(self):
# """
# Enqueueing three values results in the first enqueued value being the first
# one in the list, and the third value being the last one in the list.
# """
# pq = NaivePriorityQueue()
# first = Job(5, 'rode')
# second = Job(6, 'high')
# third = Job(7, 'in')
# pq.enqueue(first)
# pq.enqueue(second)
# pq.enqueue(third)
# self.assertEqual(first, pq.data[0])
# self.assertEqual(second, pq.data[1])
# self.assertEqual(third, pq.data[2])
# def test_dequeue_one(self):
# """
# Dequeuing from a single-element queue returns the single value.
# """
# pq = NaivePriorityQueue()
# j = Job(5, 'the')
# pq.enqueue(j)
# self.assertEqual(j, pq.dequeue())
# def test_dequeue_one_internal(self):
# """
# Dequeuing from a single-element queue removes it from the internal list.
# """
# pq = NaivePriorityQueue()
# job = Job(5, 'crown')
# pq.enqueue(job)
# self.assertEqual(1, len(pq.data))
# _ = pq.dequeue()
# self.assertEqual(0, len(pq.data))
# # Hint: NaivePriorityQueues perform a linear search. Don't optimize.
# def test_dequeue_two(self):
# """
# Dequeuing from a two-element queue returns the one with highest priority.
# """
# pq = NaivePriorityQueue()
# lower_priority = Job(1, 'of')
# higher_priority = Job(3, 'the')
# pq.enqueue(higher_priority)
# pq.enqueue(lower_priority)
# self.assertEqual(higher_priority, pq.dequeue())
# def test_dequeue_two_internal(self):
# """
# Dequeuing from a two-element queue removes the job with the highest
# priority from the list.
# """
# pq = NaivePriorityQueue()
# lower_priority = Job(1, 'metropolis')
# higher_priority = Job(3, 'shining')
# pq.enqueue(higher_priority)
# pq.enqueue(lower_priority)
# _ = pq.dequeue()
# self.assertEqual(lower_priority, pq.data[0])
# self.assertEqual(1, len(pq.data))
# def test_dequeue_three(self):
# """
# Dequeuing from a three-element queue returns the jobs with the highest
# priority.
# """
# pq = NaivePriorityQueue()
# lower_priority = Job(1, 'like')
# middle_priority = Job(3, 'who')
# higher_priority = Job(5, 'on')
# pq.enqueue(higher_priority)
# pq.enqueue(lower_priority)
# pq.enqueue(middle_priority)
# self.assertEqual(higher_priority, pq.dequeue())
# self.assertEqual(middle_priority, pq.dequeue())
# self.assertEqual(lower_priority, pq.dequeue())
# def test_dequeue_three_internal(self):
# """
# Dequeuing from a three-element queue removes each dequeued value from
# the internal list, highest-priority first.
# """
# pq = NaivePriorityQueue()
# lower_priority = Job(1, 'top')
# middle_priority = Job(3, 'of')
# higher_priority = Job(5, 'this')
# pq.enqueue(higher_priority)
# pq.enqueue(lower_priority)
# pq.enqueue(middle_priority)
# _ = pq.dequeue()
# self.assertEqual(lower_priority, pq.data[0])
# _ = pq.dequeue()
# self.assertEqual(lower_priority, pq.data[0])
# """
# Emptiness
# """
# def test_empty(self):
# """
# A queue is initially empty.
# """
# pq = NaivePriorityQueue()
# self.assertTrue(pq.is_empty())
# def test_not_empty(self):
# """
# A queue with one enqueued value is not empty.
# """
# pq = NaivePriorityQueue()
# pq.enqueue(Job(1, 'People'))
# self.assertFalse(pq.is_empty())
# def test_empty_after_dequeue(self):
# """
# A queue with one enqueued value is empty after dequeuing.
# """
# pq = NaivePriorityQueue()
# pq.enqueue(Job(1, 'was'))
# _ = pq.dequeue()
# self.assertTrue(pq.is_empty())
# def test_not_empty_multiple(self):
# """
# A queue with two enqueued values is not empty after dequeuing only one.
# """
# pq = NaivePriorityQueue()
# pq.enqueue(Job(1, 'hustling'))
# pq.enqueue(Job(3, 'arguing and bustling'))
# _ = pq.dequeue()
# self.assertFalse(pq.is_empty())
# def test_initial_dequeue(self):
# """
# Dequeuing from an empty queue returns None.
# """
# pq = NaivePriorityQueue()
# self.assertIsNone(pq.dequeue())
# """
# Algorithmic complexity
# """
# def test_enqueue_efficiency(self):
# """
# Enqueing a value is always O(1).
# """
# time_samples = []
# for _ in range(0, 1000):
# pq = NaivePriorityQueue()
# start_time = time.time()
# pq.enqueue('fake')
# end_time = time.time()
# time_samples.append(end_time - start_time)
# small_average_enqueue_time = sum(time_samples) / float(len(time_samples))
# large_queue = NaivePriorityQueue()
# for _ in range(0, 1000000):
# large_queue.enqueue('fake')
# large_time_samples = []
# for _ in range(0, 1000):
# start_time = time.time()
# large_queue.enqueue('fake')
# end_time = time.time()
# large_time_samples.append(end_time - start_time)
# large_average_enqueue_time = sum(large_time_samples) / float(len(large_time_samples))
# self.assertAlmostEqual(small_average_enqueue_time, large_average_enqueue_time, delta=small_average_enqueue_time)
# # While enqueing naively is efficient... what is the complexity of dequeuing?
# def test_dequeue_efficiency(self):
# """
# Dequeuing a value is O(n).
# """
# print("This test will take a while...") # See the comment below.
# time_samples = []
# for _ in range(0, 1000):
# pq = NaivePriorityQueue()
# pq.enqueue('fake')
# start_time = time.time()
# pq.dequeue()
# end_time = time.time()
# time_samples.append(end_time - start_time)
# small_average_dequeue_time = sum(time_samples) / float(len(time_samples))
# large_queue = NaivePriorityQueue()
# for _ in range(0, 1000000):
# large_queue.enqueue('fake')
# large_time_samples = []
# for _ in range(0, 1000):
# start_time = time.time()
# large_queue.dequeue()
# end_time = time.time()
# large_time_samples.append(end_time - start_time)
# large_average_dequeue_time = sum(large_time_samples) / float(len(large_time_samples))
# self.assertNotAlmostEqual(small_average_dequeue_time, large_average_dequeue_time, delta=small_average_dequeue_time)
# Notice how the last test takes time to "prove."
# By studying *algorithm analysis*, you can prove the efficiency deductively,
# with formal proofs, rather than with long-running tests.
def fake_value():
return f"FAKE {time.time()}"
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jillianchang/LatinScansion",
"score": 3
} |
#### File: LatinScansion/latin_scansion/scansion.py
```python
import functools
import logging
from typing import Iterable, List, Optional, Tuple
import pynini
from pynini.lib import rewrite
from . import scansion_pb2
def _chunk(fst: pynini.Fst) -> List[Tuple[str, str]]:
"""Chunks a string transducer into tuples.
This function is given a string transducer of the form:
il1 il2 il3 il4 il5 il6
ol1 eps eps ol2 eps ol3
And returns the list:
[(il1 il2 il3, ol1), (il4 il5, ol2), (il6, ol3)]
It thus recovers the "many-to-one" alignment.
Args:
fst: a string transducer containing the alignment.
Returns:
A list of string, char tuples.
"""
# Input epsilon-normalization and removal forces a sensible alignment.
fst = pynini.epsnormalize(fst).rmepsilon()
assert (
fst.properties(pynini.STRING, True) == pynini.STRING
), "FST is not a string automaton"
alignment: List[Tuple[str, str]] = []
state = 0
arc = fst.arcs(state).value()
assert arc.ilabel, f"Input label leaving state {state} contains epsilon"
ilabels = bytearray([arc.ilabel])
assert arc.olabel, f"Output label leaving state {state} contains epsilon"
olabel = arc.olabel
for state in range(1, fst.num_states() - 1):
arc = fst.arcs(state).value()
assert (
arc.ilabel
), f"Input label leaving state {state} contains epsilon"
# A non-epsilon olabel signals a new chunk.
if arc.olabel:
alignment.append((ilabels.decode("utf8"), chr(olabel)))
ilabels.clear()
olabel = arc.olabel
ilabels.append(arc.ilabel)
assert (
ilabels
), f"Input label leaving penultimate state {state} contains epsilon"
alignment.append((ilabels.decode("utf8"), chr(olabel)))
return alignment
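# Concrete illustration of the recovered alignment (values are made up): for a
# transducer whose input side spells "arma" and whose output side carries one
# syllable-weight code per syllable, _chunk would return something like
# [("ar", "-"), ("ma", "U")], pairing each span of input symbols with the single
# output symbol that consumed it.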
def scan_verse(
normalize_rule: pynini.Fst,
pronounce_rule: pynini.Fst,
variable_rule: pynini.Fst,
syllable_rule: pynini.Fst,
weight_rule: pynini.Fst,
hexameter_rule: pynini.Fst,
text: str,
number: int = 0,
) -> scansion_pb2.Verse:
"""Scans a single verse of poetry.
Args:
normalize_rule: the normalization rule.
pronounce_rule: the pronunciation rule.
variable_rule: the rule for introducing pronunciation variation.
syllable_rule: the syllabification rule.
weight_rule: the weight rule.
hexameter_rule: the hexameter rule.
text: the input text.
        number: an optional verse number (defaulting to 0).
Returns:
A populated Verse message.
"""
verse = scansion_pb2.Verse(number=number, text=text)
try:
verse.norm = rewrite.top_rewrite(
# We need escapes for normalization since Pharr uses [ and ].
pynini.escape(verse.text),
normalize_rule,
)
except rewrite.Error:
logging.error("Rewrite failure (verse %d)", verse.number)
return verse
try:
verse.raw_pron = rewrite.top_rewrite(verse.norm, pronounce_rule)
except rewrite.Error:
logging.error("Rewrite failure (verse %d)", verse.number)
return verse
var = verse.raw_pron @ variable_rule
syllable = pynini.project(var, "output") @ syllable_rule
weight = pynini.project(syllable, "output") @ weight_rule
foot = pynini.project(weight, "output") @ hexameter_rule
if foot.start() == pynini.NO_STATE_ID:
verse.defective = True
logging.warning(
"Defective verse (verse %d): %r", verse.number, verse.norm
)
return verse
# Works backwards to obtain intermediate structure.
foot = pynini.arcmap(pynini.shortestpath(foot), map_type="rmweight")
weight = pynini.shortestpath(weight @ pynini.project(foot, "input"))
syllable = pynini.shortestpath(syllable @ pynini.project(weight, "input"))
# Writes structure to message.
verse.var_pron = pynini.project(syllable, "input").string()
foot_chunks = _chunk(foot)
weight_chunks = _chunk(weight)
syllable_chunks = _chunk(syllable)
# These help us preserve the multi-alignment.
weight_chunk_idx = 0
syllable_chunk_idx = 0
for weight_codes, foot_code in foot_chunks:
# The foot type enum uses the ASCII decimals; see scansion.proto.
foot = verse.foot.add(type=ord(foot_code))
for weight_code in weight_codes:
syllable_codes, exp_weight_code = weight_chunks[weight_chunk_idx]
assert (
weight_code == exp_weight_code
), f"Weight code mismatch: {weight_code!r} != {exp_weight_code!r}"
weight_chunk_idx += 1
# Skips over whitespace between words.
if weight_code.isspace():
# We also advance one step in the syllable chunking or this
# will become misaligned.
syllable_chunk_idx += 1
continue
syllable = foot.syllable.add(weight=ord(weight_code))
for syllable_code in syllable_codes:
var_codes, exp_syllable_code = syllable_chunks[
syllable_chunk_idx
]
assert syllable_code == exp_syllable_code, (
"Syllable code mismatch: "
f"{syllable_code!r} != {exp_syllable_code!r}"
)
syllable_chunk_idx += 1
# Skips over whitespace between words.
if syllable_code.isspace():
continue
if syllable_code == "O":
syllable.onset = var_codes
elif syllable_code in ("-", "U"):
syllable.nucleus = var_codes
elif syllable_code == "C":
syllable.coda = var_codes
else:
raise AssertionError(
f"Unknown syllable code: {syllable_code}"
)
return verse
def scan_document(
normalize_rule: pynini.Fst,
pronounce_rule: pynini.Fst,
variable_rule: pynini.Fst,
syllable_rule: pynini.Fst,
weight_rule: pynini.Fst,
hexameter_rule: pynini.Fst,
verses: Iterable[str],
name: Optional[str] = None,
) -> scansion_pb2.Document:
"""Scans an entire document.
Args:
normalize_rule: the normalization rule.
pronounce_rule: the pronunciation rule.
variable_rule: the rule for introducing pronunciation variation.
        syllable_rule: the syllabification rule.
        weight_rule: the weight rule.
        hexameter_rule: the hexameter rule.
verses: an iterable of verses to scan.
name: optional metadata about the source.
Returns:
A populated Document message.
"""
document = scansion_pb2.Document(name=name)
    # This binds the rule arguments ahead of time.
curried = functools.partial(
scan_verse,
normalize_rule,
pronounce_rule,
variable_rule,
syllable_rule,
weight_rule,
hexameter_rule,
)
scanned_verses = 0
defective_verses = 0
for number, verse in enumerate(verses, 1):
# TODO(kbg): the `append` method copies the message to avoid circular
# references. Would we improve performance using the `add` method and
# passing the empty message to be mutated?
scanned = curried(verse, number)
document.verse.append(scanned)
if scanned.defective:
defective_verses += 1
else:
scanned_verses += 1
logging.info("%d verses scanned", scanned_verses)
logging.info("%d verses defective", defective_verses)
return document
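# Hypothetical usage sketch (the FST paths are made up; the real rules are
# compiled elsewhere in this package):
#
#   rules = [pynini.Fst.read(p) for p in ("normalize.fst", "pronounce.fst",
#                                         "variable.fst", "syllable.fst",
#                                         "weight.fst", "hexameter.fst")]
#   document = scan_document(*rules, verses=["Arma virumque cano"], name="Aeneid 1")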
``` |
{
"source": "jillianchiam/Perry-Against-the-Coyote",
"score": 3
} |
#### File: jillianchiam/Perry-Against-the-Coyote/shooting.py
```python
import pygame, sys
import os
import math
import random
from pygame.locals import *
pygame.init()
width, height = 640, 480
screen = pygame.display.set_mode((width, height))
keys = [False, False, False, False, False]
playerpos = [250, 200]
acc=[0,0]
list_of_hats=[]
pygame.display.set_caption('Perry Against the Coyotes')
countdown=100
countdown1=0
coyotes=[[640,100]]
healthvalue=194
current_path = os.path.dirname(r'''C:\Users\jilli\AppData\Local\Programs\Python\Python36\shooting.py''')
resource_path = os.path.join(current_path, 'resources')
image_path = os.path.join(resource_path, 'images')
player = pygame.image.load(os.path.join(image_path, 'perry.png'))
background = pygame.image.load(os.path.join(image_path, 'background.png'))
sunflower = pygame.image.load(os.path.join(image_path, 'sunflower.png'))
hat = pygame.image.load(os.path.join(image_path, 'perryhat.png'))
coyoteimg1 = pygame.image.load(os.path.join(image_path, 'coyote.png'))
coyoteimg = coyoteimg1
healthbar = pygame.image.load(os.path.join(image_path, 'healthbar.png'))
health = pygame.image.load(os.path.join(image_path, 'health.png'))
gameover=pygame.image.load(os.path.join(image_path, 'gameover.png'))
youwin=pygame.image.load(os.path.join(image_path, 'youwin.png'))
"""
renders the game and characters
"""
running = 1
exitcode = 0
while running:
countdown-=1
screen.fill(0)
for x in range(width//background.get_width()+1): # range() can only work with integers, but dividing
#with the / operator always results in a float value
for y in range(height//background.get_height()+1):
screen.blit(background,(x*100,y*100))
screen.blit(sunflower,(0,30))
screen.blit(sunflower,(0,135))
screen.blit(sunflower,(0,240))
screen.blit(sunflower,(0,345 ))
position = pygame.mouse.get_pos()
angle = math.atan2(position[1]-(playerpos[1]+32), position[0]-(playerpos[0]+26))
playerrotates = pygame.transform.rotate(player, 360-angle*57.29)
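    # Rotation explained: atan2 above gives the player-to-mouse angle in radians,
    # multiplying by ~57.29 (180/pi) converts it to degrees, and it is subtracted
    # from 360 because pygame rotates counter-clockwise while screen y grows downward.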
playerpos1 = (playerpos[0]-playerrotates.get_rect().width/2, playerpos[1]-playerrotates.get_rect().height/2)
screen.blit(playerrotates, playerpos1)
for perryhat in list_of_hats:
index=0
        velx = math.cos(perryhat[0])*10 # 10 is the speed of the hat projectile
vely = math.sin(perryhat[0])*10
perryhat[1] = perryhat[1] + velx
perryhat[2] = perryhat[2] + vely
        if perryhat[1] < -64 or perryhat[1] > 640 or perryhat[2] < -64 or perryhat[2] > 480:
list_of_hats.pop(index) #If no index is specified, a.pop() removes and
# returns the last item in the list.
index = index + 1
for projectile in list_of_hats:
        list_of_hats1 = pygame.transform.rotate(hat, 360-projectile[0]*57.29) # multiply radians by approximately 57.29 (360/2π) to convert to degrees
screen.blit(list_of_hats1, (projectile[1], projectile[2]))
if countdown==0:
coyotes.append([640, random.randint(50,430)])
countdown=100-(countdown1*2)
if countdown1>=35:
countdown1=35
else:
countdown1+=5
index=0
for coyote in coyotes:
if coyote[0]<-64:
coyotes.pop(index)
coyote[0]-=7
coyoterect=pygame.Rect(coyoteimg.get_rect())
coyoterect.top=coyote[1]
coyoterect.left=coyote[0]
if coyoterect.left<64:
healthvalue -= random.randint(5,20)
coyotes.pop(index)
index1 = 0
for perryhat in list_of_hats: #rect here store rectangular coordinates
hatrect = pygame.Rect(hat.get_rect())
hatrect.left=perryhat[1]
hatrect.top=perryhat[2]
if coyoterect.colliderect(hatrect):
acc[0]+=1
coyotes.pop(index) # pop() removes and returns last object or obj from the list
list_of_hats.pop(index1)
index1 += 1
index+=1
for coyote in coyotes:
screen.blit(coyoteimg, coyote)
font = pygame.font.Font(None, 22)
    survivedtext = font.render(str((90000-pygame.time.get_ticks())//60000)+":"+str((90000-pygame.time.get_ticks())//1000%60).zfill(2), True, (0,0,0))
textRect = survivedtext.get_rect()
textRect.topright=[640,5]
screen.blit(survivedtext, textRect)
screen.blit(healthbar, (5,5))
for perryhealth in range(healthvalue):
screen.blit(health, (perryhealth+8, 8))
pygame.display.flip() # Update the full display Surface to the screen
for event in pygame.event.get(): #event is for actions made by user
#like pressing a key
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
if event.type == pygame.KEYDOWN:
left = -10
right = 10
up = 10
down = -10
if event.key==K_w or event.key == pygame.K_UP:
keys[0]=True
            elif event.key==K_s or event.key == pygame.K_DOWN:
                keys[1]=True
            elif event.key==K_a or event.key == pygame.K_LEFT:
                keys[2]=True
            elif event.key==K_d or event.key == pygame.K_RIGHT:
                keys[3]=True
if event.type == pygame.KEYUP:
if event.key==pygame.K_w or event.key == pygame.K_UP:
keys[0]=False
            elif event.key==K_s or event.key == pygame.K_DOWN:
                keys[1]=False
            elif event.key==K_a or event.key == pygame.K_LEFT:
                keys[2]=False
elif event.key==pygame.K_d or event.key == pygame.K_RIGHT:
keys[3]=False
if event.type==pygame.MOUSEBUTTONDOWN:
position=pygame.mouse.get_pos()
acc[1]+=1
list_of_hats.append([math.atan2(position[1]-(playerpos1[1]+32),
position[0]-(playerpos1[0]+26)),
playerpos1[0]+32,
playerpos1[1]+32])
if keys[0]:
playerpos[1]= playerpos[1] - 10
elif keys[1]:
playerpos[1]= playerpos[1] + 10
elif keys[2]:
playerpos[0] = playerpos[0] - 10
elif keys[3]:
playerpos[0] = playerpos[0] + 10
if pygame.time.get_ticks()>=90000:
running=0
exitcode=1
if healthvalue <= 0:
running=0
exitcode=0
if acc[1]!=0:
accuracy=acc[0]*1.0/acc[1]*100
else:
accuracy=0
def initialize_gameover_font():
    pygame.font.init()
    return pygame.font.Font(None, 24)
def produce_text_on_screen():
textRect = text.get_rect()
textRect.centerx = screen.get_rect().centerx
textRect.centery = screen.get_rect().centery+24
result = gameover if exitcode == 0 else youwin
screen.blit(result, (0,0))
screen.blit(text, textRect)
if exitcode==0:
    font = initialize_gameover_font()
    text = font.render("Accuracy: "+str(accuracy)+"%", True, (255, 0, 0))
    produce_text_on_screen()
else:
    font = initialize_gameover_font()
    text = font.render("Accuracy: "+str(accuracy)+"%", True, (0, 255, 0))
    produce_text_on_screen()
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
pygame.display.flip()
``` |
{
"source": "jillianderson8/cmpt733finalproject",
"score": 3
} |
#### File: 1_import/src/shp2geojson.py
```python
import json
import os
import argparse
import geopandas as gpd
def shp2geojson(inFile, outFile):
trees = gpd.read_file(inFile)
try:
os.remove(outFile)
except OSError:
pass
trees.to_file(outFile, driver='GeoJSON')
parser = argparse.ArgumentParser(description='Takes in a shapefile and writes a corresponding GeoJSON file.')
parser.add_argument('input_path', help='Path to the directory containing the shapefile to convert.')
parser.add_argument('output_path', help='File path of the resulting GeoJSON file.')
args = parser.parse_args()
shp2geojson(args.input_path, args.output_path)
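# Example invocation (hypothetical paths):
#   python shp2geojson.py data/tree_shapefile_dir data/trees.geojson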
```
#### File: 3_tileAnnotate/src/tileAnnotate.py
```python
from PIL import Image
from PIL import ImageFile
import math
import argparse
import json
def annotate(mapping, allxycw, i, j, tile_width, tile_height, result_width, result_height, outfilename):
"""
Write out labels in the format required by darknet from the tile with the
properties provided
"""
# Filter trees
xycwh = [(x,y,c,w,w) for x,y,c,w in allxycw if (x >= (j*tile_width)) &
(x <= j*tile_width+result_width) &
(y >= i*tile_height) &
(y <= i*tile_height+result_height)]
x2ratio = lambda x: (x-j*tile_width)/result_width
y2ratio = lambda y: (y-i*tile_height)/result_height
w2ratio = lambda w: w/result_width
h2ratio = lambda h: h/result_height
toprint = [str(mapping[c]) + ' ' + # Class
str(x2ratio(x)) + ' ' + # Box Center x
str(y2ratio(y)) + ' ' + # Box Center y
str(w2ratio(w)) + ' ' + # Box width
str(h2ratio(h)) # Box height
for x,y,c,w,h in xycwh]
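    # Each line above follows the darknet/YOLO label convention
    # "<class-index> <x-center> <y-center> <width> <height>", all as fractions of
    # the tile size, e.g. a made-up line "2 0.500 0.250 0.179 0.179" describes a
    # class-2 box centered at (50%, 25%) of the tile.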
try:
# Save Annotation
with open("{filename}.{i}.{j}.txt".format(i=i, j=j, filename=outfilename), "w") as f:
for line in toprint:
f.write(line+'\n')
except Exception as e:
print(e)
def tile(image, img_width, img_height, tile_height, tile_width, outfilename, outextension, mapping, on_tile_saved, allxycw):
"""
Divide the given image into tiles
"""
# Tile Images
num_tiles_vert = math.ceil(img_height/tile_height)
num_tiles_horiz= math.ceil(img_width/tile_width)
for i in range(0, num_tiles_vert):
result_height = min(img_height - i*tile_height, tile_height)
for j in range(0, num_tiles_horiz):
result_width = min(img_width - j*tile_width, tile_width)
box = (j*tile_width, i*tile_height, j*tile_width+result_width, i*tile_height+result_height)
tile = image.crop(box)
try:
# Save Tile Image
tile.convert('RGB').save('{filename}.{i}.{j}.{extension}'.format(i=i, j=j, filename=outfilename, extension=outextension), 'JPEG')
except Exception as e:
print(e)
if on_tile_saved:
                on_tile_saved(mapping, allxycw, i, j, tile_width, tile_height, result_width, result_height, outfilename)
def tileAnnotate(image_in, json_in, outfilename, outextension, tile_height, tile_width):
# Load Image
Image.MAX_IMAGE_PIXELS = 500000000
image = Image.open(image_in)
img_width, img_height = image.size
mapping = None
allxycw = None
if json_in:
# Load GeoJSON data
trees = json.load(open(json_in))
allxycw = [(x['properties']['box_x'],
x['properties']['box_y'],
x['properties']['class'],
80)
for x in trees['features']]
# Create Class Mapping
classes = sorted(set([c for x,y,c,w in allxycw]))
mapping = dict(zip(classes, list(range(0,len(classes)))))
with open(outfilename + '_classes.json', 'w+') as fp:
json.dump(mapping, fp)
tile(image, img_width, img_height, tile_height, tile_width, outfilename, outextension, mapping, annotate if mapping else None, allxycw)
if __name__ == '__main__':
# Parser
parser = argparse.ArgumentParser(description='Takes an image file as input and writes tiles that together match the original image.')
parser.add_argument('image_in', help='File path of the image to split into tiles')
parser.add_argument('json_in', help='File path to the JSON containing objects to assign to tiles.')
parser.add_argument('output_path_prefix', help='Path and file prefix of the resulting tiles (tile numbers are filled in to complete the resulting file names)')
parser.add_argument('output_file_extension', help='File suffix of the resulting tiles (appended to output_path_prefix and tile numbers)')
parser.add_argument('tile_height', type=int, help='Height in pixels of the resulting tiles')
parser.add_argument('tile_width', type=int, help='Width in pixels of the resulting tiles')
args = parser.parse_args()
tileAnnotate(args.image_in, args.json_in, args.output_path_prefix, args.output_file_extension, args.tile_height, args.tile_width)
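    # Example invocation (hypothetical paths; 448x448 tiles as used elsewhere in this project):
    #   python tileAnnotate.py ortho.tif trees.geojson out/tile jpg 448 448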
```
#### File: webApp/src/treecount.py
```python
import argparse
import os
import sys
from tileAnnotate import tileAnnotate
def counttrees(classes, temp, darknetpath, imagefile, cfgfile, weightsfile, outputfile=None, errorfile=None):
tileAnnotate(imagefile, None, temp + "/tile", "jpg", 448, 448)
tiles = os.listdir(temp)
with open(temp + "/valid.txt", "w") as valid:
for tile in tiles:
valid.write(tile + os.linesep)
with open(temp + "/names.txt", "w") as names:
for tree in classes:
names.write(tree + os.linesep)
results = temp + "/results"
with open(temp + "/counttrees.data", "w") as f:
f.write("classes={}{}".format(len(classes), os.linesep))
f.write("valid_dir={}/{}".format(temp, os.linesep))
f.write("valid={}/valid.txt{}".format(temp, os.linesep))
f.write("names={}/names.txt{}".format(temp, os.linesep))
f.write("results={}/{}".format(results, os.linesep))
os.mkdir(results)
command = "{darknetpath}/darknet detector valid {temp}/counttrees.data {cfgfile} {weightsfile} -out {results}".format(
darknetpath=darknetpath, temp=temp, cfgfile=cfgfile, weightsfile=weightsfile, results="_")
if errorfile:
command += " 2> " + errorfile
if outputfile:
command += " > " + outputfile
code = os.system(command)
if code:
raise Exception("An error occured during the prediction proccess. Error code: " + str(int(code / 256)));
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog="treecount")
parser.add_argument("--darknetpath", required=True)
parser.add_argument("--imagefile", required=True)
parser.add_argument("--cfgfile", required=True)
parser.add_argument("--weightsfile", required=True)
parser.add_argument("--confidence", required=False, type=float, default=.5)
args = parser.parse_args()
results = counttrees([ "coconut", "banana", "mango", "papaya" ], args.darknetpath, args.imagefile, args.cfgfile, args.weightsfile)
```
#### File: webApp/src/visual.py
```python
import os
from PIL import Image, ImageDraw, ImageFont
from collections import defaultdict
def untile(classes, temp, width, height, threshold):
predictions = []
for cl in classes:
f = '{temp}/results/_{f}.txt'.format(temp=temp, f=cl)
if os.path.isfile(f):
predictions += [[cl] + line.strip().split(' ') for line in open(f)]
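            # Each result line is assumed to follow darknet's detection output
            # "<image-id> <confidence> <x1> <y1> <x2> <y2>"; prefixing the class name
            # gives p[0]=class, p[1]=image, p[2]=confidence and p[3:7]=box corners,
            # which is how the fields are indexed below.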
untiled_predictions = []
result = Image.new("RGB", (width, height))
draw = ImageDraw.Draw(result)
dull = defaultdict(lambda : (196,196,196))
bright = defaultdict(lambda : (255,255,255))
dull['banana'] = (196,196,0)
bright['banana'] = (255,255,0)
i = 0
y = 0
while os.path.isfile('{temp}/tile.{i}.0.jpg'.format(temp=temp, i=i)):
j = 0
x = 0
while os.path.isfile('{temp}/tile.{i}.{j}.jpg'.format(temp=temp, i=i, j=j)):
tile = Image.open('{temp}/tile.{i}.{j}.jpg'.format(temp=temp, i=i, j=j))
dx, dy = tile.size
result.paste(tile, (x, y, x + dx, y + dy))
for line in ((float(p[3]), float(p[4]), float(p[5]), float(p[6]), p[0], float(p[2])) for p in predictions if p[1] == 'tile.{i}.{j}.jpg'.format(i=i, j=j) and float(p[2]) >= threshold):
untiled_predictions.append({ 'tree' : line[4], 'confidence' : line[5], 'box' : (x+line[0], y+line[1], x+line[2], y+line[3]) })
label = "{confidence:.0%} {cl}".format(cl=line[4], confidence=float(line[5]))
draw.line((x+line[0], y+line[1], x+line[0], y+line[3]), fill=dull[line[4]], width=1)
draw.line((x+line[0], y+line[1], x+line[2], y+line[1]), fill=bright[line[4]], width=2)
draw.line((x+line[0], y+line[3], x+line[2], y+line[3]), fill=dull[line[4]], width=1)
draw.line((x+line[2], y+line[1], x+line[2], y+line[3]), fill=dull[line[4]], width=1)
draw.text((x+line[0] + 3, y+line[1]), label, fill=bright[line[4]])
j += 1
x += dx
i += 1
y += dy
return untiled_predictions
``` |
{
"source": "jillianhenderson/the-angry-user-experiment",
"score": 3
} |
#### File: jillianhenderson/the-angry-user-experiment/BiometricFeatures.py
```python
import pandas as pd
import seaborn as sns
import time as clk
import calendar as cal
import numpy as np
import os
import re
import traceback as trace
from datetime import datetime,timedelta
import math as mt
import matplotlib.pyplot as plt
import numpy as np
def validateData(aColumn):
if len(aColumn) > 0:
valid = True
else:
valid = False
return valid
def print_full(x):
pd.set_option('display.max_rows', len(x))
    print(x)
pd.reset_option('display.max_rows')
def rollingMean(df,freq):
## Function based on python reply from stackoverflow (user2689410's originally posted Aug. 27, 2013)
def f(x):
#dslice = col[x-pd.datetools.to_offset(freq).delta/2+timedelta(0,0,1):
# x+pd.datetools.to_offset(freq).delta/2]
dslice = col[x-pd.datetools.to_offset(freq).delta+timedelta(0,0,1):x]
return dslice.mean()
data = df.copy()
dfRS = pd.DataFrame()
idx = pd.Series(data.index.to_pydatetime(), index=data.index)
for colname, col in data.iteritems():
rollingMean = idx.apply(f)
rollingMean.name = "Rolling Mean"
dfRS = dfRS.join(rollingMean,how='outer')
return dfRS
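# Example (hypothetical data): rollingMean(latencyDF[["latency"]], "1S") returns, for
# each timestamp, the mean of the samples in the trailing one-second window ending at
# that timestamp; rollingCount below does the same with a count instead of a mean.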
def rollingCount(df,freq):
## Function based on python reply from stackoverflow (user2689410's originally posted Aug. 27, 2013)
def f(x):
#dslice = col[x-pd.datetools.to_offset(freq).delta/2+timedelta(0,0,1):
# x+pd.datetools.to_offset(freq).delta/2]
dslice = col[x-pd.datetools.to_offset(freq).delta+timedelta(0,0,1):x]
return dslice.count()
data = df.copy()
dfRS = pd.DataFrame()
idx = pd.Series(data.index.to_pydatetime(), index=data.index)
for colname, col in data.iteritems():
rollingCount = idx.apply(f)
rollingCount.name = "Rolling Count"
dfRS = dfRS.join(rollingCount,how='outer')
return dfRS
##-----------------------------------------
## KEYBOARD DYNAMICS
##-----------------------------------------
def getRidOfDoubleDown(df):
dfKey = df.copy()
doubleDown = []
lastrow = 1
for row in dfKey.iterrows():
try:
## Keep the first instance the key is pressed down and the first instance the key is Up.
if (row[1]["pressed"] == 0 and lastrow==0) or (row[1]["pressed"] == 1 and lastrow==1):
doubleDown.append(row[0])
lastrow = row[1]["pressed"]
except:
lastrow = row[1]["pressed"]
#print row
trace.print_exc()
for i in doubleDown:
dfKey = dfKey.drop(i)
labelN = dfKey.index[-1]
if dfKey["pressed"].loc[labelN] == 0:
#print ' Key ending in Down[0] position ==> drop it.'
dfKey = dfKey.drop(labelN)
## COUNT: ##
dfCounts = dfKey.groupby(["pressed"]).count()
if dfCounts.loc[0]["time"]==dfCounts.loc[1]["time"]:
#print ' ',dfCounts.loc[0]["time"],dfCounts.loc[1]["time"]
#print ' *** SUCCESS ****'
return dfKey
else:
#print ' ',dfCounts.loc[0]["time"],dfCounts.loc[1]["time"]
#print ' *** Could not get rid of Double Down! ***'
dfKey = getRidOfDoubleDown(dfKey)
return dfKey
def getKeyDuration(df):
countDF = df.groupby(["keycode","pressed"]).count()
codes = sorted(df.groupby("keycode").groups.keys())
pieces = []
## for each keycode, check double-down and calculate key duration:
for code in codes:
rule = df["keycode"]==code
if not countDF.loc[code,0]["time"]==countDF.loc[code,1]["time"]:
#print "Counts not equal! keycode: ",code," counts: ",countDF.loc[code,0]["time"]," ",countDF.loc[code,1]["time"]
## If first pressed value == 1, eliminate it. Then Get rid of double down for that key code ##
dff = getRidOfDoubleDown(df[rule])
else:
dff = df[rule]
ruleDown = dff['pressed']==0
ruleUp = dff['pressed']==1
dfDown = dff[ruleDown][["time"]]
dfUp = dff[ruleUp][["time","key type","key","key colour"]]
downtime = [float(int(dfUp["time"].loc[i])-int(dfDown["time"].loc[j]))/1000.0 for i,j in zip(dfUp.index,dfDown.index)]
dateclock = [pd.to_datetime(c,unit='ms') for c in dfUp["time"]]
pieces.append(pd.DataFrame({
"duration": pd.Series(downtime,index=dateclock),
"keycode": pd.Series([code for d in dateclock],index=dateclock),
"key type": pd.Series([t for t in dfUp["key type"]],index=dateclock),
"key": pd.Series([k for k in dfUp["key"]],index=dateclock),
"key colour": pd.Series([k for k in dfUp["key colour"]],index=dateclock)
}))
durationDF = pd.concat(pieces)
durationDF.index = pd.DatetimeIndex(durationDF.index)
durationDF.index.names = ["DateTime"]
return durationDF
def getKeyLatency(df):
countDF = df.groupby(["keycode","pressed"]).count()
codes = sorted(df.groupby("keycode").groups.keys())
pieces = []
## for each keycode, check double-down and calculate key duration:
for code in codes:
rule = df["keycode"]==code
if not countDF.loc[code,0]["time"]==countDF.loc[code,1]["time"]:
#print "Counts not equal! keycode: ",code," counts: ",countDF.loc[code,0]["time"]," ",countDF.loc[code,1]["time"]
## If first pressed value == 1, eliminate it. Then Get rid of double down for that key code ##
dff = getRidOfDoubleDown(df[rule])
else:
dff = df[rule]
pieces.append(dff)
latencyDF = pd.concat(pieces)
latencyDF = latencyDF.sort()
tdeltas = np.diff(latencyDF.index.values)
tdeltas = np.insert(tdeltas,0,np.timedelta64(0,'ns'))
latencyDF['latency'] = pd.Series([td/np.timedelta64(1,'s') for td in tdeltas],index=latencyDF.index)
latencyDF.index = pd.DatetimeIndex(latencyDF.index)
latencyDF.index.names = ["DateTime"]
return latencyDF
def getMouseDynamics(data):
ddx,ddy,ddt = 1,1,1 #minimal pixel movements
indices = [c for c in data.index]
xPosition = [d for d in data['x']]
yPosition = [d for d in data['y']]
mouseClock = [d for d in data['time']]
    ## CHECK FOR MOUSE SPEED FASTER THAN RECORDED:
dx =[(xPosition[i]-xPosition[i-1]) if i>0 else 0 for i in range(len(mouseClock))]
dy = [(yPosition[i]-yPosition[i-1]) if i>0 else 0 for i in range(len(mouseClock))]
ds = [np.sqrt(float(dx[i])**2 + float(dy[i])**2) for i in range(len(mouseClock))]
dt = [(mouseClock[i]-mouseClock[i-1]) if i>0 else 0 for i in range(len(mouseClock))]
xPos,yPos,clock,dateclock = [],[],[],[]
for x,y,s,delt,t,d in zip(xPosition,yPosition,ds,dt,mouseClock,indices):
if delt==0 and s>0:
continue
#print 'Skip row: ',x,y,s,t,d
else:
xPos.append(x)
yPos.append(y)
clock.append(t)
dateclock.append(d)
## COMPUTE DYNAMICS:
N = len(dateclock)
    if N != len(data): print("Eliminated " + str(len(data)-N) + " data points where the speed of the mouse was faster than the key-capture program could record.")
dx = [float(xPos[i]-xPos[i-1]) if not i==0 else 0.0 for i in range(N)]
dy = [float(yPos[i]-yPos[i-1]) if not i==0 else 0.0 for i in range(N)]
ds = [np.sqrt(x**2 + y**2) for x,y in zip(dx,dy)]
dt = [float(clock[i]-clock[i-1]) if not i==0 else 0.0 for i in range(N)]
theta = [np.arctan(y/x) if x > 0 else 0.0 for x,y in zip(dx,dy)]
dtheta = [(theta[i]-theta[i-1]) if i>0 else 0.0 for i in range(N)]
curve = [th/s if s > 0 else 0.0 for th,s in zip(dtheta,ds)]
dcurve = [(curve[i]-curve[i-1]) if i>0 else 0.0 for i in range(N)]
vx = [x/t if t > 0 else 0.0 for x,t in zip(dx,dt)]
vy = [y/t if t > 0 else 0.0 for y,t in zip(dy,dt)]
vel = [np.sqrt(x**2 + y**2) for x,y in zip(vx,vy)]
vdot = [(vel[i] - vel[i-1])/dt[i] if (i > 1 and dt[i] > 0) else 0.0 for i in range(N)]
jerk = [(vdot[i] - vdot[i-1])/dt[i] if (i > 1 and dt[i] > 0) else 0.0 for i in range(N)]
omega = [th/t if t > 0 else 0.0 for th,t in zip(dtheta,dt)]
## Add data to mouse dynamics pandas dataframe:
mdDict = {
"x" : pd.Series(xPos,index=dateclock),
"y" : pd.Series(yPos,index=dateclock),
"dx" : pd.Series(dx,index=dateclock),
"dy" : pd.Series(dy,index=dateclock),
"ds" : pd.Series(ds,index=dateclock),
"dt" : pd.Series(dt,index=dateclock),
"theta" : pd.Series(theta,index=dateclock),
"dtheta" : pd.Series(dtheta,index=dateclock),
"curve" : pd.Series(curve,index=dateclock),
"dcurve" : pd.Series(dcurve,index=dateclock),
"vx" : pd.Series(vx,index=dateclock),
"vy" : pd.Series(vy,index=dateclock),
"v" : pd.Series(vel,index=dateclock),
"a" : pd.Series(vdot,index=dateclock),
"jerk" : pd.Series(jerk,index=dateclock),
"w" : pd.Series(omega,index=dateclock)
}
mdDF = pd.DataFrame(mdDict)
mdDF.index = pd.DatetimeIndex(mdDF.index)
mdDF.index.names = ["DateTime"]
return mdDF
## PRINTING FUNCTIONS ##
def printKeyTypeDNA(keyGroups,keydataDF,outputFile="",printIt=False):
sns.set_style("white")
labelDict = {'fontsize': 16, 'weight' : 'roman'}
fig,ax = plt.subplots(figsize=(20,5))
for g in keyGroups.groups:
colours = [c for c in keyGroups.get_group(g)['key colour']]
x = [i for i in keyGroups.get_group(g)['keycode'].index]
y = [k for k in keyGroups.get_group(g)['keycode']]
ax.scatter(x,y,s=30,marker='o',c=colours,linewidths=0,alpha=0.5,label=g)
box = ax.get_position()
ax.set_position([box.x0,box.y0,box.width*0.8,box.height])
ax.set_xlim(keydataDF.index[0],keydataDF.index[-1])
ax.legend(loc='center left',bbox_to_anchor=(1,0.5),fancybox=True)
ax.set_ylabel("keycode",fontdict=labelDict)
ax.set_xlabel("clock",fontdict=labelDict)
plt.show()
if printIt:
fig.savefig(outputFile,format='png',dpi=256)
plt.close(fig)
plt.clf()
return
def printKeyTypeDNAwithLabels(keyGroups,keydataDF,labelsDF,outputFile="",printIt=False):
sns.set_style("white")
labelDict = {'fontsize': 16, 'weight' : 'roman'}
fig,ax = plt.subplots(figsize=(18,10))
for g in keyGroups.groups:
colours = [c for c in keyGroups.get_group(g)['key colour']]
x = [i for i in keyGroups.get_group(g)['keycode'].index]
y = [k for k in keyGroups.get_group(g)['keycode']]
#ax.scatter(x,y,s=100,marker='|',c=colours,linewidths=1,alpha=0.8,label=g)
ax.scatter(x,y,s=30,marker='o',c=colours,linewidths=0,alpha=0.5,label=g)
colours = sns.color_palette("GnBu_d",len(labelsDF))
for n,(d,l) in enumerate(zip(labelsDF.index,labelsDF['label'])):
ax.plot([d,d],[0,225],color=colours[n],linewidth=3,alpha=0.5,label=l)
box = ax.get_position()
ax.set_position([box.x0,box.y0+box.height*0.7,box.width,box.height*0.3])
ax.set_xlim(keydataDF.index[0],keydataDF.index[-1])
ax.legend(loc='upper center',bbox_to_anchor=(0.5,-0.4))
ax.set_ylabel("keycode",fontdict=labelDict)
ax.set_xlabel("clock",fontdict=labelDict)
plt.show()
if printIt:
fig.savefig(outputFile,format='png',dpi=256)
plt.close(fig)
plt.clf()
return
def printKeyTypeDNAwithActiveRanges(keyGroups,keydataDF,activeRanges,outputFile="",printIt=False):
sns.set_style("white")
labelDict = {'fontsize': 16, 'weight' : 'roman'}
fig,ax = plt.subplots(figsize=(18,5))
for g in keyGroups.groups:
colours = [c for c in keyGroups.get_group(g)['key colour']]
x = [i for i in keyGroups.get_group(g)['keycode'].index]
y = [k for k in keyGroups.get_group(g)['keycode']]
ax.scatter(x,y,s=30,marker='o',c=colours,linewidths=0,alpha=0.5,label=g)
for (x1,x2) in activeRanges:
ax.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.1)
ax.legend()
ax.set_xlim(keydataDF.index[0],keydataDF.index[-1])
ax.set_ylabel("keycode",fontdict=labelDict)
ax.set_xlabel("clock",fontdict=labelDict)
plt.show()
if printIt:
fig.savefig(outputFile,format='png',dpi=256)
plt.close(fig)
plt.clf()
return
def printKeyLatency(latencyDF,labelsDF,activeRanges,outputFile="",printIt=False):
rule = latencyDF["pressed"]==0
labelDict = {'fontsize': 16, 'weight' : 'roman'}
sns.set_style("white")
fig,(ax2,ax1,ax) = plt.subplots(nrows=3,ncols=1,figsize=(18,15),sharex=True)
## Plot key latency ##
xdata = latencyDF[rule].index
ydata = latencyDF[rule]["latency"]
colours = [c for c in latencyDF[rule]['key colour']]
ax.scatter(xdata,ydata,s=20,c=colours,linewidths=0,alpha=0.8)
ax.set_xlim(latencyDF.index[0],latencyDF.index[-1])
ax.plot([latencyDF.index[0],latencyDF.index[-1]],[latencyDF["latency"].quantile(0.01),latencyDF["latency"].quantile(0.01)],color='k',
            linewidth=2,linestyle="--",alpha=0.5,label="1 percent of data below this line")
colours = sns.color_palette("GnBu_d",len(labelsDF))
for n,(d,l) in enumerate(zip(labelsDF.index,labelsDF['label'])):
ax.plot([d,d],[0,3],color=colours[n],linewidth=3,alpha=0.5,label=l)
ax.text(d-timedelta(seconds=1),0.15,str(n),fontsize=12,color='grey',weight="bold",horizontalalignment='right',verticalalignment='bottom')
ax.set_ylabel("key latency (s)",fontdict=labelDict)
box = ax.get_position()
ax.set_position([box.x0,box.y0+box.height*0.3,box.width,box.height*0.7])
ax.legend(loc='upper center',bbox_to_anchor=(0.5,-0.3))
ax.set_ylim(-0.01,1.0)
## HIGHLIGHT CALM TYPING ##
x1,x2 = labelsDF.iloc[1]["Key Data Ranges"]
ax.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
ax1.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
ax2.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
## HIGHLIGHT RAPID TYPING UNDER PRESSURE##
x1,x2 = labelsDF.iloc[13]["Key Data Ranges"]
ax.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
ax1.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
ax2.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
## Active Ranges ##
for (x1,x2) in activeRanges:
## Rolling mean:
freq = "1S"
roll = rollingMean(latencyDF[rule][x1+timedelta(seconds=1):x2][["latency"]],freq)
roll=roll.fillna(0.0)
rollY = [y for y in roll["Rolling Mean"]]
xdata = [i for i in roll.index]
ax1.plot(xdata,rollY,color='r',linewidth=3,alpha=0.2)
## Rolling count:
count = rollingCount(latencyDF[rule][x1+timedelta(seconds=1):x2][["latency"]],freq)
count=count.fillna(0.0)
countY = [y for y in count["Rolling Count"]]
xdata = [i for i in count.index]
ax2.plot(xdata,countY,color='g',linewidth=2,alpha=0.2)
ax1.text(xdata[-2],8.,"rolling 1-second MEAN",fontsize=12,color='grey',weight="bold",horizontalalignment='right',verticalalignment='bottom')
ax1.set_ylim(0,10)
ax2.text(xdata[-2],17.,"rolling 1-second COUNT",fontsize=12,color='grey',weight="bold",horizontalalignment='right',verticalalignment='bottom')
ax2.set_ylim(0,20)
plt.show()
if printIt:
fig.savefig(outputFile,format='png',dpi=256)
plt.close(fig)
plt.clf()
return
def printKeyDuration(durationDF,labelsDF,activeRanges,outputFile="",printIt=False):
keyGroups = durationDF.groupby('key type')
labelDict = {'fontsize': 16, 'weight' : 'roman'}
sns.set_style("white")
fig,(ax2,ax1,ax) = plt.subplots(nrows=3,ncols=1,figsize=(18,15),sharex=True)
for g in keyGroups.groups:
colours = [c for c in keyGroups.get_group(g)['key colour']]
x = [i for i in keyGroups.get_group(g)['duration'].index]
y = [k for k in keyGroups.get_group(g)['duration']]
ax.scatter(x,y,s=30,marker='o',c=colours,linewidths=0,alpha=0.5,label=g)
ax.plot([durationDF.index[0],durationDF.index[-1]],[durationDF["duration"].quantile(0.01),durationDF["duration"].quantile(0.01)],
            color='k',linewidth=2,linestyle="--",alpha=0.5,label="1 percent of data below this line")
colours = sns.color_palette("GnBu_d",len(labelsDF))
for n,(d,l) in enumerate(zip(labelsDF.index,labelsDF['label'])):
ax.plot([d,d],[0,3],color=colours[n],linewidth=3,alpha=0.5,label=l)
ax.text(d-timedelta(seconds=1),0.15,str(n),fontsize=12,color='grey',weight="bold",horizontalalignment='right',verticalalignment='bottom')
box = ax.get_position()
ax.set_position([box.x0,box.y0+box.height*0.3,box.width,box.height*0.7])
ax.legend(loc='upper center',bbox_to_anchor=(0.5,-0.3))
ax.set_xlim(durationDF.index[0],durationDF.index[-1])
ax.set_ylim(0,0.2)
ax.set_ylabel("key duration (s)",fontdict=labelDict)
## HIGHLIGHT CALM TYPING ##
x1,x2 = labelsDF.iloc[1]["Key Data Ranges"]
ax.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
ax1.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
ax2.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
## HIGHLIGHT RAPID TYPING UNDER PRESSURE##
x1,x2 = labelsDF.iloc[13]["Key Data Ranges"]
ax.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
ax1.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
ax2.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="skyblue",alpha=0.2)
## HIGHLIGHT KEYBOARD SMASH##
x1,x2 = labelsDF.iloc[10]["Key Data Ranges"]
ax.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="grey",alpha=0.2)
ax1.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="grey",alpha=0.2)
ax2.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="grey",alpha=0.2)
x1,x2 = labelsDF.iloc[11]["Key Data Ranges"]
ax.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="grey",alpha=0.2)
ax1.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="grey",alpha=0.2)
ax2.fill_betweenx(y=[0,255],x1=[x1,x1],x2=[x2,x2],color="grey",alpha=0.2)
## Rolling mean:
freq = "1S"
roll = durationDF[["duration"]].resample(freq)
roll = roll.fillna(0.0)
rollY = [y for y in roll["duration"]]
xdata = [i for i in roll.index]
ax1.plot(xdata,rollY,color='r',linewidth=3,alpha=0.2)
## Rolling count:
count = durationDF[["duration"]].resample(freq,how="count")
count = count.fillna(0.0)
countY = [y for y in count["duration"]]
ax2.plot(xdata,countY,color='g',linewidth=3,alpha=0.2)
ax1.text(durationDF.index[-2],2.5,"1-second resample MEAN",fontsize=12,color='grey',weight="bold",horizontalalignment='right',verticalalignment='bottom')
ax1.set_ylim(0,3)
ax1.set_xlim(durationDF.index[0],durationDF.index[-1])
ax2.text(durationDF.index[-2],12.,"1-second resample COUNT",fontsize=12,color='grey',weight="bold",horizontalalignment='right',verticalalignment='bottom')
ax2.set_ylim(0,15)
ax2.set_xlim(durationDF.index[0],durationDF.index[-1])
plt.show()
if printIt:
fig.savefig(outputFile,format='png',dpi=256)
plt.close(fig)
plt.clf()
return
def printmouseVelocity(mousedataDF,keydataDF,vDF,labelsDF,outputFile="",printIt=False):
sns.set_style("white")
labelDict = {'fontsize': 16, 'weight' : 'roman'}
td = timedelta(seconds=30)
N = len(labelsDF)
fig,axes = plt.subplots(nrows=N,ncols=2,figsize=(18,N*5))
axes=fig.axes
axi=0
ranges = [(labelsDF.index[i-1],labelsDF.index[i]) if i > 0 else (mousedataDF.index[0],labelsDF.index[i]) for i in range(len(labelsDF))]
for r,l in zip(ranges,labelsDF['label']):
# PLOT MOUSE POSITION AND CLICK ##
mdata = mousedataDF[r[0]:r[1]]
colours = sns.color_palette('cubehelix',len(mdata))
ax=axes[axi]
xdata = [x for x in mdata['x']]
ydata = [-y for y in mdata['y']]
ax.scatter(xdata,ydata,s=20,c=colours,alpha=0.8,linewidth=0)
# MOUSE CLICK INFO ##
kdata = keydataDF[r[0]:r[1]]
left = kdata['key']=='left click'
right = kdata['key']=='right click'
for i in kdata[left].index:
if len(mdata[:i])>0:
x = mdata[:i]['x'][-1]
y = mdata[:i]['y'][-1]
ax.scatter(x,-y,s=40,c='w',alpha=0.8,marker="D",linewidth=1)
for i in kdata[right].index:
if len(mdata[:i])>0:
x = mdata[:i]['x'][-1]
y = mdata[:i]['y'][-1]
ax.scatter(x,-y,s=40,c='k',alpha=0.8,marker="D",linewidth=1)
# WRITE LABELS ##
x1,x2 = ax.get_xlim()
y1,y2 = ax.get_ylim()
ax.set_ylim(y1,y2*1.1)
ax.text(x1,y1+((y2-y1)*0.05),l[:65],fontsize=12,color='k',weight="bold",horizontalalignment='left',verticalalignment='bottom')
ax.text(x1,y1,l[80:],fontsize=12,color='k',weight="bold",horizontalalignment='left',verticalalignment='bottom')
axi+=1
# PLOT MOUSE VELOCITY ##
ax=axes[axi]
vdata = vDF[r[0]:r[1]]
xdata = [i for i in vdata.index]
ydata = [v for v in vdata['vnorm']]
ax.plot(xdata,ydata,color='k',linewidth=1,alpha=0.8)
#ax.set_yscale(u'log')
ax.set_ylabel("Mouse Velocity",fontdict=labelDict)
ax.set_xlabel("clock: 30-second span",fontdict=labelDict)
if len(xdata)>0:
x1 = xdata[0]
else:
x1 = r[0]
ax.set_xlim(x1,x1+td)
ax.set_ylim(0.0,0.5)
axi+=1
plt.show()
if printIt:
fig.savefig(outputFile,format='png',dpi=256)
plt.close(fig)
plt.clf()
return
``` |
{
"source": "jilljenn/edm2016",
"score": 2
} |
#### File: edm2016/rnn_prof/cli.py
```python
from __future__ import division
import click
import numpy as np
import pickle
from . import run_irt
from . import run_rnn
from .cliutils import (CommonOptionGroup, ensure_directory_callback, logging_callback,
valid_which_fold, require_value_callback)
from .data.assistments import SKILL_ID_KEY, PROBLEM_ID_KEY, TEMPLATE_ID_KEY
from .data.constants import USER_IDX_KEY, SINGLE
from .data.kddcup import KC_NAME_STARTS_WITH, PROBLEM_NAME, STEP_NAME
from .data.splitting_utils import split_data
from .data.wrapper import load_data, DataOpts
# Setup common options
common_options = CommonOptionGroup()
# System options
common_options.add('--log-level', '-l', type=click.Choice(['warn', 'info', 'debug']),
default='info', help="Set the logging level", extra_callback=logging_callback)
common_options.add('--seed', '-r', type=int, default=0,
help="Random number seed for data splitting and model initialization")
# Data options
common_options.add('--remove-skill-nans/--no-remove-skill-nans', is_flag=True, default=False,
help="Remove interactions from the data set whose skill_id column is NaN. "
"This will occur whether or not the item_id_col is skill_id")
common_options.add('--item-id-col', type=str, nargs=1,
help="(Required) Which column should be used for identifying items from the "
"dataset. Depends on source as to which names are valid.",
extra_callback=require_value_callback((SKILL_ID_KEY, PROBLEM_ID_KEY,
TEMPLATE_ID_KEY, SINGLE,
KC_NAME_STARTS_WITH, PROBLEM_NAME,
STEP_NAME)))
common_options.add('--drop-duplicates/--no-drop-duplicates', default=True,
help="Remove duplicate interactions: only the first row is retained for "
"duplicate row indices in Assistments")
common_options.add('--max-inter', '-m', type=int, default=0, help="Maximum interactions per user",
extra_callback=lambda ctx, param, value: value or None)
common_options.add('--min-inter', type=int, default=2,
help="Minimum number of interactions required after filtering to retain a user",
extra_callback=lambda ctx, param, value: value or None)
common_options.add('--proportion-students-retained', type=float, default=1.0,
help="Proportion of user ids to retain in data set (for testing sensitivity "
"to number of data points). Default is 1.0, i.e., all data retained.")
# Learning options
common_options.add('--num-folds', '-f', type=int, nargs=1, default=5,
help="Number of folds for testing.", is_eager=True)
common_options.add('--which-fold', type=int, nargs=1, default=None, extra_callback=valid_which_fold,
help="If you want to parallelize folds, run several processes with this "
"option set to run a single fold. Folds are numbered 1 to --num-folds.")
# Reporting options
common_options.add('--output', '-o', default='rnn_result',
help="Where to store the pickled output of training",
extra_callback=ensure_directory_callback)
@click.group(context_settings={'help_option_names': ['-h', '--help']})
def cli():
""" Collection of scripts for evaluating RNN proficiency models """
pass
@cli.command('rnn')
@click.argument('source')
@click.argument('data_file')
@click.option('--compress-dim', '-d', type=int, nargs=1, default=100,
help="The dimension to which to compress the input. If -1, will do no compression")
@click.option('--hidden-dim', '-h', type=int, nargs=1, default=100,
help="The number of hidden units in the RNN.")
@click.option('--output-compress-dim', '-od', type=int, nargs=1, default=None,
help="The dimension to which we should compress the output vector. "
"If not passed, no compression will occur.")
@click.option('--test-spacing', '-t', type=int, nargs=1, default=10,
help="How many iterations before running the tests?")
@click.option('--recurrent/--no-recurrent', default=True,
help="Whether to use a recurrent architecture")
@click.option('--use-correct/--no-use-correct', default=True,
help="If True, record correct and incorrect responses as different input dimensions")
@click.option('--num-iters', '-n', type=int, default=50,
help="How many iterations of training to perform on the RNN")
@click.option('--dropout-prob', '-p', type=float, default=0.0,
help="The probability of a node being dropped during training. Default is 0.0 "
"(i.e., no dropout)")
@click.option('--use-hints/--no-use-hints', default=False,
help="Should we add a one-hot dimension to represent whether a student used a hint?")
@click.option('--first-learning-rate', nargs=1, default=30.0, type=float,
help="The initial learning rate. Will decay at rate `decay_rate`. Default is 30.0.")
@click.option('--decay-rate', nargs=1, default=0.99, type=float,
help="The rate at which the learning rate decays. Default is 0.99.")
@common_options
def rnn(common, source, data_file, compress_dim, hidden_dim, output_compress_dim, test_spacing,
recurrent, use_correct, num_iters, dropout_prob, use_hints, first_learning_rate,
decay_rate):
""" RNN based proficiency estimation.
SOURCE specifies the student data source, and should be 'assistments' or 'kddcup'.
DATA_FILE is the filename for the interactions data.
"""
data_opts = DataOpts(num_folds=common.num_folds, item_id_col=common.item_id_col,
concept_id_col=None, template_id_col=None, use_correct=use_correct,
remove_skill_nans=common.remove_skill_nans, seed=common.seed,
use_hints=use_hints,
drop_duplicates=common.drop_duplicates,
max_interactions_per_user=common.max_inter,
min_interactions_per_user=common.min_inter,
proportion_students_retained=common.proportion_students_retained)
data, _, item_ids, _, _ = load_data(data_file, source, data_opts)
num_questions = len(item_ids)
data_folds = split_data(data, num_folds=common.num_folds, seed=common.seed)
run_rnn.run(data_folds, common.num_folds, num_questions, num_iters, output=common.output,
compress_dim=compress_dim, hidden_dim=hidden_dim, test_spacing=test_spacing,
recurrent=recurrent, data_opts=data_opts, dropout_prob=dropout_prob,
output_compress_dim=output_compress_dim,
first_learning_rate=first_learning_rate, decay_rate=decay_rate,
which_fold=common.which_fold)
@cli.command('irt')
@click.argument('source')
@click.argument('data_file')
@click.option('--twopo/--onepo', default=False, help="Use a 2PO model (default is False)")
@click.option('--concept-id-col', type=str, nargs=1,
help="(Required) Which column should be used for identifying "
"concepts from the dataset. Depends on source as to which names are valid. "
"If ``single``, use single dummy concept.",
callback=require_value_callback((SKILL_ID_KEY, PROBLEM_ID_KEY, SINGLE,
TEMPLATE_ID_KEY, KC_NAME_STARTS_WITH)))
@click.option('--template-id-col', type=str, default=None, nargs=1,
help="If using templates, this option is used to specify the column in the dataset "
"you are using to represent the template id")
@click.option('--template-precision', default=None, type=float, nargs=1,
help="Use template_id in IRT learning. Item means will be distributed around a "
"template mean. The precision of that distribution is the argument of this "
"parameter.")
@click.option('--item-precision', default=None, type=float, nargs=1,
help="If using a non-templated model, this is the precision of the Gaussian "
"prior around item difficulties. If using a templated model, it is the "
"precision of the template hyperprior's mean. Default is 1.0.")
@common_options
def irt(common, source, data_file, twopo, concept_id_col, template_precision,
template_id_col, item_precision):
""" Run IRT to get item parameters and compute online metrics on a held-out set of students
SOURCE specifies the student data source, and should be 'assistments' or 'kddcup'.
DATA_FILE is the filename for the interactions data.
"""
if (template_precision is None) != (template_id_col is None):
raise ValueError("template_precision and template_id_col must both be set or both be None")
data_opts = DataOpts(num_folds=common.num_folds, item_id_col=common.item_id_col,
concept_id_col=concept_id_col, template_id_col=template_id_col,
remove_skill_nans=common.remove_skill_nans,
seed=common.seed, use_correct=True,
use_hints=False, drop_duplicates=common.drop_duplicates,
max_interactions_per_user=common.max_inter,
min_interactions_per_user=common.min_inter,
proportion_students_retained=common.proportion_students_retained)
# data, _, _, _, _ = load_data(data_file, source, data_opts)
# data_folds = split_data(data, num_folds=common.num_folds, seed=common.seed)
print(common, source, data_file, twopo, concept_id_col, template_precision,
template_id_col, item_precision)
DATASET = 'movie100k-1fold'
with open('DKT/{:s}.pickle'.format(DATASET), 'rb') as f:
data_folds = pickle.load(f)
run_irt.irt(data_folds, common.num_folds, output=common.output, data_opts=data_opts,
is_two_po=twopo,
template_precision=template_precision,
single_concept=concept_id_col is None,
which_fold=common.which_fold,
item_precision=item_precision)
@cli.command('naive')
@click.argument('source')
@click.argument('data_file')
@common_options
def naive(common, source, data_file):
""" Just report the percent correct across all events.
SOURCE specifies the student data source, and should be 'assistments' or 'kddcup'.
DATA_FILE is the filename for the interactions data.
"""
data_opts = DataOpts(num_folds=common.num_folds, item_id_col=common.item_id_col,
concept_id_col=None, template_id_col=None, use_correct=True,
remove_skill_nans=common.remove_skill_nans, seed=common.seed,
use_hints=True,
drop_duplicates=common.drop_duplicates,
max_interactions_per_user=common.max_inter,
min_interactions_per_user=common.min_inter,
proportion_students_retained=common.proportion_students_retained)
data, _, _, _, _ = load_data(data_file, source, data_opts)
print("Percentage correct in data set is {}".format(data.correct.mean()))
agged = data.groupby(USER_IDX_KEY).correct.agg([np.sum, len]).reset_index()
mask = agged['sum'] <= agged['len'] // 2
    agged.loc[mask, 'sum'] = agged.loc[mask, 'len'] - agged.loc[mask, 'sum']
print("Percent correct for naive classifier is {}".format(agged['sum'].sum() /
agged['len'].sum()))
def main():
cli()
```
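For context, the commands above can be exercised with click's test runner. This is only a hedged sketch: the data file name and the 'problem_id' column value are assumptions, not values taken from the repository, and a real interactions CSV is needed for the command to produce results.
```python
# Hedged sketch: invoking the `naive` command through click's CliRunner.
# 'interactions.csv' and 'problem_id' are assumed example values.
from click.testing import CliRunner

from rnn_prof.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ['naive', 'assistments', 'interactions.csv',
                             '--item-id-col', 'problem_id', '--num-folds', '2'])
print(result.exit_code)
print(result.output)
```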
#### File: data/DKT/sandbox.py
```python
from scipy.sparse import lil_matrix, coo_matrix, save_npz, load_npz, hstack, diags
from scipy.optimize import newton, brentq
import numpy as np
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def onehotize(col, depth):
nb_events = len(col)
rows = list(range(nb_events))
return coo_matrix(([1] * nb_events, (rows, col)), shape=(nb_events, depth))
def df_to_sparse(df, config, active_features):
# Prepare sparse features
X = {}
X['users'] = onehotize(df['user_idx'], config['nb_users'])
# X['users'] = coo_matrix((len(test_data), nb_users)) # This is equivalent for the test set (all zeroes)
X['items'] = onehotize(df['item_idx'], config['nb_items'])
X['skills'] = onehotize(df['concept_idx'], config['nb_skills'])
X['wins'] = X['skills'].copy()
X['wins'].data = df['wins']
X['fails'] = X['skills'].copy()
X['fails'].data = df['fails']
X_train = hstack([X[agent] for agent in active_features]).tocsr()
y_train = df['correct'].values
return X_train, y_train
X = onehotize([1, 2, 4, 3, 2], 5)
Y = X.copy()
X.data = np.array([2, 3, 5, 9, 11])
# print(X.toarray())
# print(Y.toarray())
LAMBDA = 1e-3
def p(th, d):
print('p', th, d)
return 1 / (1 + np.exp(-(th - d)))
def dll(th, seq):
s = -2 * LAMBDA * th
for d, y in seq:
s += y - p(th, d)
return s
def f(th):
return dll(th, SEQ)
def df(th):
s = -2 * LAMBDA
for d, y in SEQ:
pj = p(th, d)
s -= pj * (1 - pj)
return s
# SEQ = [(3, 1), (4, 0)]
SEQ = [(3, 1)]
# print(newton(f, 1, fprime=df))
# print(brentq(f, -30, 30))
```
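A minimal usage sketch of the helpers above, assuming the definitions in this file are in scope: it one-hot encodes a short index column and solves the bracketed root-finding problem commented out at the bottom of the file to estimate the ability theta for SEQ = [(3, 1)].
```python
# Minimal sketch using only the functions defined above plus scipy.
from scipy.optimize import brentq

# One-hot encode a column of three indices into a 3 x 4 sparse matrix.
M = onehotize([0, 2, 1], 4)
print(M.toarray())

# Solve dll(theta, SEQ) = 0 for the single observation SEQ = [(3, 1)],
# i.e. the commented-out brentq call at the end of the file.
theta_hat = brentq(f, -30, 30)
print(theta_hat, p(theta_hat, 3))
```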
#### File: rnn_prof/irt/callbacks.py
```python
import logging
import numpy as np
from .cpd.ogive import OgiveCPD
from .metrics import LOGLI_KEY, MAP_ACCURACY_KEY, AUC_KEY, METRICS_KEYS
LOGGER = logging.getLogger(__name__)
TRAIN_LOG_POST_KEY = 'train log posterior'
ITER_KEY = 'iteration'
TEST_SUFFIX = '_TEST'
MAX_HIST_LEN = 400
MAX_HIST_SAMPLES = 50
HIST_COLOR = np.asarray([.7, .7, .7])
DEFAULT_METRICS = (ITER_KEY, TRAIN_LOG_POST_KEY, LOGLI_KEY, MAP_ACCURACY_KEY, AUC_KEY)
class ConvergenceCallback(object):
"""
Basic callback that checks if convergence conditions on all the learner's node have been met.
Optionally, print or log info statements related to convergence.
"""
def __init__(self, early_stopping=False, log_freq=0, print_freq=100, logger=None):
"""
:param int print_freq: print frequency (if 0, do not print)
:param int log_freq: log frequency (if 0, do not log)
:param bool early_stopping: Whether to stop inference if the sum of held_out nodes'
log-prob_delta's is not positive
:param Logger|None logger: optional logger to use; if not specified, use this module's
"""
self.early_stopping = early_stopping
self.print_freq = print_freq
self.log_freq = log_freq
self.logger = logger or LOGGER
def __call__(self, learner, metrics=None):
"""
:param BayesNetLearner learner: the learner
:param dict|None metrics: Metrics dictionary of depth 1 or 2
(generally structured as: {metric name: array of values}) to log/print. Logs/prints
the last element in the array of values.
:return: whether to continue learning
:rtype: bool
"""
def get_msg_vals():
msg_string = 'Iter %d: Log-Posterior: %.04f, Log10Grad: %0.4f, Log10Diff: %0.4f'
msg_vars = [learner.iter, learner.log_posterior, max_grad, max_diff]
if metrics is not None:
for mkey, mval in metrics.iteritems():
if isinstance(mval, dict):
for node_name, node_metric_val in mval.iteritems():
msg_string += ', %s %s: %%0.4f' % (mkey, node_name)
msg_vars.append(node_metric_val[-1])
else:
msg_string += ', %s: %%0.4f' % mkey
msg_vars.append(mval[-1])
return msg_string, tuple(msg_vars)
max_grad, max_diff = None, None
if self.print_freq > 0 and not learner.iter % self.print_freq:
max_grad, max_diff = self.compute_stats(learner)
print_msg, print_vars = get_msg_vals()
print_msg = '\r' + print_msg
print(print_msg % print_vars)
if self.log_freq > 0 and not learner.iter % self.log_freq:
if max_grad is None:
# compute stats if it hasn't been done yet
max_grad, max_diff = self.compute_stats(learner)
log_string, log_vars = get_msg_vals()
self.logger.info(log_string, *log_vars)
return self.is_converged(learner)
def is_converged(self, learner):
"""
:param BayesNetLearner learner: the learner
:return: whether to continue learning
:rtype: bool
"""
should_continue = not all([n.converged for n in learner.nodes.values() if not n.held_out])
if should_continue and self.early_stopping:
held_out_nodes = [n for n in learner.nodes.values() if n.held_out]
if len(held_out_nodes) == 0:
raise ValueError('There are no held out nodes so early stopping cannot work.')
log_prob_deltas = [n.log_prob_delta for n in held_out_nodes
if n.log_prob_delta is not None]
if len(log_prob_deltas) > 0:
should_continue = sum(log_prob_deltas) > 0
return should_continue
@staticmethod
def compute_stats(learner):
""" Compute the gradient and difference changes across a learner's nodes
:param BayesNetLearner learner: the IRT learner
:return: the maximum of the gradients and the maximum of the iteration-to-iteration diffs
:rtype: float, float
"""
grad_diffs = [np.abs(n.max_grad) for n in learner.nodes.values() if n.max_grad is not None]
diff_diffs = [np.abs(n.max_diff) for n in learner.nodes.values() if n.max_diff is not None]
max_grad = np.log10(np.max(grad_diffs)) if len(grad_diffs) else np.nan
max_diff = np.log10(np.max(diff_diffs)) if len(diff_diffs) else np.nan
return max_grad, max_diff
class RecordingCallback(ConvergenceCallback):
""" Callback function that records basic learning metrics. """
def __init__(self, metrics_to_record=DEFAULT_METRICS, **kwargs):
super(RecordingCallback, self).__init__(**kwargs)
self.metrics = {m: None for m in metrics_to_record}
def __call__(self, learner):
self.record_metrics(learner)
return super(RecordingCallback, self).__call__(learner, metrics=self.metrics)
def record_metrics(self, learner):
""" Record the performance metrics: iteration count, global learner log-posterior, and
the metrics specified at initialization (e.g., log-likelihood, test MAP accuracy) for
all OgiveCPD nodes.
NOTE: The latter performance metrics are dictionaries two levels deep, and should be
accessed as `callback.metrics[AUC_KEY][test_response_node.name]`.
"""
def append_metric(new_value, metric_key, node_key=None, dtype=None):
""" Helper function for appending to (possibly uninitialized) dictionary of metrics,
one (iteration count, log-posterior) or two (e.g., AUC for particular node) levels
deep."""
# initialize dicts/arrays if necessary
dtype = dtype or np.float64
if self.metrics[metric_key] is None:
init_vals = np.nan * np.empty(MAX_HIST_LEN, dtype=dtype)
self.metrics[metric_key] = init_vals if node_key is None else {node_key: init_vals}
elif node_key is not None and node_key not in self.metrics[metric_key]:
init_vals = np.nan * np.empty(MAX_HIST_LEN, dtype=dtype)
self.metrics[metric_key][node_key] = init_vals
# get dictionary element and append
if node_key is None:
metric = self.metrics[metric_key]
else:
metric = self.metrics[metric_key][node_key]
return np.append(metric[1:], new_value)
for mkey in self.metrics:
if mkey == ITER_KEY:
# write iteration count
self.metrics[mkey] = append_metric(learner.iter, mkey, dtype=int)
elif mkey == TRAIN_LOG_POST_KEY:
# write global learner log-posterior
self.metrics[mkey] = append_metric(learner.log_posterior, mkey)
elif mkey in METRICS_KEYS:
# for all other metrics, record values for each node with an OgiveCPD
for node in learner.nodes.itervalues():
if isinstance(node.cpd, OgiveCPD):
metric = node.metrics.compute_metric(mkey)
self.metrics[mkey][node.name] = append_metric(metric, mkey, node.name)
``` |
{
"source": "jilljenn/TF-recomm",
"score": 2
} |
#### File: jilljenn/TF-recomm/dataio.py
```python
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
import yaml
def build_paths(DATASET_NAME):
DATA_FOLDER = 'data'
CSV_FOLDER = os.path.join(DATA_FOLDER, DATASET_NAME)
CONFIG_FILE = os.path.join(CSV_FOLDER, 'config.yml')
CSV_TRAIN = os.path.join(CSV_FOLDER, 'train.csv')
CSV_TEST = os.path.join(CSV_FOLDER, 'test.csv')
CSV_VAL = os.path.join(CSV_FOLDER, 'val.csv')
Q_NPZ = os.path.join(CSV_FOLDER, 'qmatrix.npz')
return CSV_FOLDER, CSV_TRAIN, CSV_TEST, CSV_VAL, CONFIG_FILE, Q_NPZ
def build_new_paths(DATASET_NAME):
DATA_FOLDER = 'data'
CSV_FOLDER = os.path.join(DATA_FOLDER, DATASET_NAME)
CONFIG_FILE = os.path.join(CSV_FOLDER, 'config.yml')
CSV_ALL = os.path.join(CSV_FOLDER, 'all.csv')
print(CSV_ALL)
Q_NPZ = os.path.join(CSV_FOLDER, 'qmatrix.npz')
SKILL_WINS = os.path.join(CSV_FOLDER, 'skill_wins.npz')
SKILL_FAILS = os.path.join(CSV_FOLDER, 'skill_fails.npz')
return CSV_FOLDER, CSV_ALL, CONFIG_FILE, Q_NPZ, SKILL_WINS, SKILL_FAILS
def get_config(CONFIG_FILE):
with open(CONFIG_FILE) as f:
config = yaml.load(f)
# BATCH_SIZE = 500 # Keskar et al. 2016 ? Generalization gap
return config
def read_process(filename, sep="\t"):
# col_names = ["user", "item", "outcome"]
col_names = ["user", "item", "outcome", "wins", "fails"]
# col_names = ["users", "items", "speech", "outcome", "wins", "fails"]
df = pd.read_csv(filename, sep=sep, header=None, names=col_names, engine='python')
for col in {"user", "item"}:
df[col] = df[col].astype(np.int32)
df["outcome"] = df["outcome"].astype(np.float32)
return df
def get_data(DATASET_NAME):
CSV_FOLDER, CSV_TRAIN, CSV_TEST, CSV_VAL, CONFIG, Q_NPZ = build_paths(DATASET_NAME)
df_train = read_process(CSV_TRAIN, sep=",")
df_val = read_process(CSV_VAL, sep=",")
df_test = read_process(CSV_TEST, sep=",")
return df_train, df_val, df_test
def get_new_data(DATASET_NAME):
CSV_FOLDER, CSV_ALL, CONFIG_FILE, Q_NPZ, SKILL_WINS, SKILL_FAILS = build_new_paths(DATASET_NAME)
df = read_process(CSV_ALL, sep=",")
return df
def get_legend(experiment_args):
dim = experiment_args['d']
short = ''
full = ''
agents = ['users', 'items', 'skills', 'attempts', 'wins', 'fails', 'item_wins', 'item_fails', 'extra']
active = []
for agent in agents:
if experiment_args.get(agent):
short += agent[0] if '_' not in agent else ('W' if '_w' in agent else 'F')
active.append(agent)
short += str(dim)
prefix = ''
if set(active) == {'users', 'items'} and dim == 0:
prefix = 'IRT: '
elif set(active) == {'users', 'items'} and dim > 0:
prefix = 'MIRTb: '
elif set(active) == {'skills', 'attempts'} and dim == 0:
prefix = 'AFM: '
elif set(active) == {'skills', 'wins', 'fails'} and dim == 0:
prefix = 'PFA: '
full = prefix + ', '.join(active) + ' d = {:d}'.format(dim)
latex = prefix + ', '.join(active)#.replace('_', r'\_')
print('get_legend', dim, type(dim), dim == 0, set(active), latex)
return short, full, latex, active
def prepare_folder(path):
if not os.path.isdir(path):
os.makedirs(path)
class ShuffleIterator(object):
"""
Randomly generate batches
"""
def __init__(self, inputs, batch_size=10):
self.inputs = inputs
self.batch_size = batch_size
self.num_cols = len(self.inputs)
self.len = len(self.inputs[0])
self.inputs = np.transpose(np.vstack([np.array(self.inputs[i]) for i in range(self.num_cols)]))
def __len__(self):
return self.len
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
ids = np.random.randint(0, self.len, (self.batch_size,))
out = self.inputs[ids, :]
return [out[:, i] for i in range(self.num_cols)]
class OneEpochIterator(ShuffleIterator):
"""
Sequentially generate one-epoch batches, typically for test data
"""
def __init__(self, inputs, batch_size=10):
super(OneEpochIterator, self).__init__(inputs, batch_size=batch_size)
if batch_size > 0:
self.idx_group = np.array_split(np.arange(self.len), np.ceil(self.len / batch_size))
else:
self.idx_group = [np.arange(self.len)]
self.group_id = 0
def next(self):
if self.group_id >= len(self.idx_group):
self.group_id = 0
raise StopIteration
out = self.inputs[self.idx_group[self.group_id], :]
self.group_id += 1
return [out[:, i] for i in range(self.num_cols)]
```
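A small usage sketch for the two iterators above, with synthetic arrays standing in for the user/item/outcome columns that read_process would produce.
```python
# Hedged usage sketch for ShuffleIterator / OneEpochIterator with synthetic data.
import numpy as np

users = np.array([0, 1, 2, 3], dtype=np.int32)
items = np.array([10, 11, 10, 12], dtype=np.int32)
outcomes = np.array([1.0, 0.0, 1.0, 1.0], dtype=np.float32)

train_iter = ShuffleIterator([users, items, outcomes], batch_size=2)
batch_users, batch_items, batch_outcomes = next(train_iter)  # random batch of 2 rows

test_iter = OneEpochIterator([users, items, outcomes], batch_size=3)
for batch in test_iter:  # visits every row exactly once, in order
    print(batch)
```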
#### File: jilljenn/TF-recomm/fm_mangaki.py
```python
from config import *
from scipy.sparse import lil_matrix, save_npz, load_npz
from sklearn.metrics import roc_auc_score, mean_squared_error
import pandas as pd
import numpy as np
import dataio
import pickle
import pywFM
os.environ['LIBFM_PATH'] = '/Users/jin/code/libfm/bin/'
df_train, df_val, df_test = dataio.get_data()
def df_to_sparse(df, filename):
nb_events, _ = df.shape
X = lil_matrix((nb_events, USER_NUM + ITEM_NUM))
for i, (user_id, item_id, _) in enumerate(np.array(df)):
X[i, user_id] = 1
X[i, USER_NUM + item_id] = 1
#X[i, USER_NUM + ITEM_NUM + item_id] = nb_wins
#X[i, USER_NUM + 2 * ITEM_NUM + item_id] = nb_fails
save_npz(filename, X.tocsr())
df_to_sparse(df_train, 'X_train.npz')
print('Train done')
df_to_sparse(df_test, 'X_test.npz')
print('Test done')
X_train = load_npz('X_train.npz')
X_test = load_npz('X_test.npz')
print(X_train.shape)
print(X_test.shape)
fm = pywFM.FM(task='regression', num_iter=500, k2=20, rlog=False, learning_method='mcmc', r1_regularization=0.1, r2_regularization=0.1)
model = fm.run(X_train, df_train['outcome'], X_test, df_test['outcome'])
print(mean_squared_error(df_test['outcome'], model.predictions) ** 0.5)
print(X_test[0], df_test['outcome'][0], model.predictions[0])
bundle = {
'mu': model.global_bias,
'W': model.weights,
'V': model.pairwise_interactions
}
with open('fm.pickle', 'wb') as f:
pickle.dump(bundle, f, pickle.HIGHEST_PROTOCOL)
``` |
{
"source": "jilljenn/tryalgo.org",
"score": 3
} |
#### File: tryalgo.org/problems/yaml2html.py
```python
import yaml
import sys
# tuples:
# - nice
# - chapter
# - name
# - links
L = []
def code(url):
for c in ":/.":
url = url.replace(c, " ")
for w in url.split():
if w not in ["http", "www", "acm", "https", "code", "google", "com", "uva", "informatik", "cs"]:
return w
return "?"
for item in yaml.safe_load(open("problems.yaml").read()):
if 'broken' in item:
continue
if 'order' in item:
order = item['order']
else:
order = 4
chapter = item['chapter'].replace("_"," ")
name = item['name']
links = item['links']
L.append((order, chapter, name, links))
L.sort(key = lambda t: (t[1].lower(),t[2].lower()))
hide = len(sys.argv) > 1
if hide:
    print('<table class="sortable"><tr><th>difficulté</th><th>problème</th><th>énoncé</th></tr>')
else:
    print('<table class="sortable"><tr><th>chapitre</th><th>difficulté</th><th>problème</th><th>énoncé</th></tr>')
for order, chapter, name, links in L:
    hearts = "☆" * order
if hide:
print("<tr><td>%s</td><td>%s</td><td>" % (hearts, name))
else:
print("<tr><td>%s</td><td>%s</td><td>%s</td><td>" % (chapter, hearts, name))
for a in links:
print('<a href="%s">[%s]</a> ' % (a, code(a)))
print("</td></tr>")
print("</table>")
``` |
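The script reads a problems.yaml whose entries use the keys handled above (chapter, name, links, plus optional order and broken). A hypothetical two-entry file, parsed the same way the script does, could look like this:
```python
# Hypothetical minimal problems.yaml matching the fields yaml2html.py reads.
import yaml

sample = """
- chapter: graphes
  name: Shortest path
  order: 2
  links:
    - https://www.spoj.com/problems/SHPATH/
- chapter: tri
  name: Broken example
  broken: true
  links: []
"""
for item in yaml.safe_load(sample):
    print(item['chapter'], item.get('order', 4), item['name'], item['links'])
```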
{
"source": "jillmnolan/cascade-server",
"score": 2
} |
#### File: app/cascade/attack.py
```python
from mongoengine.document import Document, EmbeddedDocument
from mongoengine.fields import DictField, StringField, ListField, URLField, ReferenceField, IntField
from data_model.event import DataModelEvent
import requests
attack_url = 'https://attack.mitre.org'
proxies = {}
class AttackTactic(Document):
name = StringField()
url = URLField()
description = StringField()
order = IntField()
class AttackTechnique(Document):
tactics = ListField(ReferenceField(AttackTactic))
technique_id = StringField(unique=True)
name = StringField()
description = StringField()
url = URLField()
class TechniqueMapping(EmbeddedDocument):
LOW = 'Low'
MODERATE = 'Moderate'
HIGH = 'High'
technique = ReferenceField(AttackTechnique)
tactics = ListField(ReferenceField(AttackTactic))
# Likely will ignore this field
level = StringField(choices=[LOW, MODERATE, HIGH])
# tactics should be a SET, but we are not strictly enforcing that for now
class TacticSet(Document):
tactics = ListField(ReferenceField(AttackTactic))
def intersect(self, other, new=False):
if isinstance(other, TacticSet):
tactics = other.tactics
else:
tactics = other
result = {tactic for tactic in self.tactics if tactic in tactics}
if new:
return TacticSet(tactics=result)
else:
return result
def is_subset(self, other):
if isinstance(other, TacticSet):
tactics = other.tactics
else:
tactics = other
return all(tactic in self.tactics for tactic in tactics)
def refresh_attack():
params = dict(action='ask', format='json')
params['query'] = """
[[Category:Tactic]]
|?Has description
"""
tactic_results = requests.get("{}/{}".format(attack_url, 'api.php'), proxies=proxies, params=params, verify=False).json()
tactics = {}
for page, result in tactic_results['query']['results'].items():
name = result['fulltext']
tactic = AttackTactic.objects(name=name).first()
if tactic is None:
tactic = AttackTactic(name=name)
tactic.url = result['fullurl']
tactic.description = result['printouts']['Has description'][0]
tactic.save()
tactics[tactic.name] = tactic
params['query'] = """
[[Category:Technique]]
|?Has tactic
|?Has ID
|?Has display name
|?Has technical description
|limit=9999
"""
technique_results = requests.get("{}/{}".format(attack_url, 'api.php'), proxies=proxies, params=params, verify=False).json()
for page, result in technique_results['query']['results'].items():
technique_id = result['printouts']['Has ID'][0]
technique = AttackTechnique.objects(technique_id=technique_id).first()
if technique is None:
technique = AttackTechnique(technique_id=technique_id)
technique.name = result['printouts']['Has display name'][0]
technique.url = result['fullurl']
technique.tactics = [tactics[_['fulltext']] for _ in result['printouts']['Has tactic']]
technique.description = result['printouts']['Has technical description'][0]
technique.save()
```
#### File: cascade/query_layers/base.py
```python
from ..data_model.query import QueryTerm, Operation
from ..data_model.event import DataModelQuery
from ..analytics import CascadeAnalytic, AnalyticReference
from ..data_model.parser import lift_query
import logging
from mongoengine import Document, StringField, ReferenceField, EmbeddedDocument
logger = logging.getLogger(__name__)
class DatabaseInfo(Document):
database_type = "BaseDatabase"
name = StringField(required=True, unique=True)
meta = {'abstract': False, 'allow_inheritance': True}
def add_user(self, **kwargs):
raise NotImplementedError()
@classmethod
def get_schemas(cls):
schemas = []
for subcls in cls.__subclasses__():
fields = {k: {'type': type(v).__name__, 'default': (None if hasattr(v.default, '__call__') else v.default)}
for k, v in subcls._fields.items()}
fields.pop('_cls')
fields.pop('id')
schemas.append({'_cls': subcls._class_name, 'fields': fields, 'name': subcls.database_type})
return schemas
class UserDatabaseInfo(EmbeddedDocument):
meta = {'abstract': True, 'allow_inheritance': True}
database = ReferenceField(DatabaseInfo)
def login(self):
""" :rtype: DataModelQueryLayer """
raise NotImplementedError
@classmethod
def get_schemas(cls):
return [{'_cls': subcls._class_name,
'fields': {k: {'type': type(v).__name__, 'default': v.default} for k, v in subcls._fields.items()},
'name': subcls.database_type} for subcls in cls.__subclasses__()]
class QueryError(Exception):
pass
class DataModelQueryLayer(object):
_cache_dir = 'cache'
_missing_cache = False
platform = 'Data Model AST'
@classmethod
def get_data_model(cls, expression):
""" :return (DataModelEventMeta | DataModelEvent, str): """
if isinstance(expression, DataModelQuery):
return expression.object, expression.action
elif isinstance(expression, (CascadeAnalytic, AnalyticReference)):
return cls.get_data_model(expression.query)
elif isinstance(expression, Operation):
event_type = None
event_action = None
for term in expression.terms:
try:
term_object, term_action = cls.get_data_model(term)
except QueryError:
# if there is no term item, then just skip it
continue
if term_object is None and term_action is None:
continue
if (event_type and term_object != event_type) or (event_action and term_action != event_action):
                    raise QueryError("{} mismatch".format(DataModelQuery.__name__))
event_type = term_object
event_action = term_action
if event_type is None and event_action is None:
raise QueryError("Unable to identify data model event")
return event_type, event_action
else:
raise QueryError(expression)
@classmethod
def optimize(cls, expression, dereference=False):
try:
optimized = cls._optimize(expression, dereference=dereference)
except QueryError:
return expression
try:
event_type, event_action = cls.get_data_model(expression)
optimized = DataModelQuery(event_type, event_action, query=optimized)
except QueryError:
pass
finally:
return optimized
@classmethod
def _optimize(cls, expression, dereference=False):
if isinstance(expression, (CascadeAnalytic, AnalyticReference)) and dereference:
return cls._optimize(expression.query, dereference=dereference)
if isinstance(expression, DataModelQuery):
return cls._optimize(expression.query, dereference=dereference)
elif isinstance(expression, Operation):
optimized_terms = []
for term in expression.terms:
if isinstance(term, Operation) and term.operator == expression.operator:
optimized_terms.extend(cls._optimize(term, dereference=dereference).terms)
else:
optimized_terms.append(cls._optimize(term, dereference=dereference))
return Operation(terms=optimized_terms, operator=expression.operator)
else:
return expression
@classmethod
def parse_expression(cls, expression, *args, **kwargs):
return expression
def query(self, expression, **kwargs):
""" The query function takes an abstract query over the data model, and fetches the corresponding
content from the database. This function returns a list of events, which are represented as dictionaries of
fields, etc.
:type expression: QueryTerm
:rtype: list[dict]
"""
raise NotImplementedError("'query' not supported for {}".format(type(self)))
@property
def external_analytics(self):
""" Returns a list of the analytics provided by this database.
"""
raise NotImplementedError("'analytics' property not supported for {}".format(type(self)))
class CascadeQueryLayer(DataModelQueryLayer):
platform = 'Data Model Query Language'
@classmethod
def parse_expression(cls, expression, *args, **kwargs):
return lift_query(expression)
```
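To make the optimizer's behaviour concrete, nested Operations sharing an operator are flattened into one. This is only a hedged sketch: it assumes Operation(terms=..., operator=...) can be built directly, as it is above, and uses plain strings in place of real QueryTerm leaves, which _optimize passes through unchanged.
```python
# Hedged sketch of DataModelQueryLayer.optimize flattening same-operator nesting.
# Plain strings stand in for QueryTerm leaves; they are passed through untouched.
inner = Operation(terms=['b', 'c'], operator='and')
outer = Operation(terms=['a', inner], operator='and')

flat = DataModelQueryLayer.optimize(outer)
print(flat.operator)  # 'and'
print(flat.terms)     # ['a', 'b', 'c']
```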
#### File: cascade-server/app/settings.py
```python
from __future__ import print_function
import os
import socket
import base64
import yaml
import cryptography.fernet
from .utils import import_database_from_file, confirm
url = None
config = None
def load():
# Global variables to avoid loading twice
global url, config
if config is not None:
return config
with open('conf/cascade.yml', 'r') as f:
config = yaml.load(f.read())
server_settings = config['server']
proto = 'https' if server_settings['https']['enabled'] else 'http'
url = '{proto}://{hostname}:{port}'.format(proto=proto, **server_settings)
server_settings['url'] = url
return config
def get_value(key, default, indent=0):
tab = " "
if isinstance(default, dict):
if key:
print("{}{}:".format(tab * indent, key))
return {k: get_value(k, v, indent=indent + int(key is not None)) for k, v in default.items()}
elif isinstance(default, (list, tuple)):
return default
else:
new_value = raw_input("{}{key} ({default}): ".format(tab * indent, key=key, default=str(default).strip())).strip()
if new_value == "":
return default
elif isinstance(default, bool):
# Convert "True" and "Yes" to boolean true
return new_value[0].lower() in ("y", "t")
else:
# Otherwise figure out the initial type and convert it
return type(default)(new_value)
def setup(auto_defaults=False):
placeholder = "<autogenerate>"
with open('conf/defaults.yml', 'r') as f:
defaults = yaml.load(f.read())
defaults['server']['hostname'] = socket.getfqdn().lower()
if auto_defaults:
print("Automatically updated configuration settings for CASCADE based on defaults.yml")
custom_settings = defaults
else:
print("Update configuration settings for CASCADE. Enter nothing to keep the default value")
custom_settings = get_value(None, defaults)
crypto = custom_settings['database']['crypto']
if crypto['fernet'] == placeholder:
crypto['fernet'] = cryptography.fernet.Fernet.generate_key()
if crypto['key'] == placeholder:
crypto['key'] = base64.b64encode(os.urandom(64))
with open('conf/cascade.yml', 'w') as f:
yaml.dump(custom_settings, f, explicit_start=True, indent=4, default_flow_style=False)
print("\nInitializing database...")
for filename in 'attack.bson', 'cascade-analytics.bson', 'default-sessions.bson':
import_database_from_file('misc/{}'.format(filename))
__all__ = ["load", "setup"]
``` |
{
"source": "jillnogold/mlrun",
"score": 2
} |
#### File: api/schemas/background_task.py
```python
import datetime
import enum
import typing
import pydantic
from .object import ObjectKind
class BackgroundTaskState(str, enum.Enum):
succeeded = "succeeded"
failed = "failed"
running = "running"
@staticmethod
def terminal_states():
return [
BackgroundTaskState.succeeded,
BackgroundTaskState.failed,
]
class BackgroundTaskMetadata(pydantic.BaseModel):
name: str
project: typing.Optional[str]
created: typing.Optional[datetime.datetime]
updated: typing.Optional[datetime.datetime]
class BackgroundTaskSpec(pydantic.BaseModel):
pass
class BackgroundTaskStatus(pydantic.BaseModel):
state: BackgroundTaskState
class BackgroundTask(pydantic.BaseModel):
kind: ObjectKind = pydantic.Field(ObjectKind.background_task, const=True)
metadata: BackgroundTaskMetadata
spec: BackgroundTaskSpec
status: BackgroundTaskStatus
``` |
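A small construction example for the schema above; the import path is an assumption for illustration, and only fields defined in this file are used.
```python
# Hedged sketch: building a BackgroundTask and checking for a terminal state.
# The module path used in the import is assumed, not confirmed.
import datetime

from mlrun.api.schemas.background_task import (
    BackgroundTask, BackgroundTaskMetadata, BackgroundTaskSpec,
    BackgroundTaskStatus, BackgroundTaskState,
)

task = BackgroundTask(
    metadata=BackgroundTaskMetadata(name="migration", project="demo",
                                    created=datetime.datetime.utcnow()),
    spec=BackgroundTaskSpec(),
    status=BackgroundTaskStatus(state=BackgroundTaskState.running),
)
print(task.kind)                                                    # background_task
print(task.status.state in BackgroundTaskState.terminal_states())  # False
```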
{
"source": "jillpls/pixie",
"score": 3
} |
#### File: pixie/pixie/cache.py
```python
from pixie import users
from pixie import servers
import threading
import time
import operator
def init(threshold=100):
return CacheControl(threshold)
class CacheControl:
cached_users = dict()
cached_servers = dict()
def __init__(self, threshold=100):
self.cached_users = dict()
self.cached_servers = dict()
self.cleaner = threading.Thread(target=self.keep_clean, args=(threshold,), daemon=True)
self.cleaner.start()
def keep_clean(self, threshold=100):
while True:
time.sleep(100)
if len(self.cached_users) > threshold:
self.clean(self.cached_users, threshold)
if len(self.cached_servers) > threshold:
self.clean(self.cached_servers, threshold)
def clean(self, d, threshold):
sorted_ids = sorted(d.values(), key=operator.attrgetter('last_call'), reverse=True)
for i in sorted_ids[int(round(threshold/2)):]:
del d[i.id]
pass
def add_user(self, user):
if not isinstance(user, users.DiscordUser):
return
self.cached_users[user.id] = user
def add_server(self, server):
if not isinstance(server, servers.DiscordServer):
return
self.cached_servers[server.id] = server
def get_user(self, user_id):
return self.cached_users.get(user_id)
def get_server(self, server_id):
        return self.cached_servers.get(server_id)
```
#### File: pixie/pixie/core.py
```python
import discord
import subprocess
import os
import sys
import traceback
from pixie import messages
from pixie import data
from pixie import debug
from pixie import cache
from pixie import servers
from pixie.messages import MessageWrapper
client = discord.Client()
LOGGER = debug.Debugger()
LOGGER.run()
def run_bot(token=None):
"""Runs the bot
    :param token: Token supplied from a non-default source; otherwise data.DATAPATH/tokens/bot-token will be used
:type token: str, optional
"""
data.init()
data.CACHE = cache.init()
if token is not None:
data.TOKEN = token.strip()
if len(sys.argv) >= 2:
data.TOKEN = sys.argv[1].strip()
elif os.path.isfile(data.DATAPATH + 'tokens/bot-token'):
with open(data.DATAPATH + 'tokens/bot-token', 'r') as f:
data.TOKEN = f.read().strip()
else:
sys.exit("No TOKEN supplied")
client.run(data.TOKEN)
@client.event
async def on_member_join(member):
s = servers.get_server_data(member.guild.id)
channel_id = s.get('join-channel')
if channel_id is None:
return
c = member.guild.get_channel(channel_id)
await c.send(messages.get_string('join-message', 'de').format(member, c))
@client.event
async def on_member_update(before, after):
before_roles = before.roles
after_roles = after.roles
diff = list(set(after_roles) - set(before_roles))
if len(diff) < 1:
return
s = servers.get_server_data(before.guild.id)
if str(diff[0].id) in s.get('welcome-roles'):
channel_id = servers.get_server_data(before.guild.id).get('welcome-channel')
if channel_id is None:
return
c = before.guild.get_channel(channel_id)
await c.send(messages.get_string('welcome-message', 'de').format(before, c))
return
@client.event
async def on_message(message):
"""Called whenever a message is read by the bot.
    :param message: The message received
:type message: :class:`discord.Message`
"""
# Wrap message to allow additional attributes to be passed along
message = MessageWrapper(message)
# Don't react to bot itself
if message.author == client.user:
return
command = messages.get_command(message, data.CMDCHARS)
message.get_server_data()
# Don't do anything if there was no valid command.
if command is None:
return
# Get args (all strings after the command separated by ' ')
args = messages.get_args(message)
message.args = args
# noinspection PyBroadException
# catch exceptions
try:
msg = messages.handle_commands(message)
except Exception as e:
LOGGER.write(str(e), debug.Debugger.DebugCode.ERROR)
LOGGER.write(traceback.format_exc(), debug.Debugger.DebugCode.ERROR)
msg = '''\
Something went wrong.
This is so sad. Alexa, play Despacito!'''
await message.channel.send(msg)
@client.event
async def on_ready():
"""Called when the bot is ready and running.
"""
if os.path.exists(data.DATAPATH):
print('data already exists')
else:
if not os.path.isfile('create_env.sh'):
return
subprocess.run(['bash', 'create_env.sh'])
print('data created')
```
#### File: pixie/pixie/dev.py
```python
from github import Github
from github import GithubObject
from pixie import messages
from pixie import data
def dev_issue(message, split, label):
"""
create a new issue
:param message: MessageWrapper for formatting & data
:param split: where to split the message content to get the issue content
:param label: the label to apply, TODO: SUPPORT MORE THAN ONE
:return: messages.send_message() result
"""
content = message.content.split(split, 1)[1].strip()
description = ''
if content[0] == '\"':
splits = content[1:].split('\"', 1)
else:
splits = content.split(' ', 1)
name = splits[0]
if len(splits) > 1:
description = splits[1]
description = '[Issue created by {user}]\n'.format(user=message.author.name) + description
make_github_issue(name, description, [label, 'status: pending'])
return messages.send_message(message, 'dev-issue-created')
def cmd_dev(message, args):
"""
cmd switch for dev
:param message: MessageWrapper for formatting & data
:param args: list of arguments after the command
:return: MessageCode result of dev actions
"""
if len(args) == 0:
return messages.MessageCode.UNKNOWN_ARGS
if args[0] == 'help':
return messages.send_message(message, 'dev-help')
if len(args) < 2:
return messages.MessageCode.UNKNOWN_ARGS
if args[0] == 'request':
return dev_issue(message, args[0], 'type: enhancement')
if args[0] == 'bugreport':
return dev_issue(message, args[0], 'type: bug')
return messages.MessageCode.UNKNOWN_ARGS
def label_exists(repo, label):
"""
checks if a label exists in a repository's issue system
:param repo: github.Repository - must be authenticated
:param label: the label name to check
:return: True if the label exists, False otherwise
"""
labels = repo.get_labels()
for l in labels:
if label == l.name:
return True
return False
def make_github_issue(title, body, labels=None):
"""
create a github issue in github.com/data.REPO_OWNER/data.REPO_NAME / TODO: MAKE REPO OWNER AND NAME ARGS
:param title: Issue title
:param body: Issue description
:param labels: Issue labels
:return: Nothing / TODO: RETURN FAILURE / SUCCESS
"""
token = open(data.DATAPATH + 'tokens/' + 'github-token', 'r').read().strip()
g = Github(token)
for repo in g.get_user().get_repos():
print(repo.name)
repo = g.get_repo('%s/%s' % (data.REPO_OWNER, data.REPO_NAME))
label_list = list()
if isinstance(labels, str):
if label_exists(repo, labels):
label_list.append(repo.get_label(labels))
elif labels is not None:
for l in labels:
if label_exists(repo, l):
label_list.append(repo.get_label(l))
if body is None:
body = GithubObject.NotSet
if len(label_list) == 0:
label_list = GithubObject.NotSet
repo.create_issue(title, body=body, labels=label_list)
return
```
#### File: pixie/pixie/servers.py
```python
import discord
from pixie import data, messages, utils
def set_settings(message, args):
if not utils.check_permissions(message, admin=True):
return messages.send_message(message, 'no-permissions')
set_what = args[0][3:]
if set_what == 'welcome' and len(message.channel_mentions) != 0:
channel = message.channel_mentions[0]
if not isinstance(channel, discord.TextChannel):
return messages.MessageCode.UNKNOWN_ARGS
message.server_data.set('welcome-channel', channel.id)
message.server_data.store_settings()
messages.send_custom_message(message, messages.get_string('channel-set').format(channel, 'welcome channel'),
format_content=False)
return messages.MessageCode.SENT
elif set_what == 'welcomeroles' and len(args) > 1:
roles = args[1:]
roles_confirmed = list()
for r in roles:
for gr in message.guild.roles:
if r.lower() == gr.name.lower():
roles_confirmed.append(gr)
confirmed_ids = list()
for r in roles_confirmed:
confirmed_ids.append(r.id)
message.server_data.set('welcome-roles', confirmed_ids)
message.server_data.store_settings()
if len(confirmed_ids) < 1:
return messages.MessageCode.UNKNOWN_ARGS
elif set_what == 'join' and len(message.channel_mentions) != 0:
channel = message.channel_mentions[0]
if not isinstance(channel, discord.TextChannel):
return messages.MessageCode.UNKNOWN_ARGS
message.server_data.set('join-channel', channel.id)
message.server_data.store_settings()
messages.send_custom_message(message, messages.get_string('channel-set').format(channel, 'join channel'),
format_content=False)
return messages.MessageCode.SENT
else:
return messages.MessageCode.UNKNOWN_ARGS
def cmd_server(message, args):
if len(args) == 0:
return messages.MessageCode.UNKNOWN_ARGS
elif args[0] == 'help':
return messages.send_message(message, 'server-help')
elif args[0].startswith('set'):
return set_settings(message, args)
elif args[0] == 'welcomeroles':
roles = list()
for r in message.server_data.get('welcome-roles'):
role = message.guild.get_role(int(r))
roles.append(role.name)
return messages.send_custom_message(message, str(roles))
elif args[0] == 'welcomechannel':
return messages.send_custom_message(message, '<#' + str(message.server_data.get('welcome-channel')) + '>')
elif args[0] == 'joinchannel':
return messages.send_custom_message(message, '<#' + str(message.server_data.get('join-channel')) + '>')
def get_server_data(guild):
if isinstance(guild, int):
id = guild
else:
id = guild.id
if id in data.CACHE.cached_servers:
return data.CACHE.cached_servers[id]
server_data = DiscordServer(id)
server_data.read_data()
return server_data
class DiscordServer(data.DataStorage):
PATHPREFIX = '/server_'
def __init__(self, id=None, guild=None, no_files=False):
if (guild is None and id is None) or (guild is not None and id is not None):
raise ValueError('user or id arguments required')
        elif guild is not None and isinstance(guild, discord.Guild):
self.set('id', guild.id)
elif id is not None and isinstance(id, int):
self.set('id', id)
super(DiscordServer, self).__init__(no_files=no_files)
def read_settings(self):
try:
self.read_data()
except FileNotFoundError:
return
def store_settings(self):
self.write_data()
``` |
{
"source": "jillr/project-config",
"score": 2
} |
#### File: project-config/tools/manage-projects.py
```python
import argparse
import configparser
import logging
import os
import yaml
import github
LOG = logging.getLogger(__name__)
class Client(object):
def load_projects(self):
return yaml.safe_load(open(self.args.projects))
def parse_arguments(self):
parser = argparse.ArgumentParser(
description='manage projects')
parser.add_argument(
'--config-file', dest='config',
default='~/.github-projects.config',
help='path to github-projects.config'),
parser.add_argument(
'-p', dest='projects',
default='github/projects.yaml',
help='path to projects.yaml file')
parser.add_argument(
'--debug', dest='debug', action='store_true',
help='Print debugging output (set logging level to DEBUG instead '
' of default INFO level)')
self.args = parser.parse_args()
def process_projects(self):
projects = self.load_projects()
gh = github.Github(self.config.get('github', 'token'))
orgs = gh.get_user().get_orgs()
orgs_dict = dict(zip([o.login.lower() for o in orgs], orgs))
for item in projects:
LOG.info('Processing project: %s' % item['project'])
self._process_project(item, orgs_dict)
def _process_project(self, item, orgs_dict):
project_split = item['project'].split('/', 1)
org_name = project_split[0]
repo_name = project_split[1]
kwargs = {
'allow_merge_commit': True,
'allow_rebase_merge': False,
'allow_squash_merge': True,
'description': item.get('description', None),
}
options = item.get('options', [])
kwargs['has_downloads'] = 'has-downloads' in options or False
kwargs['has_issues'] = 'has-issues' in options or True
kwargs['has_projects'] = 'has-projects' in options or False
kwargs['has_wiki'] = 'has-wiki' in options or False
try:
org = orgs_dict[org_name.lower()]
except KeyError as e:
LOG.exception(e)
raise
try:
LOG.info('Fetching github info about %s', repo_name)
repo = org.get_repo(repo_name)
except github.GithubException:
# NOTE(pabelanger): We should also allow to import an existing
# project from upstream source.
kwargs['auto_init'] = True
LOG.info(
'Creating %s in github', repo_name)
repo = org.create_repo(
name=repo_name, **kwargs)
return
if repo.archived:
# Repo is archived, we cannot update it.
return
if kwargs['allow_merge_commit'] == repo.allow_merge_commit:
del kwargs['allow_merge_commit']
if kwargs['allow_rebase_merge'] == repo.allow_rebase_merge:
del kwargs['allow_rebase_merge']
if kwargs['allow_squash_merge'] == repo.allow_squash_merge:
del kwargs['allow_squash_merge']
if kwargs['description'] == repo.description:
del kwargs['description']
if kwargs['has_downloads'] == repo.has_downloads:
del kwargs['has_downloads']
if kwargs['has_issues'] == repo.has_issues:
del kwargs['has_issues']
if kwargs['has_projects'] == repo.has_projects:
del kwargs['has_projects']
if kwargs['has_wiki'] == repo.has_wiki:
del kwargs['has_wiki']
if item.get('archived', False):
kwargs['archived'] = True
if kwargs:
LOG.info("Updating %s in github", repo_name)
repo.edit(repo_name, **kwargs)
def read_config(self):
self.config = configparser.ConfigParser()
self.config.read(os.path.expanduser(self.args.config))
def setup_logging(self):
if self.args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
def main(self):
self.parse_arguments()
self.setup_logging()
self.read_config()
self.process_projects()
def main():
Client().main()
if __name__ == "__main__":
main()
``` |
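The tool reads an INI config carrying a GitHub token and a projects.yaml listing repositories. The sketch below shows hypothetical inputs limited to the keys the script actually reads (project, description, options, archived); the organization and repository names are made up.
```python
# Hypothetical inputs for manage-projects.py, matching the keys read above.
import yaml

github_projects_config = """
[github]
token = <personal-access-token>
"""

projects_yaml = """
- project: example-org/example-repo
  description: Example repository managed by this tool
  options:
    - has-issues
- project: example-org/old-repo
  archived: true
"""

for item in yaml.safe_load(projects_yaml):
    org, repo = item['project'].split('/', 1)
    print(org, repo, item.get('options', []), item.get('archived', False))
```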
{
"source": "jillson/dct",
"score": 2
} |
#### File: dct/cards/models.py
```python
from django.db import models
from django.conf import settings
class TagManager(models.Manager):
def get_by_natural_key(self, Name):
return self.get(Name=Name)
class CardManager(models.Manager):
def get_by_natural_key(self, Name):
return self.get(Name=Name)
class DeckManager(models.Manager):
def get_by_natural_key(self, Name,*args):
return self.get(Name=Name)
class SpotManager(models.Manager):
def get_by_natural_key(self, Name, *args):
return self.get(Name=Name)
class VisibilityManager(models.Manager):
def get_by_natural_key(self, Name):
return self.get(Name=Name)
class RowManager(models.Manager):
def get_by_natural_key(self, Name):
return self.get(Name=Name)
class GameManager(models.Manager):
def get_by_natural_key(self, Name):
return self.get(Name=Name)
class Tag(models.Model):
objects = TagManager()
Name = models.CharField(max_length=80)
def __str__(self):
return self.Name
def natural_key(self):
return (self.Name,)
class Card(models.Model):
objects = CardManager()
Name = models.CharField(max_length=80)
Tags = models.ManyToManyField(Tag)
def __str__(self):
return self.Name
def natural_key(self):
return (self.Name,)
class Deck(models.Model):
objects = DeckManager()
Name = models.CharField(max_length=80)
Cards = models.ManyToManyField(Card)
def __str__(self):
return self.Name
def natural_key(self):
return (self.Name,)
class Spot(models.Model):
objects = SpotManager()
Name = models.CharField(max_length=80,blank=True,null=True)
Drop = models.BooleanField(default=False)
Deck = models.ForeignKey(Deck,null=True)
def __str__(self):
return self.Name
def natural_key(self):
return (self.Name,)
class Visibility(models.Model):
objects = VisibilityManager()
Name = models.CharField(max_length=80,blank=True,null=True)
Description = models.CharField(max_length=255,blank=True,null=True)
def __str__(self):
return self.Name
def natural_key(self):
return (self.Name,)
class Row(models.Model):
objects = RowManager()
Name = models.CharField(max_length=80,blank=True,null=True)
Spots = models.ManyToManyField(Spot)
NewRow = models.BooleanField(default=False)
Width = models.IntegerField(default=-1)
Visibility = models.ForeignKey(Visibility,null=True)
def __str__(self):
return self.Name
def natural_key(self):
return (self.Name,)
class Game(models.Model):
objects = GameManager()
Name = models.CharField(max_length=80)
Rows = models.ManyToManyField(Row)
def __str__(self):
return self.Name
def natural_key(self):
return (self.Name,)
class GameInstance(models.Model):
Game = models.ForeignKey(Game)
Players = models.ManyToManyField(settings.AUTH_USER_MODEL)
State = models.TextField(blank=True)
def __str__(self):
return "Instance {} of {}".format(self.id,self.Game.Name)
class Invitation(models.Model):
GameInstance = models.ForeignKey(GameInstance)
Target = models.ForeignKey(settings.AUTH_USER_MODEL)
def __str__(self):
return "Invitation to {}".format(str(self.GameInstance))
```
#### File: dct/cards/serializers.py
```python
from rest_framework import serializers
from .models import Game, GameInstance, Row, Spot, Deck, Card, Invitation
class CardSerializer(serializers.ModelSerializer):
class Meta:
model = Card
fields = '__all__'
class DeckSerializer(serializers.ModelSerializer):
Cards = CardSerializer(many=True)
class Meta:
model = Deck
fields = '__all__'
class SpotSerializer(serializers.HyperlinkedModelSerializer):
Deck = DeckSerializer()
class Meta:
model = Spot
fields = ('Name','Drop','Deck')
class RowSerializer(serializers.HyperlinkedModelSerializer):
Spots = SpotSerializer(many=True)
Visibility = serializers.ReadOnlyField(source='Visibility.Name')
class Meta:
model = Row
fields = ('Name','Spots','NewRow',"Width","Visibility")
class GameSerializer(serializers.HyperlinkedModelSerializer):
Rows = RowSerializer(many=True)
class Meta:
model = Game
fields = ('Name','Rows','url')
class GameInstanceSerializer(serializers.ModelSerializer):
class Meta:
model = GameInstance
fields = ('Game','url')
class InvitationSerializer(serializers.ModelSerializer):
class Meta:
model = Invitation
fields = "__all__"
def create(self, request):
i = Invitation.objects.create(GameInstance=request.get("GameInstance"),
Target=request.get("Target"))
i.save()
return i
``` |
{
"source": "JILLXIA/cv21b",
"score": 2
} |
#### File: JILLXIA/cv21b/get_dr_txt.py
```python
import os
import numpy as np
import torch
from PIL import Image
from tqdm import tqdm
from utils.utils import (letterbox_image, non_max_suppression,
yolo_correct_boxes)
from yolo import YOLO
'''
The confidence threshold here is set deliberately low because computing mAP requires
Recall and Precision values under many different threshold conditions; only by keeping
enough boxes does the computed mAP become accurate (see how mAP is defined for details).
The Recall and Precision values printed while computing mAP are the ones at a threshold of 0.5.

Because of the low threshold, the txt files written to ./input/detection-results/ will
contain more boxes than a plain predict run; this is intentional, so that Recall and
Precision can be evaluated under different thresholds and the mAP computed from them.

self.iou here is the IoU used by non-maximum suppression: if the IoU between a low-score
box and a higher-score box exceeds this value, the low-score box is removed.

Note that self.iou = 0.5 does not mean mAP@0.5 (as opposed to mAP@0.5:0.95); to evaluate
at another overlap, e.g. mAP@0.75, set MINOVERLAP in get_map.py.
'''
class mAP_Yolo(YOLO):
#---------------------------------------------------#
    #   Detect a single image and write its detections to a txt file
#---------------------------------------------------#
def detect_image(self,image_id,image):
self.confidence = 0.01
self.iou = 0.5
f = open("./input/detection-results/"+image_id+".txt","w")
image_shape = np.array(np.shape(image)[0:2])
#---------------------------------------------------------#
        #   Add gray padding to the image for a distortion-free resize;
        #   alternatively, resize the image directly for detection.
#---------------------------------------------------------#
if self.letterbox_image:
crop_img = np.array(letterbox_image(image, (self.model_image_size[1],self.model_image_size[0])))
else:
crop_img = image.convert('RGB')
crop_img = crop_img.resize((self.model_image_size[1],self.model_image_size[0]), Image.BICUBIC)
photo = np.array(crop_img,dtype = np.float32) / 255.0
photo = np.transpose(photo, (2, 0, 1))
#---------------------------------------------------------#
        #   Add the batch_size dimension
#---------------------------------------------------------#
images = [photo]
with torch.no_grad():
images = torch.from_numpy(np.asarray(images))
if self.cuda:
images = images.cuda()
#---------------------------------------------------------#
            #   Feed the image into the network for prediction
#---------------------------------------------------------#
outputs = self.net(images)
output_list = []
for i in range(3):
output_list.append(self.yolo_decodes[i](outputs[i]))
#---------------------------------------------------------#
            #   Stack the prediction outputs, then apply non-maximum suppression
#---------------------------------------------------------#
output = torch.cat(output_list, 1)
batch_detections = non_max_suppression(output, self.num_classes, conf_thres=self.confidence, nms_thres=self.iou)
#---------------------------------------------------------#
            #   If no objects were detected, return
#---------------------------------------------------------#
try :
batch_detections = batch_detections[0].cpu().numpy()
except:
return
#---------------------------------------------------------#
            #   Filter the prediction boxes by score
#---------------------------------------------------------#
top_index = batch_detections[:,4] * batch_detections[:,5] > self.confidence
top_conf = batch_detections[top_index,4]*batch_detections[top_index,5]
top_label = np.array(batch_detections[top_index,-1],np.int32)
top_bboxes = np.array(batch_detections[top_index,:4])
top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(top_bboxes[:,0],-1),np.expand_dims(top_bboxes[:,1],-1),np.expand_dims(top_bboxes[:,2],-1),np.expand_dims(top_bboxes[:,3],-1)
#-----------------------------------------------------------------#
            #   Before prediction, letterbox_image adds gray padding around the image,
            #   so the resulting top_bboxes are relative to the padded image;
            #   correct them here to remove the padding offset.
#-----------------------------------------------------------------#
if self.letterbox_image:
boxes = yolo_correct_boxes(top_ymin,top_xmin,top_ymax,top_xmax,np.array([self.model_image_size[0],self.model_image_size[1]]),image_shape)
else:
top_xmin = top_xmin / self.model_image_size[1] * image_shape[1]
top_ymin = top_ymin / self.model_image_size[0] * image_shape[0]
top_xmax = top_xmax / self.model_image_size[1] * image_shape[1]
top_ymax = top_ymax / self.model_image_size[0] * image_shape[0]
boxes = np.concatenate([top_ymin,top_xmin,top_ymax,top_xmax], axis=-1)
for i, c in enumerate(top_label):
predicted_class = self.class_names[c]
score = str(top_conf[i])
top, left, bottom, right = boxes[i]
f.write("%s %s %s %s %s %s\n" % (predicted_class, score[:6], str(int(left)), str(int(top)), str(int(right)),str(int(bottom))))
f.close()
return
yolo = mAP_Yolo()
image_ids = open('VOCdevkit/VOC2007/ImageSets/Main/test.txt').read().strip().split()
if not os.path.exists("./input"):
os.makedirs("./input")
if not os.path.exists("./input/detection-results"):
os.makedirs("./input/detection-results")
if not os.path.exists("./input/images-optional"):
os.makedirs("./input/images-optional")
for image_id in tqdm(image_ids):
image_path = "./VOCdevkit/VOC2007/JPEGImages/"+image_id+".jpg"
image = Image.open(image_path)
    # If enabled, the saved images can be used for visualization when computing mAP
# image.save("./input/images-optional/"+image_id+".jpg")
yolo.detect_image(image_id,image)
print("Conversion completed!")
``` |
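The header comment explains that detections are dumped at a very low confidence threshold so that precision and recall can later be evaluated at every operating point (get_map.py, MINOVERLAP). As a generic illustration of that second step, and not the repository's own implementation, average precision over score-sorted detections can be computed like this:
```python
# Hedged sketch: all-point-interpolated AP from score-sorted detections.
# `matched` marks detections whose IoU with a ground-truth box is at least
# MINOVERLAP (e.g. 0.5). This is NOT the repository's get_map.py code.
import numpy as np

def average_precision(matched, num_gt):
    matched = np.asarray(matched, dtype=float)   # sorted by confidence, descending
    tp = np.cumsum(matched)
    fp = np.cumsum(1.0 - matched)
    recall = tp / max(num_gt, 1)
    precision = tp / np.maximum(tp + fp, 1e-12)
    # precision envelope, then integrate precision over recall
    precision = np.maximum.accumulate(precision[::-1])[::-1]
    return float(np.sum(np.diff(np.concatenate(([0.0], recall))) * precision))

print(average_precision([1, 1, 0, 1, 0], num_gt=4))  # 0.6875
```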
{
"source": "JillyMan/decision-tree",
"score": 3
} |
#### File: MachineLearning.BayesianNetwork/python-imp/bayes_core.py
```python
import math
RangeType = 'Range'
BinaryType = 'Binary'
class Hipothesis:
def __init__(self, id, name, p):
self.id = id
self.name = name
self.p = p
class Attribute:
def __init__(self, id, name, question, _type):
self.id = id
self.name = name
self.question = question
self.type = _type
class Tag:
def __init__(self, hipothesis, attribute, pp, pm):
self.pp = pp
self.pm = pm
self.attribute = attribute
self.hipothesis = hipothesis
class InputType:
def __init__(self, _type, value):
self.type = _type
self.value = int(value)
class Binary(InputType):
def __init__(self, value):
InputType.__init__(self, BinaryType, value)
class Range(InputType):
def __init__(self, start, end, value):
InputType.__init__(self, RangeType, value)
self.start = int(start)
self.end = int(end)
def normalize(self):
l = self.end - self.start
v = self.value - self.start
return v / l
def phe_func(p, pp, pm):
return (p * pp) / (p * pp + (1-p) * pm)
def calc_probs(pp, pm, p):
phe = phe_func(p, pp, pm)
phne = phe_func(p, 1 - pp, 1 - pm)
return (phe, phne)
def lerp(start, end, t):
return start + (end - start) * t
def interpolate_result_clamp01(phne, ph, phe, r):
if r > 0.5:
return lerp(ph, phe, r)
elif r < 0.5:
return lerp(phne, ph, r)
return ph
def interpolate_result_binary(phne, phe, r):
return phne if r == 0 else phe
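# --- Illustrative usage (added example, not part of the original module) ---
# phe_func implements Bayes' rule: P(H|E) = p*pp / (p*pp + (1-p)*pm), where p is the
# prior P(H), pp = P(E|H) and pm = P(E|not H).  calc_probs returns the posterior both
# when the evidence is present (phe) and when it is absent (phne).  The numbers below
# are made up purely to show the call shape.
if __name__ == '__main__':
    phe, phne = calc_probs(pp=0.8, pm=0.1, p=0.5)
    print(round(phe, 3))   # 0.889 - hypothesis more likely once the evidence is observed
    print(round(phne, 3))  # 0.182 - hypothesis less likely when the evidence is absent
    # A partially confident answer r in (0, 1) interpolates between prior and posterior:
    print(round(interpolate_result_clamp01(phne, 0.5, phe, 0.75), 3))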
``` |
{
"source": "Jillyyy/LaneATT",
"score": 2
} |
#### File: Jillyyy/LaneATT/speed.py
```python
import time
import argparse
import torch
from thop import profile, clever_format
from lib.config import Config
def parse_args():
parser = argparse.ArgumentParser(description="Tool to measure a model's speed")
parser.add_argument("--cfg", default="config.yaml", help="Config file")
parser.add_argument("--model_path", help="Model checkpoint path (optional)")
parser.add_argument('--iters', default=100, type=int, help="Number of times to run the model and get the average")
return parser.parse_args()
# torch.backends.cudnn.benchmark = True
def main():
args = parse_args()
cfg = Config(args.cfg)
device = torch.device('cuda')
model = cfg.get_model(cfg)
model = model.to(device)
test_parameters = cfg.get_test_parameters()
height, width = cfg['datasets']['test']['parameters']['img_size']
if args.model_path is not None:
model.load_state_dict(torch.load(args.model_path)['model'], strict = False)
model.eval()
x = torch.zeros((1, 3, height, width)).to(device) + 1
# Benchmark MACs and params
macs, params = profile(model, inputs=(x,))
macs, params = clever_format([macs, params], "%.3f")
print('MACs: {}'.format(macs))
print('Params: {}'.format(params))
# GPU warmup
for _ in range(100):
model(x)
# Benchmark latency and FPS
t_all = 0
for _ in range(args.iters):
t1 = time.time()
model(x, **test_parameters)
t2 = time.time()
t_all += t2 - t1
print('Average latency (ms): {:.2f}'.format(t_all * 1000 / args.iters))
print('Average FPS: {:.2f}'.format(args.iters / t_all))
if __name__ == '__main__':
main()
``` |
{
"source": "Jillyyy/SPINN",
"score": 3
} |
#### File: Jillyyy/SPINN/demo_viskp.py
```python
import torch
from torchvision.transforms import Normalize
import numpy as np
import cv2
import argparse
import json
import os
from tqdm import tqdm
from models import hmr, SMPL
from utils.imutils import crop
from utils.renderer import Renderer
import config
import constants
from utils.geometry import batch_rodrigues, perspective_projection, estimate_translation
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', required=True, help='Path to pretrained checkpoint')
# parser.add_argument('--img', type=str, required=True, help='Path to input image')
parser.add_argument('--bbox', type=str, default=None, help='Path to .json file containing bounding box coordinates')
parser.add_argument('--openpose', type=str, default=None, help='Path to .json containing openpose detections')
parser.add_argument('--outfile', type=str, default=None, help='Filename of output images. If not set use input filename.')
def draw_full_skeleton(input_image, joints, all_joints=None, draw_edges=True, vis=None, all_vis=None, radius=None):
"""
joints is 3 x 19. but if not will transpose it.
0: Right ankle
1: Right knee
2: Right hip
3: Left hip
4: Left knee
5: Left ankle
6: Right wrist
7: Right elbow
8: Right shoulder
9: Left shoulder
10: Left elbow
11: Left wrist
12: Neck
13: Head top
14: nose
15: left_eye
16: right_eye
17: left_ear
18: right_ear
"""
joints = joints + 112
print(joints)
if radius is None:
radius = max(4, (np.mean(input_image.shape[:2]) * 0.01).astype(int))
colors = {
'pink': np.array([197, 27, 125]), # L lower leg
'light_pink': np.array([233, 163, 201]), # L upper leg
'light_green': np.array([161, 215, 106]), # L lower arm
'green': np.array([77, 146, 33]), # L upper arm
'red': np.array([215, 48, 39]), # head
'light_red': np.array([252, 146, 114]), # head
'light_orange': np.array([252, 141, 89]), # chest
'purple': np.array([118, 42, 131]), # R lower leg
'light_purple': np.array([175, 141, 195]), # R upper
'light_blue': np.array([145, 191, 219]), # R lower arm
'blue': np.array([0, 0, 255]), # R upper arm
'gray': np.array([130, 130, 130]), #
'white': np.array([255, 255, 255]) #
}
image = input_image.copy()
input_is_float = False
if np.issubdtype(image.dtype, np.floating):
input_is_float = True
max_val = image.max()
if max_val <= 2.: # should be 1 but sometimes it's slightly above 1
image = (image * 255).astype(np.uint8)
else:
image = (image).astype(np.uint8)
if joints.shape[0] != 2:
joints = joints.T
joints = np.round(joints).astype(int)
if all_joints is not None:
if all_joints.shape[0] != 2:
all_joints = all_joints.T
all_joints = np.round(all_joints).astype(int)
jcolors = [
'light_pink', 'light_pink', 'light_pink', 'pink', 'pink', 'pink',
'light_blue', 'light_blue', 'light_blue', 'blue', 'blue', 'blue',
'purple', 'purple', 'red', 'green', 'green', 'white', 'white'
]
all_jcolors = [
'light_pink', 'light_pink', 'light_pink', 'light_pink', 'pink', 'pink', 'pink', 'pink',
'light_blue', 'light_blue', 'light_blue', 'light_blue', 'blue', 'blue', 'blue', 'blue',
'purple', 'purple', 'purple', 'purple', 'red', 'green', 'green', 'green', 'white', 'white' ,'white', 'white'
]
# draw all keypoints
if joints is not None:
print(joints.shape[1])
for i in range(joints.shape[1]):
point = joints[:, i]
# If invisible skip
# if all_vis is not None and all_vis[i] == 0:
# continue
if draw_edges:
# print(radius)
# print(point)
# cv2.circle(image, (100, 60), 3, (0, 0, 213), -1)
cv2.circle(image, (point[0], point[1]), 2, colors['blue'].tolist(),
2)
# cv2.circle(image, (point[0], point[1]), radius-1, colors['blue'].tolist(),
# -1)
# cv2.circle(image, (point[0], point[1]), radius - 2,
# colors['blue'].tolist(), -1)
else:
# cv2.circle(image, (point[0], point[1]), 5, colors['white'], 1)
cv2.circle(image, (point[0], point[1]), radius - 1,
colors['blue'].tolist(), 1)
# cv2.circle(image, (point[0], point[1]), 5, colors['gray'], -1)
return image
def bbox_from_openpose(openpose_file, rescale=1.2, detection_thresh=0.2):
"""Get center and scale for bounding box from openpose detections."""
with open(openpose_file, 'r') as f:
keypoints = json.load(f)['people'][0]['pose_keypoints_2d']
keypoints = np.reshape(np.array(keypoints), (-1,3))
valid = keypoints[:,-1] > detection_thresh
valid_keypoints = keypoints[valid][:,:-1]
center = valid_keypoints.mean(axis=0)
bbox_size = (valid_keypoints.max(axis=0) - valid_keypoints.min(axis=0)).max()
# adjust bounding box tightness
scale = bbox_size / 200.0
scale *= rescale
return center, scale
def bbox_from_json(bbox_file):
"""Get center and scale of bounding box from bounding box annotations.
The expected format is [top_left(x), top_left(y), width, height].
"""
with open(bbox_file, 'r') as f:
bbox = np.array(json.load(f)['bbox']).astype(np.float32)
ul_corner = bbox[:2]
center = ul_corner + 0.5 * bbox[2:]
width = max(bbox[2], bbox[3])
scale = width / 200.0
# make sure the bounding box is rectangular
return center, scale
def process_image(img_file, bbox_file, openpose_file, input_res=224):
"""Read image, do preprocessing and possibly crop it according to the bounding box.
If there are bounding box annotations, use them to crop the image.
If no bounding box is specified but openpose detections are available, use them to get the bounding box.
"""
normalize_img = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)
img = cv2.imread(img_file)[:,:,::-1].copy() # PyTorch does not support negative stride at the moment
print(img.shape)
if bbox_file is None and openpose_file is None:
# Assume that the person is centered in the image
height = img.shape[0]
width = img.shape[1]
center = np.array([width // 2, height // 2])
scale = max(height, width) / 200
else:
if bbox_file is not None:
center, scale = bbox_from_json(bbox_file)
elif openpose_file is not None:
center, scale = bbox_from_openpose(openpose_file)
img = crop(img, center, scale, (input_res, input_res))
img = img.astype(np.float32) / 255.
img = torch.from_numpy(img).permute(2,0,1)
norm_img = normalize_img(img.clone())[None]
return img, norm_img
if __name__ == '__main__':
args = parser.parse_args()
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Load pretrained model
model = hmr(config.SMPL_MEAN_PARAMS).to(device)
checkpoint = torch.load(args.checkpoint)
model.load_state_dict(checkpoint['model'], strict=False)
# Load SMPL model
smpl = SMPL(config.SMPL_MODEL_DIR,
batch_size=1,
create_transl=False).to(device)
model.eval()
# Setup renderer for visualization
renderer = Renderer(focal_length=constants.FOCAL_LENGTH, img_res=constants.IMG_RES, faces=smpl.faces)
# imgs_path = '/project/hpn_yyy/datasets/preprocess/min_imgs_up-3d'
imgs_path = 'crop_vis'
imgpath_list = []
for dirpath, dirnames, filenames in os.walk(imgs_path):
for f in filenames :
if os.path.splitext(f)[1] == '.png' or os.path.splitext(f)[1] == '.jpg':
imgpath_list.append(os.path.join(dirpath, f))
for imgpath in tqdm(imgpath_list):
# Preprocess input image and generate predictions
img, norm_img = process_image(imgpath, args.bbox, args.openpose, input_res=constants.IMG_RES)
with torch.no_grad():
pred_rotmat, pred_betas, pred_camera = model(norm_img.to(device))
pred_output = smpl(betas=pred_betas, body_pose=pred_rotmat[:,1:], global_orient=pred_rotmat[:,0].unsqueeze(1), pose2rot=False)
pred_vertices = pred_output.vertices
pred_joints = pred_output.joints
# Calculate camera parameters for rendering
camera_translation = torch.stack([pred_camera[:,1], pred_camera[:,2], 2*constants.FOCAL_LENGTH/(constants.IMG_RES * pred_camera[:,0] +1e-9)],dim=-1)
# Convert Weak Perspective Camera [s, tx, ty] to camera translation [tx, ty, tz] in 3D given the bounding box size
batch_size = 1
camera_center = torch.zeros(batch_size, 2, device=device)
pred_keypoints_2d = perspective_projection(pred_joints,
rotation=torch.eye(3, device=device).unsqueeze(0).expand(batch_size, -1, -1),
translation=camera_translation,
focal_length=constants.FOCAL_LENGTH,
camera_center=camera_center)
# print(pred_keypoints_2d.shape)
kp_img = draw_full_skeleton(img.permute(1,2,0).cpu().numpy(), pred_keypoints_2d[0][25:,:].cpu().numpy())
# cv2.imwrite('test_kp.jpg', kp_img[:,:,::-1])
camera_translation = camera_translation[0].cpu().numpy()
pred_vertices = pred_vertices[0].cpu().numpy()
img = img.permute(1,2,0).cpu().numpy()
# Render parametric shape
img_shape = renderer(pred_vertices, camera_translation, img)
# Render side views
aroundy = cv2.Rodrigues(np.array([0, np.radians(90.), 0]))[0]
center = pred_vertices.mean(axis=0)
rot_vertices = np.dot((pred_vertices - center), aroundy) + center
# Render non-parametric shape
img_shape_side = renderer(rot_vertices, camera_translation, np.ones_like(img))
outfile = imgpath.split('.')[0] if args.outfile is None else args.outfile
# Save reconstructions
cv2.imwrite(outfile + '_kp.jpg', kp_img[:,:,::-1])
cv2.imwrite(outfile + '_shape.png', 255 * img_shape[:,:,::-1])
cv2.imwrite(outfile + '_shape_side.png', 255 * img_shape_side[:,:,::-1])
```
#### File: SPINN/models/hs_model.py
```python
import torch
import torch.nn as nn
from models import get_pose_net, SMPL
# from models.resnet import resnet50
# from models.sc_layers_share_global6d import SCFC_Share
from models import HMR_HR
# from models.geometric_layers import orthographic_projection, rodrigues, quat2mat
import numpy as np
class HSModel(nn.Module):
def __init__(self, cfg, is_train, smpl_mean_params, pretrained_checkpoint=None):
super(HSModel, self).__init__()
self.hrnet = get_pose_net(cfg, is_train)
# hidden_neuron_list = [4096,4096]
self.hmr_hr = HMR_HR(cfg, smpl_mean_params)
# self.smpl = SMPL()
if pretrained_checkpoint is not None:
checkpoint = torch.load(pretrained_checkpoint)
try:
self.hrnet.load_state_dict(checkpoint['hrnet'])
except KeyError:
print('Warning: hrnet was not found in checkpoint')
try:
self.hmr_hr.load_state_dict(checkpoint['hmr_hr'])
except KeyError:
print('Warning: hmr_hr was not found in checkpoint')
def forward(self, image):
"""Fused forward pass for the 2 networks
Inputs:
image: size = (B, 3, 224, 224)
Returns:
Regressed SMPL shape: size = (B, 6890, 3)
Weak-perspective camera: size = (B, 3)
SMPL pose parameters (as rotation matrices): size = (B, 24, 3, 3)
SMPL shape parameters: size = (B, 10)
"""
batch_size = image.shape[0]
with torch.no_grad():
outputs = self.hrnet(image)
pred_rotmat, pred_shape, pred_cam = self.hmr_hr(outputs)
# pred_camera = pred_camera_with_global_rot[:,:3] #(B,3)
# pred_global_rot = pred_camera_with_global_rot[:,3:][:,None,:] #(B,1,4)
# pose_cube = pred_theta.view(-1, 4) # (batch_size * 24, 4)
# R = quat2mat(pose_cube).view(batch_size, 23, 3, 3)
# pred_rotmat = R.view(batch_size, 23, 3, 3)
# pred_global_rot = pred_global_rot.view(batch_size, 1, 3, 3)
# pred_rotmat = torch.cat((pred_global_rot,pred_rotmat),dim=1) #(B,24,3,3)
# pred_vertices = self.smpl(pred_rotmat, pred_beta)
return outputs, pred_rotmat, pred_shape, pred_cam
```
#### File: SPINN/utils/evaluate.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def get_max_preds(batch_heatmaps):
'''
get predictions from score maps
heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
'''
assert isinstance(batch_heatmaps, np.ndarray), \
'batch_heatmaps should be numpy.ndarray'
assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'
batch_size = batch_heatmaps.shape[0]
num_joints = batch_heatmaps.shape[1]
width = batch_heatmaps.shape[3]
heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
idx = np.argmax(heatmaps_reshaped, 2)
maxvals = np.amax(heatmaps_reshaped, 2)
maxvals = maxvals.reshape((batch_size, num_joints, 1))
idx = idx.reshape((batch_size, num_joints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = (preds[:, :, 0]) % width
preds[:, :, 1] = np.floor((preds[:, :, 1]) / width)
pred_mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2))
pred_mask = pred_mask.astype(np.float32)
preds *= pred_mask
return preds, maxvals
def calc_dists(preds, target, normalize):
preds = preds.astype(np.float32)
target = target.astype(np.float32)
dists = np.zeros((preds.shape[1], preds.shape[0]))
for n in range(preds.shape[0]):
for c in range(preds.shape[1]):
if target[n, c, 0] > 1 and target[n, c, 1] > 1:
normed_preds = preds[n, c, :] / normalize[n]
normed_targets = target[n, c, :] / normalize[n]
dists[c, n] = np.linalg.norm(normed_preds - normed_targets)
else:
dists[c, n] = -1
return dists
def dist_acc(dists, thr=0.5):
''' Return percentage below threshold while ignoring values with a -1 '''
dist_cal = np.not_equal(dists, -1)
num_dist_cal = dist_cal.sum()
if num_dist_cal > 0:
return np.less(dists[dist_cal], thr).sum() * 1.0 / num_dist_cal
else:
return -1
def accuracy(output, target, hm_type='gaussian', thr=0.5):
'''
Calculate accuracy according to PCK,
but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs',
followed by individual accuracies
'''
idx = list(range(output.shape[1]))
norm = 1.0
if hm_type == 'gaussian':
pred, _ = get_max_preds(output)
target, _ = get_max_preds(target)
h = output.shape[2]
w = output.shape[3]
norm = np.ones((pred.shape[0], 2)) * np.array([h, w]) / 10
dists = calc_dists(pred, target, norm)
acc = np.zeros((len(idx) + 1))
avg_acc = 0
cnt = 0
for i in range(len(idx)):
acc[i + 1] = dist_acc(dists[idx[i]])
if acc[i + 1] >= 0:
avg_acc = avg_acc + acc[i + 1]
cnt += 1
avg_acc = avg_acc / cnt if cnt != 0 else 0
if cnt != 0:
acc[0] = avg_acc
return acc, avg_acc, cnt, pred
``` |
{
"source": "JillZhang/cs7614_project4",
"score": 2
} |
#### File: cs7614_project4/experiments/base.py
```python
import csv
import logging
import os
import math
import pickle
import time
import numpy as np
from abc import ABC, abstractmethod
from .plotting import plot_policy_map, plot_value_map
import solvers
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# Constants (default values unless provided by caller)
OUTPUT_DIR = 'output'
MAX_STEPS = 2000
NUM_TRIALS = 100
if not os.path.exists(os.path.join(os.getcwd(), OUTPUT_DIR)):
os.makedirs(os.path.join(os.getcwd(), OUTPUT_DIR))
if not os.path.exists(os.path.join(os.path.join(os.getcwd(), OUTPUT_DIR), 'images')):
os.makedirs(os.path.join(os.path.join(os.getcwd(), OUTPUT_DIR), 'images'))
class EvaluationStats(object):
def __init__(self):
self.rewards = list()
self.stat_history = list()
self.reward_mean = 0
self.reward_median = 0
self.reward_std = 0
self.reward_max = 0
self.reward_min = 0
self.runs = 0
def add(self, reward):
self.rewards.append(reward)
self.compute()
def compute(self):
reward_array = np.array(self.rewards)
self.runs = len(self.rewards)
self.reward_mean = np.mean(reward_array)
self.reward_median = np.median(reward_array)
self.reward_std = np.std(reward_array)
self.reward_max = np.max(reward_array)
self.reward_min = np.min(reward_array)
self.stat_history.append((
self.reward_mean,
self.reward_median,
self.reward_std,
self.reward_max,
self.reward_min
))
def to_csv(self, file_name):
self.compute()
means, medians, stds, maxes, mins = zip(*self.stat_history)
with open(file_name, 'w') as f:
f.write("step,reward,mean,median,std,max,min\n")
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(range(len(self.rewards)), self.rewards, means, medians, stds, maxes, mins))
def __str__(self):
return 'reward_mean: {}, reward_median: {}, reward_std: {}, reward_max: {}, reward_min: {}, runs: {}'.format(
self.reward_mean,
self.reward_median,
self.reward_std,
self.reward_max,
self.reward_min,
self.runs
)
class ExperimentStats(object):
def __init__(self):
self.policies = list()
self.vs = list()
self.steps = list()
self.step_times = list()
self.rewards = list()
self.deltas = list()
self.converged_values = list()
self.elapsed_time = 0
self.optimal_policy = None
def add(self, policy, v, step, step_time, reward, delta, converged):
self.policies.append(policy)
self.vs.append(v)
self.steps.append(step)
self.step_times.append(step_time)
self.rewards.append(reward)
self.deltas.append(delta)
self.converged_values.append(converged)
def to_csv(self, file_name):
with open(file_name, 'w') as f:
f.write("steps,time,reward,delta,converged\n")
writer = csv.writer(f, delimiter=',')
writer.writerows(zip(self.steps, self.step_times, self.rewards, self.deltas, self.converged_values))
def pickle_results(self, file_name_base, map_shape, step_size=1, only_last=False):
if only_last:
policy = np.reshape(np.argmax(self.policies[-1], axis=1), map_shape)
v = self.vs[-1].reshape(map_shape)
file_name = file_name_base.format('Last')
with open(file_name, 'wb') as f:
pickle.dump({'policy': policy, 'v': v}, f)
else:
l = len(self.policies)
if step_size == 1 and l > 20:
step_size = math.floor(l/20.0)
for i, policy in enumerate(self.policies):
if i % step_size == 0 or i == l-1:
v = self.vs[i].reshape(map_shape)
file_name = file_name_base.format(i)
if i == l-1:
file_name = file_name_base.format('Last')
with open(file_name, 'wb') as f:
pickle.dump({'policy': np.reshape(np.argmax(policy, axis=1), map_shape), 'v': v}, f)
def plot_policies_on_map(self, file_name_base, map_desc, color_map, direction_map, experiment, step_preamble,
details, step_size=1, only_last=False):
if only_last:
policy = np.reshape(np.argmax(self.policies[-1], axis=1), map_desc.shape)
v = self.vs[-1].reshape(map_desc.shape)
policy_file_name = file_name_base.format('Policy', 'Last')
value_file_name = file_name_base.format('Value', 'Last')
title = '{}: {} - {} {}'.format(details.env_readable_name, experiment, 'Last', step_preamble)
p = plot_policy_map(title, policy, map_desc, color_map, direction_map)
p.savefig(policy_file_name, format='png', dpi=150)
p.close()
p = plot_value_map(title, v, map_desc, color_map)
p.savefig(value_file_name, format='png', dpi=150)
p.close()
else:
l = len(self.policies)
if step_size == 1 and l > 20:
step_size = math.floor(l/20.0)
for i, policy in enumerate(self.policies):
if i % step_size == 0 or i == l-1:
policy = np.reshape(np.argmax(policy, axis=1), map_desc.shape)
v = self.vs[i].reshape(map_desc.shape)
file_name = file_name_base.format('Policy', i)
value_file_name = file_name_base.format('Value', i)
if i == l-1:
file_name = file_name_base.format('Policy', 'Last')
value_file_name = file_name_base.format('Value', 'Last')
title = '{}: {} - {} {}'.format(details.env_readable_name, experiment, step_preamble, i)
p = plot_policy_map(title, policy, map_desc, color_map, direction_map)
p.savefig(file_name, format='png', dpi=150)
p.close()
p = plot_value_map(title, v, map_desc, color_map)
p.savefig(value_file_name, format='png', dpi=150)
p.close()
def __str__(self):
return 'policies: {}, vs: {}, steps: {}, step_times: {}, deltas: {}, converged_values: {}'.format(
self.policies,
self.vs,
self.steps,
self.step_times,
self.deltas,
self.converged_values
)
class ExperimentDetails(object):
def __init__(self, env, env_name, env_readable_name, threads, seed):
self.env = env
self.env_name = env_name
self.env_readable_name = env_readable_name
self.threads = threads
self.seed = seed
class BaseExperiment(ABC):
def __init__(self, details, verbose=False, max_steps = MAX_STEPS):
self._details = details
self._verbose = verbose
self._max_steps = max_steps
@abstractmethod
def perform(self):
pass
def log(self, msg, *args):
"""
If the learner has verbose set to true, log the message with the given parameters using string.format
:param msg: The log message
:param args: The arguments
:return: None
"""
if self._verbose:
logger.info(msg.format(*args))
def run_solver_and_collect(self, solver, convergence_check_fn):
stats = ExperimentStats()
t = time.clock()
step_count = 0
optimal_policy = None
best_reward = float('-inf')
while not convergence_check_fn(solver, step_count) and step_count < self._max_steps:
policy, v, steps, step_time, reward, delta, converged = solver.step()
if reward > best_reward:
best_reward = reward
optimal_policy = policy
stats.add(policy, v, steps, step_time, reward, delta, converged)
step_count += 1
self.log('Steps: {} delta: {} converged: {}'.format(step_count, delta, converged))
stats.elapsed_time = time.clock() - t
stats.optimal_policy = stats.policies[-1] # optimal_policy
return stats
def run_policy_and_collect(self, solver, policy, num_trials=NUM_TRIALS):
stats = EvaluationStats()
for i in range(num_trials):
stats.add(np.mean(solver.run_policy(policy, self._max_steps)))
stats.compute()
return stats
``` |
{
"source": "Jiloc/django-hitcount",
"score": 2
} |
#### File: django-hitcount/hitcount/models.py
```python
import datetime
from django.db import models
from django.conf import settings
from django.db.models import F
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.dispatch import Signal
from django.utils.timezone import now
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
# SIGNALS #
delete_hit_count = Signal(providing_args=['save_hitcount',])
def delete_hit_count_callback(sender, instance,
save_hitcount=False, **kwargs):
'''
Custom callback for the Hit.delete() method.
Hit.delete(): removes the hit from the associated HitCount object.
Hit.delete(save_hitcount=True): preserves the hit for the associated
HitCount object.
'''
if not save_hitcount:
instance.hitcount.hits = F('hits') - 1
instance.hitcount.save()
delete_hit_count.connect(delete_hit_count_callback)
# EXCEPTIONS #
class DuplicateContentObject(Exception):
'If content_object already exists for this model'
pass
# MANAGERS #
class HitManager(models.Manager):
def filter_active(self, *args, **kwargs):
'''
Return only the 'active' hits.
How you count a hit/view will depend on personal choice: Should the
same user/visitor *ever* be counted twice? After a week, or a month,
or a year, should their view be counted again?
The default is to consider a visitor's hit still 'active' if they
return within the last seven days. After that the hit
will be counted again. So if one person visits once a week for a year,
they will add 52 hits to a given object.
Change how long the expiration is by adding to settings.py:
HITCOUNT_KEEP_HIT_ACTIVE = {'days' : 30, 'minutes' : 30}
Accepts days, seconds, microseconds, milliseconds, minutes,
hours, and weeks. It's creating a datetime.timedelta object.
'''
grace = getattr(settings, 'HITCOUNT_KEEP_HIT_ACTIVE', {'days':7})
period = now() - datetime.timedelta(**grace)
queryset = self.get_queryset()
queryset = queryset.filter(created__gte=period)
return queryset.filter(*args, **kwargs)
# MODELS #
class HitCount(models.Model):
'''
Model that stores the hit totals for any content object.
'''
hits = models.PositiveIntegerField(default=0)
modified = models.DateTimeField(default=now)
content_type = models.ForeignKey(ContentType,
verbose_name="content type",
related_name="content_type_set_for_%(class)s",)
object_pk = models.TextField('object ID')
content_object = GenericForeignKey('content_type', 'object_pk')
class Meta:
ordering = ( '-hits', )
#unique_together = (("content_type", "object_pk"),)
get_latest_by = "modified"
db_table = "hitcount_hit_count"
verbose_name = "Hit Count"
verbose_name_plural = "Hit Counts"
def __unicode__(self):
return u'%s' % self.content_object
def save(self, *args, **kwargs):
self.modified = now()
if not self.pk and self.object_pk and self.content_type:
# Because we are using a models.TextField() for `object_pk` to
# allow *any* primary key type (integer or text), we
# can't use `unique_together` or `unique=True` to guarantee
# that only one HitCount object exists for a given object.
#
# This is just a simple hack - if there is no `self.pk`
# set, it checks the database once to see if the `content_type`
# and `object_pk` exist together (uniqueness). Obviously, this
# is not foolproof - if someone sets their own `id` or `pk`
# when initializing the HitCount object, we could get a duplicate.
if HitCount.objects.filter(
object_pk=self.object_pk).filter(
content_type=self.content_type):
raise DuplicateContentObject, "A HitCount object already " + \
"exists for this content_object."
super(HitCount, self).save(*args, **kwargs)
def hits_in_last(self, **kwargs):
'''
Returns hit count for an object during a given time period.
This will only work for as long as hits are saved in the Hit database.
If you are purging your database after 45 days, for example, that means
that asking for hits in the last 60 days will return an incorrect
number as that the longest period it can search will be 45 days.
For example: hits_in_last(days=7).
Accepts days, seconds, microseconds, milliseconds, minutes,
hours, and weeks. It's creating a datetime.timedelta object.
'''
assert kwargs, "Must provide at least one timedelta arg (eg, days=1)"
period = now() - datetime.timedelta(**kwargs)
return self.hit_set.filter(created__gte=period).count()
def get_content_object_url(self):
'''
Django has this in its contrib.comments.model file -- seems worth
implementing though it may take a couple steps.
'''
pass
class Hit(models.Model):
'''
Model captures a single Hit by a visitor.
None of the fields are editable because they are all dynamically created.
Browsing the Hit list in the Admin will allow one to blacklist both
IP addresses and User Agents. Blacklisting simply causes those hits
to not be counted or recorded any more.
Depending on how long you set the HITCOUNT_KEEP_HIT_ACTIVE, and how long
you want to be able to use `HitCount.hits_in_last(days=30)` you should
probably also occasionally clean out this database using a cron job.
It could get rather large.
'''
created = models.DateTimeField(editable=False)
ip = models.CharField(max_length=40, editable=False)
session = models.CharField(max_length=40, editable=False)
user_agent = models.CharField(max_length=255, editable=False)
user = models.ForeignKey(AUTH_USER_MODEL, null=True, editable=False)
hitcount = models.ForeignKey(HitCount, editable=False)
class Meta:
ordering = ( '-created', )
get_latest_by = 'created'
def __unicode__(self):
return u'Hit: %s' % self.pk
def save(self, *args, **kwargs):
'''
The first time the object is created and saved, we increment
the associated HitCount object by one. The opposite applies
if the Hit is deleted.
'''
if not self.created:
self.hitcount.hits = F('hits') + 1
self.hitcount.save()
self.created = now()
super(Hit, self).save(*args, **kwargs)
objects = HitManager()
def delete(self, save_hitcount=False):
'''
If a Hit is deleted and save_hitcount=True, it will preserve the
HitCount object's total. However, under normal circumstances, a
delete() will trigger a subtraction from the HitCount object's total.
NOTE: This doesn't work at all during a queryset.delete().
'''
delete_hit_count.send(sender=self, instance=self,
save_hitcount=save_hitcount)
super(Hit, self).delete()
class BlacklistIP(models.Model):
ip = models.CharField(max_length=40, unique=True)
class Meta:
db_table = "hitcount_blacklist_ip"
verbose_name = "Blacklisted IP"
verbose_name_plural = "Blacklisted IPs"
def __unicode__(self):
return u'%s' % self.ip
class BlacklistUserAgent(models.Model):
user_agent = models.CharField(max_length=255, unique=True)
class Meta:
db_table = "hitcount_blacklist_user_agent"
verbose_name = "Blacklisted User Agent"
verbose_name_plural = "Blacklisted User Agents"
def __unicode__(self):
return u'%s' % self.user_agent
``` |
{
"source": "jilott/qt-dataflow",
"score": 2
} |
#### File: qtdataflow/tests/test.py
```python
__author__ = 'Tillsten'
from nose.tools import raises, assert_raises
from qtdataflow.model import Schema, Node
def test_schema():
schema = Schema()
n1 = Node()
n2 = Node()
n3 = Node()
schema.add_node(n1)
schema.add_node(n2)
schema.add_node(n3)
assert(n1 in schema.nodes)
assert(n2 in schema.nodes)
schema.delete_node(n1)
assert(n1 not in schema.nodes)
@raises(ValueError)
def test_schema_exception():
schema = Schema()
n1 = Node()
schema.add_node(n1)
schema.add_node(n1)
def test_schema_connections():
schema = Schema()
n1 = Node()
n2 = Node()
n3 = Node()
schema.add_node(n1)
schema.add_node(n2)
assert_raises(ValueError, schema.connect_nodes, n1, n1)
schema.connect_nodes(n1, n2)
assert((n1, n2) in schema.connections)
assert(n1.out_conn[0] is n2)
assert(n2.in_conn[0] is n1)
assert_raises(ValueError, schema.disconnect_nodes, n2, n1)
schema.connect_nodes(n3, n2)
schema.disconnect_nodes(n1, n2)
assert(schema.connections == [(n3, n2)])
assert(n1.out_conn == [])
assert(n2.in_conn == [n3])
def test_schema_tofile():
from StringIO import StringIO
s = Schema()
n1 = Node()
n2 = Node()
s.add_node(n1)
s.add_node(n2)
s.connect_nodes(n1, n2)
f = StringIO()
s.to_disk(f)
f.seek(0)
s2 = Schema()
s2.from_disk(f)
assert(len(s.connections) == len(s2.connections))
assert(len(s.nodes) == len(s2.nodes))
if __name__ == '__main__':
import nose
nose.run()
``` |
{
"source": "JILP/morse",
"score": 3
} |
#### File: app/api/translate.py
```python
import flask
from meli.morse.app.api import api
from meli.morse.app.api.errors import bad_request
from meli.morse.app.api.errors import validation_error
from meli.morse.app.exceptions import ValidationError
from meli.morse.domain.morse import MorseFormat
from meli.morse.domain.morse import MorseTranslator
@api.route('/2text', methods=['POST'])
def translate2text():
try:
verify_request(flask.request, target='text')
except ValidationError as verr:
return validation_error(verr)
req_params = flask.request.get_json()
msg = req_params['msg']
mformat = MorseFormat()
msg_format = msg.get('format')
if msg_format:
mformat.dot = msg_format.get('dot', mformat.dot)
mformat.dash = msg_format.get('dash', mformat.dash)
mformat.intra_char = msg_format.get('intra_char', mformat.intra_char)
mformat.inter_char = msg_format.get('inter_char', mformat.inter_char)
mformat.inter_word = msg_format.get('inter_word', mformat.inter_word)
morse_translator = MorseTranslator(morse_format=mformat)
src = msg['src']
content = msg['content']
try:
if src == 'morse':
translation = morse_translator.morse2text(content)
else:
translation = morse_translator.bits2text(content)
except ValueError as verr:
return bad_request(str(verr))
res = {
'msg': {
'src': 'text',
'content': translation
}
}
return flask.jsonify(res)
@api.route('/2morse', methods=['POST'])
def translate2morse():
try:
verify_request(flask.request, target='morse')
except ValidationError as verr:
return validation_error(verr)
req_params = flask.request.get_json()
msg = req_params['msg']
mformat = MorseFormat()
msg_format = msg.get('format')
if msg_format:
mformat.dot = msg_format.get('dot', mformat.dot)
mformat.dash = msg_format.get('dash', mformat.dash)
mformat.intra_char = msg_format.get('intra_char', mformat.intra_char)
mformat.inter_char = msg_format.get('inter_char', mformat.inter_char)
mformat.inter_word = msg_format.get('inter_word', mformat.inter_word)
morse_translator = MorseTranslator(morse_format=mformat)
src = msg['src']
content = msg['content']
try:
if src == 'text':
translation = morse_translator.text2morse(content)
else:
translation = morse_translator.bits2morse(content)
except ValueError as verr:
return bad_request(str(verr))
res = {
'msg': {
'src': 'morse',
'content': translation,
'format': mformat.to_dict()
}
}
return flask.jsonify(res)
@api.route('/2bits', methods=['POST'])
def translate2bits():
try:
verify_request(flask.request, target='bits')
except ValidationError as verr:
return validation_error(verr)
req_params = flask.request.get_json()
msg = req_params['msg']
mformat = MorseFormat()
msg_format = msg.get('format')
if msg_format:
mformat.dot = msg_format.get('dot', mformat.dot)
mformat.dash = msg_format.get('dash', mformat.dash)
mformat.intra_char = msg_format.get('intra_char', mformat.intra_char)
mformat.inter_char = msg_format.get('inter_char', mformat.inter_char)
mformat.inter_word = msg_format.get('inter_word', mformat.inter_word)
morse_translator = MorseTranslator(morse_format=mformat)
src = msg['src']
content = msg['content']
try:
if src == 'text':
translation = morse_translator.text2bits(content)
else:
translation = morse_translator.morse2bits(content)
except ValueError as verr:
return bad_request(str(verr))
#print()
#print('#'*80)
#print(mformat.to_dict())
#print('#'*80)
res = {
'msg': {
'src': 'bits',
'content': translation
}
}
return flask.jsonify(res)
def verify_request(req, target):
if req.content_type != 'application/json':
raise ValidationError(f'Invalid content type: "{req.content_type}"')
req_params = req.get_json()
if 'msg' not in req_params:
raise ValidationError('Missing msg attribute')
msg = req_params['msg']
if 'content' not in msg or 'src' not in msg:
raise ValidationError('Message not valid')
src = msg['src']
if src == target or src not in ('text', 'morse', 'bits'):
raise ValidationError('Message source not valid')
content = msg['content']
limit = flask.current_app.config['MESSAGE_SIZE_LIMIT']
if len(content) > limit:
raise ValidationError(f'Character limit exceeded ({limit})')
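# --- Illustrative request payload (assumed shape; it matches the checks in verify_request) ---
# POST <api prefix>/2text with Content-Type: application/json and a body such as
# {
#   "msg": {
#     "src": "morse",
#     "content": ".... --- .-.. .-",
#     "format": {"dot": ".", "dash": "-", "inter_char": " ", "inter_word": "   "}
#   }
# }
# should come back as {"msg": {"src": "text", "content": "HOLA"}} (letter case per the
# translator's character table).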
```
#### File: meli/morse/config.py
```python
import logging
from logging import StreamHandler
import os
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY') or 'Th1s 5h0uld b3 h4rd 70 gu355'
MESSAGE_SIZE_LIMIT = 1000
@staticmethod
def init_app(app):
pass
class DevConfig(Config):
DEBUG = True
class TestConfig(Config):
TESTING = True
class ProdConfig(Config):
PROD = True
class HerokuConfig(ProdConfig):
@classmethod
def init_app(cls, app):
ProdConfig.init_app(app)
# log to stderr
file_handler = StreamHandler()
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
def config(env):
cfg = {
'dev': DevConfig,
'test': TestConfig,
'prod': ProdConfig,
'heroku': HerokuConfig,
'default': DevConfig
}
return cfg[env]
```
#### File: domain/decoder/naive_decoder.py
```python
from meli.morse.domain.timing.international import InternationalTiming
from meli.morse.domain.decoder.bit_decoder import BitDecoder
class NaiveBitDecoder(BitDecoder):
def decode(self, bit_msg, morse_format):
if not bit_msg or '1' not in bit_msg:
raise ValueError(f'Invalid bit sequence: "{bit_msg}"')
min_len = 99999
# Remove enclosing zeroes transmission noise
bit_msg = bit_msg.strip('0')
parsed = []
prev = None
count = 1
for bit in bit_msg:
if bit not in ('0', '1'):
raise ValueError(f'Invalid bit: "{bit}"')
if bit == prev:
count += 1
continue
if prev == '1' and count < min_len:
min_len = count
if prev is not None: # Avoid first initial None element
parsed.append((prev, count))
count = 1
prev = bit
# Process last '1' bits sequence, (zeros stripped)
if count < min_len:
min_len = count
parsed.append((bit, count))
timing = InternationalTiming(min_len)
normalized = []
for bit, count in parsed:
if bit == '1':
is_dot = abs(timing.dot - count) <= abs(timing.dash - count)
seq = morse_format.dot if is_dot else morse_format.dash
else:
is_intra_char = (abs(timing.intra_char - count)
< abs(timing.inter_char - count))
is_inter_word = (abs(timing.inter_word - count)
< abs(timing.inter_char - count))
if is_intra_char:
seq = morse_format.intra_char
elif is_inter_word:
seq = morse_format.inter_word
else:
seq = morse_format.inter_char
normalized.append(seq)
return ''.join(normalized)
```
#### File: domain/timing/morse_timing.py
```python
class MorseTiming:
def __init__(self, dot, dash, intra_char, inter_char, inter_word):
self.dot = dot
self.dash = dash
self.intra_char = intra_char
self.inter_char = inter_char
self.inter_word = inter_word
``` |
{
"source": "jilsahm/FireWallBuilder",
"score": 3
} |
#### File: FireWallBuilder/modules/sanitizer.py
```python
from enum import Enum
import re
class Patterns( Enum ):
PORTS = re.compile( "^(0|[1-9][0-9]{0,3}|[1-5][0-9]{4}|6([0-4][0-9]{3}|5([0-4][0-9]{2}|5([0-2][0-9]|3[0-5]))))(,(0|[1-9][0-9]{0,3}|[1-5][0-9]{4}|6([0-4][0-9]{3}|5([0-4][0-9]{2}|5([0-2][0-9]|3[0-5]))))){0,14}$" )
IP = re.compile( "^([1-9]?[0-9]|1[0-9][0-9]|2([0-4][0-9]|5[0-5]))(\.([1-9]?[0-9]|1[0-9][0-9]|2([0-4][0-9]|5[0-5]))){3}((:([1-9]?[0-9]|1[0-9][0-9]|2([0-4][0-9]|5[0-5]))(\.([1-9]?[0-9]|1[0-9][0-9]|2([0-4][0-9]|5[0-5]))){3})|\/([1-2]?[0-9]|3[0-2]))?$" )
BITMASK = re.compile( "^[0-7]$" )
COMMENTS = re.compile( "^$" )
COMMA = re.compile( "," )
ID = re.compile( "[1-9][0-9]{0,7}" )
def isValidRule( rules ):
for rule in rules:
if Patterns.PORTS.value.search( rule.ports ) is None:
return False
if Patterns.IP.value.search( rule.ip_from ) is None:
return False
if Patterns.IP.value.search( rule.ip_to ) is None:
return False
if Patterns.BITMASK.value.search( str( rule.directions.value ) ) is None:
return False
if Patterns.BITMASK.value.search( str( rule.protocols.value ) ) is None:
return False
return True
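# --- Illustrative checks (added example; Rule objects come from modules.options) ---
# PORTS accepts a comma separated list of up to 15 port numbers in 0..65535, and IP
# accepts a single address, an address range ("a:b") or CIDR notation ("a/mask"):
#   Patterns.PORTS.value.search("80,443,8080")   -> match
#   Patterns.PORTS.value.search("70000")         -> None (out of range)
#   Patterns.IP.value.search("192.168.0.1/24")   -> match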
if __name__ == "__main__":
print( Patterns.PORTS.value )
```
#### File: jilsahm/FireWallBuilder/server.py
```python
from flask import render_template, Flask, request, abort, make_response
from modules.options import mapJsonToRule, buildFirewall, Firewall
from modules.sanitizer import isValidRule, Patterns
from modules.dbhandler import buildDatabase, Connection
import json
app = Flask(__name__)
db = "./database/firewall.db"
class ContentLength():
def __init__( self, maxSize ):
self.maxSize = maxSize
self.route = None
def wrappedRoute( self, *args, **kwargs ):
length = request.content_length
if length is not None and length > self.maxSize:
abort( 413 )
return self.route( *args, **kwargs )
def __call__( self, route ):
self.route = route
return self.wrappedRoute
@app.route( "/" )
def index():
return render_template( "index.html" )
@app.route( "/compiler", methods=["POST"] )
@ContentLength( 4096 )
def compiler():
rules = mapJsonToRule( request.get_json() )
if isValidRule( rules ):
outputScript = buildFirewall( rules )
else:
abort( 500 )
return outputScript
@ContentLength( 4096 )
@app.route( "/save", methods=["POST"] )
def save():
rules = mapJsonToRule( request.get_json() )
if isValidRule( rules ):
if 0 >= len( rules ):
return "There has to be at least one rule"
firewall = Firewall( "Testtitile", "0000-00-00", rules )
with Connection( db ) as cursor:
firewall.insert( cursor )
return "Firewall saved"
@ContentLength( 64 )
@app.route( "/show" )
def showSavedFirewalls():
firewall = []
with Connection( db ) as cursor:
firewall = Firewall.fetchAll( cursor )
return json.dumps( firewall )
@ContentLength( 64 )
@app.route( "/load/<fid>", methods=["GET"] )
def loadFirewall( fid ):
if Patterns.ID.value.search( fid ) is None:
abort( 500 )
with Connection( db ) as cursor:
return Firewall.fetchById( fid, cursor )
abort( 500 )
@ContentLength( 64 )
@app.route( "/remove/<fid>", methods=["GET"] )
def removeFirewall( fid ):
if Patterns.ID.value.search( fid ) is None:
abort( 500 )
# TODO
return "TODO"
if __name__ == "__main__":
#app.config['MAX_CONTENT_LENGTH'] = 1024 * 1024
buildDatabase( db, "./database/dbstructure.sql" )
app.run( host= '0.0.0.0' )
``` |
{
"source": "Jim00000/waterwave-simulator",
"score": 2
} |
#### File: Jim00000/waterwave-simulator/wave.py
```python
import numpy as np
from mayavi import mlab
dt = 0.04
C = 16
K = 0.1
height = 6
grid = 100
old_H = np.zeros([grid, grid], dtype=np.float64)
H = np.ones([grid, grid], dtype=np.float64)
new_H = np.zeros([grid, grid], dtype=np.float64)
sz = 31
# small peak
z = np.linspace(-1,1,sz)
x = np.ones((sz, sz))
for i in range(sz):
for j in range(sz):
x[i][j] = z[i]
y = x.T
TMP_H = height * np.exp(-5 * (x ** 2 + y ** 2))
H[20:20+sz, 20:20+sz] += np.copy(TMP_H)
old_H = np.copy(H)
x = np.arange(grid)
y = np.arange(grid)
X, Y = np.meshgrid(x, y)
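# The update below integrates the damped 2-D wave equation
#     h_tt + K * h_t = C**2 * (h_xx + h_yy)
# with central differences in time (step dt) and unit grid spacing.  Writing P for the
# discrete Laplacian of H at a cell and solving
#     (new - 2*H + old) / dt**2 + K * (new - old) / (2*dt) = C**2 * P
# for `new` gives
#     new = (2 * (C*dt)**2 * P + 4*H - old * (2 - K*dt)) / (2 + K*dt),
# which is exactly the expression used in update(); edge and corner cells apply the
# same formula with a reduced-neighbour Laplacian.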
def update():
global H, old_H, new_H
# Centroid
for i in range(1, grid - 1):
for j in range(1, grid - 1):
P = H[i + 1][j] + H[i - 1][j] + H[i][j + 1] + H[i][j - 1] - 4 * H[i][j]
new_H[i][j] = ( pow(C * dt, 2) * P * 2 + 4 * H[i][j] - old_H[i][j] * (2 - K * dt) ) / (2 + K * dt)
# Four edges
for i in range(1, grid - 1):
P1 = H[i + 1][0] + H[i - 1][0] + H[i][1] - 3 * H[i][0]
P2 = H[i + 1][grid - 1] + H[i - 1][grid - 1] + H[i][grid - 2] - 3 * H[i][grid - 1]
P3 = H[0 + 1][i] + H[0][i + 1] + H[0][i - 1] - 3 * H[0][i]
P4 = H[grid - 2][i] + H[grid - 1][i + 1] + H[grid - 1][i - 1] - 3 * H[grid - 1][i]
new_H[i][0] = ( pow(C * dt, 2) * P1 * 2 + 4 * H[i][0] - old_H[i][0] * (2 - K * dt) ) / (2 + K * dt)
new_H[i][grid - 1] = ( pow(C * dt, 2) * P2 * 2 + 4 * H[i][grid - 1] - old_H[i][grid - 1] * (2 - K * dt) ) / (2 + K * dt)
new_H[0][i] = ( pow(C * dt, 2) * P3 * 2 + 4 * H[0][i] - old_H[0][i] * (2 - K * dt) ) / (2 + K * dt)
new_H[grid - 1][i] = ( pow(C * dt, 2) * P4 * 2 + 4 * H[grid - 1][i] - old_H[grid - 1][i] * (2 - K * dt) ) / (2 + K * dt)
# Four corners
P1 = H[1][0] + H[0][0 + 1] - 2 * H[0][0]
P2 = H[1][grid - 1] + H[0][grid - 2] - 2 * H[0][grid - 1]
P3 = H[grid - 2][0] + H[grid - 1][1] - 2 * H[grid - 1][0]
P4 = H[grid - 2][grid - 1] + H[grid - 1][grid - 2] - 2 * H[grid - 1][grid - 1]
new_H[0][0] = ( pow(C * dt, 2) * P1 * 2 + 4 * H[0][0] - old_H[0][0] * (2 - K * dt) ) / (2 + K * dt)
new_H[0][grid-1] = ( pow(C * dt, 2) * P2 * 2 + 4 * H[0][grid-1] - old_H[0][grid-1] * (2 - K * dt) ) / (2 + K * dt)
new_H[grid-1][0] = ( pow(C * dt, 2) * P3 * 2 + 4 * H[grid-1][0] - old_H[grid-1][0] * (2 - K * dt) ) / (2 + K * dt)
new_H[grid - 1][grid - 1] = ( pow(C * dt, 2) * P4 * 2 + 4 * H[grid - 1][grid - 1] - old_H[grid - 1][grid - 1] * (2 - K * dt) ) / (2 + K * dt)
old_H = np.copy(H)
H = np.copy(new_H)
plt = mlab.surf(H, warp_scale='auto', colormap=u'ocean')
@mlab.animate(delay=10)
def animation():
f = mlab.gcf()
while True:
update()
plt.mlab_source.set(scalars=H)
f.scene.render()
yield
animation()
mlab.title('sequential in Python')
mlab.show()
``` |
{
"source": "jim108dev/pebble-flashcards",
"score": 3
} |
#### File: pebble-flashcards/evaluation/prepare_next_session.py
```python
import logging
from datetime import date
import numpy as np
import pandas as pd
from pandas.core.dtypes.missing import isna
from pandas.core.frame import DataFrame
from algo_sm2 import supermemo_2
from util import get_conf
logging.basicConfig(level=logging.DEBUG)
OUTPUT_COLUMNS = ["id", "text1", "text2", "feedback","start", "stop"]
NOW = date.today()
def countdown(last_stop, waiting_period):
dt_last_stop = date.fromtimestamp(int(last_stop))
delta = NOW - dt_last_stop
days_past = delta.days
return waiting_period - days_past
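# Example (illustrative numbers): if the last repetition finished 3 days ago and
# supermemo_2 scheduled a 6-day interval, countdown() returns 3 and the card is not
# yet due; main() keeps only rows with countdown <= 0 for the next session.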
def check_len(df:DataFrame):
too_long_mask = (df['id'].astype(str).str.len() > 15) | (df['text1'].astype(str).str.len() > 84) | (df['text2'].astype(str).str.len() > 84)
too_long_df = df.loc[too_long_mask]
if not too_long_df.empty:
logging.warning("The following records are too long, the cannot be fully displayed:")
logging.warning(too_long_df)
def main(conf):
df = pd.read_csv(conf.history_filename, sep=';')
df['waiting_period'] = df['feedback_history'].apply(
lambda cs: 0 if isna(cs) else supermemo_2([int(float(v)) for v in str(cs).split('|')]))
df['last_stop'] = df.apply(lambda x: pd.NA if isna(x['stop_history']) else int(float(str(x['stop_history']).split('|')[-1])), axis=1)
df['last_start'] = df.apply(lambda x: pd.NA if isna(x['start_history']) else int(float(str(x['start_history']).split('|')[-1])), axis=1)
df['countdown'] = df.apply(lambda x: 0 if isna(x['stop_history']) else countdown(x['last_stop'], x['waiting_period']), axis=1)
df = df[df['buried'] == False]
df = df[df['countdown'] <= 0]
df['duration'] = df.apply(lambda x: 0 if isna(x['last_start']) or isna(x['last_stop']) else x['last_stop'] - x['last_start'], axis=1)
#df = df.sort_values(by=['duration'], ascending=False)
df = df.nsmallest(conf.max_records, 'countdown')
ref_df = pd.read_csv(conf.reference_filename, sep=';')
check_len(ref_df)
df = df.merge(ref_df, on='id')
df['feedback'] = 0
df['start'] = 0
df['stop'] = 0
#https://stackoverflow.com/questions/29576430/shuffle-dataframe-rows
df = df.sample(frac=1, replace=False).reset_index(drop=True)
logging.debug(df.head())
df.to_csv(path_or_buf=conf.next_session_filename, columns=OUTPUT_COLUMNS, index=False, sep=";")
if __name__ == "__main__":
main(get_conf())
``` |
{
"source": "jim1949/fictional-record",
"score": 3
} |
#### File: 20191113/app/app.py
```python
from flask import Flask, render_template, Response, request
import os
app = Flask(__name__)
@app.route('/')
def index():
"""Frontpage"""
return render_template('index.html')
#robot 001
@app.route('/aIn', methods=['POST', 'GET'])
def ain():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
return (''),204
@app.route('/aOut', methods=['POST', 'GET'])
def aout():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
return (''),204
@app.route('/aStop', methods=['POST', 'GET'])
def astop():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
return (''),204
#robot 001
@app.route('/aIn2', methods=['POST', 'GET'])
def ain2():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
return (''),204
@app.route('/aOut2', methods=['POST', 'GET'])
def aout2():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
return (''),204
@app.route('/aStop2', methods=['POST', 'GET'])
def astop2():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
return (''),204
# robot 002
@app.route('/bIn', methods=['POST', 'GET'])
def bin2():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
return (''),204
@app.route('/bOut', methods=['POST', 'GET'])
def bout2():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
return (''),204
@app.route('/bStop', methods=['POST', 'GET'])
def bstop2():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
return (''),204
# robot 002
@app.route('/bIn2', methods=['POST', 'GET'])
def bin():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
return (''),204
@app.route('/bOut2', methods=['POST', 'GET'])
def bout():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
return (''),204
@app.route('/bStop2', methods=['POST', 'GET'])
def bstop():
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
return (''),204
if __name__ == '__main__':
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.4:11311")
a = os.system(r"export ROS_MASTER_URI=http://192.168.3.15:11311")
app.run(host='0.0.0.0',port=1234, debug=False, threaded=True)
``` |
{
"source": "jim22k/grblas-dev",
"score": 3
} |
#### File: backends/python/exceptions.py
```python
class GrB_Info:
GrB_SUCCESS = object()
# API Errors
GrB_UNINITIALIZED_OBJECT = object()
GrB_NULL_POINTER = object()
GrB_INVALID_VALUE = object()
GrB_INVALID_INDEX = object()
GrB_DOMAIN_MISMATCH = object()
GrB_DIMENSION_MISMATCH = object()
GrB_OUTPUT_NOT_EMPTY = object()
GrB_NO_VALUE = object()
# Execution Errors
GrB_OUT_OF_MEMORY = object()
GrB_INSUFFICIENT_SPACE = object()
GrB_INVALID_OBJECT = object()
GrB_INDEX_OUT_OF_BOUNDS = object()
GrB_PANIC = object()
class GraphBlasException(Exception):
pass
last_error_message = None
def GrB_error():
return last_error_message
def return_error(error, msg=""):
global last_error_message
last_error_message = msg
return error
```
#### File: backends/python/matrix.py
```python
import numba
import numpy as np
from scipy.sparse import csr_matrix
from .base import BasePointer, GraphBlasContainer
from .context import handle_panic, return_error
from .exceptions import GrB_Info
class MatrixPtr(BasePointer):
def set_matrix(self, matrix):
self.instance = matrix
class Matrix(GraphBlasContainer):
def __init__(self, matrix):
assert isinstance(matrix, csr_matrix)
self.matrix = matrix
@classmethod
def new_from_dtype(cls, dtype, nrows, ncols):
matrix = csr_matrix((nrows, ncols), dtype=dtype)
return cls(matrix)
@classmethod
def new_from_existing(cls, other):
matrix = csr_matrix(other)
return cls(matrix)
@classmethod
def get_pointer(cls):
return MatrixPtr()
@handle_panic
def Matrix_new(A: MatrixPtr, dtype: type, nrows: int, ncols: int):
if nrows <= 0:
return_error(GrB_Info.GrB_INVALID_VALUE, "nrows must be > 0")
if ncols <= 0:
return_error(GrB_Info.GrB_INVALID_VALUE, "ncols must be > 0")
matrix = Matrix.new_from_dtype(dtype, nrows, ncols)
A.set_matrix(matrix)
return GrB_Info.GrB_SUCCESS
@handle_panic
def Matrix_dup(C: MatrixPtr, A: Matrix):
matrix = Matrix.new_from_existing(A)
C.set_matrix(matrix)
return GrB_Info.GrB_SUCCESS
@handle_panic
def Matrix_resize(C: Matrix, nrows: int, ncols: int):
if nrows <= 0:
return_error(GrB_Info.GrB_INVALID_VALUE, "nrows must be > 0")
if ncols <= 0:
return_error(GrB_Info.GrB_INVALID_VALUE, "ncols must be > 0")
C.matrix.resize((nrows, ncols))
return GrB_Info.GrB_SUCCESS
# TODO: this is just the essential code; it needs to handle descriptors, masks, accumulators, etc
@handle_panic
def mxm(C, A, B, semiring):
cr, cc = C.shape
ar, ac = A.shape
br, bc = B.shape
if cr != ar:
return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "C.nrows != A.nrows")
if cc != bc:
return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "C.ncols != B.ncols")
if ac != br:
return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "A.nrows != B.ncols")
b = B.tocsc()
d, i, ip = _sparse_matmul(
A.data,
A.indices,
A.indptr,
b.data,
b.indices,
b.indptr,
semiring.plus.op,
semiring.times,
semiring.plus.identity,
C.dtype,
)
C.data = d
C.indices = i
C.indptr = ip
return GrB_Info.GrB_SUCCESS
@numba.njit
def _sparse_matmul(
a_data,
a_indices,
a_indptr,
b_data,
b_indices,
b_indptr,
plus,
times,
identity,
dtype,
):
# Final array size is unknown, so we give ourselves room and then adjust on the fly
tmp_output_size = a_data.size * 2
data = np.empty((tmp_output_size,), dtype=dtype)
indices = np.empty((tmp_output_size,), dtype=a_indices.dtype)
indptr = np.empty((a_indptr.size,), dtype=a_indptr.dtype)
output_counter = 0
for iptr in range(a_indptr.size - 1):
indptr[iptr] = output_counter
for jptr in range(b_indptr.size - 1):
a_counter = a_indptr[iptr]
a_stop = a_indptr[iptr + 1]
b_counter = b_indptr[jptr]
b_stop = b_indptr[jptr + 1]
val = identity
nonempty = False
while a_counter < a_stop and b_counter < b_stop:
a_k = a_indices[a_counter]
b_k = b_indices[b_counter]
if a_k == b_k:
val = plus(val, times(a_data[a_counter], b_data[b_counter]))
nonempty = True
a_counter += 1
b_counter += 1
elif a_k < b_k:
a_counter += 1
else:
b_counter += 1
if nonempty:
if output_counter >= tmp_output_size:
# We filled up the allocated space; copy existing data to a larger array
tmp_output_size *= 2
new_data = np.empty((tmp_output_size,), dtype=data.dtype)
new_indices = np.empty((tmp_output_size,), dtype=indices.dtype)
new_data[:output_counter] = data[:output_counter]
new_indices[:output_counter] = indices[:output_counter]
data = new_data
indices = new_indices
data[output_counter] = val
indices[output_counter] = jptr
output_counter += 1
# Add final entry to indptr (should indicate nnz in the output)
nnz = output_counter
indptr[iptr + 1] = nnz
# Trim output arrays
data = data[:nnz]
indices = indices[:nnz]
return (data, indices, indptr)
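# --- Note (added comment) ---
# _sparse_matmul walks each CSR row of A against each CSC column of B, combining
# matching indices with the semiring's `times` and folding the products with `plus`
# starting from the monoid identity; with the usual plus/times semiring this reduces
# to ordinary sparse matrix multiplication emitted in CSR form.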
```
#### File: grblas/binary/numpy.py
```python
import numpy as np
from .. import operator
_binary_names = {
# Math operations
"add",
"subtract",
"multiply",
"divide",
"logaddexp",
"logaddexp2",
"true_divide",
"floor_divide",
"power",
"remainder",
"mod",
"fmod",
"gcd",
"lcm",
# Trigonometric functions
"arctan2",
"hypot",
# Bit-twiddling functions
"bitwise_and",
"bitwise_or",
"bitwise_xor",
"left_shift",
"right_shift",
# Comparison functions
"greater",
"greater_equal",
"less",
"less_equal",
"not_equal",
"equal",
"logical_and",
"logical_or",
"logical_xor",
"maximum",
"minimum",
"fmax",
"fmin",
# Floating functions
"copysign",
"nextafter",
"ldexp",
}
__all__ = list(_binary_names)
def __dir__():
return __all__
def __getattr__(name):
if name not in _binary_names:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
numpy_func = getattr(np, name)
operator.BinaryOp.register_new(f"numpy.{name}", lambda x, y: numpy_func(x, y))
return globals()[name]
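# --- Illustrative usage (assumes the surrounding grblas package layout) ---
# Attribute access triggers lazy registration of the matching numpy ufunc, e.g.
#   from grblas.binary import numpy as npbinary
#   op = npbinary.logaddexp   # registers and returns a BinaryOp wrapping np.logaddexp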
```
#### File: grblas-dev/grblas/matrix.py
```python
import itertools
import numpy as np
from . import ffi, lib, backend, binary, monoid, semiring
from .base import BaseExpression, BaseType, call
from .dtypes import lookup_dtype, unify, _INDEX
from .exceptions import check_status, NoValue
from .expr import AmbiguousAssignOrExtract, IndexerResolver, Updater
from .mask import StructuralMask, ValueMask
from .operator import get_typed_op
from .vector import Vector, VectorExpression
from .scalar import Scalar, ScalarExpression, _CScalar
from .utils import (
ints_to_numpy_buffer,
values_to_numpy_buffer,
wrapdoc,
_CArray,
_Pointer,
class_property,
)
from . import expr
from ._ss.matrix import ss
ffi_new = ffi.new
class Matrix(BaseType):
"""
GraphBLAS Sparse Matrix
High-level wrapper around GrB_Matrix type
"""
__slots__ = "_nrows", "_ncols", "ss"
_is_transposed = False
_name_counter = itertools.count()
def __init__(self, gb_obj, dtype, *, name=None):
if name is None:
name = f"M_{next(Matrix._name_counter)}"
self._nrows = None
self._ncols = None
super().__init__(gb_obj, dtype, name)
# Add ss extension methods
self.ss = ss(self)
def __del__(self):
gb_obj = getattr(self, "gb_obj", None)
if gb_obj is not None:
# it's difficult/dangerous to record the call, b/c `self.name` may not exist
check_status(lib.GrB_Matrix_free(gb_obj), self)
def __repr__(self, mask=None):
from .formatting import format_matrix
from .recorder import skip_record
with skip_record:
return format_matrix(self, mask=mask)
def _repr_html_(self, mask=None):
from .formatting import format_matrix_html
from .recorder import skip_record
with skip_record:
return format_matrix_html(self, mask=mask)
def __reduce__(self):
# SS, SuiteSparse-specific: export
pieces = self.ss.export(raw=True)
return self._deserialize, (pieces, self.name)
@staticmethod
def _deserialize(pieces, name):
# SS, SuiteSparse-specific: import
return Matrix.ss.import_any(name=name, **pieces)
@property
def S(self):
return StructuralMask(self)
@property
def V(self):
return ValueMask(self)
def __delitem__(self, keys):
del Updater(self)[keys]
def __getitem__(self, keys):
resolved_indexes = IndexerResolver(self, keys)
return AmbiguousAssignOrExtract(self, resolved_indexes)
def __setitem__(self, keys, delayed):
Updater(self)[keys] = delayed
def __contains__(self, index):
extractor = self[index]
if not extractor.resolved_indexes.is_single_element:
raise TypeError(
f"Invalid index to Matrix contains: {index!r}. A 2-tuple of ints is expected. "
"Doing `(i, j) in my_matrix` checks whether a value is present at that index."
)
scalar = extractor.new(name="s_contains")
return not scalar.is_empty
def __iter__(self):
rows, columns, values = self.to_values()
return zip(rows.flat, columns.flat)
def isequal(self, other, *, check_dtype=False):
"""
        Check for exact equality (same shape, same values, same missing entries)
        If `check_dtype` is True, also checks that dtypes match
        For equality of floating point Matrices, consider using `isclose`
"""
self._expect_type(other, (Matrix, TransposedMatrix), within="isequal", argname="other")
if check_dtype and self.dtype != other.dtype:
return False
if self._nrows != other._nrows:
return False
if self._ncols != other._ncols:
return False
if self._nvals != other._nvals:
return False
if check_dtype:
common_dtype = self.dtype
else:
common_dtype = unify(self.dtype, other.dtype)
matches = Matrix.new(bool, self._nrows, self._ncols, name="M_isequal")
matches << self.ewise_mult(other, binary.eq[common_dtype])
# ewise_mult performs intersection, so nvals will indicate mismatched empty values
if matches._nvals != self._nvals:
return False
# Check if all results are True
return matches.reduce_scalar(monoid.land).value
def isclose(self, other, *, rel_tol=1e-7, abs_tol=0.0, check_dtype=False):
"""
        Check for approximate equality (including same shape and same missing entries)
If `check_dtype` is True, also checks that dtypes match
Closeness check is equivalent to `abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)`
"""
self._expect_type(other, (Matrix, TransposedMatrix), within="isclose", argname="other")
if check_dtype and self.dtype != other.dtype:
return False
if self._nrows != other._nrows:
return False
if self._ncols != other._ncols:
return False
if self._nvals != other._nvals:
return False
matches = self.ewise_mult(other, binary.isclose(rel_tol, abs_tol)).new(
dtype=bool, name="M_isclose"
)
# ewise_mult performs intersection, so nvals will indicate mismatched empty values
if matches._nvals != self._nvals:
return False
# Check if all results are True
return matches.reduce_scalar(monoid.land).value
@property
def nrows(self):
n = ffi_new("GrB_Index*")
scalar = Scalar(n, _INDEX, name="s_nrows", empty=True)
call("GrB_Matrix_nrows", [_Pointer(scalar), self])
return n[0]
@property
def ncols(self):
n = ffi_new("GrB_Index*")
scalar = Scalar(n, _INDEX, name="s_ncols", empty=True)
call("GrB_Matrix_ncols", [_Pointer(scalar), self])
return n[0]
@property
def shape(self):
return (self._nrows, self._ncols)
@property
def nvals(self):
n = ffi_new("GrB_Index*")
scalar = Scalar(n, _INDEX, name="s_nvals", empty=True)
call("GrB_Matrix_nvals", [_Pointer(scalar), self])
return n[0]
@property
def _nvals(self):
"""Like nvals, but doesn't record calls"""
n = ffi_new("GrB_Index*")
check_status(lib.GrB_Matrix_nvals(n, self.gb_obj[0]), self)
return n[0]
@property
def T(self):
return TransposedMatrix(self)
def clear(self):
call("GrB_Matrix_clear", [self])
def resize(self, nrows, ncols):
nrows = _CScalar(nrows)
ncols = _CScalar(ncols)
call("GrB_Matrix_resize", [self, nrows, ncols])
self._nrows = nrows.scalar.value
self._ncols = ncols.scalar.value
def to_values(self, *, dtype=None):
"""
GrB_Matrix_extractTuples
Extract the rows, columns and values as a 3-tuple of numpy arrays
"""
nvals = self._nvals
rows = _CArray(size=nvals, name="&rows_array")
columns = _CArray(size=nvals, name="&columns_array")
values = _CArray(size=nvals, dtype=self.dtype, name="&values_array")
n = ffi_new("GrB_Index*")
scalar = Scalar(n, _INDEX, name="s_nvals", empty=True)
scalar.value = nvals
call(
f"GrB_Matrix_extractTuples_{self.dtype.name}",
[rows, columns, values, _Pointer(scalar), self],
)
values = values.array
if dtype is not None:
dtype = lookup_dtype(dtype)
if dtype != self.dtype:
values = values.astype(dtype.np_type) # copies
return (
rows.array,
columns.array,
values,
)
def build(self, rows, columns, values, *, dup_op=None, clear=False, nrows=None, ncols=None):
# TODO: accept `dtype` keyword to match the dtype of `values`?
rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices")
columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices")
values, dtype = values_to_numpy_buffer(values, self.dtype)
n = values.size
if rows.size != n or columns.size != n:
raise ValueError(
f"`rows` and `columns` and `values` lengths must match: "
f"{rows.size}, {columns.size}, {values.size}"
)
if clear:
self.clear()
if nrows is not None or ncols is not None:
if nrows is None:
nrows = self.nrows
if ncols is None:
ncols = self.ncols
self.resize(nrows, ncols)
if n == 0:
return
dup_op_given = dup_op is not None
if not dup_op_given:
dup_op = binary.plus
dup_op = get_typed_op(dup_op, self.dtype)
if dup_op.opclass == "Monoid":
dup_op = dup_op.binaryop
else:
self._expect_op(dup_op, "BinaryOp", within="build", argname="dup_op")
rows = _CArray(rows)
columns = _CArray(columns)
values = _CArray(values, dtype=self.dtype)
call(
f"GrB_Matrix_build_{self.dtype.name}",
[self, rows, columns, values, _CScalar(n), dup_op],
)
# Check for duplicates when dup_op was not provided
if not dup_op_given and self._nvals < n:
raise ValueError("Duplicate indices found, must provide `dup_op` BinaryOp")
def dup(self, *, dtype=None, mask=None, name=None):
"""
GrB_Matrix_dup
Create a new Matrix by duplicating this one
"""
if dtype is not None or mask is not None:
if dtype is None:
dtype = self.dtype
rv = Matrix.new(dtype, nrows=self._nrows, ncols=self._ncols, name=name)
rv(mask=mask)[:, :] << self
else:
new_mat = ffi_new("GrB_Matrix*")
rv = Matrix(new_mat, self.dtype, name=name)
call("GrB_Matrix_dup", [_Pointer(rv), self])
rv._nrows = self._nrows
rv._ncols = self._ncols
return rv
def wait(self):
"""
GrB_Matrix_wait
In non-blocking mode, the computations may be delayed and not yet safe
to use by multiple threads. Use wait to force completion of a Matrix
and make it safe to use as input parameters on multiple threads.
"""
call("GrB_Matrix_wait", [_Pointer(self)])
@classmethod
def new(cls, dtype, nrows=0, ncols=0, *, name=None):
"""
GrB_Matrix_new
Create a new empty Matrix from the given type, number of rows, and number of columns
"""
new_matrix = ffi_new("GrB_Matrix*")
dtype = lookup_dtype(dtype)
rv = cls(new_matrix, dtype, name=name)
if type(nrows) is not _CScalar:
nrows = _CScalar(nrows)
if type(ncols) is not _CScalar:
ncols = _CScalar(ncols)
call("GrB_Matrix_new", [_Pointer(rv), dtype, nrows, ncols])
rv._nrows = nrows.scalar.value
rv._ncols = ncols.scalar.value
return rv
@classmethod
def from_values(
cls,
rows,
columns,
values,
*,
nrows=None,
ncols=None,
dup_op=None,
dtype=None,
name=None,
):
"""Create a new Matrix from the given lists of row indices, column
indices, and values. If nrows or ncols are not provided, they
        are computed from the max row and column index found.
"""
rows = ints_to_numpy_buffer(rows, np.uint64, name="row indices")
columns = ints_to_numpy_buffer(columns, np.uint64, name="column indices")
values, dtype = values_to_numpy_buffer(values, dtype)
# Compute nrows and ncols if not provided
if nrows is None:
if rows.size == 0:
raise ValueError("No row indices provided. Unable to infer nrows.")
nrows = int(rows.max()) + 1
if ncols is None:
if columns.size == 0:
raise ValueError("No column indices provided. Unable to infer ncols.")
ncols = int(columns.max()) + 1
# Create the new matrix
C = cls.new(dtype, nrows, ncols, name=name)
# Add the data
# This needs to be the original data to get proper error messages
C.build(rows, columns, values, dup_op=dup_op)
return C
@property
def _carg(self):
return self.gb_obj[0]
#########################################################
# Delayed methods
#
# These return a delayed expression object which must be passed
# to __setitem__ to trigger a call to GraphBLAS
#########################################################
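    # A hedged sketch of the pattern (A, B, C, M are assumed example objects,
    # not defined in this module):
    #     expr = A.mxm(B, semiring.plus_times)  # nothing computed yet
    #     C << expr                             # GrB_mxm is issued here
    #     C(mask=M.S) << expr                   # masked variant of the same call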
def ewise_add(self, other, op=monoid.plus, *, require_monoid=True):
"""
GrB_Matrix_eWiseAdd
Result will contain the union of indices from both Matrices
Default op is monoid.plus.
Unless explicitly disabled, this method requires a monoid (directly or from a semiring).
The reason for this is that binary operators can create very confusing behavior when
only one of the two elements is present.
Examples:
- binary.minus where left=N/A and right=4 yields 4 rather than -4 as might be expected
- binary.gt where left=N/A and right=4 yields True
- binary.gt where left=N/A and right=0 yields False
The behavior is caused by grabbing the non-empty value and using it directly without
performing any operation. In the case of `gt`, the non-empty value is cast to a boolean.
For these reasons, users are required to be explicit when choosing this surprising behavior.
"""
method_name = "ewise_add"
self._expect_type(other, (Matrix, TransposedMatrix), within=method_name, argname="other")
op = get_typed_op(op, self.dtype, other.dtype)
# Per the spec, op may be a semiring, but this is weird, so don't.
if require_monoid:
if op.opclass != "BinaryOp" or op.monoid is None:
self._expect_op(
op,
"Monoid",
within=method_name,
argname="op",
extra_message="A BinaryOp may be given if require_monoid keyword is False",
)
else:
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
expr = MatrixExpression(
method_name,
f"GrB_Matrix_eWiseAdd_{op.opclass}",
[self, other],
op=op,
at=self._is_transposed,
bt=other._is_transposed,
)
if self.shape != other.shape:
expr.new(name="") # incompatible shape; raise now
return expr
def ewise_mult(self, other, op=binary.times):
"""
GrB_Matrix_eWiseMult
Result will contain the intersection of indices from both Matrices
Default op is binary.times
"""
method_name = "ewise_mult"
self._expect_type(other, (Matrix, TransposedMatrix), within=method_name, argname="other")
op = get_typed_op(op, self.dtype, other.dtype)
# Per the spec, op may be a semiring, but this is weird, so don't.
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
expr = MatrixExpression(
method_name,
f"GrB_Matrix_eWiseMult_{op.opclass}",
[self, other],
op=op,
at=self._is_transposed,
bt=other._is_transposed,
)
if self.shape != other.shape:
expr.new(name="") # incompatible shape; raise now
return expr
def mxv(self, other, op=semiring.plus_times):
"""
GrB_mxv
Matrix-Vector multiplication. Result is a Vector.
Default op is semiring.plus_times
"""
method_name = "mxv"
self._expect_type(other, Vector, within=method_name, argname="other")
op = get_typed_op(op, self.dtype, other.dtype)
self._expect_op(op, "Semiring", within=method_name, argname="op")
expr = VectorExpression(
method_name,
"GrB_mxv",
[self, other],
op=op,
size=self._nrows,
at=self._is_transposed,
)
if self._ncols != other._size:
expr.new(name="") # incompatible shape; raise now
return expr
def mxm(self, other, op=semiring.plus_times):
"""
GrB_mxm
Matrix-Matrix multiplication. Result is a Matrix.
Default op is semiring.plus_times
"""
method_name = "mxm"
self._expect_type(other, (Matrix, TransposedMatrix), within=method_name, argname="other")
op = get_typed_op(op, self.dtype, other.dtype)
self._expect_op(op, "Semiring", within=method_name, argname="op")
expr = MatrixExpression(
method_name,
"GrB_mxm",
[self, other],
op=op,
nrows=self._nrows,
ncols=other._ncols,
at=self._is_transposed,
bt=other._is_transposed,
)
if self._ncols != other._nrows:
expr.new(name="") # incompatible shape; raise now
return expr
def kronecker(self, other, op=binary.times):
"""
GrB_kronecker
Kronecker product or sum (depending on op used)
Default op is binary.times
"""
method_name = "kronecker"
self._expect_type(other, (Matrix, TransposedMatrix), within=method_name, argname="other")
op = get_typed_op(op, self.dtype, other.dtype)
# Per the spec, op may be a semiring, but this is weird, so don't.
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
return MatrixExpression(
method_name,
f"GrB_Matrix_kronecker_{op.opclass}",
[self, other],
op=op,
nrows=self._nrows * other._nrows,
ncols=self._ncols * other._ncols,
at=self._is_transposed,
bt=other._is_transposed,
)
def apply(self, op, *, left=None, right=None):
"""
GrB_Matrix_apply
Apply UnaryOp to each element of the calling Matrix
A BinaryOp can also be applied if a scalar is passed in as `left` or `right`,
effectively converting a BinaryOp into a UnaryOp
"""
method_name = "apply"
extra_message = (
"apply only accepts UnaryOp with no scalars or BinaryOp with `left` or `right` scalar."
)
if left is None and right is None:
op = get_typed_op(op, self.dtype)
self._expect_op(
op,
"UnaryOp",
within=method_name,
argname="op",
extra_message=extra_message,
)
cfunc_name = "GrB_Matrix_apply"
args = [self]
expr_repr = None
elif right is None:
if type(left) is not Scalar:
try:
left = Scalar.from_value(left)
except TypeError:
self._expect_type(
left,
Scalar,
within=method_name,
keyword_name="left",
extra_message="Literal scalars also accepted.",
)
op = get_typed_op(op, self.dtype, left.dtype)
if op.opclass == "Monoid":
op = op.binaryop
else:
self._expect_op(
op,
"BinaryOp",
within=method_name,
argname="op",
extra_message=extra_message,
)
cfunc_name = f"GrB_Matrix_apply_BinaryOp1st_{left.dtype}"
args = [_CScalar(left), self]
expr_repr = "{1.name}.apply({op}, left={0})"
elif left is None:
if type(right) is not Scalar:
try:
right = Scalar.from_value(right)
except TypeError:
self._expect_type(
right,
Scalar,
within=method_name,
keyword_name="right",
extra_message="Literal scalars also accepted.",
)
op = get_typed_op(op, self.dtype, right.dtype)
if op.opclass == "Monoid":
op = op.binaryop
else:
self._expect_op(
op,
"BinaryOp",
within=method_name,
argname="op",
extra_message=extra_message,
)
cfunc_name = f"GrB_Matrix_apply_BinaryOp2nd_{right.dtype}"
args = [self, _CScalar(right)]
expr_repr = "{0.name}.apply({op}, right={1})"
else:
raise TypeError("Cannot provide both `left` and `right` to apply")
return MatrixExpression(
method_name,
cfunc_name,
args,
op=op,
nrows=self._nrows,
ncols=self._ncols,
expr_repr=expr_repr,
at=self._is_transposed,
)
def reduce_rows(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values in each row, converting the matrix to a vector
        Default op is monoid.plus
"""
method_name = "reduce_rows"
op = get_typed_op(op, self.dtype)
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
# Using a monoid may be more efficient, so change to one if possible.
# Also, SuiteSparse doesn't like user-defined binarops here.
if op.opclass == "BinaryOp" and op.monoid is not None:
op = op.monoid
return VectorExpression(
method_name,
f"GrB_Matrix_reduce_{op.opclass}",
[self],
op=op,
size=self._nrows,
at=self._is_transposed,
)
def reduce_columns(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values in each column, converting the matrix to a vector
        Default op is monoid.plus
"""
method_name = "reduce_columns"
op = get_typed_op(op, self.dtype)
self._expect_op(op, ("BinaryOp", "Monoid"), within=method_name, argname="op")
# Using a monoid may be more efficient, so change to one if possible.
# Also, SuiteSparse doesn't like user-defined binarops here.
if op.opclass == "BinaryOp" and op.monoid is not None:
op = op.monoid
return VectorExpression(
method_name,
f"GrB_Matrix_reduce_{op.opclass}",
[self],
op=op,
size=self._ncols,
at=not self._is_transposed,
)
def reduce_scalar(self, op=monoid.plus):
"""
GrB_Matrix_reduce
Reduce all values into a scalar
        Default op is monoid.plus
"""
method_name = "reduce_scalar"
op = get_typed_op(op, self.dtype)
if op.opclass == "BinaryOp" and op.monoid is not None:
op = op.monoid
else:
self._expect_op(op, "Monoid", within=method_name, argname="op")
return ScalarExpression(
method_name,
"GrB_Matrix_reduce_{output_dtype}",
[self],
op=op, # to be determined later
)
##################################
# Extract and Assign index methods
##################################
def _extract_element(self, resolved_indexes, dtype=None, name="s_extract"):
if dtype is None:
dtype = self.dtype
else:
dtype = lookup_dtype(dtype)
row, _ = resolved_indexes.indices[0]
col, _ = resolved_indexes.indices[1]
if self._is_transposed:
row, col = col, row
result = Scalar.new(dtype, name=name)
if (
call(f"GrB_Matrix_extractElement_{dtype}", [_Pointer(result), self, row, col])
is not NoValue
):
result._is_empty = False
return result
def _prep_for_extract(self, resolved_indexes):
method_name = "__getitem__"
rows, rowsize = resolved_indexes.indices[0]
cols, colsize = resolved_indexes.indices[1]
if rowsize is None:
# Row-only selection; GraphBLAS doesn't have this method, so we hack it using transpose
row_index = rows
return VectorExpression(
method_name,
"GrB_Col_extract",
[self, cols, colsize, row_index],
expr_repr="{0.name}[{3}, [{2} cols]]",
size=colsize,
dtype=self.dtype,
at=not self._is_transposed,
)
elif colsize is None:
# Column-only selection
col_index = cols
return VectorExpression(
method_name,
"GrB_Col_extract",
[self, rows, rowsize, col_index],
expr_repr="{0.name}[[{2} rows], {3}]",
size=rowsize,
dtype=self.dtype,
at=self._is_transposed,
)
else:
return MatrixExpression(
method_name,
"GrB_Matrix_extract",
[self, rows, rowsize, cols, colsize],
expr_repr="{0.name}[[{2} rows], [{4} cols]]",
nrows=rowsize,
ncols=colsize,
dtype=self.dtype,
at=self._is_transposed,
)
def _assign_element(self, resolved_indexes, value):
row, _ = resolved_indexes.indices[0]
col, _ = resolved_indexes.indices[1]
if type(value) is not Scalar:
try:
value = Scalar.from_value(value)
except TypeError:
self._expect_type(
value,
Scalar,
within="__setitem__",
argname="value",
extra_message="Literal scalars also accepted.",
)
# should we cast?
call(f"GrB_Matrix_setElement_{value.dtype}", [self, _CScalar(value), row, col])
def _prep_for_assign(self, resolved_indexes, value, mask=None, is_submask=False):
method_name = "__setitem__"
rows, rowsize = resolved_indexes.indices[0]
cols, colsize = resolved_indexes.indices[1]
extra_message = "Literal scalars also accepted."
if type(value) is Vector:
if rowsize is None and colsize is not None:
# Row-only selection
row_index = rows
if mask is not None and type(mask.mask) is Matrix:
if is_submask:
# C[i, J](M) << v
raise TypeError(
"Indices for subassign imply Vector submask, "
"but got Matrix mask instead"
)
else:
# C(M)[i, J] << v
# Upcast v to a Matrix and use Matrix_assign
rows = _CArray([rows.scalar.value])
rowsize = _CScalar(1)
new_value = Matrix.new(
value.dtype, nrows=1, ncols=value.size, name=f"{value.name}_as_matrix"
)
new_value[0, :] = value
delayed = MatrixExpression(
method_name,
"GrB_Matrix_assign",
[new_value, rows, rowsize, cols, colsize],
expr_repr="[[{2} rows], [{4} cols]] = {0.name}",
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
else:
if is_submask:
# C[i, J](m) << v
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Row_subassign"
expr_repr = "[{1}, [{3} cols]](%s) << {0.name}" % mask.name
else:
# C(m)[i, J] << v
# C[i, J] << v
cfunc_name = "GrB_Row_assign"
expr_repr = "[{1}, [{3} cols]] = {0.name}"
delayed = MatrixExpression(
method_name,
cfunc_name,
[value, row_index, cols, colsize],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
elif colsize is None and rowsize is not None:
# Column-only selection
col_index = cols
if mask is not None and type(mask.mask) is Matrix:
if is_submask:
# C[I, j](M) << v
raise TypeError(
"Indices for subassign imply Vector submask, "
"but got Matrix mask instead"
)
else:
# C(M)[I, j] << v
# Upcast v to a Matrix and use Matrix_assign
cols = _CArray([cols.scalar.value])
colsize = _CScalar(1)
new_value = Matrix.new(
value.dtype, nrows=value.size, ncols=1, name=f"{value.name}_as_matrix"
)
new_value[:, 0] = value
delayed = MatrixExpression(
method_name,
"GrB_Matrix_assign",
[new_value, rows, rowsize, cols, colsize],
expr_repr="[[{2} rows], [{4} cols]] = {0.name}",
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
else:
if is_submask:
# C[I, j](m) << v
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Col_subassign"
expr_repr = "[{1}, [{3} cols]](%s) << {0.name}" % mask.name
else:
# C(m)[I, j] << v
# C[I, j] << v
cfunc_name = "GrB_Col_assign"
expr_repr = "[{1}, [{3} cols]] = {0.name}"
delayed = MatrixExpression(
method_name,
cfunc_name,
[value, rows, rowsize, col_index],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
elif colsize is None and rowsize is None:
# C[i, j] << v (mask doesn't matter)
self._expect_type(
value,
Scalar,
within=method_name,
extra_message=extra_message,
)
else:
# C[I, J] << v (mask doesn't matter)
self._expect_type(
value,
(Scalar, Matrix, TransposedMatrix),
within=method_name,
extra_message=extra_message,
)
elif type(value) in {Matrix, TransposedMatrix}:
if rowsize is None or colsize is None:
if rowsize is None and colsize is None:
# C[i, j] << A (mask doesn't matter)
self._expect_type(
value,
Scalar,
within=method_name,
extra_message=extra_message,
)
else:
# C[I, j] << A
# C[i, J] << A (mask doesn't matter)
self._expect_type(
value,
(Scalar, Vector),
within=method_name,
extra_message=extra_message,
)
if is_submask:
# C[I, J](M) << A
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Matrix_subassign"
expr_repr = "[[{2} rows], [{4} cols]](%s) << {0.name}" % mask.name
else:
# C[I, J] << A
# C(M)[I, J] << A
cfunc_name = "GrB_Matrix_assign"
expr_repr = "[[{2} rows], [{4} cols]] = {0.name}"
delayed = MatrixExpression(
method_name,
cfunc_name,
[value, rows, rowsize, cols, colsize],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
at=value._is_transposed,
)
else:
if type(value) is not Scalar:
try:
value = Scalar.from_value(value)
except TypeError:
if rowsize is None or colsize is None:
types = (Scalar, Vector)
else:
types = (Scalar, Matrix, TransposedMatrix)
self._expect_type(
value,
types,
within=method_name,
argname="value",
extra_message=extra_message,
)
if mask is not None and type(mask.mask) is Vector:
if rowsize is None and colsize is not None:
if is_submask:
# C[i, J](m) << c
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Row_subassign"
value_vector = Vector.new(value.dtype, size=mask.mask.size, name="v_temp")
expr_repr = "[{1}, [{3} cols]](%s) << {0.name}" % mask.name
else:
# C(m)[i, J] << c
# C[i, J] << c
cfunc_name = "GrB_Row_assign"
value_vector = Vector.new(value.dtype, size=colsize, name="v_temp")
expr_repr = "[{1}, [{3} cols]] = {0.name}"
# SS, SuiteSparse-specific: assume efficient vector with single scalar
value_vector << value
# Row-only selection
row_index = rows
delayed = MatrixExpression(
method_name,
cfunc_name,
[value_vector, row_index, cols, colsize],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
elif colsize is None and rowsize is not None:
if is_submask:
# C[I, j](m) << c
# SS, SuiteSparse-specific: subassign
cfunc_name = "GrB_Col_subassign"
value_vector = Vector.new(value.dtype, size=mask.mask.size, name="v_temp")
else:
# C(m)[I, j] << c
# C[I, j] << c
cfunc_name = "GrB_Col_assign"
value_vector = Vector.new(value.dtype, size=rowsize, name="v_temp")
# SS, SuiteSparse-specific: assume efficient vector with single scalar
value_vector << value
# Column-only selection
col_index = cols
delayed = MatrixExpression(
method_name,
cfunc_name,
[value_vector, rows, rowsize, col_index],
expr_repr="[[{2} rows], {3}] = {0.name}",
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
elif colsize is None and rowsize is None:
# Matrix object, Vector mask, scalar index
# C(m)[i, j] << c
# C[i, j](m) << c
raise TypeError(
"Unable to use Vector mask on single element assignment to a Matrix"
)
else:
# Matrix object, Vector mask, Matrix index
# C(m)[I, J] << c
# C[I, J](m) << c
raise TypeError("Unable to use Vector mask on Matrix assignment to a Matrix")
else:
if is_submask:
if rowsize is None or colsize is None:
if rowsize is None and colsize is None:
# C[i, j](M) << c
raise TypeError("Single element assign does not accept a submask")
else:
# C[i, J](M) << c
# C[I, j](M) << c
raise TypeError(
"Indices for subassign imply Vector submask, "
"but got Matrix mask instead"
)
# C[I, J](M) << c
# SS, SuiteSparse-specific: subassign
cfunc_name = f"GrB_Matrix_subassign_{value.dtype}"
expr_repr = "[[{2} rows], [{4} cols]](%s) = {0}" % mask.name
else:
# C(M)[I, J] << c
# C(M)[i, J] << c
# C(M)[I, j] << c
# C(M)[i, j] << c
if rowsize is None:
rows = _CArray([rows.scalar.value])
rowsize = _CScalar(1)
if colsize is None:
cols = _CArray([cols.scalar.value])
colsize = _CScalar(1)
cfunc_name = f"GrB_Matrix_assign_{value.dtype}"
expr_repr = "[[{2} rows], [{4} cols]] = {0}"
delayed = MatrixExpression(
method_name,
cfunc_name,
[_CScalar(value), rows, rowsize, cols, colsize],
expr_repr=expr_repr,
nrows=self._nrows,
ncols=self._ncols,
dtype=self.dtype,
)
return delayed
def _delete_element(self, resolved_indexes):
row, _ = resolved_indexes.indices[0]
col, _ = resolved_indexes.indices[1]
call("GrB_Matrix_removeElement", [self, row, col])
if backend == "pygraphblas":
def to_pygraphblas(self):
"""Convert to a new `pygraphblas.Matrix`
This does not copy data.
This gives control of the underlying GraphBLAS object to `pygraphblas`.
This means operations on the current `grblas` object will fail!
"""
import pygraphblas as pg
matrix = pg.Matrix(self.gb_obj, pg.types.gb_type_to_type(self.dtype.gb_obj))
self.gb_obj = ffi.NULL
return matrix
@classmethod
def from_pygraphblas(cls, matrix):
"""Convert a `pygraphblas.Matrix` to a new `grblas.Matrix`
This does not copy data.
This gives control of the underlying GraphBLAS object to `grblas`.
This means operations on the original `pygraphblas` object will fail!
"""
dtype = lookup_dtype(matrix.gb_type)
rv = cls(matrix.matrix, dtype)
rv._nrows = matrix.nrows
rv._ncols = matrix.ncols
matrix.matrix = ffi.NULL
return rv
Matrix.ss = class_property(Matrix.ss, ss)
class MatrixExpression(BaseExpression):
__slots__ = "_ncols", "_nrows"
output_type = Matrix
def __init__(
self,
method_name,
cfunc_name,
args,
*,
at=False,
bt=False,
op=None,
dtype=None,
expr_repr=None,
ncols=None,
nrows=None,
):
super().__init__(
method_name,
cfunc_name,
args,
at=at,
bt=bt,
op=op,
dtype=dtype,
expr_repr=expr_repr,
)
if ncols is None:
ncols = args[0]._ncols
if nrows is None:
nrows = args[0]._nrows
self._ncols = ncols
self._nrows = nrows
def construct_output(self, dtype=None, *, name=None):
if dtype is None:
dtype = self.dtype
return Matrix.new(dtype, self._nrows, self._ncols, name=name)
def __repr__(self):
from .formatting import format_matrix_expression
return format_matrix_expression(self)
def _repr_html_(self):
from .formatting import format_matrix_expression_html
return format_matrix_expression_html(self)
@property
def ncols(self):
return self._ncols
@property
def nrows(self):
return self._nrows
class TransposedMatrix:
__slots__ = "_matrix", "_ncols", "_nrows", "__weakref__"
_is_scalar = False
_is_transposed = True
def __init__(self, matrix):
self._matrix = matrix
self._nrows = matrix._ncols
self._ncols = matrix._nrows
def __repr__(self):
from .formatting import format_matrix
return format_matrix(self)
def _repr_html_(self):
from .formatting import format_matrix_html
return format_matrix_html(self)
def new(self, *, dtype=None, mask=None, name=None):
if dtype is None:
dtype = self.dtype
output = Matrix.new(dtype, self._nrows, self._ncols, name=name)
if mask is None:
output.update(self)
else:
output(mask=mask).update(self)
return output
@property
def T(self):
return self._matrix
@property
def gb_obj(self):
return self._matrix.gb_obj
@property
def dtype(self):
return self._matrix.dtype
@wrapdoc(Matrix.to_values)
def to_values(self, *, dtype=None):
rows, cols, vals = self._matrix.to_values(dtype=dtype)
return cols, rows, vals
@property
def _carg(self):
return self._matrix.gb_obj[0]
@property
def name(self):
return f"{self._matrix.name}.T"
@property
def _name_html(self):
return f"{self._matrix._name_html}.T"
# Properties
nrows = Matrix.ncols
ncols = Matrix.nrows
shape = Matrix.shape
nvals = Matrix.nvals
_nvals = Matrix._nvals
# Delayed methods
ewise_add = Matrix.ewise_add
ewise_mult = Matrix.ewise_mult
mxv = Matrix.mxv
mxm = Matrix.mxm
kronecker = Matrix.kronecker
apply = Matrix.apply
reduce_rows = Matrix.reduce_rows
reduce_columns = Matrix.reduce_columns
reduce_scalar = Matrix.reduce_scalar
# Operator sugar
__or__ = Matrix.__or__
__ror__ = Matrix.__ror__
__ior__ = Matrix.__ior__
__and__ = Matrix.__and__
__rand__ = Matrix.__rand__
__iand__ = Matrix.__iand__
__matmul__ = Matrix.__matmul__
__rmatmul__ = Matrix.__rmatmul__
__imatmul__ = Matrix.__imatmul__
# Misc.
isequal = Matrix.isequal
isclose = Matrix.isclose
_extract_element = Matrix._extract_element
_prep_for_extract = Matrix._prep_for_extract
__eq__ = Matrix.__eq__
__bool__ = Matrix.__bool__
__getitem__ = Matrix.__getitem__
__contains__ = Matrix.__contains__
__iter__ = Matrix.__iter__
_expect_type = Matrix._expect_type
_expect_op = Matrix._expect_op
expr.MatrixEwiseAddExpr.output_type = MatrixExpression
expr.MatrixEwiseMultExpr.output_type = MatrixExpression
expr.MatrixMatMulExpr.output_type = MatrixExpression
```
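A short, hedged usage sketch of the `Matrix` API defined above; the data, names, and shapes are invented for illustration, and it assumes the default SuiteSparse backend is available.
```python
from grblas import Matrix, Vector, monoid, semiring
A = Matrix.from_values([0, 0, 1, 2], [1, 2, 2, 0], [1.0, 2.0, 3.0, 4.0], nrows=3, ncols=3)
v = Vector.from_values([0, 1, 2], [1.0, 1.0, 1.0])
# Delayed expressions do nothing until materialized with .new() or assigned with <<
w = A.mxv(v, semiring.plus_times).new(name="w")
# Masked update: only positions present in A (structural mask) receive values
C = Matrix.new(float, 3, 3, name="C")
C(mask=A.S) << A.ewise_add(A.T, monoid.plus)
total = A.reduce_scalar(monoid.plus).new().value  # 10.0
```
As the class comments above note, `mxv`, `ewise_add`, and friends only describe the call; the GraphBLAS routine runs when the expression is materialized with `.new()` or assigned with `<<`.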
#### File: grblas/monoid/numpy.py
```python
import numpy as np
from .. import operator, binary
from ..dtypes import _supports_complex
_complex_dtypes = {"FC32", "FC64"}
_float_dtypes = {"FP32", "FP64"}
_int_dtypes = {"INT8", "UINT8", "INT16", "UINT16", "INT32", "UINT32", "INT64", "UINT64"}
_bool_int_dtypes = _int_dtypes | {"BOOL"}
_monoid_identities = {
# Math operations
"add": 0,
"multiply": 1,
"logaddexp": dict.fromkeys(_float_dtypes, -np.inf),
"logaddexp2": dict.fromkeys(_float_dtypes, -np.inf),
"gcd": dict.fromkeys(_int_dtypes, 0),
# Trigonometric functions
"hypot": dict.fromkeys(_float_dtypes, 0.0),
# Bit-twiddling functions
"bitwise_and": {dtype: True if dtype == "BOOL" else -1 for dtype in _bool_int_dtypes},
"bitwise_or": dict.fromkeys(_bool_int_dtypes, 0),
"bitwise_xor": dict.fromkeys(_bool_int_dtypes, 0),
# Comparison functions
"equal": {"BOOL": True},
"logical_and": {"BOOL": True},
"logical_or": {"BOOL": True},
"logical_xor": {"BOOL": False},
"maximum": {
"BOOL": False,
"INT8": np.iinfo(np.int8).min,
"UINT8": 0,
"INT16": np.iinfo(np.int16).min,
"UINT16": 0,
"INT32": np.iinfo(np.int32).min,
"UINT32": 0,
"INT64": np.iinfo(np.int64).min,
"UINT64": 0,
"FP32": -np.inf,
"FP64": -np.inf,
},
"minimum": {
"BOOL": True,
"INT8": np.iinfo(np.int8).max,
"UINT8": np.iinfo(np.uint8).max,
"INT16": np.iinfo(np.int16).max,
"UINT16": np.iinfo(np.uint16).max,
"INT32": np.iinfo(np.int32).max,
"UINT32": np.iinfo(np.uint32).max,
"INT64": np.iinfo(np.int64).max,
"UINT64": np.iinfo(np.uint64).max,
"FP32": np.inf,
"FP64": np.inf,
},
"fmax": {
"BOOL": False,
"INT8": np.iinfo(np.int8).min,
"UINT8": 0,
"INT16": np.iinfo(np.int8).min,
"UINT16": 0,
"INT32": np.iinfo(np.int8).min,
"UINT32": 0,
"INT64": np.iinfo(np.int8).min,
"UINT64": 0,
"FP32": -np.inf, # or np.nan?
"FP64": -np.inf, # or np.nan?
},
"fmin": {
"BOOL": True,
"INT8": np.iinfo(np.int8).max,
"UINT8": np.iinfo(np.uint8).max,
"INT16": np.iinfo(np.int16).max,
"UINT16": np.iinfo(np.uint16).max,
"INT32": np.iinfo(np.int32).max,
"UINT32": np.iinfo(np.uint32).max,
"INT64": np.iinfo(np.int64).max,
"UINT64": np.iinfo(np.uint64).max,
"FP32": np.inf, # or np.nan?
"FP64": np.inf, # or np.nan?
},
}
if _supports_complex:
_monoid_identities["fmax"].update(dict.fromkeys(_complex_dtypes, complex(-np.inf, -np.inf)))
_monoid_identities["fmin"].update(dict.fromkeys(_complex_dtypes, complex(np.inf, np.inf)))
_monoid_identities["maximum"].update(dict.fromkeys(_complex_dtypes, complex(-np.inf, -np.inf)))
_monoid_identities["minimum"].update(dict.fromkeys(_complex_dtypes, complex(np.inf, np.inf)))
__all__ = list(_monoid_identities)
def __dir__():
return __all__
def __getattr__(name):
if name not in _monoid_identities:
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
func = getattr(binary.numpy, name)
operator.Monoid.register_new(f"numpy.{name}", func, _monoid_identities[name])
return globals()[name]
```
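As with the binary counterpart, these monoids are created lazily on first attribute access, pairing each NumPy function with the identity value listed above. A hedged sketch (the vector and its values are invented):
```python
from grblas import Vector
from grblas.monoid import numpy as npmonoid
maximum = npmonoid.maximum  # registered via __getattr__ on first access
v = Vector.from_values([0, 2, 5], [3, -1, 7], size=8)
largest = v.reduce(maximum).new().value  # 7; missing entries contribute nothing
```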
#### File: grblas-dev/grblas/operator.py
```python
import inspect
import re
import numpy as np
import numba
from collections.abc import Mapping
from functools import lru_cache
from types import FunctionType, ModuleType
from . import ffi, lib, unary, binary, monoid, semiring, op
from .dtypes import lookup_dtype, unify, INT8, _sample_values, _supports_complex
from .exceptions import UdfParseError, check_status_carg
from .expr import InfixExprBase
from .utils import libget
ffi_new = ffi.new
UNKNOWN_OPCLASS = "UnknownOpClass"
def _normalize_type(type_):
return lookup_dtype(type_).name
class OpPath:
def __init__(self, parent, name):
self._parent = parent
self._name = name
def _call_op(op, left, right=None, **kwargs):
if right is None:
if isinstance(left, InfixExprBase):
# op(A & B), op(A | B), op(A @ B)
return getattr(left.left, left.method_name)(left.right, op, **kwargs)
if find_opclass(op)[1] == "Semiring":
raise TypeError(
f"Bad type when calling {op!r}. Got type: {type(left)}.\n"
f"Expected an infix expression, such as: {op!r}(A @ B)"
)
raise TypeError(
f"Bad type when calling {op!r}. Got type: {type(left)}.\n"
"Expected an infix expression or an apply with a Vector or Matrix and a scalar:\n"
f" - {op!r}(A & B)\n"
f" - {op!r}(A, 1)\n"
f" - {op!r}(1, A)"
)
# op(A, 1) -> apply (or select once available)
from .vector import Vector
from .matrix import Matrix, TransposedMatrix
if type(left) in {Vector, Matrix, TransposedMatrix}:
return left.apply(op, right=right, **kwargs)
elif type(right) in {Vector, Matrix, TransposedMatrix}:
return right.apply(op, left=left, **kwargs)
raise TypeError(
f"Bad types when calling {op!r}. Got types: {type(left)}, {type(right)}.\n"
"Expected an infix expression or an apply with a Vector or Matrix and a scalar:\n"
f" - {op!r}(A & B)\n"
f" - {op!r}(A, 1)\n"
f" - {op!r}(1, A)"
)
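# Hedged sketch of the call forms _call_op dispatches (A and B are assumed
# example collections, not defined here):
#     binary.plus(A | B)         -> A.ewise_add(B, binary.plus)
#     binary.plus(A & B)         -> A.ewise_mult(B, binary.plus)
#     binary.plus(A, 1)          -> A.apply(binary.plus, right=1)
#     semiring.plus_times(A @ B) -> A.mxm(B, semiring.plus_times)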
class TypedOpBase:
__slots__ = "parent", "name", "type", "return_type", "gb_obj", "gb_name", "__weakref__"
def __init__(self, parent, name, type_, return_type, gb_obj, gb_name):
self.parent = parent
self.name = name
self.type = _normalize_type(type_)
self.return_type = _normalize_type(return_type)
self.gb_obj = gb_obj
self.gb_name = gb_name
def __repr__(self):
classname = self.opclass.lower()
if classname.endswith("op"):
classname = classname[:-2]
return f"{classname}.{self.name}[{self.type}]"
@property
def _carg(self):
return self.gb_obj
class TypedBuiltinUnaryOp(TypedOpBase):
__slots__ = ()
opclass = "UnaryOp"
def __call__(self, val):
from .vector import Vector
from .matrix import Matrix, TransposedMatrix
if type(val) in {Vector, Matrix, TransposedMatrix}:
return val.apply(self)
raise TypeError(
f"Bad type when calling {self!r}.\n"
" - Expected type: Vector, Matrix, TransposedMatrix.\n"
f" - Got: {type(val)}.\n"
"Calling a UnaryOp is syntactic sugar for calling apply. "
f"For example, `A.apply({self!r})` is the same as `{self!r}(A)`."
)
class TypedBuiltinBinaryOp(TypedOpBase):
__slots__ = ()
opclass = "BinaryOp"
def __call__(self, left, right=None, *, require_monoid=None):
if require_monoid is not None:
if right is not None:
raise TypeError(
f"Bad keyword argument `require_monoid=` when calling {self!r}.\n"
"require_monoid keyword may only be used when performing an ewise_add.\n"
f"For example: {self!r}(A | B, require_monoid=False)."
)
return _call_op(self, left, require_monoid=require_monoid)
return _call_op(self, left, right)
@property
def monoid(self):
rv = getattr(monoid, self.name, None)
if rv is not None and self.type in rv._typed_ops:
return rv[self.type]
class TypedBuiltinMonoid(TypedOpBase):
__slots__ = "_identity"
opclass = "Monoid"
def __init__(self, parent, name, type_, return_type, gb_obj, gb_name):
super().__init__(parent, name, type_, return_type, gb_obj, gb_name)
self._identity = None
def __call__(self, left, right=None):
return _call_op(self, left, right)
@property
def identity(self):
if self._identity is None:
from .recorder import skip_record
from .vector import Vector
with skip_record:
self._identity = Vector.new(size=1, dtype=self.type, name="").reduce(self).value
return self._identity
@property
def binaryop(self):
return getattr(binary, self.name)[self.type]
class TypedBuiltinSemiring(TypedOpBase):
__slots__ = ()
opclass = "Semiring"
def __call__(self, left, right=None):
if right is not None:
raise TypeError(
f"Bad types when calling {self!r}. Got types: {type(left)}, {type(right)}.\n"
f"Expected an infix expression, such as: {self!r}(A @ B)"
)
return _call_op(self, left)
@property
def binaryop(self):
return getattr(binary, self.name.split("_", 1)[1])[self.type]
@property
def monoid(self):
return getattr(monoid, self.name.split("_", 1)[0])[self.type]
class TypedUserUnaryOp(TypedOpBase):
__slots__ = "orig_func", "numba_func"
opclass = "UnaryOp"
def __init__(self, parent, name, type_, return_type, gb_obj, orig_func, numba_func):
super().__init__(parent, name, type_, return_type, gb_obj, f"{name}_{type_}")
self.orig_func = orig_func
self.numba_func = numba_func
__call__ = TypedBuiltinUnaryOp.__call__
class TypedUserBinaryOp(TypedOpBase):
__slots__ = "orig_func", "numba_func", "_monoid"
opclass = "BinaryOp"
def __init__(self, parent, name, type_, return_type, gb_obj, orig_func, numba_func):
super().__init__(parent, name, type_, return_type, gb_obj, f"{name}_{type_}")
self.orig_func = orig_func
self.numba_func = numba_func
self._monoid = None
@property
def monoid(self):
if self._monoid is None and not self.parent._anonymous:
monoid = Monoid._find(self.name)
if monoid is not None and self.type in monoid._typed_ops: # pragma: no cover
# This may be used by grblas.binary.numpy objects
self._monoid = monoid[self.type]
return self._monoid
__call__ = TypedBuiltinBinaryOp.__call__
class TypedUserMonoid(TypedOpBase):
__slots__ = "binaryop", "identity"
opclass = "Monoid"
def __init__(self, parent, name, type_, return_type, gb_obj, binaryop, identity):
super().__init__(parent, name, type_, return_type, gb_obj, f"{name}_{type_}")
self.binaryop = binaryop
self.identity = identity
binaryop._monoid = self
__call__ = TypedBuiltinMonoid.__call__
class TypedUserSemiring(TypedOpBase):
__slots__ = "monoid", "binaryop"
opclass = "Semiring"
def __init__(self, parent, name, type_, return_type, gb_obj, monoid, binaryop):
super().__init__(parent, name, type_, return_type, gb_obj, f"{name}_{type_}")
self.monoid = monoid
self.binaryop = binaryop
__call__ = TypedBuiltinSemiring.__call__
class ParameterizedUdf:
__slots__ = "name", "__call__", "_anonymous", "__weakref__"
def __init__(self, name, anonymous):
self.name = name
self._anonymous = anonymous
# lru_cache per instance
method = self._call.__get__(self, type(self))
self.__call__ = lru_cache(maxsize=1024)(method)
def _call(self, *args, **kwargs):
raise NotImplementedError()
class ParameterizedUnaryOp(ParameterizedUdf):
__slots__ = "func", "__signature__"
def __init__(self, name, func, *, anonymous=False):
self.func = func
self.__signature__ = inspect.signature(func)
if name is None:
name = getattr(func, "__name__", name)
super().__init__(name, anonymous)
def _call(self, *args, **kwargs):
unary = self.func(*args, **kwargs)
return UnaryOp.register_anonymous(unary, self.name)
class ParameterizedBinaryOp(ParameterizedUdf):
__slots__ = "func", "__signature__", "_monoid", "_cached_call"
def __init__(self, name, func, *, anonymous=False):
self.func = func
self.__signature__ = inspect.signature(func)
self._monoid = None
if name is None:
name = getattr(func, "__name__", name)
super().__init__(name, anonymous)
method = self._call_to_cache.__get__(self, type(self))
self._cached_call = lru_cache(maxsize=1024)(method)
self.__call__ = self._call
def _call_to_cache(self, *args, **kwargs):
binary = self.func(*args, **kwargs)
return BinaryOp.register_anonymous(binary, self.name)
def _call(self, *args, **kwargs):
binop = self._cached_call(*args, **kwargs)
if self._monoid is not None and binop._monoid is None:
# This is all a bit funky. We try our best to associate a binaryop
# to a monoid. So, if we made a ParameterizedMonoid using this object,
# then try to create a monoid with the given arguments.
binop._monoid = binop # temporary!
try:
# If this call is successful, then it will set `binop._monoid`
self._monoid(*args, **kwargs)
except Exception:
binop._monoid = None
assert binop._monoid is not binop
return binop
@property
def monoid(self):
return self._monoid
class ParameterizedMonoid(ParameterizedUdf):
__slots__ = "binaryop", "identity", "__signature__"
def __init__(self, name, binaryop, identity, *, anonymous=False):
        if type(binaryop) is not ParameterizedBinaryOp:
raise TypeError("binaryop must be parameterized")
self.binaryop = binaryop
self.__signature__ = binaryop.__signature__
if callable(identity):
# assume it must be parameterized as well, so signature must match
sig = inspect.signature(identity)
if sig != self.__signature__:
raise ValueError(
f"Signatures of binaryop and identity passed to "
f"{type(self).__name__} must be the same. Got:\n"
f" binaryop{self.__signature__}\n"
f" !=\n"
f" identity{sig}"
)
self.identity = identity
if name is None:
name = binaryop.name
super().__init__(name, anonymous)
binaryop._monoid = self
# clear binaryop cache so it can be associated with this monoid
binaryop._cached_call.cache_clear()
def _call(self, *args, **kwargs):
binary = self.binaryop(*args, **kwargs)
identity = self.identity
if callable(identity):
identity = identity(*args, **kwargs)
return Monoid.register_anonymous(binary, identity, self.name)
class ParameterizedSemiring(ParameterizedUdf):
__slots__ = "monoid", "binaryop", "__signature__"
def __init__(self, name, monoid, binaryop, *, anonymous=False):
if type(monoid) not in {ParameterizedMonoid, Monoid}:
raise TypeError("monoid must be of type Monoid or ParameterizedMonoid")
if type(binaryop) is ParameterizedBinaryOp:
self.__signature__ = binaryop.__signature__
if type(monoid) is ParameterizedMonoid and monoid.__signature__ != self.__signature__:
raise ValueError(
f"Signatures of monoid and binaryop passed to "
f"{type(self).__name__} must be the same. Got:\n"
f" monoid{monoid.__signature__}\n"
f" !=\n"
f" binaryop{self.__signature__}\n\n"
"Perhaps call monoid or binaryop with parameters before creating the semiring."
)
elif type(binaryop) is BinaryOp:
if type(monoid) is Monoid:
raise TypeError("At least one of monoid or binaryop must be parameterized")
self.__signature__ = monoid.__signature__
else:
raise TypeError("binaryop must be of type BinaryOp or ParameterizedBinaryOp")
self.monoid = monoid
self.binaryop = binaryop
if name is None:
name = f"{monoid.name}_{binaryop.name}"
super().__init__(name, anonymous)
def _call(self, *args, **kwargs):
monoid = self.monoid
if type(monoid) is ParameterizedMonoid:
monoid = monoid(*args, **kwargs)
binary = self.binaryop
if type(binary) is ParameterizedBinaryOp:
binary = binary(*args, **kwargs)
return Semiring.register_anonymous(monoid, binary, self.name)
class OpBase:
__slots__ = "name", "_typed_ops", "types", "_anonymous", "__weakref__"
_parse_config = None
_initialized = False
_module = None
def __init__(self, name, *, anonymous=False):
self.name = name
self._typed_ops = {}
self.types = {}
self._anonymous = anonymous
def __repr__(self):
return f"{self._modname}.{self.name}"
def __getitem__(self, type_):
type_ = _normalize_type(type_)
if type_ not in self._typed_ops:
raise KeyError(f"{self.name} does not work with {type_}")
return self._typed_ops[type_]
def _add(self, op):
self._typed_ops[op.type] = op
self.types[op.type] = op.return_type
def __delitem__(self, type_):
type_ = _normalize_type(type_)
del self._typed_ops[type_]
del self.types[type_]
def __contains__(self, type_):
type_ = _normalize_type(type_)
return type_ in self._typed_ops
@classmethod
def _remove_nesting(cls, funcname, *, module=None, modname=None, strict=True):
if module is None:
module = cls._module
if modname is None:
modname = cls._modname
if "." not in funcname:
if strict and hasattr(module, funcname):
raise AttributeError(f"{modname}.{funcname} is already defined")
else:
path, funcname = funcname.rsplit(".", 1)
for folder in path.split("."):
if not hasattr(module, folder):
setattr(module, folder, OpPath(module, folder))
module = getattr(module, folder)
modname = f"{modname}.{folder}"
if not isinstance(module, (OpPath, ModuleType)):
raise AttributeError(
f"{modname} is already defined. Cannot use as a nested path."
)
# Can't use `hasattr` here, b/c we use `__getattr__` in numpy namespaces
if strict and funcname in module.__dict__:
raise AttributeError(f"{path}.{funcname} is already defined")
return module, funcname
@classmethod
def _find(cls, funcname):
rv = cls._module
for attr in funcname.split("."):
rv = getattr(rv, attr, None)
if rv is None:
break
return rv
@classmethod
def _initialize(cls):
if cls._initialized:
return
# Read in the parse configs
trim_from_front = cls._parse_config.get("trim_from_front", 0)
delete_exact = cls._parse_config.get("delete_exact", None)
num_underscores = cls._parse_config["num_underscores"]
varnames = tuple(x for x in dir(lib) if x[0] != "_")
for re_str, return_prefix in (
("re_exprs", None),
("re_exprs_return_bool", "BOOL"),
("re_exprs_return_float", "FP"),
("re_exprs_return_complex", "FC"),
):
if re_str not in cls._parse_config:
continue
if "complex" in re_str and not _supports_complex: # pragma: no cover
continue
for r in reversed(cls._parse_config[re_str]):
for varname in varnames:
m = r.match(varname)
if m:
# Parse function into name and datatype
gb_name = m.string
splitname = gb_name[trim_from_front:].split("_")
if delete_exact and delete_exact in splitname:
splitname.remove(delete_exact)
if len(splitname) == num_underscores + 1:
*splitname, type_ = splitname
else:
type_ = None
name = "_".join(splitname).lower()
# Create object for name unless it already exists
if not hasattr(cls._module, name):
obj = cls(name)
setattr(cls._module, name, obj)
if not hasattr(op, name):
setattr(op, name, obj)
else:
obj = getattr(cls._module, name)
gb_obj = getattr(lib, varname)
# Determine return type
if return_prefix == "BOOL":
return_type = "BOOL"
if type_ is None:
type_ = "BOOL"
else:
if type_ is None: # pragma: no cover
raise TypeError(f"Unable to determine return type for {varname}")
if return_prefix is None:
return_type = type_
else:
# Grab the number of bits from type_
num_bits = type_[-2:]
if num_bits not in {"32", "64"}: # pragma: no cover
raise TypeError(f"Unexpected number of bits: {num_bits}")
return_type = f"{return_prefix}{num_bits}"
builtin_op = cls._typed_class(
obj, name, type_, return_type, gb_obj, gb_name
)
obj._add(builtin_op)
cls._initialized = True
class UnaryOp(OpBase):
__slots__ = ()
_module = unary
_modname = "unary"
_typed_class = TypedBuiltinUnaryOp
_parse_config = {
"trim_from_front": 4,
"num_underscores": 1,
"re_exprs": [
re.compile(
"^GrB_(IDENTITY|AINV|MINV|ABS|BNOT)"
"_(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64|FC32|FC64)$"
),
re.compile(
"^GxB_(LNOT|ONE|POSITIONI1|POSITIONI|POSITIONJ1|POSITIONJ)"
"_(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)$"
),
re.compile(
"^GxB_(SQRT|LOG|EXP|LOG2|SIN|COS|TAN|ACOS|ASIN|ATAN|SINH|COSH|TANH|ACOSH"
"|ASINH|ATANH|SIGNUM|CEIL|FLOOR|ROUND|TRUNC|EXP2|EXPM1|LOG10|LOG1P)"
"_(FP32|FP64|FC32|FC64)$"
),
re.compile("^GxB_(LGAMMA|TGAMMA|ERF|ERFC|FREXPX|FREXPE)_(FP32|FP64)$"),
re.compile("^GxB_(IDENTITY|AINV|MINV|ONE|CONJ)_(FC32|FC64)$"),
],
"re_exprs_return_bool": [
re.compile("^GrB_LNOT$"),
re.compile("^GxB_(ISINF|ISNAN|ISFINITE)_(FP32|FP64|FC32|FC64)$"),
],
"re_exprs_return_float": [re.compile("^GxB_(CREAL|CIMAG|CARG|ABS)_(FC32|FC64)$")],
}
@classmethod
def _build(cls, name, func, *, anonymous=False):
if type(func) is not FunctionType:
raise TypeError(f"UDF argument must be a function, not {type(func)}")
if name is None:
name = getattr(func, "__name__", "<anonymous_unary>")
success = False
new_type_obj = cls(name, anonymous=anonymous)
return_types = {}
nt = numba.types
for type_, sample_val in _sample_values.items():
type_ = lookup_dtype(type_)
# Check if func can handle this data type
try:
with np.errstate(divide="ignore", over="ignore", under="ignore", invalid="ignore"):
ret = func(sample_val)
ret_type = lookup_dtype(type(ret))
if ret_type != type_ and (
("INT" in ret_type.name and "INT" in type_.name)
or ("FP" in ret_type.name and "FP" in type_.name)
or ("FC" in ret_type.name and "FC" in type_.name)
or (
type_ == "UINT64"
and ret_type == "FP64"
and return_types.get("INT64") == "INT64"
)
):
# Downcast `ret_type` to `type_`.
# This is what users want most of the time, but we can't make a perfect rule.
# There should be a way for users to be explicit.
ret_type = type_
elif type_ == "BOOL" and ret_type == "INT64" and return_types.get("INT8") == "INT8":
ret_type = INT8
# Numba is unable to handle BOOL correctly right now, but we have a workaround
# See: https://github.com/numba/numba/issues/5395
# We're relying on coercion behaving correctly here
input_type = INT8 if type_ == "BOOL" else type_
return_type = INT8 if ret_type == "BOOL" else ret_type
# JIT the func so it can be used from a cfunc
unary_udf = numba.njit(func)
# Build wrapper because GraphBLAS wants pointers and void return
wrapper_sig = nt.void(
nt.CPointer(return_type.numba_type),
nt.CPointer(input_type.numba_type),
)
if type_ == "BOOL":
if ret_type == "BOOL":
def unary_wrapper(z, x):
z[0] = bool(unary_udf(bool(x[0]))) # pragma: no cover
else:
def unary_wrapper(z, x):
z[0] = unary_udf(bool(x[0])) # pragma: no cover
elif ret_type == "BOOL":
def unary_wrapper(z, x):
z[0] = bool(unary_udf(x[0])) # pragma: no cover
else:
def unary_wrapper(z, x):
z[0] = unary_udf(x[0]) # pragma: no cover
unary_wrapper = numba.cfunc(wrapper_sig, nopython=True)(unary_wrapper)
new_unary = ffi_new("GrB_UnaryOp*")
check_status_carg(
lib.GrB_UnaryOp_new(
new_unary, unary_wrapper.cffi, ret_type.gb_obj, type_.gb_obj
),
"UnaryOp",
new_unary,
)
op = TypedUserUnaryOp(
new_type_obj, name, type_.name, ret_type.name, new_unary[0], func, unary_udf
)
new_type_obj._add(op)
success = True
return_types[type_.name] = ret_type.name
except Exception:
continue
if success:
return new_type_obj
else:
raise UdfParseError("Unable to parse function using Numba")
@classmethod
def register_anonymous(cls, func, name=None, *, parameterized=False):
if parameterized:
return ParameterizedUnaryOp(name, func, anonymous=True)
return cls._build(name, func, anonymous=True)
@classmethod
def register_new(cls, name, func, *, parameterized=False):
module, funcname = cls._remove_nesting(name)
if parameterized:
unary_op = ParameterizedUnaryOp(name, func)
else:
unary_op = cls._build(name, func)
setattr(module, funcname, unary_op)
# Also save it to `grblas.op` if not yet defined
module, funcname = cls._remove_nesting(name, module=op, modname="op", strict=False)
if not hasattr(module, funcname):
setattr(module, funcname, unary_op)
__call__ = TypedBuiltinUnaryOp.__call__
class BinaryOp(OpBase):
__slots__ = "_monoid"
_module = binary
_modname = "binary"
_typed_class = TypedBuiltinBinaryOp
_parse_config = {
"trim_from_front": 4,
"num_underscores": 1,
"re_exprs": [
re.compile(
"^GrB_(FIRST|SECOND|PLUS|MINUS|TIMES|DIV|MIN|MAX)"
"_(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64|FC32|FC64)$"
),
re.compile(
"GrB_(BOR|BAND|BXOR|BXNOR)" "_(INT8|INT16|INT32|INT64|UINT8|UINT16|UINT32|UINT64)$"
),
re.compile(
"^GxB_(POW|RMINUS|RDIV|PAIR|ANY|ISEQ|ISNE|ISGT|ISLT|ISGE|ISLE|LOR|LAND|LXOR)"
"_(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64|FC32|FC64)$"
),
re.compile("^GxB_(FIRST|SECOND|PLUS|MINUS|TIMES|DIV)_(FC32|FC64)$"),
re.compile("^GxB_(ATAN2|HYPOT|FMOD|REMAINDER|LDEXP|COPYSIGN)_(FP32|FP64)$"),
re.compile(
"GxB_(BGET|BSET|BCLR|BSHIFT|FIRSTI1|FIRSTI|FIRSTJ1|FIRSTJ"
"|SECONDI1|SECONDI|SECONDJ1|SECONDJ)"
"_(INT8|INT16|INT32|INT64|UINT8|UINT16|UINT32|UINT64)$"
),
],
"re_exprs_return_bool": [
re.compile("^GrB_(LOR|LAND|LXOR|LXNOR)$"),
re.compile(
"^GrB_(EQ|NE|GT|LT|GE|LE)_"
"(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)$"
),
re.compile(
"^GxB_(LOR|LAND|LXOR)_"
"(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)$"
),
re.compile("^GxB_(EQ|NE)_(FC32|FC64)$"),
],
"re_exprs_return_complex": [re.compile("^GxB_(CMPLX)_(FP32|FP64)$")],
}
@classmethod
def _build(cls, name, func, *, anonymous=False):
if not isinstance(func, FunctionType):
raise TypeError(f"UDF argument must be a function, not {type(func)}")
if name is None:
name = getattr(func, "__name__", "<anonymous_binary>")
success = False
new_type_obj = cls(name, anonymous=anonymous)
return_types = {}
nt = numba.types
for type_, sample_val in _sample_values.items():
type_ = lookup_dtype(type_)
# Check if func can handle this data type
try:
with np.errstate(divide="ignore", over="ignore", under="ignore", invalid="ignore"):
ret = func(sample_val, sample_val)
ret_type = lookup_dtype(type(ret))
if ret_type != type_ and (
("INT" in ret_type.name and "INT" in type_.name)
or ("FP" in ret_type.name and "FP" in type_.name)
or ("FC" in ret_type.name and "FC" in type_.name)
or (
type_ == "UINT64"
and ret_type == "FP64"
and return_types.get("INT64") == "INT64"
)
):
# Downcast `ret_type` to `type_`.
# This is what users want most of the time, but we can't make a perfect rule.
# There should be a way for users to be explicit.
ret_type = type_
elif type_ == "BOOL" and ret_type == "INT64" and return_types.get("INT8") == "INT8":
ret_type = INT8
# Numba is unable to handle BOOL correctly right now, but we have a workaround
# See: https://github.com/numba/numba/issues/5395
# We're relying on coercion behaving correctly here
input_type = INT8 if type_ == "BOOL" else type_
return_type = INT8 if ret_type == "BOOL" else ret_type
# JIT the func so it can be used from a cfunc
binary_udf = numba.njit(func)
# Build wrapper because GraphBLAS wants pointers and void return
wrapper_sig = nt.void(
nt.CPointer(return_type.numba_type),
nt.CPointer(input_type.numba_type),
nt.CPointer(input_type.numba_type),
)
if type_ == "BOOL":
if ret_type == "BOOL":
def binary_wrapper(z, x, y):
z[0] = bool(binary_udf(bool(x[0]), bool(y[0]))) # pragma: no cover
else:
def binary_wrapper(z, x, y):
z[0] = binary_udf(bool(x[0]), bool(y[0])) # pragma: no cover
elif ret_type == "BOOL":
def binary_wrapper(z, x, y):
z[0] = bool(binary_udf(x[0], y[0])) # pragma: no cover
else:
def binary_wrapper(z, x, y):
z[0] = binary_udf(x[0], y[0]) # pragma: no cover
binary_wrapper = numba.cfunc(wrapper_sig, nopython=True)(binary_wrapper)
new_binary = ffi_new("GrB_BinaryOp*")
check_status_carg(
lib.GrB_BinaryOp_new(
new_binary,
binary_wrapper.cffi,
ret_type.gb_obj,
type_.gb_obj,
type_.gb_obj,
),
"BinaryOp",
new_binary,
)
op = TypedUserBinaryOp(
new_type_obj, name, type_.name, ret_type.name, new_binary[0], func, binary_udf
)
new_type_obj._add(op)
success = True
return_types[type_.name] = ret_type.name
except Exception:
continue
if success:
return new_type_obj
else:
raise UdfParseError("Unable to parse function using Numba")
@classmethod
def register_anonymous(cls, func, name=None, *, parameterized=False):
if parameterized:
return ParameterizedBinaryOp(name, func, anonymous=True)
return cls._build(name, func, anonymous=True)
@classmethod
def register_new(cls, name, func, *, parameterized=False):
module, funcname = cls._remove_nesting(name)
if parameterized:
binary_op = ParameterizedBinaryOp(name, func)
else:
binary_op = cls._build(name, func)
setattr(module, funcname, binary_op)
# Also save it to `grblas.op` if not yet defined
module, funcname = cls._remove_nesting(name, module=op, modname="op", strict=False)
if not hasattr(module, funcname):
setattr(module, funcname, binary_op)
@classmethod
def _initialize(cls):
super()._initialize()
# Rename div to cdiv
cdiv = binary.cdiv = BinaryOp("cdiv")
for dtype, ret_type in binary.div.types.items():
orig_op = binary.div[dtype]
op = TypedBuiltinBinaryOp(
cdiv, "cdiv", dtype, ret_type, orig_op.gb_obj, orig_op.gb_name
)
cdiv._add(op)
del binary.div
# Add truediv which always points to floating point cdiv
# We are effectively hacking cdiv to always return floating point values
# If the inputs are FP32, we use DIV_FP32; use DIV_FP64 for all other input dtypes
truediv = binary.truediv = BinaryOp("truediv")
for dtype in binary.cdiv.types:
float_type = "FP32" if dtype == "FP32" else "FP64"
orig_op = binary.cdiv[float_type]
op = TypedBuiltinBinaryOp(
truediv,
"truediv",
dtype,
binary.cdiv.types[float_type],
orig_op.gb_obj,
orig_op.gb_name,
)
truediv._add(op)
# Add floordiv
# cdiv truncates towards 0, while floordiv truncates towards -inf
BinaryOp.register_new("floordiv", lambda x, y: x // y)
def isclose(rel_tol=1e-7, abs_tol=0.0):
def inner(x, y):
return x == y or abs(x - y) <= max(rel_tol * max(abs(x), abs(y)), abs_tol)
return inner
BinaryOp.register_new("isclose", isclose, parameterized=True)
def __init__(self, name, *, anonymous=False):
super().__init__(name, anonymous=anonymous)
self._monoid = None
__call__ = TypedBuiltinBinaryOp.__call__
@property
def monoid(self):
if self._monoid is None and not self._anonymous:
self._monoid = Monoid._find(self.name)
return self._monoid
class Monoid(OpBase):
__slots__ = "_binaryop"
_module = monoid
_modname = "monoid"
_typed_class = TypedBuiltinMonoid
_parse_config = {
"trim_from_front": 4,
"delete_exact": "MONOID",
"num_underscores": 1,
"re_exprs": [
re.compile(
"^GrB_(MIN|MAX|PLUS|TIMES|LOR|LAND|LXOR|LXNOR)_MONOID"
"_(BOOL|INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)$"
),
re.compile(
"^GxB_(ANY)_(INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)_MONOID$"
),
re.compile("^GxB_(PLUS|TIMES|ANY)_(FC32|FC64)_MONOID$"),
re.compile("^GxB_(EQ|ANY)_BOOL_MONOID$"),
re.compile("^GxB_(BOR|BAND|BXOR|BXNOR)_(UINT8|UINT16|UINT32|UINT64)_MONOID$"),
],
}
@classmethod
def _build(cls, name, binaryop, identity, *, anonymous=False):
if type(binaryop) is not BinaryOp:
raise TypeError(f"binaryop must be a BinaryOp, not {type(binaryop)}")
if name is None:
name = binaryop.name
new_type_obj = cls(name, binaryop, anonymous=anonymous)
if not isinstance(identity, Mapping):
identities = dict.fromkeys(binaryop.types, identity)
explicit_identities = False
else:
identities = identity
explicit_identities = True
for type_, identity in identities.items():
type_ = lookup_dtype(type_)
ret_type = binaryop[type_].return_type
# If there is a domain mismatch, then DomainMismatch will be raised
# below if identities were explicitly given.
# Skip complex dtypes for now, because they segfault!
            if (type_ != ret_type and not explicit_identities) or "FC" in type_.name:
continue
new_monoid = ffi_new("GrB_Monoid*")
func = libget(f"GrB_Monoid_new_{type_.name}")
zcast = ffi.cast(type_.c_type, identity)
check_status_carg(
func(new_monoid, binaryop[type_].gb_obj, zcast), "Monoid", new_monoid[0]
)
op = TypedUserMonoid(
new_type_obj, name, type_.name, ret_type, new_monoid[0], binaryop[type_], identity
)
new_type_obj._add(op)
return new_type_obj
@classmethod
def register_anonymous(cls, binaryop, identity, name=None):
if type(binaryop) is ParameterizedBinaryOp:
return ParameterizedMonoid(name, binaryop, identity, anonymous=True)
return cls._build(name, binaryop, identity, anonymous=True)
@classmethod
def register_new(cls, name, binaryop, identity):
module, funcname = cls._remove_nesting(name)
if type(binaryop) is ParameterizedBinaryOp:
monoid = ParameterizedMonoid(name, binaryop, identity)
else:
monoid = cls._build(name, binaryop, identity)
setattr(module, funcname, monoid)
# Also save it to `grblas.op` if not yet defined
module, funcname = cls._remove_nesting(name, module=op, modname="op", strict=False)
if not hasattr(module, funcname):
setattr(module, funcname, monoid)
def __init__(self, name, binaryop=None, *, anonymous=False):
super().__init__(name, anonymous=anonymous)
self._binaryop = binaryop
if binaryop is not None:
binaryop._monoid = self
@property
def binaryop(self):
if self._binaryop is not None:
return self._binaryop
# Must be builtin
return getattr(binary, self.name)
@property
def identities(self):
return {dtype: val.identity for dtype, val in self._typed_ops.items()}
__call__ = TypedBuiltinMonoid.__call__
class Semiring(OpBase):
__slots__ = "_monoid", "_binaryop"
_module = semiring
_modname = "semiring"
_typed_class = TypedBuiltinSemiring
_parse_config = {
"trim_from_front": 4,
"delete_exact": "SEMIRING",
"num_underscores": 2,
"re_exprs": [
re.compile(
"^GrB_(PLUS|MIN|MAX)_(PLUS|TIMES|FIRST|SECOND|MIN|MAX)_SEMIRING"
"_(INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)$"
),
re.compile(
"^GxB_(MIN|MAX|PLUS|TIMES|ANY)"
"_(FIRST|SECOND|PAIR|MIN|MAX|PLUS|MINUS|RMINUS|TIMES"
"|DIV|RDIV|ISEQ|ISNE|ISGT|ISLT|ISGE|ISLE|LOR|LAND|LXOR"
"|FIRSTI1|FIRSTI|FIRSTJ1|FIRSTJ|SECONDI1|SECONDI|SECONDJ1|SECONDJ)"
"_(INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)$"
),
re.compile(
"^GxB_(PLUS|TIMES|ANY)_(FIRST|SECOND|PAIR|PLUS|MINUS|TIMES|DIV|RDIV|RMINUS)"
"_(FC32|FC64)$"
),
re.compile(
"^GxB_(BOR|BAND|BXOR|BXNOR)_(BOR|BAND|BXOR|BXNOR)_(UINT8|UINT16|UINT32|UINT64)$"
),
],
"re_exprs_return_bool": [
re.compile("^GrB_(LOR|LAND|LXOR|LXNOR)_(LOR|LAND)_SEMIRING_BOOL$"),
re.compile(
"^GxB_(LOR|LAND|LXOR|EQ|ANY)_(EQ|NE|GT|LT|GE|LE)"
"_(INT8|UINT8|INT16|UINT16|INT32|UINT32|INT64|UINT64|FP32|FP64)$"
),
re.compile(
"^GxB_(LOR|LAND|LXOR|EQ|ANY)_(FIRST|SECOND|PAIR|LOR|LAND|LXOR|EQ|GT|LT|GE|LE)_BOOL$"
),
],
}
@classmethod
def _build(cls, name, monoid, binaryop, *, anonymous=False):
if type(monoid) is not Monoid:
raise TypeError(f"monoid must be a Monoid, not {type(monoid)}")
if type(binaryop) is not BinaryOp:
raise TypeError(f"binaryop must be a BinaryOp, not {type(binaryop)}")
if name is None:
name = f"{monoid.name}_{binaryop.name}"
new_type_obj = cls(name, monoid, binaryop, anonymous=anonymous)
for binary_in, binary_func in binaryop._typed_ops.items():
binary_out = binary_func.return_type
# Unfortunately, we can't have user-defined monoids over bools yet
# because numba can't compile correctly.
if binary_out not in monoid.types:
continue
binary_out = lookup_dtype(binary_out)
new_semiring = ffi_new("GrB_Semiring*")
check_status_carg(
lib.GrB_Semiring_new(new_semiring, monoid[binary_out].gb_obj, binary_func.gb_obj),
"Semiring",
new_semiring,
)
ret_type = monoid[binary_out].return_type
op = TypedUserSemiring(
new_type_obj,
name,
binary_in,
ret_type,
new_semiring[0],
monoid[binary_out],
binary_func,
)
new_type_obj._add(op)
return new_type_obj
@classmethod
def register_anonymous(cls, monoid, binaryop, name=None):
if type(monoid) is ParameterizedMonoid or type(binaryop) is ParameterizedBinaryOp:
return ParameterizedSemiring(name, monoid, binaryop, anonymous=True)
return cls._build(name, monoid, binaryop, anonymous=True)
@classmethod
def register_new(cls, name, monoid, binaryop):
module, funcname = cls._remove_nesting(name)
if type(monoid) is ParameterizedMonoid or type(binaryop) is ParameterizedBinaryOp:
semiring = ParameterizedSemiring(name, monoid, binaryop)
else:
semiring = cls._build(name, monoid, binaryop)
setattr(module, funcname, semiring)
# Also save it to `grblas.op` if not yet defined
module, funcname = cls._remove_nesting(name, module=op, modname="op", strict=False)
if not hasattr(module, funcname):
setattr(module, funcname, semiring)
def __init__(self, name, monoid=None, binaryop=None, *, anonymous=False):
super().__init__(name, anonymous=anonymous)
self._monoid = monoid
self._binaryop = binaryop
@property
def binaryop(self):
if self._binaryop is not None:
return self._binaryop
# Must be builtin
return getattr(binary, self.name.split("_")[1])
@property
def monoid(self):
if self._monoid is not None:
return self._monoid
# Must be builtin
return getattr(monoid, self.name.split("_")[0])
__call__ = TypedBuiltinSemiring.__call__
def get_typed_op(op, dtype, dtype2=None):
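    # e.g. get_typed_op(binary.plus, INT64, FP64) unifies the dtypes to FP64 and
    # returns the typed operator binary.plus[FP64] (illustrative example).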
if isinstance(op, OpBase):
if dtype2 is not None:
dtype = unify(dtype, dtype2)
return op[dtype]
elif isinstance(op, ParameterizedUdf):
op = op() # Use default parameters of parameterized UDFs
return get_typed_op(op, dtype, dtype2)
elif isinstance(op, TypedOpBase):
return op
else:
raise TypeError(f"Unable to get typed operator from object with type {type(op)}")
def find_opclass(gb_op):
if isinstance(gb_op, OpBase):
opclass = type(gb_op).__name__
elif isinstance(gb_op, TypedOpBase):
opclass = gb_op.opclass
elif isinstance(gb_op, ParameterizedUdf):
gb_op = gb_op() # Use default parameters of parameterized UDFs
gb_op, opclass = find_opclass(gb_op)
else:
opclass = UNKNOWN_OPCLASS
return gb_op, opclass
# Now initialize all the things!
UnaryOp._initialize()
BinaryOp._initialize()
Monoid._initialize()
Semiring._initialize()
```
#### File: grblas/tests/conftest.py
```python
import grblas
import atexit
import itertools
import pytest
def pytest_configure(config):
backend = config.getoption("--backend", "suitesparse")
blocking = config.getoption("--blocking", True)
record = config.getoption("--record", False)
grblas.init(backend, blocking=blocking)
print(f'Running tests with "{backend}" backend, blocking={blocking}, record={record}')
if record:
rec = grblas.Recorder()
rec.start()
def save_records():
with open("record.txt", "w") as f: # pragma: no cover
f.write("\n".join(rec.data))
# I'm sure there's a `pytest` way to do this...
atexit.register(save_records)
def pytest_runtest_setup(item):
if "slow" in item.keywords and not item.config.getoption("--runslow", True): # pragma: no cover
pytest.skip("need --runslow option to run")
@pytest.fixture(autouse=True, scope="function")
def reset_name_counters():
"""Reset automatic names for each test for easier comparison of record.txt"""
grblas.Matrix._name_counter = itertools.count()
grblas.Vector._name_counter = itertools.count()
grblas.Scalar._name_counter = itertools.count()
``` |
{
"source": "jim22k/grblas",
"score": 2
} |
#### File: graphblas/_ss/config.py
```python
from collections.abc import MutableMapping
from numbers import Integral
from graphblas import ffi, lib
from ..dtypes import lookup_dtype
from ..exceptions import _error_code_lookup
from ..utils import values_to_numpy_buffer
class BaseConfig(MutableMapping):
# Subclasses should redefine these
_get_function = None
_set_function = None
_null_valid = {}
_options = {}
_defaults = {}
# We add reverse lookups for _enumerations and _bitwise in __init__
_bitwise = {}
_enumerations = {}
_read_only = set()
_set_ctypes = {
"GxB_Format_Value": "int",
"bool": "int",
}
def __init__(self, parent=None):
for d in self._enumerations.values():
for k, v in list(d.items()):
d[v] = k
for d in self._bitwise.values():
for k, v in list(d.items()):
d[v] = k
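        # e.g. an _enumerations entry such as {"by_row": 0, "by_col": 1} (illustrative values)
        # also gains the reverse entries {0: "by_row", 1: "by_col"} for value-to-name lookups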
self._parent = parent
def __delitem__(self, key):
raise TypeError("Configuration options can't be deleted.")
def __getitem__(self, key):
key = key.lower()
if key not in self._options:
raise KeyError(key)
key_obj, ctype = self._options[key]
is_array = "[" in ctype
val_ptr = ffi.new(ctype if is_array else f"{ctype}*")
if self._parent is None:
info = self._get_function(key_obj, val_ptr)
else:
info = self._get_function(self._parent._carg, key_obj, val_ptr)
if info == lib.GrB_SUCCESS: # pragma: no branch
if is_array:
return list(val_ptr)
elif key in self._enumerations:
return self._enumerations[key][val_ptr[0]]
elif key in self._bitwise:
bitwise = self._bitwise[key]
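                # Decompose a combined flag value into the set of single-bit names it covers,
                # e.g. value 3 with bits {"a": 1, "b": 2} (illustrative names) yields {"a", "b"}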
val = val_ptr[0]
if val in bitwise:
return {bitwise[val]}
rv = set()
for k, v in bitwise.items():
if isinstance(k, str) and val & v and bin(v).count("1") == 1:
rv.add(k)
return rv
return val_ptr[0]
raise _error_code_lookup[info](f"Failed to get info for {key!r}") # pragma: no cover
def __setitem__(self, key, val):
key = key.lower()
if key not in self._options:
raise KeyError(key)
if key in self._read_only:
raise ValueError(f"Config option {key!r} is read-only")
key_obj, ctype = self._options[key]
ctype = self._set_ctypes.get(ctype, ctype)
if key in self._enumerations and isinstance(val, str):
val = val.lower()
val = self._enumerations[key][val]
elif key in self._bitwise and val is not None and not isinstance(val, Integral):
bitwise = self._bitwise[key]
if isinstance(val, str):
val = bitwise[val.lower()]
else:
bits = 0
for x in val:
if isinstance(x, str):
bits |= bitwise[x.lower()]
else:
bits |= x
val = bits
if val is None:
if key in self._defaults:
val = self._defaults[key]
else:
raise ValueError(f"Unable to set default value for {key!r}")
if val is None:
val_obj = ffi.NULL
elif "[" in ctype:
dtype, size = ctype.split("[", 1)
size = size.split("]", 1)[0]
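            # e.g. a ctype such as "double[64]" (illustrative) splits into dtype "double" and size "64"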
dtype = lookup_dtype(dtype)
vals, dtype = values_to_numpy_buffer(val, dtype.np_type)
if int(size) != vals.size:
raise ValueError(
f"Wrong number of elements when setting {key!r} config. "
f"expected {size}, got {vals.size}: {val}"
)
val_obj = ffi.from_buffer(ctype, vals)
else:
val_obj = ffi.cast(ctype, val)
if self._parent is None:
info = self._set_function(key_obj, val_obj)
else:
info = self._set_function(self._parent._carg, key_obj, val_obj)
if info != lib.GrB_SUCCESS:
raise _error_code_lookup[info](f"Failed to set info for {key!r}")
def __iter__(self):
return iter(sorted(self._options))
def __len__(self):
return len(self._options)
def __repr__(self):
return "{" + ",\n ".join(f"{k!r}: {v!r}" for k, v in self.items()) + "}"
def _ipython_key_completions_(self): # pragma: no cover
return list(self)
```
#### File: graphblas/tests/test_core.py
```python
import pytest
import graphblas
def test_import_special_attrs():
not_hidden = {x for x in dir(graphblas) if not x.startswith("__")}
# Is everything imported?
assert len(not_hidden & graphblas._SPECIAL_ATTRS) == len(graphblas._SPECIAL_ATTRS)
# Is everything special that needs to be?
not_special = {x for x in dir(graphblas) if not x.startswith("_")} - graphblas._SPECIAL_ATTRS
assert not_special == {"backend", "config", "init", "replace"}
# Make sure these "not special" objects don't have objects that look special within them
for attr in not_special:
assert not set(dir(getattr(graphblas, attr))) & graphblas._SPECIAL_ATTRS
def test_bad_init():
# same params is okay
params = dict(graphblas._init_params)
del params["automatic"]
graphblas.init(**params)
# different params is bad
params["blocking"] = not params["blocking"]
with pytest.raises(graphblas.exceptions.GraphblasException, match="different init parameters"):
graphblas.init(**params)
def test_bad_libget():
with pytest.raises(AttributeError, match="GrB_bad_name"):
graphblas.base.libget("GrB_bad_name")
def test_lib_attrs():
for attr in dir(graphblas.lib):
getattr(graphblas.lib, attr)
def test_bad_call():
class bad:
name = "bad"
_carg = 1
with pytest.raises(TypeError, match="Error calling GrB_Matrix_apply"):
graphblas.base.call("GrB_Matrix_apply", [bad, bad, bad, bad, bad])
with pytest.raises(
TypeError, match=r"Call objects: GrB_Matrix_apply\(bad, bad, bad, bad, bad, bad\)"
):
graphblas.base.call("GrB_Matrix_apply", [bad, bad, bad, bad, bad, bad])
```
#### File: graphblas/tests/test_dtype.py
```python
import itertools
import pickle
import string
import numpy as np
import pytest
from graphblas import dtypes, lib
from graphblas.dtypes import lookup_dtype
all_dtypes = [
dtypes.BOOL,
dtypes.INT8,
dtypes.INT16,
dtypes.INT32,
dtypes.INT64,
dtypes.UINT8,
dtypes.UINT16,
dtypes.UINT32,
dtypes.UINT64,
dtypes.FP32,
dtypes.FP64,
]
if dtypes._supports_complex:
all_dtypes.append(dtypes.FC32)
all_dtypes.append(dtypes.FC64)
def test_names():
assert dtypes.BOOL.name == "BOOL"
assert dtypes.INT8.name == "INT8"
assert dtypes.INT16.name == "INT16"
assert dtypes.INT32.name == "INT32"
assert dtypes.INT64.name == "INT64"
assert dtypes.UINT8.name == "UINT8"
assert dtypes.UINT16.name == "UINT16"
assert dtypes.UINT32.name == "UINT32"
assert dtypes.UINT64.name == "UINT64"
assert dtypes.FP32.name == "FP32"
assert dtypes.FP64.name == "FP64"
def test_ctype():
assert dtypes.BOOL.c_type == "_Bool"
assert dtypes.INT8.c_type == "int8_t"
assert dtypes.INT16.c_type == "int16_t"
assert dtypes.INT32.c_type == "int32_t"
assert dtypes.INT64.c_type == "int64_t"
assert dtypes.UINT8.c_type == "uint8_t"
assert dtypes.UINT16.c_type == "uint16_t"
assert dtypes.UINT32.c_type == "uint32_t"
assert dtypes.UINT64.c_type == "uint64_t"
assert dtypes.FP32.c_type == "float"
assert dtypes.FP64.c_type == "double"
def test_gbtype():
assert dtypes.BOOL.gb_obj == lib.GrB_BOOL
assert dtypes.INT8.gb_obj == lib.GrB_INT8
assert dtypes.INT16.gb_obj == lib.GrB_INT16
assert dtypes.INT32.gb_obj == lib.GrB_INT32
assert dtypes.INT64.gb_obj == lib.GrB_INT64
assert dtypes.UINT8.gb_obj == lib.GrB_UINT8
assert dtypes.UINT16.gb_obj == lib.GrB_UINT16
assert dtypes.UINT32.gb_obj == lib.GrB_UINT32
assert dtypes.UINT64.gb_obj == lib.GrB_UINT64
assert dtypes.FP32.gb_obj == lib.GrB_FP32
assert dtypes.FP64.gb_obj == lib.GrB_FP64
def test_lookup_by_name():
for dt in all_dtypes:
assert lookup_dtype(dt.name) is dt
def test_lookup_by_ctype():
for dt in all_dtypes:
if dt.c_type == "float":
# Choose 'float' to match numpy/Python, not C (where 'float' means FP32)
assert lookup_dtype(dt.c_type) is dtypes.FP64
else:
assert lookup_dtype(dt.c_type) is dt
def test_lookup_by_gbtype():
for dt in all_dtypes:
assert lookup_dtype(dt.gb_obj) is dt
def test_lookup_by_dtype():
assert lookup_dtype(bool) == dtypes.BOOL
assert lookup_dtype(int) == dtypes.INT64
assert lookup_dtype(float) == dtypes.FP64
with pytest.raises(TypeError, match="Bad dtype"):
lookup_dtype(None)
def test_unify_dtypes():
assert dtypes.unify(dtypes.BOOL, dtypes.BOOL) == dtypes.BOOL
assert dtypes.unify(dtypes.BOOL, dtypes.INT16) == dtypes.INT16
assert dtypes.unify(dtypes.INT16, dtypes.BOOL) == dtypes.INT16
assert dtypes.unify(dtypes.INT16, dtypes.INT8) == dtypes.INT16
assert dtypes.unify(dtypes.UINT32, dtypes.UINT8) == dtypes.UINT32
assert dtypes.unify(dtypes.UINT32, dtypes.FP32) == dtypes.FP64
assert dtypes.unify(dtypes.INT32, dtypes.FP32) == dtypes.FP64
assert dtypes.unify(dtypes.FP64, dtypes.UINT8) == dtypes.FP64
assert dtypes.unify(dtypes.FP64, dtypes.FP32) == dtypes.FP64
assert dtypes.unify(dtypes.INT16, dtypes.UINT16) == dtypes.INT32
assert dtypes.unify(dtypes.UINT64, dtypes.INT8) == dtypes.FP64
def test_dtype_bad_comparison():
with pytest.raises(TypeError):
assert dtypes.BOOL == object()
with pytest.raises(TypeError):
assert object() != dtypes.BOOL
def test_dtypes_match_numpy():
for key, val in dtypes._registry.items():
try:
if key is int or (isinstance(key, str) and key == "int"):
# For win64, numpy treats int as int32, not int64
# graphblas won't allow this craziness
npval = np.int64
else:
npval = np.dtype(key)
except Exception:
continue
assert dtypes.lookup_dtype(npval) == val, f"{key} of type {type(key)}"
def test_pickle():
for val in dtypes._registry.values():
s = pickle.dumps(val)
val2 = pickle.loads(s)
if val._is_udt: # pragma: no cover
assert val.np_type == val2.np_type
assert val.name == val2.name
else:
assert val == val2
s = pickle.dumps(dtypes._INDEX)
val2 = pickle.loads(s)
assert dtypes._INDEX == val2
def test_unify_matches_numpy():
for type1, type2 in itertools.product(all_dtypes, all_dtypes):
gb_type = dtypes.unify(type1, type2)
np_type = type(type1.np_type.type(0) + type2.np_type.type(0))
assert gb_type is lookup_dtype(np_type), f"({type1}, {type2}) -> {gb_type}"
def test_lt_dtypes():
expected = [
dtypes.BOOL,
dtypes.FP32,
dtypes.FP64,
dtypes.INT8,
dtypes.INT16,
dtypes.INT32,
dtypes.INT64,
dtypes.UINT8,
dtypes.UINT16,
dtypes.UINT32,
dtypes.UINT64,
]
if dtypes._supports_complex:
expected.insert(1, dtypes.FC32)
expected.insert(2, dtypes.FC64)
assert sorted(all_dtypes) == expected
assert dtypes.BOOL < "FP32"
with pytest.raises(TypeError):
assert dtypes.BOOL < 5
def test_bad_register():
record_dtype = np.dtype([("x", np.object_), ("y", np.float64)], align=True)
with pytest.raises(ValueError, match="Python object"):
dtypes.register_new("has_object", record_dtype)
record_dtype = np.dtype([("x", np.bool_), ("y", np.float64)], align=True)
with pytest.raises(ValueError, match="identifier"):
dtypes.register_new("$", record_dtype)
with pytest.raises(ValueError, match="builtin"):
dtypes.register_new("is_builtin", np.int8)
udt = dtypes.register_anonymous(record_dtype)
assert udt.name is not None
with pytest.raises(ValueError, match="name"):
dtypes.register_new("register_new", record_dtype)
with pytest.raises(ValueError, match="name"):
dtypes.register_new("UINT8", record_dtype)
def test_auto_register():
n = np.random.randint(10, 64)
np_type = np.dtype(f"({n},)int16")
assert lookup_dtype(np_type).np_type == np_type
def test_default_names():
from graphblas.dtypes import _default_name
assert _default_name(np.dtype([("x", np.int32), ("y", np.float64)], align=True)) == (
"{'x': INT32, 'y': FP64}"
)
assert _default_name(np.dtype("(29,)uint8")) == "UINT8[29]"
assert _default_name(np.dtype("(3,4)bool")) == "BOOL[3, 4]"
assert _default_name(np.dtype((np.dtype("(5,)float64"), (6,)))) == "FP64[5][6]"
assert _default_name(np.dtype("S5")) == "dtype('S5')"
def test_record_dtype_from_dict():
dtype = dtypes.lookup_dtype({"x": int, "y": float})
assert dtype.name == "{'x': INT64, 'y': FP64}"
def test_dtype_to_from_string():
types = [dtypes.BOOL, dtypes.FP64]
for c in string.ascii_letters:
try:
dtype = np.dtype(c)
types.append(dtype)
except Exception:
pass
for dtype in types:
s = dtypes._dtype_to_string(dtype)
try:
dtype2 = dtypes._string_to_dtype(s)
except Exception:
with pytest.raises(Exception):
lookup_dtype(dtype)
else:
assert dtype == dtype2
```
#### File: graphblas/tests/test_ss_utils.py
```python
import numpy as np
import pytest
from numpy.testing import assert_array_equal
import graphblas as gb
from graphblas import Matrix, Vector
@pytest.mark.parametrize("do_iso", [False, True])
def test_vector_head(do_iso):
v0 = Vector(int, 5)
if do_iso:
values1 = values2 = values3 = [1, 1, 1]
else:
values1 = [10, 20, 30]
values2 = [2, 4, 6]
values3 = [1, 2, 3]
v1 = Vector.from_values([0, 1, 2], values1) # full
v2 = Vector.from_values([1, 3, 5], values2) # bitmap
v3 = Vector.from_values([100, 200, 300], values3) # sparse
assert v1.ss.export()["format"] == "full"
assert v2.ss.export()["format"] == "bitmap"
assert v3.ss.export()["format"] == "sparse"
assert v1.ss.is_iso is do_iso
assert v2.ss.is_iso is do_iso
assert v3.ss.is_iso is do_iso
for dtype in [None, np.float64]:
expected_dtype = np.int64 if dtype is None else dtype
for _ in range(2):
indices, vals = v0.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(indices, [])
assert_array_equal(vals, [])
assert indices.dtype == np.uint64
assert vals.dtype == expected_dtype
indices, vals = v1.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(indices, [0, 1])
assert_array_equal(vals, values1[:2])
assert indices.dtype == np.uint64
assert vals.dtype == expected_dtype
indices, vals = v2.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(indices, [1, 3])
assert_array_equal(vals, values2[:2])
assert indices.dtype == np.uint64
assert vals.dtype == expected_dtype
indices, vals = v3.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(indices, [100, 200])
assert_array_equal(vals, values3[:2])
assert indices.dtype == np.uint64
assert vals.dtype == expected_dtype
@pytest.mark.parametrize("do_iso", [False, True])
def test_matrix_head(do_iso):
A0 = Matrix(int, 5, 5)
if do_iso:
values1 = [1, 1, 1, 1]
values2 = values3 = values4 = [1, 1, 1]
else:
values1 = [1, 2, 3, 4]
values2 = [1, 2, 4]
values3 = values4 = [1, 2, 3]
A1 = Matrix.from_values([0, 0, 1, 1], [0, 1, 0, 1], values1) # fullr
A2 = Matrix.from_values([0, 0, 1], [0, 1, 1], values2) # Bitmap
A3 = Matrix.from_values([5, 5, 10], [4, 5, 10], values3) # CSR
A4 = Matrix.from_values([500, 500, 1000], [400, 500, 1000], values4) # HyperCSR
d = A1.ss.export(raw=True)
assert d["format"] == "fullr"
d["format"] = "fullc"
A5 = Matrix.ss.import_any(**d) # fullc
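    # Re-importing the same raw buffer with the format flipped to "fullc" reinterprets it
    # column-major, so A5 behaves like the transpose of A1 (see the swapped row/col asserts below)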
d = A2.ss.export(raw=True)
assert d["format"] == "bitmapr"
d["format"] = "bitmapc"
A6 = Matrix.ss.import_any(**d) # bitmapc
d = A3.ss.export(raw=True)
assert d["format"] == "csr"
d["format"] = "csc"
d["row_indices"] = d["col_indices"]
del d["col_indices"]
A7 = Matrix.ss.import_any(**d) # csc
d = A4.ss.export(raw=True)
assert d["format"] == "hypercsr"
d["format"] = "hypercsc"
d["row_indices"] = d["col_indices"]
del d["col_indices"]
d["cols"] = d["rows"]
del d["rows"]
A8 = Matrix.ss.import_any(**d) # hypercsc
assert A1.ss.is_iso is do_iso
assert A2.ss.is_iso is do_iso
assert A3.ss.is_iso is do_iso
assert A4.ss.is_iso is do_iso
assert A5.ss.is_iso is do_iso
assert A6.ss.is_iso is do_iso
assert A7.ss.is_iso is do_iso
assert A8.ss.is_iso is do_iso
for dtype in [None, np.float64]:
expected_dtype = np.int64 if dtype is None else dtype
for _ in range(2):
rows, cols, vals = A0.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(rows, [])
assert_array_equal(cols, [])
assert_array_equal(vals, [])
assert rows.dtype == cols.dtype == np.uint64
assert vals.dtype == expected_dtype
rows, cols, vals = A1.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(rows, [0, 0])
assert_array_equal(cols, [0, 1])
assert_array_equal(vals, values1[:2])
assert rows.dtype == cols.dtype == np.uint64
assert vals.dtype == expected_dtype
rows, cols, vals = A2.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(rows, [0, 0])
assert_array_equal(cols, [0, 1])
assert_array_equal(vals, values2[:2])
assert rows.dtype == cols.dtype == np.uint64
assert vals.dtype == expected_dtype
rows, cols, vals = A3.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(rows, [5, 5])
assert_array_equal(cols, [4, 5])
assert_array_equal(vals, values3[:2])
assert rows.dtype == cols.dtype == np.uint64
assert vals.dtype == expected_dtype
rows, cols, vals = A4.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(rows, [500, 500])
assert_array_equal(cols, [400, 500])
assert_array_equal(vals, values4[:2])
assert rows.dtype == cols.dtype == np.uint64
assert vals.dtype == expected_dtype
rows, cols, vals = A5.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(rows, [0, 1])
assert_array_equal(cols, [0, 0])
assert_array_equal(vals, values1[:2])
assert rows.dtype == cols.dtype == np.uint64
assert vals.dtype == expected_dtype
rows, cols, vals = A6.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(rows, [0, 1])
assert_array_equal(cols, [0, 0])
assert_array_equal(vals, values2[:2])
assert rows.dtype == cols.dtype == np.uint64
assert vals.dtype == expected_dtype
rows, cols, vals = A7.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(rows, [4, 5])
assert_array_equal(cols, [5, 5])
assert_array_equal(vals, values3[:2])
assert rows.dtype == cols.dtype == np.uint64
assert vals.dtype == expected_dtype
rows, cols, vals = A8.ss.head(2, sort=True, dtype=dtype)
assert_array_equal(rows, [400, 500])
assert_array_equal(cols, [500, 500])
assert_array_equal(vals, values4[:2])
assert rows.dtype == cols.dtype == np.uint64
assert vals.dtype == expected_dtype
def test_about():
d = {}
about = gb.ss.about
for k in about:
d[k] = about[k]
assert d == about
assert len(d) == len(about)
with pytest.raises(KeyError):
about["badkey"]
assert "SuiteSparse" in about["library_name"]
with pytest.raises(TypeError):
del about["library_name"]
assert "library_name" in repr(about)
def test_global_config():
d = {}
config = gb.ss.config
for k in config:
d[k] = config[k]
assert d == config
assert len(d) == len(config)
for k, v in d.items():
config[k] = v
assert d == config
with pytest.raises(KeyError):
config["badkey"]
with pytest.raises(KeyError):
config["badkey"] = None
config["format"] = "by_col"
assert config["format"] == "by_col"
config["format"] = "by_row"
assert config["format"] == "by_row"
with pytest.raises(TypeError):
del config["format"]
with pytest.raises(KeyError):
config["format"] = "bad_format"
for k in config:
if k in config._defaults:
config[k] = None
else:
with pytest.raises(ValueError):
config[k] = None
with pytest.raises(ValueError, match="Wrong number"):
config["memory_pool"] = [1, 2]
assert "format" in repr(config)
``` |
{
"source": "jim22k/metagraph-igraph",
"score": 2
} |
#### File: metagraph_igraph/algorithms/centrality.py
```python
from metagraph import concrete_algorithm
from metagraph.plugins.numpy.types import NumpyNodeMap, NumpyNodeSet
from metagraph.plugins.core import exceptions
from ..types import IGraph
import numpy as np
import metagraph as mg
import igraph
@concrete_algorithm("centrality.pagerank")
def igraph_pagerank(
graph: IGraph, damping: float, maxiter: int, tolerance: float
) -> NumpyNodeMap:
weights = "weight" if graph.value.is_weighted() else None
opts = igraph.ARPACKOptions()
opts.maxiter = maxiter
opts.tol = tolerance
try:
pr = graph.value.pagerank(
weights=weights,
damping=damping,
implementation="arpack",
arpack_options=opts,
)
except igraph.InternalError as e:
if "Maximum number of iterations reached" in str(e):
raise exceptions.ConvergenceError(
f"failed to converge within {maxiter} iterations"
)
raise
node_ids = None if graph.is_sequential() else graph.value.vs["NodeId"]
return NumpyNodeMap(np.array(pr), node_ids)
@concrete_algorithm("centrality.betweenness")
def igraph_betweenness_centrality(
graph: IGraph, nodes: mg.Optional[NumpyNodeSet], normalize: bool
) -> NumpyNodeMap:
if nodes is not None:
nodes = nodes.value
node_ids = nodes
else:
node_ids = None if graph.is_sequential() else graph.value.vs["NodeId"]
if graph.value.is_weighted():
bc = graph.value.betweenness(vertices=nodes, weights="weight")
else:
bc = graph.value.betweenness(vertices=nodes)
return NumpyNodeMap(np.array(bc), node_ids)
@concrete_algorithm("centrality.closeness")
def closeness_centrality(
graph: IGraph,
nodes: mg.Optional[NumpyNodeSet],
) -> NumpyNodeMap:
if nodes is not None:
nodes = nodes.value
node_ids = nodes
else:
node_ids = None if graph.is_sequential() else graph.value.vs["NodeId"]
cc = graph.value.closeness(
vertices=nodes, mode="in", weights=graph.edge_weight_label
)
return NumpyNodeMap(np.array(cc), node_ids)
@concrete_algorithm("centrality.eigenvector")
def eigenvector_centrality(
graph: IGraph, maxiter: int, tolerance: float
) -> NumpyNodeMap:
weights = "weight" if graph.value.is_weighted() else None
opts = igraph.ARPACKOptions()
opts.maxiter = maxiter
opts.tol = tolerance
eigv = graph.value.eigenvector_centrality(
scale=False, weights=weights, arpack_options=opts
)
node_ids = None if graph.is_sequential() else graph.value.vs["NodeId"]
return NumpyNodeMap(np.array(eigv), node_ids)
```
#### File: metagraph_igraph/algorithms/clustering.py
```python
from metagraph import concrete_algorithm, NodeID
from metagraph.plugins.numpy.types import NumpyNodeMap
from ..types import IGraph
import igraph
import numpy as np
from typing import Tuple
@concrete_algorithm("clustering.triangle_count")
def igraph_triangle_count(graph: IGraph) -> int:
return len(graph.value.cliques(3, 3))
@concrete_algorithm("clustering.connected_components")
def igraph_connected_components(graph: IGraph) -> NumpyNodeMap:
cc = graph.value.components(igraph.WEAK).membership
node_ids = None if graph.is_sequential() else graph.value.vs["NodeId"]
return NumpyNodeMap(np.array(cc), node_ids)
@concrete_algorithm("clustering.strongly_connected_components")
def igraph_strongly_connected_components(graph: IGraph) -> NumpyNodeMap:
cc = graph.value.components(igraph.STRONG).membership
node_ids = None if graph.is_sequential() else graph.value.vs["NodeId"]
return NumpyNodeMap(np.array(cc), node_ids)
@concrete_algorithm("flow.max_flow")
def max_flow(
graph: IGraph, source_node: NodeID, target_node: NodeID
) -> Tuple[float, IGraph]:
aprops = IGraph.Type.compute_abstract_properties(graph, {"edge_dtype"})
g = graph.value
flow = g.maxflow(source_node, target_node, graph.edge_weight_label)
out = g.copy()
flow_vals = map(int, flow.flow) if aprops["edge_dtype"] == "int" else flow.flow
out.es[graph.edge_weight_label] = list(flow_vals)
return flow.value, IGraph(
out,
node_weight_label=graph.node_weight_label,
edge_weight_label=graph.edge_weight_label,
)
@concrete_algorithm("flow.min_cut")
def min_cut(
graph: IGraph,
source_node: NodeID,
target_node: NodeID,
) -> Tuple[float, IGraph]:
"""
Returns the sum of the minimum cut weights and a graph containing only those edges
which are part of the minimum cut.
"""
g = graph.value
cut = g.mincut(source_node, target_node, graph.edge_weight_label)
out = igraph.Graph(len(g.vs), directed=g.is_directed())
if graph.node_weight_label in g.vs.attributes():
out.vs[graph.node_weight_label] = g.vs[graph.node_weight_label]
for edge in g.es[cut.cut]:
out.add_edge(edge.source, edge.target, **edge.attributes())
return cut.value, IGraph(
out,
node_weight_label=graph.node_weight_label,
edge_weight_label=graph.edge_weight_label,
)
```
#### File: metagraph-igraph/metagraph_igraph/registry.py
```python
from metagraph import PluginRegistry
# Use this as the entry_point object
registry = PluginRegistry("metagraph_igraph")
def find_plugins():
# Ensure we import all items we want registered
from . import types, translators, algorithms
registry.register_from_modules(types, translators, algorithms)
return registry.plugins
################
# Import guards
################
``` |
{
"source": "jim22k/metagraph-karateclub",
"score": 2
} |
#### File: plugins/karateclub/algorithms.py
```python
import metagraph as mg
from metagraph import concrete_algorithm
from .. import has_karateclub
from typing import Tuple
if has_karateclub:
import karateclub
import numpy as np
import networkx as nx
from metagraph.plugins.networkx.types import NetworkXGraph
from metagraph.plugins.numpy.types import (
NumpyMatrixType,
NumpyNodeMap,
)
@concrete_algorithm("embedding.train.node2vec")
def karateclub_node2vec_train(
graph: NetworkXGraph,
p: float,
q: float,
walks_per_node: int,
walk_length: int,
embedding_size: int,
epochs: int,
learning_rate: float,
worker_count: int = 1,
) -> Tuple[NumpyMatrixType, NumpyNodeMap]:
trainer = karateclub.Node2Vec(
walk_number=walks_per_node,
walk_length=walk_length,
workers=worker_count,
p=p,
q=q,
dimensions=embedding_size,
epochs=epochs,
learning_rate=learning_rate,
)
old2canonical = {
node: canonical_index
for canonical_index, node in enumerate(graph.value.nodes)
}
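        # e.g. nodes ("a", "b", "c") are relabelled to 0, 1, 2 because karateclub expects
        # consecutive integer node ids starting at 0 (example labels are illustrative)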
relabelled_graph = nx.relabel_nodes(graph.value, old2canonical, copy=True)
trainer.fit(relabelled_graph)
np_embedding_matrix = trainer.get_embedding()
node2index = NumpyNodeMap(
np.arange(len(graph.value.nodes)), nodes=np.array(list(graph.value.nodes))
)
return (np_embedding_matrix, node2index)
@concrete_algorithm("embedding.train.graph2vec")
def karateclub_graph2vec_train(
graphs: mg.List[NetworkXGraph],
subgraph_degree: int,
embedding_size: int,
epochs: int,
learning_rate: float,
worker_count: int = 1,
) -> NumpyMatrixType:
if not all(nx.is_connected(graph.value) for graph in graphs):
raise ValueError("Graphs must be connected")
graph2vec_trainer = karateclub.Graph2Vec(
wl_iterations=subgraph_degree,
dimensions=embedding_size,
workers=worker_count,
epochs=epochs,
learning_rate=learning_rate,
min_count=0,
)
graph2vec_trainer.fit(
[
nx.relabel_nodes(
graph.value,
dict(map(reversed, enumerate(graph.value.nodes))),
copy=True,
)
for graph in graphs
]
)
np_embedding_matrix = graph2vec_trainer.get_embedding()
return np_embedding_matrix
``` |
{
"source": "Jim2E/APMA4903FinalProject",
"score": 3
} |
#### File: APMA4903FinalProject/Selfish-Mining-Simulator/Selfish_Mining.py
```python
import random
import time
import sys
class Selfish_Mining:
#def __init__(self, nb_simulations, alpha, gamma):
def __init__(self, **d):
self.__nb_simulations = d['nb_simulations']
self.__delta = 0 # advance of selfish miners on honests'ones
self.__privateChain = 0 # length of private chain RESET at each validation
self.__publicChain = 0 # length of public chain RESET at each validation
self.__honestsValidBlocks = 0
self.__selfishValidBlocks = 0
self.__counter = 1
        # Configured parameters
self.__alpha = d['alpha']
self.__gamma = d['gamma']
# For results
self.__revenue = None
self.__orphanBlocks = 0
self.__totalMinedBlocks = 0
def write_file(self):
stats_result = [self.__alpha, self.__gamma, self.__nb_simulations,\
self.__honestsValidBlocks, self.__selfishValidBlocks,\
self.__revenue, self.__orphanBlocks, self.__totalMinedBlocks]
with open('results.txt', 'a', encoding='utf-8') as f:
f.write(','.join([str(x) for x in stats_result]) + '\n')
def Simulate(self):
while(self.__counter <= self.__nb_simulations):
# Mining power does not mean the block is actually found
# there is a probability p to find it
r = random.uniform(0, 1) # random number for each simulation
self.__delta = self.__privateChain - self.__publicChain
if r <= float(self.__alpha):
self.On_Selfish_Miners()
else:
self.On_Honest_Miners()
### COPY-PASTE THE 3 LINES BELOW IN THE IF/ELSE TO GET EACH ITERATION RESULTS ###
#self.actualize_results()
#print(self)
#time.sleep(1)
self.__counter += 1
# Publishing private chain if not empty when total nb of simulations reached
self.__delta = self.__privateChain - self.__publicChain
if self.__delta > 0:
self.__selfishValidBlocks += self.__privateChain
self.__publicChain, self.__privateChain = 0,0
self.actualize_results()
print(self)
def On_Selfish_Miners(self):
self.__privateChain += 1
if self.__delta == 0 and self.__privateChain == 2:
self.__privateChain, self.__publicChain = 0,0
self.__selfishValidBlocks += 2
# Publishing private chain reset both public and private chains lengths to 0
def On_Honest_Miners(self):
self.__publicChain += 1
if self.__delta == 0:
            # if 1 block is found => 1 block validated, as honest miners take the lead
self.__honestsValidBlocks += 1
            # If there is a competition though (1 vs 1), gamma decides the outcome
            # (reminder: gamma = ratio of honest miners who choose to mine on the pool's block)
            # --> either the next block extends the private chain => 1 block for each competitor in revenue
            # --> or it extends the honest chain => 2 blocks for honest miners (one more than the pool)
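            # e.g. with gamma = 0.5 the selfish pool wins roughly half of these 1-vs-1
            # competitions (whenever s <= gamma below) and honest miners win the rest.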
s = random.uniform(0, 1)
if self.__privateChain > 0 and s <= float(self.__gamma):
self.__selfishValidBlocks += 1
elif self.__privateChain > 0 and s > float(self.__gamma):
self.__honestsValidBlocks += 1
#in all cases (append private or public chain) all is reset to 0
self.__privateChain, self.__publicChain = 0,0
elif self.__delta == 2:
self.__selfishValidBlocks += self.__privateChain
self.__publicChain, self.__privateChain = 0,0
def actualize_results(self):
# Total Blocks Mined
self.__totalMinedBlocks = self.__honestsValidBlocks + self.__selfishValidBlocks
# Orphan Blocks
self.__orphanBlocks = self.__nb_simulations - self.__totalMinedBlocks
# Revenue
if self.__honestsValidBlocks or self.__selfishValidBlocks:
self.__revenue = 100*round(self.__selfishValidBlocks/(self.__totalMinedBlocks),3)
# Show message
def __str__(self):
if self.__counter <= self.__nb_simulations:
simulation_message = '\nSimulation ' + str(self.__counter) + ' out of ' + str(self.__nb_simulations) + '\n'
current_stats = 'Private chain : ' + '+ '*int(self.__privateChain) + '\n'\
'public chain : ' + '+ '*int(self.__publicChain) + '\n'
else:
simulation_message = '\n\n' + str(self.__nb_simulations) + ' Simulations Done // publishing private chain if non-empty\n'
current_stats = ''
choosen_parameters = 'Alpha : ' + str(self.__alpha) + '\t||\t' +'Gamma : ' + str(self.__gamma) +'\n'
selfish_vs_honests_stats = \
'Blocks validated by honest miners : ' + str(self.__honestsValidBlocks) + '\n'\
'Blocks validated by selfish miners : ' + str(self.__selfishValidBlocks) + '\n'\
'Expected if they were honests : ' + str(int(self.__alpha * self.__nb_simulations)) + '\n'\
'Number of total blocks mined : ' + str(self.__totalMinedBlocks) + '\n'\
'Number of Orphan blocks : ' + str(self.__orphanBlocks) + '\n'\
'Revenue ratio = PoolBlocks / TotalBlocks : ' + str(self.__revenue) + '%\n'
return simulation_message + current_stats + choosen_parameters + selfish_vs_honests_stats
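# Example invocation for a single run (arguments: nb_simulations alpha gamma), e.g.:
#   python Selfish_Mining.py 200000 0.35 0.5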
if len(sys.argv)==4:
dico = {'nb_simulations':int(sys.argv[1]), 'alpha':float(sys.argv[2]), 'gamma':float(sys.argv[3])}
new = Selfish_Mining(**dico)
new.Simulate()
if len(sys.argv)==1:
### TO SAVE MULTIPLE VALUES IN FILE ###
start = time.time()
    alphas = list(i/100 for i in range(0, 50, 5))  # alpha values 0.00 to 0.45 in steps of 0.05
    gammas = list(i/100 for i in range(0, 100, 10))  # gamma values 0.0 to 0.9 in steps of 0.1
    count = 0  # progress fraction across the parameter sweep
for alpha in alphas:
for gamma in gammas:
new = Selfish_Mining(**{'nb_simulations':200000, 'alpha':alpha, 'gamma':gamma})
new.Simulate() # took 113 seconds | 155 Ko
new.write_file()
count += 1/len(alphas)
print("progress :" + str(round(count,2)*100) + "%\n")
duration = time.time()-start
print("Tooks " + str(round(duration,2)) + " seconds")
``` |
{
"source": "jim3456/basics",
"score": 3
} |
#### File: basics/helpers/dt_helpers.py
```python
import pytz
class tzAlias(object):
'''
    Enum-like object to organize pytz time zones. pytz zones are looked up by string name,
    so collecting them as attributes makes them easier to use from an IDE.
'''
eastern=pytz.timezone('US/Eastern')
central=pytz.timezone('US/Central')
pacific=pytz.timezone('US/Pacific')
london=pytz.timezone('Europe/London')
paris=pytz.timezone('Europe/Paris')
utc=pytz.UTC
def isAware(dtObject):
'''
determines if a datetime.datetime or datetime.time object is aware or naive
'''
    if hasattr(dtObject, 'tzinfo') and dtObject.tzinfo is not None and dtObject.tzinfo.utcoffset(dtObject) is not None:
return(True)
return(False)
def modify_time_zone(dtObject,time_zone=pytz.UTC, old_time_zone=None):
'''
    Adjusts the time zone of a datetime object.
    Accepts both aware and naive objects.
    For naive objects it keeps the wall-clock time and simply attaches the time zone.
    For aware objects it converts the time from the old zone to the new one.
'''
if time_zone is None:
return dtObject
if isAware(dtObject):
output=time_zone.normalize(dtObject)
else:
if old_time_zone is None:
output=time_zone.localize(dtObject)
else:
output=time_zone.normalize(old_time_zone.localize(dtObject))
return(output)
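# Minimal usage sketch (values are illustrative):
#   naive = datetime.datetime(2021, 6, 1, 9, 30)
#   aware = modify_time_zone(naive, time_zone=tzAlias.eastern)     # attach US/Eastern to a naive time
#   in_london = modify_time_zone(aware, time_zone=tzAlias.london)  # convert the aware time to Europe/London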
``` |
{
"source": "Jim61C/learn-to-cluster",
"score": 2
} |
#### File: learn-to-cluster/vegcn/extract.py
```python
from __future__ import division
import torch
import os.path as osp
import numpy as np
from vegcn.datasets import build_dataset
from vegcn.models import build_model
from vegcn.test_gcn_v import test
from utils import create_logger, write_feat, mkdir_if_no_exists
def extract_gcn_v(opath_feat, opath_pred_confs, data_name, cfg, write_gcn_feat = False):
if osp.isfile(opath_feat) and osp.isfile(opath_pred_confs):
print('{} and {} already exist.'.format(opath_feat, opath_pred_confs))
return
cfg.cuda = torch.cuda.is_available()
logger = create_logger()
model = build_model(cfg.model['type'], **cfg.model['kwargs'])
for k, v in cfg.model['kwargs'].items():
setattr(cfg[data_name], k, v)
cfg[data_name].eval_interim = False
dataset = build_dataset(cfg.model['type'], cfg[data_name])
pred_confs, gcn_feat = test(model, dataset, cfg, logger)
if not osp.exists(opath_pred_confs):
logger.info('save predicted confs to {}'.format(opath_pred_confs))
mkdir_if_no_exists(opath_pred_confs)
np.savez_compressed(opath_pred_confs,
pred_confs=pred_confs,
inst_num=dataset.inst_num)
if not osp.exists(opath_feat) and write_gcn_feat:
logger.info('save gcn features to {}'.format(opath_feat))
mkdir_if_no_exists(opath_feat)
write_feat(opath_feat, gcn_feat)
``` |
{
"source": "jim8786453/kiln_share",
"score": 2
} |
#### File: tests/unit/test_http.py
```python
import json
import os
from copy import deepcopy
from eve.tests import TestMinimal
from flask_pymongo import MongoClient
from io import BytesIO
import kiln_share
from kiln_share.settings import MONGO_HOST, MONGO_PORT, MONGO_DBNAME
class TestKilnShare(TestMinimal):
def setUp(self):
self.this_directory = os.path.dirname(os.path.realpath(__file__))
self.settings_file = os.path.join(self.this_directory,
'../../settings.py')
self.connection = None
self.setupDB()
self.app = kiln_share.create_app()
self.test_client = self.app.test_client()
self.domain = self.app.config['DOMAIN']
# Setup some common test users.
self.user1 = [('X-Kiln-Share-Id', 'foo')]
self.user2 = [('X-Kiln-Share-Id', 'bar')]
def setupDB(self):
self.connection = MongoClient(MONGO_HOST, MONGO_PORT)
self.connection.drop_database(MONGO_DBNAME)
def dropDB(self):
self.connection = MongoClient(MONGO_HOST, MONGO_PORT)
self.connection.drop_database(MONGO_DBNAME)
self.connection.close()
def get(self, url, headers=None, content_type='application/json'):
if headers is None:
headers = []
headers.append(('Content-Type', content_type))
r = self.test_client.get(url, headers=headers)
return self.parse_response(r)
def test_multi_tenancy(self):
kiln = {
'name': '<NAME>',
'share_type': 'any',
'location': {
'type': 'Point',
'coordinates': [ 10.321, 5.123 ]
},
'power': 'electric',
'chamber_size': 100,
'max_temperature': 1000,
'cost_per_fire': 10.50,
'description': 'foo bar'
}
headers1 = deepcopy(self.user1)
r = self.post('auth/kilns', headers=headers1, data=kiln)
self.assertEqual(r[1], 201)
r = self.get('auth/kilns', headers=headers1)
self.assertEqual(r[1], 200)
result = r[0]
self.assertEqual(result['_meta']['total'], 1)
# Now with another user.
headers2 = deepcopy(self.user2)
r = self.get('auth/kilns', headers=headers2)
self.assertEqual(r[1], 200)
result = r[0]
self.assertEqual(result['_meta']['total'], 0)
def test_images(self):
headers1 = deepcopy(self.user1)
kiln = {
'name': '<NAME>',
'share_type': 'any',
'location': {
'type': 'Point',
'coordinates': [ 10.321, 5.123 ]
},
'power': 'electric',
'chamber_size': 100,
'max_temperature': 1000,
'cost_per_fire': 10.50,
'description': 'foo bar'
}
r = self.post('auth/kilns', headers=headers1, data=kiln)
kiln_id = r[0]['_id']
# Post an image.
headers1 = deepcopy(self.user1)
headers1.append(('Content-Type', 'multipart/form-data'))
location = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
        with open(os.path.join(location, 'img.jpg'), 'rb') as file_:
            file_content = file_.read()
data = {
'file': (BytesIO(file_content), 'img.png'),
'kiln': kiln_id
}
# Use test client directly to avoid json encoding.
r = self.test_client.post('auth/images', data=data, headers=headers1)
self.assertEqual(r.status_code, 201)
def test_conversations_and_messages(self):
headers1 = deepcopy(self.user1)
headers2 = deepcopy(self.user2)
headers3 = [('X-Kiln-Share-Id', 'baz')]
# Check no data exists.
for headers in [headers1, headers2, headers3]:
r = self.get('auth/conversations', headers=headers)
self.assertEqual(r[1], 200)
result = r[0]
self.assertEqual(result['_meta']['total'], 0)
# Create a conversation.
data = {
'participants': ['bar']
}
r = self.post('auth/conversations', data=data, headers=headers1)
self.assertEqual(r[1], 201)
conversation_id = r[0]['_id']
# Both users should see the conversation.
for headers in [headers1, headers2]:
r = self.get('auth/conversations', headers=headers)
self.assertEqual(r[1], 200)
result = r[0]
self.assertEqual(result['_meta']['total'], 1)
# But user 3 should not.
r = self.get('auth/conversations', headers=headers3)
self.assertEqual(r[1], 200)
result = r[0]
self.assertEqual(result['_meta']['total'], 0)
# Now send a message.
data = {
'text': 'hello'
}
url = 'auth/conversations/%s/messages' % conversation_id
r = self.post(url, data=data, headers=deepcopy(self.user1))
self.assertEqual(r[1], 201)
# User 3 shouldn't be able to post to the conversation.
r = self.post(url, data=data, headers=headers3)
self.assertEqual(r[1], 403)
# Both users should see the message when fetching the
# conversation.
for headers in [headers1, headers2]:
url = 'auth/conversations/%s' % conversation_id
r = self.get(url, headers=headers)
self.assertEqual(r[1], 200)
result = r[0]
self.assertEqual(len(result['messages']), 1)
``` |
{
"source": "jima80525/command_line_review",
"score": 3
} |
#### File: jima80525/command_line_review/plactest.py
```python
import plac
def main(
include: ('Show only these comma separated commands', 'option', 'i')=None,
exclude: ('Exclude these comma separated commands', 'option', 'e')=None,
limit: ('Smallest value shown', 'option', 'l', int)=0,
*input: ('log files to process')):
print('limit = {0}'.format(limit))
if include:
print('include passed: {0}'.format(include))
if exclude:
print('exclude passed: {0}'.format(exclude))
for filename in input:
with open(filename) as f:
line = f.readline().strip()
print(line)
if __name__ == '__main__':
plac.call(main)
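    # Example: python plactest.py -l 5 -i grep,ls logfile1.txt logfile2.txt
    # (plac derives the -i/-e/-l flags from the annotations on main; file names are illustrative)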
``` |
{
"source": "jima80525/jima_codes_blog",
"score": 3
} |
#### File: jima_codes_blog/published/embed_bokeh.py
```python
import argparse
from bokeh.plotting import figure
from bokeh.server.server import Server
import socket
import sys
def get_local_ip_addr(external):
if not external:
return "localhost"
# NOTE: this algorithm is not perfect. It will definitely not work if you
# do not have an external internet connection
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80)) # known valid external IP address
ip_addr = s.getsockname()[0]
s.close()
return ip_addr
def get_command_line_args():
""" Read command line args, of course."""
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--blue", action="store_true")
parser.add_argument(
"-e",
"--external",
action="store_true",
help="serve on local ip address instead of localhost",
)
parser.add_argument("-p", "--port", default="5006", help="socket port")
args = parser.parse_args()
args.ip_addr = get_local_ip_addr(args.external)
if args.blue:
args.color = "blue"
else:
args.color = "red"
return args
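# Example command line using the flags defined above: python embed_bokeh.py --blue --external -p 5010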
def graph_it(doc):
global args
p = figure(plot_width=400, plot_height=400, title="My Line Plot")
p.line([1, 2, 3, 4, 5], [6, 7, 2, 4, 5], line_width=2, line_color=args.color)
doc.add_root(p)
def start_server(address, port, url, attempts=100):
while attempts:
attempts -= 1
try:
server = Server(
{url: graph_it},
num_procs=1,
port=port,
address=address,
allow_websocket_origin=[f"{address}:{port}",],
)
server.start()
return server, port
except OSError as ex:
if "Address already in use" in str(ex):
print(f"Port {port} busy")
port += 1
else:
raise ex
raise Exception("Failed to find available port")
if __name__ == "__main__":
args = get_command_line_args()
port = int(args.port)
address = args.ip_addr
url = "/"
try:
server, port = start_server(address, port, url)
except Exception as ex:
print("Failed:", ex)
sys.exit()
try:
print(f"Opening Bokeh application on http://{address}:{port}{url}")
server.io_loop.add_callback(server.show, url)
server.io_loop.start()
except KeyboardInterrupt:
print("\nShutting Down")
``` |
{
"source": "jima80525/pyres",
"score": 3
} |
#### File: pyres/test/test_episode.py
```python
import time
import pytest
import pyres.episode
class TestEpisode(object):
""" test the open functionality """
def test_all_params(self):
""" set all parameters """
assert self
episode = pyres.episode.Episode(
date=time.localtime(),
title='title',
url='url',
podcast='podcast',
size=1234,
state=1,
# one of these two required
base_path='base_path',
file_name='file_name'
)
assert episode
elements = episode.as_list()
assert 'title' in elements
assert 'url' in elements
# podcast is not returned in list
# assert 'podcast' in elements
assert 1234 in elements
assert 1 in elements
assert None not in elements
# base_path is used to create file name element - file_name is ignored
# assert 'base_path' in elements
assert 'file_name' not in elements
def test_no_file_name_params(self):
""" set all parameters """
assert self
with pytest.raises(Exception):
pyres.episode.Episode(
date=time.localtime(),
title='title',
url='url',
podcast='podcast',
size=1234,
state=1,
# one of these two required
# base_path='base_path',
# file_name='file_name'
)
def test_file_name(self):
""" set all parameters """
assert self
episode = pyres.episode.Episode(
date=time.localtime(),
title='title',
url='url',
podcast='podcast',
size=1234,
state=1,
# one of these two required
# base_path='base_path',
file_name='file_name'
)
assert episode
elements = episode.as_list()
assert 'title' in elements
assert 'url' in elements
# podcast is not returned in list
# assert 'podcast' in elements
assert 1234 in elements
assert 1 in elements
assert None not in elements
# file name is copied directly if base_path not specified
# assert 'base_path' in elements
assert 'file_name' in elements
def test_no_state_no_size_set(self):
""" create episode without specifying state or size """
assert self
episode = pyres.episode.Episode(
date=time.localtime(),
title='title',
url='url',
podcast='podcast',
# one of these two required
# base_path='base_path',
file_name='file_name'
)
assert episode
elements = episode.as_list()
assert 'title' in elements
assert 'url' in elements
# podcast is not returned in list
# assert 'podcast' in elements
assert None in elements # no size specified
assert 0 in elements # default state
# file name is copied directly if base_path not specified
# assert 'base_path' in elements
assert 'file_name' in elements
assert episode.state == 0
assert not episode.size
```
#### File: pyres/test/test_metadata.py
```python
import pyres.metadata
class TestMetadata(object):
""" Test for metadata file """
def test_one(self):
""" this is merely here to satisfy my anal-retentive freakishness to
maximize test coverage numbers. """
assert self
assert pyres.metadata.package
def test_two(self):
""" see note above """
assert self
assert pyres.metadata.authors_string
```
#### File: pyres/test/test_utils.py
```python
import pytest
import errno
import pyres.utils
from mock import patch
from mock import Mock
class TestMkdir(object):
""" Test the mkdir_p function"""
@patch('pyres.utils.os.makedirs')
def test_dir_creation(self, mkdir):
""" make sure the os funtion is called with the directory name """
assert self
pyres.utils.mkdir_p("fred")
mkdir.assert_called_once_with("fred")
@patch('pyres.utils.os.makedirs')
@patch('pyres.utils.os.path.isdir')
def test_dir_exists(self, isdir, mkdir):
""" make sure no exception is raised on "directory exists" """
assert self
# mock mkdir to raise OSError exception with errno set accordingly
my_error = OSError()
my_error.errno = errno.EEXIST
mkdir.side_effect = my_error
isdir.return_value = True
pyres.utils.mkdir_p("fred")
mkdir.assert_called_once_with("fred")
@patch('pyres.utils.os.makedirs')
@patch('pyres.utils.os.path.isdir')
def test_file_exists(self, isdir, mkdir):
""" make sure exception is raised on if path specified is a file """
assert self
# mock mkdir to raise OSError exception with errno set accordingly
my_error = OSError()
my_error.errno = errno.EEXIST
mkdir.side_effect = my_error
isdir.return_value = False
pytest.raises(OSError, pyres.utils.mkdir_p, 'fred')
@patch('pyres.utils.os.makedirs')
@patch('pyres.utils.os.path.isdir')
def test_bad_errno(self, isdir, mkdir):
""" make sure exception is raised on if errno is not EEXIST """
assert self
# mock mkdir to raise OSError exception with errno set accordingly
my_error = OSError()
my_error.errno = errno.ENOENT
mkdir.side_effect = my_error
isdir.return_value = True
pytest.raises(OSError, pyres.utils.mkdir_p, 'fred')
class TestCleanName(object):
""" Test the clean_name function. This function removes the following
characters from the input string:
/ : * % ? " < > | '
"""
def test_empty_string(self):
""" Tests that empty string doesn't crash """
assert self
test_string = ""
pyres.utils.clean_name(test_string)
assert test_string == ""
def test_none_rasies(self):
""" Passing in None should raise an exception """
assert self
pytest.raises(AttributeError, pyres.utils.clean_name, None)
def test_starting_chars(self):
""" test invalid chars at start of string """
assert self
base_string = "this is a string"
result = pyres.utils.clean_name("///" + base_string)
assert result == base_string
result = pyres.utils.clean_name(":::" + base_string)
assert result == base_string
result = pyres.utils.clean_name("***" + base_string)
assert result == base_string
result = pyres.utils.clean_name("%%%" + base_string)
assert result == base_string
result = pyres.utils.clean_name("???" + base_string)
assert result == base_string
result = pyres.utils.clean_name('"""' + base_string)
assert result == base_string
result = pyres.utils.clean_name("<<<" + base_string)
assert result == base_string
result = pyres.utils.clean_name(">>>" + base_string)
assert result == base_string
result = pyres.utils.clean_name("|||" + base_string)
assert result == base_string
result = pyres.utils.clean_name("'''" + base_string)
assert result == base_string
result = pyres.utils.clean_name(r"\/:*%?\"<>|'" + r"\/:*%?\"<>|'" +
r"\/:*%?\"<>|'" + base_string)
assert result == base_string
def test_ending_chars(self):
""" test invalid chars at end of string """
assert self
base_string = "this is a string"
result = pyres.utils.clean_name(base_string + "///")
assert result == base_string
result = pyres.utils.clean_name(base_string + ":::")
assert result == base_string
result = pyres.utils.clean_name(base_string + "***")
assert result == base_string
result = pyres.utils.clean_name(base_string + "%%%")
assert result == base_string
result = pyres.utils.clean_name(base_string + "???")
assert result == base_string
result = pyres.utils.clean_name(base_string + '"""')
assert result == base_string
result = pyres.utils.clean_name(base_string + "<<<")
assert result == base_string
result = pyres.utils.clean_name(base_string + ">>>")
assert result == base_string
result = pyres.utils.clean_name(base_string + "|||")
assert result == base_string
result = pyres.utils.clean_name(base_string + "'''")
assert result == base_string
result = pyres.utils.clean_name(base_string + r"\/:*%?\"<>|'" +
r"\/:*%?\"<>|'" + r"\/:*%?\"<>|'")
assert result == base_string
def test_mixed_chars(self):
""" test invalid chars at end of string """
assert self
result = pyres.utils.clean_name(r"t|h'i*s is a \"s?tri>ng<")
assert result == "this is a string"
class TestAcroname(object):
""" Test function which produces a three-letter acronym from the podcast
name. There are several special cases for this function which are tested
below. """
def test_none_raises(self):
""" makes sure that passing in none fails """
assert self
pytest.raises(TypeError, pyres.utils.acroname, None)
def test_one_word_name(self):
""" A one-word name should just take the first three letters of the
word if there are that many. """
assert self
result = pyres.utils.acroname("ABCDE")
assert result == "ABC"
result = pyres.utils.acroname("ABCD")
assert result == "ABC"
result = pyres.utils.acroname("ABC")
assert result == "ABC"
result = pyres.utils.acroname("AB")
assert result == "AB"
result = pyres.utils.acroname("A")
assert result == "A"
result = pyres.utils.acroname("")
assert result == ""
def test_two_work_name(self):
""" A two-word name should take the first two letters of the first word
(if there are that many) and only one letter from the second. """
assert self
result = pyres.utils.acroname("ABCDE ZYX")
assert result == "ABZ"
result = pyres.utils.acroname("ABCD ZYX")
assert result == "ABZ"
result = pyres.utils.acroname("ABC ZYX")
assert result == "ABZ"
result = pyres.utils.acroname("AB ZYX")
assert result == "ABZ"
result = pyres.utils.acroname("A ZYX")
assert result == "AZ"
    def test_three_word_name(self):
""" A three-word name should take the first letter of each word. """
assert self
result = pyres.utils.acroname("always buy corn downtown")
assert result == "abc"
result = pyres.utils.acroname("always buy corn")
assert result == "abc"
def test_special_cases(self):
""" There are two specials. One for NPR podcasts - we strip that word
off. The second is for Scientific American podcasts. We trim those,
too. """
assert self
result = pyres.utils.acroname("NPR always buy corn downtown")
assert result == "abc"
result = pyres.utils.acroname("Scientific American Podcast always "
"buy corn downtown")
assert result == "abc"
``` |
{
"source": "jimages/Surakarta-AI-Core",
"score": 4
} |
#### File: jimages/Surakarta-AI-Core/board.py
```python
from status import Chess, GameStatus, Direction
class Action(object):
def __init__(self, x, y, eat_pos=None, direction=None):
self.x = x
self.y = y
if direction is not None:
self.is_move = True
self.direction = direction
self.to_x = None
self.to_y = None
else:
self.is_move = False
self.direction = None
self.to_x = eat_pos[0]
self.to_y = eat_pos[1]
def __str__(self):
if self.direction is not None:
s = 'm %d %d ' % (self.x, self.y)
dir_dict = {
Direction.Up: 'u',
Direction.Down: 'd',
Direction.Left: 'l',
Direction.Right: 'r',
Direction.LeftUp: 'lu',
Direction.LeftDown: 'ld',
Direction.RightUp: 'ru',
Direction.RightDown: 'rd'
}
s += dir_dict[self.direction]
else:
s = 'e %d %d %d %d' % (self.x, self.y, self.to_x, self.to_y)
return s
def __eq__(self, obj):
if obj is None:
return False
else:
return self.x == obj.x and self.y == obj.y \
and self.is_move == obj.is_move \
and self.direction == obj.direction \
and self.to_x == obj.to_x and self.to_y == obj.to_y
class Board(object):
'''
A class of chess board for Surakarta game.
'''
def __init__(self):
self.__board = [([Chess.Null] * 6) for i in range(6)]
self.__status = GameStatus.RedMoving
self.new_game()
def __str__(self):
s = ''
for i in self.__board:
for j in i:
if j == Chess.Null:
s += '- '
elif j == Chess.Red:
s += 'R '
else:
s += 'B '
s += '\n'
return s.rstrip('\n')
def __eq__(self, other):
for i in range(6):
for j in range(6):
if self.__board[i][j] != other.__board[i][j]:
return False
return True
def __check_movable(self, x, y):
'''
        Return False when there is no piece at (x, y), when the game is already
        decided, or when it is not this piece's side to move.
'''
chess = self.get_chess(x, y)
if chess == Chess.Null:
return False
if self.__status in [GameStatus.RedWon, GameStatus.BlackWon]:
return False
if chess == Chess.Black and self.__status == GameStatus.RedMoving:
return False
if chess == Chess.Red and self.__status == GameStatus.BlackMoving:
return False
return True
def __get_eat_pos(self, x, y, direction, chess, arc_count, original_x, original_y):
        '''Determine the position that the piece at (x, y) can capture along the given direction.'''
if chess == Chess.Null:
return None, None
        # The four corner squares are not on any arc, so they can never capture.
if (x, y) in [(0, 0), (0, 5), (5, 0), (5, 5)]:
return None, None
success, x, y = self.__get_target_pos(x, y, direction)
if not success:
pos_list = [
(1, -1), (2, -1), (2, 6), (1, 6),
(4, -1), (3, -1), (3, 6), (4, 6),
(-1, 1), (-1, 2), (6, 2), (6, 1),
(-1, 4), (-1, 3), (6, 3), (6, 4)
]
x_dir = Direction.Down if y <= 2 else Direction.Up
y_dir = Direction.Right if x <= 2 else Direction.Left
if x == -1:
return self.__get_eat_pos(pos_list[y - 1][0],
pos_list[y - 1][1], x_dir, chess, arc_count + 1, original_x, original_y)
elif x == 6:
return self.__get_eat_pos(pos_list[y + 3][0],
pos_list[y + 3][1], x_dir, chess, arc_count + 1, original_x, original_y)
elif y == -1:
return self.__get_eat_pos(pos_list[x + 7][0],
pos_list[x + 7][1], y_dir, chess, arc_count + 1, original_x, original_y)
else: # y == 6
return self.__get_eat_pos(pos_list[x + 11][0],
pos_list[x + 11][1], y_dir, chess, arc_count + 1, original_x, original_y)
else:
new_chess = self.get_chess(x, y)
            # Note: there is one special case here.
if new_chess == chess and (x != original_x or y != original_y):
return None, None
elif new_chess == Chess.Null:
return self.__get_eat_pos(x, y, direction, chess, arc_count, original_x, original_y)
else:
return (x, y) if arc_count else (None, None)
def __update_status(self):
'''
Update the status of current game.
'''
red, black = 0, 0
for i in self.__board:
for j in i:
if j == Chess.Red:
red += 1
elif j == Chess.Black:
black += 1
if red == 0:
self.__status = GameStatus.BlackWon
elif black == 0:
self.__status = GameStatus.RedWon
elif self.__status == GameStatus.RedMoving:
self.__status = GameStatus.BlackMoving
elif self.__status == GameStatus.BlackMoving:
self.__status = GameStatus.RedMoving
@staticmethod
def __get_target_pos(x, y, direction):
'''
Get the target position of giving position move along the direction.
'''
if direction & Direction.Up:
y -= 1
elif direction & Direction.Down:
y += 1
if direction & Direction.Left:
x -= 1
elif direction & Direction.Right:
x += 1
success = x in range(6) and y in range(6)
return success, x, y
@property
def status(self):
'''
Return the status of current game.
'''
return self.__status
@property
def won(self):
'''
Return whether the red or black has already won.
'''
return self.__status == GameStatus.RedWon \
or self.__status == GameStatus.BlackWon
@property
def board_size(self):
'''
Return the size of board.
'''
return len(self.__board)
def new_game(self):
'''
Reset the whole board and start a new game.
'''
for i in range(6):
if i < 2:
for j in range(6):
self.__board[i][j] = Chess.Black
elif i < 4:
for j in range(6):
self.__board[i][j] = Chess.Null
else:
for j in range(6):
self.__board[i][j] = Chess.Red
self.__status = GameStatus.RedMoving
def get_chess(self, x, y):
'''
Get the status of specific chess on board.
'''
if x not in range(6) or y not in range(6):
return Chess.Null
return self.__board[y][x]
def can_move(self, x, y, direction):
'''
Check if chess on (x, y) can move with giving direction.
'''
if not self.__check_movable(x, y):
return False
success, x, y = self.__get_target_pos(x, y, direction)
if not success:
return False
if self.get_chess(x, y) != Chess.Null:
return False
return True
def get_can_move(self, x, y):
'''
        Get all directions in which the piece at (x, y) can move.
'''
dir_list = []
for i in Direction:
if self.can_move(x, y, i):
dir_list.append(i)
return dir_list
def get_can_eat(self, x, y):
        '''Get the positions of the pieces that the piece at (x, y) can capture.'''
if not self.__check_movable(x, y):
return []
chess_list = []
chess = self.get_chess(x, y)
left = self.__get_eat_pos(x, y, Direction.Left, chess, 0, x, y)
right = self.__get_eat_pos(x, y, Direction.Right, chess, 0, x, y)
up = self.__get_eat_pos(x, y, Direction.Up, chess, 0, x, y)
down = self.__get_eat_pos(x, y, Direction.Down, chess, 0, x, y)
if left[0] is not None:
chess_list.append(left)
if right[0] is not None:
chess_list.append(right)
if up[0] is not None:
chess_list.append(up)
if down[0] is not None:
chess_list.append(down)
return chess_list
def player_move(self, x, y, direction):
'''
Let chess on (x, y) move along the direction.
'''
if not self.__check_movable(x, y):
return False
success, nx, ny = self.__get_target_pos(x, y, direction)
if not success:
return False
if self.get_chess(nx, ny) != Chess.Null:
return False
self.__board[ny][nx] = self.__board[y][x]
self.__board[y][x] = Chess.Null
self.__update_status()
return True
def player_eat(self, x, y, eat_x, eat_y):
chess_list = self.get_can_eat(x, y)
if (eat_x, eat_y) not in chess_list:
return False
chess = self.get_chess(x, y)
self.__board[eat_y][eat_x] = chess
self.__board[y][x] = Chess.Null
self.__update_status()
return True
def apply_action(self, action):
'''
Apply an action to board.
'''
if action.is_move:
return self.player_move(action.x, action.y, action.direction)
else:
return self.player_eat(action.x, action.y,
action.to_x, action.to_y)
# some test
if __name__ == '__main__':
board = Board()
print('current board')
print(board)
print('current status:', board.status)
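    # Illustrative follow-up (sketch): list the legal move directions and
    # capture targets for the red piece at (0, 4) in the starting position.
    # The exact output depends on the Direction enum defined in status.py,
    # which is not shown here.
    print('moves for (0, 4):', board.get_can_move(0, 4))
    print('captures for (0, 4):', board.get_can_eat(0, 4))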
``` |
{
"source": "jimages/surakarta-cpp",
"score": 3
} |
#### File: application/view/game_view.py
```python
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import sip
from application.view.target_button import TargetButton
from application.view.chess_button import ChessButton
INTERVAL = 50
LONG_RADIUS = INTERVAL * 4
SHORT_RADIUS = INTERVAL * 2
CHESS_SIZE = 30
class GameView(QWidget):
def __init__(self, *__args):
super().__init__(*__args)
self.click_callback = None
self.target_click_callback = None
self.chess_move_callback = None
self.game_begin_callback = None
self.change_mode_callback = None
self.gen_callback = None
self.targets = []
self.chess_list = []
self._player = -1
self._is_ai_first_go = False
self._init_view()
self._init_timer()
def _init_view(self):
self._setup_buttons()
        self.human_radio = QRadioButton("Human vs Human", self)
self.human_radio.setGeometry(INTERVAL * 10, 0, 100, 25)
        self.ai_radio = QRadioButton("Human vs AI", self)
self.ai_radio.setGeometry(INTERVAL * 10 + 100, 0, 100, 25)
mode_button_group = QButtonGroup(self)
mode_button_group.addButton(self.human_radio, 1)
mode_button_group.addButton(self.ai_radio, 2)
mode_button_group.buttonClicked.connect(self._select_mode_radio)
        self.first_human_radio = QRadioButton("Human first", self)
self.first_human_radio.setGeometry(INTERVAL * 10, 35, 100, 25)
self.first_human_radio.hide()
        self.first_ai_radio = QRadioButton("AI first", self)
self.first_ai_radio.setGeometry(INTERVAL * 10 + 100, 35, 100, 25)
self.first_ai_radio.hide()
first_button_group = QButtonGroup(self)
first_button_group.addButton(self.first_human_radio, 1)
first_button_group.addButton(self.first_ai_radio, 2)
first_button_group.buttonClicked.connect(self._select_first_radio)
self.begin_button = QPushButton(self)
self.begin_button.setStyleSheet("QPushButton{border-radius: 10; background-color: white; color: black;}"
"QPushButton:hover{background-color: lightgray}")
        self.begin_button.setText("Start")
self.begin_button.setGeometry(INTERVAL * 10, 70, 200, 25)
self.begin_button.clicked.connect(self._click_begin_button)
self.gen_button = QPushButton(self)
self.gen_button.setStyleSheet("QPushButton{border-radius: 10; background-color: white; color: black;}"
"QPushButton:hover{background-color: lightgray}")
        self.gen_button.setText("Generate game record")
self.gen_button.setGeometry(INTERVAL * 10, 100, 200, 25)
self.gen_button.clicked.connect(self._click_gen_button)
self.red_time_label = QLabel(self)
self.red_time_label.setText("00:00")
self.red_time_label.setStyleSheet("color: red")
self.red_time_label.setGeometry(INTERVAL * 10, 130, 100, 25)
self.blue_time_label = QLabel(self)
self.blue_time_label.setText("00:00")
self.blue_time_label.setStyleSheet("color: blue")
self.blue_time_label.setGeometry(INTERVAL * 10 + 100, 130, 100, 25)
self.list_widget = QListWidget(self)
self.list_widget.setGeometry(INTERVAL * 10, 160, 200, 300)
def _init_timer(self):
self._red_time = 0
self._blue_time = 0
self._timer = QTimer(self)
self._timer.setInterval(1000)
self._timer.timeout.connect(self._timer_operate)
def show_game_end(self, player):
if player == -1:
message = "็บขๆน่ท่"
else:
message = "่ๆน่ท่"
print(message)
def show_targets(self, frames):
self.remove_all_targets()
for frame in frames:
btn = TargetButton(self)
btn.setup_frame(frame)
btn.clicked.connect(self._click_target_btn)
btn.show()
self.targets.append(btn)
def remove_all_targets(self):
for btn in self.targets:
btn.hide()
sip.delete(btn)
self.targets.clear()
def remove_chess(self, tag):
for btn in self.chess_list:
if btn.tag == tag:
self.chess_list.remove(btn)
btn.hide()
sip.delete(btn)
break
def move_chess(self, chess_tag, to_frame):
self._player = -self._player
for chess in self.chess_list:
if chess_tag == chess.tag:
chess.move(to_frame[1] - CHESS_SIZE / 2, to_frame[0] - CHESS_SIZE / 2)
                # After the piece has moved, invoke the callback to update the board data.
self.chess_move_callback(to_frame)
return
def add_move_info(self, tag: int, f: tuple, t: tuple):
text = "tag {tag}: ({fx}, {fy}) -> ({tx}, {ty})".format(tag=tag,
fx=f[0],
fy=f[1],
tx=t[0],
ty=t[1])
item = QListWidgetItem(text)
self.list_widget.addItem(item)
@pyqtSlot()
def _click_gen_button(self):
self.gen_callback()
@pyqtSlot()
def _click_btn(self):
self.click_callback(self.sender().tag)
@pyqtSlot()
def _click_target_btn(self):
self.target_click_callback(self.sender().x, self.sender().y)
@pyqtSlot()
def _select_mode_radio(self):
if self.sender().checkedId() == 1:
self.first_human_radio.hide()
self.first_ai_radio.hide()
self.change_mode_callback(1)
else:
self.first_human_radio.show()
self.first_ai_radio.show()
self.change_mode_callback(2)
@pyqtSlot()
def _click_begin_button(self):
self._player = 1
self._timer.start()
self.begin_button.setEnabled(False)
self.ai_radio.setEnabled(False)
self.human_radio.setEnabled(False)
self.first_human_radio.setEnabled(False)
self.first_ai_radio.setEnabled(False)
self.game_begin_callback(self._is_ai_first_go)
@pyqtSlot()
def _select_first_radio(self):
if self.sender().checkedId() == 1:
self._is_ai_first_go = False
else:
self._is_ai_first_go = True
@pyqtSlot()
def _timer_operate(self):
if self._player == -1:
self._red_time += 1
else:
self._blue_time += 1
time = self._red_time if self._player == -1 else self._blue_time
m = int(time / 60)
if m < 10:
str_m = "0{m}".format(m=m)
else:
str_m = str(m)
s = time - m * 60
if s < 10:
str_s = "0{s}".format(s=s)
else:
str_s = str(s)
if self._player == -1:
self.red_time_label.setText(str_m + ":" + str_s)
else:
self.blue_time_label.setText(str_m + ":" + str_s)
def _setup_buttons(self):
begin_x = INTERVAL * 2
begin_y = INTERVAL * 2
for i in range(0, 24):
btn = ChessButton(self)
if i < 6:
btn.setup_view(True)
btn.setGeometry(begin_x + INTERVAL * i - CHESS_SIZE / 2,
begin_y - CHESS_SIZE / 2,
CHESS_SIZE,
CHESS_SIZE)
elif i < 12:
btn.setup_view(True)
btn.setGeometry(begin_x + INTERVAL * (i - 6) - CHESS_SIZE / 2,
begin_y + INTERVAL - CHESS_SIZE / 2,
CHESS_SIZE,
CHESS_SIZE)
elif i < 18:
btn.setup_view(False)
btn.setGeometry(begin_x + INTERVAL * (i - 12) - CHESS_SIZE / 2,
begin_y + INTERVAL * 4 - CHESS_SIZE / 2,
CHESS_SIZE,
CHESS_SIZE)
else:
btn.setup_view(False)
btn.setGeometry(begin_x + INTERVAL * (i - 18) - CHESS_SIZE / 2,
begin_y + INTERVAL * 5 - CHESS_SIZE / 2,
CHESS_SIZE,
CHESS_SIZE)
btn.setText(str(i + 1))
btn.tag = i + 1
btn.clicked.connect(self._click_btn)
self.chess_list.append(btn)
def paintEvent(self, QPaintEvent):
painter = QPainter(self)
painter.setPen(QColor(166, 66, 250))
        # top-left arcs
painter.drawArc(0, 0, LONG_RADIUS, LONG_RADIUS, 0, 270 * 16)
painter.drawArc(INTERVAL, INTERVAL, SHORT_RADIUS, SHORT_RADIUS, 0, 270 * 16)
        # bottom-left arcs
painter.drawArc(0, INTERVAL * 5, LONG_RADIUS, LONG_RADIUS, 90 * 16, 270 * 16)
painter.drawArc(INTERVAL, INTERVAL * 6, SHORT_RADIUS, SHORT_RADIUS, 90 * 16, 270 * 16)
        # top-right arcs
painter.drawArc(INTERVAL * 5, 0, LONG_RADIUS, LONG_RADIUS, -90 * 16, 270 * 16)
painter.drawArc(INTERVAL * 6, INTERVAL, SHORT_RADIUS, SHORT_RADIUS, -90 * 16, 270 * 16)
        # bottom-right arcs
painter.drawArc(INTERVAL * 5, INTERVAL * 5, LONG_RADIUS, LONG_RADIUS, -180 * 16, 270 * 16)
painter.drawArc(INTERVAL * 6, INTERVAL * 6, SHORT_RADIUS, SHORT_RADIUS, -180 * 16, 270 * 16)
        # vertical lines
painter.drawLine(INTERVAL * 2, INTERVAL * 2, INTERVAL * 2, INTERVAL * 7)
painter.drawLine(INTERVAL * 3, INTERVAL * 2, INTERVAL * 3, INTERVAL * 7)
painter.drawLine(INTERVAL * 4, INTERVAL * 2, INTERVAL * 4, INTERVAL * 7)
painter.drawLine(INTERVAL * 5, INTERVAL * 2, INTERVAL * 5, INTERVAL * 7)
painter.drawLine(INTERVAL * 6, INTERVAL * 2, INTERVAL * 6, INTERVAL * 7)
painter.drawLine(INTERVAL * 7, INTERVAL * 2, INTERVAL * 7, INTERVAL * 7)
        # horizontal lines
painter.drawLine(INTERVAL * 2, INTERVAL * 2, INTERVAL * 7, INTERVAL * 2)
painter.drawLine(INTERVAL * 2, INTERVAL * 3, INTERVAL * 7, INTERVAL * 3)
painter.drawLine(INTERVAL * 2, INTERVAL * 4, INTERVAL * 7, INTERVAL * 4)
painter.drawLine(INTERVAL * 2, INTERVAL * 5, INTERVAL * 7, INTERVAL * 5)
painter.drawLine(INTERVAL * 2, INTERVAL * 6, INTERVAL * 7, INTERVAL * 6)
painter.drawLine(INTERVAL * 2, INTERVAL * 7, INTERVAL * 7, INTERVAL * 7)
painter.drawLine(INTERVAL * 2, INTERVAL * 7, INTERVAL * 7, INTERVAL * 7)
```
#### File: surakarta-cpp/surakarta_client/cmd.py
```python
import logging
import socket
from nemesis.core import Core
from surakarta.chess import Chess
from surakarta.game import Game
AI_CAMP = -1
host_name = "192.168.199.156"
port = 8999
class Cmd(object):
def __init__(self, ai_camp: int):
self._game = Game(ai_camp, is_debug=True)
self._game.reset_board()
self._ai_core = Core()
self._ai_core.ai_camp = ai_camp
self.step_num = 0
self.socket = socket.socket()
self.socket.connect((host_name, port))
def start(self):
is_ai_first = False
while True:
msg = self.socket.recv(2048).decode()
if len(msg) == 1:
is_ai_first = True
self._ai_core.is_first = True
self._ai_core.ai_camp = 1
self._game.set_camp(1)
if is_ai_first:
is_ai_first = False
self._ai_go()
continue
chess_list = msg.split(" ")
if len(chess_list) != 4:
print(msg)
print("โ ๏ธ ่พๅ
ฅ้่ฏฏ: ็ผบๅฐ่พๅ
ฅๅๆฐ")
break
for i in range(len(chess_list)):
chess_list[i] = int(chess_list[i])
from_chess = self.find_chess(chess_list[1], chess_list[0])
to_chess = self.find_chess(chess_list[3], chess_list[2])
if from_chess.tag == 0:
logging.error(msg)
print("โ ๏ธ ่พๅ
ฅ้่ฏฏ: ่พๅ
ฅไฝ็ฝฎ้่ฏฏ")
break
info = {"from": from_chess, "to": to_chess}
self._game.do_move(info)
self.step_num += 1
self._ai_go()
self.socket.close()
def find_chess(self, x: int, y: int) -> Chess:
"""
        Find a chess piece. Note that the coordinates used here are the reverse
        of the coordinates passed in from outside.
        :param x: vertical coordinate
        :param y: horizontal coordinate
        :return: the chess piece
"""
return self._game.chess_board[x][y]
def _get_board_info(self) -> dict:
board_info = self._game.last_board_info
if board_info is None:
board_info = {
"board": self._game.chess_board,
"red_num": 12,
"blue_num": 12
}
board_info.update({"step_num": self.step_num})
return board_info
def _ai_go(self):
self._ai_core.playing(self._get_board_info(), self._ai_move_callback)
def _ai_move_callback(self, info: dict):
self._game.do_move(info)
self.step_num += 1
output = '''{x1} {y1} {x2} {y2}'''.format(x1=str(info["from"].y),
y1=str(info["from"].x),
x2=str(info["to"].y),
y2=str(info["to"].x))
self.socket.send(output.encode())
print(output)
if __name__ == '__main__':
cmd = Cmd(AI_CAMP)
cmd.start()
``` |
{
"source": "jimah/healthysnake",
"score": 3
} |
#### File: healthysnake/alerts/core.py
```python
class Alert:
"""The core alert data model, contains information to be passed to a manager."""
def __init__(self, application, dependency, message, severity, metadata=None, source='localhost'):
"""Create a new Alert.
:param message: string data to be sent to the alert manager
:type message: str
:param severity: healthysnake level indicating severity of alert
:type severity: int
:param metadata: optional dictionary of metadata to send
:type metadata: dict
"""
if metadata is None:
metadata = {}
self.application = application
self.dependency = dependency
self.message = message
self.severity = severity
self.metadata = metadata
self.source = source
```
#### File: healthysnake/alerts/manager.py
```python
from .exceptions import ImproperlyConfiguredError
class AbstractAlerterManager:
"""Base AlertManager class to subclass."""
def alert(self, message):
"""Core alerting mechanism.
        :param message: the alert to be dispatched
        :type message: Alert
        :raises .exceptions.ImproperlyConfiguredError: when not overridden by a subclass
"""
raise ImproperlyConfiguredError('alert function not implemented')
def on_failure(self, exc):
pass
def on_success(self, data):
pass
```
#### File: alerts/slack/manager.py
```python
import json
import requests
from healthysnake.alerts.manager import AbstractAlerterManager
import healthysnake.levels as levels
class SlackAlertManager(AbstractAlerterManager):
SLACK_COLOR_GOOD = 'good'
SLACK_COLOR_DANGER = 'danger'
SLACK_COLOR_WARNING = 'warning'
SLACK_FIRE_EMOJI = ':fire:'
def __init__(self, webhook):
self.webhook_url = webhook
def alert(self, alert_message):
self._send_to_webhook({
'fallback': 'ALERT {0} failed {1}'.format(alert_message.application, alert_message.dependency),
'color': self.slack_color_from_level(alert_message.severity),
'fields': [
{
'title': alert_message.application,
'short': True,
},
{
'title': 'Severity: {0}'.format(levels.level_as_string(alert_message.severity)),
'value': self.how_many_fires(alert_message.severity),
'short': True,
},
{
'title': alert_message.dependency,
'value': alert_message.message,
},
]
})
@staticmethod
def how_many_fires(severity):
if not severity:
return ''
fires = ''
end = int(severity)
i = 0
while i < end:
fires += SlackAlertManager.SLACK_FIRE_EMOJI + ' '
i += 1
return fires
@staticmethod
def slack_color_from_level(severity):
if severity == levels.SOFT:
return SlackAlertManager.SLACK_COLOR_WARNING
elif severity == levels.HARD:
return SlackAlertManager.SLACK_COLOR_DANGER
return SlackAlertManager.SLACK_COLOR_GOOD
def _send_to_webhook(self, payload):
response = requests.post(
self.webhook_url,
data=json.dumps(payload),
headers={
'Content-Type': 'application/json',
},
)
if response.status_code != 200:
raise ValueError(
'Request to slack returned an error %s, the response is:\n%s'
% (response.status_code, response.text)
)
```
#### File: healthysnake/healthysnake/healthcheck.py
```python
import logging
from time import mktime
from datetime import timedelta
from healthysnake import exceptions, levels
from healthysnake.dependency import Dependency
from healthysnake.alerts.core import Alert
class HealthCheck:
"""
Tracks the state of all dependencies.
"""
def __init__(self, name,
logger=logging.getLogger(__name__),
alert_managers=None,
):
"""
:param name: the name of the service running the health check
:type name: str
:param logger: optional logger, defaults to root logger
:type logger: logging.Logger
"""
self.name = name
self.healthy = True
self.dependencies = {}
self._logger = logger
self._services = {}
if alert_managers is None:
alert_managers = []
self._alert_managers = alert_managers
def __str__(self):
return self.status()
def add_dependency(self, name, check_func,
interval=timedelta(seconds=Dependency.DEFAULT_INTERVAL), level=levels.HARD):
"""
Add a dependency to be tracked within the health check.
:param name: name of the dependency
:type name: str
:param check_func: callback function to be run to check the health of a dependency
:type check_func: callable
:param interval: how often it should be checked
:type interval: datetime.timedelta
:param level: severity level for dependency
:type level: int
"""
if name in self._services:
raise exceptions.DependencyAlreadyPresentException(name + ' already present in health check')
srv = Dependency(name, check_func, interval, level)
self._services[name] = srv
def check_dependency(self, name):
"""
Check that the specified dependency is healthy
:param name: the name of the dependency
:type name: str
:return: result of health check
:rtype: bool
"""
if name not in self._services.keys():
raise exceptions.DependencyNotPresentException(name + ' not present in health check dependencies')
return self._services[name].healthy()
def status(self):
"""
Generate a dictionary representing the current health state of the system.
:return: dictionary representation of system state
        :rtype: dict
"""
tracked_dependencies = []
for name, dependency in self._services.items():
dependency_healthy = (False, '')
try:
dependency_healthy = dependency.healthy()
except Exception as e:
self._logger.exception(e)
if not dependency_healthy[0]:
for manager in self._alert_managers:
# TODO name the check that failed
manager.alert(Alert(
application=self.name,
dependency=name,
message=dependency_healthy[1],
severity=dependency.level,
))
tracked_dependencies.append({
'name': name,
'healthy': dependency_healthy[0],
'message': dependency_healthy[1],
'level': dependency.level,
'last_updated': mktime(dependency.last_updated.timetuple()),
'next_update': mktime(dependency.next_update().timetuple()),
})
# golf so hard pythonistas wanna fine me
self.healthy = all(d['healthy'] for d in tracked_dependencies if d['level'] != levels.SOFT)
return {
'name': self.name,
'healthy': self.healthy,
'dependencies': tracked_dependencies,
}
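# Illustrative usage (sketch, not part of the library): a dependency check is a
# callable (see checkers.storage.DiskCapacityCheck), and status() reads each
# dependency's healthy() result as a (bool, message) pair, so a minimal setup
# could look like:
#
#     hc = HealthCheck('my-service')
#     hc.add_dependency('cache', lambda: (True, ''), interval=timedelta(seconds=30))
#     state = hc.status()   # dict with 'name', 'healthy' and 'dependencies'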
```
#### File: healthysnake/tests/test_checker_storge.py
```python
from healthysnake.checkers.storage import DiskCapacityCheck
class TestDiskCapacityCheck(object):
"""
Tests the DiskCapacityCheck checker
"""
def test_initialization(self):
"""
The checker should use sensible defaults
"""
checker = DiskCapacityCheck('/media')
assert checker.mountpoint == '/media'
assert checker.failure_threshold == 90.0
def test_checker_success(self, mocker):
"""
The checker should return True if the storage has not filled up beyond the specified threshold
"""
disk_usage = mocker.Mock(attributes=['percent'])
disk_usage.percent = 89.0
mocker.patch('psutil.disk_usage', mocker.Mock(return_value=disk_usage))
check = DiskCapacityCheck('/media')
assert check() is True
def test_checker_failure(self, mocker):
"""
The checker should return False if the storage has filled up beyond the specified threshold
"""
disk_usage = mocker.Mock(attributes=['percent'])
disk_usage.percent = 91.0
mocker.patch('psutil.disk_usage', mocker.Mock(return_value=disk_usage))
check = DiskCapacityCheck('/media')
assert check() == (False, 'mount point "/media" reached 91.0% capacity')
``` |
{
"source": "jimako1989/python-lab",
"score": 3
} |
#### File: python-lab/extra_long_factorials/extra_long_factorials.py
```python
import math
import os
import random
import re
import sys
def conv_int(m:str) -> int:
return 0 if m == "" else int(m)
def add(m: str, n: str) -> str:
dim = max(len(m), len(n))
if dim < 10:
return str(conv_int(m) + conv_int(n))
m = m.zfill(dim)
n = n.zfill(dim)
result = [0]*int(dim+1)
for digit, (i, j) in enumerate(zip(m[::-1], n[::-1])):
if result[digit] + int(i) + int(j) < 10:
result[digit] += int(i)+int(j)
else:
result[digit+1] = int((result[digit]+int(i)+int(j)) / 10)
result[digit] = (result[digit]+int(i)+int(j)) % 10
return "".join(map(str, result))[::-1].lstrip("0")
def __add(l: list) -> str:
if len(l) == 0:
return "0"
last = l.pop(0) # the biggest num in l
return add(last, __add(l))
def multiply(m: str, n: str) -> str:
if len(m) < 4 and len(n) < 4:
return str(int(m) * int(n))
result = [[0 for j in range(len(n)+1)] for i in range(len(m)+1)]
for __i, __char_m in enumerate(m):
for __j, __char_n in enumerate(n):
# multiply one digit by one digit
result[__i][__j] = int(__char_m) * int(__char_n)
dim = len(m)+len(n) # dim - 1 equals the num of sum. len(range(dim)) = dim
rl_diag = [""]*(dim-1)
for k in range(1, dim): # m,n=2,3 dim=5, range(1,dim)=[1,2,3,4]
diag_total = 0
for i in range(len(m)):
try:
diag_total += result[i][k-i-1] # k=1, range(k)=[0], k=2, range(k)=[0,1]
except Exception as e:
raise Exception(diag_total, e, k, i, m, n, dim, result)
rl_diag[k-1] = str(diag_total) + "0" * (dim-1-k)
return __add(rl_diag)
# Complete the extraLongFactorials function below.
def extraLongFactorials(n: str) -> str:
if n == "1":
return "1"
return multiply(str(n), extraLongFactorials(str(int(n)-1)))
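# Quick self-checks (illustrative): these small inputs only exercise the direct
# integer fallbacks in add() and multiply(), so the expected values follow from
# ordinary arithmetic.
assert add("999", "1") == "1000"
assert multiply("12", "34") == "408"
assert extraLongFactorials("5") == "120"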
if __name__ == '__main__':
n = int(input())
print(extraLongFactorials(n))
``` |
{
"source": "jimaldon/AutowareAuto",
"score": 3
} |
#### File: ray_ground_classifier_nodes/launch/vlp16_lexus.launch.py
```python
import launch
import launch_ros.actions
def generate_launch_description():
driver = launch_ros.actions.Node(
package='velodyne_node', node_executable='velodyne_block_node_exe',
arguments=['--node_name=vlp16_front'])
classifier = launch_ros.actions.Node(
package='ray_ground_classifier_nodes', node_executable='ray_ground_classifier_block_node_exe',
arguments=['--node_name=ray_ground_classifier'])
return launch.LaunchDescription([driver, classifier])
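# Usage note (assumption, not from the source): the ROS 2 launch tooling imports
# this module and calls generate_launch_description() to start both nodes, e.g.
# via `ros2 launch ray_ground_classifier_nodes vlp16_lexus.launch.py`.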
``` |
{
"source": "JimAnast/Hangman",
"score": 4
} |
#### File: JimAnast/Hangman/hangman.py
```python
import random
all_words = []
with open(r'words.txt', 'r') as f:
for line in f:
for word in line.split():
all_words.append(word)
def get_word():
word = random.choice(all_words)
return word.lower()
def play(word):
word_to_complete = "_" * len(word)
guessed_letters = []
num_of_guessed_letters = 0
correct_guesses = 0
guessed = False
number_of_tries = 5
print("I am thinking of a word that is", len(word), "letters long! Try to guess this word!")
print("you have", number_of_tries, "guesses left!")
while not guessed and number_of_tries > 0:
guess = input("Please enter a letter: ")
if len(guess) == 1 and guess.isalpha():
if guess in guessed_letters:
print("You already guessed the letter", guess)
elif guess not in word:
print("wrong guess!")
number_of_tries -= 1
guessed_letters.append(guess)
else:
print("good guess!")
guessed_letters.append(guess)
number_of_tries -= 1
correct_guesses += 1
word_as_list = list(word_to_complete)
indices = [i for i, letter in enumerate(word) if letter == guess]
for index in indices:
word_as_list[index] = guess
num_of_guessed_letters += 1
word_to_complete = "".join(word_as_list)
if "_" not in word_to_complete:
guessed = True
else:
print("Not a valid guess. Please enter a letter of the English alphabet.")
print(word_to_complete)
if not guessed and number_of_tries >=1:
print("you have", number_of_tries, "guesses left!")
guess_word = input("Please enter the corresponding word: ").lower()
if guess_word == word:
guessed = True
else:
guessed = False
if guessed:
score = 100 + (num_of_guessed_letters * correct_guesses)
print("You win! Your score is:", score)
else:
score = num_of_guessed_letters * correct_guesses
print("You lost. The word was " + word + ". Your score is:", score)
def main():
word = get_word()
play(word)
if __name__ == "__main__":
main()
``` |
{
"source": "JimAnast/Words-with-given-letters-in-given-positions",
"score": 4
} |
#### File: JimAnast/Words-with-given-letters-in-given-positions/LettersInPositions.py
```python
words = ['class', 'dictionary', 'case', 'course', 'java', 'list', 'program', 'python', 'tuple', 'word']
letter = str(input("Enter the letter that you want to search by: "))
position = int(input("Give the position of that letter: "))
def words_letter_position(struct, letter, position):
new_list = []
for word in struct:
dictio = {}
pos = 0
for let in word:
dictio[pos] = let
pos += 1
if dictio[position] == letter:
new_list.append(word)
return new_list
w = words_letter_position(words, letter, position)
print("result=",w)
``` |
{
"source": "jimangel2001/spotify-downloader",
"score": 3
} |
#### File: spotify-downloader/spotdl/internals.py
```python
import os
import sys
from spotdl import const
log = const.log
try:
from slugify import SLUG_OK, slugify
except ImportError:
log.error('Oops! `unicode-slugify` was not found.')
log.info('Please remove any other slugify library and install `unicode-slugify`')
sys.exit(5)
formats = { 0 : 'track_name',
1 : 'artist',
2 : 'album',
3 : 'album_artist',
4 : 'genre',
5 : 'disc_number',
6 : 'duration',
7 : 'year',
8 : 'original_date',
9 : 'track_number',
10 : 'total_tracks',
11 : 'isrc' }
def input_link(links):
""" Let the user input a choice. """
while True:
try:
log.info('Choose your number:')
the_chosen_one = int(input('> '))
if 1 <= the_chosen_one <= len(links):
return links[the_chosen_one - 1]
elif the_chosen_one == 0:
return None
else:
log.warning('Choose a valid number!')
except ValueError:
log.warning('Choose a valid number!')
def trim_song(text_file):
""" Remove the first song from file. """
with open(text_file, 'r') as file_in:
data = file_in.read().splitlines(True)
with open(text_file, 'w') as file_out:
file_out.writelines(data[1:])
return data[0]
def is_spotify(raw_song):
""" Check if the input song is a Spotify link. """
status = len(raw_song) == 22 and raw_song.replace(" ", "%20") == raw_song
status = status or raw_song.find('spotify') > -1
return status
def is_youtube(raw_song):
""" Check if the input song is a YouTube link. """
status = len(raw_song) == 11 and raw_song.replace(" ", "%20") == raw_song
status = status and not raw_song.lower() == raw_song
status = status or 'youtube.com/watch?v=' in raw_song
return status
def format_string(string_format, tags, slugification=False, force_spaces=False):
""" Generate a string of the format '[artist] - [song]' for the given spotify song. """
format_tags = dict(formats)
format_tags[0] = tags['name']
format_tags[1] = tags['artists'][0]['name']
format_tags[2] = tags['album']['name']
format_tags[3] = tags['artists'][0]['name']
format_tags[4] = tags['genre']
format_tags[5] = tags['disc_number']
format_tags[6] = tags['duration']
format_tags[7] = tags['year']
format_tags[8] = tags['release_date']
format_tags[9] = tags['track_number']
format_tags[10] = tags['total_tracks']
format_tags[11] = tags['external_ids']['isrc']
for tag in format_tags:
if slugification:
format_tags[tag] = sanitize_title(format_tags[tag],
ok='-_()[]{}')
else:
format_tags[tag] = str(format_tags[tag])
for x in formats:
format_tag = '{' + formats[x] + '}'
string_format = string_format.replace(format_tag,
format_tags[x])
if const.args.no_spaces and not force_spaces:
string_format = string_format.replace(' ', '_')
return string_format
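# Illustrative example (sketch): with string_format '{artist} - {track_name}'
# and Spotify metadata in tags, this yields e.g. 'Some Artist - Some Song';
# with const.args.no_spaces set it becomes 'Some_Artist_-_Some_Song'.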
def sanitize_title(title, ok='-_()[]{}\/'):
""" Generate filename of the song to be downloaded. """
if const.args.no_spaces:
title = title.replace(' ', '_')
# slugify removes any special characters
title = slugify(title, ok=ok, lower=False, spaces=True)
return title
def filter_path(path):
if not os.path.exists(path):
os.makedirs(path)
for temp in os.listdir(path):
if temp.endswith('.temp'):
os.remove(os.path.join(path, temp))
def videotime_from_seconds(time):
if time < 60:
return str(time)
if time < 3600:
return '{0}:{1:02}'.format(time//60, time % 60)
return '{0}:{1:02}:{2:02}'.format((time//60)//60, (time//60) % 60, time % 60)
def get_sec(time_str):
if ':' in time_str:
splitter = ':'
elif '.' in time_str:
splitter = '.'
else:
raise ValueError("No expected character found in {} to split"
"time values.".format(time_str))
v = time_str.split(splitter, 3)
v.reverse()
sec = 0
if len(v) > 0: # seconds
sec += int(v[0])
if len(v) > 1: # minutes
sec += int(v[1]) * 60
if len(v) > 2: # hours
sec += int(v[2]) * 3600
return sec
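# Worked example (sketch): get_sec and videotime_from_seconds are inverses for
# h:mm:ss values, e.g. get_sec('1:02:03') == 3723 and
# videotime_from_seconds(3723) == '1:02:03'.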
def get_splits(url):
if '/' in url:
if url.endswith('/'):
url = url[:-1]
splits = url.split('/')
else:
splits = url.split(':')
return splits
# a hacky way to user's localized music directory
# (thanks @linusg, issue #203)
def get_music_dir():
home = os.path.expanduser('~')
# On Linux, the localized folder names are the actual ones.
# It's a freedesktop standard though.
if sys.platform.startswith('linux'):
for file_item in ('.config/user-dirs.dirs', 'user-dirs.dirs'):
path = os.path.join(home, file_item)
if os.path.isfile(path):
with open(path, 'r') as f:
for line in f:
if line.startswith('XDG_MUSIC_DIR'):
return os.path.expandvars(line.strip().split('=')[1].strip('"'))
# On both Windows and macOS, the localized folder names you see in
# Explorer and Finder are actually in English on the file system.
# So, defaulting to C:\Users\<user>\Music or /Users/<user>/Music
# respectively is sufficient.
# On Linux, default to /home/<user>/Music if the above method failed.
return os.path.join(home, 'Music')
```
#### File: spotify-downloader/test/loader.py
```python
from spotdl import const
from spotdl import handle
from spotdl import spotdl
import pytest
def load_defaults():
const.args = handle.get_arguments(raw_args='', to_group=False, to_merge=False)
const.args.overwrite = 'skip'
const.args.log_level = 10
spotdl.args = const.args
spotdl.log = const.logzero.setup_logger(formatter=const._formatter,
level=const.args.log_level)
```
#### File: spotify-downloader/test/test_with_metadata.py
```python
from spotdl import const
from spotdl import internals
from spotdl import spotify_tools
from spotdl import youtube_tools
from spotdl import convert
from spotdl import metadata
from spotdl import spotdl
import loader
import os
loader.load_defaults()
raw_song = 'http://open.spotify.com/track/0JlS7BXXD07hRmevDnbPDU'
def test_metadata():
expect_number = 23
global meta_tags
meta_tags = spotify_tools.generate_metadata(raw_song)
assert len(meta_tags) == expect_number
class TestFileFormat:
def test_with_spaces(self):
expect_title = '<NAME> - Intro'
title = internals.format_string(const.args.file_format, meta_tags)
assert title == expect_title
def test_without_spaces(self):
        expect_title = 'David_André_Østby_-_Intro'
const.args.no_spaces = True
title = internals.format_string(const.args.file_format, meta_tags)
assert title == expect_title
def test_youtube_url():
expect_url = 'http://youtube.com/watch?v=rg1wfcty0BA'
url = youtube_tools.generate_youtube_url(raw_song, meta_tags)
assert url == expect_url
def test_youtube_title():
expect_title = 'Intro - <NAME>'
global content
content = youtube_tools.go_pafy(raw_song, meta_tags)
title = youtube_tools.get_youtube_title(content)
assert title == expect_title
def test_check_track_exists_before_download(tmpdir):
expect_check = False
const.args.folder = str(tmpdir)
# prerequisites for determining filename
songname = internals.format_string(const.args.file_format, meta_tags)
global file_name
file_name = internals.sanitize_title(songname)
check = spotdl.check_exists(file_name, raw_song, meta_tags)
assert check == expect_check
class TestDownload:
def test_m4a(self):
expect_download = True
download = youtube_tools.download_song(file_name + '.m4a', content)
assert download == expect_download
def test_webm(self):
expect_download = True
download = youtube_tools.download_song(file_name + '.webm', content)
assert download == expect_download
class TestFFmpeg():
def test_convert_from_webm_to_mp3(self):
expect_return_code = 0
return_code = convert.song(file_name + '.webm',
file_name + '.mp3',
const.args.folder)
assert return_code == expect_return_code
def test_convert_from_webm_to_m4a(self):
expect_return_code = 0
return_code = convert.song(file_name + '.webm',
file_name + '.m4a',
const.args.folder)
assert return_code == expect_return_code
def test_convert_from_m4a_to_mp3(self):
expect_return_code = 0
return_code = convert.song(file_name + '.m4a',
file_name + '.mp3',
const.args.folder)
assert return_code == expect_return_code
def test_convert_from_m4a_to_webm(self):
expect_return_code = 0
return_code = convert.song(file_name + '.m4a',
file_name + '.webm',
const.args.folder)
assert return_code == expect_return_code
def test_convert_from_m4a_to_flac(self):
expect_return_code = 0
return_code = convert.song(file_name + '.m4a',
file_name + '.flac',
const.args.folder)
assert return_code == expect_return_code
class TestAvconv:
def test_convert_from_m4a_to_mp3(self):
expect_return_code = 0
return_code = convert.song(file_name + '.m4a',
file_name + '.mp3',
const.args.folder,
avconv=True)
assert return_code == expect_return_code
class TestEmbedMetadata:
def test_embed_in_mp3(self):
expect_embed = True
global track_path
track_path = os.path.join(const.args.folder, file_name)
embed = metadata.embed(track_path + '.mp3', meta_tags)
assert embed == expect_embed
def test_embed_in_m4a(self):
expect_embed = True
embed = metadata.embed(track_path + '.m4a', meta_tags)
os.remove(track_path + '.m4a')
assert embed == expect_embed
def test_embed_in_webm(self):
expect_embed = False
embed = metadata.embed(track_path + '.webm', meta_tags)
os.remove(track_path + '.webm')
assert embed == expect_embed
def test_embed_in_flac(self):
expect_embed = True
embed = metadata.embed(track_path + '.flac', meta_tags)
os.remove(track_path + '.flac')
assert embed == expect_embed
def test_check_track_exists_after_download():
expect_check = True
check = spotdl.check_exists(file_name, raw_song, meta_tags)
os.remove(track_path + '.mp3')
assert check == expect_check
``` |
{
"source": "jimanx2/newrelic-plugin-agent",
"score": 2
} |
#### File: newrelic_plugin_agent/plugins/couchdb.py
```python
import logging
from newrelic_plugin_agent.plugins import base
LOGGER = logging.getLogger(__name__)
class CouchDB(base.JSONStatsPlugin):
DEFAULT_PATH = '/_node/_local/_stats'
GUID = 'com.meetme.newrelic_couchdb_agent'
HTTP_METHODS = ['COPY', 'DELETE', 'GET', 'HEAD', 'OPTIONS', 'POST', 'PUT']
STATUS_CODES = [200, 201, 202, 301, 304, 400, 401,
403, 404, 405, 409, 412, 500]
def add_datapoints(self, stats):
"""Add all of the data points for a node
:param dict stats: all of the nodes
"""
LOGGER.debug('Stats: %r', stats)
self.add_database_stats(stats['couchdb'])
self.add_request_methods(stats['couchdb']['httpd_request_methods'])
self.add_request_stats(stats['couchdb'], stats['couchdb']['httpd'])
self.add_response_code_stats(stats['couchdb']['httpd_status_codes'])
def add_database_stats(self, stats):
self.add_gauge_value('Database/Open', 'dbs',
stats['open_databases'].get('count', 0),
stats['open_databases'].get('min', 0),
stats['open_databases'].get('max', 0))
self.add_derive_value('Database/IO/Reads', 'iops',
stats['database_reads'].get('count', 0))
self.add_derive_value('Database/IO/Writes', 'iops',
stats['database_writes'].get('count', 0))
self.add_gauge_value('Files/Open', 'files',
stats['open_os_files'].get('count', 0),
stats['open_os_files'].get('min', 0),
stats['open_os_files'].get('max', 0))
def add_request_stats(self, couchdb, httpd):
self.add_derive_value('Requests/Duration', 'seconds',
couchdb['request_time']['value'].get('median', 0))
self.add_derive_value('Requests/Type/Document', 'requests',
httpd['requests'].get('value', 0))
self.add_derive_value('Requests/Type/Bulk', 'requests',
httpd['bulk_requests'].get('value', 0))
self.add_derive_value('Requests/Type/View', 'requests',
httpd['view_reads'].get('value', 0))
self.add_derive_value('Requests/Type/Temporary View', 'requests',
httpd['temporary_view_reads'].get('value', 0))
def add_request_methods(self, stats):
for method in self.HTTP_METHODS:
self.add_derive_value('Requests/Method/%s' % method, 'requests',
stats[method].get('value', 0))
def add_response_code_stats(self, stats):
for code in self.STATUS_CODES:
self.add_derive_value('Requests/Response/%s' % code, 'requests',
stats[str(code)].get('value', 0))
``` |
{
"source": "jimaples/Moreh",
"score": 3
} |
#### File: hebrew_quiz/code/Moreh.py
```python
from Hebrew import *
from Quizzer import *
h = HebrewReference()
print 'HebrewReference includes', h.__dict__.keys()
cnt = ScaledCounter(0.2, 'gallahad')
q = QuestionSet(zip(h.alephbet, h.a_name),'Hebrew Letter Names')
qz = Quiz(test=0)
qz.append(h.alephbet, h.a_name, 'Hebrew Letter Names',
'Select the name for this character:')
qz.append(h.alephbet, h.a_english,'Hebrew Letter Transliteration',
'Select the English transliteration for this character:')
alternate = [ (i[0],'First '+i[2]) for i in zip(h.a_first, h.alephbet, h.a_name) if i[0] != i[1] ]
alternate += [ (i[0],'Final '+i[2]) for i in zip(h.a_final, h.alephbet, h.a_name) if i[0] != i[1] ]
alternate, names = zip(*alternate)
qz.append(alternate, names, 'Hebrew Alternate Letter Names',
'Select the name for this character:')
qz.append([ u'{1:13s}:{0:>4s}'.format(*i) for i in zip(h.vowels, h.v_names) ],
[ '{0:s} as in {1:s}'.format(*i) for i in zip(h.v_english, h.v_word) ],
'Hebrew Vowel Pronunciation', 'How do you pronounce this vowel?')
# Vowels
# Lesson 5 Excercises
######################################################################
# Display Functions
######################################################################
def dispNextQuestion(q):
"""dispNextQuestion(q)
Command-line interface to display a question and check the answer
q.name : Question category
q.prompt : Question prompt
q.q : Question
q.options : Shuffled answer options
q.correct : Index of correct answer
Output the question index
"""
print '\n'*100
print q.name+'\n'+'-'*60+'\n'+q.prompt+'\n '+q.q
for i, a in enumerate(q.options):
print str(i+1)+')',a
guess = raw_input('\nEnter the number for the answer: ')
return str(int(guess)-1)
def dispFeedback(q, correct, progress):
if correct:
print ' Correct!'
else:
print ' The correct answer is', q.correct
##dispNextQuestion(*qz.question())
def test():
while True:
guess = dispNextQuestion(qz.question())
        if guess.isdigit():
dispFeedback(*qz.answer(int(guess), verbose=True))
i = raw_input('Press Enter to continue...')
```
#### File: Moreh/hebrew_quiz/models.py
```python
from django.db import models
#from google.appengine.ext import db
def update_model(model, d={}):
# add model parameters based on a dictionary
# updating models is an inline change
    for k, v in d.iteritems():
setattr(model, k, v)
class Question(models.Model):
#q, a_opt, a_shift = Quiz.question()
#s_question = db.StringProperty(required=True)
#sl_answers = db.StringListProperty(required=True)
#i_rotate = db.IntegerProperty(required=True)
s_question = models.CharField(max_length=100)
sl_answers = models.CharField(max_length=100)
i_rotate = models.PositiveSmallIntegerField()
def __unicode__(self):
return self.s_question
``` |
{
"source": "jimas95/advanced_mechatronics",
"score": 3
} |
#### File: HW2/HW2-DSP/my_functions.py
```python
import csv
import matplotlib.pyplot as plt # for plotting
import numpy as np
def fft(t, data, dt):
Fs = 1/(t[1]-t[0]) # sample rate
Ts = 1.0/Fs; # sampling interval
ts = np.arange(0,t[-1],Ts) # time vector
y = data # the data to make the fft from
n = len(y) # length of the signal
k = np.arange(n)
T = n/Fs
frq = k/T # two sides frequency range
frq = frq[range(int(n/2))] # one side frequency range
Y = np.fft.fft(y)/n # fft computing and normalization
Y = Y[range(int(n/2))]
return frq,abs(Y)
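# Illustrative usage (sketch): for a 5 Hz sine sampled at 1 kHz the returned
# spectrum peaks at 5 Hz. Note that the dt argument is currently unused; the
# sample rate is derived from the time vector itself.
#
#     t = np.arange(0, 1, 0.001)
#     frq, mag = fft(t, np.sin(2 * np.pi * 5 * t), 0.001)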
def subplot(time,data,dataF,freq,fft_signal,freqF,fft_signalF,title):
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.set_title(title)
ax1.plot(time,data,'black')
ax1.plot(time,dataF,'r')
ax1.set_xlabel('Time')
ax1.set_ylabel('Amplitude')
ax2.loglog(freq,fft_signal,'black') # plotting the fft
ax2.loglog(freqF,fft_signalF,'r') # plotting the fft filtered
ax2.set_xlabel('Freq (Hz)')
ax2.set_ylabel('|Y(freq)|')
plt.show()
def info(time,data):
lengthT = len(time)
lengthD = len(data)
if(lengthT!=lengthD):
print("there is an error in data size")
print("Total number of values --> ",lengthD)
print("Start Time --> ",time[0])
print("Total Time --> ",time[-1])
print("Time dt --> ",time[1]-time[0])
print("sample fre --> ",1/(time[1]-time[0]))
print("Time fre --> ",len(time)/time[-1])
def get_data(file_name):
t = [] # column 0
data = [] # column 1
print("Reading Data -->",file_name)
with open(file_name) as f:
# open the csv file
reader = csv.reader(f)
for row in reader:
t.append(float(row[0])) # leftmost column
data.append(float(row[1])) # second column
print("Getting Data --> Completed")
info(t,data)
return t,data
def plot_data(time,data):
plt.plot(time,data,'b-*')
plt.xlabel('Time [s]')
plt.ylabel('Signal')
plt.title('Signal vs Time')
plt.show()
def plot_data2(time,data1,data2):
plt.plot(time,data1,'b-*')
plt.plot(time,data2,'r-*')
plt.xlabel('Time [s]')
plt.ylabel('Signal')
plt.title('Signal vs Time')
plt.show()
``` |
{
"source": "jimas95/construction-plant",
"score": 2
} |
#### File: src/arm/candle.py
```python
import random
import re
import rospy
from tf.transformations import quaternion_from_euler
from math import pi,cos,sin
from visualization_msgs.msg import Marker,MarkerArray
from geometry_msgs.msg import Vector3,Pose,Point, Quaternion
from std_msgs.msg import ColorRGBA
from arm.srv import get_eef_goal,get_eef_goalResponse,get_eef_goalRequest
class CANDLE():
def __init__(self,status):
self.candle_size = Vector3(x=0.035,y=0.035,z=0.01) # x,y diameter z height
self.arrow_size = Vector3(x=0.025,y=0.01,z=0.01) # x length y,z size
self.colorBlue = ColorRGBA(r=0.0,g=0.0,b=1.0,a=1.0) # Blue
self.colorRed = ColorRGBA(r=1.0,g=0.0,b=0.0,a=1.0) # Red
self.colorYellow = ColorRGBA(r=1.0,g=1.0,b=0.0,a=1.0) # Yellow
self.colorGreen = ColorRGBA(r=0.0,g=1.0,b=0.0,a=1.0) # Green
        self.colorPurple = ColorRGBA(r=1.0,g=0.0,b=1.0,a=1.0) # Purple
self.status = status
self.polar = {"r":0.22,"theta":0.0, 'zeta':0.0}
self.offset_grasp = [-0.05, 0.028,pi/2]
self.offset_pregrasp = [-0.10, 0.05 ,pi/2]
topic = 'candle_marker_'+str(status)
self.publisher = rospy.Publisher(topic, MarkerArray,queue_size=10)
self.markerArray = MarkerArray()
self.markerArray.markers.append(Marker())
self.markerArray.markers.append(Marker())
self.markerArray.markers.append(Marker())
self.markerType = {"CYLINDER": 3, "ARROW":0}
rospy.Service('get_eef_goal_'+str(status), get_eef_goal, self.srvf_get_pose)
self.colorCandle = self.colorYellow
if (status=="place"): self.colorCandle = self.colorPurple
def new_candle(self):
rospy.logdebug("CANDLE--> creating new Candle position")
self.random_pose()
self.publish_visualize()
def update_param(self):
polar = rospy.get_param(self.status+"/polar_pos")
self.polar = {"r":polar[0],"theta":polar[1],'zeta':polar[2]}
self.offset_grasp = rospy.get_param(self.status+"/offset_grasp")
self.offset_pregrasp = rospy.get_param(self.status+"/offset_pregrasp")
def random_pose(self):
# self.polar['r'] = 0.1+random.random()*0.1
self.polar['theta'] = pi/2*(random.random()-1.0)
def get_candle_point(self,offset):
x = (self.polar['r']+offset[0])*cos(self.polar['theta'])
y = (self.polar['r']+offset[0])*sin(self.polar['theta'])
z = self.candle_size.z/2.0 + self.polar['zeta'] + offset[1]
point = Point(x=x,y=y,z=z)
return point
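    # Worked example (sketch): with the defaults r=0.22, theta=0, zeta=0 and
    # offset=[0, 0], get_candle_point returns (x=0.22, y=0.0, z=0.005), i.e.
    # half the candle height above the world plane.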
def add_marker(self,type,size,color,pose,id):
marker = Marker()
marker.header.frame_id = "world"
marker.type = type
marker.action = marker.ADD
marker.scale = size
marker.color = color
marker.pose = pose
marker.id = id
self.markerArray.markers[id] = marker
def publish_visualize(self):
# update values from parameter server
self.update_param()
# add candle marker
self.add_marker( type=self.markerType['CYLINDER'],
size=self.candle_size,
color=self.colorCandle,
pose=self.get_candle_pose(),
id=0)
# add grasp pose arrow
self.add_marker( type=self.markerType['ARROW'],
size=self.arrow_size,
color=self.colorBlue,
pose=self.get_grasp_pose(),
id=1)
# add pre grasp pose arrow
self.add_marker( type=self.markerType['ARROW'],
size=self.arrow_size,
color=self.colorRed,
pose=self.get_pregrasp_pose(),
id=2)
# Publish the MarkerArray
self.publisher.publish(self.markerArray)
def get_candle_pose(self):
return self.get_pose(offset=[0,0],euler=[0,pi/2,self.polar['theta']+pi/2])
def get_grasp_pose(self):
direction = [0, self.offset_grasp[2], self.polar['theta']]
return self.get_pose(offset=self.offset_grasp,euler=direction)
def get_pregrasp_pose(self):
direction = [0, self.offset_pregrasp[2], self.polar['theta']]
return self.get_pose(offset=self.offset_pregrasp,euler=direction)
def get_pose(self,offset,euler):
pos = self.get_candle_point(offset)
quad = quaternion_from_euler(euler[0],euler[1],euler[2])
ori = Quaternion(x=quad[0],y=quad[1],z=quad[2],w=quad[3])
pose = Pose(position = pos, orientation=ori)
return pose
"""Service get eef goal
Empty request
returns the geometry pose of grasp and pregrasp pos
"""
def srvf_get_pose(self,get_eef_goalRequest):
msg = get_eef_goalResponse()
msg.grasp = self.get_grasp_pose()
msg.pregrasp = self.get_pregrasp_pose()
return msg
```
#### File: brain/nodes/ramp.py
```python
import rospy
from rospy.core import NullHandler, logerr
from std_srvs.srv import SetBool
from std_srvs.srv import Empty
from brain.srv import myString,myStringRequest,myStringResponse
import actionlib
import builder.msg
import time
from math import pi
import copy
"""
THIS NODE WILL DRAW/PRINT A 2 LINE RAMP
This is a node controlling the path planner.
It sets cooling or printing mode, where to go to print, and when to go for a refill.
"""
plan_the_line = [
# ("IDLE","NULL"),
("GOTO",["prep_1" ,True]),
("GOTO",["cent_1" ,False]),
("HEAT",True),
("GOTO",["line_1" ,True]),
("INF",False),
("HEAT",False),
("GOTO",["line_1" ,True]),
("INF",False),
("GOTO",["prep_2",True]),
("GOTO",["PREFILL",False]),
("IDLE","NULL"),
("INF",False),
("END","NULL")
]
plan_refill = [
# ("IDLE","NULL"),
("GOTO" ,["REFILL" ,False]),
("FILL","NULL"),
("GOTO" ,["PREFILL" ,True]),
("SLEEP_ARM","NULL"),
("IDLE","NULL"),
("INF",False),
("END","NULL")
]
plan_IDLE = [
("IDLE","NULL"),
("INF",False),
("END","NULL")
]
# WIDTH OF WHEELS --> 0.16
# LENGTH OFFSET OF FUNS --> 0.35
MARGIN_MAX = 0.22
MARGIN_MIN = 0.05
GOTO_POS = {
# refill points
"REFILL": builder.msg.PathPlanInfoGoal(centerX = 0.402, centerY = -0.02 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0, mode = "POINT", direction = 0 , printMD = 0),
"PREFILL": builder.msg.PathPlanInfoGoal(centerX = 0.5 , centerY = -0.02 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0, mode = "POINT", direction = 0 , printMD = 0),
# line one
# "line_1": builder.msg.PathPlanInfoGoal(centerX = 1.44 , centerY = 0.18 , reverse=True ,range = 0.1 , init_time = 0 , step_size = 20 , mode = "LINE", direction = 0 , printMD = 0),
"cent_1": builder.msg.PathPlanInfoGoal(centerX = 1.4 , centerY = 0.15 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0 , mode = "POINT", direction = pi , printMD = 0),
"prep_1": builder.msg.PathPlanInfoGoal(centerX = 0.75 , centerY = 0.15 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0 , mode = "POINT", direction = 0 , printMD = 0),
"prep_2": builder.msg.PathPlanInfoGoal(centerX = 0.75 , centerY = 0.15 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0 , mode = "POINT", direction = pi , printMD = 0),
"line_1" : builder.msg.PathPlanInfoGoal(centerX = 1.45 , centerY = 0.13 , reverse=True ,range = 0.2 , init_time = 0 , step_size = 20 , mode = "LINE", direction = 0 , printMD = 0),
"line_w11": builder.msg.PathPlanInfoGoal(centerX = 1.41 , centerY = 0.16 , reverse=True ,range = 0.12 , init_time = 0 , step_size = 20 , mode = "LINE", direction = 0 , printMD = 0),
"line_w12": builder.msg.PathPlanInfoGoal(centerX = 1.41 , centerY = 0.185 , reverse=True ,range = 0.12 , init_time = 0 , step_size = 20 , mode = "LINE", direction = 0 , printMD = 0),
"line_w21": builder.msg.PathPlanInfoGoal(centerX = 1.41 , centerY = 0.075 , reverse=True ,range = 0.12 , init_time = 0 , step_size = 20 , mode = "LINE", direction = 0 , printMD = 0),
"line_w22": builder.msg.PathPlanInfoGoal(centerX = 1.41 , centerY = 0.045 , reverse=True ,range = 0.12 , init_time = 0 , step_size = 20 , mode = "LINE", direction = 0 , printMD = 0),
}
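# Each plan above is a list of (STATE, ARG) tuples consumed in order by BRAIN:
# for example ("GOTO", ["prep_1", True]) drives to the named GOTO_POS entry
# with reverse mode on, ("HEAT", True) switches the heating plate on, and
# ("INF", ...) repeats the previous plan entry for as long as infinity_mode
# stays set (cleared by the brain_mode "next" command).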
class BRAIN():
""" INIT """
def __init__(self):
rospy.init_node('brain',anonymous=False,log_level=rospy.INFO)
rospy.on_shutdown(self.shutdown)
rospy.loginfo("Initiating RAMP NODE")
rospy.Service('brain_mode', myString, self.srv_set_mode)
self.use_real = rospy.get_param("/use_real")
self.debug_mode = rospy.get_param("/debug_mode",default=False)
self.current_state = "START"
self.command = False
self.infinity_mode = True
self.new_plan = False
self.id = 0
self.current_plan = plan_the_line
self.new_name = "None"
self.prosimo = 1
# self.prosimo = False
self.goto_name = "line_w1"
self.STATES = {
"START":self.start,
"IDLE":self.idle,
"HEAT":self.heat,
"GOTO":self.goto,
"FILL":self.refill,
"SLEEP_ARM":self.arm_sleep,
"INF" :self.infinity,
"END":rospy.signal_shutdown
}
# Creates the SimpleActionClient and wait for server to come up
self.planner_action = actionlib.SimpleActionClient('action_planner', builder.msg.PathPlanInfoAction)
self.planner_action.wait_for_server()
def start(self,null):
rospy.loginfo("INITILIZING 3D PRINTING A RAMP")
self.heatMODE = False
self.heat(self.heatMODE)
def idle(self,null):
time.sleep(0.05)
"""
CALL PATH PLAN ACTION
input[0] --> where to go, pull msg from list
input[1] --> if reverse mode is on (boolean)
"""
def goto(self,input):
self.update_ramp() # switch line Y position
gowhere = input[0]
plan = copy.deepcopy(GOTO_POS[gowhere])
plan.reverse = input[1]
if(not input[1]):
plan.direction = pi + plan.direction
plan.printMD = self.heatMODE
rospy.loginfo(f"STATE --> GOTO --> {gowhere}")
rospy.loginfo(f"STATE --> GOTO --> START ACTION")
self.planner_action.send_goal(plan) # Sends the goal to the action server.
self.planner_action.wait_for_result()
def refill(self,null):
rospy.loginfo("STATE --> CALLING REFILL SERVICE")
if(not self.debug_mode):
self.refillBuilder()
def infinity(self,null):
if(self.infinity_mode):
rospy.loginfo(f"REPEAT STATE {self.current_plan[self.id-1][0]} ")
self.id = self.id - 2
self.infinity_mode = True
def update_ramp(self):
if(GOTO_POS["line_1"].centerY>MARGIN_MAX):
self.prosimo = -1
if(GOTO_POS["line_1"].centerY<MARGIN_MIN):
self.prosimo = 1
GOTO_POS["line_1"].centerY = GOTO_POS["line_1"].centerY + self.prosimo*0.03
plan_the_line[3] = ("GOTO",["line_1",True])
def execute_state(self):
rospy.loginfo(f"STATE --> {self.current_state} --> START")
self.STATES[self.current_state](self.command)
rospy.loginfo(f"STATE --> {self.current_state} --> DONE")
rospy.loginfo("-----------------")
rospy.loginfo("")
def next_state(self):
self.id = self.id + 1
if(len(self.current_plan)>self.id):
(self.current_state,self.command) = self.current_plan[self.id]
else:
rospy.logwarn("something is wrong, we run out of stages")
"""
Shut down node
"""
def shutdown(self):
rospy.logerr("BRAIN --> SHUT DOWN")
############################ SERVICES ############################
"""
control service for setting modes
"""
def srv_set_mode(self,input):
plan = input.mode
msg_response = myStringResponse()
self.new_plan = True
self.new_name = plan
if(plan=="ll"):
self.goto_name = "line_1"
msg_response.msg = "Set Plan --> plan line " + self.goto_name
elif(plan=="l1"):
self.goto_name = "line_w1"
msg_response.msg = "Set Plan --> plan line " + self.goto_name
self.new_name = "ll"
elif(plan=="l2"):
self.goto_name = "line_w2"
msg_response.msg = "Set Plan --> plan line " + self.goto_name
self.new_name = "ll"
elif(plan=="refill"):
msg_response.msg = "Set Plan --> plan refill"
elif(plan=="next"):
self.infinity_mode = False
msg_response.msg = "Moving to next scenario"
else:
msg_response.msg = "wrong input"
return msg_response
def update_plan(self):
if(self.new_name=="ll"):
self.current_plan = plan_the_line
self.id = 0
elif(self.new_name=="refill"):
self.current_plan = plan_refill
self.id = 0
elif(self.new_name=="next"):
self.infinity_mode = False
else:
rospy.logerr("wrong input")
return
(self.current_state,self.command) = self.current_plan[self.id]
"""
call service to set arm on sleep position
"""
def arm_sleep(self,null):
rospy.loginfo("STATE --> SET ARM TO SLEEP")
if(self.debug_mode):
return
# call service
rospy.wait_for_service("/px100/go_to_sleep")
try:
call_srv = rospy.ServiceProxy("/px100/go_to_sleep", Empty)
resp1 = call_srv()
rospy.loginfo("BRAIN --> SET PX100 TO SLEEP ")
except rospy.ServiceException as e:
rospy.logerr("BRAIN --> Service call failed: %s"%e)
"""
call refilling of candles, ACTIVATE ARM
"""
def refillBuilder(self):
rospy.wait_for_service("/px100/refill")
try:
call_srv = rospy.ServiceProxy("/px100/refill", Empty)
resp1 = call_srv()
rospy.loginfo("BRAIN --> ACTIVATE ARM ")
rospy.loginfo("BRAIN --> REFILLING BUILDER WITH CANDLES ")
except rospy.ServiceException as e:
rospy.logerr("BRAIN --> Service call failed: %s"%e)
"""
ACTIVATE/DEACTIVATE HEATING PLATE
"""
def heat(self,mode):
if(mode):
rospy.loginfo("STATE --> SET HEATING PLATE ON")
else:
rospy.loginfo("STATE --> SET HEATING PLATE OFF")
self.heatMODE = mode
# call service
if(not self.use_real): return
rospy.wait_for_service("/heating_node/heatingMode")
try:
call_srv = rospy.ServiceProxy("/heating_node/heatingMode", SetBool)
resp1 = call_srv(mode)
except rospy.ServiceException as e:
rospy.logerr("BRAIN --> Service call failed: %s"%e)
""" INIT smiley face """
def start():
brain_node = BRAIN()
rate = rospy.Rate(2) # publishing frequency
rospy.loginfo("okei lets do it")
# main loop
while not rospy.is_shutdown():
if(brain_node.new_plan):
brain_node.new_plan = False
brain_node.update_plan()
brain_node.execute_state()
brain_node.next_state()
if(brain_node.current_state=="END"):
rospy.logerr("END OF PLAN... ")
brain_node.current_plan = plan_IDLE
brain_node.id = 0
rate.sleep()
""" MAIN """
if __name__ == '__main__':
try:
start()
except rospy.ROSInterruptException:
rospy.logerr("BRAIN --> I THINK NODE DIED...?")
```
#### File: brain/nodes/smiley_face.py
```python
import rospy
from rospy.core import NullHandler, logerr
from std_srvs.srv import SetBool
from std_srvs.srv import Empty
import actionlib
import builder.msg
import time
from math import pi
import copy
"""
THIS NODE WILL DRAW/PRINT A SMILEY FACE
This is a node controlling the path planner
Set cooling or printing mode, where to go to print, and when to go for refill
"""
FLOW = [
# start
("IDLE" ,"NULL"),
("IDLE" ,"NULL"),
("GOTO" ,["PREPRINT",True]),
# back and forth and refill
("GOTO" ,["PREFILL" ,False]),
("GOTO" ,["REFILL" ,False]),
("FILL","NULL"),
("GOTO",["PREPRINT",True]),
# print both eyes
("GOTO",["EYE_1",True]),
("HEAT",True),
("IDLE","NULL"),
("HEAT",False),
("IDLE","NULL"),
("GOTO",["EYE_2",False]),
("HEAT",True),
("IDLE","NULL"),
("HEAT",False),
("IDLE","NULL"),
("GOTO",["PREPRINT",False]),
("IDLE","NULL"),
# print mouth
("GOTO" ,["PREFILL" ,False]),
("GOTO" ,["REFILL" ,False]),
("FILL","NULL"),
("GOTO",["PREPRINT",True]),
("GOTO",["CENMOUTH" ,True]),
("HEAT",True),
("GOTO",["MOUTH" ,True]),
("INF",False),
("HEAT",False),
("GOTO",["MOUTH" ,True]),
("INF",False),
("IDLE","NULL"),
("GOTO",["PREPRINT",False]),
# print head
("GOTO" ,["PREFILL" ,False]),
("GOTO" ,["REFILL" ,False]),
("FILL","NULL"),
("FILL","NULL"),
("GOTO",["PREPRINT",True]),
("GOTO",["SEMICIR" ,True]),
("HEAT",True),
("GOTO",["CIRCLE" ,True]),
("INF",True),
("HEAT",False),
("GOTO",["CIRCLE" ,True]),
("INF",False),
("GOTO",["PREPRINT",False]),
("END","NULL")]
GOTO_POS = {
"REFILL": builder.msg.PathPlanInfoGoal(centerX = 0.402, centerY = -0.02 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0, mode = "POINT", direction = 0 , printMD = 0),
"PREFILL": builder.msg.PathPlanInfoGoal(centerX = 0.5 , centerY = -0.02 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0, mode = "POINT", direction = 0 , printMD = 0),
"PREPRINT": builder.msg.PathPlanInfoGoal(centerX = 0.6 , centerY = 0.15 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0, mode = "POINT", direction = 0 , printMD = 0),
"CIRCLE": builder.msg.PathPlanInfoGoal(centerX = 1.45 , centerY = 0.3 , reverse=True ,range = 0.35 , init_time = 0 , step_size = 40 , mode = "CIRCLE", direction = 0 , printMD = 0),
"SEMICIR": builder.msg.PathPlanInfoGoal(centerX = 1.45 , centerY = 0.3 , reverse=True ,range = 0.35 , init_time = pi , step_size = 30 , mode = "CIRCLE", direction = 0 , printMD = 0),
"EYE_1": builder.msg.PathPlanInfoGoal(centerX = 1.6 , centerY = 0.15 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0, mode = "POINT", direction = 0 , printMD = 0),
"EYE_2": builder.msg.PathPlanInfoGoal(centerX = 1.4 , centerY = 0.15 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0, mode = "POINT", direction = 0 , printMD = 0),
"MOUTH": builder.msg.PathPlanInfoGoal(centerX = 1.5 , centerY = 0.3 , reverse=True ,range = 0.15 , init_time = 0 , step_size = 20 , mode = "LINE", direction = 0 , printMD = 0),
"CENMOUTH": builder.msg.PathPlanInfoGoal(centerX = 1.5 , centerY = 0.3 , reverse=True ,range = 0.0 , init_time = 1 , step_size = 1.0 , mode = "POINT", direction = 0 , printMD = 0)
}
class BRAIN():
""" INIT """
def __init__(self):
rospy.init_node('brain',anonymous=False,log_level=rospy.INFO)
rospy.on_shutdown(self.shutdown)
rospy.loginfo("Initiating smiley face NODE")
self.use_real = rospy.get_param("/use_real")
self.debug_mode = rospy.get_param("/debug_mode",default=False)
self.current_state = "START"
self.command = False
self.id = 0
self.STATES = {
"START":self.start,
"IDLE":self.idle,
"HEAT":self.heat,
"GOTO":self.goto,
"FILL":self.refill,
"SLEEP_ARM":self.set_sleep_arm,
"INF" :self.repeat,
"END":rospy.signal_shutdown
}
# Creates the SimpleActionClient and wait for server to come up
self.planner_action = actionlib.SimpleActionClient('action_planner', builder.msg.PathPlanInfoAction)
self.planner_action.wait_for_server()
def start(self,null):
rospy.loginfo("INITILIZING 3D PRINTING A SMILEY FACE")
self.heatMODE = False
self.heater_mode(False)
def idle(self,ask):
response = 'n'
if(not ask):
time.sleep(2)
response = 'y'
if(self.debug_mode):
response = 'y'
while(response!='y'):
response = input("STOP IDLE MODE ? ")
def set_sleep_arm(self,null):
rospy.loginfo("STATE --> SET ARM TO SLEEP")
if(not self.debug_mode):
self.arm_sleep()
def heat(self,mode):
if(mode):
rospy.loginfo("STATE --> SET HEATING PLATE ON")
self.heater_mode(mode)
self.heatMODE = mode
else:
rospy.loginfo("STATE --> SET HEATING PLATE OFF")
self.heater_mode(mode)
self.heatMODE = mode
def goto(self,input):
gowhere = input[0]
plan = copy.deepcopy(GOTO_POS[gowhere])
plan.reverse = input[1]
if(not input[1]):
plan.direction = pi + plan.direction
plan.printMD = self.heatMODE
rospy.loginfo(f"STATE --> GOTO --> {gowhere}")
rospy.loginfo(f"STATE --> GOTO --> START ACTION")
self.planner_action.send_goal(plan) # Sends the goal to the action server.
self.planner_action.wait_for_result()
def refill(self,null):
rospy.loginfo("STATE --> CALLING REFILL SERVICE")
if(not self.debug_mode):
self.refillBuilder()
def repeat(self,ask = True):
if(ask):
rospy.loginfo(f"DO YOU WANT TO REPEAT STATE {FLOW[self.id-1][0]} ? ")
response = input("yes or no ? ")
else:
response = 'n'
if(response =='y' or response =='yes' or response==""):
rospy.loginfo(f"REPEAT STATE {FLOW[self.id-1][0]} ")
self.id = self.id - 2
return
if(response =='n'):
return
rospy.loginfo("wrong input repeat --> ")
self.repeat(ask)
def done(self):
if(len(FLOW)<=self.id):
return True
else:
return False
def execute_state(self):
rospy.loginfo(f"STATE --> {self.current_state} --> START")
self.STATES[self.current_state](self.command)
rospy.loginfo(f"STATE --> {self.current_state} --> DONE")
rospy.loginfo("-----------------")
rospy.loginfo("")
def print_next_state(self):
if(len(FLOW)>self.id + 1):
rospy.loginfo(f"NEXT STATE IS --> {FLOW[self.id + 1]}")
else:
rospy.logwarn("something is wrong, we run out of stages")
def next_state(self):
self.id = self.id + 1
if(len(FLOW)>self.id):
(self.current_state,self.command) = FLOW[self.id]
else:
rospy.logwarn("something is wrong, we run out of stages")
def shutdown(self):
rospy.logerr("BRAIN --> SHUT DOWN")
############################ SERVICES ############################
"""
set planner on/off
"""
def stopPlan(self,mode):
rospy.wait_for_service('/path_plan/stop')
try:
call_srv = rospy.ServiceProxy('/path_plan/stop', SetBool)
resp1 = call_srv(mode)
rospy.loginfo("BRAIN --> request planer to stop " + resp1.message)
except rospy.ServiceException as e:
rospy.logerr("BRAIN --> Service call failed: %s"%e)
"""
call service to set arm on sleep position
"""
def arm_sleep(self):
rospy.wait_for_service("/px100/go_to_sleep")
try:
call_srv = rospy.ServiceProxy("/px100/go_to_sleep", Empty)
resp1 = call_srv()
rospy.loginfo("BRAIN --> SET PX100 TO SLEEP ")
except rospy.ServiceException as e:
rospy.logerr("BRAIN --> Service call failed: %s"%e)
"""
call refilling of candles, ACTIVATE ARM
"""
def refillBuilder(self):
rospy.wait_for_service("/px100/refill")
try:
call_srv = rospy.ServiceProxy("/px100/refill", Empty)
resp1 = call_srv()
rospy.loginfo("BRAIN --> ACTIVATE ARM ")
rospy.loginfo("BRAIN --> REFILLING BUILDER WITH CANDLES ")
except rospy.ServiceException as e:
rospy.logerr("BRAIN --> Service call failed: %s"%e)
"""
ACTIVATE/DEACTIVATE HEATING PLATE
"""
def heater_mode(self,mode):
if(not self.use_real): return
rospy.wait_for_service("/heating_node/heatingMode")
try:
call_srv = rospy.ServiceProxy("/heating_node/heatingMode", SetBool)
resp1 = call_srv(mode)
except rospy.ServiceException as e:
rospy.logerr("BRAIN --> Service call failed: %s"%e)
""" INIT smiley face """
def start():
brain_node = BRAIN()
rate = rospy.Rate(2) # publishing frequency
rospy.loginfo("okei lets do it")
# main loop
while not rospy.is_shutdown():
if(brain_node.done()):
break
brain_node.execute_state()
brain_node.print_next_state()
brain_node.next_state()
rate.sleep()
""" MAIN """
if __name__ == '__main__':
try:
start()
except rospy.ROSInterruptException:
rospy.logerr("BRAIN --> I THINK NODE DIED...?")
```
#### File: builder/nodes/odom_noise.py
```python
import rospy
from math import *
import numpy as np
from nav_msgs.msg import Odometry
import tf
import tf2_ros
from geometry_msgs.msg import Vector3,Pose,Point, Quaternion,Transform,TransformStamped,PoseStamped
from tf.transformations import quaternion_from_euler
from geometry_msgs.msg import Twist
last_odom = None
pose = [0.0,0.0,0.0]
a1 = 0.0
a2 = 0.0
a3 = 0.0
a4 = 0.0
new_odom_frame = ""
odom_frame = ""
def callback(data):
global last_odom
global new_odom_frame
global odom_frame
global pose
global a1
global a2
global a3
global a4
q = [ data.pose.pose.orientation.x,
data.pose.pose.orientation.y,
data.pose.pose.orientation.z,
data.pose.pose.orientation.w ]
(r, p, theta2) = tf.transformations.euler_from_quaternion(q)
if(last_odom == None):
last_odom = data
pose[0] = data.pose.pose.position.x
pose[1] = data.pose.pose.position.y
pose[2] = theta2
else:
dx = data.pose.pose.position.x - last_odom.pose.pose.position.x
dy = data.pose.pose.position.y - last_odom.pose.pose.position.y
# rospy.logerr(dx)
# rospy.logerr(dy)
# rospy.logerr("")
trans = sqrt(dx*dx + dy*dy)
q = [ last_odom.pose.pose.orientation.x,
last_odom.pose.pose.orientation.y,
last_odom.pose.pose.orientation.z,
last_odom.pose.pose.orientation.w ]
(r,p, theta1) = tf.transformations.euler_from_quaternion(q)
rot1 = atan2(dy, dx) - theta1
rot2 = theta2-theta1-rot1
# rospy.logerr(trans)
# rospy.logerr(rot1)
# rospy.logerr(rot2)
sd_rot1 = a1*abs(rot1) + a2*trans
sd_rot2 = a1*abs(rot2) + a2*trans
sd_trans = a3*trans + a4*(abs(rot1) + abs(rot2))
trans += np.random.normal(0,sd_trans*sd_trans)
rot1 += np.random.normal(0, sd_rot1*sd_rot1)
rot2 += np.random.normal(0, sd_rot2*sd_rot2)
pose[0] += trans*cos(theta1+rot1)
pose[1] += trans*sin(theta1+rot1)
pose[2] += rot1 + rot2
last_odom = data
rospy.logerr(pose[0])
rospy.logerr(pose[1])
rospy.logerr(pose[2])
pos = Vector3(x = pose[0] - data.pose.pose.position.x, y =pose[1] - data.pose.pose.position.y, z = 0)
quad = quaternion_from_euler(0, 0, pose[2] - theta2)
ori = Quaternion(x=quad[0],y=quad[1],z=quad[2],w=quad[3])
hp_tf = Transform(translation = pos, rotation=ori)
br = tf2_ros.TransformBroadcaster()
t = TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = "base_footprint_real"
t.child_frame_id = "base_footprint"
t.transform = hp_tf
br.sendTransform(t)
rospy.logerr("ti fasi ?")
def callback_cmd(data):
global pose
linear = data.linear.x
angular = data.angular.z
sd_rot1 = 0 #angular*1 + linear*0.5
sd_trans = 0 #linear*1.2
trans = np.random.normal(0,sd_trans*sd_trans)
rot1 = np.random.normal(0, sd_rot1*sd_rot1)
pose[0] += trans*cos(rot1) #+ 0.01/200
pose[1] += trans*sin(rot1)
pose[2] += rot1
rospy.logerr(pose[0])
rospy.logerr(pose[1])
rospy.logerr(pose[2])
pos = Vector3(x = pose[0] , y =pose[1] , z = 0)
quad = quaternion_from_euler(0, 0, pose[2])
ori = Quaternion(x=quad[0],y=quad[1],z=quad[2],w=quad[3])
hp_tf = Transform(translation = pos, rotation=ori)
br = tf2_ros.TransformBroadcaster()
t = TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = "base_footprint_real"
t.child_frame_id = "base_footprint"
t.transform = hp_tf
br.sendTransform(t)
if __name__ == '__main__':
rospy.init_node('noisy_odometry', anonymous=True)
a1 = 0.05
a2 = 10.0*pi/180.0
a3 = 1.1
a4 = 0.01
odom_topic = "/odom"
new_odom_frame = "odom"
odom_frame = "odom"
# rospy.Subscriber(odom_topic, Odometry, callback)
rospy.Subscriber("/cmd_vel", Twist, callback_cmd)
rospy.spin()
```
#### File: builder/nodes/path_plan.py
```python
import random
import rospy
from std_srvs import srv
from tf.transformations import quaternion_from_euler
from math import pi,cos,sin
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from geometry_msgs.msg import Vector3,Pose,Point, Quaternion,Transform,TransformStamped,PoseStamped
from std_msgs.msg import ColorRGBA
from nav_msgs.msg import Path
from std_srvs.srv import SetBool,SetBoolResponse
from builder.srv import PathPlanInfo
import time
import actionlib
import builder.msg
import tf2_ros
ARROW_SIZE = Vector3(x=0.3 ,y=0.05,z=0.05) # x length y,z size
SPHERE_SIZE = Vector3(x=0.05 ,y=0.05,z=0.05) # x length y,z size
COLOR_Blue = ColorRGBA(r=0.0,g=0.0,b=1.0,a=1.0) # Blue
COLOR_Red = ColorRGBA(r=1.0,g=0.0,b=0.0,a=1.0) # Red
COLOR_Yellow = ColorRGBA(r=1.0,g=1.0,b=0.0,a=1.0) # Yellow
COLOR_Green = ColorRGBA(r=0.0,g=1.0,b=0.0,a=1.0) # Green
COLOR_Purple = ColorRGBA(r=1.0,g=0.0,b=1.0,a=1.0) # Purple
TO_DEGREE = 57.2958
class PLAN():
def __init__(self):
self.center = {"x":0.5,"y":0}
self.step_size = 20
self.step = 2*pi/self.step_size
self.range = 0.5
self.reverse = True
self.MODE = "CIRCLE"
self.direction = 0
self.printMD = False
self.debug_mode = rospy.get_param("/debug_mode",default=False)
def copy_plan_from_msg(self,msg):
self.step_size = msg.step_size
self.init_time = msg.init_time
self.step = 2*pi/self.step_size
self.center['x'] = msg.centerX
self.center['y'] = msg.centerY
self.reverse = msg.reverse
self.range = msg.range
self.MODE = msg.mode
self.direction = msg.direction
self.printMD = msg.printMD
if(self.MODE == "LINE"): # Do only start and finish point of the line, NO STEPS FOR NOW
self.step_size = 2
self.step = 2*pi/self.step_size
def get_goal(self,time):
if(self.MODE=="LINE"):
if(time>0):
self.reverse = True
else:
self.reverse = False
return builder.msg.huntGoal(reverseMD = self.reverse, debugMD = self.debug_mode)
def get_direction(self,time):
if(self.MODE=="LINE" and time>0): # this is for cos
return self.direction + pi
return self.direction
class PLANNER():
def __init__(self):
self.dir = 0
self.offsetDIR = pi/2
self.time = 0
self.stop = False
self.success = False
self.plan = PLAN()
self.huntPT = Point(x=0,y=0,z=0)
self.publisherVis = rospy.Publisher("hunt_point_vis" , MarkerArray,queue_size=10)
self.publishPath = rospy.Publisher("hunt_point_path", Path,queue_size=10)
self.msg_path = Path()
self.markerArray = MarkerArray()
self.markerType = {"ARROW":0,"SPHERE":2}
self.markerArray.markers.append(Marker())
# create hunting action
self._feedback = builder.msg.PathPlanInfoFeedback()
self._result = builder.msg.PathPlanInfoResult()
self._action = actionlib.SimpleActionServer("action_planner", builder.msg.PathPlanInfoAction, execute_cb=self.execute_action, auto_start = False)
self._action.start()
# Creates the SimpleActionClient and wait for server to come up
self.hunting_action = actionlib.SimpleActionClient('action_hunt', builder.msg.huntAction)
self.hunting_action.wait_for_server()
"""
get a new plan to follow
"""
def execute_action(self,goal):
rospy.loginfo("")
rospy.loginfo("")
rospy.loginfo("PATH PLAN --> GOT NEW PLAN")
self.plan.copy_plan_from_msg(goal)
self.drawPLAN()
self.time = goal.init_time
self.publish_visualize()
self.publish_huntTF()
self.success = False
rate = rospy.Rate(20) # publishing frequency
# start executing the action
while(not self.success):
# check that preempt has not been requested by the client
if self._action.is_preempt_requested():
rospy.logerr("PATH PLAN --> PLANNER ACTION CANCEL" )
self._action.set_preempted()
break
# update & publish the feedback
self._feedback.time = self.time
self._action.publish_feedback(self._feedback)
# publish hunting tf point always (and fast)
self.publish_huntTF()
# play the plan
self.update()
rate.sleep()
if(self.success):
self._result.success = self.success
self._action.set_succeeded(result = self._result)
rospy.loginfo(f"PATH PLAN --> PLANNER ACTION SUCCESS {self.success}")
"""
UPDATE
call navigation hunting action with new point
"""
def update(self):
if(self.goal_reached()):
# disable planer if path is executed
if (self.time>=2*pi):
# rospy.loginfo("PATH PLAN --> FINISH")
self.success = True
else:
rospy.loginfo(f"PATH PLAN --> MODE {self.plan.MODE}")
rospy.loginfo(f"PATH PLAN --> REVE {self.plan.reverse}")
rospy.loginfo(f"PATH PLAN --> TIME {self.time}")
rospy.loginfo(f"PATH PLAN --> POINT {self.huntPT.x:.2f},{self.huntPT.y:.2f}")
rospy.loginfo(f"PATH PLAN --> DIRE {self.dir*TO_DEGREE:.2f}")
rospy.loginfo(f"****************************")
self.next_hp()
# send info tf, plan, arrow got hunting point
self.publish_visualize()
self.publish_huntTF()
self.publish_plan()
# security for publishing target TF before calling action
time.sleep(1.002)
self.publish_huntTF()
# Creates a goal to send to the action server.
goal = self.plan.get_goal(self.time)
self.hunting_action.send_goal(goal) # Sends the goal to the action server.
time.sleep(0.1)
# update time
self.time += self.plan.step
""" calculate new hunting point """
def goal_reached(self):
goalID = self.hunting_action.get_state()
if(goalID==1):
# rospy.loginfo("HUNTING ACTION STATUS --> ACTIVE")
return False
if(goalID==2):
# rospy.loginfo("HUNTING ACTION STATUS --> PREEMPTED")
self.time -= self.plan.step # repeat hunting point
return True
elif(goalID==3):
# rospy.loginfo("HUNTING ACTION STATUS --> SUCCEEDED")
return True
elif(goalID==4):
# rospy.loginfo("HUNTING ACTION STATUS --> ABORTED")
return True
elif(goalID==9):
# rospy.loginfo("HUNTING ACTION STATUS --> LOST")
return True
else:
rospy.logerr(f"HUNTING ACTION STATUS --> ERROR {goalID}")
return False
""" calculate new hunting point """
def next_hp(self):
# calculate new hunting point
if(self.plan.MODE=="CIRCLE"):
pt = self.path_circle(self.time)
if(self.plan.MODE=="LINE"):
pt = self.path_updown(self.time)
if(self.plan.MODE =="POINT"):
pt = Point(x=self.plan.center['x'],y=self.plan.center['y'],z=0)
self.dir = self.plan.direction
# update hunting point
self.huntPT = pt
def drawPLAN(self):
# reset time
self.time = 0
self.msg_path.poses = []
while(self.time<2*pi+0.01):
self.time += self.plan.step
self.next_hp()
pose = self.get_pose()
pose_stamped = PoseStamped(pose = pose)
pose_stamped.header.stamp = rospy.Time.now()
pose_stamped.header.frame_id = "world"
self.msg_path.poses.append(pose_stamped)
self.publish_plan()
# reset time
self.time = 0
def get_pose(self):
euler=[0,0,self.dir]
pos = self.huntPT
quad = quaternion_from_euler(euler[0],euler[1],euler[2])
ori = Quaternion(x=quad[0],y=quad[1],z=quad[2],w=quad[3])
pose = Pose(position = pos, orientation=ori)
return pose
def get_tf(self):
euler=[0,0,self.dir]
pos_temp = self.huntPT
pos = Vector3(x = pos_temp.x, y = pos_temp.y, z = pos_temp.z)
quad = quaternion_from_euler(euler[0],euler[1],euler[2])
ori = Quaternion(x=quad[0],y=quad[1],z=quad[2],w=quad[3])
hp_tf = Transform(translation = pos, rotation=ori)
return hp_tf
"""
equation of circle path, based on time input
"""
def path_circle(self,time):
x = (self.plan.range)*cos(time) + self.plan.center['x']
y = (self.plan.range)*sin(time) + self.plan.center['y']
z = 0
point = Point(x=x,y=y,z=z)
self.dir = time+self.offsetDIR
return point
"""
equation of the up/down line path, based on time input
"""
def path_updown(self,time):
x = self.plan.center['x'] + (self.plan.range)*cos(time)
y = self.plan.center['y']
z = 0
point = Point(x=x,y=y,z=z)
self.dir = self.plan.get_direction(self.time)
return point
"""
create arrow marker object
"""
def add_marker(self,type,size,color,pose,id):
marker = Marker()
marker.header.frame_id = "world"
marker.type = type
marker.action = marker.ADD
marker.scale = size
marker.color = color
marker.pose = pose
marker.id = id
if(id == 0 ):
self.markerArray.markers[id] = marker
else:
self.markerArray.markers.append(marker)
"""
publish visualization arrow of hunting point
"""
def publish_visualize(self):
if(len(self.markerArray.markers)>200):
self.markerArray.markers.pop()
# add arrow at hunt point
col = COLOR_Blue
if(self.plan.reverse):
col = COLOR_Yellow
self.add_marker( type=self.markerType['ARROW'],
size=ARROW_SIZE,
color=col,
pose=self.get_pose(),
id=0)
# keep track
col = COLOR_Blue
if(self.plan.printMD):
col = COLOR_Red
self.add_marker( type=self.markerType['SPHERE'],
size=SPHERE_SIZE,
color=col,
pose=self.get_pose(),
id=len(self.markerArray.markers))
# Publish the MarkerArray
self.publisherVis.publish(self.markerArray)
"""
publish the tf of the hunting point
"""
def publish_huntTF(self):
br = tf2_ros.TransformBroadcaster()
t = TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = "world"
t.child_frame_id = "hunt_point"
t.transform = self.get_tf()
br.sendTransform(t)
"""
publish the global path plan
"""
def publish_plan(self):
self.msg_path.header.frame_id = "world"
self.msg_path.header.stamp = rospy.Time.now()
self.publishPath.publish(self.msg_path)
""" MAIN """
if __name__ == '__main__':
try:
# rospy.init_node('path_planning', log_level=rospy.DEBUG)
rospy.init_node('path_planning', log_level=rospy.INFO)
planner = PLANNER()
rospy.spin()
except rospy.ROSInterruptException:
rospy.logerr("PATH PLAN --> DEAD")
``` |
{
"source": "Jimasd/PyProject",
"score": 3
} |
#### File: Jimasd/PyProject/Iris.py
```python
#________________________________________________________
#Iris
#This project gathers several different machine learning
#methods applied to the "Iris" dataset. We will try to
#predict the flower classification (and any other variable)
#________________________________________________________
#Reset function
#Clears the working variables
def reset():
global df,X,y,X_train,X_test,y_train,y_test
df=pd.read_csv("D:\Anaconda\Projets\Iris Flower\iris.csv")
del X,y,X_train,X_test,y_train,y_test
print("The variables have been cleared")
reset()
#________________________________________________________
#Linear regression
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
%matplotlib inline
reset()
df=pd.read_csv("D:\Anaconda\Projets\Iris Flower\iris.csv")
df
sns.set_style("whitegrid")
#Let's look at an overview of the data
sns.pairplot(df,hue="species")
plt.matshow(df.corr())
#We can see that there is a strong correlation between
#petal width and petal length
#Let's run a linear regression, with petal_width as the
#target variable
from sklearn.model_selection import train_test_split
y=df["petal_width"]
X=df[["sepal_length","sepal_width","petal_length"]]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.3,random_state=123)
#We split our data into two parts: a training set and
#a testing set
from sklearn.linear_model import LinearRegression
lm=LinearRegression()
lm.fit(X_train,y_train)
#This will be our estimator. Using the training data
#for X and y, it will fit a line
print("The coefficients are:", lm.coef_)
#Now, let's test our model
pred=lm.predict(X_test)
plt.scatter(y_test,pred)
#Is our model any good?
sns.distplot((y_test-pred),bins=50)
#The residuals look like a normal curve. There is not
#much overfitting
coefficients=pd.DataFrame(lm.coef_,X.columns)
coefficients.columns=["coefficients"]
coefficients
#So if the petal length increases by 1cm, the petal
#width increases by 0.550335cm.
#We can see that the sepals are not strongly correlated
#with the petal size (which makes sense)
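#Illustrative addition (not in the original script): the usual regression metrics
#give a more quantitative answer than the plots above; mean_squared_error and
#r2_score are standard scikit-learn metrics applied to the predictions computed above.
from sklearn.metrics import mean_squared_error, r2_score
print("MSE:", mean_squared_error(y_test, pred))
print("R2:", r2_score(y_test, pred))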
#________________________________________________________
#Random Forest
#We will predict whether a flower belongs to the setosa
#species or not. Let's look at the data
reset()
df[df["species"]=="setosa"]["petal_width"].hist(bins=50, label="Setosa")
df[df["species"]!="setosa"]["petal_width"].hist(bins=50, label="Pas setosa")
df[df["species"]=="setosa"]["sepal_width"].hist(bins=50, label="Setosa")
df[df["species"]!="setosa"]["sepal_width"].hist(bins=50, label="Pas setosa")
#Transform the data into categorical data
for i in range(1,len(df["species"])):
if df["species"][i]!="setosa":
df["species"][i]="pas_setosa"
df
final_data=pd.get_dummies(df,"species",drop_first=True)
final_data
#Split the data
from sklearn.model_selection import train_test_split
X=final_data.drop("species_setosa",axis=1)
y=final_data["species_setosa"].astype(int)
y
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.30)
#Train the model
from sklearn.ensemble import RandomForestClassifier
rfc=RandomForestClassifier(n_estimators=700)
rfc.fit(X_train,y_train)
#Prediction and conclusion
from sklearn.metrics import confusion_matrix, classification_report
predictions=rfc.predict(X_test)
print("Here are the confusion matrix and the classification report:")
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
#________________________________________________________
#SVM - Support Vector Machine (based on a Udemy course)
reset()
#Looking at the pairplot from the linear regression, we can
#see that setosa is different from the others
#Split the data
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC #needed for the SVC() used in the grid search below
X=df.drop("species",axis=1)
y=df["species"]
X_train,X_test,y_train,y_test=train_test_split(X,y, test_size=0.4)
#Grid search (to pick the best model)
from sklearn.model_selection import GridSearchCV
parametres={"C":[0.1,1,10,100],"gamma":[1,0.1,0.01,0.001]}
g=GridSearchCV(SVC(),parametres,refit=True,verbose=2)
g.fit(X_train,y_train)
#Prediction and conclusion
from sklearn.metrics import confusion_matrix, classification_report
predictions=g.predict(X_test)
print("Here are the confusion matrix and the classification report:")
print(confusion_matrix(y_test,predictions))
print(classification_report(y_test,predictions))
#________________________________________________________
#k-NN - K-Nearest Neighbors
reset()
#Split the data
X=df.drop("species",axis=1)
y=df["species"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
#Train the model
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
#Prediction and conclusion
pred = knn.predict(X_test)
print("Here are the confusion matrix and the classification report:")
print(confusion_matrix(y_test,pred))
print(classification_report(y_test,pred))
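#Illustrative addition (not in the original script): a single accuracy figure is
#often a convenient way to compare the classifiers above; accuracy_score is a
#standard scikit-learn metric, applied here to the k-NN predictions computed above.
from sklearn.metrics import accuracy_score
print("k-NN accuracy:", accuracy_score(y_test, pred))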
``` |
{
"source": "Jimase/Software_Engineering_Team2",
"score": 3
} |
#### File: Jimase/Software_Engineering_Team2/Game.py
```python
import numpy as np
import pygame
import sys
import random
from math import atan2, degrees
from Player import Player
bg = pygame.image.load("images/bg3.png")
click_effect = pygame.image.load("cards_images/click.png")
click_effect_rect = click_effect.get_rect()
r = 330
def in_rect(pos, rect):
"""
้ผ ๆ ๆฏๅฆๅจ็ฎๆ ๅบๅๅ
\n
:param pos: ้ผ ๆ ๅ
ๆ ไฝ็ฝฎ
:param rect: ๆ่ฆๅคๆญ็นๅป็ๅบๅๆก
:return: ้ผ ๆ ๆฏๅฆๅจๅบๅๅ
"""
x, y = pos
rx, ry, rw, rh = rect
if (rx <= x <= rx + rw) and (ry <= y <= ry + rh):
return True
return False
class Card:
def __init__(self, cards, settings):
self.size, self.kind = self.random_a_card(cards)
self.image = pygame.image.load("cards_images/" + str(self.size) + "_" + str(self.kind) + ".png")
self.card_back_image = pygame.image.load("cards_images/back.png")
self.rect = self.image.get_rect()
self.rect.right = settings.screen_width * 0.9
self.rect.top = settings.screen_height
self.rect_canterx = self.rect.centerx
self.rect_cantery = self.rect.centery
self.rate = 0 # target rotation angle
self.current_rate = 0
self.new_rect = self.rect # rect after rotation
# print(self.size,self.kind)
# @staticmethod
def random_a_card(self, cards):
size = np.random.randint(cards.shape[0]) # pick one of the 13 card values
kind = np.argmax(cards[size]) # check whether any suit of this value is still left; if none is, redraw
while cards[size][kind] == 0:
cards = np.delete(cards, size, 0) # drop the exhausted value's row
size = np.random.randint(cards.shape[0]) # pick one of the remaining card values
kind = np.argmax(cards[size])
cards[size][kind] = 0
return size, kind
def cards_init(cards_array, settings):
# ๆๅ
็ๅฐฑๅชๆ54ๅผ ๏ผไปAใ2~QใKๅ4ๅผ ๏ผๆ ๅคง็ใๅฐ็
cards = np.ones((13, 4), dtype=int) # ๅๅงๅ ็ฉ้ต๏ผๅ
จ้จๅๅผไธบ1
print(cards)
for i in range(cards.shape[0] * cards.shape[1]):
cards_array.append(Card(cards, settings))
print(cards_array)
def show_cards(screen, cards_array, settings):
"""
ๅผๅงๅจ็ป \n
ไฟฎๆน cards_array ไธญ card ๅๆ \n
:return:
"""
card_box = [settings.screen_height * 0.1, settings.screen_height * 0.8] # ไธ้กถ๏ผไธๅบ
card_card_distance = 12 # ็ไธ็้ด่ท
move_over = False
while not move_over:
# screen.fill((0, 0, 0))
screen.blit(bg, (0, 0))
for i in range(len(cards_array)):
if i == 0:
if cards_array[i].rect.y > card_box[0]:
cards_array[i].rect.y -= card_card_distance
else:
move_over = True
else:
# if it is not the first card and its y is still more than two steps below the previous card's y
if cards_array[i].rect.y >= cards_array[i - 1].rect.y + 2 * card_card_distance:
cards_array[i].rect.y -= card_card_distance
screen.blit(cards_array[i].card_back_image, cards_array[i].rect)
pygame.display.update()
pygame.time.wait(5)
def show_insert(screen, settings, temp, cards_in_show, cards_array, player1, player2, turn):
"""
ๆพ็คบๆๅ
ฅๅจ็ป๏ผ ่ฐๆดๆฐๅก็็ๅๆ ๅ่งๅบฆ๏ผ ๅนถๆพๅ
ฅ ๅดๆๅ็ๅก็ ไธญ\n
:param screen: ๅฑๅน
:param settings: ็ชๅฃ่ฎพ็ฝฎ
:param temp: ไธดๆถๅก็ๅญๅจ
:param cards_in_show: ๅดๆๅ็ๅก็
:param cards_array: ไธๅผๅงๅญๆพ็ๅ
จ้จ็ๅก็
:return: ๆ ่ฟๅๅผ
"""
current_rate = 315
change_rate = -36
translation = True # still sliding the new cards in
while translation:
if temp[0].rect.x > settings.screen_width * 0.7:
# screen.fill((0, 0, 0))
screen.blit(bg, (0, 0))
for card in cards_array:
screen.blit(card.card_back_image, card.rect)
for card in temp:
card.rect.x -= 10
screen.blit(card.card_back_image, card.rect)
player1.update(turn, screen, settings)
player2.update(turn, screen, settings)
pygame.display.update()
else:
translation = False
# determine the final positions
next_card_rate = (current_rate) % 360
for card in cards_in_show:
# current angle (from the card's current coordinates), used as the start of the card's travel path
y = card.rect.centery - settings.screen_height / 2 # get Y
x = card.rect.centerx - settings.screen_width / 2 # get X
angle = atan2(y, x) # arctangent
card.current_rate = int(degrees(angle) % 360) # convert to degrees
card.rect_centerx = np.cos((360 - card.current_rate) * np.pi / 180) * r + settings.screen_width / 2
card.rect_centery = np.sin((360 - card.current_rate) * np.pi / 180) * r + settings.screen_height / 2
# the final angle the card has to rotate to
card.rate = int(next_card_rate)
next_card_rate += change_rate
# determine the final positions
next_card_rate = (current_rate + change_rate * len(cards_in_show)) % 360
for card in temp:
# current angle (from the card's current coordinates), used as the start of the card's travel path
y = card.rect.centery - settings.screen_height / 2 # get Y
x = card.rect.centerx - settings.screen_width / 2 # get X
angle = atan2(y, x) # arctangent
card.current_rate = int(degrees(angle) % 360) # convert to degrees
card.rect_centerx = np.cos((360 - card.current_rate) * np.pi / 180) * r + settings.screen_width / 2
card.rect_centery = np.sin((360 - card.current_rate) * np.pi / 180) * r + settings.screen_height / 2
# the final angle the card has to rotate to
card.rate = int(next_card_rate)
cards_in_show.append(card)
next_card_rate += change_rate
del temp
def speed(center1, biger, center2):
max_speed = 6
if biger:
if center1 - center2 > 10:
return int((center1 / center2) * max_speed)
else:
return int((center1 / center2) * 1)
else:
if center2 - center1 > 10:
return int((center2 / center1) * max_speed)
else:
return int((center2 / center1) * 1)
def position_correct(cards_in_show, settings, screen): # correct the positions of the cards in cards_in_show
"""
Animation of the cards flying to their spots; adjusts their coordinates.
:param cards_in_show: cards fanned out on the table
:param settings: window settings
:param screen: the screen
:return: no return value
"""
for card in cards_in_show:
if card.rect.centerx != card.rect_centerx:
if card.rect.centerx > card.rect_centerx:
card.rect.centerx -= speed(card.rect.centerx, 1, card.rect_centerx)
else:
card.rect.centerx += speed(card.rect.centerx, 0, card.rect_centerx)
if card.rect.centery != card.rect_centery:
if card.rect.centery > card.rect_centery:
card.rect.centery -= speed(card.rect.centery, 1, card.rect_centery)
else:
card.rect.centery += speed(card.rect.centery, 0, card.rect_centery)
if card.current_rate != card.rate:
if card.current_rate > card.rate:
card.current_rate -= 1
else:
card.current_rate += 1
card.rect_centerx = int(np.cos((360 - card.current_rate) * np.pi / 180) * r + settings.screen_width / 2)
card.rect_centery = int(np.sin((360 - card.current_rate) * np.pi / 180) * r + settings.screen_height / 2)
new_card = pygame.transform.rotate(card.card_back_image, card.current_rate % 360) # rotate the card back image before drawing
card.new_rect = new_card.get_rect(center=card.rect.center)
screen.blit(new_card, card.new_rect)
def game(screen, settings):
"""
ๆธธๆไธป็จๅบ \n
:param screen: ๅฑๅน
:param settings: ็ชๅฃ่ฎพ็ฝฎ
:return: ๆๆ ่ฟๅๅผ
"""
background_sound = pygame.mixer.Sound("images/background.mp3")
background_sound.play()
cards_array = []
cards_init(cards_array, settings)
# print(len(cards_array))
# print("over")
show_cards(screen, cards_array, settings)
# cards in the deck vs. cards fanned out on the table
cards_in_show = []
max_nums = 10 # maximum number of cards fanned out at once
# the cards in the middle
center_card = [0, []] # top card, plus the cards stacked under it so far
turn = 2 # which player starts the game by default
player1 = Player(1)
player2 = Player(2)
font = pygame.font.Font('images/MIAO.TTF', 80)
click_card_sound = pygame.mixer.Sound("images/cp.ogg")
while True:
# screen.fill((0, 0, 0)) # fill the screen with a plain black background
screen.blit(bg, (0, 0)) # draw the background image
# get events
events = pygame.event.get()
for event in events:
# if the event is the window close button or ESC (ESC does not seem to do much at the moment)
if event.type == pygame.QUIT or event.type == pygame.K_ESCAPE:
# quit the program
pygame.quit()
sys.exit()
elif event.type == pygame.MOUSEBUTTONDOWN:
for card in cards_in_show:
if in_rect(event.pos, card.new_rect):
card.rect.centerx, card.rect.centery = settings.screen_width/2, settings.screen_height/2
if not center_card[0]: # if there is no card in the middle yet
center_card[0] = card
elif card.kind != center_card[0].kind:
center_card[1].append(center_card[0])
center_card[0] = card
else:
center_card[1].append(card)
center_card[1].append(center_card[0])
player1.receive_card(turn, center_card[1])
player2.receive_card(turn, center_card[1])
center_card = [0, []] # reset
cards_in_show.remove(card)
click_card_sound.play()
turn += 1
if turn > 2:
turn = 1
break
if turn == 1 :
for card in player1.card_array:
if in_rect(event.pos, card.rect):
card.rect.centerx, card.rect.centery = settings.screen_width / 2, settings.screen_height / 2
if not center_card[0]: # if there is no card in the middle yet
center_card[0] = card
player1.card_array.remove(card)
elif card.kind != center_card[0].kind: # different suit
center_card[1].append(center_card[0])
center_card[0] = card
player1.card_array.remove(card)
else: # same suit
center_card[1].append(center_card[0])
player1.receive_card(turn, center_card[1])
player2.receive_card(turn, center_card[1])
center_card = [0, []] # reset
turn += 1
if turn > 2:
turn = 1
click_card_sound.play()
break
else:
# sorted(player2.card_array, key=lambda x: x.kind)
for card in player2.card_array:
if in_rect(event.pos, card.rect):
card.rect.centerx, card.rect.centery = settings.screen_width / 2, settings.screen_height / 2
if not center_card[0]: # if there is no card in the middle yet
center_card[0] = card
player2.card_array.remove(card)
elif card.kind != center_card[0].kind:
center_card[1].append(center_card[0])
center_card[0] = card
player2.card_array.remove(card)
else:
center_card[1].append(center_card[0])
player1.receive_card(turn, center_card[1])
player2.receive_card(turn, center_card[1])
center_card = [0, []] # reset
turn += 1
if turn > 2:
turn = 1
click_card_sound.play()
break
player1.card_array=sorted(player1.card_array, key=lambda x: x.kind)
player2.card_array=sorted(player2.card_array, key=lambda x: x.kind)
player1.update(turn, screen, settings)
player2.update(turn, screen, settings)
# for every card still in the deck
for card in cards_array:
screen.blit(card.card_back_image, card.rect) # draw each card face-down on the screen
# not strictly needed; it mainly makes the cards look a bit nicer
pygame.draw.line(screen, (100, 100, 100), (card.rect.left, card.rect.top), (card.rect.right, card.rect.top), 1)
# if fewer than 3 cards are fanned out, deal new cards: the amount added is the maximum minus the current count
if len(cards_in_show) <= 3:
print("dealing new cards")
temp = []
for i in range(max_nums - len(cards_in_show)):
# if the deck still has cards
if len(cards_array) != 0:
card = random.choice(cards_array)
temp.append(card)
cards_array.remove(card)
else:
break
# if the temporary buffer picked up any cards
if len(temp) != 0:
# play the insert animation and adjust the new cards' coordinates and rotation angles
show_insert(screen, settings, temp, cards_in_show, cards_array, player1, player2, turn)
# if no cards are fanned out any more
if len(cards_in_show) <= 0:
# the deck is empty, so the game is over
background_sound.stop()
if len(player1.card_array) < len(player2.card_array):
print("player1 win")
player1.win(screen, settings)
else:
print("player2 win")
player2.win(screen, settings)
game_wait_close()
mouse_pos = pygame.mouse.get_pos()
for card in cards_in_show:
if in_rect(mouse_pos, card.new_rect):
new_click_effect = pygame.transform.rotate(click_effect, card.current_rate) # rotate the highlight to match the card
card.new_rect = click_effect.get_rect(center=card.new_rect.center)
screen.blit(new_click_effect, card.new_rect)
break
sorted(player1.card_array,key=lambda x: str(x.kind))
if turn == 1:
for card in player1.card_array:
if in_rect(mouse_pos, card.rect):
screen.blit(click_effect, [n-10 for n in card.rect])
break
else:
for card in player2.card_array:
if in_rect(mouse_pos, card.rect):
screen.blit(click_effect, [n-10 for n in card.rect])
break
# draw the card currently in the middle
if center_card[0]:
screen.blit(center_card[0].image, center_card[0].rect)
# ---------------------------------------
if len(center_card[1]) != 0:
text = "X" + str(len(center_card[1]) + 1)
text_render = font.render(text, True, (0, 255, 0))
text_rect = text_render.get_rect()
screen.blit(text_render, (center_card[0].rect.right + text_rect.width/2, center_card[0].rect.centery - text_rect.height/2))
# ---------------------------------------
# adjust coordinates
position_correct(cards_in_show, settings, screen)
# update the display
pygame.display.update()
def game_wait_close():
while True:
for event in pygame.event.get():
# if the event is the window close button or ESC (ESC does not seem to do much at the moment)
if event.type == pygame.QUIT or event.type == pygame.K_ESCAPE:
# quit the program
pygame.quit()
sys.exit()
if __name__ == '__main__':
# this block makes it easy to run this file on its own for testing
import pygame
import settings
settings = settings.Settings()
pygame.init()
screen = pygame.display.set_mode((settings.screen_width, settings.screen_height))
game(screen, settings)
``` |
{
"source": "jimasp/behave-vsc",
"score": 2
} |
#### File: multiroot bad features path/features/environment.py
```python
import parse
from behave import model, register_type
@parse.with_pattern(r"(| flag here)")
def parse_flag(text:str):
return text.strip().lower() == "flag here"
register_type(flag_here=parse_flag)
def before_scenario(context, scenario:model.Scenario): #pylint: disable=unused-argument
if "normal_skip" in scenario.effective_tags:
scenario.skip("Marked with @normal_skip")
return
if "raise_error_in_before_scenario" in scenario.effective_tags:
raise Exception("error in before_scenario hook")
def after_scenario(context, scenario:model.Scenario): #pylint: disable=unused-argument
if "normal_skip" in scenario.effective_tags:
scenario.skip("Marked with @normal_skip")
return
if "raise_error_in_after_scenario" in scenario.effective_tags:
raise Exception("error in after_scenario hook")
```
#### File: grouped/steps/outline_feature_steps.py
```python
from behave import *
@given('I put "{thing}" in a blender')
def step_given_put_thing_into_blender(context, thing):
context.blender = Blender()
context.blender.add(thing)
@when("I switch the blender on")
def step_when_switch_blender_on(context):
context.blender.switch_on()
@then('it should transform into "{other_thing}"')
def step_then_should_transform_into(context, other_thing):
assert context.blender.result == other_thing
# some code elsewhere that we are testing
class Blender(object):
TRANSFORMATION_MAP = {
"apples": "apple juice",
"iPhone": "toxic waste",
"Galaxy Nexus": "toxic waste",
"Red Tree Frog": "mush",
}
def __init__(self):
self.thing = None
self.result = None
@classmethod
def select_result_for(cls, thing):
return cls.TRANSFORMATION_MAP.get(thing, "DIRT")
def add(self, thing):
self.thing = thing
def switch_on(self):
self.result = self.select_result_for(self.thing)
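
# Quick illustrative check (not part of the original step definitions): the Blender
# class can also be exercised directly outside behave, which can help when debugging
# the TRANSFORMATION_MAP.
if __name__ == "__main__":
    blender = Blender()
    blender.add("Red Tree Frog")
    blender.switch_on()
    assert blender.result == "mush"
    print(blender.result)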
``` |
{
"source": "jimb245/scriptremote",
"score": 2
} |
#### File: test/Suite1/test5.py
```python
import os
import unittest
import srutil
import srio
import credentials
class Test(unittest.TestCase):
def runTest(self):
try:
srio.SR_send('Location1')
self.fail()
except Exception as e:
print str(e)
srutil.SR_delete_project()
```
#### File: test/Suite1/test9.py
```python
import os
import unittest
import srutil
import srio
import credentials
class Test(unittest.TestCase):
def runTest(self):
user = credentials.SRUSER
token = credentials.SRTOKEN
projName = 'TEST(suite1)-Pro/ject9'
jobName = 'Job'
result = srio.SR_start(user, token, projName, jobName)
if (result[0] != srio.SR_ERR) or (result[1] != u'ERROR: SLASH IN PROJECT NAME'):
self.fail()
``` |
{
"source": "jimbach/ctp",
"score": 2
} |
#### File: scripts/dipro/read_orbitals.py
```python
import sys  # needed for the sys.exit() call below

def readOrb(path,molIdx,nsaos_comp):
molcoef=[]
molcoef_add=[]
coefs_full=[]
LastLine=[]
if molIdx!=1 and molIdx!=2:
print "molIdx can only be 1 or 2!"
sys.exit()
molFile=open(path,'r')
readMo='false'
for line in molFile:
#skip comment lines
if line.find('#') == 0:
continue
#stop reading when $end statement is reached
if '$end' in line:
break
#analyse mo-file header
if 'scfconv' in line:
cols=line.split('=')
str0=cols[1].split(' ')
scfconv=int(str0[0])
line = line.strip()
#read eigenvalue and calculate size of mo-block
if 'nsaos' in line:
lineCounter=1
readMo='true'
cols=line.split('=')
nsaos=int(cols[2])
str1=cols[1].split(' ')
eigenvalue=[float(str1[0].replace('D','e'))]
ElementsInLastLine=nsaos%4
if ElementsInLastLine != 0:
NumberOfLines=(nsaos/4+1)
else:
NumberOfLines=(nsaos/4)
continue
#read mo-coefficients
if readMo == 'true':
CoeffString=line
#put the mo-coefficients into molcoef1
if lineCounter < NumberOfLines:
for j in range(4):
molcoef.append( CoeffString[0+j*20:20+j*20] )
lineCounter+=1
elif lineCounter == NumberOfLines and not 'nsaos' in line:
#take care for non-complete lines
if ElementsInLastLine != 0:
for k in range(ElementsInLastLine):
molcoef.append( CoeffString[0+k*20:20+k*20] )
else:
for k in range(4):
molcoef.append( CoeffString[0+k*20:20+k*20] )
for j in range(nsaos_comp):
#generate field with zeros for the other molecule
molcoef_add.append( '0.00000000000000D-00' )
#now glue eigenvalue, coefficients and zeros together
if molIdx == 1:
eigenvalue.extend(molcoef)
eigenvalue.extend(molcoef_add)
else:
eigenvalue.extend(molcoef_add)
eigenvalue.extend(molcoef)
#store complete mo into the mo-vector list
coefs_full.append( eigenvalue )
#re-initialize for next pass
molcoef=[]
molcoef_add=[]
eigenvalue=[]
readMo='false'
molFile.close()
return coefs_full
def getOrbDim(path):
""" extract "scfconv" and "nsaos" from the orbital read """
molFile=open(path,'r')
for line in molFile:
#skip comment lines
if line.find('#') == 0:
continue
#stop reading when $end statement is reached
if '$end' in line:
print "we should never come here!"
break
#analyse mo-file header
if 'scfconv' in line:
cols=line.split('=')
str0=cols[1].split(' ')
scfconv=int(str0[0])
#read size of mo-block
if 'nsaos' in line:
lineCounter=1
readMo='true'
cols=line.split('=')
nsaos=int(cols[2])
break
molFile.close()
return scfconv,nsaos
def writeMo(scfconv,nsaos,coefs_full,name):
"""coefs_full is the mo vector field obtained by readOrb, name should be alpha,beta or mos"""
import sys
outFile=open(name,'w')
if name == "alpha":
outFile.write("$uhfmo_alpha scfconv=%d format(4d20.14)\n#generated by merge_mos.py\n#\n" % (scfconv))
if name == "beta":
outFile.write("$uhfmo_beta scfconv=%d format(4d20.14)\n#generated by merge_mos.py\n#\n" % (scfconv))
if name == "mos":
outFile.write("$scfmo scfconv=%d format(4d20.14)\n#generated by merge_mos.py\n#\n" % (scfconv))
ElementsInLastLineNew=nsaos % 4
for i in range(nsaos):
#loop over mos
outFile.write("%6d a eigenvalue=%19.14lfD+00 nsaos=%d\n" % (i+1,coefs_full[i][0],nsaos))
for j in range(nsaos/4):
#loop over lines
outFile.write("%s%s%s%s\n" % (coefs_full[i][1+j*4],coefs_full[i][2+j*4],coefs_full[i][3+j*4],coefs_full[i][4+j*4]))
if ElementsInLastLineNew > 0:
LastLine=[]
for k in range(ElementsInLastLineNew):
#loop for elements in last line
LastLine.append(coefs_full[i][k+1+(j+1)*4])
str3=''.join(LastLine)
outFile.write("%s\n" % (str3))
outFile.write('$end\n')
outFile.close()
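
# Illustrative usage sketch (not part of the original module): one way these helpers
# could be combined to merge the orbitals of two monomers into a single dimer file.
# The file names "mos.1" and "mos.2" are placeholders, not a fixed convention.
if __name__ == '__main__':
    scfconv1, nsaos1 = getOrbDim("mos.1")
    scfconv2, nsaos2 = getOrbDim("mos.2")
    # pad each monomer's coefficients with zeros for the other monomer's basis functions
    coefs = readOrb("mos.1", 1, nsaos2) + readOrb("mos.2", 2, nsaos1)
    writeMo(scfconv1, nsaos1 + nsaos2, coefs, "mos")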
``` |
{
"source": "JimBae/pythonTips",
"score": 3
} |
#### File: JimBae/pythonTips/03_generator.py
```python
import os
import sys
#--------------
# * iterable
# * iterator
# * iteration
# * generator
#--------------
# * iterable : an object that can be iterated over
#   any Python object that defines an __iter__ or __getitem__ method.
# * iterator : an object that does the iterating
#   any object that defines a next() or __next__() method.
# * iteration : the act of repeating
#   the process of fetching items one by one from a container such as a list.
# * generator
#   a generator is an iterator, but it can only be iterated over once.
#   it does not keep all of its values in memory; it produces each value on the fly as it is used.
#   the produced values are consumed by a for loop, or by any function or construct that iterates.
#   most generators are implemented as functions; instead of returning a value, they only yield it.
#   generators are useful when you have to compute a large set of results all at once
#   and do not want to keep them all in memory,
#   especially for computations that themselves involve loops.
# fibonacci generator version
def fibonacci(n):
a = b = 1
for i in range(n):
yield a
a, b = b, a+b
for x in fibonacci(100):
print(x)
gen = fibonacci(100)
print(next(gen))
print(next(gen))
print(next(gen))
print(next(gen))
print(next(gen))
# the str type is iterable, but it is not an iterator.
# you cannot call next() on it directly, but you can once you wrap it with iter().
myStr = "abcdefg"
myIter = iter(myStr)
print(next(myIter))
print(next(myIter))
print(next(myIter))
print(next(myIter))
print(next(myIter))
print(next(myIter))
print(next(myIter))
#print(next(myIter)) # error
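
# A small illustrative sketch (not part of the original tutorial): a hand-rolled
# iterator class showing the __iter__/__next__ protocol described above.
# The class name CountDown is just an example.
class CountDown:
    def __init__(self, start):
        self.current = start
    def __iter__(self):
        # an iterator simply returns itself from __iter__
        return self
    def __next__(self):
        if self.current <= 0:
            raise StopIteration
        self.current -= 1
        return self.current + 1

for n in CountDown(3):
    print(n)  # prints 3, 2, 1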
``` |
{
"source": "jimbaker/clamped",
"score": 2
} |
#### File: clamped/clamped/__init__.py
```python
from six.moves import urllib
from java.io import Serializable
from java.util.concurrent import Callable
from clamp import clamp_base
BarBase = clamp_base("bar")
class BarClamp(BarBase, Callable, Serializable):
def __init__(self):
print "Being init-ed", self
def call(self):
print "Hello, world!"
return 42
``` |
{
"source": "Jimballoons/tempmailwrapper",
"score": 2
} |
#### File: Jimballoons/tempmailwrapper/setup.py
```python
from setuptools import setup, find_packages
import os
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
LONG_DESCRIPTION = read('README.md')
setup(
name="tempmailwrapper",
version="0.0.7",
author="Jimballoons",
author_email="<EMAIL>",
url='https://github.com/Jimballoons/tempmailwrapper',
description="API wrapper for Temp Mail API.",
long_description_content_type="text/markdown",
long_description=LONG_DESCRIPTION,
packages=find_packages(),
install_requires=['requests',],
keywords=['python', 'tempmail', 'temporary', 'email', 'wrapper',],
download_url = 'https://github.com/Jimballoons/tempmailwrapper/archive/refs/tags/0.0.7.tar.gz',
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
``` |
{
"source": "jimbarnesrtp/pf2",
"score": 3
} |
#### File: jimbarnesrtp/pf2/buildItems.py
```python
from bs4 import BeautifulSoup
import json
import datetime
from pf2helpers import Pf2Helpers
import os
import re
data_holder = {}
data_holder['name'] = 'Pathfinder 2.0 ItemList v2'
data_holder['date'] = datetime.date.today().strftime("%B %d, %Y")
class ItemBuilder():
item_keywords = ['name','source', 'rarity', 'category', 'subcategory']
blacklist = ['[document]','noscript','header','html','meta','head', 'input','script', 'h1','img','i','a','b','h3']
pf = Pf2Helpers()
weapons = []
def load_all_items(self):
all_data = []
cur_path = os.getcwd()
i = 1
while i < 5:
file_name = cur_path+"/itemcsv/RadGridExport-%s.csv" % i
raw_data = self.pf.load_csv(file_name)
all_data.extend(raw_data)
i += 1
self.load_weapons()
return all_data
def load_weapons(self):
cur_path = os.getcwd()
raw_data = self.pf.load_csv(cur_path+"/itemcsv/BaseWeapons.csv")
link_list = ['name','source', 'rarity', 'group']
self.weapons = self.normalize_data(raw_data, link_list)
#""Price","Damage","Hands","Range","Reload","Bulk","Group"
def normalize_data(self, data_list, link_list):
norm_data = []
for data in data_list:
keys = list(data.keys())
new_data = {}
for key in keys:
if key in link_list:
new_data[key] = self.pf.norm_link(data[key])
elif key == 'pfs':
new_data[key] = self.pf.norm_pfs(data[key])
elif key == 'level':
value = data[key]
if value == "โ":
new_data[key] = value
else:
new_data[key] = int(value)
elif key == 'traits':
new_data[key] = self.pf.norm_multi(data[key])
else:
new_data[key] = data[key]
new_data['link'] = self.pf.norm_url(data['name'])
norm_data.append(new_data)
return norm_data
def populate_data(self, data):
new_data = {}
new_data.update(data)
main = self.pf.load_html(new_data['link'])
if new_data['category'] == "Snares":
new_data = self.parse_others(new_data, main)
if new_data['category'] == "Vehicles":
new_data = self.parse_vehicles(new_data, main)
elif new_data['category'] != "Wands":
new_data = self.parse_regular(new_data, main)
#print('HTML', main)
if new_data['category'] == "Weapons":
new_data = self.parse_weapon_preload_stats(new_data)
return new_data
def parse_vehicles(self, new_data, main):
blacklist = ['[document]','noscript','header','html','meta','head', 'input','script', 'h1','img','h3', 'h2', 'h1']
children = self.pf.split_children_by_rule(main, "<h2")
while("" in children) :
children.remove("")
child_pos = 0
while child_pos < len(children):
if child_pos == 0:
stats = self.pf.parse_text_from_html(children[0], blacklist)
key_words = ['Price', 'Hands', 'Range', 'Category', 'Group', 'Traits', 'Damage', 'Bulk', 'Source', 'Favored Weapon', 'Usage', 'Space', 'Crew',
'Piloting Check', 'AC', 'Fort', 'Hardness', 'HP', 'Immunities', 'Speed', 'Collision','Passengers']
objectified = self.pf.objectify_attributes(stats, key_words)
new_data.update(objectified)
new_data.pop("raw", None)
child_pos += 1
return new_data
def parse_others(self, new_data, main):
blacklist = ['[document]','noscript','header','html','meta','head', 'input','script', 'h1','img','h3']
children = self.pf.split_children_by_rule(main, "<hr")
while("" in children) :
children.remove("")
child_pos = 0
while child_pos < len(children):
if child_pos == 1:
description = self.pf.parse_text_from_html(children[child_pos], blacklist)
new_data['description'] = description
child_pos += 1
return new_data
def parse_regular(self, new_data, main):
blacklist = ['[document]','noscript','header','html','meta','head', 'input','script', 'h1','img','h3']
children = self.pf.split_children_by_rule(main, "<h2")
while("" in children) :
children.remove("")
child_pos = 0
while child_pos < len(children):
if child_pos == 0:
pos = children[0].find("<hr")
stats= self.parse_details(children[0][0:pos])
if new_data['category'] == 'Weapons'and "Category" in stats:
weapon_cat = stats['Category']
stats.pop("Category", None)
stats['weaponCat'] = weapon_cat
for key in stats:
new_data[key.lower()] = stats[key]
new_data.pop("raw", None)
new_data['description'] = self.pf.parse_text_from_html(children[0][pos:], blacklist)
elif child_pos > 0:
if "Critical Specialization Effects" in children[child_pos]:
crit = self.parse_crit(children[child_pos])
new_data['critEffects'] = crit
elif "Traits" in children[child_pos]:
traits = self.parse_traits(children[child_pos])
new_data['traitDetails'] = traits
#print(attack)
child_pos += 1
return new_data
def parse_details(self, text):
blacklist = ['[document]','noscript','header','html','meta','head', 'input','script', 'h1','img','h3']
stripped_text = self.pf.parse_text_from_html(text, blacklist)
#Source Core Rulebook pg. 282 2.0 Price โ (Varies); Damage Varies; Bulk L Hands 1; Range 20 ft. Category Martial Group Bomb; Traits Varies, '
key_words = ['Price', 'Hands', 'Range', 'Category', 'Group', 'Traits', 'Damage', 'Bulk', 'Source', 'Favored Weapon', 'Usage', 'Space', 'Crew',
'Piloting Check', 'AC', 'Fort', 'Hardness', 'HP', 'Immunities', 'Speed', 'Collision','Passengers']
objectified = self.pf.objectify_attributes(stripped_text, key_words)
#objectified.pop("Source", None)
return objectified
def parse_crit(self, text):
blacklist = ['[document]','noscript','header','html','meta','head', 'input','script', 'h1','img','h3', 'h2']
found_list = re.finditer("<b>(.*?)</b>", text)
for match in found_list:
key = text[match.start():match.end()]
#print("Key:", key)
if "Source" not in key:
#print("Match:", text[match.start():])
stripped_text = self.pf.parse_text_from_html(text[match.start():], blacklist)
return stripped_text
def parse_traits(self, text):
#print("Traits:", text)
blacklist = ['[document]','noscript','header','html','meta','head', 'input','script', 'h1','img','h3', 'h2']
traits = []
found_list = re.finditer('<div class="trait-entry">(.*?)</div>', text)
for match in found_list:
trait = {}
#print(match.group())
key = re.findall('<b>(.*?)</b>', match.group())[0]
pos = match.group().find("</b>")
trait[key] = self.pf.parse_text_from_html(match.group()[pos:], blacklist)
traits.append(trait)
return traits
def parse_weapon_preload_stats(self, data):
key_list = ['type', 'range', 'reload']
#print("never called")
weapon_name = data['name']
for weapon in self.weapons:
if weapon['name'] == weapon_name:
for key in weapon.keys():
if key in key_list:
data[key] = weapon[key]
return data
def save_data(self, data):
data_holder['items'] = data
json_data = json.dumps(data_holder, indent=4)
# print(json_data)
filename = "json/items-pf2-v2.json"
f = open(filename, "w")
f.write(json_data)
f.close
return json_data
def main():
bf = ItemBuilder()
data = bf.load_all_items()
norm_data = bf.normalize_data(data, bf.item_keywords)
final_data = []
for data_point in norm_data:
final_data.append(bf.populate_data(data_point))
bf.save_data(final_data)
#bf.save_data(bf.build_monsters())
if __name__ == '__main__':
main()
```
#### File: jimbarnesrtp/pf2/testBuildFeats.py
```python
import unittest
from buildFeats import BuildFeatsV2
import os
class TestBuildFeatsV2(unittest.TestCase):
def setUp(self):
unittest.TestLoader.sortTestMethodsUsing = None
self.func = BuildFeatsV2()
def test(self):
self.assertTrue(True)
def test_load_Feats(self):
directory_path = os.getcwd()
self.assertGreater(len(self.func.pf.load_csv(directory_path+"/featscsv/RadGridExport-1.csv")), 0)
def test_norm_name(self):
self.assertEqual(self.func.pf.norm_link("<u><a href=\"Feats.aspx?ID=2516\">Aberration Kinship</a></u>"), "Aberration Kinship")
def test_norm_pfs(self):
self.assertEqual(self.func.pf.norm_pfs("<img alt=\"PFS Standard\" title=\"PFS Standard\" style=\"height:18px; padding:2px 10px 0px 2px\" src=\"Images\Icons\PFS_Standard.png\">"), "PFS Standard")
def test_norm_pfs_neg(self):
self.assertEqual(self.func.pf.norm_pfs("-"), "Excluded")
def test_norm_source(self):
self.assertEqual(self.func.pf.norm_link("<u><a href=\"Sources.aspx?ID=74\" title=\"Ancestry Guide\">Ancestry Guide</a></u>"), "Ancestry Guide")
def test_norm_rarity(self):
self.assertEqual(self.func.pf.norm_link("<u><a href=\"Traits.aspx?ID=28\">Common</a></u>"), "Common")
def test_norm_traits(self):
self.assertEqual(self.func.pf.norm_traits("<u><a href=\"Traits.aspx?ID=338\">Fleshwarp</a></u>"), ['Fleshwarp'])
def test_norm_multi_traits(self):
self.assertEqual(self.func.pf.norm_traits("<u><a href=\"Traits.aspx?ID=215\">Dhampir</a></u>, <u><a href=\"Traits.aspx?ID=317\">Lineage</a></u>"), ['Dhampir', 'Lineage'])
def test_normurl(self):
self.assertEqual(self.func.pf.norm_url("<u><a href=\"Feats.aspx?ID=2516\">Aberration Kinship</a></u>"), "https://2e.aonprd.com/Feats.aspx?ID=2516")
def test_norm_prereqs(self):
self.assertEqual(self.func.pf.norm_prereqs("trained in <u><a href=\"Skills.aspx?ID=8\">Lore</u></a>"), "trained in Lore")
def test_get_details(self):
#print(self.func.get_details("https://2e.aonprd.com/Feats.aspx?ID=779"))
self.assertIsNotNone(self.func.get_details("https://2e.aonprd.com/Feats.aspx?ID=779"))
def test_normalize_feat_data(self):
directory_path = os.getcwd()
self.assertGreater(len(self.func.normalize_feat_data(self.func.pf.load_csv(directory_path+"/featscsv/RadGridExport-1.csv"))), 0)
#def test_build_feats(self):
#self.assertGreater(len(self.func.build_feats()), 0)
#def test_save_feats(self):
##directory_path = os.getcwd()
#self.func.save_feats(self.func.build_feats())
##file = open(directory_path+"/feats-pf2-v3.json", "r")
##self.assertIsNotNone(file)
if __name__ == '__main__':
unittest.main()
```
#### File: jimbarnesrtp/pf2/testBuildItems.py
```python
import unittest
from buildItems import ItemBuilder
import os
class TestBuildItems(unittest.TestCase):
line_break = "++++++++++++++++++++++++++++++++++"
data = []
norm_data = []
def load_items(self) -> list:
if len(self.data) < 1:
self.data = self.func.load_all_items()
return self.data
def get_normalized_data(self) -> list:
if len(self.norm_data) < 1:
self.norm_data = self.func.normalize_data(self.load_items(), self.func.item_keywords)
#print("Length of Norm Data:", len(self.norm_data))
return self.norm_data
def setUp(self):
unittest.TestLoader.sortTestMethodsUsing = None
self.func = ItemBuilder()
def test(self):
self.assertTrue(True)
def test_load_items(self):
print(self.line_break)
directory_path = os.getcwd()
data = self.func.pf.load_csv(directory_path+"/itemcsv/RadGridExport-1.csv")
#print("Items:", data[1])
self.assertGreater(len(data), 0)
def test_load_all_items(self):
print(self.line_break)
#print("Item:", self.load_items()[4])
self.assertGreater(len(self.load_items()), 0)
def test_normalize_all_items464(self):
print(self.line_break)
data = self.get_normalized_data()
print("Norm_data:", data[464])
self.assertGreater(len(data), 0)
def test_populate_item464(self):
print(self.line_break)
item = self.func.populate_data(self.get_normalized_data()[464])
print("Populated Item:", item)
self.assertIsNotNone(item)
def test_normalize_all_items465(self):
print(self.line_break)
data = self.get_normalized_data()
print("Norm_data:", data[465])
self.assertGreater(len(data), 0)
def test_populate_item465(self):
print(self.line_break)
item = self.func.populate_data(self.get_normalized_data()[465])
print("Populated Item:", item)
self.assertIsNotNone(item)
def test_normalize_all_items1(self):
print(self.line_break)
data = self.get_normalized_data()
print("Norm_data:", data[1])
self.assertGreater(len(data), 0)
def test_populate_item1(self):
print(self.line_break)
item = self.func.populate_data(self.get_normalized_data()[1])
print("Populated Item:", item)
self.assertIsNotNone(item)
def test_normalize_all_items595(self):
print(self.line_break)
data = self.get_normalized_data()
print("Norm_data:", data[595])
self.assertGreater(len(data), 0)
def test_populate_item595(self):
print(self.line_break)
item = self.func.populate_data(self.get_normalized_data()[595])
print("Populated Item:", item)
self.assertIsNotNone(item)
if __name__ == '__main__':
unittest.main()
```
#### File: jimbarnesrtp/pf2/testBuildMonsters.py
```python
import unittest
from buildMonsters import BuildMonsters
import os
class TestBuildMonstersVs(unittest.TestCase):
def setUp(self):
unittest.TestLoader.sortTestMethodsUsing = None
self.func = BuildMonsters()
def test(self):
self.assertTrue(True)
# def test_load_monsters(self):
# directory_path = os.getcwd()
# data = self.func.pf.load_csv(directory_path+"/monstercsv/RadGridExport.csv")
# print("Monsters:", data[2])
# self.assertGreater(len(data), 0)
# def test_normalize_monsters(self):
# directory_path = os.getcwd()
# raw = self.func.pf.load_csv(directory_path+"/monstercsv/RadGridExport.csv")
# print("Monsters2:", self.func.normalize_monster_data(raw)[0])
# self.assertGreater(len(raw), 0)
# def test_get_monster_details(self):
# directory_path = os.getcwd()
# raw = self.func.normalize_monster_data(self.func.pf.load_csv(directory_path+"/monstercsv/RadGridExport.csv"))
# details = self.func.get_details(raw[0])
# #print("Details:", details)
# self.assertIsNotNone(details)
def test_parse_header(self):
        text = '<h1 class="title"><NAME></h1> Aapophs possess greater strength and stronger venom than their zyss kin, but they lack zyss\' intelligence and innate magic. Unlike their selfish superiors, aapophs are communal and group together to hunt, wrestle, and sleep curled together in pits. Though they\'re looked down upon and insulted by zyss, most aapophs lack the higher brain functions to recognize when they\'re being insulted, much less plan or execute a rebellion. Aapophs often have unusual physical mutations—horns, vestigial tails, or spines protruding from their scales—yet these variations have little impact on their overall combat prowess— and combat prowess is the measure by which zyss judge them. <br/> <br/> <b><u><a href="Skills.aspx?ID=5&General=true">Recall Knowledge - Humanoid</a></u> (<u><a href="Skills.aspx?ID=14">Society</a></u>)</b> : DC 20 '
raw = self.func.parse_header(text)
print("Parsed:", raw)
self.assertIsNotNone(raw)
def test_parse_recall(self):
text = 'Recall Knowledge - Humanoid ( Society ) : DC 20 Recall Knowledge - Undead (Religion) : DC 22'
raw = self.func.parse_recall(text)
print("Recall:", raw)
self.assertGreater(len(raw), 0)
def test_parse_stats(self):
text = {'name': '<NAME>', 'family': 'Serpentfolk', 'source': 'Bestiary 2', 'rarity': 'Uncommon', 'size': 'Medium', 'type': 'Humanoid', 'traits': ['Humanoid', 'Mutant', 'Serpentfolk'], 'level': '3', 'spoilers?': 'โ', 'link': 'https://2e.aonprd.com/Monsters.aspx?ID=799'}
monster = self.func.load_monster(text)
print("Monster:", monster)
self.assertIsNotNone(monster)
# def test_parse_stats2(self):
# text = {'name': '<NAME>', 'family': 'Serpentfolk', 'source': 'Bestiary 2', 'rarity': 'Uncommon', 'size': 'Medium', 'type': 'Humanoid', 'traits': ['Humanoid', 'Mutant', 'Serpentfolk'], 'level': '3', 'spoilers?': 'โ', 'link': 'https://2e.aonprd.com/Monsters.aspx?ID=333'}
# monster = self.func.load_monster(text)
# print("Monster:", monster)
# self.assertIsNotNone(monster)
# def test_monster_details(self):
# directory_path = os.getcwd()
# raw = self.func.pf.load_csv(directory_path+"/monstercsv/RadGridExport.csv")
# def test_save_data(self):
# self.assertIsNotNone(self.func.save_data(self.func.build_monsters()))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jimbelton/essexeld",
"score": 3
} |
#### File: essexeld/test/test_essexeld.py
```python
import httplib
import os
import subprocess
import time
import unittest
mainDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
logFile = mainDir + "/test/essexeld.log"
class TestEssexel(unittest.TestCase):
@classmethod
def setUpClass(cls):
global process
if os.path.exists(logFile):
os.remove(logFile)
process = subprocess.Popen([mainDir + "/target/essexeld"], stderr=open(logFile, "w"))
time.sleep(1)
@classmethod
def tearDownClass(cls):
process.terminate()
def testBadMethod(self):
connection = httplib.HTTPConnection("127.0.0.1", 8080)
connection.request("POST", "/index.html")
response = connection.getresponse()
        self.assertEqual(response.status, httplib.METHOD_NOT_ALLOWED) # POST /index.html
def testBadUrlPrefix(self):
connection = httplib.HTTPConnection("127.0.0.1", 8080)
connection.request("GET", "/index.html")
response = connection.getresponse()
self.assertEqual(response.status, httplib.BAD_REQUEST) # GET /index.html
def testUnblockedUrl(self):
connection = httplib.HTTPConnection("127.0.0.1", 8080)
connection.request("GET", "/urlinfo/1/google.com");
response = connection.getresponse()
self.assertEqual(response.status, httplib.NOT_FOUND) # GET /urlinfo/1/google.com
def testBlockedUrl(self):
connection = httplib.HTTPConnection("127.0.0.1", 8080)
connection.request("GET", "/urlinfo/1/Pornhub.com");
response = connection.getresponse()
self.assertEqual(response.status, httplib.OK) # GET /urlinfo/1/Pornhub.com
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jimbertools/cabot_alert_telegram",
"score": 2
} |
#### File: cabot_alert_telegram/cabot_alert_telegram/models.py
```python
from cabot.cabotapp.alert import AlertPlugin
from cabot.cabotapp.alert import AlertPluginUserData
from django.conf import settings
from django.db import models
from django.template import Context
from django.template import Template
from os import environ as env
import os
import telebot
extraInfo = env.get('TELEGRAM_BOT_EXTRA')
telegram_template = """{{ {{ extraInfo }} {{ service.name }} {% if service.overall_status == service.PASSING_STATUS %}OK{% else %}{{ service.overall_status }}{% endif %}: {{ scheme }}://{{ host }}{% url 'service' pk=service.id %}. {% if service.overall_status != service.PASSING_STATUS %}Checks failing: {% for check in service.all_failing_checks %}{% if check.check_category == 'Jenkins check' %}{% if check.last_result.error %}{{ check.name }} ({{ check.last_result.error|safe }}) {{jenkins_api}}job/{{ check.name }}/{{ check.last_result.job_number }}/console{% else %}{{ check.name }} {{jenkins_api}}/job/{{ check.name }}/{{check.last_result.job_number}}/console{% endif %}{% else %}{{ check.name }}{% if check.last_result.error %} ({{ check.last_result.error|safe }}){% endif %}{% endif %}{% endfor %}{% endif %}{% if alert %}{% for alias in users %}@{{ alias }}{% endfor %}{% endif %}"""
# This provides the telegram alias for each user.
# Each object corresponds to a User
class TelegramAlert(AlertPlugin):
name = "Telegram"
author = "<NAME>"
def send_alert(self, service, users, duty_officers):
alert = True
telegram_aliases = []
users = list(users) + list(duty_officers)
telegram_aliases = [u.telegram_id for u in TelegramAlertUserData.objects.filter(user__user__in=users) if u.telegram_id]
if service.overall_status == service.WARNING_STATUS:
alert = False # Don't alert at all for WARNING
if service.overall_status == service.ERROR_STATUS:
            if service.old_overall_status == service.ERROR_STATUS:
alert = False # Don't alert repeatedly for ERROR
if service.overall_status == service.PASSING_STATUS:
if service.old_overall_status == service.WARNING_STATUS:
alert = False # Don't alert for recovery from WARNING status
        c = Context({
            'service': service,
            'users': telegram_aliases,
            'host': settings.WWW_HTTP_HOST,
            'scheme': settings.WWW_SCHEME,
            'alert': alert,
            'jenkins_api': settings.JENKINS_API,
            'extraInfo': extraInfo,
        })
message = Template(telegram_template).render(c)
self._send_telegram_alert(message, service)
def _send_telegram_alert(self, message, service):
telegram_token = env.get('TELEGRAM_BOT_TOKEN')
chat_id = env.get('TELEGRAM_CHAT_ID')
tb = telebot.TeleBot(telegram_token)
tb.send_message(chat_id, message)
class TelegramAlertUserData(AlertPluginUserData):
name = "Telegram Plugin"
telegram_id = models.CharField(max_length=50, blank=True)
``` |
{
"source": "jimbertools/jumpscaleX_threebot",
"score": 3
} |
#### File: helloworld/actors/user.py
```python
from Jumpscale import j
class user(j.baseclasses.threebot_actor):
def _init(self, *args, **kwargs):
bcdb = j.data.bcdb.get("rafy")
self.model = bcdb.model_get(url="jumpscale.rafy.user")
def add(self, username="", schema_out=None, user_session=None):
"""
```in
username = (S)
```
"""
user = self.model.new()
user.username = username
user.save()
response = {"result": True, "error_code": "", "error_message": ""}
return j.data.serializers.json.dumps(response)
```
#### File: ThreeBotPackages/fruum/app.py
```python
import socketio
from Jumpscale import j
import os
from bottle import request, response, Bottle, abort, static_file, template
root = os.path.dirname(os.path.abspath(__file__))
app = Bottle()
app.debug = True
sio = socketio.Server(async_mode="gevent", cors_allowed_origins="*")
appws = socketio.WSGIApp(sio)
@sio.event
def connect(sid, environ):
print("connect ", sid)
@sio.on("fruum:auth")
def auth(sid, data):
return sio.emit(
"fruum:auth",
{
"user": {
"id": "123",
"anonymous": False,
"admin": True,
"blocked": False,
"username": "hamdy",
"displayname": "Hamdy",
"email": "",
"avatar": "",
"created": 0,
"last_login": 0,
"last_logout": 0,
"onboard": 0,
"karma": 0,
"logout_karma": 0,
"watch": [],
"tags": [],
"notifications": [],
"meta": {},
"last_visit": 1569166490774,
"server_now": 1569166490774,
}
},
)
@sio.on("fruum:view")
def view(sid, data):
sio.emit(
"fruum:view",
{
"id": "home",
"breadcrumb": [
{
"id": "home",
"breadcrumb": [],
"parent": None,
"parent_type": "",
"type": "category",
"created": 0,
"updated": 0,
"initials": "HOM",
"header": "Home",
"body": "",
"thumbnail": "",
"sticky": False,
"locked": False,
"visible": True,
"inappropriate": False,
"permission": 0,
"usage": 0,
"user_id": "",
"user_username": "",
"user_displayname": "",
"user_avatar": "",
"react_up": [],
"react_down": [],
"order": 0,
"children_count": 0,
"archived": False,
"archived_ts": 0,
"tags": [],
"attachments": [],
"meta": {},
}
],
"documents": [],
"online": {},
},
)
@sio.on("fruum:profile")
def profile(sid, data):
return "ok"
@sio.on("fruum:delete")
def delete(sid, data):
return "ok"
@sio.on("fruum:archive")
def archive(sid, data):
return "ok"
@sio.on("fruum:restore")
def restore(sid, data):
return "ok"
@sio.on("fruum:add")
def add(sid, data):
return "ok"
@sio.on("fruum:update")
def update(sid, data):
return "ok"
@sio.on("fruum:field")
def field(sid, data):
return "ok"
@sio.on("fruum:watch")
def watch(sid, data):
return "ok"
@sio.on("fruum:unwatch")
def unwatch(sid, data):
return "ok"
@sio.on("fruum:notifications")
def notifications(sid, data):
return "ok"
@sio.on("fruum:notify")
def notify(sid, data):
return "ok"
@sio.on("fruum:unnotify")
def unnotify(sid, data):
return "ok"
@sio.on("fruum:report")
def report(sid, data):
return "ok"
@sio.on("fruum:react")
def react(sid, data):
return "ok"
@sio.on("fruum:search")
def search(sid, data):
return "ok"
@sio.on("fruum:autocomplete")
def autocomplete(sid, data):
return "ok"
@sio.on("fruum:move")
def move(sid, data):
return "ok"
@sio.on("fruum:categories")
def categories(sid, data):
return "ok"
@sio.on("fruum:typing")
def typing(sid, data):
return "ok"
@sio.on("fruum:onboard")
def onboard(sid, data):
return "ok"
@sio.on("fruum:optimize")
def optimize(sid, data):
return "ok"
@sio.on("fruum:user:block")
def block(sid, data):
return "ok"
@sio.on("fruum:user:unblock")
def unblock(sid, data):
return "ok"
@sio.on("fruum:user:remove")
def user_remove(sid, data):
return "ok"
@sio.on("fruum:user:feed")
def feed(sid, data):
return "ok"
@sio.on("fruum:user:list")
def user_list(sid, data):
return "ok"
@sio.event
def disconnect(sid):
print("disconnect ", sid)
@app.route("/static/<filepath:path>")
def server_static(filepath):
return static_file(filepath, root=os.path.join(root, "static"))
def get_host():
parts = request.urlparts
return "{}://{}".format(parts.scheme, parts.netloc)
@app.route("/go/<app_name>")
def home(app_name):
res = template("%s/templates/app.js" % root, {"app_id": app_name})
response.headers["Accept-Ranges"] = "bytes"
response.headers["Access-Control-Allow-Headers"] = "Origin, X-Requested-With, Content-Type, Accept"
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Cache-Control"] = "public, max-age=0"
response.headers["Content-Type"] = "application/javascript"
response.headers["Vary"] = "Accept-Encoding"
return res.encode()
@app.route("/_/get/js/<app_name>")
def js(app_name):
res = template("%s/templates/script.js" % root, {})
response.headers["Accept-Ranges"] = "bytes"
response.headers["Access-Control-Allow-Headers"] = "Origin, X-Requested-With, Content-Type, Accept"
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Cache-Control"] = "public, max-age=0"
response.headers["Content-Type"] = "application/javascript; charset=utf-8"
response.headers["Vary"] = "Accept-Encoding"
return res.encode()
@app.route("/_/get/style/<app_name>")
def css(app_name):
res = static_file("/static/style.css", root=root)
res.headers["Accept-Ranges"] = "bytes"
res.headers["Access-Control-Allow-Headers"] = "Origin, X-Requested-With, Content-Type, Accept"
res.headers["Access-Control-Allow-Origin"] = "*"
res.headers["Cache-Control"] = "public, max-age=0"
res.headers["Content-Type"] = "text/css; charset=utf-8"
res.headers["Vary"] = "Accept-Encoding"
return res
@app.route("/_/get/html/<app_name>")
def html(app_name):
res = static_file("/static/app.html", root=root)
res.headers["Accept-Ranges"] = "bytes"
res.headers["Access-Control-Allow-Headers"] = "Origin, X-Requested-With, Content-Type, Accept"
res.headers["Access-Control-Allow-Origin"] = "*"
res.headers["Cache-Control"] = "public, max-age=0"
res.headers["Content-Type"] = "text/html; charset=utf-8"
res.headers["Vary"] = "Accept-Encoding"
return res
```
#### File: radicale/app/get.py
```python
import posixpath
from http import client
from urllib.parse import quote
from radicale import httputils, pathutils, storage, xmlutils
from radicale.log import logger
def propose_filename(collection):
"""Propose a filename for a collection."""
tag = collection.get_meta("tag")
if tag == "VADDRESSBOOK":
fallback_title = "Address book"
suffix = ".vcf"
elif tag == "VCALENDAR":
fallback_title = "Calendar"
suffix = ".ics"
else:
fallback_title = posixpath.basename(collection.path)
suffix = ""
title = collection.get_meta("D:displayname") or fallback_title
if title and not title.lower().endswith(suffix.lower()):
title += suffix
return title
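# Illustrative examples: an address book whose D:displayname is "Contacts" is proposed as
# "Contacts.vcf"; a calendar without a display name falls back to "Calendar.ics".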
class ApplicationGetMixin:
    def _content_disposition_attachment(self, filename):
        value = "attachment"
try:
encoded_filename = quote(filename, encoding=self.encoding)
except UnicodeEncodeError:
logger.warning("Failed to encode filename: %r", filename, exc_info=True)
encoded_filename = ""
if encoded_filename:
value += "; filename*=%s''%s" % (self.encoding, encoded_filename)
return value
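    # Illustrative example: with the default "utf-8" request encoding, a filename of
    # "Calendar.ics" yields the header value
    #   attachment; filename*=utf-8''Calendar.ics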
def do_GET(self, environ, base_prefix, path, user):
"""Manage GET request."""
# Redirect to .web if the root URL is requested
if not pathutils.strip_path(path):
web_path = ".web"
if not environ.get("PATH_INFO"):
web_path = posixpath.join(posixpath.basename(base_prefix), web_path)
return (client.FOUND, {"Location": web_path, "Content-Type": "text/plain"}, "Redirected to %s" % web_path)
# Dispatch .web URL to web module
if path == "/.web" or path.startswith("/.web/"):
return self.Web.get(environ, base_prefix, path, user)
if not self.access(user, path, "r"):
return httputils.NOT_ALLOWED
with self.Collection.acquire_lock("r", user):
item = next(self.Collection.discover(path), None)
if not item:
return httputils.NOT_FOUND
if not self.access(user, path, "r", item):
return httputils.NOT_ALLOWED
if isinstance(item, storage.BaseCollection):
tag = item.get_meta("tag")
if not tag:
return httputils.DIRECTORY_LISTING
content_type = xmlutils.MIMETYPES[tag]
                    content_disposition = self._content_disposition_attachment(propose_filename(item))
else:
content_type = xmlutils.OBJECT_MIMETYPES[item.name]
content_disposition = ""
headers = {"Content-Type": content_type, "Last-Modified": item.last_modified, "ETag": item.etag}
if content_disposition:
headers["Content-Disposition"] = content_disposition
answer = item.serialize()
return client.OK, headers, answer
```
#### File: radicale/app/propfind.py
```python
import itertools
import posixpath
import socket
from http import client
from xml.etree import ElementTree as ET
from radicale import httputils, pathutils, rights, storage, xmlutils
from radicale.log import logger
from Jumpscale import j
def xml_propfind(base_prefix, path, xml_request, allowed_items, user):
"""Read and answer PROPFIND requests.
Read rfc4918-9.1 for info.
The collections parameter is a list of collections that are to be included
in the output.
"""
# A client may choose not to submit a request body. An empty PROPFIND
# request body MUST be treated as if it were an 'allprop' request.
top_tag = xml_request[0] if xml_request is not None else ET.Element(xmlutils.make_tag("D", "allprop"))
props = ()
allprop = False
propname = False
if top_tag.tag == xmlutils.make_tag("D", "allprop"):
allprop = True
elif top_tag.tag == xmlutils.make_tag("D", "propname"):
propname = True
elif top_tag.tag == xmlutils.make_tag("D", "prop"):
props = [prop.tag for prop in top_tag]
if xmlutils.make_tag("D", "current-user-principal") in props and not user:
# Ask for authentication
# Returning the DAV:unauthenticated pseudo-principal as specified in
# RFC 5397 doesn't seem to work with DAVdroid.
return client.FORBIDDEN, None
# Writing answer
multistatus = ET.Element(xmlutils.make_tag("D", "multistatus"))
for item, permission in allowed_items:
write = permission == "w"
response = xml_propfind_response(
base_prefix, path, item, props, user, write=write, allprop=allprop, propname=propname
)
if response:
multistatus.append(response)
return client.MULTI_STATUS, multistatus
def xml_propfind_response(base_prefix, path, item, props, user, write=False, propname=False, allprop=False):
"""Build and return a PROPFIND response."""
if propname and allprop or (props and (propname or allprop)):
raise j.exceptions.Value("Only use one of props, propname and allprops")
is_collection = isinstance(item, storage.BaseCollection)
if is_collection:
is_leaf = item.get_meta("tag") in ("VADDRESSBOOK", "VCALENDAR")
collection = item
else:
collection = item.collection
response = ET.Element(xmlutils.make_tag("D", "response"))
href = ET.Element(xmlutils.make_tag("D", "href"))
if is_collection:
# Some clients expect collections to end with /
uri = pathutils.unstrip_path(item.path, True)
else:
uri = pathutils.unstrip_path(posixpath.join(collection.path, item.href))
href.text = xmlutils.make_href(base_prefix, uri)
response.append(href)
propstat404 = ET.Element(xmlutils.make_tag("D", "propstat"))
propstat200 = ET.Element(xmlutils.make_tag("D", "propstat"))
response.append(propstat200)
prop200 = ET.Element(xmlutils.make_tag("D", "prop"))
propstat200.append(prop200)
prop404 = ET.Element(xmlutils.make_tag("D", "prop"))
propstat404.append(prop404)
if propname or allprop:
props = []
# Should list all properties that can be retrieved by the code below
props.append(xmlutils.make_tag("D", "principal-collection-set"))
props.append(xmlutils.make_tag("D", "current-user-principal"))
props.append(xmlutils.make_tag("D", "current-user-privilege-set"))
props.append(xmlutils.make_tag("D", "supported-report-set"))
props.append(xmlutils.make_tag("D", "resourcetype"))
props.append(xmlutils.make_tag("D", "owner"))
if is_collection and collection.is_principal:
props.append(xmlutils.make_tag("C", "calendar-user-address-set"))
props.append(xmlutils.make_tag("D", "principal-URL"))
props.append(xmlutils.make_tag("CR", "addressbook-home-set"))
props.append(xmlutils.make_tag("C", "calendar-home-set"))
if not is_collection or is_leaf:
props.append(xmlutils.make_tag("D", "getetag"))
props.append(xmlutils.make_tag("D", "getlastmodified"))
props.append(xmlutils.make_tag("D", "getcontenttype"))
props.append(xmlutils.make_tag("D", "getcontentlength"))
if is_collection:
if is_leaf:
props.append(xmlutils.make_tag("D", "displayname"))
props.append(xmlutils.make_tag("D", "sync-token"))
if collection.get_meta("tag") == "VCALENDAR":
props.append(xmlutils.make_tag("CS", "getctag"))
props.append(xmlutils.make_tag("C", "supported-calendar-component-set"))
meta = item.get_meta()
for tag in meta:
if tag == "tag":
continue
clark_tag = xmlutils.tag_from_human(tag)
if clark_tag not in props:
props.append(clark_tag)
if propname:
for tag in props:
prop200.append(ET.Element(tag))
props = ()
for tag in props:
element = ET.Element(tag)
is404 = False
if tag == xmlutils.make_tag("D", "getetag"):
if not is_collection or is_leaf:
element.text = item.etag
else:
is404 = True
elif tag == xmlutils.make_tag("D", "getlastmodified"):
if not is_collection or is_leaf:
element.text = item.last_modified
else:
is404 = True
elif tag == xmlutils.make_tag("D", "principal-collection-set"):
tag = ET.Element(xmlutils.make_tag("D", "href"))
tag.text = xmlutils.make_href(base_prefix, "/")
element.append(tag)
elif (
tag
in (
xmlutils.make_tag("C", "calendar-user-address-set"),
xmlutils.make_tag("D", "principal-URL"),
xmlutils.make_tag("CR", "addressbook-home-set"),
xmlutils.make_tag("C", "calendar-home-set"),
)
and collection.is_principal
and is_collection
):
tag = ET.Element(xmlutils.make_tag("D", "href"))
tag.text = xmlutils.make_href(base_prefix, path)
element.append(tag)
elif tag == xmlutils.make_tag("C", "supported-calendar-component-set"):
human_tag = xmlutils.tag_from_clark(tag)
if is_collection and is_leaf:
meta = item.get_meta(human_tag)
if meta:
components = meta.split(",")
else:
components = ("VTODO", "VEVENT", "VJOURNAL")
for component in components:
comp = ET.Element(xmlutils.make_tag("C", "comp"))
comp.set("name", component)
element.append(comp)
else:
is404 = True
elif tag == xmlutils.make_tag("D", "current-user-principal"):
if user:
tag = ET.Element(xmlutils.make_tag("D", "href"))
tag.text = xmlutils.make_href(base_prefix, "/%s/" % user)
element.append(tag)
else:
element.append(ET.Element(xmlutils.make_tag("D", "unauthenticated")))
elif tag == xmlutils.make_tag("D", "current-user-privilege-set"):
privileges = [("D", "read")]
if write:
privileges.append(("D", "all"))
privileges.append(("D", "write"))
privileges.append(("D", "write-properties"))
privileges.append(("D", "write-content"))
for ns, privilege_name in privileges:
privilege = ET.Element(xmlutils.make_tag("D", "privilege"))
privilege.append(ET.Element(xmlutils.make_tag(ns, privilege_name)))
element.append(privilege)
elif tag == xmlutils.make_tag("D", "supported-report-set"):
# These 3 reports are not implemented
reports = [
("D", "expand-property"),
("D", "principal-search-property-set"),
("D", "principal-property-search"),
]
if is_collection and is_leaf:
reports.append(("D", "sync-collection"))
if item.get_meta("tag") == "VADDRESSBOOK":
reports.append(("CR", "addressbook-multiget"))
reports.append(("CR", "addressbook-query"))
elif item.get_meta("tag") == "VCALENDAR":
reports.append(("C", "calendar-multiget"))
reports.append(("C", "calendar-query"))
for ns, report_name in reports:
supported = ET.Element(xmlutils.make_tag("D", "supported-report"))
report_tag = ET.Element(xmlutils.make_tag("D", "report"))
supported_report_tag = ET.Element(xmlutils.make_tag(ns, report_name))
report_tag.append(supported_report_tag)
supported.append(report_tag)
element.append(supported)
elif tag == xmlutils.make_tag("D", "getcontentlength"):
if not is_collection or is_leaf:
encoding = collection.configuration.get("encoding", "request")
element.text = str(len(item.serialize().encode(encoding)))
else:
is404 = True
elif tag == xmlutils.make_tag("D", "owner"):
            # return empty element, if no owner available (rfc3744-5.1)
if collection.owner:
tag = ET.Element(xmlutils.make_tag("D", "href"))
tag.text = xmlutils.make_href(base_prefix, "/%s/" % collection.owner)
element.append(tag)
elif is_collection:
if tag == xmlutils.make_tag("D", "getcontenttype"):
if is_leaf:
element.text = xmlutils.MIMETYPES[item.get_meta("tag")]
else:
is404 = True
elif tag == xmlutils.make_tag("D", "resourcetype"):
if item.is_principal:
tag = ET.Element(xmlutils.make_tag("D", "principal"))
element.append(tag)
if is_leaf:
if item.get_meta("tag") == "VADDRESSBOOK":
tag = ET.Element(xmlutils.make_tag("CR", "addressbook"))
element.append(tag)
elif item.get_meta("tag") == "VCALENDAR":
tag = ET.Element(xmlutils.make_tag("C", "calendar"))
element.append(tag)
tag = ET.Element(xmlutils.make_tag("D", "collection"))
element.append(tag)
elif tag == xmlutils.make_tag("RADICALE", "displayname"):
# Only for internal use by the web interface
displayname = item.get_meta("D:displayname")
if displayname is not None:
element.text = displayname
else:
is404 = True
elif tag == xmlutils.make_tag("D", "displayname"):
displayname = item.get_meta("D:displayname")
if not displayname and is_leaf:
displayname = item.path
if displayname is not None:
element.text = displayname
else:
is404 = True
elif tag == xmlutils.make_tag("CS", "getctag"):
if is_leaf:
element.text = item.etag
else:
is404 = True
elif tag == xmlutils.make_tag("D", "sync-token"):
if is_leaf:
element.text, _ = item.sync()
else:
is404 = True
else:
human_tag = xmlutils.tag_from_clark(tag)
meta = item.get_meta(human_tag)
if meta is not None:
element.text = meta
else:
is404 = True
# Not for collections
elif tag == xmlutils.make_tag("D", "getcontenttype"):
element.text = xmlutils.get_content_type(item)
elif tag == xmlutils.make_tag("D", "resourcetype"):
# resourcetype must be returned empty for non-collection elements
pass
else:
is404 = True
if is404:
prop404.append(element)
else:
prop200.append(element)
status200 = ET.Element(xmlutils.make_tag("D", "status"))
status200.text = xmlutils.make_response(200)
propstat200.append(status200)
status404 = ET.Element(xmlutils.make_tag("D", "status"))
status404.text = xmlutils.make_response(404)
propstat404.append(status404)
if len(prop404):
response.append(propstat404)
return response
class ApplicationPropfindMixin:
def _collect_allowed_items(self, items, user):
"""Get items from request that user is allowed to access."""
for item in items:
if isinstance(item, storage.BaseCollection):
path = pathutils.unstrip_path(item.path, True)
if item.get_meta("tag"):
permissions = self.Rights.authorized(user, path, "rw")
target = "collection with tag %r" % item.path
else:
permissions = self.Rights.authorized(user, path, "RW")
target = "collection %r" % item.path
else:
path = pathutils.unstrip_path(item.collection.path, True)
permissions = self.Rights.authorized(user, path, "rw")
target = "item %r from %r" % (item.href, item.collection.path)
if rights.intersect_permissions(permissions, "Ww"):
permission = "w"
status = "write"
elif rights.intersect_permissions(permissions, "Rr"):
permission = "r"
status = "read"
else:
permission = ""
status = "NO"
logger.debug("%s has %s access to %s", repr(user) if user else "anonymous user", status, target)
if permission:
yield item, permission
def do_PROPFIND(self, environ, base_prefix, path, user):
"""Manage PROPFIND request."""
if not self.access(user, path, "r"):
return httputils.NOT_ALLOWED
try:
xml_content = self.read_xml_content(environ)
except RuntimeError as e:
logger.warning("Bad PROPFIND request on %r: %s", path, e, exc_info=True)
return httputils.BAD_REQUEST
except socket.timeout:
logger.debug("client timed out", exc_info=True)
return httputils.REQUEST_TIMEOUT
with self.Collection.acquire_lock("r", user):
items = self.Collection.discover(path, environ.get("HTTP_DEPTH", "0"))
# take root item for rights checking
item = next(items, None)
if not item:
return httputils.NOT_FOUND
if not self.access(user, path, "r", item):
return httputils.NOT_ALLOWED
# put item back
items = itertools.chain([item], items)
allowed_items = self._collect_allowed_items(items, user)
headers = {"DAV": httputils.DAV_HEADERS, "Content-Type": "text/xml; charset=%s" % self.encoding}
status, xml_answer = xml_propfind(base_prefix, path, xml_content, allowed_items, user)
if status == client.FORBIDDEN:
return httputils.NOT_ALLOWED
return status, headers, self.write_xml_content(xml_answer)
```
#### File: radicale/auth/__init__.py
```python
from importlib import import_module
from radicale.log import logger
from Jumpscale import j
INTERNAL_TYPES = ("none", "remote_user", "http_x_remote_user", "htpasswd")
def load(configuration):
"""Load the authentication manager chosen in configuration."""
auth_type = configuration.get("auth", "type")
if auth_type in INTERNAL_TYPES:
module = "radicale.auth.%s" % auth_type
else:
module = auth_type
try:
class_ = import_module(module).Auth
except Exception as e:
raise j.exceptions.Base("Failed to load authentication module %r: %s" % (module, e)) from e
logger.info("Authentication type is %r", auth_type)
return class_(configuration)
class BaseAuth:
def __init__(self, configuration):
self.configuration = configuration
def get_external_login(self, environ):
"""Optionally provide the login and password externally.
``environ`` a dict with the WSGI environment
If ``()`` is returned, Radicale handles HTTP authentication.
Otherwise, returns a tuple ``(login, password)``. For anonymous users
``login`` must be ``""``.
"""
return ()
def login(self, login, password):
"""Check credentials and map login to internal user
``login`` the login name
``password`` the password
Returns the user name or ``""`` for invalid credentials.
"""
raise j.exceptions.NotImplemented
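# Illustrative sketch (not part of Radicale itself): an external module named by the "auth"
# "type" option is expected to expose an Auth class built on BaseAuth, e.g.
#   class Auth(BaseAuth):
#       def login(self, login, password):
#           # accept one hard-coded account (example only)
#           return login if (login, password) == ("admin", "secret") else ""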
```
#### File: radicaleserver/radicale/config.py
```python
import math
import os
from collections import OrderedDict
from configparser import RawConfigParser
from radicale import auth, rights, storage, web
from radicale.log import logger
from Jumpscale import j
DEFAULT_CONFIG_PATH = os.pathsep.join(["?/etc/radicale/config", "?~/.config/radicale/config"])
def positive_int(value):
value = int(value)
if value < 0:
raise j.exceptions.Value("value is negative: %d" % value)
return value
def positive_float(value):
value = float(value)
if not math.isfinite(value):
raise j.exceptions.Value("value is infinite")
if math.isnan(value):
raise j.exceptions.Value("value is not a number")
if value < 0:
raise j.exceptions.Value("value is negative: %f" % value)
return value
def logging_level(value):
if value not in ("debug", "info", "warning", "error", "critical"):
raise j.exceptions.Value("unsupported level: %r" % value)
return value
def filepath(value):
if not value:
return ""
value = os.path.expanduser(value)
if os.name == "nt":
value = os.path.expandvars(value)
return os.path.abspath(value)
def list_of_ip_address(value):
def ip_address(value):
try:
address, port = value.strip().rsplit(":", 1)
return address.strip("[] "), int(port)
except ValueError:
raise j.exceptions.Value("malformed IP address: %r" % value)
return [ip_address(s.strip()) for s in value.split(",")]
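# Illustrative example: list_of_ip_address("127.0.0.1:5232, [::1]:5232") returns
# [("127.0.0.1", 5232), ("::1", 5232)].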
def _convert_to_bool(value):
if value.lower() not in RawConfigParser.BOOLEAN_STATES:
raise j.exceptions.Value("Not a boolean: %r" % value)
return RawConfigParser.BOOLEAN_STATES[value.lower()]
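# Illustrative example: _convert_to_bool("yes") and _convert_to_bool("On") return True,
# _convert_to_bool("0") returns False, and any other string raises j.exceptions.Value.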
# Default configuration
DEFAULT_CONFIG_SCHEMA = OrderedDict(
[
(
"server",
OrderedDict(
[
(
"hosts",
{
"value": "127.0.0.1:5232",
"help": "set server hostnames including ports",
"aliases": ["-H", "--hosts"],
"type": list_of_ip_address,
},
),
(
"max_connections",
{"value": "8", "help": "maximum number of parallel connections", "type": positive_int},
),
(
"max_content_length",
{"value": "100000000", "help": "maximum size of request body in bytes", "type": positive_int},
),
("timeout", {"value": "30", "help": "socket timeout", "type": positive_int}),
(
"ssl",
{
"value": "False",
"help": "use SSL connection",
"aliases": ["-s", "--ssl"],
"opposite": ["-S", "--no-ssl"],
"type": bool,
},
),
(
"certificate",
{
"value": "/sandbox/cfg/ssl/radicale.cert.pem",
"help": "set certificate file",
"aliases": ["-c", "--certificate"],
"type": filepath,
},
),
(
"key",
{
"value": "/sandbox/cfg/ssl/radicale.key.pem",
"help": "set private key file",
"aliases": ["-k", "--key"],
"type": filepath,
},
),
(
"certificate_authority",
{
"value": "",
"help": "set CA certificate for validating clients",
"aliases": ["--certificate-authority"],
"type": filepath,
},
),
("protocol", {"value": "PROTOCOL_TLSv1_2", "help": "SSL protocol used", "type": str}),
("ciphers", {"value": "", "help": "available ciphers", "type": str}),
(
"dns_lookup",
{"value": "True", "help": "use reverse DNS to resolve client address in logs", "type": bool},
),
]
),
),
(
"encoding",
OrderedDict(
[
("request", {"value": "utf-8", "help": "encoding for responding requests", "type": str}),
("stock", {"value": "utf-8", "help": "encoding for storing local collections", "type": str}),
]
),
),
(
"auth",
OrderedDict(
[
(
"type",
{
"value": "none",
"help": "authentication method",
"type": str,
"internal": auth.INTERNAL_TYPES,
},
),
(
"htpasswd_filename",
{"value": "/etc/radicale/users", "help": "htpasswd filename", "type": filepath},
),
("htpasswd_encryption", {"value": "bcrypt", "help": "htpasswd encryption method", "type": str}),
(
"realm",
{
"value": "Radicale - Password Required",
"help": "message displayed when a password is needed",
"type": str,
},
),
("delay", {"value": "1", "help": "incorrect authentication delay", "type": positive_float}),
]
),
),
(
"rights",
OrderedDict(
[
(
"type",
{
"value": "owner_only",
"help": "rights backend",
"type": str,
"internal": rights.INTERNAL_TYPES,
},
),
(
"file",
{
"value": "/etc/radicale/rights",
"help": "file for rights management from_file",
"type": filepath,
},
),
]
),
),
(
"storage",
OrderedDict(
[
(
"type",
{
"value": "multifilesystem",
"help": "storage backend",
"type": str,
"internal": storage.INTERNAL_TYPES,
},
),
(
"filesystem_folder",
{
"value": "/var/lib/radicale/collections",
"help": "path where collections are stored",
"type": filepath,
},
),
(
"max_sync_token_age",
{
"value": "2592000", # 30 days
"help": "delete sync token that are older",
"type": positive_int,
},
),
("hook", {"value": "", "help": "command that is run after changes to storage", "type": str}),
]
),
),
(
"web",
OrderedDict(
[
(
"type",
{
"value": "internal",
"help": "web interface backend",
"type": str,
"internal": web.INTERNAL_TYPES,
},
)
]
),
),
(
"logging",
OrderedDict(
[
("level", {"value": "warning", "help": "threshold for the logger", "type": logging_level}),
("mask_passwords", {"value": "True", "help": "mask passwords in logs", "type": bool}),
]
),
),
("headers", OrderedDict([("_allow_extra", True)])),
(
"internal",
OrderedDict(
[
("_internal", True),
(
"filesystem_fsync",
{"value": "True", "help": "sync all changes to filesystem during requests", "type": bool},
),
("internal_server", {"value": "False", "help": "the internal server is used", "type": bool}),
]
),
),
]
)
def parse_compound_paths(*compound_paths):
"""Parse a compound path and return the individual paths.
Paths in a compound path are joined by ``os.pathsep``. If a path starts
with ``?`` the return value ``IGNORE_IF_MISSING`` is set.
When multiple ``compound_paths`` are passed, the last argument that is
not ``None`` is used.
Returns a dict of the format ``[(PATH, IGNORE_IF_MISSING), ...]``
"""
compound_path = ""
for p in compound_paths:
if p is not None:
compound_path = p
paths = []
for path in compound_path.split(os.pathsep):
ignore_if_missing = path.startswith("?")
if ignore_if_missing:
path = path[1:]
path = filepath(path)
if path:
paths.append((path, ignore_if_missing))
return paths
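# Illustrative example (POSIX, where os.pathsep is ":"):
#   parse_compound_paths("?/etc/radicale/config:~/.config/radicale/config")
# returns [("/etc/radicale/config", True), ("<home>/.config/radicale/config", False)],
# with <home> expanded to the current user's home directory.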
def load(paths=()):
"""Load configuration from files.
``paths`` a list of the format ``[(PATH, IGNORE_IF_MISSING), ...]``.
"""
configuration = Configuration(DEFAULT_CONFIG_SCHEMA)
for path, ignore_if_missing in paths:
parser = RawConfigParser()
config_source = "config file %r" % path
try:
if not parser.read(path):
config = Configuration.SOURCE_MISSING
if not ignore_if_missing:
raise j.exceptions.Base("No such file: %r" % path)
else:
config = {s: {o: parser[s][o] for o in parser.options(s)} for s in parser.sections()}
except Exception as e:
raise j.exceptions.Base("Failed to load %s: %s" % (config_source, e)) from e
configuration.update(config, config_source, internal=False)
return configuration
class Configuration:
SOURCE_MISSING = {}
def __init__(self, schema):
"""Initialize configuration.
``schema`` a dict that describes the configuration format.
See ``DEFAULT_CONFIG_SCHEMA``.
"""
self._schema = schema
self._values = {}
self._configs = []
values = {}
for section in schema:
values[section] = {}
for option in schema[section]:
if option.startswith("_"):
continue
values[section][option] = schema[section][option]["value"]
self.update(values, "default config")
def update(self, config, source, internal=True):
"""Update the configuration.
``config`` a dict of the format {SECTION: {OPTION: VALUE, ...}, ...}.
Set to ``Configuration.SOURCE_MISSING`` to indicate a missing
configuration source for inspection.
``source`` a description of the configuration source
``internal`` allows updating "_internal" sections and skips the source
during inspection.
"""
new_values = {}
for section in config:
if section not in self._schema or not internal and self._schema[section].get("_internal", False):
raise j.exceptions.Base("Invalid section %r in %s" % (section, source))
new_values[section] = {}
if "_allow_extra" in self._schema[section]:
allow_extra_options = self._schema[section]["_allow_extra"]
elif "type" in self._schema[section]:
if "type" in config[section]:
plugin_type = config[section]["type"]
else:
plugin_type = self.get(section, "type")
allow_extra_options = plugin_type not in self._schema[section]["type"].get("internal", [])
else:
allow_extra_options = False
for option in config[section]:
if option in self._schema[section]:
type_ = self._schema[section][option]["type"]
elif allow_extra_options:
type_ = str
else:
raise j.exceptions.Base("Invalid option %r in section %r in " "%s" % (option, section, source))
raw_value = config[section][option]
try:
if type_ == bool:
raw_value = _convert_to_bool(raw_value)
new_values[section][option] = type_(raw_value)
except Exception as e:
raise j.exceptions.Base(
"Invalid %s value for option %r in section %r in %s: "
"%r" % (type_.__name__, option, section, source, raw_value)
) from e
self._configs.append((config, source, internal))
for section in new_values:
if section not in self._values:
self._values[section] = {}
for option in new_values[section]:
self._values[section][option] = new_values[section][option]
def get(self, section, option):
"""Get the value of ``option`` in ``section``."""
return self._values[section][option]
def get_raw(self, section, option):
"""Get the raw value of ``option`` in ``section``."""
fconfig = self._configs[0]
for config, _, _ in reversed(self._configs):
if section in config and option in config[section]:
fconfig = config
break
return fconfig[section][option]
def sections(self):
"""List all sections."""
return self._values.keys()
def options(self, section):
"""List all options in ``section``"""
return self._values[section].keys()
def copy(self, plugin_schema=None):
"""Create a copy of the configuration
``plugin_schema`` is a optional dict that contains additional options
for usage with a plugin. See ``DEFAULT_CONFIG_SCHEMA``.
"""
if plugin_schema is None:
schema = self._schema
skip = 1 # skip default config
else:
skip = 0
schema = self._schema.copy()
for section, options in plugin_schema.items():
if section not in schema or "type" not in schema[section] or "internal" not in schema[section]["type"]:
raise j.exceptions.Value("not a plugin section: %r" % section)
schema[section] = schema[section].copy()
schema[section]["type"] = schema[section]["type"].copy()
schema[section]["type"]["internal"] = [self.get(section, "type")]
for option, value in options.items():
if option in schema[section]:
raise j.exceptions.Value("option already exists in %r: %r" % (section, option))
schema[section][option] = value
copy = self.__class__(schema)
for config, source, allow_internal in self._configs[skip:]:
copy.update(config, source, allow_internal)
return copy
def inspect(self):
"""Inspect all external config sources and write problems to logger."""
for config, source, internal in self._configs:
if internal:
continue
if config is self.SOURCE_MISSING:
logger.info("Skipped missing %s", source)
else:
logger.info("Parsed %s", source)
```
#### File: radicaleserver/radicale/log.py
```python
import contextlib
import io
import logging
import multiprocessing
import os
import sys
import tempfile
import threading
from radicale import pathutils
from Jumpscale import j
try:
import systemd.journal
except ImportError:
systemd = None
LOGGER_NAME = "radicale"
LOGGER_FORMAT = "[%(ident)s] %(levelname)s: %(message)s"
logger = logging.getLogger(LOGGER_NAME)
class RemoveTracebackFilter(logging.Filter):
def filter(self, record):
record.exc_info = None
return True
removeTracebackFilter = RemoveTracebackFilter()
class IdentLogRecordFactory:
"""LogRecordFactory that adds ``ident`` attribute."""
def __init__(self, upstream_factory):
self.upstream_factory = upstream_factory
self.main_pid = os.getpid()
def __call__(self, *args, **kwargs):
record = self.upstream_factory(*args, **kwargs)
pid = os.getpid()
ident = "%x" % self.main_pid
if pid != self.main_pid:
ident += "%+x" % (pid - self.main_pid)
main_thread = threading.main_thread()
current_thread = threading.current_thread()
if current_thread.name and main_thread != current_thread:
ident += "/%s" % current_thread.name
record.ident = ident
return record
class RwLockWrapper:
def __init__(self):
self._file = tempfile.NamedTemporaryFile()
self._lock = pathutils.RwLock(self._file.name)
self._cm = None
def acquire(self, blocking=True):
assert self._cm is None
if not blocking:
raise j.exceptions.NotImplemented
cm = self._lock.acquire("w")
cm.__enter__()
self._cm = cm
def release(self):
assert self._cm is not None
self._cm.__exit__(None, None, None)
self._cm = None
class ThreadStreamsHandler(logging.Handler):
terminator = "\n"
def __init__(self, fallback_stream, fallback_handler):
super().__init__()
self._streams = {}
self.fallback_stream = fallback_stream
self.fallback_handler = fallback_handler
def createLock(self):
try:
self.lock = multiprocessing.Lock()
except Exception:
# HACK: Workaround for Android
self.lock = RwLockWrapper()
def setFormatter(self, form):
super().setFormatter(form)
self.fallback_handler.setFormatter(form)
def emit(self, record):
try:
stream = self._streams.get(threading.get_ident())
if stream is None:
self.fallback_handler.emit(record)
else:
msg = self.format(record)
stream.write(msg)
stream.write(self.terminator)
if hasattr(stream, "flush"):
stream.flush()
except Exception:
self.handleError(record)
@contextlib.contextmanager
def register_stream(self, stream):
if stream == self.fallback_stream:
yield
return
key = threading.get_ident()
self._streams[key] = stream
try:
yield
finally:
del self._streams[key]
def get_default_handler():
handler = logging.StreamHandler(sys.stderr)
# Detect systemd journal
with contextlib.suppress(ValueError, io.UnsupportedOperation):
journal_dev, journal_ino = map(int, os.environ.get("JOURNAL_STREAM", "").split(":"))
st = os.fstat(sys.stderr.fileno())
if systemd and st.st_dev == journal_dev and st.st_ino == journal_ino:
handler = systemd.journal.JournalHandler(SYSLOG_IDENTIFIER=LOGGER_NAME)
return handler
@contextlib.contextmanager
def register_stream(stream):
"""Register global errors stream for the current thread."""
yield
def setup():
"""Set global logging up."""
global register_stream
handler = ThreadStreamsHandler(sys.stderr, get_default_handler())
logging.basicConfig(format=LOGGER_FORMAT, handlers=[handler])
register_stream = handler.register_stream
log_record_factory = IdentLogRecordFactory(logging.getLogRecordFactory())
logging.setLogRecordFactory(log_record_factory)
set_level(logging.WARNING)
def set_level(level):
"""Set logging level for global logger."""
if isinstance(level, str):
level = getattr(logging, level.upper())
logger.setLevel(level)
if level == logging.DEBUG:
logger.removeFilter(removeTracebackFilter)
else:
logger.addFilter(removeTracebackFilter)
```
#### File: namemanager/actors/namemanager.py
```python
from Jumpscale import j
class namemanager(j.baseclasses.threebot_actor):
def _init(self, **kwargs):
pass
def tcpservice_register(self, name, domain, endpoint, user_session=None):
"""
Registers a tcpservice to be used by tcprouter in j.core.db
```in
name = (S) # service name
domain = (S) # (Server Name Indicator SNI) (e.g a.b.3bots.grid.tf)
endpoint = (S) # TLS endpoint 172.16.31.10:443 "ip:port" 3bot private wireguard ip
```
"""
j.tools.tf_gateway.tcpservice_register(name, domain, endpoint)
return True
def domain_register(self, name, domain, gateway_domain, user_session=None):
"""
Registers a domain in coredns (needs to be authoritative)
```in
name = (S) # 3bot subdomain name
domain = (S) # defaults to "3bots.grid.tf"
gateway_domain = "gateway.grid.tf" (S)
```
"""
j.tools.tf_gateway.domain_register_cname(name, domain, gateway_domain)
return True
```
#### File: threefold/phonebook/TFPhonebookFactory.py
```python
from Jumpscale import j
class TFPhonebookFactory(j.baseclasses.object, j.baseclasses.testtools):
__jslocation__ = "j.threebot.package.phonebook"
def client_get(self):
"""
j.threebot.package.phonebook.client_get()
:return:
"""
self.client = j.servers.threebot.local_start_default()
self.client.actors.package_manager.package_add(
"threebot_phonebook",
git_url="https://github.com/threefoldtech/jumpscaleX_threebot/tree/master/ThreeBotPackages/threefold/phonebook",
)
self.client.reload()
return self.client
def start(self):
self.client = self.client_get()
def test(self, name=""):
"""
kosmos -p 'j.threebot.package.phonebook.test()'
"""
self.start()
j.shell()
print(name)
return "OK"
```
#### File: threefold/tfgrid_token/TFTokenFactory.py
```python
from Jumpscale import j
class TFTokenFactory(j.baseclasses.threebot_factory):
__jslocation__ = "j.threebot.package.token"
_web = False
def start(self):
gedis_client = j.servers.threebot.local_start_default(web=True)
gedis_client.actors.package_manager.package_add(path=self._dirpath)
def client_get(self):
"""
j.threebot.package.token.client_get()
:return:
"""
j.servers.threebot.local_start_default(web=True)
self.client = j.servers.threebot.current.client
self.client.actors.package_manager.package_add(
git_url="https://github.com/threefoldtech/jumpscaleX_threebot/tree/master/ThreeBotPackages/threefold/tfgrid_token"
)
self.client.reload()
""" self.client.reload(namespace="token") """
return self.client
""" def _generate_dummy_data(self, bcdb, timeframe, price_from=0.06):
res = []
tframe = timeframe.lower()
if tframe == "year":
count = 5
elif tframe == "month":
count = 12
elif tframe == "day":
count = 31
elif tframe == "hour":
count = 24
else:
count = 0
for x in range(count):
price = bcdb.model_get(url="tfgrid.token.price.1")
t = price.new()
t.timeframe = timeframe
percent = random.uniform(0.000001, 99.99999)
percent_less = random.uniform(0.000001, 99.99999)
while percent_less > percent:
percent = random.uniform(0.000001, 99.99999)
percent_less = random.uniform(0.000001, 99.99999)
sign = random.uniform(-99, 99)
t.low = str((price_from - (price_from / 100) * percent)) + " USD"
t.high = str((price_from + (price_from / 100) * percent)) + " USD"
if sign > 0:
# bull opening < closing
t.opening = str((price_from - (price_from / 100) * percent_less)) + " USD"
t.closing = str((price_from + (price_from / 100) * percent_less)) + " USD"
else:
# bear opening > closing
t.closing = str((price_from - (price_from / 100) * percent_less)) + " USD"
t.opening = str((price_from + (price_from / 100) * percent_less)) + " USD"
if tframe == "year":
t.time = str(2014 + x) + "/01/01"
elif tframe == "month":
t.time = "2019/" + str(x + 1) + "/01"
elif tframe == "day":
t.time = "2019/10/" + str(x + 1)
elif tframe == "hour":
t.time = 1569888000 + x * 3600
res.append(t)
return res """
def test(self, name=""):
"""
kosmos 'j.threebot.package.token.test()'
"""
if "tf_grid_token" in [b.name for b in j.data.bcdb.instances]:
m = j.data.bcdb.get("tf_grid_token")
else:
m = j.data.bcdb.new("tf_grid_token")
m.models_add(path=self._dirpath + "/models")
gedis_cli = self.client_get()
cl = j.clients.redis.get(port=8901)
assert cl.execute_command("PING")
assert gedis_cli.actors.token.delete_all()
gedis_cli.actors.token.feed_dummy_data_prices("year", 2019, 10, 1, 0.06)
res = gedis_cli.actors.token.list()
assert len(res.prices) == 5
res = gedis_cli.actors.token.get_market()
print("****************************get_market*********res****:%s" % res)
assert res.max_supply == 100000000000
assert res.potential_revenue_per_token_usd > 0
res = gedis_cli.actors.token.get_capacity()
print("****************************get_capacity*********res****:%s" % res)
assert res.compute_units > 19000
assert res.cores > 0
res = gedis_cli.actors.token.find_prices("year")
assert len(res) == 5
res = gedis_cli.actors.token.find_prices("year", from_date="2018/02/02")
assert len(res) == 0
res = gedis_cli.actors.token.find_prices("year", from_date="2018/01/01")
assert len(res) == 1
res = gedis_cli.actors.token.find_prices("year", from_date="2016/02/02", to_date="2018/05/05")
assert len(res) == 2
print("****************************find*********res****:%s" % res)
gedis_cli.actors.token.feed_dummy_data_prices("month", 2019, 10, 1, 0.06)
assert len(gedis_cli.actors.token.find_prices("month")) == 12
gedis_cli.actors.token.feed_dummy_data_prices("day", 2019, 10, 1, 0.06)
assert len(gedis_cli.actors.token.find_prices("day")) == 31
gedis_cli.actors.token.feed_dummy_data_prices("hour", 2019, 10, 1, 0.06)
assert len(gedis_cli.actors.token.find_prices("hour")) == 24
res = gedis_cli.actors.token.find_prices("hour", from_date="2019/10/01 01:55", to_date="2019/10/01 05:42")
assert len(res) == 4
self._log_info("All TESTS DONE")
return "OK"
``` |
{
"source": "jim-billy/terraform-provider-site24x7",
"score": 2
} |
#### File: utilities/importer/site24x7_importer.py
```python
import json
import os, sys
import subprocess
import logging
import argparse
WorkingDirectory = os.getcwd()
# ProjectHome = WorkingDirectory.replace("utilities", "")
# TerraformConfigurationFile = ProjectHome + "main.tf"
EmptyConfigurationFile = WorkingDirectory + os.path.sep + "empty_configuration.tf"
ImportedConfigurationFile = WorkingDirectory + os.path.sep + "output"+ os.path.sep +"imported_configuration.tf"
ImportCommandsFile = WorkingDirectory + os.path.sep + "output"+ os.path.sep +"import_commands.sh"
MonitorsToImportFile = WorkingDirectory + os.path.sep + "monitors_to_import.json"
Site24x7TerraformResourceNameVsAttributeTypesFile = WorkingDirectory + os.path.sep + "conf" + os.path.sep + "resource_vs_attribute_types.json"
TerraformStateFile = WorkingDirectory + os.path.sep + "terraform.tfstate"
MonitorsToImport = None
Site24x7TerraformResourceNameVsAttributeTypes = None
Site24x7TerraformResourceVsMonitorType = {
"site24x7_website_monitor" : "URL",
"site24x7_ssl_monitor" : "SSL_CERT",
"site24x7_rest_api_monitor" : "RESTAPI",
"site24x7_server_monitor" : "SERVER",
}
ResourceNameVsAttributesJSONInState = {}
ResourceNameVsFullConfiguration = {}
Site24x7TerraformResourceName = None
ResourceType = None
TfState = None
S247Importer = None
CommandLineParser = None
class Site24x7Importer:
# Load monitors to import
# Populate empty configuration needed for import
def __init__(self, filePath):
self.file = filePath
self.monitors_to_import = None
# Mandatory input from command line
self.resource_type_in_site24x7 = Site24x7TerraformResourceVsMonitorType[Site24x7TerraformResourceName]
self.resource_name_vs_empty_configuration = {}
self.resource_name_vs_import_commands = {}
self.site24x7_resource_vs_attribute_types = None
self.load_monitors_to_import()
self.load_site24x7_resource_vs_attribute_types()
self.populate_empty_configuration()
# Load monitors to import
def load_monitors_to_import(self):
if not os.path.exists(self.file):
logging.info("Unable to find monitors to import file : "+self.file)
sys.exit(1)
try:
self.monitors_to_import = FileUtil.read_json(self.file)
except Exception as e:
logging.info("Error while loading monitors_to_import.json : "+str(e))
logging.info("Please provide the list of monitors to be imported (eg) [\"123\", \"456\", \"789\"] in monitors_to_import.json ")
logging.info("Monitors to import : "+str(self.monitors_to_import))
def load_site24x7_resource_vs_attribute_types(self):
if not os.path.exists(Site24x7TerraformResourceNameVsAttributeTypesFile):
logging.info("Unable to find resource_vs_attribute_types.json file : ")
sys.exit(1)
try:
self.site24x7_resource_vs_attribute_types = FileUtil.read_json(Site24x7TerraformResourceNameVsAttributeTypesFile)
except Exception as e:
logging.info("Error while loading resource_vs_attribute_types.json : "+str(e))
# logging.info("Monitors to import : "+str(self.site24x7_resource_vs_attribute_types))
# Populate empty configuration needed for import
def populate_empty_configuration(self):
for monitorID in self.monitors_to_import:
resourceName = ResourceType+"_"+monitorID
resourceStr = "resource \""+Site24x7TerraformResourceName+"\" \""+resourceName+ "\" {"
filebuffer = ["\n",resourceStr, "\n", "}","\n"]
logging.debug("Empty Configuration : "+ ''.join(filebuffer))
self.resource_name_vs_empty_configuration[resourceName] = ''.join(filebuffer)
# Import Command
import_command_list = ["terraform", "import", Site24x7TerraformResourceName+"."+resourceName, monitorID]
self.resource_name_vs_import_commands[resourceName] = import_command_list
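# Illustrative sketch of what populate_empty_configuration produces, assuming a hypothetical
# monitor ID "123456" and --resource site24x7_website_monitor (so ResourceType is "URL"):
# resource_name_vs_empty_configuration["URL_123456"] ->
#   resource "site24x7_website_monitor" "URL_123456" {
#   }
# resource_name_vs_import_commands["URL_123456"] ->
#   ["terraform", "import", "site24x7_website_monitor.URL_123456", "123456"]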
def import_monitors(self):
terraform_conf_file_data = FileUtil.read(EmptyConfigurationFile)
for monitorID in self.monitors_to_import:
resourceName = self.resource_type_in_site24x7+"_"+monitorID
resource_name_vs_details_in_state = TfState.get_resource_name_vs_details()
if resourceName not in resource_name_vs_details_in_state:
logging.info("Importing the resource : "+resourceName)
empty_conf_to_append = self.resource_name_vs_empty_configuration[resourceName]
# Check whether the empty configuration is already present in the file.
if empty_conf_to_append not in terraform_conf_file_data:
FileUtil.append(EmptyConfigurationFile, empty_conf_to_append)
# Execute the import command
Util.execute_command(self.resource_name_vs_import_commands[resourceName])
else:
logging.info("Resource : "+resourceName+" already imported")
# Parse the state file to populate resource_name_vs_attributes
TfState.parse()
def convert_attributes_from_state_file_to_configuration(self):
resource_name_vs_attributes = TfState.get_resource_name_vs_attributes()
print("resource_name_vs_attributes ============= ",resource_name_vs_attributes)
if not resource_name_vs_attributes:
logging.info("Failed to convert attributes from state file to configuration!! resource_name_vs_attributes info is empty in terraform state")
return
for resourceName in self.resource_name_vs_empty_configuration.keys():
configurationBuffer = []
resourceStr = "resource \""+Site24x7TerraformResourceName+"\" \""+resourceName+ "\" {"
configurationBuffer.append("\n")
configurationBuffer.append(resourceStr)
attributesMap = resource_name_vs_attributes[resourceName]
for attribute in attributesMap:
# logging.info(attribute+" : "+str(attributesMap[attribute]))
if attribute == "id":
continue
if attributesMap[attribute]:
formatted_attribute = self.get_formatted_attribute(attribute, attributesMap)
# logging.info("formatted_attribute : "+ str(formatted_attribute))
if formatted_attribute:
configurationBuffer.append(" \n ")
configurationBuffer.append(attribute)
configurationBuffer.append(" = ")
configurationBuffer.append(formatted_attribute)
configurationBuffer.append("\n}\n")
ResourceNameVsFullConfiguration[resourceName] = self.get_formatted_configuration(configurationBuffer)
logging.info("Configuration : "+ ResourceNameVsFullConfiguration[resourceName])
def get_formatted_configuration(self, configurationBuffer):
confStr = ''.join(map(str, configurationBuffer))
confStr = confStr.replace("\'","\"")
confStr = confStr.replace("True", "true")
confStr = confStr.replace("False","false")
# confStr = confStr.encode('ascii')
return confStr
def get_formatted_attribute(self, attribute, attributes_map):
to_return = None
attribute_type = self.get_attribute_type(attribute)
# logging.info("attribute_type : "+ str(attribute_type))
if attribute_type == "str":
to_return = "\""+attributes_map[attribute]+"\""
elif attribute_type == "list":
to_return = [str(i) for i in attributes_map[attribute]]
else:
to_return = attributes_map[attribute]
return to_return
def get_attribute_type(self, attribute):
attribute_name_vs_type = self.site24x7_resource_vs_attribute_types[Site24x7TerraformResourceName]
if attribute in attribute_name_vs_type:
return attribute_name_vs_type[attribute]
def write_imported_configuration(self):
config_str = ''
for resourceName in ResourceNameVsFullConfiguration.keys():
config = ResourceNameVsFullConfiguration[resourceName]
config_str = config_str + config + "\n"
FileUtil.write(ImportedConfigurationFile, config_str)
logging.info("Please check the imported configuration in "+ImportedConfigurationFile)
def write_import_commands(self):
import_commands_list = []
import_commands_list.append("#!/bin/bash")
for import_command in self.resource_name_vs_import_commands.values():
import_commands_list.append("\n")
import_commands_list.append(" ".join(import_command))
print("import_commands_list : ",import_commands_list)
FileUtil.write(ImportCommandsFile, import_commands_list)
logging.info("Please execute the import_commands.sh file for importing your monitors : "+ImportCommandsFile)
class TerraformState:
def __init__(self, filePath):
logging.info("Loading Terraform state information")
self.file = filePath
self.resource_name_vs_details = {}
self.resource_name_vs_attributes = {}
self.parse()
def get_resource_name_vs_details(self):
return self.resource_name_vs_details
def get_resource_name_vs_attributes(self):
return self.resource_name_vs_attributes
# Parses the terraform state file and populates resource_name_vs_attributes
def parse(self):
if not os.path.exists(self.file):
logging.info("Unable to find the Terraform state file : "+self.file)
return
with open(self.file, 'r') as terraformStateFile:
stateFileJSON = json.load(terraformStateFile)
if "resources" in stateFileJSON:
resourcesList = stateFileJSON["resources"]
for resource in resourcesList:
if resource["mode"] == "managed":
resourceName = resource["name"]
self.resource_name_vs_details[resourceName] = resource
self.populate_resource_name_vs_attributes_json(resourceName, resource)
logging.info("Resources in Terraform State : "+str(self.resource_name_vs_details.keys()))
def populate_resource_name_vs_attributes_json(self, resourceName, resourceMap):
instancesList = resourceMap["instances"]
for instanceMap in instancesList:
if "attributes" in instanceMap:
attributesMap = instanceMap["attributes"]
self.resource_name_vs_attributes[resourceName] = attributesMap
# Utility for grabbing attribute types
def get_attribute_types(self):
dict_to_return = {}
for resourceName in self.resource_name_vs_attributes.keys():
attributesDict = {}
attributesMap = self.resource_name_vs_attributes[resourceName]
for attribute in attributesMap:
val = attributesMap[attribute]
typeOfVal = type(val).__name__
if typeOfVal == "NoneType":
typeOfVal = "str"
elif typeOfVal == "dict":
typeOfVal = "map"
attributesDict[attribute] = typeOfVal
dict_to_return[resourceName] = attributesDict
return dict_to_return
def write_attribute_types(self):
attributeTypes = self.get_attribute_types()
FileUtil.write_json(Site24x7TerraformResourceNameVsAttributeTypesFile, attributeTypes)
class FileUtil:
@staticmethod
def write_json(file_name, data):
with open(file_name, "w") as file_handle:
json_data = json.dumps(data)
file_handle.write(json_data)
@staticmethod
def append(file_name, data_to_append):
with open(file_name, 'a') as file_handle:
file_handle.writelines(data_to_append)
@staticmethod
def write(file_name, data_to_append):
with open(file_name, 'w') as file_handle:
file_handle.writelines(data_to_append)
@staticmethod
def read_json(file_name):
json_to_return = None
with open(file_name, 'r') as file_handle:
data=file_handle.read()
json_to_return = json.loads(data)
return json_to_return
@staticmethod
def read(file_name):
data_to_return = None
with open(file_name, 'r') as file_handle:
data_to_return = file_handle.read()
return data_to_return
class Util:
@staticmethod
def initialize_logging():
logging.basicConfig(level=logging.INFO)
logging.info("Logging Initialized : WorkingDirectory : " + WorkingDirectory)
# terraform import site24x7_website_monitor.url_123456000026467038 123456000025786003
@staticmethod
def execute_command(command_list):
process = subprocess.Popen(command_list,
stdout=subprocess.PIPE,
# cwd=ProjectHome,
universal_newlines=True)
while True:
output = process.stdout.readline()
logging.info(output.strip())
# Do something else
return_code = process.poll()
if return_code is not None:
logging.info('RETURN CODE : '+ str(return_code))
# Process has finished, read rest of the output
for output in process.stdout.readlines():
logging.info(output.strip())
break
@staticmethod
def parse_command_line_args():
global CommandLineParser, Site24x7TerraformResourceName, ResourceType
CommandLineParser = argparse.ArgumentParser(description='Optional command description')
CommandLineParser.add_argument('--resource', type=str, required=True,
help='Type of the Site24x7 terraform resource')
args = CommandLineParser.parse_args()
logging.info("Input argument resource : " + args.resource)
Site24x7TerraformResourceName = args.resource
ResourceType = Site24x7TerraformResourceVsMonitorType[Site24x7TerraformResourceName]
def init():
global TfState, S247Importer
Util.initialize_logging()
Util.parse_command_line_args()
S247Importer = Site24x7Importer(MonitorsToImportFile)
TfState = TerraformState(TerraformStateFile)
def main():
init()
# Invoke S247Importer.import_monitors() only after populating state information
S247Importer.import_monitors()
S247Importer.convert_attributes_from_state_file_to_configuration()
S247Importer.write_imported_configuration()
S247Importer.write_import_commands()
main()
``` |
{
"source": "Jimbly/SublimeTransposeWord",
"score": 3
} |
#### File: Jimbly/SublimeTransposeWord/transpose_word.py
```python
import sublime, sublime_plugin, re
class TransposeWordCommand(sublime_plugin.TextCommand):
def run(self, edit):
# TODO: Use the Sublime language definitions to determine which characters
# delineate value tokens better (although Javascript is missing $, so that
# might just make things worse for me)
r = re.compile('^[\\w\\$]+$')
view = self.view
end_sels = [];
for s in view.sel():
# do transpose
word1 = view.word(s)
if not r.match(view.substr(word1)):
word1 = view.word(word1.begin() - 1)
word2 = view.word(word1.end() + 1)
if not r.match(view.substr(word2)):
word2 = view.word(word2.end() + 1)
word1_text = view.substr(word1)
word2_text = view.substr(word2)
view.replace(edit, word2, word1_text)
view.replace(edit, word1, word2_text)
end_sels.append(sublime.Region(word2.end(), word2.end()))
view.sel().clear()
for s in end_sels:
view.sel().add(s)
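# A minimal sketch of the transpose behaviour, traced from run() above (hypothetical buffer):
# with the caret inside "one" in the text "one two", word1 is "one" and word2 is "two";
# after the two replace() calls the buffer reads "two one" and the caret is left at the
# end of the second word's region.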
class TransposeCharCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
end_sels = [];
for s in view.sel():
# do transpose
if not s.begin() == s.end():
# Selection is more than one character, no character transpose
continue
right = view.substr(s.begin())
view.erase(edit, sublime.Region(s.begin(), s.begin() + 1));
view.insert(edit, s.begin()-1, right);
``` |
{
"source": "jimbo00000/meetup-attendance-graph",
"score": 3
} |
#### File: meetup-attendance-graph/data/get_meetup_attendance.py
```python
import json
import urllib
from operator import itemgetter
import time
from meetupapikey import getKey
"""
# The meetupapikey.py file should look like this:
def getKey():
return "PASTE_YOUR_KEY_HERE"
"""
def get_data(endpoint, params):
url = endpoint + urllib.urlencode(params) + "&offset=%s"
data = []
offset= 0
while True:
print url%offset
response = urllib.urlopen(url%offset)
s = unicode(response.read(), errors="ignore")
try:
results = json.loads(s)['results']
except KeyError:
print s
raise IOError("something went wrong...")
if len(results) == 0:
print "no more results returned"
break
data.extend(results)
offset += 1
return data
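# Rough usage sketch (hypothetical key value), traced from get_data() above:
# get_data("https://api.meetup.com/2/events?", {"key": "...", "group_urlname": "Boston-Virtual-Reality", "status": "past"})
# requests ...&offset=0, then ...&offset=1, and so on, until a page returns an empty
# "results" list, and returns all result dicts concatenated into one list.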
params = {
"key": getKey(),
"group_urlname": "Boston-Virtual-Reality",
"status":"past"
}
endpoint = 'https://api.meetup.com/2/events?'
with open('groups.json') as group_json:
group_data = json.load(group_json)
groups = group_data[0]["groups"]
for g in groups:
print(g)
params["group_urlname"] = g
print(params)
outfile = "meetup_history_" + g +".json"
print(outfile)
if False: # True for live API calls
time.sleep(5)
meetups = get_data(endpoint, params)
meetups.sort(key=itemgetter('time'))
json.dump(meetups, open(outfile,'w'))
``` |
{
"source": "jimbo00000/RiftSkeleton",
"score": 3
} |
#### File: RiftSkeleton/tools/hardcode_shaders.py
```python
from __future__ import print_function
import sys
import os
header = """/* GENERATED FILE - DO NOT EDIT!
* Created by hardcode_shaders.py.
*
*/
"""
def generateSourceFile():
"""
Output a hardcoded C++ source file with shaders as strings.
"""
shaderPath = "shaders/"
autogenDir = "autogen/"
sourceFileOut = autogenDir + "g_shaders.h"
# Create autogen/ if it's not there. (Do this before the fallback below,
# since the fallback file is also written into autogen/.)
if not os.path.isdir(autogenDir):
os.makedirs(autogenDir)
# Write a small comment if no shaders directory.
if not os.path.isdir(shaderPath):
print("Directory", shaderPath, "does not exist.")
with open(sourceFileOut,'w') as outStream:
print("/* Directory", shaderPath, "does not exist. */", file=outStream)
return
print("hardcode_shaders.py writing the following shaders to",autogenDir,":")
shaderList = os.listdir(shaderPath)
# filter out some extraneous results: directories, svn files...
shaderList = [s for s in shaderList if s != '.svn']
shaderList = [s for s in shaderList if not os.path.isdir(shaderPath + s)]
for shaderName in shaderList:
print(" hardcoding shader:", shaderName)
tab = " "
decl = "const char* "
newline = "\\n"
quote = "\""
with open(sourceFileOut,'w') as outStream:
print(header, file=outStream)
print("#include <map>", file=outStream)
#shaderList = os.listdir(shaderPath)
for shaderName in shaderList:
file = shaderPath + shaderName
lines = open(file).read().splitlines()
varname = shaderName.replace(".","_")
print("\n" + decl + varname + " = ", file=outStream)
for l in lines:
if l != "":
l = l.replace('"', '\\"')
print(tab + quote + l + newline + quote, file=outStream)
print(";", file=outStream)
mapvar = "g_shaderMap"
print("\n", file=outStream)
print("std::map<std::string, std::string> " + mapvar + ";", file=outStream)
print("\n", file=outStream)
print("void initShaderList() {", file=outStream)
for fname in shaderList:
varname = fname.replace(".","_")
print(tab + mapvar + "[\"" + fname + "\"] = " + varname + ";", file=outStream)
print("}", file=outStream)
#
# Main: enter here
#
def main(argv=None):
# TODO: create directory if it doesn't exist
generateSourceFile()
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jimbo1qaz/amktools",
"score": 2
} |
#### File: amktools/amktools/mmkparser.py
```python
import argparse
import copy
import heapq
import itertools
import math
import numbers
import os
import re
import sys
from abc import abstractmethod, ABC
from contextlib import contextmanager
from fractions import Fraction
from io import StringIO
from pathlib import Path
from typing import (
Dict,
List,
Union,
Pattern,
Tuple,
Callable,
Optional,
ClassVar,
Iterator,
TypeVar,
Iterable,
)
import dataclasses
import pygtrie
from dataclasses import dataclass, field
from more_itertools import peekable, split_before
from ruamel.yaml import YAML
# We cannot identify instrument macros.
# The only way to fix that would be to expand macros, which would both complicate the program and
# make the generated source less human-readable.
from amktools.util import ceildiv, coalesce
from amktools.utils.parsing import safe_eval
from amktools.utils.substring_trie import StringSlice
class MMKError(ValueError):
pass
def perr(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
yaml = YAML(typ="safe")
def remove_ext(path):
head = os.path.splitext(path)[0]
return head
from amktools.common import TUNING_PATH, WAVETABLE_PATH
TXT_SUFFIX = ".txt"
RETURN_ERR = 1
def main(args: List[str]) -> int:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
argument_default=argparse.SUPPRESS,
description="Parse one or more MMK files to a single AddmusicK source file.",
epilog="""Examples:
`mmk_parser file.mmk` outputs to file.txt
`mmk_parser file.mmk infile2.mmk` outputs to file.txt
`mmk_parser file.mmk -o outfile.txt` outputs to outfile.txt""",
)
# fmt: off
parser.add_argument("files", help="Input files, will be concatenated", nargs="+")
parser.add_argument("-t", "--tuning",
help="Tuning file produced by wav2brr (defaults to {})".format(TUNING_PATH))
parser.add_argument("-w", "--wavetable",
help=f"Wavetable metadata produced by wavetable.to_brr (defaults to {WAVETABLE_PATH})")
parser.add_argument("-o", "--outpath", help="Output path (if omitted)")
# fmt: on
args = parser.parse_args(args)
# FILES
inpaths = args.files
first_path = inpaths[0]
mmk_dir = Path(first_path).parent
datas = []
for _inpath in inpaths:
with open(_inpath) as ifile:
datas.append(ifile.read())
datas.append("\n")
in_str = "\n".join(datas)
# TUNING
if "tuning" in args:
tuning_path = Path(args.tuning)
else:
tuning_path = mmk_dir / TUNING_PATH
try:
with open(tuning_path) as f:
tuning = yaml.load(f)
if type(tuning) != dict:
perr(
"invalid tuning file {}, must be YAML key-value map".format(
tuning_path.resolve()
)
)
return RETURN_ERR
except FileNotFoundError:
tuning = None
# WAVETABLE
wavetable_path = vars(args).get("wavetable", mmk_dir / WAVETABLE_PATH)
try:
wavetable_path = Path(wavetable_path)
wavetable = yaml.load(wavetable_path)
wavetable = {
k: WavetableMetadata(name=k, **meta) for k, meta in wavetable.items()
}
# type: Dict[str, WavetableMetadata]
except FileNotFoundError:
wavetable = None
# OUT PATH
if "outpath" in args:
outpath = args.outpath
else:
outpath = remove_ext(first_path) + TXT_SUFFIX
for _inpath in inpaths:
if Path(outpath).resolve() == Path(_inpath).resolve():
perr("Error: Output file {} will overwrite an input file!".format(outpath))
if ".txt" in _inpath.lower():
perr("Try renaming input files to .mmk")
return RETURN_ERR
# PARSE
parser = MMKParser(in_str, tuning, wavetable)
try:
outstr = parser.parse()
except MMKError as e:
if str(e):
perr("Error:", str(e))
return RETURN_ERR
with open(outpath, "w") as ofile:
ofile.write(outstr)
return 0
def parse_int_round(instr):
return int(parse_frac(instr))
def parse_int_hex(instr: str):
if instr.startswith("$"):
return int(instr[1:], 16)
else:
return int(instr, 0)
def try_int(s):
try:
return int(s, 10)
except ValueError:
return None
def parse_hex_only(in_str: str):
if in_str.startswith("$"):
return int(in_str[1:], 16)
else:
hex_value = int(in_str, 16)
int_value = try_int(in_str)
if int_value is not None and int_value != hex_value:
raise MMKError(
f"Ambiguous value {in_str} in hexadecimal-only field (try prefixing $)"
)
return hex_value
def parse_frac(infrac):
if type(infrac) == str:
slash_pos = infrac.find("/")
if slash_pos != -1:
num = infrac[:slash_pos]
den = infrac[slash_pos + 1 :]
return Fraction(num) / Fraction(den)
return Fraction(infrac)
def to_hex(in_frac):
in_frac = int(in_frac)
if not (-0x80 <= in_frac < 0x100):
raise ValueError(f"Passed invalid {type(in_frac)} {in_frac} to int2hex")
value = "$%02x" % (in_frac % 0x100)
return value
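# Examples (traced from to_hex above): to_hex(16) -> "$10", to_hex(255) -> "$ff",
# and a negative value wraps, e.g. to_hex(-1) -> "$ff".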
def parse_wave_range(sweep_str: str, nwave: int) -> range:
""" Parse range of wave sample indices. """
error = MMKError(f"wave range {sweep_str} invalid, must be [x,y] or [x,y)")
begin_end = sweep_str.split(",")
if len(begin_end) != 2:
raise error
begin_str, end_str = begin_end
# Either sample index, or fraction {1., .5} of nwave.
# TODO move bounds checking to SweepEvent
def _parse_ratio_or_int(s: str) -> int:
if "." in s:
return round(nwave * float(s))
else:
return parse_int_hex(s)
# [Begin interval
open_paren = begin_str[0]
if open_paren not in "[(":
raise error
begin_idx = _parse_ratio_or_int(begin_str[1:])
# End interval)
close_paren = end_str[-1]
if close_paren not in "])":
raise error
end_idx = _parse_ratio_or_int((end_str[:-1]))
# Python range() defaults to [x..y-1]. We can turn it into [x+1..y].
delta = int(math.copysign(1, end_idx - begin_idx))
if open_paren == "(":
begin_idx += delta
if close_paren == "]":
end_idx += delta
return range(begin_idx, end_idx, delta)
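# Worked examples for the interval notation (assuming nwave=32):
# parse_wave_range("[0,16)", 32)  -> range(0, 16)        # half-open, increasing
# parse_wave_range("(0,8]", 32)   -> range(1, 9)         # open start, closed end
# parse_wave_range("[.5,0.)", 32) -> range(16, 0, -1)    # fractions of nwave, decreasing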
OCTAVE = 12
note_names = ["c", "c+", "d", "d+", "e", "f", "f+", "g", "g+", "a", "a+", "b"]
def format_note(midi: int):
octave = (midi // OCTAVE) - 1
note = note_names[midi % OCTAVE]
return f"o{octave}{note}"
note2pitch = {note: idx for idx, note in enumerate(note_names)}
accidental2pitch = {"+": 1, "-": -1}
TICKS_PER_BEAT = 0x30
TICKS_PER_MEASURE = 4 * TICKS_PER_BEAT
def vol_midi2smw(midi_vol):
midi_vol = parse_frac(midi_vol)
fractional = midi_vol / 127
smw_vol = fractional * 255
return round(smw_vol)
WHITESPACE = " \t\n\r\x0b\f,"
TERMINATORS = WHITESPACE + '"()[]'
def any_of(chars) -> Pattern:
"""Compile chars into wildcard regex pattern.
Match is 0 characters long and does not include char."""
chars = "".join(sorted(chars))
regex = "(?=[{}])".format(re.escape(chars))
return re.compile(regex)
def none_of(chars) -> Pattern:
"""Compile chars into negative-wildcard regex pattern.
Match is 0 characters long and does not include non-matched char."""
chars = "".join(sorted(chars))
regex = "(?=[^{}])".format(re.escape(chars))
return re.compile(regex)
@dataclass
class WavetableMetadata:
nwave: int = field(init=False)
nsamp: int
ntick: int
fps: float # Unused. %wave_sweep (constant rate) assumes fps = ticks/second.
wave_sub: int # Each wave is repeated `wave_sub` times.
env_sub: int # Each volume/frequency entry is repeated `env_sub` times.
root_pitch: int
pitches: List[float]
tuning: int = field(init=False)
tuning_str: str = field(init=False)
smp_idx: Optional[int] = None
silent: bool = False
def __post_init__(self):
nsamp = self.nsamp
if nsamp % 16:
raise MMKError(f"cannot load sample with {nsamp} samples != n*16")
self.tuning = nsamp // 16
self.tuning_str = "$%02x $00" % self.tuning
name: str = None
class Stream:
# Idea: Stream object with read methods, get_word, etc.
# And external parse_... functions.
SHEBANG = "%mmk0.1"
def __init__(self, in_str: str, defines: Dict[str, str], remove_shebang=False):
"""
Construct an input Stream.
:param in_str: string
:param defines: Passed by reference.
:param remove_shebang: Only True on first Stream created
(not on #instruments{...}).
"""
self.in_str = in_str
self.defines = defines
self.pos = 0
if remove_shebang:
if self.in_str.startswith(self.SHEBANG):
self.in_str = self.in_str[len(self.SHEBANG) :].lstrip()
def size(self):
return len(self.in_str)
# so I basically reimplemented the iterator protocol ad-hoc... except I can't use takewhile.
# Iterators don't support peek(). https://pypi.org/project/more-itertools/ supports peek() like
# my API.
def peek(self) -> str:
if self.is_eof():
# Return an "invalid Unicode character". Will it hide bugs?
return "\uFFFF"
return self.in_str[self.pos]
def peek_equals(self, keyword: str):
return self.in_str.startswith(keyword, self.pos)
def is_eof(self):
assert self.pos <= self.size()
return self.pos >= self.size() # TODO ==
def get_char(self) -> str:
out = self.in_str[self.pos]
self.pos += 1
return out
# **** Parsing ****
def get_until(self, regex: Union[Pattern, str], strict) -> str:
"""
Read until first regex match. Move pos after end of match (before lookahead).
:param regex: Regex pattern terminating region.
:param strict: If true, throws exception on failure. If false, returns in_str[pos:size()].
:return: Text until (not including) regex match.
"""
regex = re.compile(regex)
match = regex.search(self.in_str, self.pos)
if match:
end = match.end()
out_idx = match.start()
elif not strict:
end = self.size()
out_idx = end
else:
raise MMKError('Unterminated region, missing "{}"'.format(regex.pattern))
out = self.in_str[self.pos : out_idx]
self.pos = end
return out
def get_chars(self, num: int) -> str:
"""Gets the specified number of characters.
:param num: Number of characters to skip.
:return: String of characters
"""
new = min(self.pos + num, self.size())
skipped = self.in_str[self.pos : new]
self.pos = new
return skipped
def skip_chars(self, num, put: Callable = None):
skipped = self.get_chars(num)
if put:
put(skipped)
def skip_until(self, end: str, put: Callable):
# FIXME deprecated
in_str = self.in_str
self.skip_chars(1, put)
end_pos = in_str.find(end, self.pos)
if end_pos == -1:
end_pos = self.size()
# The delimiter is skipped as well.
# If end_pos == self.len(), skip_chars handles the OOB case by not reading the extra char.
self.skip_chars(end_pos - self.pos + 1, put)
return self.in_str[end_pos]
# High-level matching functions
# Returns (parse, whitespace = skip_spaces())
TERMINATORS_REGEX = any_of(TERMINATORS) # 0-character match
def get_word(self, terminators=None) -> Tuple[str, str]:
"""Gets single word from file. If word begins with %, replaces with definition (used for parameters).
Removes all leading spaces, but only trailing spaces up to the first \n.
That helps preserve formatting.
:param terminators: Custom set of characters to include
:return: (word, trailing whitespace)
"""
self.skip_spaces()
if terminators:
regex = re.compile(any_of(terminators))
else:
regex = self.TERMINATORS_REGEX
word = self.get_until(regex, strict=False)
if not word:
raise ValueError(
"Tried to get word where none exists (invalid command or missing arguments?)"
)
whitespace = self.get_spaces(exclude="\n")
if word.startswith("%"):
word = self.defines.get(word[1:], word) # dead code?
return word, whitespace
def get_phrase(self, n: int) -> List[str]:
""" Gets n words, plus trailing whitespace. """
if n <= 0:
raise ValueError("invalid n={} < 0".format(repr(n)))
words = []
whitespace = None
for i in range(n):
word, whitespace = self.get_word()
words.append(word)
words.append(whitespace)
return words
def get_spaces(self, exclude: Iterable[str] = "") -> str:
whitespace = set(WHITESPACE) - set(exclude)
not_whitespace = none_of(whitespace) # 0-character match
skipped = self.get_until(not_whitespace, strict=False)
return skipped
def skip_spaces(self, put: Callable = None, exclude: Iterable[str] = ""):
skipped = self.get_spaces(exclude)
if put:
put(skipped)
def get_line_spaces(self):
# TODO use function more
return self.get_spaces(exclude="\n")
def get_quoted(self):
"""
:return: contents of quotes
"""
if self.get_char() != '"':
raise MMKError('string does not start with "')
quoted = self.get_until(r'["]', strict=True)
whitespace = self.get_spaces(exclude="\n")
return quoted, whitespace
def get_line(self):
# TODO add "put" parameter
return self.get_until(any_of("\n"), strict=False)
# Returns parse (doesn't fetch trailing whitespace)
def get_int(self, maybe=False) -> Optional[int]:
buffer = ""
while self.peek().isdigit():
buffer += self.get_char()
if not buffer:
if maybe:
return None
else:
raise MMKError("Integer expected, but no digits to parse")
return parse_int_round(buffer)
def get_time(self) -> Tuple[Optional[int], str]:
"""Obtains time and fetches trailing whitespace.
Returns (nticks, whitespace)."""
dur = self._get_time()
whitespace = self.get_spaces(exclude="\n")
return dur, whitespace
def _get_time(self) -> Optional[int]:
"""Obtains time without getting trailing whitespace.
Returns nticks."""
first = self.peek()
if first == "=":
# =48
self.skip_chars(1)
return self.get_int()
is_numerator = first.isnumeric()
is_reciprocal = first == "/"
if not (is_numerator or is_reciprocal):
# no duration specified
return None
if is_numerator:
# 1, 1/48
num = self.get_int()
else:
# /48
num = 1
if self.peek() == "/":
# 1/48. /48
self.skip_chars(1)
den = self.get_int()
else:
# 1
den = 1
dur = Fraction(num, den) * TICKS_PER_BEAT
if int(dur) != dur:
raise MMKError(
f"Invalid duration {Fraction(num/den)}, must be multiple of 1/48"
)
return int(dur)
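# Duration notation accepted above, with TICKS_PER_BEAT = 48 (values traced from the code):
# "1" -> 48 ticks (one beat), "1/2" -> 24, "/4" -> 12, "=18" -> 18 ticks verbatim,
# and a missing duration returns None.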
# Takes total duration and returns note duration.
NoteLenCalc = Callable[[int], int]
NoteLenFactory = Callable[[int], NoteLenCalc]
def release_early(dur: int) -> NoteLenCalc:
def _release(note_len: int) -> int:
return note_len - dur
return _release
def staccato(dur: int) -> NoteLenCalc:
# Disable staccato.
if dur == 0:
return release_early(0)
# Return dur-tick-long staccato.
def _staccato(note_len: int) -> int:
return min(note_len, dur)
return _staccato
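# Sketch of how these interact with get_release() further down, for a 48-tick note:
# release_early(6)(48) -> 42, so the note plays 42 ticks followed by a 6-tick rest;
# staccato(6)(48) -> 6, so the note plays 6 ticks followed by a 42-tick rest;
# staccato(0) falls back to release_early(0), i.e. the full 48 ticks.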
@dataclass
class MMKState:
isvol: bool = False
ispan: bool = False
is_notelen: bool = False
panscale: Fraction = Fraction("5/64")
vmod: Fraction = Fraction(1)
# Note staccato and early release
default_note_len: int = None
# staccato: int = 0
note_len_calc: NoteLenCalc = release_early(0)
v: Optional[str] = None
y: str = "10"
keys: ClassVar = ["v", "y"]
# TODO move parsers from methods to functions
NOTES_WITH_DURATION = frozenset("abcdefg^rl")
RELEASE_CHAR = "~"
STACCATO_CHAR = "." # Dots have a different meaning from normal MML.
class MMKParser:
FIRST_INSTRUMENT = 30
def __init__(
self,
in_str: str,
tuning: Optional[Dict[str, str]],
wavetable: Optional[Dict[str, WavetableMetadata]] = None,
):
# Input parameters
self.tuning = tuning
self.wavetable = wavetable
# Parser state
self.orig_state = MMKState()
self.state = copy.copy(self.orig_state)
self.defines = dict(
viboff="$DF",
tremoff="$E5 $00 $00 $00",
slur="$F4 $01",
legato="$F4 $01",
light="$F4 $02",
restore_instr="$F4 $09",
) # type: Dict[str, str]
self.amk_keys = pygtrie.CharTrie()
# Wavetable parser state
self.curr_chan: int = None
self.smp_num = 0
self.instr_num = self.FIRST_INSTRUMENT
self.silent_idx: int = None
# File IO
self.stream = Stream(in_str, self.defines, remove_shebang=True)
self.out = StringIO()
# To print exception location
self._command = None
self._begin_pos = 0
# **** I/O manipulation, AKA "wish I wrote a proper lexer/parser/output" ****
@contextmanager
def set_input(self, in_str: str):
"""Temporarily replaces self.stream with new string.
Idea: Maybe parser functions should take a stream parameter?
"""
stream = self.stream
self.stream = Stream(in_str, self.defines)
try:
yield
finally:
self.stream = stream
@contextmanager
def end_at(self, end_regex: Pattern):
""" Temporarily replaces self.stream with truncated version. """
in_str = self.stream.get_until(end_regex, strict=False)
with self.set_input(in_str):
yield
if not self.stream.is_eof():
raise Exception(
"Bounded parsing error, parsing ended at {} but region ends at {}".format(
self.stream.pos, len(in_str)
)
)
def until_comment(self):
return self.end_at(any_of(";\n"))
@contextmanager
def capture(self) -> StringIO:
orig = self.out
self.out = StringIO()
with self.out:
try:
yield self.out
finally:
self.out = orig
def parse_str(self, in_str: str):
with self.set_input(in_str):
self.parse()
# Writing strings
def put(self, pstr):
self.out.write(pstr)
def put_hex(self, *nums):
not_first = False
for num in nums:
if not_first:
self.put(" ")
self.put(to_hex(num))
not_first = True
self.put(" ")
# Begin parsing functions!
def parse_amk_replace(self):
assert self.stream.get_char() == '"'
before = self.stream.get_until("=", strict=True)
after = self.stream.get_until('"', strict=True)
self.amk_keys[before.strip()] = True
self.put('"{}='.format(before))
self.parse_str(after)
self.put('"')
def subst_define(self, command_case, whitespace):
""" TODO Parse literal define, passthrough. """
if command_case in self.defines:
self.put(self.defines[command_case] + whitespace)
return True
return False
# Save/restore state
def parse_save(self):
assert self.state is not self.orig_state
self.orig_state = copy.copy(self.state)
assert self.state is not self.orig_state
def parse_restore(self):
assert self.state is not self.orig_state
for key in MMKState.keys:
old = getattr(self.orig_state, key)
new = getattr(self.state, key)
if old != new:
self.put(key + old)
self.state = copy.copy(self.orig_state)
assert self.state is not self.orig_state
# **** Numerator-fraction note lengths ****
WORD_TO_BOOL = dict(on=True, off=False, true=True, false=False)
def parse_toggle_notelen(self):
word, _ = self.stream.get_word()
try:
state = self.WORD_TO_BOOL[word]
except KeyError:
raise MMKError(
f"invalid %notelen value {word}, expected {self.WORD_TO_BOOL.keys()}"
)
self.state.is_notelen = state
def parse_note(self, allowed_notes=NOTES_WITH_DURATION):
""" Parse a fractional note, and write a tick count. """
note_str = self.stream.get_char()
if note_str not in allowed_notes:
allowed_str = "".join(sorted(allowed_notes))
raise MMKError(f"Invalid note name {note_str} not in {allowed_str}")
if self.stream.peek() in "+-":
note_str += self.stream.get_char()
# If no duration supplied, nticks is None.
nticks, whitespace = self.stream.get_time()
if note_str == "l":
if nticks is None:
raise MMKError("Cannot use lxx command without duration")
self.state.default_note_len = nticks
self.write_note(note_str, nticks)
self.put(whitespace)
return
# Compute note and release duration.
note_ticks, rest_ticks = self.get_release(
coalesce(nticks, self.state.default_note_len)
)
if nticks is None and note_ticks == self.state.default_note_len:
assert rest_ticks == 0
self.put(note_str + whitespace)
elif note_ticks + rest_ticks > 0:
if note_ticks:
self.write_note(note_str, note_ticks)
if rest_ticks:
self.write_note("r", rest_ticks)
self.put(whitespace)
def get_release(self, nticks):
note_ticks = self.state.note_len_calc(nticks)
name = self.state.note_len_calc.__name__
if note_ticks > nticks:
# Redundant staccatos should be filtered out by staccato().
raise MMKError(
f"Note length {name}: overlong "
f"{note_ticks}-ticks from {nticks}-tick note"
)
if nticks > 0 and note_ticks <= 0:
raise MMKError(
f"Note length {name}: missing "
f"{note_ticks}-ticks from {nticks}-tick note"
)
rest_ticks = nticks - note_ticks
return note_ticks, rest_ticks
def write_note(self, note_str: str, nticks: int):
time_str: str = self._format_time(nticks)
self.put(f"{note_str}{time_str}")
NOTES_ONLY = frozenset("abcdefg")
def parse_notelen(self, char: str, note_len: NoteLenFactory):
"""Release the next note early.
If two tildes, release all future notes early.
Single-note form cannot be followed by lxx.
TODO: Should ties be allowed?
"""
def read_release():
dur, _ = self.stream.get_time()
self.state.note_len_calc = note_len(dur)
assert self.stream.get_char() == char
if self.stream.peek() == char:
self.stream.get_char()
# Continue until cancelled.
read_release()
else:
# Release the next note.
old_state = copy.copy(self.state)
read_release()
self.parse_note(allowed_notes=self.NOTES_ONLY)
self.state = old_state
@staticmethod
def _format_time(ntick: Optional[int]) -> str:
""" Convert a tick duration to a MML "c4" or "c=48"-style duration. """
if ntick is None:
return ""
# If possible, convert to fraction of a measure (c4).
measure_frac = Fraction(ntick, TICKS_PER_MEASURE)
if measure_frac.numerator == 1:
return str(measure_frac.denominator)
# Otherwise return a tick duration (c=48).
return f"={ntick}"
# **** Transpose ****
def parse_transpose(self) -> None:
transpose_str, whitespace = self.stream.get_phrase(1)
transpose = int(transpose_str)
if transpose not in range(-0x80, 0x80):
raise MMKError("invalid transpose {}".format(transpose_str))
transpose_hex = to_hex(transpose & 0xFF)
self.put("$FA $02 {}".format(transpose_hex))
self.put(whitespace)
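# Worked example: "%t -12" emits "$FA $02 $f4", since -12 & 0xFF == 0xF4;
# "%t 5" emits "$FA $02 $05".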
# **** volume ****
def calc_vol(self, in_vol):
vol = parse_frac(in_vol)
vol *= self.state.vmod
if self.state.isvol:
vol *= 2
return str(round(vol))
def parse_vol(self):
self.stream.skip_chars(1, self.put)
orig_vol = self.stream.get_int()
self.state.v = self.calc_vol(orig_vol)
self.put(self.state.v)
def parse_vol_hex(self, arg):
# This both returns the volume and modifies state.
# Time to throw away state?
assert self.state is not self.orig_state
new_vol = self.state.v = self.calc_vol(arg) # type: str
hex_vol = to_hex(new_vol)
return hex_vol
def parse_vbend(self):
# Takes a fraction of a quarter note as input.
# Converts to ticks.
time, _ = self.stream.get_time()
vol, whitespace = self.stream.get_phrase(1)
time_hex = to_hex(time)
vol_hex = self.parse_vol_hex(vol)
self.put("$E8 {} {}{}".format(time_hex, vol_hex, whitespace))
# **** pan ****
def calc_pan(self, orig_pan):
# Convert panning
if self.state.ispan:
zeroed_pan = parse_frac(orig_pan) - 64
scaled_pan = zeroed_pan * self.state.panscale
return str(round(scaled_pan + 10))
else:
return str(orig_pan)
def parse_pan(self):
self.stream.skip_chars(1, self.put)
orig_pan = self.stream.get_int()
self.state.y = self.calc_pan(orig_pan)
# Pass the command through.
self.put(self.state.y)
def parse_ybend(self):
duration, _ = self.stream.get_time()
pan, whitespace = self.stream.get_phrase(1)
duration_hex = to_hex(duration)
self.state.y = self.calc_pan(pan)
pan_hex = to_hex(self.state.y)
self.put("$DC {} {}{}".format(duration_hex, pan_hex, whitespace))
# **** meh ****
def parse_comment(self, put=True):
comment = self.stream.get_until(any_of("\n"), strict=False)
if put:
self.put(comment)
def skip_comment(self):
self.parse_comment(put=False)
# Multi-word parsing
def parse_pbend(self):
# Takes a fraction of a quarter note as input.
# Converts to ticks.
delay, _ = self.stream.get_time()
time, _ = self.stream.get_time()
note, whitespace = self.stream.get_phrase(1)
delay_hex = to_hex(delay)
time_hex = to_hex(time)
self.put("$DD {} {} {}{}".format(delay_hex, time_hex, note, whitespace))
# **** oscillatory effects ****
def parse_vib(self):
delay, _ = self.stream.get_time()
frequency, amplitude, whitespace = self.stream.get_phrase(2)
delay_hex = to_hex(delay)
freq_hex = to_hex(parse_frac(frequency))
self.put("$DE {} {} {}{}".format(delay_hex, freq_hex, amplitude, whitespace))
def parse_trem(self):
delay, _ = self.stream.get_time()
frequency, amplitude, whitespace = self.stream.get_phrase(2)
delay_hex = to_hex(delay)
freq_hex = to_hex(parse_frac(frequency))
self.put("$E5 {} {} {}{}".format(delay_hex, freq_hex, amplitude, whitespace))
# **** envelope effects ****
_GAINS = [
# curve, begin, max_rate
["direct", "set", 0x00],
["down", 0x80],
["exp", 0xA0],
["up", 0xC0],
["bent", 0xE0],
[None, 0x100],
]
for i in range(len(_GAINS) - 1):
_GAINS[i].append(_GAINS[i + 1][-1] - _GAINS[i][-1])
_GAINS = _GAINS[:-1]
def parse_gain(self, *, instr):
# Look for a matching GAIN value, ensure the input rate lies in-bounds,
# then write a hex command.
curve, rate, whitespace = self.stream.get_phrase(2)
if instr:
prefix = "$00 $00"
else:
prefix = "$FA $01"
raw_rate = rate
rate = parse_hex_only(rate)
for *curves, begin, max_rate in self._GAINS:
if curve in curves:
rate = self._index_check(curve, rate, max_rate)
self.put("%s %s%s" % (prefix, to_hex(begin + rate), whitespace))
return
perr("Invalid gain %s, options are:" % repr(curve))
for *curves, _, max_rate in self._GAINS:
perr("%s (rate < %s)" % (curve, hex(max_rate)))
raise MMKError
def parse_adsr(self, instr: bool):
"""
Parse ADSR command.
attack: Attack speed (0-15)
decay: Decay speed (0-7)
sustain: Sustain volume (0-7)
release: Release speed (0-31)
:param instr: Whether ADSR command occurs in instrument definition (or MML command)
"""
attack, decay, sustain, release, whitespace = self.stream.get_phrase(4)
if sustain.startswith("full"):
sustain = "7"
attack = parse_hex_only(attack)
decay = parse_hex_only(decay)
sustain = parse_hex_only(sustain)
release = parse_hex_only(release)
attack = self._index_check("attack", attack, 0x10)
decay = self._index_check("decay", decay, 0x08)
sustain = self._index_check("sustain", sustain, 0x08)
release = self._index_check("release", release, 0x20)
a = 0x10 * decay + attack
b = 0x20 * sustain + release
if instr:
a += 0x80
fmt = "{} {} $A0"
else:
fmt = "$ED {} {}"
self.put(fmt.format(to_hex(a), to_hex(b)))
self.put(whitespace)
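# Worked packing example, assuming "%adsr $F,7,7,0" (attack 15, decay 7, sustain 7, release 0):
# a = 0x10*7 + 15 = $7f and b = 0x20*7 + 0 = $e0, so the MML form emits "$ED $7f $e0",
# while the instrument form adds $80 to a and emits "$ff $e0 $A0".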
def parse_exp(self, instr: bool):
release, whitespace = self.stream.get_word()
with self.set_input(f"-1,-1,full," + release + whitespace):
self.parse_adsr(instr)
@staticmethod
def _index_check(caption, val, end):
if val < 0:
val += end
if val not in range(end):
raise MMKError(
"Invalid ADSR/gain {} {} (must be < {})".format(caption, val, end)
)
return val
# **** event handler callbacks ****
event_map = {
"clear": 0,
"keyon": -1,
"kon": -1,
"begin": -1,
"start": -1,
"after": 1, # after keyon
"before": 2, # before keyoff
"keyoff": 3,
"koff": 3,
"kof": 3,
"end": 3,
"now": 4,
}
def parse_callback(self):
expr = self.stream.get_until(any_of(")"), strict=True)
args = [word.strip() for word in expr.split(",")]
# if len(args) < 1:
# raise MMKError(
# f"Invalid callback (!{expr}), must have (!callback)[] or (!callback, event)")
if len(args) < 2:
# Callback definition (!n)
self.put(expr)
return
callback_num = args[0]
event = args[1]
event_num = self.event_map[event]
if event in ["after", "before"]:
time = args[2]
if len(args) != 3:
raise MMKError(
f"Invalid event binding (!{expr}), must have duration (measure/$x)"
)
self.put("{}, {}, {}".format(callback_num, event_num, time))
else:
self.put("{}, {}".format(callback_num, event_num))
# **** #instruments ****
def parse_instr(self):
"""Parse an instrument definition. Define a name for the instrument number.
Do not place %tune before %instr, it breaks named instruments.
"foo.brr"
- %foo=@30
Define a custom alias.
bar="foo.brr"
- %bar=@31
"""
with self.capture() as fout, self.until_comment():
input = self.stream.in_str
self.parse_instruments()
output = fout.getvalue()
# Process custom aliases
if "=" in input and input.index("=") < input.index('"'):
before_assignment = input.split("=")[0].strip()
instr_name = before_assignment.split()[-1]
if not instr_name:
raise MMKError("invalid alias definition, what is this? " + input)
else:
instr_name = None
self.put(output)
if instr_name is None:
stream = Stream(output, self.defines)
instr_path, whitespace = stream.get_quoted()
instr_path = Path(instr_path)
if instr_path.suffix != ".brr":
raise MMKError(f"Invalid instrument sample {instr_path} not .brr file")
instr_name = instr_path.stem
self.defines[instr_name] = f"@{self.instr_num}"
self.instr_num += 1
def parse_tune(self):
self.smp_num += 1
# "test.brr" $ad $sr $gain $tune $tune
self.stream.get_until(any_of('"'), strict=True)
brr, whitespace = self.stream.get_quoted()
if self.tuning is None:
perr("Cannot use %tune without a tuning file")
raise MMKError
tuning = self.tuning[brr]
self.put('"{}"{}'.format(brr, whitespace))
with self.end_at(any_of(";\n")):
self.parse_instruments() # adsr+gain
self.put(" {}".format(tuning))
# **** Wavetable sweeps ****
def parse_smp(self):
self.smp_num += 1
def parse_silent(self):
self.silent_idx = self.smp_num
self.smp_num += 1
def parse_group(self):
self.smp_num += self.stream.get_int()
def parse_wave_group(self, is_instruments: bool):
"""
#samples {
%wave_group "name" [ntick_playback] [silent|...]
#instruments {
%wave_group "0" %adsr -1,-1,-1,0
"""
name, whitespace = self.stream.get_quoted()
if name.endswith(".brr"):
raise MMKError(f'Try removing ".brr" from %wave_group {name}')
ntick_playback = None
if not is_instruments:
ntick_playback = self.stream.get_int(
maybe=True
) # Only load the first N ticks
if ntick_playback is not None:
whitespace = self.stream.get_spaces(exclude="\n")
meta = self.wavetable[name]
waves = self._get_waves_in_group(name, ntick_playback)
with self.capture() as output, self.until_comment():
if is_instruments:
self.parse_instruments()
self.put(" " + meta.tuning_str)
# *ugh* the instrument's tuning value is basically unused
else:
self.put(self.stream.get_line())
after = output.getvalue()
if not is_instruments: # If samples
args = after.split()
after = after[len(after.rstrip()) :] # Only keep whitespace
# print(name, args)
for arg in args:
if arg in ["silent"]:
setattr(meta, arg, True)
else:
raise MMKError(
f"Invalid #samples{{%wave_group}} argument {arg}"
)
comments = self.stream.get_line()
self.stream.skip_chars(1) # remove trailing newline
for wave in waves:
# eh, missing indentation. who cares.
self.put(f'"{wave}"{whitespace}{after}{comments}\n')
comments = ""
if not is_instruments: # FIXME
meta.smp_idx = self.smp_num
meta.nwave = len(waves)
# meta.nwave is not always equal to len(meta.pitches),
# since they may be subsampled differently.
self.smp_num += len(waves)
WAVE_GROUP_TEMPLATE = "{}-{:03}.brr"
def _get_waves_in_group(
self, name: str, ntick_playback: Optional[int]
) -> List[str]:
""" Returns a list of N BRR wave names. """
# if name in self.wave_groups:
# return self.wave_groups[name]
if self.wavetable is None:
raise MMKError("cannot load wavetables, missing wavetable.yaml")
meta = self.wavetable[name]
if ntick_playback is not None:
meta.ntick = min(meta.ntick, ntick_playback)
nwave = ceildiv(meta.ntick, meta.wave_sub)
wave_names = [self.WAVE_GROUP_TEMPLATE.format(name, i) for i in range(nwave)]
return wave_names
# Wave sweeps
_REG = 0xF6
def put_load_sample(self, smp_idx: int):
self.put_hex(self._REG, self._get_wave_reg(), smp_idx)
def _get_wave_reg(self):
return 0x10 * self.curr_chan + 0x04
# Echo and FIR
def parse_fir(self):
# params = []
*params, _whitespace = self.stream.get_phrase(8)
params = [parse_int_hex(param) for param in params]
# params.append(self.stream.get_int())
# _whitespace = self.stream.get_line_spaces()
self.put("$F5 ")
self.put_hex(*params)
# self.state:
# PAN, VOL, INSTR: str (Remove segments?)
# PANSCALE: Fraction (5/64)
# ISVOL, ISPAN: bool
def parse(self) -> str:
# For exception debug
try:
while not self.stream.is_eof():
# Yeah, simpler this way. But could hide bugs/inconsistencies.
self.stream.skip_spaces(self.put)
if self.stream.is_eof():
break
# Only whitespace left, means already printed, nothing more to do
self._begin_pos = self.stream.pos
amk_key = self.amk_keys.longest_prefix(
StringSlice(self.stream.in_str, self.stream.pos)
)
if amk_key:
self.stream.skip_chars(len(amk_key.key), self.put)
continue
char = self.stream.peek()
# noinspection PyUnreachableCode
if False:
# Do you realize exactly how many bugs I've created
# because I accidentally used `if` instead of `elif`?
pass
# Save AMK keys, to skip parsing them later.
elif char == '"':
self.parse_amk_replace()
# Parse the default AMK commands.
elif self.state.is_notelen and char in NOTES_WITH_DURATION:
self.parse_note()
elif self.state.is_notelen and char == RELEASE_CHAR:
self.parse_notelen(RELEASE_CHAR, release_early)
elif self.state.is_notelen and char == STACCATO_CHAR:
self.parse_notelen(STACCATO_CHAR, staccato)
elif char == "v":
self.parse_vol()
elif char == "y":
self.parse_pan()
elif char in "q$":
self.stream.skip_chars(3, self.put)
elif char == ";":
self.parse_comment()
elif char == "#": # instruments{}
self.stream.skip_chars(1, self.put)
self.stream.skip_spaces(self.put)
ret = False
def branch(keyword: str, method: Callable):
nonlocal ret
if self.stream.peek_equals(keyword):
self.stream.skip_until("{", self.put)
self.stream.skip_chars(1, self.put)
method()
ret = True
branch("samples", self.parse_samples)
branch("instruments", self.parse_instruments)
branch("spc", self.parse_spc)
if ret:
continue
if self.stream.peek().isnumeric():
chan = self.stream.get_char()
self.curr_chan = int(chan)
self.put(chan)
else:
self.put(self.stream.get_line())
elif char == "(":
self.stream.skip_chars(1, self.put)
if self.stream.peek() == "!":
self.stream.skip_chars(1, self.put)
self.parse_callback()
# Begin custom commands.
elif char == "%":
self.stream.skip_chars(1)
# NO ARGUMENTS
command_case, whitespace = self.stream.get_word()
command = command_case.lower()
self._command = command
if self.subst_define(command_case, whitespace):
continue
if command == "mmk0.1":
raise Exception("this shouldn't happen")
elif command == "define":
key = self.stream.get_word()[0]
value = self.stream.get_line()
self.defines[key] = value
elif command == "reset":
self.state = copy.copy(self.orig_state)
assert self.state is not self.orig_state
elif command == "isvol":
self.state.isvol = True
elif command == "ispan":
self.state.ispan = True
elif command == "notvol":
self.state.isvol = False
elif command == "notpan":
self.state.ispan = False
elif command == "notelen":
self.parse_toggle_notelen()
# N ARGUMENTS
elif command == "save":
self.parse_save()
elif command == "restore":
self.parse_restore()
elif command in ["t", "transpose"]:
self.parse_transpose()
elif command == "adsr":
self.parse_adsr(instr=False)
elif command == "exp":
self.parse_exp(instr=False)
elif command == "gain":
self.parse_gain(instr=False)
# Wavetable sweep
elif command == "wave_sweep":
parse_wave_sweep(self)
elif command == "sweep{":
parse_parametric_sweep(self, is_legato=True)
elif command in ["note_sweep{", "ns{"]:
parse_parametric_sweep(
self, is_legato=False, retrigger_sweep=True
)
# Echo and FIR
elif command == "fir":
self.parse_fir()
# Volume scaling
elif command == "vmod":
arg, _ = self.stream.get_word()
self.state.vmod = parse_frac(arg)
# Parameter slides
elif command in ["vbend", "vb"]:
self.parse_vbend()
elif command in ["ybend", "yb"]:
self.parse_ybend()
elif command in ["pbend", "pb"]:
self.parse_pbend()
# Vibrato/tremolo
elif command == "vib":
self.parse_vib()
elif command == "trem":
self.parse_trem()
# INVALID COMMAND
else:
raise MMKError("Invalid command " + command)
else:
self.stream.skip_chars(1, self.put)
self.stream.skip_spaces(self.put)
return self.out.getvalue().strip() + "\n"
except Exception:
# Seek at least 100 characters back
begin_pos = self._begin_pos
idx = begin_pos
for i in range(3):
idx = self.stream.in_str.rfind("\n", 0, idx)
if idx == -1:
break
if begin_pos - idx >= 100:
break
idx += 1
if self._command is None:
last = "None"
else:
last = "%" + self._command
perr()
perr("#### MMK parsing error ####")
perr(" Last command: " + last)
perr(" Context:")
perr(self.stream.in_str[idx:begin_pos] + "...\n")
raise # main() eats MMKError to avoid visual noise
# noinspection PyMethodParameters
def _brace_parser_factory(
mapping: Dict[str, Callable[["MMKParser"], None]]
) -> Callable:
def _parse(self: "MMKParser"):
"""
Parses #instruments{...} blocks. Eats trailing close-brace.
Also used for parsing quoted BRR filenames within #instruments.
"""
close = "}"
while not self.stream.is_eof():
# pos = self.pos
self.stream.skip_spaces(self.put, exclude=close)
self._begin_pos = self.stream.pos
# assert pos == self.pos
char = self.stream.peek()
if char in close:
self.stream.skip_chars(1, self.put) # {}, ""
self.stream.skip_spaces(self.put, exclude="\n")
return
if char == ";":
self.parse_comment()
elif char == "%":
self.stream.skip_chars(1)
command_case, whitespace = self.stream.get_word()
command = command_case.lower()
self._command = command
# **** Parse defines ****
if self.subst_define(command_case, whitespace):
pass
# **** Parse commands ****
elif command in mapping:
mapping[command](self)
else:
perr(mapping.keys())
raise MMKError("Invalid command " + command)
else:
self.stream.skip_chars(1, self.put)
self.stream.skip_spaces(self.put)
return _parse
# noinspection PyArgumentList
parse_instruments = _brace_parser_factory(
{
"instr": lambda self: self.parse_instr(),
"group": lambda self: self.parse_group(),
"tune": lambda self: self.parse_tune(),
"gain": lambda self: self.parse_gain(instr=True),
"adsr": lambda self: self.parse_adsr(instr=True),
"exp": lambda self: self.parse_exp(instr=True),
"wave_group": lambda self: self.parse_wave_group(is_instruments=True),
}
)
# noinspection PyArgumentList
parse_samples = _brace_parser_factory(
{
"smp": lambda self: self.parse_smp(),
"silent": lambda self: self.parse_silent(),
"wave_group": lambda self: self.parse_wave_group(is_instruments=False),
}
)
# noinspection PyArgumentList
parse_spc = _brace_parser_factory({})
#### %wave_sweep
T = TypeVar("T")
Timed = Tuple[int, T]
@dataclass
class SweepEvent:
sample_idx: Optional[int]
pitch: Optional[float]
def __bool__(self):
return any(x is not None for x in dataclasses.astuple(self))
SweepIter = Iterator[Tuple[int, SweepEvent]]
SweepList = List[Tuple[int, SweepEvent]]
class Sweepable(ABC):
@abstractmethod
def ntick(self, midi_pitch: Optional[int]) -> int:
...
@abstractmethod
def iter(self, midi_pitch: Optional[int]) -> SweepIter:
...
def sweep_chain(sweeps: List[Sweepable], midi_pitch: Optional[int]) -> SweepIter:
curr_ntick = 0
for sweep in sweeps:
for tick, event in sweep.iter(midi_pitch):
yield (curr_ntick + tick, event)
curr_ntick += sweep.ntick(midi_pitch)
class PitchedSweep(Sweepable):
""" Pitched sweep, with fixed wave/pitch rate. """
def __init__(self, meta: WavetableMetadata):
self.meta = meta
self._ntick = meta.ntick
def ntick(self, midi_pitch: Optional[int]) -> int:
return self._ntick
def iter(self, midi_pitch: Optional[int]) -> SweepIter:
""" Pitched sweep, plays at fixed pitch and rate. midi_pitch is ignored. """
meta = self.meta
def tick_range(skip):
return peekable(
itertools.chain(
range(0, meta.ntick, skip),
[math.inf],
)
)
wave_ticks = tick_range(meta.wave_sub)
pitch_ticks = tick_range(meta.env_sub)
tick = 0
while tick < meta.ntick:
event = SweepEvent(None, None)
# Wave envelope
if tick == wave_ticks.peek():
event.sample_idx = tick // meta.wave_sub
next(wave_ticks)
if tick == pitch_ticks.peek():
env_idx = tick // meta.env_sub
event.pitch = meta.pitches[env_idx]
next(pitch_ticks)
yield (tick, event)
tick = min(wave_ticks.peek(), pitch_ticks.peek())
# @dataclass
class INote:
ntick: int
@dataclass
class Note(INote):
"""A note used in %wave_sweep and %sweep{.
If `midi_pitch` is set, it overrides the sweep's pitch."""
ntick: int
midi_pitch: Optional[int]
@dataclass(frozen=True)
class _ToggleLegato(INote):
ntick: int = 0
ToggleLegato = _ToggleLegato()
del _ToggleLegato
NoteIter = Iterator[Tuple[int, INote]]
def note_chain(notes: List[INote]) -> NoteIter:
tick = 0
for note in notes:
yield (tick, note)
tick += note.ntick
DETUNE = 0xEE
LEGATO = "$F4 $01 "
def parse_wave_sweep(self: MMKParser):
""" Print a wavetable sweep at a fixed rate. """
name, _ = self.stream.get_quoted()
note_ntick = self.stream.get_int() # The sweep lasts for N ticks
meta = self.wavetable[name]
sweeps = [PitchedSweep(meta)]
notes = [ToggleLegato, Note(note_ntick, None)]
_put_sweep(self, sweeps, notes, meta, is_legato=True)
@dataclass
class SweepState:
is_legato: bool
is_detuned: bool
def _put_sweep(
self: MMKParser,
sweeps: List[Sweepable],
notes: List[INote],
meta: WavetableMetadata,
is_legato: bool,
retrigger_sweep: bool = False,
):
"""Write a wavetable sweep. Duration is determined by `notes`.
If notes[].midi_pitch exists, overrides sweeps[].pitch.
Used by %wave_sweep and %sweep{.
# Each note follows a pitch/wave event. It is printed (with the proper
# begin/end ticks) when the next pitch/wave event begins.
Workflow: If a note lasts from t0 to t1, the following occurs:
- end_note(t0)
- SweepEvent assigns sweep_pitch[t0]
- and/or Note assigns note_pitch[t0]
- end_note(t1) writes a note from t0 to t1. midi_pitch() == end of t0.
TODO:
- Add datatype for rests
- Add support for arbitrary events (volume, pan)
- Add support for retriggering wave envelope
"""
if getattr(meta, "nwave", None) is None:
raise MMKError(f'Did you forget to add #samples{{ %wave_group "{meta.name}" ?')
# Enable ADSR fade-in
self.parse_str("%adsr -3,-1,full,0 ")
# Load silent instrument with proper tuning
if self.silent_idx is None:
raise MMKError("cannot %wave_sweep without silent sample defined")
# @30 to zero out fine-tuning
self.put(self.defines["silent"])
# Set coarse tuning
self.put_hex(0xF3, self.silent_idx, meta.tuning)
state = SweepState(is_legato, is_detuned=False)
del is_legato
# Enable legato
if state.is_legato:
self.put(" ")
self.put(LEGATO) # Legato glues right+2, and unglues left+right.
if retrigger_sweep:
# Sweep once per actual note.
# Note: rests should not retrigger sweep, only continue or stop sweep.
for note_and_trailing in split_before(notes, _is_note_trigger):
_put_single_sweep(self, state, meta, sweeps, note_and_trailing)
else:
# Sweep continuously across all notes.
_put_single_sweep(self, state, meta, sweeps, notes)
# Cleanup: disable legato and detune.
if state.is_legato:
self.put(LEGATO) # Legato deactivates immediately.
if state.is_detuned:
self.put_hex(DETUNE, 0)
def _is_note_trigger(e: INote):
return isinstance(e, Note)
def _get_pitch(notes: List[INote]) -> Optional[int]:
for note in notes:
if isinstance(note, Note):
return note.midi_pitch
return None
def _put_single_sweep(
self: MMKParser,
state: SweepState,
meta: WavetableMetadata,
sweeps: List[Sweepable],
notes: List[INote],
):
"""Note: If retriggering is enabled, each note will call this function
with the same `sweep_list`, but different chunks of `notes`.
So precompute `sweep_list` for a (dubious) efficiency boost.
"""
midi_pitch = _get_pitch(notes)
# Generate iterator of all SweepEvents.
sweep_iter: SweepIter = sweep_chain(sweeps, midi_pitch)
sweep_ntick = sum(sweep.ntick(midi_pitch) for sweep in sweeps)
# Generate iterator of all Notes
note_iter = note_chain(notes)
note_ntick = sum(note.ntick for note in notes)
# Overall event iterator.
time_event_iter = heapq.merge(sweep_iter, note_iter, key=lambda tup: tup[0])
#### Write notes.
note_begin = 0
def end_note(note_end):
nonlocal note_begin
dtime = note_end - note_begin
if dtime > 0:
# AddmusicK will glitch out if you write a 1-tick note,
# followed by instrument/volume changes.
# TODO unit test
# If we write a 1-tick terminating note, write a rest instead.
if note_end == note_ntick and dtime == 1:
note_str = "r"
else:
note_str = note_name()
self.put(f"{note_str}={dtime} ")
note_begin = note_end
# Pitch tracking
note_pitch: Optional[int] = None
is_new_note: bool = False
sweep_pitch: Optional[int] = None
def note_name() -> str:
""" Return note, tie, or pitch from sweep. """
nonlocal is_new_note
if note_pitch is not None:
if is_new_note:
is_new_note = False
return format_note(note_pitch)
else:
return "^"
elif sweep_pitch is not None:
return format_note(sweep_pitch)
else:
raise ValueError("_put_sweep missing both note_pitch and sweep_pitch")
for time, event in time_event_iter: # type: int, Union[SweepEvent, INote]
if time >= note_ntick:
break
end_note(time)
if isinstance(event, SweepEvent):
# Wave envelope
if event.sample_idx is not None:
if not 0 <= event.sample_idx < meta.nwave:
raise MMKError(
f"Cannot sweep to sample {event.sample_idx}, len={meta.nwave}"
f" ({meta.name})"
)
self.put_load_sample(meta.smp_idx + event.sample_idx)
# Pitch envelope
if event.pitch is not None:
# Decompose sweep pitch into integer and detune.
sweep_pitch = int(event.pitch)
detune = event.pitch - sweep_pitch
detune_int = int(detune * 256)
state.is_detuned = detune_int != 0
# Write detune value immediately (begins at following note).
self.put_hex(DETUNE, detune_int)
elif isinstance(event, Note):
note_pitch = event.midi_pitch
is_new_note = True
elif event is ToggleLegato:
state.is_legato = not state.is_legato
self.put(" " + LEGATO)
else:
raise TypeError(f"invalid sweep event type={type(event)}, programmer error")
if meta.silent and sweep_ntick < note_ntick:
# Add GAIN fadeout.
end_note(sweep_ntick)
# GAIN starts when the following note starts.
self.parse_str("%gain down $18 ")
# End final note.
end_note(note_ntick)
### %sweep{
class LinearSweep(Sweepable):
def __init__(self, sweep: range, ntick: int, pitch_scaling: float, root_pitch: int):
self.sweep = sweep # Range of sweep
self.nsweep = len(sweep)
self._ntick_unscaled = ntick
self.pitch_scaling = pitch_scaling
self.root_pitch = root_pitch
def ntick(self, midi_pitch: Optional[int]) -> int:
""" ntick /= (f/f0) ** scaling """
if midi_pitch is None:
return self._ntick_unscaled
dpitch = midi_pitch - self.root_pitch
freq_ratio = 2 ** (dpitch / 12)
return round(self._ntick_unscaled / (freq_ratio ** self.pitch_scaling))
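# Worked example (added for illustration, not in the original source): with
# _ntick_unscaled=48, root_pitch=60 and pitch_scaling=1, a note at midi_pitch=72
# is one octave above the root, so freq_ratio = 2**(12/12) = 2 and the sweep
# lasts round(48 / 2) = 24 ticks; lower notes sweep correspondingly slower.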
def iter(self, midi_pitch: Optional[int]) -> SweepIter:
"""Unpitched linear sweep, with fixed endpoints and duration.
Created using the `[a,b) time` notation.
"""
prev_tick = -1
ntick = self.ntick(midi_pitch)
for sweep_idx in range(self.nsweep):
tick = ceildiv(sweep_idx * ntick, self.nsweep)
if tick > prev_tick:
event = SweepEvent(self.sweep[sweep_idx], None)
yield (tick, event)
ONLY_WHITESPACE = " \t\n\r\x0b\f"
def parse_parametric_sweep(
self: MMKParser, is_legato: bool, retrigger_sweep: bool = False
):
"""Read parameters, and print a sweep with fixed duration.
%sweep{ "name"
[begin,end) beats # Increasing intervals have step 1.
[,end] beats # Decreasing intervals have step -1.
[begin,end) beats ~scaling
# Notes above/below the root pitch have speed multiplied by (f/f0) ** scaling.
TODO:
= # `env_sub` ticks per wave.
=/3 # `env_sub*3` ticks per wave.
=/3 ~scaling # Apply scaling to above.
:
# Duration in beats, separate from outside lxx events.
l/4
o4 c1 c/2 c c
# Loops are unrolled.
[c <b >]5
}
"""
stream = self.stream
stream.skip_spaces()
# Get name
name, _ = stream.get_quoted()
meta = self.wavetable[name]
# Get sweep, duration pairs
sweeps = []
stream.skip_spaces()
while stream.peek() != ":":
# TODO unit test comments in header, especially skipping spaces.
if stream.peek() == ";":
self.skip_comment()
stream.skip_spaces()
continue
sweep_str, _ = stream.get_word(ONLY_WHITESPACE)
if sweep_str == "=":
raise MMKError("sweep{ = at fixed rate is not supported yet")
# [x,y)
sweep_range = parse_wave_range(sweep_str, meta.nwave)
# Read sweep duration
if stream.peek() == ">":
# Rate: Fraction of all waves, per tick
stream.get_char()
word, _ = stream.get_word()
if "/" in word or "." in word:
nwave_percent_per_tick = parse_frac(word)
else:
nwave_percent_per_tick = int(word)
ntick = round(1 / nwave_percent_per_tick * len(sweep_range) / meta.nwave)
elif stream.peek() == "*":
# One wave every X ticks
stream.get_char()
duration_mul = parse_frac(stream.get_word()[0])
ntick = round(meta.nwave * duration_mul)
else:
ntick, _ = stream.get_time()
if ntick is None:
raise MMKError("failed to specify sweep time")
# Read speed scaling exponent.
if stream.peek() == "~":
stream.skip_chars(1)
pitch_scaling = safe_eval(stream.get_word()[0], numbers.Real)
else:
pitch_scaling = 0
sweeps.append(
LinearSweep(
sweep_range,
ntick,
pitch_scaling,
meta.root_pitch,
)
)
stream.skip_spaces()
# stream.skip_spaces(exclude=set(WHITESPACE) - set(ONLY_WHITESPACE))
# I can't remember why I ever marked colons as whitespace...
# It's not used in standard AMK MML.
# Using colon as a syntactic separator is creating a world of pain.
_separator = stream.get_char()
stream.skip_spaces()
# Get notes
notes = []
note_chars = set("abcdefg")
octave = None
default_ntick = None
while stream.peek() != "}":
c = stream.get_char()
# noinspection PyUnreachableCode
if False:
pass
elif c == ";":
self.skip_comment()
# octave
elif c == "o":
octave = int(stream.get_char())
elif c == ">":
octave += 1
elif c == "<":
octave -= 1
# Legato/slur toggle
elif c == "_":
notes.append(ToggleLegato)
# note length
elif c == "l":
default_ntick, _ = stream.get_time()
# notes
elif c in note_chars:
# Note pitch
# TODO note to midi function?
sharp_flat = stream.peek()
if sharp_flat in accidental2pitch:
stream.skip_chars(1)
dpitch = accidental2pitch[sharp_flat]
else:
dpitch = 0
if octave is None:
raise MMKError("You must assign octave within sweep{}")
midi_pitch = note2pitch[c] + dpitch + OCTAVE * (octave + 1)
# Note duration
ntick, _ = stream.get_time()
try:
ntick = coalesce(ntick, default_ntick)
except TypeError:
raise MMKError(
"You must assign lxx within sweep{} before entering untimed notes"
)
notes.append(Note(ntick, midi_pitch))
# ties
elif c == "^":
tie_ntick, _ = stream.get_time()
notes[-1].ntick += tie_ntick
stream.skip_spaces()
# Eat close }
stream.skip_chars(1)
_put_sweep(self, sweeps, notes, meta, is_legato, retrigger_sweep)
if __name__ == "__main__":
exit(main(sys.argv[1:]))
```
#### File: amktools/amktools/util.py
```python
from typing import TypeVar, Optional
def ceildiv(n: int, d: int) -> int:
return -(-n // d)
T = TypeVar("T")
def coalesce(*args: Optional[T]) -> T:
if len(args) == 0:
raise TypeError("coalesce expected >=1 argument, got 0")
for arg in args:
if arg is not None:
return arg
raise TypeError("coalesce() called with all None")
```
#### File: amktools/tests/test_substring_trie.py
```python
import pygtrie
import pytest
from amktools.utils.substring_trie import StringSlice
def test_stringslice():
x = StringSlice("aaaabbbbcccc", 8)
assert x[0] == "c"
assert x[-1] == "c"
assert x[:] == "cccc"
assert x[-4:] == "cccc"
with pytest.raises(TypeError):
x[None]
def test_substring_trie():
trie = pygtrie.CharTrie()
trie["cccc"] = True
input = StringSlice("aaaabbbbcccc", 8)
assert trie.longest_prefix(input).key == "cccc"
``` |
{
"source": "jimbo1qaz/loopplayer",
"score": 3
} |
#### File: jimbo1qaz/loopplayer/loopify.py
```python
import os
import math
def path_append(*it):
for el in it:
os.environ['PATH'] += os.pathsep + el
path_append(os.curdir, r'C:\Program Files (x86)\sox-14-4-2')
from plumbum import BG, FG, local as pb, cli
assert BG, FG
SOXI = pb['sox']['--i']
SOX = pb['sox']
FFMPEG = pb['ffmpeg']
def skip_spaces(in_str, index, character=None):
"""
@type in_str: str
@type index: int
"""
if index < 0:
raise ValueError('cannot exclude negative substring')
splitted = in_str.split(sep=character, maxsplit=index)
if index < len(splitted):
return splitted[index]
return ''
def keep_leading(in_str, index, character=None):
if index < 0: raise ValueError('cannot get negative substring')
if index == 0: return ''
num_items = len(in_str.split(character))
if index >= num_items: return in_str
# wtf pep8
return in_str.rsplit(sep=character, maxsplit=num_items - index)[0]
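# Illustrative examples (added, not part of the original script):
#   skip_spaces('foo.bar.wav', 1, '.')  -> 'bar.wav'  (drop the first field)
#   keep_leading('foo.bar.wav', 1, '.') -> 'foo'      (keep the first field)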
def expand(s, *args):
formatted = s % args
return [sub.replace('~', ' ') for sub in formatted.split()]
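# Illustrative example (added, not part of the original script):
#   expand('-t %s -i %s', 30, 'my~file.txt') -> ['-t', '30', '-i', 'my file.txt']
# i.e. '~' stands in for a space so the argument survives str.split().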
# **** loopify util
def get_len(wavname):
return int(SOXI['-s', wavname]())
def get_rate(wavname):
return int(SOXI['-r', wavname]())
def get_base_ext(filename):
rev = filename[::-1]
base = skip_spaces(rev, 1, '.')[::-1]
ext = keep_leading(rev, 1, '.')[::-1]
return base, ext
def smp(n):
return '=%ss' % n
class Looper:
def get_loop_overlap(self, sample_rate):
# if LOOP_SECONDS:
# return self.sample_rate * LOOP_SECONDS
# else:
# return LOOP_SAMPLES
return sample_rate * self.LOOP_SECONDS
def get_loop_data(self):
""" Initializes and returns sample_rate, loopStart, and loopEnd. """
# --LoooopLoooop
# a b
# start end
sample_rate = get_rate(self.x1name)
a = get_len(self.x1name)
b = get_len(self.x2name)
loop_len = b - a
overlap = self.get_loop_overlap(sample_rate)
loopStart = a - loop_len + overlap
loopEnd = a + overlap
return [sample_rate, loopStart, loopEnd]
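# Worked example (added for illustration, not in the original source): if x1 is
# 400000 samples, x2 is 500000 samples at 44100 Hz and padding is 0 seconds,
# then loop_len = 100000, overlap = 0, loopStart = 300000 and loopEnd = 400000,
# i.e. the loop region covers the final loop_len samples of the shorter file.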
def generate_out_base(self, loopdata):
return '.'.join([self.title] + [str(s) for s in loopdata])
def generate_outname(self, ext):
return self.out_base + ext
def __init__(self, x1name, x2name, title, padding):
self.x1name = x1name
self.x2name = x2name
self.title = title
self.LOOP_SECONDS = padding
# self.LOOP_SAMPLES = 0
loopdata = self.get_loop_data()
[self.sample_rate, self.loopStart, self.loopEnd] = loopdata
self.loop_len = self.loopEnd - self.loopStart
# We want to loop the original (x2) file.
# Out base = "title.rate.begin.end" plus whatever file format.
# Out wav = "title.rate.begin.end.wav".
self.out_base = self.generate_out_base(loopdata)
self.wav = self.generate_outname('.wav')
def loopify(self, compress=True):
samples = self.loopEnd
orig = self.x2name
# BUG: Firefox currently doesn't support 24-bit WAV.
# https://bugzilla.mozilla.org/show_bug.cgi?id=864780
# TODO: Firefox bug is resolved. Should we use 24?
args = ['trim', '0s', smp(samples)]
SOX[orig, '-b', '16', self.wav][args] & FG
# Convert to Ogg.
# BUG: Chrome currently doesn't support WebAudio Opus.
# https://bugs.chromium.org/p/chromium/issues/detail?id=482934
# Oh, and sox also doesn't support opus.
if compress:
ogg = self.generate_outname('.ogg')
logg = self.generate_outname('.logg')
# Ogg Vorbis VBR
# -1=bad, 10=good, default=3 (112kbps)
# we use 6 ~= 192kbps
SOX[orig, '-C', '6',
'--add-comment', 'LOOP_START=%s' % self.loopStart,
'--add-comment', 'LOOP_END=%s' % self.loopEnd,
ogg][args] & FG
os.rename(ogg, logg)
def extend(self, total_seconds, fadeout=30, extension='opus', codec='', curve='squ'):
"""
assemble an extended sound file using "intro_loop" and "loop_only".
`codec` is split and passed as an argument. It can be used for
codec selection/configuration/bitrate.
`curve` is the fadeout curve. 'squ' is linear-power. 'ipar' is very subtle.
"""
intro_loop = self.wav
loop_only = self.title + '-loop.wav'
# **** Generate looped section only.
SOX[intro_loop, loop_only, 'trim',
smp(self.loopStart),
smp(self.loopEnd)
] & FG
# **** Calculate loop count.
# loopEnd + n*loop_len >= total samples
# n = ceil((total_samples - loopend) / loop_len)
# add 1 for luck
total_samples = total_seconds * self.sample_rate
n = math.ceil(
(total_samples - self.loopEnd) / self.loop_len
) + 1
# **** Tell FFmpeg to: intro_loop, repeat(loop_only).
CAT_TXT = 'temp-extend.txt'
with open(CAT_TXT, 'w') as file:
FORMAT = "file '%s'\n"
file.write(FORMAT % intro_loop)
for i in range(n):
file.write(FORMAT % loop_only)
# **** Use FFmpeg to loop audio. COMPRESSION IS CALLER-DETERMINED.
extended = self.title + '-extend.' + extension
# Documentation:
# https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu
# https://trac.ffmpeg.org/wiki/Encode/AAC
# https://www.ffmpeg.org/ffmpeg-codecs.html
FFMPEG[expand(
'-loglevel warning -hide_banner -y '
'-t %s -f concat -i %s ' # Concatenate intro_loop and loop_only.
'-t %s '
,
total_seconds, CAT_TXT, total_seconds # FIXME why total_seconds twice?
# FIXME ffmpeg lacks AAC length field?
# TODO is m4a fixed?
)][
'-af', 'afade = t=out: st=%s: d=%s: curve=%s'
% (total_seconds - fadeout, fadeout, curve)
][codec.split()
][extended] & FG
class LooperApp(cli.Application):
extend = cli.Flag(["-e", "--extend"], help="Extend the file, as well as looping.")
extend_only = cli.Flag(["-E", "--extend-only"], help="Extend the file, and skip logg file compression.")
def main(self, x1name, x2name, outname, padding=0):
padding = int(padding)
looper = Looper(x1name, x2name, outname, padding)
compress = True
if self.extend_only:
compress = False
looper.loopify(compress=compress)
if self.extend or self.extend_only:
looper.extend(1800, extension='m4a', codec='-c:a libfdk_aac -cutoff 20000 -vbr 5')
# "Note, the VBR setting is unsupported and only works with some parameter combinations"
# https://hydrogenaud.io/index.php/topic,95989.msg817833.html#msg817833
if __name__ == '__main__':
LooperApp.run()
``` |
{
"source": "jimbo1qaz/ovgenpy",
"score": 2
} |
#### File: corrscope/gui/util.py
```python
import html
from itertools import groupby
from operator import itemgetter
from typing import TypeVar, Iterable, Generic, Tuple, Any, Optional
import matplotlib.colors
from qtpy.QtCore import QMutex, QMutexLocker
from qtpy.QtWidgets import QErrorMessage, QWidget
from corrscope.config import CorrError
def color2hex(color: Any) -> str:
if color is None:
return ""
try:
return matplotlib.colors.to_hex(color, keep_alpha=False)
except ValueError:
raise CorrError(f"invalid color {color}")
except Exception as e:
raise CorrError(f"doubly invalid color {color}, raises {e} (report bug!)")
T = TypeVar("T")
class Locked(Generic[T]):
"""Based off https://stackoverflow.com/a/37606669"""
def __init__(self, obj: T):
super().__init__()
self.obj = obj
self.lock = QMutex()
def set(self, value: T) -> T:
# We don't actually need a mutex since Python holds the GIL during reads and
# writes. But keep it anyway.
with QMutexLocker(self.lock):
self.obj = value
return value
def get(self) -> T:
with QMutexLocker(self.lock):
return self.obj
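# Illustrative usage (added, not part of the original module):
#   flag = Locked(False)
#   flag.set(True)
#   assert flag.get() is True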
class TracebackDialog(QErrorMessage):
w = 640
h = 360
template = """\
<style>
body {
white-space: pre-wrap;
}
</style>
<body>%s</body>"""
def __init__(self, parent: Optional[QWidget] = None):
QErrorMessage.__init__(self, parent)
self.resize(self.w, self.h)
def showMessage(self, message: str, type: Any = None) -> None:
message = self.template % (html.escape(message))
QErrorMessage.showMessage(self, message, type)
def find_ranges(iterable: Iterable[T]) -> Iterable[Tuple[T, int]]:
"""Extracts consecutive runs from a list of items.
:param iterable: List of items.
:return: Iterable of (first elem, length).
"""
for group in consecutive_groups(iterable):
group = list(group)
yield group[0], len(group)
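# Illustrative example (added, not part of the original module):
#   list(find_ranges([1, 2, 3, 7, 8])) -> [(1, 3), (7, 2)]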
# Taken from more-itertools 4.3.0
def consecutive_groups(iterable, ordering=lambda x: x):
"""Yield groups of consecutive items using :func:`itertools.groupby`.
The *ordering* function determines whether two items are adjacent by
returning their position.
By default, the ordering function is the identity function. This is
suitable for finding runs of numbers:
>>> iterable = [1, 10, 11, 12, 20, 30, 31, 32, 33, 40]
>>> for group in consecutive_groups(iterable):
... print(list(group))
[1]
[10, 11, 12]
[20]
[30, 31, 32, 33]
[40]
For finding runs of adjacent letters, try using the :meth:`index` method
of a string of letters:
>>> from string import ascii_lowercase
>>> iterable = 'abcdfgilmnop'
>>> ordering = ascii_lowercase.index
>>> for group in consecutive_groups(iterable, ordering):
... print(list(group))
['a', 'b', 'c', 'd']
['f', 'g']
['i']
['l', 'm', 'n', 'o', 'p']
"""
for k, g in groupby(enumerate(iterable), key=lambda x: x[0] - ordering(x[1])):
yield map(itemgetter(1), g)
``` |
{
"source": "Jimbo51000/bsdr",
"score": 2
} |
#### File: Jimbo51000/bsdr/test_model.py
```python
import argparse
import random
from crowd_dataset import CrowdDataset
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import cv2
import numpy as np
import os
import random, string
import math
import pickle
from collections import OrderedDict
import torch
from torch import nn as nn, optim as optim
from torch.autograd import Variable
import datetime
import scipy.stats as ss
from pdb import set_trace as bp
from models import BSDR_Net
from models import load_rot_model_blocks, check_BN_no_gradient_change
from models import check_conv_no_gradient_change, set_batch_norm_to_eval
from models import load_net
from noisy_gts import create_noisy_gt
from models import NRN
import warnings
warnings.filterwarnings("ignore")
parser = argparse.ArgumentParser(description='PyTorch BSDR Testing')
parser.add_argument('--gpu', default=1, type=int,
help='GPU number')
parser.add_argument('--dataset', default="parta", type=str,
help='dataset to train on')
parser.add_argument('--model-name', default="", type=str,
help='name of model file')
def log(f, txt, do_print=1):
txt = str(datetime.datetime.now()) + ': ' + txt
if do_print == 1:
print(txt)
f.write(txt + '\n')
# Get the filename for the model stored after 'epochs_over' epochs got over
def get_filename(net_name, epochs_over):
return net_name + "_epoch_" + str(epochs_over) + ".pth"
def save_checkpoint(state, fdir, name='checkpoint.pth'):
filepath = os.path.join(fdir, name)
torch.save(state, filepath)
def print_graph(maps, title, save_path):
fig = plt.figure()
st = fig.suptitle(title)
for i, (map, args) in enumerate(maps):
plt.subplot(1, len(maps), i + 1)
if len(map.shape) > 2 and map.shape[0] == 3:
# bp()
plt.imshow(map.transpose((1, 2, 0)).astype(np.uint8),aspect='equal', **args)
# bp()
else:
# bp()
plt.imshow(map, aspect='equal', **args)
plt.colorbar()
# bp()
plt.axis('off')
plt.savefig(save_path + ".png", bbox_inches='tight', pad_inches = 0)
fig.clf()
plt.clf()
plt.close()
excluded_layers = ['conv4_1', 'conv4_2', 'conv5_1']
@torch.no_grad()
def test_function(X, Y, network):
"""
Evaluation of network on test and valid set
Parameters
----------
X : input images (B,3,h,w)
Y : ground truth (B,1,h/8,w/8)
network : BSDR object
"""
X = torch.autograd.Variable(torch.from_numpy(X)).cuda()
Y = torch.autograd.Variable(torch.from_numpy(Y)).cuda()
network = network.cuda()
network.eval()
output = network(X) # (B,1,h,w)
loss = 0.0
loss_criterion = nn.MSELoss(size_average=True)
# bp()
loss = loss_criterion(output, Y)
count_error = torch.abs(torch.sum(Y.view(Y.size(0), -1), dim=1) - torch.sum(output.view(output.size(0), -1), dim=1))
network.train()
network = set_batch_norm_to_eval(network)
return loss.item(), output.cpu().detach().numpy(), count_error.cpu().detach().numpy()
def test_network(dataset, set_name, network, print_output=False):
"""
Main loop for evaluation of BSDR network
Parameters
----------
dataset : dataset object for retrieving data from test/valid set
set_name : choose the test / valid set
network : BSDR object
print_output : whether (or where) to dump prediction plots
"""
if isinstance(print_output, str):
print_path = print_output
elif isinstance(print_output, bool) and print_output:
print_path = model_save_dir+'/dump'
else:
print_path = None
loss_list = []
count_error_list = []
for idx, data in enumerate(dataset.test_get_data(set_name)):
image_name, Xs, Ys = data
image = Xs[0].transpose((1, 2, 0))
image = cv2.resize(image, (image.shape[1] // output_downscale, image.shape[0] // output_downscale))
loss, pred_dmap, count_error = test_function(Xs, Ys, network)
# bp()
max_val = max(np.max(pred_dmap[0, 0].reshape(-1)), np.max(Ys[0, 0].reshape(-1)))
maps = [(np.transpose(image,(2,0,1)), {}),
(pred_dmap[0,0], {'cmap': 'jet', 'vmin': 0., 'vmax': max_val}),
(Ys[0,0], {'cmap': 'jet', 'vmin': 0., 'vmax': max_val})]
# bp()
loss_list.append(loss)
count_error_list.append(count_error)
# -- Plotting boxes
if print_path:
print_graph(maps, "Gt:{},Pred:{}".format(np.sum(Ys),np.sum(pred_dmap)), os.path.join(print_path, image_name))
loss = np.mean(loss_list)
mae = np.mean(count_error_list)
mse = np.sqrt(np.mean(np.square(count_error_list)))
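# Note (added for clarity, not in the original source): despite the name, this is
# the root-mean-square count error (RMSE) over the evaluated set.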
return {'loss1':loss,'new_mae':mae,'mse':mse}, mae
def train_network():
"""
Main training loop for BSDR
"""
network = BSDR_Net()
model_save_path = os.path.join(model_save_dir, 'train2')
if not os.path.exists(model_save_path):
os.makedirs(model_save_path)
os.makedirs(os.path.join(model_save_path, 'snapshots'))
os.makedirs(os.path.join(model_save_dir,'dump'))
os.makedirs(os.path.join(model_save_dir,'dump_test'))
global f
snapshot_path = os.path.join(model_save_path, 'snapshots')
f = open(os.path.join(model_save_path, 'train0.log'), 'w')
# -- Logging Parameters
log(f, 'args: ' + str(args))
log(f, 'model: ' + str(network), False)
network = load_net(network,'models_BSDR/train2/snapshots',str(args.model_name))
log(f, 'Testing...')
epoch_test_losses, mae = test_network(dataset, 'test', network, False)
log(f, 'TEST epoch: ' + str(-1) + ' test loss1, mae:' + str(epoch_test_losses))
return
if __name__ == '__main__':
args = parser.parse_args()
# -- Assign GPU
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
# -- Assertions
assert (args.dataset)
# -- Setting seeds for reproducability
np.random.seed(11)
random.seed(11)
torch.manual_seed(11)
torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.cuda.manual_seed(11)
torch.cuda.manual_seed_all(11)
# -- Dataset paths
if args.dataset == "parta":
validation_set = 60
path = '../dataset/ST_partA/'
output_downscale = 8
density_map_sigma = 1
blur_sigma = 1
image_size_min = 256
image_crop_size = 256
network_output_downscale = 4
elif args.dataset == "ucfqnrf":
validation_set = 240
path = '../dataset/UCF-QNRF_ECCV18/'
output_downscale = 8
density_map_sigma = 1
blur_sigma = 1
image_size_min = 256
image_crop_size = 256
network_output_downscale = 4
else:
validation_set = 0
output_downscale = 8
path = '../../dataset/ST_partA_' + args.dataset.replace('parta_', '') + '/'
model_save_dir = './models_BSDR_test'
dataset = CrowdDataset(path, name=args.dataset, valid_set_size=validation_set,
gt_downscale_factor=output_downscale,density_map_sigma=density_map_sigma,
image_size_multiple = output_downscale * network_output_downscale,
image_size_min = image_size_min , image_crop_size = image_crop_size)
#print(dataset.data_files['test_valid'], len(dataset.data_files['test_valid']))
print(dataset.data_files['train'], len(dataset.data_files['train']))
# -- Train the model
train_network()
``` |
{
"source": "jimbo8098/molecule",
"score": 2
} |
#### File: src/molecule/logger.py
```python
import logging
import os
import time
from functools import lru_cache, wraps
from typing import Callable, Iterable
from enrich.logging import RichHandler
from molecule.console import console
from molecule.text import underscore
LOG = logging.getLogger(__name__)
LOG_LEVEL_LUT = {
0: logging.INFO,
1: logging.DEBUG,
}
def configure() -> None:
"""
Configure a molecule root logger.
All other loggers will inherit the configuration we set here.
"""
logger = logging.getLogger("molecule")
handler = RichHandler(
console=console, show_time=False, show_path=False, markup=True
) # type: ignore
logger.addHandler(handler)
logger.propagate = False
logger.setLevel(logging.INFO)
def set_log_level(log_level: int, debug: bool) -> None:
"""
Set logging level.
:param log_level: verbosity control (0 - INFO, 1 - DEBUG)
:param debug: debug mode indicator
"""
# If we get verbosity level > 1, we just use debug because this is the
# most detailed log level we have.
if debug:
log_level = 1 # DEBUG from the LOG_LEVEL_LUT
logging.getLogger("molecule").setLevel(LOG_LEVEL_LUT.get(log_level, logging.DEBUG))
def get_logger(name: str) -> logging.Logger:
"""
Return a child logger.
Returned logger inherits configuration from the molecule logger.
"""
return logging.getLogger("molecule." + name)
def github_actions_groups(func: Callable) -> Callable:
"""Print group indicators before/after execution of a method."""
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
scenario = self._config.scenario.name
subcommand = underscore(self.__class__.__name__)
console.print(
"::group::",
f"[ci_info]Molecule[/] [scenario]{scenario}[/] > [action]{subcommand}[/]",
sep="",
markup=True,
emoji=False,
highlight=False,
)
try:
return func(*args, **kwargs)
finally:
console.print("::endgroup::", markup=True, emoji=False, highlight=False)
return wrapper
def gitlab_ci_sections(func: Callable) -> Callable:
"""Print group indicators before/after execution of a method."""
# GitLab requires:
# - \r (carriage return)
# - \e[0K (clear line ANSI escape code. We use \033 for the \e escape char)
clear_line = "\r\033[0K"
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
scenario = self._config.scenario.name
subcommand = underscore(self.__class__.__name__)
console.print(
f"section_start:{int(time.time())}:{scenario}.{subcommand}",
end=clear_line,
markup=False,
emoji=False,
highlight=False,
)
console.print(
# must be one color for the whole line or gitlab sets odd widths to each word.
f"[ci_info]Molecule {scenario} > {subcommand}[/]",
end="\n",
markup=True,
emoji=False,
highlight=False,
)
try:
return func(*args, **kwargs)
finally:
console.print(
f"section_end:{int(time.time())}:{scenario}.{subcommand}",
end=f"{clear_line}\n",
markup=False,
emoji=False,
highlight=False,
)
return wrapper
def travis_ci_folds(func: Callable) -> Callable:
"""Print group indicators before/after execution of a method."""
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
scenario = self._config.scenario.name
subcommand = underscore(self.__class__.__name__)
console.print(
f"travis_fold:start:{scenario}.{subcommand}",
f"[ci_info]Molecule[/] [scenario]{scenario}[/] > [action]{subcommand}[/]",
sep="",
markup=True,
emoji=False,
highlight=False,
)
try:
return func(*args, **kwargs)
finally:
console.print(
f"travis_fold:end:{scenario}.{subcommand}",
markup=False,
emoji=False,
highlight=False,
)
return wrapper
def section_logger(func: Callable) -> Callable:
"""Wrap effective execution of a method."""
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
LOG.info(
"[info]Running [scenario]%s[/] > [action]%s[/][/]",
self._config.scenario.name,
underscore(self.__class__.__name__),
extra={"markup": True},
)
rt = func(*args, **kwargs)
# section close code goes here
return rt
return wrapper
@lru_cache()
def get_section_loggers() -> Iterable[Callable]:
"""Return a list of section wrappers to be added."""
default_section_loggers = [section_logger]
if not os.getenv("CI"):
return default_section_loggers
elif os.getenv("GITHUB_ACTIONS"):
return [github_actions_groups] + default_section_loggers
elif os.getenv("GITLAB_CI"):
return [gitlab_ci_sections] + default_section_loggers
elif os.getenv("TRAVIS"):
return [travis_ci_folds] + default_section_loggers
# CI is set but no extra section_loggers apply.
return default_section_loggers
``` |
{
"source": "JimBob3000/python_mini_projects",
"score": 3
} |
#### File: JimBob3000/python_mini_projects/password_generator.py
```python
import random
import string
def generate_password():
# Using a-z, A-Z and 0-9
options = string.ascii_letters + string.digits
# Pick 18 characters from options at random and put into a string
password = "".join(random.choices(options, k=18))
return password
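# A minimal variant (added for illustration, not part of the original script) using
# the cryptographically stronger secrets module instead of random:
#   import secrets
#   password = "".join(secrets.choice(options) for _ in range(18))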
print(f"New password: {generate_password()}")
``` |
{
"source": "jimbob88/MonkeyTest",
"score": 3
} |
#### File: MonkeyTest/src/monkeytest.py
```python
from __future__ import division, print_function # for compatability with py2
try:
import Tkinter as tk
import ttk
import tkFileDialog as filedialog
import tkMessageBox as messagebox
except ImportError:
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import filedialog
from tkinter import messagebox
try:
import ttkthemes
except:
pass
import os
import sys
import platform
from random import shuffle
import argparse
import json
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
import numpy as np
import colorama as col
ASCIIART = r'''Brought to you by coding monkeys.
Eat bananas, drink coffee & enjoy!
_
,//)
) /
/ /
_,^^,/ /
(G,66<_/
_/\_,_) _
/ _ \ ,' )
/ /"\ \/ ,_\
__(,/ > e ) / (_\.oO
\_ / ( -,_/ \_/
U \_, _)
( /
>/
(.oO
''' # r''''''
# ASCII-art: used part of text-image @ http://www.ascii-art.de/ascii/mno/monkey.txt
# it seems that its original author is <NAME> (mic aka miK)
# text-image is a bit old (1999) so I couldn't find a way to communicate with author
# if You're reading this and You're an author -- feel free to write me
try: # if Python >= 3.3 use new high-res counter
from time import perf_counter as time
except ImportError: # else select highest available resolution counter
if sys.platform[:3] == 'win':
from time import clock as time
else:
from time import time
def get_args():
parser = argparse.ArgumentParser(description='Arguments', formatter_class = argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-f', '--file',
required=False,
action='store',
default='/tmp/monkeytest',
help='The file to read/write to')
parser.add_argument('-s', '--size',
required=False,
action='store',
type=int,
default=128,
help='Total MB to write')
parser.add_argument('-w', '--write-block-size',
required=False,
action='store',
type=int,
default=1024,
help='The block size for writing in bytes')
parser.add_argument('-r', '--read-block-size',
required=False,
action='store',
type=int,
default=512,
help='The block size for reading in bytes')
parser.add_argument('-j', '--json',
required=False,
action='store',
help='Output to json file')
parser.add_argument('-m', '--mode',
required=False,
default='cli',
help='Choose either CLI or GUI or TUI')
parser.add_argument('-g', '--graph',
required=False,
default=None,
help='Save a GUI graph into a PNG, Options: Write, Read, Write+Read, Read+Write, Write/Read')
parser.add_argument('-gf', '--graph-file',
required=False,
default='/tmp/',
help='Set graph save location')
args = parser.parse_args()
return args
class Benchmark:
def __init__(self, file,write_mb, write_block_kb, read_block_b):
self.file = file
self.write_mb = write_mb
self.write_block_kb = write_block_kb
self.read_block_b = read_block_b
def run(self, show_progress=True, update_pb=False):
wr_blocks = int(self.write_mb * 1024 / self.write_block_kb)
rd_blocks = int(self.write_mb * 1024 * 1024 / self.read_block_b)
self.write_results = self.write_test( 1024 * self.write_block_kb, wr_blocks, show_progress, update_pb)
self.read_results = self.read_test(self.read_block_b, rd_blocks, show_progress, update_pb)
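# Worked example (added for illustration, not in the original source): with the
# defaults write_mb=128, write_block_kb=1024 and read_block_b=512 this writes
# 128 blocks of 1 MB each and reads back 262144 blocks of 512 B each.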
def write_test(self, block_size, blocks_count, show_progress=True, update_pb=False):
'''
Tests write speed by writing blocks_count random blocks to disk,
each of block_size bytes.
Returns a list of per-block write times in seconds.
'''
f = os.open(self.file, os.O_CREAT | os.O_WRONLY, 0o777) # low-level I/O
self.write_took = []
self.wperc_took = []
prev_perc = 0
for i in range(blocks_count):
if show_progress:
# dirty trick to actually print progress on each iteration
sys.stdout.write('\rWriting: {:.2f} %'.format(
(i + 1) * 100 / blocks_count))
sys.stdout.flush()
if update_pb is not False:
update_pb["value"] = ((i + 1) * 100 / blocks_count)
update_pb.update()
prev_perc = ((i + 1) * 100 / blocks_count)
buff = os.urandom(block_size)
start = time()
os.write(f, buff)
os.fsync(f) # force write to disk
t = time() - start
self.write_took.append(t)
self.wperc_took.append(((i + 1) * 100 / blocks_count))
os.close(f)
if update_pb is not False:
update_pb["value"] = 100
return self.write_took
def read_test(self, block_size, blocks_count, show_progress=True, update_pb=False):
'''
Performs a read speed test by reading blocks from random offsets in the
file, at most blocks_count blocks, each of block_size bytes,
until the end of the file is reached.
Returns a list of per-block read times in seconds.
'''
f = os.open(self.file, os.O_RDONLY, 0o777) # low-level I/O
# generate random read positions
offsets = list(range(0, blocks_count * block_size, block_size))
shuffle(offsets)
self.read_took = []
self.rperc_took = []
prev_perc = 0
for i, offset in enumerate(offsets, 1):
if show_progress and i % int(self.write_block_kb * 1024 / self.read_block_b) == 0:
# read is faster than write, so try to equalize print period
sys.stdout.write('\rReading: {:.2f} %'.format(
(i + 1) * 100 / blocks_count))
sys.stdout.flush()
if update_pb is not False and ((i + 1) * 100 / blocks_count)-1 > prev_perc:
update_pb["value"] = ((i + 1) * 100 / blocks_count)
update_pb.update()
prev_perc = ((i + 1) * 100 / blocks_count)
start = time()
os.lseek(f, offset, os.SEEK_SET) # set position
buff = os.read(f, block_size) # read from position
t = time() - start
if not buff: break # if EOF reached
self.read_took.append(t)
self.rperc_took.append(((i + 1) * 100 / blocks_count))
os.close(f)
if update_pb is not False:
update_pb["value"] = 100
return self.read_took
def print_result(self):
result = ('\n\nWritten {} MB in {:.4f} s\nWrite speed is {:.2f} MB/s'
'\n max: {max:.2f}, min: {min:.2f}\n'.format(
self.write_mb, sum(self.write_results), self.write_mb / sum(self.write_results),
max=self.write_block_kb / (1024 * min(self.write_results)),
min=self.write_block_kb / (1024 * max(self.write_results))))
result += ('\nRead {} x {} B blocks in {:.4f} s\nRead speed is {:.2f} MB/s'
'\n max: {max:.2f}, min: {min:.2f}\n'.format(
len(self.read_results), self.read_block_b,
sum(self.read_results), self.write_mb / sum(self.read_results),
max=self.read_block_b / (1024 * 1024 * min(self.read_results)),
min=self.read_block_b / (1024 * 1024 * max(self.read_results))))
print(result)
print(ASCIIART)
def return_result(self):
result = ('\n\nWritten {} MB in {:.4f} s\nWrite speed is {:.2f} MB/s'
'\n max: {max:.2f}, min: {min:.2f}\n'.format(
self.write_mb, sum(self.write_results), self.write_mb / sum(self.write_results),
max=self.write_block_kb / (1024 * min(self.write_results)),
min=self.write_block_kb / (1024 * max(self.write_results))))
result += ('\nRead {} x {} B blocks in {:.4f} s\nRead speed is {:.2f} MB/s'
'\n max: {max:.2f}, min: {min:.2f}\n'.format(
len(self.read_results), self.read_block_b,
sum(self.read_results), self.write_mb / sum(self.read_results),
max=self.read_block_b / (1024 * 1024 * min(self.read_results)),
min=self.read_block_b / (1024 * 1024 * max(self.read_results))))
return result
def get_json_result(self,output_file):
results_json = {}
results_json["Written MB"] = self.write_mb
results_json["Write time (sec)"] = round(sum(self.write_results),2)
results_json["Write speed in MB/s"] = round(self.write_mb / sum(self.write_results),2)
results_json["Read blocks"] = len(self.read_results)
results_json["Read time (sec)"] = round(sum(self.read_results),2)
results_json["Read speed in MB/s"] = round(self.write_mb / sum(self.read_results),2)
with open(output_file,'w') as f:
json.dump(results_json,f)
class benchmark_gui:
def __init__(self, master, file, write_mb, write_block_kb, read_block_b):
self.master = master
self.master.title('Monkey Test')
self.master.resizable(0, 0)
self.main_frame = ttk.Frame(self.master)
self.main_frame.pack()
self.current_file = tk.StringVar()
self.current_file.set(file)
self.write_mb = tk.IntVar()
self.write_mb.set(write_mb)
self.write_block_kb = tk.IntVar()
self.write_block_kb.set(write_block_kb)
self.read_block_b = tk.IntVar()
self.read_block_b.set(read_block_b)
self.show_progress = tk.IntVar()
self.show_progress.set(1)
self.initialize()
def initialize(self):
ttk.Label(self.main_frame, textvariable=self.current_file, relief='groove').grid(row=1, column=1, padx=5,pady=5)
ttk.Button(self.main_frame, text='Open', command=lambda: self.current_file.set(filedialog.asksaveasfilename(initialdir = "~",title = "Save As" ))).grid(row=1, column=2, padx=5, pady=5)
ttk.Label(self.main_frame, text='Write MB').grid(row=2, column=1, padx=5, pady=5)
self.write_mb_spinbox = tk.Spinbox(self.main_frame, justify='center', textvariable=self.write_mb, width=8, from_=0, to=999999)
self.write_mb_spinbox.grid(row=2, column=2, padx=5, pady=5)
ttk.Label(self.main_frame, text='Write Block KB').grid(row=3, column=1, padx=5, pady=5)
self.write_block_kb_spinbox = tk.Spinbox(self.main_frame, justify='center', textvariable=self.write_block_kb, width=8, from_=0, to=999999)
self.write_block_kb_spinbox.grid(row=3, column=2, padx=5, pady=5)
ttk.Label(self.main_frame, text='Read Block B').grid(row=4, column=1, padx=5, pady=5)
self.read_block_b_spinbox = tk.Spinbox(self.main_frame, justify='center', textvariable=self.read_block_b, width=8, from_=0, to=999999)
self.read_block_b_spinbox.grid(row=4, column=2, padx=5, pady=5)
ttk.Checkbutton(self.main_frame, text='Show Progress', variable=self.show_progress).grid(row=5, column=1, columnspan=2)
ttk.Button(self.main_frame, text='Run Monkey Test', command=self.run).grid(row=6, column=1, columnspan=2, padx=5, pady=5)
#file,write_mb, write_block_kb, read_block_b
def run(self):
dummy_check = {'Size': self.write_mb.get(), 'Write Block': self.write_block_kb.get(), 'Read Block': self.read_block_b.get()}
if any(v <= 0 for v in dummy_check.values()):
cont = messagebox.askquestion('Dummy Check Failure', 'One or more value(s) smaller than or equal to 0, Would you like to set these numbers to defaults?', icon = 'warning')
if cont != 'yes':
return
if dummy_check['Size'] <= 0: self.write_mb.set(128)
if dummy_check['Write Block'] <= 0: self.write_block_kb.set(1024)
if dummy_check['Read Block'] <= 0: self.read_block_b.set(512)
if self.current_file.get() == '':
def_file = messagebox.askquestion('No File Selected', 'You have not selected a file, Would you like the default file to be selected?')
if def_file != 'yes':
return
self.current_file.set('/tmp/monkeytest')
file = self.current_file.get()
write_mb = self.write_mb.get()
write_block_kb = self.write_block_kb.get()
read_block_b = self.read_block_b.get()
for widget in self.main_frame.winfo_children():
widget.destroy()
perc_comp_pb = ttk.Progressbar(self.main_frame, orient="horizontal", length=200, mode="determinate")
perc_comp_pb["maximum"] = 100
perc_comp_pb.grid(row=1, column=0, padx=5, pady=5)
benchmark = Benchmark(file,write_mb, write_block_kb, read_block_b)
self.benchmark = benchmark
run_lb = ttk.Label(self.main_frame, text='Running...')
run_lb.grid(row=0, column=0, padx=5, pady=5)
if self.show_progress.get():
benchmark.run(update_pb=perc_comp_pb, show_progress=False)
if not self.show_progress.get():
benchmark.run(show_progress=False)
perc_comp_pb.destroy()
run_lb.destroy()
show_results = tk.Message(self.main_frame, text=benchmark.return_result(), justify='center')
show_results.grid(columnspan=2, row=0, column=0)
self.read_graph = ttk.Button(self.main_frame, text='Read Graph', command=lambda: self.plot('Read', benchmark, self.read_graph))
self.read_graph.grid(row=1, column=0)
self.write_graph = ttk.Button(self.main_frame, text='Write Graph', command=lambda: self.plot('Write', benchmark, self.write_graph))
self.write_graph.grid(row=1, column=1)
ttk.Button(self.main_frame, text='Save JSON File', command=lambda: benchmark.get_json_result(filedialog.asksaveasfilename(initialdir = "~",title = "Save As", defaultextension='.json'))).grid(row=2, column=0)
ttk.Button(self.main_frame, text='Delete File', command=lambda: os.remove(file)).grid(row=2, column=1)
benchmark.print_result()
@classmethod
def plot(self, rw, benchmark, button=False, show=True):
if rw == 'Read':
if button is not False: button.configure(state="disabled")
x = [0] + benchmark.read_took
y = [0] + benchmark.rperc_took
plt.plot(np.cumsum(x), y, label='Read')
if plt.gca().get_title() == '':
plt.title('Read Graph')
else:
plt.title('Write/Read Graph')
elif rw == 'Write':
if button is not False: button.configure(state="disabled")
x = [0] + benchmark.write_took
y = [0] + benchmark.wperc_took
plt.plot(np.cumsum(x), y, label='Write')
if plt.gca().get_title() == '':
plt.title('Write Graph')
else:
plt.title('Read/Write Graph')
plt.legend(loc='upper left')
plt.ylabel('Percent Complete (y)')
plt.xlabel('Time taken (x)')
if show: plt.show()
def main():
args = get_args()
if args.mode.lower() == 'cli':
if os.path.isfile(args.file):
if input('Are you sure you wish to continue? Selected file will be deleted. (Y/N) ').casefold() == 'y':
os.remove(args.file)
else:
print('Terminated')
exit()
if int(args.size) <= 0:
print('{yellow}Total MB to write is smaller than or equal to 0, assuming default value of{end} {red}(128){end}'.format(
yellow=col.Fore.YELLOW, end=col.Style.RESET_ALL, red=col.Fore.RED))
args.size = 128
if int(args.write_block_size) <= 0:
print('{yellow}The block size for writing in bytes is smaller than or equal to 0, assuming default value of{end} {red}(1024){end}'.format(
yellow=col.Fore.YELLOW, end=col.Style.RESET_ALL, red=col.Fore.RED))
args.write_block_size = 1024
if int(args.read_block_size) <= 0:
print('{yellow}The block size for reading in bytes is smaller than or equal to 0, assuming default value of{end} {red}(512){end}'.format(
yellow=col.Fore.YELLOW, end=col.Style.RESET_ALL, red=col.Fore.RED))
args.read_block_size = 512
benchmark = Benchmark(args.file, args.size, args.write_block_size, args.read_block_size)
benchmark.run()
if args.json is not None:
benchmark.get_json_result(args.json)
else:
benchmark.print_result()
os.remove(args.file)
elif args.mode.lower() == 'gui':
if 'ttkthemes' in sys.modules:
root = ttkthemes.ThemedTk()
benchmark_gui_var = benchmark_gui(root, args.file, args.size, args.write_block_size, args.read_block_size)
if platform.system() == 'Linux':
if platform.dist()[0] == 'Ubuntu':
root.set_theme("ubuntu")
else:
root.set_theme("equilux")
elif platform.system() == 'Windows':
root.set_theme("vista")
elif platform.system() == 'Darwin':
root.set_theme("aqua")
root.mainloop()
else:
root = tk.Tk()
benchmark_gui_var = benchmark_gui(root, args.file, args.size, args.write_block_size, args.read_block_size)
root.mainloop()
elif args.mode.lower() == 'tui':
try:
from picotui.context import Context
from picotui.screen import Screen
import picotui.widgets as ptwidgets
import picotui.defs as ptdefs
import picotui.dialogs as ptdialog
except:
print('{red}WARNING:{end} {yellow}picotui not installed, install it with the command:{end}\n\tpip install git+https://github.com/jimbob88/picotui-python2_3.git'.format(
yellow=col.Fore.YELLOW, end=col.Style.RESET_ALL, red=col.Fore.RED))
exit()
with Context():
Screen.attr_color(ptdefs.C_WHITE, ptdefs.C_BLUE)
Screen.cls()
Screen.attr_reset()
dialog = ptwidgets.Dialog(0, 0, 50, 12)
dialog.add(10, 1, "File:")
current_file = ptwidgets.WTextEntry(20, args.file)
dialog.add(17, 1, current_file)
dialog.add(10, 3, "Write MB:")
write_mb = ptwidgets.WTextEntry(17, str(args.size))
dialog.add(20, 3, write_mb)
dialog.add(10, 5, "Write Block KB:")
write_block_kb = ptwidgets.WTextEntry(11, str(args.write_block_size))
dialog.add(26, 5, write_block_kb)
dialog.add(10, 7, "Read Block B:")
read_block_b = ptwidgets.WTextEntry(13, str(args.read_block_size))
dialog.add(24, 7, read_block_b)
ok_b = ptwidgets.WButton(8, "OK")
dialog.add(10, 16, ok_b)
ok_b.finish_dialog = ptwidgets.ACTION_OK
cancel_b = ptwidgets.WButton(8, "Cancel")
dialog.add(30, 16, cancel_b)
cancel_b.finish_dialog = ptwidgets.ACTION_CANCEL
res = dialog.loop()
if res == ptwidgets.ACTION_OK:
os.system('clear' if os.name == 'posix' else 'cls')
if os.path.isfile(current_file.get()):
with Context():
res = ptdialog.DConfirmation("Are you sure you wish to continue? Selected file will be deleted", title="File Exists").result()
if res != ptwidgets.ACTION_OK:
return main()
os.system('clear' if os.name == 'posix' else 'cls')
try:
args.size = int(write_mb.get())
except ValueError:
print('{yellow}Total MB to write is smaller than or equal to 0, assuming default value of{end} {red}(128){end}'.format(
yellow=col.Fore.YELLOW, end=col.Style.RESET_ALL, red=col.Fore.RED))
args.size = 128
try:
args.write_block_size = int(write_block_kb.get())
except ValueError:
print('{yellow}The block size for writing in bytes is smaller than or equal to 0, assuming default value of{end} {red}(1024){end}'.format(
yellow=col.Fore.YELLOW, end=col.Style.RESET_ALL, red=col.Fore.RED))
args.write_block_size = 1024
try:
args.read_block_size = int(read_block_b.get())
except ValueError:
print('{yellow}The block size for reading in bytes is smaller than or equal to 0, assuming default value of{end} {red}(512){end}'.format(
yellow=col.Fore.YELLOW, end=col.Style.RESET_ALL, red=col.Fore.RED))
args.read_block_size = 512
benchmark = Benchmark(current_file.get(), args.size, args.write_block_size, args.read_block_size)
benchmark.run()
if args.json is not None:
benchmark.get_json_result(args.json)
else:
benchmark.print_result()
os.remove(args.file)
if args.graph is not None:
print(args.graph)
os.chdir(args.graph_file)
plt.clf()
benchmark = benchmark if args.mode.lower() != 'gui' else benchmark_gui_var.benchmark
if args.graph.casefold() == 'write':
benchmark_gui.plot('Write', benchmark, show=False)
plt.savefig('graph.png')
elif args.graph.casefold() == 'read':
benchmark_gui.plot('Read', benchmark, show=False)
plt.savefig('graph.png')
elif args.graph.casefold() in ('write+read', 'read+write'):
benchmark_gui.plot('Write', benchmark, show=False)
benchmark_gui.plot('Read', benchmark, show=False)
plt.savefig('graph.png')
elif args.graph.casefold() in ('write/read', 'read/write'):
benchmark_gui.plot('Write', benchmark, show=False)
plt.savefig('graph1.png')
plt.clf()
benchmark_gui.plot('Read', benchmark, show=False)
plt.savefig('graph2.png')
if __name__ == "__main__":
main()
``` |
{
"source": "jimbob88/onion_file_search",
"score": 3
} |
#### File: onion_file_search/onion_file_search/custom_treeview.py
```python
import tkinter as tk
import tkinter.ttk as ttk
import platform
import sys
class AutoScroll(object):
"""
Made By <NAME>:
all rights reserved (via http://page.sourceforge.net/)
"""
def __init__(self, master):
self.set_xscroll(master)
self.set_yscroll(master)
master.grid_columnconfigure(0, weight=1)
master.grid_rowconfigure(0, weight=1)
methods = tk.Grid.__dict__.keys()
for meth in methods:
if meth[0] != '_' and meth not in ("config", "configure"):
setattr(self, meth, getattr(master, meth))
def set_xscroll(self, master):
hsb = ttk.Scrollbar(master, orient="horizontal", command=self.xview)
self.configure(xscrollcommand=self._autoscroll(hsb))
self.grid(column=0, row=0, sticky="nsew")
hsb.grid(column=0, row=1, sticky="ew")
def set_yscroll(self, master):
try:
vsb = ttk.Scrollbar(master, orient="vertical", command=self.yview)
self.configure(yscrollcommand=self._autoscroll(vsb))
vsb.grid(column=1, row=0, sticky="ns")
except Exception:
pass
@staticmethod
def _autoscroll(sbar):
"""Hide and show scrollbar as needed."""
def wrapped(first, last):
first, last = float(first), float(last)
if first <= 0 and last >= 1:
sbar.grid_remove()
else:
sbar.grid()
sbar.set(first, last)
return wrapped
def __str__(self):
return str(self.master)
def _create_container(func):
"""Creates a ttk Frame with a given master, and use this new frame to
place the scrollbars and the widget."""
def wrapped(cls, master, **kw):
container = ttk.Frame(master)
container.bind("<Enter>", lambda e: _bound_to_mousewheel(e, container))
container.bind(
"<Leave>", lambda e: _unbound_to_mousewheel(e, container))
return func(cls, container, **kw)
return wrapped
class ScrolledTreeView(AutoScroll, ttk.Treeview):
"""A standard ttk Treeview widget with scrollbars that will
automatically show/hide as needed."""
@_create_container
def __init__(self, master, **kw):
ttk.Treeview.__init__(self, master, **kw)
AutoScroll.__init__(self, master)
def _bound_to_mousewheel(event, widget):
child = widget.winfo_children()[0]
def mswheel_func(e): return _on_mousewheel(e, child)
def msshift_func(e): return _on_shiftmouse(e, child)
if platform.system() != "Linux":
child.bind_all("<MouseWheel>", mswheel_func)
child.bind_all("<Shift-MouseWheel>", msshift_func)
else:
child.bind_all("<Button-4>", mswheel_func)
child.bind_all("<Button-5>", mswheel_func)
child.bind_all("<Shift-Button-4>", msshift_func)
child.bind_all("<Shift-Button-5>", msshift_func)
def _unbound_to_mousewheel(event, widget):
if platform.system() == "Windows" or platform.system() == "Darwin":
widget.unbind_all("<MouseWheel>")
widget.unbind_all("<Shift-MouseWheel>")
else:
widget.unbind_all("<Button-4>")
widget.unbind_all("<Button-5>")
widget.unbind_all("<Shift-Button-4>")
widget.unbind_all("<Shift-Button-5>")
def _on_mousewheel(event, widget):
if platform.system() == "Windows":
widget.yview_scroll(-1 * int(event.delta / 120), "units")
elif platform.system() == "Darwin":
widget.yview_scroll(-1 * int(event.delta), "units")
elif event.num == 4:
widget.yview_scroll(-1, "units")
elif event.num == 5:
widget.yview_scroll(1, "units")
```
#### File: onion_file_search/onion_file_search/main.py
```python
import tkinter as tk
import tkinter.ttk as ttk
import os
import sys
import platform
import stat
from time import ctime
try:
import ttkthemes
except ImportError:
pass
try:
from benedict import benedict
except ImportError:
raise ImportError(
"""
onion_file_search now relies on python-benedict,
install it with pip install python-benedict
"""
)
from custom_treeview import ScrolledTreeView
from custom_file_counter import file_counter_win
from search import (
os_walk_search,
not_darwin_search,
)
def get_all_values(tree, nested_dictionary, build_path=""):
for key, value in nested_dictionary.items():
path = os.path.join(build_path, key)
if type(value) is dict:
tree.insert(build_path, "end", path, text=key,
values=[path, "directory"])
get_all_values(tree, value, path)
else:
tree.insert(build_path, "end", path,
text=key, values=[path, "file"])
_stat = os.stat(path.replace(":", ":\\"))
size = _stat.st_size
tree.set(path, "size", "%d bytes" % size)
modified = ctime(_stat[stat.ST_MTIME])
tree.set(path, "Last Access", modified)
class file_walker:
def __init__(self, master):
self.master = master
self.master.title("Onion Search")
self.search_loc = tk.StringVar()
self.search_loc.set(os.path.expanduser("~"))
self.search_var = tk.StringVar()
self.init_menubar()
self.init_searchbar()
self.init_treeview()
self.files = []
self.inside_search_files = []
def init_menubar(self):
self.menubar = tk.Menu(self.master)
self.master.configure(menu=self.menubar)
self.filemenu = tk.Menu(self.menubar, tearoff=0)
self.show_prog = tk.BooleanVar()
self.filemenu.add_checkbutton(
label="Show Progress", variable=self.show_prog)
self.search_inside_var = tk.BooleanVar()
self.filemenu.add_checkbutton(
label="Search Inside TextFiles", variable=self.search_inside_var
)
self.menubar.add_cascade(label="File", menu=self.filemenu)
def init_searchbar(self):
ttk.Entry(
self.master,
textvariable=self.search_loc,
justify="center",
).grid(row=0, column=0, sticky="ns")
self.search_bar = ttk.Entry(self.master, textvariable=self.search_var)
self.search_bar.grid(row=0, column=1, sticky="nsew")
self.search_but = ttk.Button(
self.master, text="Search", command=self.search)
self.search_but.grid(row=0, column=2)
def init_treeview(self):
self.search_vew = ScrolledTreeView(self.master)
self.search_vew.grid(row=1, column=0, columnspan=3, sticky="nsew")
self.search_vew.heading("#0", text="File")
self.search_vew["columns"] = (
"fullpath", "type", "size", "Last Access")
self.search_vew["displaycolumns"] = ("size", "Last Access")
for col in self.search_vew["columns"]:
self.search_vew.heading(col, text=col[0].upper() + col[1:])
self.master.grid_rowconfigure(1, weight=1)
self.master.grid_columnconfigure(1, weight=1)
def search(self, *args):
if not os.path.isdir(self.search_loc.get()):
return
if self.show_prog.get():
return self.search_win(self, *args)
self.search_but.configure(state="disabled")
if platform.system() == "Darwin":
self.files = os_walk_search(
self.search_loc.get(), self.search_var.get())
else:
self.files = not_darwin_search(
self.search_loc.get(), self.search_var.get())
if self.search_inside_var.get():
self.files += self.search_inside()
self.search_vew.delete(*self.search_vew.get_children())
self.populate_tree()
self.search_but.configure(state="normal")
def search_win(self, *args):
"""
Search but with a counter for the number of currently found files
Tries to mimic the functionality of catfish
Not very good at its job to be honest
"""
progress_window = file_counter_win(self.master)
if platform.system() == "Darwin":
self.files = os_walk_search(
self.search_loc.get(), self.search_var.get())
else:
self.files = not_darwin_search(
self.search_loc.get(), self.search_var.get())
if self.search_inside_var.get():
self.files += self.search_inside()
self.search_but.configure(state="disabled")
progress_window.count(self.files)
self.search_vew.delete(*self.search_vew.get_children())
self.populate_tree()
self.search_vew.update()
self.search_but.configure(state="normal")
def search_inside(self):
files = []
self.inside_search_files = []
if platform.system() == "Linux":
os.system(
'grep -sRil "{text}" {location} > files.txt'.format(
text=self.search_var.get(), location=self.search_loc.get()
)
)
elif platform.system() == "Windows":
os.system(
'findstr /s /m /p /i /c:"{text}" {location} > files.txt'.format(
text=self.search_var.get(),
location=os.path.join(self.search_loc.get(), "*"),
)
)
with open("files.txt", "r") as f:
for line in f:
files.append(os.path.dirname(line))
self.inside_search_files.append(os.path.normpath(line.strip()))
return files
def populate_tree(self):
file_tree = benedict({}, keypath_separator=os.sep)
for_merge = []
for file_path in self.files:
for_merge.append(benedict(keypath_separator=os.sep))
for_merge[-1][file_path.replace("[", "\\[").replace("]", "\\]").replace("\\\\?\\", "")] = [
"file",
file_path,
]
file_tree.merge(*for_merge)
self.file_tree = file_tree
get_all_values(self.search_vew, self.file_tree, build_path="")
def main():
root = tk.Tk()
file_walker(root)
root.mainloop()
def themed_main():
root = ttkthemes.ThemedTk()
file_walker(root)
if platform.system() == "Linux":
root.set_theme("equilux")
elif platform.system() == "Windows":
root.set_theme("vista")
elif platform.system() == "Darwin":
root.set_theme("aqua")
root.mainloop()
if __name__ == "__main__":
if "ttkthemes" in sys.modules:
themed_main()
else:
main()
``` |