id | content
---|---
24159
|
import os
import shutil
import pychemia
import tempfile
import unittest
class MyTestCase(unittest.TestCase):
def test_incar(self):
"""
Test (pychemia.code.vasp) [INCAR parsing and writing] :
"""
print(os.getcwd())
iv = pychemia.code.vasp.read_incar('tests/data/vasp_01/INCAR')
self.assertEqual(len(iv), 12)
self.assertEqual(iv.EDIFF, 1E-7)
wf = tempfile.NamedTemporaryFile()
iv.write(wf.name)
wf.close()
iv4dir = pychemia.code.vasp.read_incar('tests/data/vasp_01')
self.assertEqual(iv, iv4dir)
self.assertRaises(ValueError, pychemia.code.vasp.read_incar, 'tests/data')
iv3 = pychemia.code.vasp.VaspInput(variables={'EDIFF': 1E-6})
self.assertEqual(iv3['EDIFF'], 1E-6)
iv = pychemia.code.vasp.read_incar('tests/data/vasp_02')
iv.EDIFF *= 1.3
td = tempfile.mkdtemp()
pychemia.code.vasp.write_incar(iv, td)
self.assertRaises(ValueError, iv.write_key, 'EDIF')
shutil.rmtree(td)
def test_bad_outcar(self):
"""
Test (pychemia.code.vasp) [corrupted VASP OUTCAR] :
"""
vo = pychemia.code.vasp.VaspOutput('tests/data/vasp_04/OUTCAR')
self.assertTrue(vo.is_finished)
def test_encut_setup(self):
"""
Test (pychemia.code.vasp) [ENCUT setup] :
"""
iv = pychemia.code.vasp.read_incar('tests/data/vasp_06')
iv.set_encut(ENCUT=1.2, POTCAR='tests/data/vasp_06/POTCAR')
self.assertEqual(iv.ENCUT, 307)
iv.set_rough_relaxation()
self.assertEqual(iv.EDIFFG, -1E-2)
iv.set_mit_settings()
def test_vaspjob(self):
"""
Test (pychemia.code.vasp) [VaspJob] :
"""
td = tempfile.mkdtemp()
st = pychemia.code.vasp.read_poscar('tests/data/vasp_06')
kp = pychemia.code.vasp.read_kpoints('tests/data/vasp_06')
self.assertEqual(kp.number_of_kpoints, 693)
iv = pychemia.code.vasp.read_incar('tests/data/vasp_06')
vj = pychemia.code.vasp.VaspJob(workdir=td,)
vj.initialize(st, kpoints=kp)
vj.set_input_variables(iv)
vj.write_poscar()
vj.write_kpoints()
vj.write_incar()
shutil.rmtree(td)
def test_outcar(self):
"""
Test (pychemia.code.vasp) [outcar] :
"""
vo = pychemia.code.vasp.VaspOutput('tests/data/vasp_06/OUTCAR')
self.assertEqual(vo.get_memory_used()['grid'], (1028.0, 'kBytes'))
self.assertAlmostEqual(vo.to_dict['energy'], -19.67192646)
print(vo)
self.assertTrue(vo.has_forces_stress_energy())
def test_poscar(self):
"""
Test (pychemia.code.vasp) [poscar] :
"""
# Temporal directory for outputs
tmpdir = tempfile.mkdtemp()
# Read a POSCAR by directory
st = pychemia.code.vasp.read_poscar('tests/data/vasp_06')
self.assertEqual(st.natom, 4)
# Opening old format POSCAR without POTCAR
with self.assertRaises(ValueError) as context:
st = pychemia.code.vasp.read_poscar('tests/data/vasp_07/POSCAR')
st = pychemia.code.vasp.read_poscar('tests/data/vasp_08/POSCAR_old')
self.assertEqual(st.natom, 2)
st = pychemia.code.vasp.read_poscar('tests/data/vasp_08/POSCAR_new')
self.assertEqual(st.natom, 2)
with self.assertRaises(ValueError) as context:
pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='/no/existing/path')
with self.assertRaises(ValueError) as context:
pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='tests/data')
cwd = os.getcwd()
os.chdir('tests/data/vasp_07')
st = pychemia.code.vasp.read_poscar('POSCAR_new')
os.chdir(cwd)
self.assertEqual(st.natom, 44)
st = pychemia.code.vasp.read_poscar('tests/data/vasp_07/POSCAR_alt')
pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR1')
pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR2', direct=False)
pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR3', newformat=False)
st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR1')
self.assertAlmostEqual(st.volume, 584.47161926043907)
sym = pychemia.crystal.CrystalSymmetry(st)
self.assertEqual(sym.symbol(), 'C2/c')
st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR2')
self.assertAlmostEqual(st.volume, 584.47161926043907)
sym = pychemia.crystal.CrystalSymmetry(st)
self.assertEqual(sym.symbol(), 'C2/c')
st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR3')
self.assertAlmostEqual(st.volume, 584.47161926043907)
sym = pychemia.crystal.CrystalSymmetry(st)
self.assertEqual(sym.symbol(), 'C2/c')
pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='tests/data')
pychemia.code.vasp.get_potcar_info(tmpdir + os.sep + 'POTCAR')
shutil.rmtree(tmpdir)
|
24180
|
import copy
import logging
import warnings
from kolibri.plugins.registry import registered_plugins
logger = logging.getLogger(__name__)
def __validate_config_option(
section, name, base_config_spec, plugin_specs, module_path
):
# Raise an error if someone tries to overwrite a base option
# except for the default value.
if section in base_config_spec:
if name in base_config_spec[section]:
raise ValueError("Cannot overwrite a core Kolibri options spec option")
# Warn if a plugin tries to add an option that another plugin has already added
if section in plugin_specs:
if name in plugin_specs[section]:
warnings.warn(
"{plugin} set an option {option} in section {section} but {plugins} had already set it".format(
plugin=module_path,
plugins=", ".join(plugin_specs[section][name]),
option=name,
section=section,
)
)
plugin_specs[section][name].append(module_path)
else:
# If not create the list for this option name
# to track this and future modifications
plugin_specs[section][name] = [module_path]
else:
# If not create the dict for the section
# and the list for this option name
plugin_specs[section] = {name: [module_path]}
def __process_config_spec(
option_spec, base_config_spec, plugin_specs, module_path, final_spec
):
for section, opts in option_spec.items():
for name, attrs in opts.items():
__validate_config_option(
section, name, base_config_spec, plugin_specs, module_path
)
if section not in final_spec:
final_spec[section] = {}
final_spec[section][name] = attrs
def __validate_option_default(section, name, plugin_default_overrides, module_path):
# Warn if a plugin tries to add an option that another plugin has already added
if section in plugin_default_overrides:
if name in plugin_default_overrides[section]:
warnings.warn(
"{plugin} set an option default {option} in section {section} but {plugins} had already set it".format(
plugin=module_path,
plugins=", ".join(plugin_default_overrides[section][name]),
option=name,
section=section,
)
)
plugin_default_overrides[section][name].append(module_path)
else:
# If not create the list for this option name
# to track this and future modifications
plugin_default_overrides[section][name] = [module_path]
else:
# If not create the dict for the section
# and the list for this option name
plugin_default_overrides[section] = {name: [module_path]}
def __process_option_defaults(
option_defaults, base_config_spec, plugin_default_overrides, module_path, final_spec
):
for section, opts in option_defaults.items():
for name, default in opts.items():
__validate_option_default(
section, name, plugin_default_overrides, module_path
)
if section not in final_spec:
logger.error(
"Tried to set a new default in section {}, but this is not a valid section".format(
section
)
)
continue
if name in final_spec[section]:
# This is valid, so set a default
# Note that we do not do validation here for now,
# so it is up to the user to ensure the default value
# is kosher.
final_spec[section][name]["default"] = default
else:
logger.error(
"Tried to set a new default in section {}, for option {} but this is not a valid option".format(
section, name
)
)
def extend_config_spec(base_config_spec):
plugin_specs = {}
final_spec = copy.deepcopy(base_config_spec)
# First process options config spec additions
for plugin_instance in registered_plugins:
plugin_options = plugin_instance.options_module
if plugin_options and hasattr(plugin_options, "option_spec"):
module_path = plugin_instance.module_path
option_spec = plugin_options.option_spec
__process_config_spec(
option_spec, base_config_spec, plugin_specs, module_path, final_spec
)
# Now process default value overrides, do this second in order to allow plugins
# to override default values for other plugins!
plugin_default_overrides = {}
for plugin_instance in registered_plugins:
plugin_options = plugin_instance.option_defaults_module
if plugin_options and hasattr(plugin_options, "option_defaults"):
module_path = plugin_instance.module_path
option_defaults = plugin_options.option_defaults
__process_option_defaults(
option_defaults,
base_config_spec,
plugin_default_overrides,
module_path,
final_spec,
)
return final_spec
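# Hedged illustration (not part of Kolibri): exercising __process_config_spec with
# toy data to show how a plugin-contributed option is merged into the final spec.
# The section name, option name, and module path below are invented for the example.
if __name__ == "__main__":
    base = {"Deployment": {"HTTP_PORT": {"type": "integer", "default": 8080}}}
    final = copy.deepcopy(base)
    plugin_specs = {}
    __process_config_spec(
        option_spec={"Cache": {"CACHE_BACKEND": {"type": "string", "default": "memory"}}},
        base_config_spec=base,
        plugin_specs=plugin_specs,
        module_path="example_plugin",
        final_spec=final,
    )
    # The base section is untouched and the plugin section is added alongside it.
    print(final["Cache"]["CACHEND" if False else "CACHE_BACKEND"])  # {'type': 'string', 'default': 'memory'}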
|
24211
|
import unittest
from huobi.rest.client import HuobiRestClient
from huobi.rest.error import (
HuobiRestiApiError
)
import os
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(dirname(dirname(dirname(__file__)))), '.env')
load_dotenv(dotenv_path)
class TestCommonEndpoint(unittest.TestCase):
def setUp(self):
access_key = os.environ['ACCESS_KEY']
secret_key = os.environ['SECRET_KEY']
self.client = HuobiRestClient(
access_key=access_key, secret_key=secret_key)
def tearDown(self):
self.client.close()
class TestCommonSymbols(TestCommonEndpoint):
def test_success(self):
res = self.client.symbols()
self.assertEqual(res.res.status_code, 200)
self.assertIn('data', res.data)
self.assertIsInstance(res.data['data'], list)
def test_authentication_fail(self):
client = HuobiRestClient(
access_key='1',
secret_key='2',
)
with self.assertRaises(HuobiRestiApiError):
client.accounts()
class TestCommonCurrencies(TestCommonEndpoint):
def test_success(self):
res = self.client.currencies()
self.assertEqual(res.res.status_code, 200)
def test_alias(self):
res = self.client.currencys()
self.assertEqual(res.res.status_code, 200)
class TestCommonTimestamp(TestCommonEndpoint):
def test_success(self):
res = self.client.timestamp()
self.assertEqual(res.res.status_code, 200)
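# Note for running these tests locally (illustrative, not part of the test suite):
# the .env file loaded above lives four directories up from this module and only
# needs the two keys read in setUp(); the values below are placeholders, not real keys.
#
#   ACCESS_KEY=your-huobi-access-key
#   SECRET_KEY=your-huobi-secret-key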
|
24226
|
from django.apps import AppConfig
class TextGeneratorConfig(AppConfig):
name = 'text_generator'
|
24245
|
from uncertainties import ufloat
from utilities import min_value, max_value
def main():
print 'Plate motion rate parallel to section'
print plate_motion()
print 'Shortening (including ductile) from bed-length'
print bed_length_shortening()
print 'Estimated total shortening accommodated by OOSTS'
print oost_shortening()
print 'Shortening accommodated by seaward branch of OOSTS'
print seaward_shortening()
print 'Percentage of OOST shortening'
print total_oost_percentage()
print 'Landward Percentage'
print landward_percentage()
print 'Seaward Percentage'
print seaward_percentage()
def bed_length_balancing():
"""Summed fault heaves from bed-length balancing."""
present_length = 32
# 2km error from range in restored pin lines + 10% interpretation error
restored_length = ufloat(82, 10)
shortening = restored_length - present_length
return shortening
def bed_length_shortening():
"""Shortening estimate including volume loss."""
alpha = ufloat(0.35, 0.1)
heaves = bed_length_balancing()
return heaves * (1 + alpha)
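# A small worked example (not from the original analysis) of how the ufloat
# arithmetic above propagates uncertainty. With restored_length = 82 +/- 10 km,
# present_length = 32 km, and alpha = 0.35 +/- 0.1:
#
#   heaves = 82 - 32 = 50 +/- 10 km
#   shortening = 50 * (1 + 0.35) = 67.5 km
#
# which can be checked directly (attribute names from the uncertainties package):
#
#   s = bed_length_shortening()
#   s.nominal_value   # ~67.5
#   s.std_dev         # ~14.4 (contributions 1.35*10 and 50*0.1 combined in quadrature)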
def age():
"""
Age of the oldest in-sequence structures from Strasser, 2009.
Returns:
--------
avg_age : A ufloat with an assumed 2 sigma uncertainty
min_age : The "hard" minimum from Strasser, et al, 2009
max_age : The "hard" maximum from Strasser, et al, 2009
"""
min_age = 1.95 # Ma
max_age = 2.512 # Ma
# Strasser prefers an older age within this range, so we model this as
# 2.3 +/- 0.2, but provide mins and maxs
avg_age = ufloat(2.3, 0.2) # Ma
return avg_age, min_age, max_age
def plate_motion():
"""
Plate motion rate (forearc relative to oceanic plate) _parallel_ _to_
_section_ (Not full plate vector!) based on elastic block modeling
(Loveless&Meade, 2010).
Returns:
--------
rate : A ufloat in mm/yr with a 2 sigma error
"""
# See /data/MyCode/VariousJunk/loveless_meade_block_model_slip_vector.py
# for details of derivation... Uses block segment nearest study area instead
# of derived euler pole.
# I'm assuming that Loveless's reported errors are 2 sigma...
section_parallel_rate = ufloat(42.9, 2.1)
return section_parallel_rate
def total_convergence():
"""
Total shortening parallel to section from plate motion and ages.
Returns:
--------
shortening : A ufloat representing the plate motion integrated over the
age of deformation with a 2 sigma confidence interval.
min_shortening : A "hard" minimum using the uncertainty in the plate
motion and minimum constraints on the age.
max_shortening : A "hard" maximum using the uncertainty in the plate
motion and maximum constraints on the age.
"""
avg_age, min_age, max_age = age()
rate = plate_motion()
shortening = rate * avg_age
min_shortening = min_value(min_age * rate)
max_shortening = max_value(max_age * rate)
return shortening, min_shortening, max_shortening
def oost_shortening():
"""
Shortening on the out-of-sequence thrust system based on integrated plate
convergence minus the shortening predicted in the outer wedge from line
balancing results.
Returns:
--------
shortening : A ufloat with a 2 sigma error estimate
"""
total_shortening, min_total, max_total = total_convergence()
return total_shortening - bed_length_shortening()
def seaward_shortening():
"""Shortening accomodated on the seaward branch of the OOSTS based on
comparing the total (`oost_shortening()`) shortening with the shortening
predicted on the landward branch from forearc uplift.
Returns:
--------
shortening : a ufloat with 2 sigma error in kilometers.
"""
from process_bootstrap_results import shortening_parallel_to_section
landward_shortening = shortening_parallel_to_section() / 1000
return oost_shortening() - landward_shortening
def total_oost_percentage():
"""
Percentage of shortening accommodated by out-of-sequence thrusting during
the development of the present-day outer wedge.
Returns:
--------
percentage : A ufloat with a 2 sigma error representing a unitless
ratio (e.g. multiply by 100 to get percentage).
"""
total_shortening, min_total, max_total = total_convergence()
return oost_shortening() / total_shortening
def seaward_percentage():
"""
Percentage of total plate convergence accommodated by the seaward branch of
the OOSTS during its period of activity.
Returns:
--------
percentage : A ufloat with a 2 sigma error representing a unitless
ratio (e.g. multiply by 100 to get percentage).
"""
# Duration in myr from Strasser, 2009
duration = 1.95 - 1.24
rate = plate_motion()
total = duration * rate
return seaward_shortening() / total
def landward_percentage():
"""
Maximum percentage of total plate convergence accommodated by the landward
branch of the OOSTS during its period of activity.
Returns:
--------
percentage : A ufloat with a 2 sigma error representing a unitless
ratio (e.g. multiply by 100 to get percentage).
"""
from process_bootstrap_results import shortening_parallel_to_section
landward_shortening = shortening_parallel_to_section() / 1000
duration = ufloat(0.97, 0.07) - ufloat(0.25, 0.25)
rate = plate_motion()
total = duration * rate
return landward_shortening / total
if __name__ == '__main__':
main()
|
24251
|
from unittest.mock import ANY, Mock, patch
import pytest
from streamlit_server_state.server_state_item import ServerStateItem
@pytest.fixture
def patch_is_rerunnable():
with patch(
"streamlit_server_state.server_state_item.is_rerunnable"
) as mock_is_rerunnable:
mock_is_rerunnable.return_value = True
yield
def test_bound_sessions_are_requested_to_rerun_when_value_is_set_or_update(
patch_is_rerunnable,
):
session = Mock()
item = ServerStateItem()
item.bind_session(session)
session.request_rerun.assert_not_called()
item.set_value(42)
session.request_rerun.assert_has_calls([ANY])
item.set_value(100)
session.request_rerun.assert_has_calls([ANY, ANY])
def test_all_bound_sessions_are_requested_to_rerun(patch_is_rerunnable):
session1 = Mock()
session2 = Mock()
item = ServerStateItem()
item.bind_session(session1)
item.bind_session(session2)
session1.request_rerun.assert_not_called()
session2.request_rerun.assert_not_called()
item.set_value(42)
session1.request_rerun.assert_has_calls([ANY])
session2.request_rerun.assert_has_calls([ANY])
item.set_value(100)
session1.request_rerun.assert_has_calls([ANY, ANY])
session2.request_rerun.assert_has_calls([ANY, ANY])
def test_bound_sessions_are_not_duplicate(patch_is_rerunnable):
session = Mock()
item = ServerStateItem()
item.bind_session(session)
item.bind_session(session) # Bind the session twice
session.request_rerun.assert_not_called()
item.set_value(42)
session.request_rerun.assert_called_once()
def test_bound_sessions_are_not_requested_to_rerun_when_the_set_value_is_not_changed(
patch_is_rerunnable,
):
session = Mock()
item = ServerStateItem()
item.bind_session(session)
session.request_rerun.assert_not_called()
item.set_value(42)
session.request_rerun.assert_called_once()
item.set_value(42)
session.request_rerun.assert_called_once() # No new calls
def test_bound_sessions_are_requested_to_rerun_when_a_same_but_mutated_object_is_set(
patch_is_rerunnable,
):
session = Mock()
item = ServerStateItem()
item.bind_session(session)
session.request_rerun.assert_not_called()
item.set_value({})
session.request_rerun.assert_has_calls([ANY])
value = item.get_value()
value["foo"] = 42
item.set_value(value)
session.request_rerun.assert_has_calls([ANY, ANY])
|
24269
|
import dns
import dns.resolver
import dns.rdatatype
def dns_resolve(domain: str) -> list:
addrs = []
resolver = dns.resolver.Resolver(configure=False)
# Default to Google DNS
resolver.nameservers = ['8.8.8.8', '8.8.4.4']
try:
for answer in resolver.resolve(domain, 'A').response.answer:
for item in answer:
if item.rdtype == dns.rdatatype.A:
addrs.append(item.address)
except dns.resolver.NoAnswer:
pass
try:
for answer in resolver.resolve(domain, 'AAAA').response.answer:
for item in answer:
if item.rdtype == dns.rdatatype.AAAA:
addrs.append(item.address)
except dns.resolver.NoAnswer:
pass
return addrs
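if __name__ == '__main__':
    # Quick manual check (illustration only; requires network access, and the
    # addresses returned for any real domain vary over time, so none are hard-coded).
    # Domains with neither A nor AAAA records yield an empty list because both
    # NoAnswer cases are swallowed above; other failures (e.g. NXDOMAIN) still raise.
    print(dns_resolve('example.com'))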
|
24271
|
import numpy as np
def pline(x1, y1, x2, y2, x, y):
px = x2 - x1
py = y2 - y1
dd = px * px + py * py
u = ((x - x1) * px + (y - y1) * py) / max(1e-9, float(dd))
dx = x1 + u * px - x
dy = y1 + u * py - y
return dx * dx + dy * dy
def psegment(x1, y1, x2, y2, x, y):
px = x2 - x1
py = y2 - y1
dd = px * px + py * py
u = max(min(((x - x1) * px + (y - y1) * py) / max(1e-9, float(dd)), 1), 0)
dx = x1 + u * px - x
dy = y1 + u * py - y
return dx * dx + dy * dy
def plambda(x1, y1, x2, y2, x, y):
px = x2 - x1
py = y2 - y1
dd = px * px + py * py
return ((x - x1) * px + (y - y1) * py) / max(1e-9, float(dd))
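# Worked example (illustration only, not part of the original module): for the
# horizontal segment (0,0)-(10,0) and the point (5,3),
#
#   pline(0, 0, 10, 0, 5, 3)     # -> 9.0   squared distance to the infinite line (3**2)
#   plambda(0, 0, 10, 0, 5, 3)   # -> 0.5   the point projects onto the segment's midpoint
#   psegment(0, 0, 10, 0, 15, 0) # -> 25    projection clamped to the endpoint (10,0)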
def postprocess(lines, scores, threshold=0.01, tol=1e9, do_clip=False):
nlines, nscores = [], []
for (p, q), score in zip(lines, scores):
start, end = 0, 1
for a, b in nlines: # nlines: Selected lines.
if (
min(
max(pline(*p, *q, *a), pline(*p, *q, *b)),
max(pline(*a, *b, *p), pline(*a, *b, *q)),
)
> threshold ** 2
):
continue
lambda_a = plambda(*p, *q, *a)
lambda_b = plambda(*p, *q, *b)
if lambda_a > lambda_b:
lambda_a, lambda_b = lambda_b, lambda_a
lambda_a -= tol
lambda_b += tol
# case 1: skip (if not do_clip)
if start < lambda_a and lambda_b < end:
continue
# not intersect
if lambda_b < start or lambda_a > end:
continue
# cover
if lambda_a <= start and end <= lambda_b:
start = 10
break
# case 2 & 3:
if lambda_a <= start and start <= lambda_b:
start = lambda_b
if lambda_a <= end and end <= lambda_b:
end = lambda_a
if start >= end:
break
if start >= end:
continue
nlines.append(np.array([p + (q - p) * start, p + (q - p) * end]))
nscores.append(score)
return np.array(nlines), np.array(nscores)
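if __name__ == "__main__":
    # Minimal demonstration (not part of the original training/inference code):
    # the second, lower-scored segment lies within `threshold` of the first, so it
    # is suppressed and only the first line and its score survive.
    demo_lines = np.array([
        [[0.0, 0.0], [1.0, 0.0]],       # kept
        [[0.0, 0.001], [0.5, 0.001]],   # near-duplicate, dropped
    ])
    demo_scores = np.array([0.9, 0.5])
    kept_lines, kept_scores = postprocess(demo_lines, demo_scores, threshold=0.01)
    print(kept_lines.shape, kept_scores)  # (1, 2, 2) [0.9]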
|
24277
|
import os
from typing import List, Optional
import pickle
import numpy as np
from utilities import augment_long_text, tokenize, tokenize_long_text, to_chars, align
from config import Config as cf
PAD = 0 # TODO: choose appropriate index for these special chars
UNK = 1
DEFAULT = {'PAD': PAD, 'UNK': UNK}
DEFAULT_C = {'': PAD, 'UNK': UNK}
def word_lookup(w: str, table: dict, default=None):
"""
Translate a word into a value by looking up from a dict.
Case-sensitive matches take priority; otherwise a case-insensitive match is tried.
If the word is not found, the given default is returned when provided; otherwise a KeyError is raised.
Args:
w: word to translate
table: a dict to translate the word by looking up
default: If not None, this is the value to return in case the word does not exist in the table.
Returns:
Translated value for the word by looking up the word into table.
"""
if w in table: # Case-sensitive match is the first priority
return table[w]
elif w.lower() in table: # Then, case-insensitive
return table[w.lower()]
else:
if default is not None:
return default
else:
raise KeyError('Key `{}` not found'.format(w))
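# Illustration (not part of the original module) of the lookup priority:
#
#   table = {'apple': 5}
#   word_lookup('apple', table)               # -> 5 (exact, case-sensitive hit)
#   word_lookup('Apple', table)               # -> 5 (falls back to the lower-cased key)
#   word_lookup('pear', table, default=UNK)   # -> 1 (UNK) instead of raising
#   word_lookup('pear', table)                # raises KeyError('Key `pear` not found')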
def char_lookup(c: str, table: dict, default=None):
"""
Translate a char into a value by looking up from a dict.
Args:
c: char to translate
table: a dict to translate the char by looking up
default: If not None, this is the value to return in case the char does not exist in the table.
Returns:
Translated value for the char by looking up the char into table.
"""
if c in table: # Chars are matched case-sensitively only
return table[c]
else:
if default is not None:
return default
else:
raise KeyError('Key `{}` not found'.format(c))
class Vocabulary(object):
def __init__(self, wv: dict, char_vocab: set):
offset = len(DEFAULT)
w2id = {w: idx+offset for idx, w in enumerate(wv.keys())}
w2id.update(DEFAULT)
id2w = {i:w for w, i in w2id.items()}
c2id = {c: idx+offset for idx, c in enumerate(list(char_vocab))}
c2id.update(DEFAULT_C)
id2c = {i:c for c, i in c2id.items()}
self.wv = wv
self.emb_size = len(wv['the']) # most common word that absolutely appears in the dict
self.w2id = w2id # mapping word to index
self.id2w = id2w # mapping index to word
self.c2id = c2id # mapping char to index
self.id2c = id2c # mapping index to char
def vectorize(self, tokens: List[str], length: int):
"""
Convert list of text tokens into list of indices
"""
vect = [word_lookup(t, self.w2id, default=UNK) for t in tokens]
vect = vect[:length]
if len(vect) < length:
vect.extend([PAD]*(length-len(vect)))
return vect
def vectorize_c(self, chars_list: List[List[str]], length: int, w_length: int):
"""
Convert list of list of chars into list of index-based representation
"""
vects = []
PAD_VECT = [PAD]*w_length
for chars in chars_list:
vects.append([char_lookup(c, self.c2id, default=UNK) for c in chars])
vects = vects[:length]
while len(vects) < length:
vects.append(PAD_VECT)
return vects
def get_embed_weights(self):
"""
Build weights for a word embedding layer.
Note that pre-trained word embeddings are used, so the embedding size is taken from the vectors themselves rather than passed as an argument.
Returns:
[N, emb_size] matrix, where N is the vocabulary size plus 2 (PAD and UNK)
"""
emb_size = len(self.wv[list(self.wv.keys())[0]])
weights = np.zeros((len(self.id2w), emb_size))
for i, tok in self.id2w.items():
if tok in self.wv:
weights[i] = self.wv[tok]
else:
weights[i] = np.random.uniform(0.0, 1.0, [emb_size])
return weights
def get_char_embed_weights(self, emb_size=64):
"""
Initialize weights for char embedding layer.
Args:
emb_size: Dim of the vectors
Returns:
[len(id2c), emb_size] matrix
"""
weights = np.random.uniform(0.0, 1.0, size=(len(self.id2c), emb_size))
return weights
@property
def vocab_size(self):
return len(self.w2id)
def __getitem__(self, idx):
"""
Get vector for a word.
"""
if not isinstance(idx, str):
raise ValueError('Index must be a string')
return word_lookup(idx, self.wv, default=None)
def __contains__(self, idx):
if not isinstance(idx, str):
raise ValueError('Index must be a string')
return idx in self.wv or idx.lower() in self.wv
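# Small usage sketch (illustrative only; a real run would use pre-trained vectors).
# Note the constructor assumes the key 'the' is present in `wv`:
#
#   wv = {'the': [0.1, 0.2], 'cat': [0.3, 0.4]}
#   vocab = Vocabulary(wv, char_vocab={'t', 'h', 'e', 'c', 'a'})
#   vocab.vectorize(['The', 'cat', 'sat'], length=5)
#   # -> [2, 3, 1, 0, 0]  ('the' and 'cat' map to ids 2 and 3, 'sat' to UNK, then PAD)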
class Span(object):
def __init__(self, start_idx: int, end_idx: int):
self.start = start_idx # index of the start token in context
self.end = end_idx # index of the end token in context
@classmethod
def allocate(cls, anchors: List[int], start_char: int, end_char: int):
start_idx = 0
while anchors[start_idx] < start_char:
start_idx += 1
if anchors[start_idx] > start_char:
start_idx -= 1
end_idx = start_idx
while end_idx < len(anchors) and anchors[end_idx] <= end_char:
end_idx += 1
end_idx -= 1
return Span(start_idx, end_idx)
def __str__(self):
return "({}, {})".format(self.start, self.end)
class Answer(object):
def __init__(self, answer_text: str, answer_toks: List[str], span: Span, answer_start: int):
self.answer_text = answer_text # original answer text in JSON
self.answer_toks = answer_toks # tokens of the original answer text
self.answer_chars = to_chars(answer_toks, cf.WORD_LEN, cf.PAD_CHAR) # list of chars of the answer text
self.span = span # The span (token-based index) of the answer in context
self.answer_start = answer_start # start character in original answer text
def vectorize(self, vocab: Vocabulary):
self.answer: List[int] = vocab.vectorize(self.answer_toks, cf.ANSWER_LEN)
self.answer_c: List[List[int]] = vocab.vectorize_c(self.answer_chars, cf.ANSWER_LEN, cf.WORD_LEN)
@classmethod
def parse_json(cls, answers_js: List[dict], context: str, context_toks: List[str], anchors: List[int]):
answers = []
for ans in answers_js:
ans_text = ans['text']
ans_start = ans['answer_start']
ans_toks = tokenize(ans_text)
# Identify the span from context, ans_text & start index
span = Span.allocate(anchors, ans_start, ans_start+len(ans_text)-1)
answers.append(Answer(ans_text, ans_toks, span, ans_start))
return answers
class Question(object):
def __init__(self, question_text: str, ques_id: str, question: List[str], answers: List[Answer], plausible_answers: List[Answer]):
self.question_text = question_text # original question text in JSON
self.question_toks = question # tokens of the original question text
self.question_chars = to_chars(question, cf.WORD_LEN, cf.PAD_CHAR) # list of chars of the question text
self.answers = answers # list of Answer object of the question
self.ques_id = ques_id # id of the question in JSON
self.plausible_answers = plausible_answers
self.paragraph = None # handle to the parent paragraph
def set_paragraph(self, paragraph):
self.paragraph = paragraph
def vectorize(self, vocab: Vocabulary):
self.question: List[int] = vocab.vectorize(self.question_toks, cf.QUERY_LEN)
self.question_c: List[List[int]] = vocab.vectorize_c(self.question_chars, cf.QUERY_LEN, cf.WORD_LEN)
for answer in self.answers:
answer.vectorize(vocab)
class Paragraph(object):
def __init__(self, raw_context: str, context_text: str, context_toks: List[str], questions: List[Question], para_idx: int, anchors: List[int]):
self.raw_context = raw_context # original context text in JSON
self.context_text = context_text # augmented from original context text with SPACES to guide the tokenization
self.context_toks = context_toks # tokens of the context text
self.context_chars = to_chars(context_toks, cf.WORD_LEN, cf.PAD_CHAR) # chars of the context
self.questions = questions # list of Question objects
self.local_word_vocab = self._build_local_word_vocab()
self.local_char_vocab = self._build_local_char_vocab()
self.para_idx = para_idx # Just for management & debug. Not used in experiment.
self.anchors = anchors
def _build_local_word_vocab(self):
local_vocab = set()
local_vocab = local_vocab.union(set(self.context_toks))
for question in self.questions:
local_vocab = local_vocab.union(set(question.question_toks))
for answer in question.answers + question.plausible_answers:
local_vocab = local_vocab.union(set(answer.answer_toks))
return local_vocab
def _build_local_char_vocab(self):
def char_set(tokens):
chars = set()
for tok in tokens:
chars = chars.union(set(tok))
return chars
char_vocab = set()
char_vocab = char_vocab.union(char_set(self.context_chars))
for question in self.questions:
char_vocab = char_vocab.union(char_set(question.question_chars))
for answer in question.answers + question.plausible_answers:
char_vocab = char_vocab.union(char_set(answer.answer_chars))
return char_vocab
@classmethod
def parse_json(cls, para_js: dict, para_idx: int):
# Accumulate all answers' tokens first
all_para_answers = []
for q in para_js['qas']:
if 'answers' in q:
all_para_answers.extend([ans for ans in q['answers']])
if 'plausible_answers' in q:
all_para_answers.extend([ans for ans in q['plausible_answers']])
# Improve the context for better tokenization
raw_context = para_js['context']
# context = augment_long_text(para_js['context'], all_para_answers)
context = raw_context
context_toks = tokenize_long_text(context)
context_toks = [t.strip(' ') for t in context_toks]
anchors = align(raw_context, context_toks)
questions = []
for q in para_js['qas']:
question_text = q['question']
q_toks = tokenize(question_text)
ques_id = q['id']
answers = Answer.parse_json(q['answers'], raw_context, context_toks, anchors) if 'answers' in q else []
plausible_answers = Answer.parse_json(q['plausible_answers'], raw_context, context_toks, anchors) if 'plausible_answers' in q else []
questions.append(Question(question_text, ques_id, q_toks, answers, plausible_answers))
para = Paragraph(raw_context, context, context_toks, questions, para_idx, anchors)
for ques in questions:
ques.set_paragraph(para)
return para
def vectorize(self, vocab):
"""
Vectorize paragraph context, question text & answer text based on the given vocab.
"""
self.context: List[int] = vocab.vectorize(self.context_toks, cf.CONTEXT_LEN)
self.context_c: List[List[int]] = vocab.vectorize_c(self.context_chars, cf.CONTEXT_LEN, cf.WORD_LEN)
for question in self.questions:
question.vectorize(vocab)
def exact_match(gt_s, gt_e, pr_s, pr_e):
"""
Evaluate exact match of a predicted span over a ground truth span.
Args:
gt_s: index of the ground truth start position
gt_e: index of the ground truth end position
pr_s: index of the predicted start position
pr_e: index of the predicted end position
"""
return gt_s == pr_s and gt_e == pr_e
def f1(gt_s, gt_e, pr_s, pr_e):
"""
Evaluate F1 score of a predicted span over a ground truth span.
Args:
gt_s: index of the ground truth start position
gt_e: index of the ground truth end position
pr_s: index of the predicted start position
pr_e: index of the predicted end position
"""
gt = {idx for idx in range(gt_s, gt_e+1)}
pr = {idx for idx in range(pr_s, pr_e+1)}
intersection = gt.intersection(pr)
prec = 1. * len(intersection) / len(pr)
rec = 1. * len(intersection) / len(gt)
f1_score = (2. * prec * rec) / (prec+rec) if prec+rec != 0. else 0.
return f1_score
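# Worked example (not part of the original module): ground-truth span (3, 7) and
# predicted span (5, 9) share tokens {5, 6, 7}, so precision = recall = 3/5 and
#
#   f1(3, 7, 5, 9)           # -> 0.6
#   exact_match(3, 7, 5, 9)  # -> False
#   exact_match(3, 7, 3, 7)  # -> True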
def get_score(metric, gt_starts, gt_ends, pred_start, pred_end):
"""
Args:
metric: a metric function to calculate the score (exact_match or f1_score)
gt_starts: (list) an array of start indices of the available answers
gt_ends: (list) an array of end indices of the available answers
pred_start: (int) predicted start index returned by a model
pred_end: (int) predicted end index returned by a model
Returns:
The best score of the metric evaluated on multiple answer spans.
"""
scores = []
for gt_s, gt_e in zip(gt_starts, gt_ends):
scores.append(metric(gt_s, gt_e, pred_start, pred_end))
return 1.0 * np.max(scores)
class SquadData(object):
"""
To save the whole object to pickle file:
```python
data.save('data/squad_processed.pkl')
```
To load the whole object from pickle file, and extract train & validation data
```python
data = SquadData.load('data/squad_processed.pkl')
ques_ids_train, X_train, y_train = data.train_data()
ques_ids_valid, X_valid, y_valid = data.validation_data()
```
To save structured data to binary files for fast loading:
```python
data.save(np_path='data/numpy')
```
To load numpy data from binary files:
```python
word_vectors, char_vectors, ques_ids_train, X_train, y_train, ques_ids_valid, X_valid, y_valid = SquadData.load(np_path='data/numpy')
```
"""
def __init__(self, train_paragraphs: List[Paragraph], dev_paragraphs: List[Paragraph], vocab: Vocabulary, squad_words: set, squad_chars: set):
"""
Initializer.
Args:
train_paragraphs: list of Paragraph objects from train data
dev_paragraphs: list of Paragraph objects from dev data
vocab: Vocabulary object which store vectors of words appearing in Squad data
squad_words: set of all tokens appearing in Squad data (context, question text, answer text).
Note that some tokens may not appear in vocab. They are treated as unknown words.
Note that this is a set of words, so it must not be used to map words to indices. Use Vocabulary.w2id instead.
squad_chars: set of all characters appearing in Squad data (context, question text, answer text).
"""
self.train_paragraphs = train_paragraphs
self.dev_paragraphs = dev_paragraphs
self.vocab = vocab
self.squad_words = squad_words
self.squad_chars = squad_chars
def summary(self):
print('Num of train paragraphs: {}'.format(len(self.train_paragraphs)))
print('Num of dev paragraphs: {}'.format(len(self.dev_paragraphs)))
print('Num words in vocab: {}'.format(self.vocab.vocab_size))
print('Num unique words: {}'.format(len(self.squad_words)))
print('Num unique chars: {}'.format(len(self.squad_chars)))
unknown_words = [w for w in self.squad_words if w not in self.vocab]
print('Num of unknown words: {}'.format(len(unknown_words)))
def _generate_data(self, paragraphs, dataset: str ='train'):
ques_ids = []
contextw_inp, queryw_inp, contextc_inp, queryc_inp = [], [], [], []
p1, p2, start, end = [], [], [], []
long_count = 0
for para in paragraphs:
for ques in para.questions:
if dataset == 'train':
for ans in ques.answers:
if ans.span.start >= cf.CONTEXT_LEN or ans.span.end >= cf.CONTEXT_LEN:
# print('ques.ques_id:', ques.ques_id, ',', 'ans.span.start, end:', ans.span.start, ',', ans.span.end)
long_count += 1
continue
ques_ids.append(ques.ques_id)
contextw_inp.append(para.context)
queryw_inp.append(ques.question)
contextc_inp.append(para.context_c)
queryc_inp.append(ques.question_c)
vect = np.zeros(cf.CONTEXT_LEN, dtype=np.float16)
vect[ans.span.start] = 1.
p1.append(vect)
vect = np.zeros(cf.CONTEXT_LEN, dtype=np.float16)
vect[ans.span.end] = 1.
p2.append(vect)
start.append(ans.span.start)
end.append(ans.span.end)
else: # dev dataset
ques_ids.append(ques.ques_id)
contextw_inp.append(para.context)
queryw_inp.append(ques.question)
contextc_inp.append(para.context_c)
queryc_inp.append(ques.question_c)
start_list = []
end_list = []
for ans in ques.answers:
if ans.span.start >= cf.CONTEXT_LEN or ans.span.end >= cf.CONTEXT_LEN:
long_count += 1
continue
start_list.append(ans.span.start)
end_list.append(ans.span.end)
# p1, p2 are ignored in dev set
start.append(start_list)
end.append(end_list)
print('There are {} long answers'.format(long_count))
ques_ids = np.array(ques_ids)
contextw_inp, queryw_inp, contextc_inp, queryc_inp = np.array(contextw_inp), np.array(queryw_inp), np.array(contextc_inp), np.array(queryc_inp)
p1, p2, start, end = np.array(p1), np.array(p2), np.array(start), np.array(end)
return (ques_ids, [contextw_inp, queryw_inp, contextc_inp, queryc_inp], [p1, p2, start, end])
def train_data(self):
return self._generate_data(self.train_paragraphs)
def validation_data(self):
return self._generate_data(self.dev_paragraphs, dataset='dev')
def search_paragraph(self, para_idx: int, dataset: str ='train'):
"""
Search for paragraph by index. This function is used for debug only.
"""
paragraphs = self.train_paragraphs if dataset == 'train' else self.dev_paragraphs
for para in paragraphs:
if para.para_idx == para_idx:
return para
return None
def search_question(self, ques_id: str, dataset: str ='train'):
"""
Search for question by ques_id. This function is used for debug only.
"""
paragraphs = self.train_paragraphs if dataset == 'train' else self.dev_paragraphs
for para in paragraphs:
for ques in para.questions:
if ques.ques_id == ques_id:
return ques
return None
@classmethod
def evaluate(cls, gt_start_list, gt_end_list, pred_starts, pred_ends):
"""
Evaluate ExactMatch score & F1 score of predictions on a validation set.
Args:
gt_start_list: list of start indices of multiple ground-truth answer spans
gt_end_list: list of end indices of multiple ground-truth answer spans
pred_starts: list of predicted start indices
pred_ends: list of predicted end indices
Returns:
A hash with 2 keys: 'exact_match' & 'f1'
"""
em_score = 0
f1_score = 0
total = 0
for gt_starts, gt_ends, pred_start, pred_end in zip(gt_start_list, gt_end_list, pred_starts, pred_ends):
if len(gt_starts) > 0:
em_score += get_score(exact_match, gt_starts, gt_ends, pred_start, pred_end)
f1_score += get_score(f1, gt_starts, gt_ends, pred_start, pred_end)
# If gt_starts is empty, the ground-truth answer is over the limit length of the input text.
# We give penalty for that case, that means we give 0 to EM & F1 while we increase the total.
total += 1
em_score = 100. * em_score / total
f1_score = 100. * f1_score / total
return {
'exact_match': em_score,
'f1': f1_score
}
def save(self, filepath=None, np_path=None):
def save_data(prefix, ques_ids,
contextw, queryw, contextc, queryc,
p1, p2, start, end):
np.save(np_path + '/%s_ques_ids.npy' % prefix, ques_ids)
np.save(np_path + '/%s_contextw.npy' % prefix, contextw)
np.save(np_path + '/%s_queryw.npy' % prefix, queryw)
np.save(np_path + '/%s_contextc.npy' % prefix, contextc)
np.save(np_path + '/%s_queryc.npy' % prefix, queryc)
np.save(np_path + '/%s_p1.npy' % prefix, p1)
np.save(np_path + '/%s_p2.npy' % prefix, p2)
np.save(np_path + '/%s_start.npy' % prefix, start)
np.save(np_path + '/%s_end.npy' % prefix, end)
if filepath: # Save the SquadData object to pickle file (slow)
print('Saving squad data to {}...'.format(filepath))
with open(filepath, 'wb') as f:
pickle.dump(self, f)
else: # Save the binary data to *.npy files (fast)
print('Accumulating train & validation arrays from the structure...')
t_ques_ids, X_train, y_train = self.train_data()
v_ques_ids, X_valid, y_valid = self.validation_data()
t_contextw, t_queryw, t_contextc, t_queryc = X_train
t_p1, t_p2, t_start, t_end = y_train
v_contextw, v_queryw, v_contextc, v_queryc = X_valid
v_p1, v_p2, v_start, v_end = y_valid
if not os.path.exists(np_path):
os.makedirs(np_path)
print('Saving word vectors into numpy files...')
word_vectors = self.vocab.get_embed_weights()
char_vectors = self.vocab.get_char_embed_weights()
np.save(np_path + '/word_vectors.npy', word_vectors)
np.save(np_path + '/char_vectors.npy', char_vectors)
print('Saving train arrays into numpy files...')
save_data(
'train', t_ques_ids,
t_contextw, t_queryw, t_contextc, t_queryc,
t_p1, t_p2, t_start, t_end)
print('Saving validation arrays into numpy files...')
save_data(
'val', v_ques_ids,
v_contextw, v_queryw, v_contextc, v_queryc,
v_p1, v_p2, v_start, v_end)
@classmethod
def load(cls, filepath=None, np_path=None):
def load_data(prefix):
ques_ids = np.load(np_path + '/%s_ques_ids.npy' % prefix)
contextw = np.load(np_path + '/%s_contextw.npy' % prefix)
queryw = np.load(np_path + '/%s_queryw.npy' % prefix)
contextc = np.load(np_path + '/%s_contextc.npy' % prefix)
queryc = np.load(np_path + '/%s_queryc.npy' % prefix)
p1 = np.load(np_path + '/%s_p1.npy' % prefix)
p2 = np.load(np_path + '/%s_p2.npy' % prefix)
start = np.load(np_path + '/%s_start.npy' % prefix)
end = np.load(np_path + '/%s_end.npy' % prefix)
return ques_ids, contextw, queryw, contextc, queryc, p1, p2, start, end
if filepath: # Load SquadData object from pickle file (slow)
print('Loading squad data from pickle file {}...'.format(filepath))
with open(filepath, 'rb') as f:
return pickle.load(f)
else: # Load binary data from *.npy files (fast)
print('Loading word vectors from numpy files...')
word_vectors = np.load(np_path + '/word_vectors.npy')
char_vectors = np.load(np_path + '/char_vectors.npy')
print('Loading train arrays from numpy files...')
t_ques_ids, t_contextw, t_queryw, t_contextc, t_queryc, t_p1, t_p2, t_start, t_end = load_data('train')
print('Loading validation arrays from numpy files...')
v_ques_ids, v_contextw, v_queryw, v_contextc, v_queryc, v_p1, v_p2, v_start, v_end = load_data('val')
return [
word_vectors,
char_vectors,
t_ques_ids,
[t_contextw, t_queryw, t_contextc, t_queryc],
[t_p1, t_p2, t_start, t_end],
v_ques_ids,
[v_contextw, v_queryw, v_contextc, v_queryc],
[v_p1, v_p2, v_start, v_end]
]
|
24284
|
import configparser as parser
import random
class config:
# load the configuration file
def __init__(self, config_filename):
self.load_config(config_filename)
def load_config(self, config_filename):
# create a config parser
config = parser.ConfigParser()
config.optionxform = str
# read the file
config.read(config_filename)
# read the values
dictionary = {}
for section in config.sections():
print('Found section: ' + section)
dictionary[section] = {}
for option in config.options(section):
dictionary[section][option] = config.get(section, option).splitlines()
self.phrases = dictionary['phrases']
if 'defaults' in dictionary and 'subjects' in dictionary:
self.has_subjects = True
self.defaults = dictionary['defaults']
self.subjects = dictionary['subjects']
for subject in self.subjects:
self.subjects[subject] = self.subjects[subject][0].split(',')
print('loaded defaults and subjects')
else:
self.has_subjects = False
def create_subjects(self, number = 0):
if number == 0:
number = int(self.defaults['num_subjects'][0])
if self.has_subjects:
first_subject = random.choice(list(self.subjects))
subjects = [first_subject]
for i in range(1,number):
subjects.append(self.get_adjacent_subject(subjects[i-1]))
self.current_subjects = subjects
else:
pass
def get_adjacent_subject(self, subject):
node = self.subjects[subject]
return random.choice(node)
def get_subject(self):
return random.choice(self.current_subjects)
def get_phrase(self, key):
try:
string_to_return = random.choice(self.phrases[key])
if string_to_return == 'none':
return ''
else:
return string_to_return
except (KeyError, IndexError):
print('Could not find phrases with key ' + key)
return ''
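# A hedged usage sketch: the file name and section contents below are invented to
# show the shape this class expects (a [phrases] section is required; [defaults]
# and [subjects] are optional and enable create_subjects()).
#
# Example bot.ini:
#
#   [phrases]
#   greeting = Hello there
#       none
#   [defaults]
#   num_subjects = 2
#   [subjects]
#   weather = news,sports
#   news = weather,sports
#   sports = weather,news
#
#   cfg = config('bot.ini')
#   cfg.create_subjects()        # picks a chain of adjacent subjects
#   cfg.get_phrase('greeting')   # -> 'Hello there', or '' when 'none' is drawn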
|
24286
|
import sys
import gtk
from datetime import datetime
import gobject
from threading import Thread
class uiSignalHelpers(object):
def __init__(self, *args, **kwargs):
super(uiSignalHelpers, self).__init__(*args, **kwargs)
#print 'signal helpers __init__'
def callback(self, *args, **kwargs):
super(uiSignalHelpers, self).callback(*args, **kwargs)
#print 'signal helpers callback'
def gtk_widget_show(self, w, e = None):
w.show()
return True
def gtk_widget_hide(self, w, e = None):
w.hide()
return True
def information_message(self, widget, message, cb = None):
self.attention = "INFO: %s" % message
messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_INFO, gtk.BUTTONS_OK, message)
messagedialog.connect("delete-event", lambda w, e: w.hide() or True)
if cb:
messagedialog.connect("response", cb)
messagedialog.set_default_response(gtk.RESPONSE_OK)
messagedialog.show()
messagedialog.present()
return messagedialog
def error_message(self, widget, message):
self.attention = "ERROR: %s" % message
messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR, gtk.BUTTONS_CANCEL, message)
messagedialog.run()
messagedialog.destroy()
def warning_message(self, widget, message):
self.attention = "WARNING: %s" % message
messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_WARNING, gtk.BUTTONS_OK_CANCEL, message)
messagedialog.show()
messagedialog.present()
messagedialog.run()
messagedialog.destroy()
def question_message(self, widget, message, cb = None):
self.attention = "QUESTION: %s" % message
messagedialog = gtk.MessageDialog(widget, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_QUESTION, gtk.BUTTONS_YES_NO, message)
messagedialog.connect("delete-event", lambda w, e: w.hide() or True)
if cb:
messagedialog.connect("response", cb)
messagedialog.set_default_response(gtk.RESPONSE_YES)
messagedialog.show()
messagedialog.present()
return messagedialog
def interval_dialog(self, message):
if not self.interval_dialog_showing:
if not self.timetracker_window.is_active():
self.timetracker_window.show()
self.timetracker_window.present()
self.interval_dialog_showing = True
return self.question_message(self.timetracker_window, message, self.on_interval_dialog)
return None
def stop_interval_dialog(self, message):
if not self.stop_interval_dialog_showing:
if not self.timetracker_window.is_active():
self.timetracker_window.show()
self.timetracker_window.present()
self.stop_interval_dialog_showing = True
return self.information_message(self.timetracker_window, message, self.on_stopped)
return None
def set_custom_label(self, widget, text):
#set custom label on stock button
Label = widget.get_children()[0]
Label = Label.get_children()[0].get_children()[1]
Label.set_label(text)
def window_state(self, widget, state):
self.timetracker_window_state = state.new_window_state
class uiSignals(uiSignalHelpers):
def __init__(self, *args, **kwargs):
super(uiSignals, self).__init__(*args, **kwargs)
#these are components defined inside the ui file
#print 'signals __init__'
self.preferences_window.connect('delete-event', lambda w, e: w.hide() or True)
self.timetracker_window.connect('delete-event', lambda w, e: w.hide() or True)
self.timetracker_window.connect('destroy', lambda w, e: w.hide() or True)
self.timetracker_window.connect("window-state-event", self.window_state)
self.about_dialog.connect("delete-event", lambda w, e: w.hide() or True)
self.about_dialog.connect("response", lambda w, e: w.hide() or True)
self.notes_textview.connect('key_press_event', self.on_textview_ctrl_enter)
def callback(self, *args, **kwargs): #stub
super(uiSignals, self).callback(*args, **kwargs) #executed after init, hopefully this will let me inject interrupts
#print 'signals callback'
self.icon.connect('activate', self.left_click)
self.icon.connect("popup-menu", self.right_click)
if sys.platform == "win32":
from gtkwin32 import GTKWin32Ext
self.timetracker_window.realize()
self.win32ext = GTKWin32Ext(self.timetracker_window)
self.win32ext.add_notify_icon()
def before_init(self): #stub for later
#print 'signals before init'
pass
def after_init(self): #init any other callback we can't setup in the actual init phase
#print 'signals after init'
self.project_combobox_handler = self.project_combobox.connect('changed', self.on_project_combobox_changed)
self.task_combobox_handler = self.task_combobox.connect('changed', self.on_task_combobox_changed)
def on_show_about_dialog(self, widget):
self.about_dialog.show()
def on_interval_dialog(self, dialog, a): #interval_dialog callback
if a == gtk.RESPONSE_NO:
self.refresh_and_show()
else:
#keep the timer running
self.running = True
self.current_selected_project_id = self.last_project_id
self.current_selected_task_id = self.last_task_id
self.current_notes = self.get_notes(self.last_notes)
self.current_hours = "%0.02f" % round(float(self.last_hours) + float(self.interval), 2)
self.current_text = self.last_text
self.current_entry_id = self.last_entry_id
entry = self.harvest.update(self.current_entry_id, {#append to existing timer
'notes': self.current_notes,
'hours': self.current_hours,
'project_id': self.current_project_id,
'task_id': self.current_task_id
})
self.refresh_and_show()
self.timetracker_window.hide() #hide timetracker and continue task
dialog.destroy()
self.attention = None
self.interval_dialog_showing = False
def on_textview_ctrl_enter(self, widget, event):
'''
submit clicked event on ctrl+enter in notes textview
'''
if event.state & gtk.gdk.CONTROL_MASK and \
gtk.gdk.keyval_name(event.keyval) == "Return":
self.submit_button.emit('clicked')
def on_stopped(self, dialog):
if not self.timetracker_window.is_active():
self.timetracker_window.show()
self.timetracker_window.present()
dialog.destroy()
self.attention = None
self.stop_interval_dialog_showing = False
def on_save_preferences_button_clicked(self, widget):
if self.running: #if running it will turn off, lets empty the comboboxes
#stop the timer
#self.toggle_current_timer(self.current_entry_id) #maybe add pref option to kill timer on pref change?
if self.interval_dialog_instance:
self.interval_dialog_instance.hide() #hide the dialog
self.stop_and_refactor_time()
self.get_prefs()
if self.connect_to_harvest():
self.preferences_window.hide()
self.timetracker_window.show()
self.timetracker_window.present()
def on_task_combobox_changed(self, widget):
new_idx = widget.get_active()
if new_idx != -1:
if new_idx != self.current_selected_task_idx: #-1 is sent from pygtk loop or something
self.current_selected_task_id = self.get_combobox_selection(widget)
self.current_selected_task_idx = new_idx
self.refresh_comboboxes()
def on_project_combobox_changed(self, widget):
self.current_selected_project_id = self.get_combobox_selection(widget)
new_idx = widget.get_active()
if new_idx != -1:
#reset task when new project is selected
self.current_selected_project_idx = new_idx
self.current_selected_task_id = None
self.current_selected_task_idx = 0
self.refresh_comboboxes()
def on_show_preferences(self, widget):
self.preferences_window.show()
self.preferences_window.present()
def on_away_from_desk(self, widget):
#toggle away state
if self.running:
self.away_from_desk = not self.away_from_desk
def on_check_for_updates(self, widget):
pass
def on_top(self, widget):
self.always_on_top = not self.always_on_top
self.timetracker_window.set_keep_above(self.always_on_top)
def on_submit_button_clicked(self, widget):
self.away_from_desk = False
self.attention = None
self.append_add_entry()
self.set_textview_text(self.notes_textview, "")
self.notes_textview.grab_focus()
def on_stop_timer(self, widget):
self.stop_and_refactor_time()
def on_quit(self, widget):
if self.running and self.harvest:
self.harvest.toggle_timer(self.current_entry_id)
gtk.main_quit()
def refresh_and_show(self):
self.set_entries()
self.timetracker_window.show()
self.timetracker_window.present()
self.notes_textview.grab_focus()
def on_refresh(self, widget):
self.refresh_and_show()
def left_click(self, widget):
self.refresh_and_show()
def right_click(self, widget, button, time):
#create popup menu
menu = gtk.Menu()
refresh = gtk.ImageMenuItem(gtk.STOCK_REFRESH)
refresh.connect("activate", self.on_refresh)
menu.append(refresh)
if self.running:
stop_timer = gtk.MenuItem("Stop Timer")
stop_timer.connect("activate", self.on_stop_timer)
menu.append(stop_timer)
if not self.away_from_desk:
away = gtk.ImageMenuItem(gtk.STOCK_NO)
away.set_label("Away from desk")
else:
away = gtk.ImageMenuItem(gtk.STOCK_YES)
away.set_label("Back at desk")
away.connect("activate", self.on_away_from_desk)
menu.append(away)
top = gtk.MenuItem("Always on top")
prefs = gtk.MenuItem("Preferences")
about = gtk.MenuItem("About")
quit = gtk.MenuItem("Quit")
top.connect("activate", self.on_top)
prefs.connect("activate", self.on_show_preferences)
about.connect("activate", self.on_show_about_dialog)
quit.connect("activate", self.on_quit)
menu.append(prefs)
menu.append(top)
menu.append(about)
menu.append(quit)
menu.show_all()
menu.popup(None, None, gtk.status_icon_position_menu, button, time, self.icon)
|
24308
|
from awx.main import signals
class TestCleanupDetachedLabels:
def test_cleanup_detached_labels_on_deleted_parent(self, mocker):
mock_labels = [mocker.MagicMock(), mocker.MagicMock()]
mock_instance = mocker.MagicMock()
mock_instance.labels.all = mocker.MagicMock()
mock_instance.labels.all.return_value = mock_labels
mock_labels[0].is_candidate_for_detach.return_value = True
mock_labels[1].is_candidate_for_detach.return_value = False
signals.cleanup_detached_labels_on_deleted_parent(None, mock_instance)
mock_labels[0].is_candidate_for_detach.assert_called_with()
mock_labels[1].is_candidate_for_detach.assert_called_with()
mock_labels[0].delete.assert_called_with()
mock_labels[1].delete.assert_not_called()
|
24329
|
from dataclasses import dataclass, field
from typing import Dict
import perde
import pytest
from util import FORMATS, FORMATS_EXCEPT
"""rust
#[derive(Serialize, Debug, new)]
struct Plain {
a: String,
b: String,
c: u64,
}
add!(Plain {"xxx".into(), "yyy".into(), 3});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_plain(m):
@dataclass
class Plain:
a: str
b: str
c: int
m.repack_type(Plain)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "camelCase")]
struct RenameAll {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAll {"xxx".into(), "yyy".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all(m):
@perde.attr(rename_all="camelCase")
@dataclass
class RenameAll:
pen_pineapple: str
apple_pen: str
m.repack_type(RenameAll)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllSerialize", rename_all = "PascalCase")]
struct RenameAllSerializeOutput {
pen_pineapple: String,
apple_pen: String,
}
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllSerialize")]
struct RenameAllSerializeInput {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAllSerializeInput {"--".into(), "==".into()});
add!(RenameAllSerializeOutput {"--".into(), "==".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all_serialize(m):
@perde.attr(rename_all_serialize="PascalCase")
@dataclass
class RenameAllSerialize:
pen_pineapple: str
apple_pen: str
d = m.unpack_data("RenameAllSerializeInput", astype=RenameAllSerialize)
v = m.dumps(d)
e = m.data("RenameAllSerializeOutput")
assert v == e
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllDeserialize")]
struct RenameAllDeserializeOutput {
pen_pineapple: String,
apple_pen: String,
}
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllDeserialize", rename_all = "SCREAMING_SNAKE_CASE")]
struct RenameAllDeserializeInput {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAllDeserializeInput {"--".into(), "==".into()});
add!(RenameAllDeserializeOutput {"--".into(), "==".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all_deserialize(m):
@perde.attr(rename_all_deserialize="SCREAMING_SNAKE_CASE")
@dataclass
class RenameAllDeserialize:
pen_pineapple: str
apple_pen: str
d = m.unpack_data("RenameAllDeserializeInput", astype=RenameAllDeserialize)
v = m.dumps(d)
e = m.data("RenameAllDeserializeOutput")
assert v == e
"""rust
#[derive(Serialize, Debug, new)]
struct DenyUnknownFields {
x: String,
y: i64,
z: i64,
q: String,
}
add!(DenyUnknownFields {"aaaaa".into(), 1, -2, "unknown".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_deny_unknown_fields(m):
@dataclass
class NoDenyUnknownFields:
x: str
y: int
z: int
@perde.attr(deny_unknown_fields=True)
@dataclass
class DenyUnknownFields:
x: str
y: int
z: int
e = m.unpack_data("DenyUnknownFields", astype=NoDenyUnknownFields)
assert e == NoDenyUnknownFields("aaaaa", 1, -2)
with pytest.raises(Exception) as e:
m.unpack_data("DenyUnknownFields", astype=DenyUnknownFields)
print(f"{e}")
"""rust
#[derive(Serialize, Debug, new)]
struct Rename {
a: String,
#[serde(rename = "x")]
b: String,
c: u64,
}
add!(Rename {"xxx".into(), "yyy".into(), 3});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename(m):
@dataclass
class Rename:
a: str
b: str = field(metadata={"perde_rename": "x"})
c: int
m.repack_type(Rename)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "camelCase")]
struct RenameAllRename {
pen_pineapple: String,
#[serde(rename = "pen_pen")]
apple_pen: String,
}
add!(RenameAllRename {"xxx".into(), "yyy".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_in_rename_all(m):
@perde.attr(rename_all="camelCase")
@dataclass
class RenameAllRename:
pen_pineapple: str
apple_pen: str = field(metadata={"perde_rename": "pen_pen"})
m.repack_type(RenameAllRename)
"""rust
#[derive(Serialize, Debug, new)]
struct NestedRenameChild {
a: String,
#[serde(rename = "d")]
b: String,
}
#[derive(Serialize, Debug, new)]
struct NestedRename {
x: String,
#[serde(rename = "w")]
y: NestedRenameChild,
z: i64,
}
add!(NestedRename
{"xxx".into(),
NestedRenameChild::new("ppp".into(), "qqq".into()),
1111}
except "toml");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("toml"))
def test_nested_rename(m):
@dataclass
class NestedRenameChild:
a: str
b: str = field(metadata={"perde_rename": "d"})
@dataclass
class NestedRename:
x: str
y: NestedRenameChild = field(metadata={"perde_rename": "w"})
z: int
m.repack_type(NestedRename)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "UPPERCASE")]
struct NestedRenameAllChild {
a: String,
b: String,
}
#[derive(Serialize, Debug, new)]
struct NestedRenameAll {
x: String,
y: NestedRenameAllChild,
z: i64,
}
add!(NestedRenameAll
{"xxx".into(),
NestedRenameAllChild::new("ppp".into(), "qqq".into()),
1111}
except "toml");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("toml"))
def test_nested_rename_all(m):
@perde.attr(rename_all="UPPERCASE")
@dataclass
class NestedRenameAllChild:
a: str
b: str
@dataclass
class NestedRenameAll:
x: str
y: NestedRenameAllChild
z: int
m.repack_type(NestedRenameAll)
"""rust
#[derive(Serialize, Debug, new)]
struct FlattenChild {
a: String,
b: String,
}
#[derive(Serialize, Debug, new)]
struct Flatten {
x: String,
#[serde(flatten)]
y: FlattenChild,
z: i64,
}
add!(Flatten
{"xxx".into(),
FlattenChild::new("ppp".into(), "qqq".into()),
1111}
except "msgpack");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_flatten(m):
@dataclass
class FlattenChild:
a: str
b: str
@dataclass
class Flatten:
x: str
y: FlattenChild = field(metadata={"perde_flatten": True})
z: int
m.repack_type(Flatten)
"""rust
#[derive(Serialize, Debug, new)]
struct DictFlatten {
x: String,
y: i64,
#[serde(flatten)]
z: IndexMap<String, String>,
}
add!(DictFlatten {"hey".into(), -103223,
{
let mut m = IndexMap::new();
m.insert("pp".into(), "q1".into());
m.insert("ppp".into(), "q2".into());
m.insert("pppp".into(), "q3".into());
m
}}
except "msgpack");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_dict_flatten(m):
@dataclass
class DictFlatten:
x: str
y: int
z: Dict[str, str] = field(metadata={"perde_flatten": True})
m.repack_type(DictFlatten)
"""rust
#[derive(Serialize, Debug, new)]
struct Flatten2 {
x: String,
a: i64,
b: i64,
}
add!(Flatten2 { "haa".into(), 11, 33 });
"""
@pytest.mark.parametrize("m", FORMATS)
def test_flatten2(m):
@dataclass
class Flatten2Child:
a: int
b: int
@dataclass
class Flatten2:
x: str
y: Flatten2Child = field(metadata={"perde_flatten": True})
m.repack_type(Flatten2)
"""rust
#[derive(Serialize, Debug, new)]
struct DictFlatten2 {
x: String,
y: i64,
pp: String,
ppp: String,
pppp: String,
}
add!(DictFlatten2 {
"hey".into(), -103223,
"q1".into(), "q2".into(), "q3".into()
});
"""
# TODO: support msgpack here as well.
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_dict_flatten2(m):
@dataclass
class DictFlatten2:
x: str
y: int
z: Dict[str, str] = field(metadata={"perde_flatten": True})
m.repack_type(DictFlatten2)
|
24371
|
from django.urls import path
from . import views
urlpatterns = [
path("draugiem/login/", views.login, name="draugiem_login"),
path("draugiem/callback/", views.callback, name="draugiem_callback"),
]
|
24376
|
import sys
import requests
import json
import argparse
import time
parser = argparse.ArgumentParser(description='Collects monitoring data from Pingdom.')
parser.add_argument('-u', '--pingdom-user-name', help='The Pingdom User Name', required=True)
parser.add_argument('-p', '--pingdom-password', help='The Pingdom Password', required=True)
parser.add_argument('-a', '--pingdom-api-key', help='The Pingdom API-KEY', required=True)
class Pingdom:
def __init__(self, api_key, user_name, password):
        self.api_key = api_key
        self.user_name = user_name
        self.password = password
self.jsonData = []
def handle_error(self, error_message):
sys.stderr.write("ERROR:|Pingdom| " + error_message)
sys.exit(1)
def call_api(self, api):
        headers = {'App-Key': self.api_key}
        base_api = 'https://api.pingdom.com/api/2.0/' + api
        response = requests.get(base_api, headers=headers, auth=requests.auth.HTTPBasicAuth(self.user_name, self.password))
if response.status_code == 200:
return response.json()
else:
self.handle_error("API [" + base_api + "] failed to execute with error code [" + str(response.status_code) + "].")
def get_checks(self):
response = self.call_api('checks')
data = response.get("checks")
counts = response.get("counts")
up_count = 0
down_count = 0
unconfirmed_down_count = 0
unknown_count = 0
paused_count = 0
for x in data:
status = x.get("status")
if status == "up":
up_count = up_count + 1
elif status == "down":
                down_count = down_count + 1
elif status == "unconfirmed_down":
unconfirmed_down_count = unconfirmed_down_count + 1
elif status == "unknown":
unknown_count = unknown_count + 1
elif status == "paused":
paused_count = paused_count + 1
counts["up"] = up_count
counts["down"] = down_count
counts["unconfirmed_down"] = unconfirmed_down_count
counts["unknown"] = unknown_count
counts["paused"] = paused_count
data.append(counts)
self.jsonData = data
def get_credits(self):
response = self.call_api('credits')
self.jsonData.append(response)
def get_maintenance(self):
response = self.call_api('maintenance')
if response.get('maintenance'):
for mw in response.get('maintenance'):
window = {}
window["description"] = mw.get("description")
window["recurrencetype"] = mw.get("recurrencetype")
window["repeatevery"] = mw.get("repeatevery")
window["from"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(mw.get("from")))
window["to"] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(mw.get("to")))
window["window"] = 1
self.jsonData.append(window)
if __name__ == "__main__":
try:
args = parser.parse_args()
pingdom = Pingdom(args.pingdom_api_key, args.pingdom_user_name, args.pingdom_password)
pingdom.get_checks()
pingdom.get_credits()
pingdom.get_maintenance()
print(json.dumps(pingdom.jsonData))
except Exception as e:
        pingdom.handle_error(str(e))
|
24381
|
import logging
from typing import Any, List, Optional
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_FAN_ONLY,
)
from gehomesdk import ErdAcFanSetting
from ..common import OptionsConverter
_LOGGER = logging.getLogger(__name__)
class AcFanModeOptionsConverter(OptionsConverter):
def __init__(self, default_option: ErdAcFanSetting = ErdAcFanSetting.AUTO):
self._default = default_option
@property
def options(self) -> List[str]:
return [i.stringify() for i in [ErdAcFanSetting.AUTO, ErdAcFanSetting.LOW, ErdAcFanSetting.MED, ErdAcFanSetting.HIGH]]
def from_option_string(self, value: str) -> Any:
try:
return ErdAcFanSetting[value.upper().replace(" ","_")]
        except Exception:
            _LOGGER.warning(f"Could not set fan mode to {value}")
return self._default
def to_option_string(self, value: Any) -> Optional[str]:
try:
return {
ErdAcFanSetting.AUTO: ErdAcFanSetting.AUTO,
ErdAcFanSetting.LOW: ErdAcFanSetting.LOW,
ErdAcFanSetting.LOW_AUTO: ErdAcFanSetting.AUTO,
ErdAcFanSetting.MED: ErdAcFanSetting.MED,
ErdAcFanSetting.MED_AUTO: ErdAcFanSetting.AUTO,
ErdAcFanSetting.HIGH: ErdAcFanSetting.HIGH,
ErdAcFanSetting.HIGH_AUTO: ErdAcFanSetting.HIGH
}.get(value).stringify()
        except Exception:
pass
return self._default.stringify()
class AcFanOnlyFanModeOptionsConverter(AcFanModeOptionsConverter):
def __init__(self):
super().__init__(ErdAcFanSetting.LOW)
@property
def options(self) -> List[str]:
return [i.stringify() for i in [ErdAcFanSetting.LOW, ErdAcFanSetting.MED, ErdAcFanSetting.HIGH]]
|
24392
|
from typing import Dict
# The rest of the codebase uses mojos everywhere.
# Only use these units for user facing interfaces.
units: Dict[str, int] = {
"cryptodoge": 10 ** 6, # 1 cryptodoge (XCD) is 1,000,000 mojo (1 million)
"mojo:": 1,
"colouredcoin": 10 ** 3, # 1 coloured coin is 1000 colouredcoin mojos
}
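# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of turning a raw mojo amount into a user-facing string using
# the `units` table above; the helper name and example values are hypothetical.
def mojo_to_display(amount_mojo: int, unit: str = "cryptodoge") -> str:
    """Format an integer mojo amount in the requested user-facing unit."""
    return "{:g} {}".format(amount_mojo / units[unit], unit)
# Example: mojo_to_display(1_500_000) == "1.5 cryptodoge"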
|
24413
|
import torch
import torch.nn as nn
from cogdl.utils import spmm
class GINLayer(nn.Module):
r"""Graph Isomorphism Network layer from paper `"How Powerful are Graph
Neural Networks?" <https://arxiv.org/pdf/1810.00826.pdf>`__.
.. math::
h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
\mathrm{sum}\left(\left\{h_j^{l}, j\in\mathcal{N}(i)
\right\}\right)\right)
Parameters
----------
    apply_func : callable, optional
        Layer or function applied to update node features.
eps : float32, optional
Initial `\epsilon` value.
train_eps : bool, optional
If True, `\epsilon` will be a learnable parameter.
"""
def __init__(self, apply_func=None, eps=0, train_eps=True):
super(GINLayer, self).__init__()
if train_eps:
self.eps = torch.nn.Parameter(torch.FloatTensor([eps]))
else:
self.register_buffer("eps", torch.FloatTensor([eps]))
self.apply_func = apply_func
def forward(self, graph, x):
out = (1 + self.eps) * x + spmm(graph, x)
if self.apply_func is not None:
out = self.apply_func(out)
return out
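# --- Illustrative sketch (not part of the original module) ---
# The update rule above is h_i' = f_theta((1 + eps) * h_i + sum_{j in N(i)} h_j).
# A dense-adjacency equivalent in plain PyTorch, assuming `adj` is an (n, n)
# 0/1 adjacency matrix and `apply_func` is f_theta (e.g. a small MLP); it only
# illustrates the equation and does not use cogdl's sparse `spmm` path.
def gin_update_dense(x, adj, apply_func, eps=0.0):
    out = (1 + eps) * x + adj @ x  # scaled self term + neighbourhood sum
    return apply_func(out)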
|
24420
|
import json
import falcon
import time
import uuid
import requests
from apps.database import init_db, db_session
from apps.models import Account
from apps.restaccount.logging import logging
logger = logging.getLogger(__name__)
from decouple import config
ES_HOST = config('EVENTSTORE_HOST', default='eventstore')
ES_PORT = config('EVENTSTORE_PORT', default=2113, cast=int)
stream_url = 'http://{}:{}/streams/accounts'.format(ES_HOST, ES_PORT)
content_header = { 'Content-Type': 'application/vnd.eventstore.events+json' }
logger.info('stream_url: {}'.format(stream_url))
def get_account(account_id):
return Account.query.get(account_id)
class BalanceResource(object):
def on_get(self, req, resp, account_id):
init_db()
doc = db_session.query(Account).get(account_id)
db_session.close()
if doc is None:
raise falcon.HTTPBadRequest('Balance missing', 'Deposit money to start using an account')
else:
# Create a JSON representation of the resource
resp.body = json.dumps(doc.as_dict(), ensure_ascii=False)
# The following line can be omitted because 200 is the default
# status returned by the framework, but it is included here to
# illustrate how this may be overridden as needed.
resp.status = falcon.HTTP_200
class DepositResource(object):
def on_post(self, req, resp):
body = req.stream.read()
doc = json.loads(body.decode('utf-8'))
logger.info('doc: {}'.format(doc))
payload = [
{
"eventId": str(uuid.uuid1()),
"eventType": "created-deposit",
"data": doc
}
]
logger.info("payload: {}".format(payload))
        r = requests.post(stream_url, data=json.dumps(payload), headers=content_header)
resp.status = falcon.HTTP_200
class TransferResource(object):
def on_post(self, req, resp):
body = req.stream.read()
doc = json.loads(body.decode('utf-8'))
acc = get_account(doc['account_id'])
payload = [
{
"eventId": str(uuid.uuid1()),
"eventType": "created-transfer",
"data": doc
}
]
if acc is None:
raise falcon.HTTPBadRequest('Account missing', 'You must deposit into an account before transfering')
if acc.balance < doc['amount']:
raise falcon.HTTPBadRequest('Insufficient funds', 'Account balance {} less than transfer amount {}'.format(acc.balance, doc['amount']))
else:
logger.info("payload: {}".format(payload))
            r = requests.post(stream_url, data=json.dumps(payload), headers=content_header)
resp.status = falcon.HTTP_200
|
24453
|
import unittest
import paddle
import neural_renderer_paddle as nr
class TestLighting(unittest.TestCase):
def test_case1(self):
"""Test whether it is executable."""
faces = paddle.randn([64, 16, 3, 3], dtype=paddle.float32)
textures = paddle.randn([64, 16, 8, 8, 8, 3], dtype=paddle.float32)
nr.lighting(faces, textures)
if __name__ == '__main__':
unittest.main()
|
24475
|
from typing import List, Optional
from pydantic import BaseModel
class Provider(BaseModel):
url: str
name: str
roles: Optional[List[str]] = None
|
24517
|
from datetime import datetime, timezone
from io import StringIO
from unittest import mock
import freezegun
import pytest
from django.conf import settings
from django.core.management import call_command
from django.utils import timezone
from model_bakery import baker
from supportal.app.common.enums import CanvassResult
from supportal.app.models import EmailSend
CREATED_AT = datetime(2019, 10, 26, 1, tzinfo=timezone.utc)
CREATED_AT_EARLIER = datetime(2019, 10, 26, tzinfo=timezone.utc)
DAY_BEFORE_EXPIRE = datetime(2019, 11, 1, tzinfo=timezone.utc)
TWO_DAY_BEFORE_EXPIRE = datetime(2019, 10, 31, tzinfo=timezone.utc)
EXPIRED_AT = datetime(2019, 11, 2, 1, tzinfo=timezone.utc)
EXPIRED_EARLIER = datetime(2019, 11, 2, tzinfo=timezone.utc)
AFTER_EXPIRATION_DATE = datetime(2019, 11, 3, tzinfo=timezone.utc)
SIX_DAYS_BEFORE_EXPIRE = datetime(2019, 10, 27, tzinfo=timezone.utc)
def email_expiring_users(*args, **kwargs):
call_command("email_users_with_expiring_assignments", **kwargs)
@pytest.fixture
def first_cambridge_assignment(cambridge_leader_user, cambridge_prospect):
cambridge_assignment = baker.make(
"VolProspectAssignment", user=cambridge_leader_user, person=cambridge_prospect
)
cambridge_assignment.created_at = CREATED_AT
cambridge_assignment.save()
return cambridge_assignment
@pytest.fixture
def hayes_assignment(hayes_valley_leader_user, california_prospect):
hayes_valley_assignment = baker.make(
"VolProspectAssignment",
user=hayes_valley_leader_user,
person=california_prospect,
)
hayes_valley_assignment.created_at = CREATED_AT_EARLIER
hayes_valley_assignment.save()
return hayes_valley_assignment
@pytest.fixture
def hayes_cambrdige_assignment(hayes_valley_leader_user, cambridge_prospect):
hayes_valley_assignment = baker.make(
"VolProspectAssignment",
user=hayes_valley_leader_user,
person=cambridge_prospect,
)
hayes_valley_assignment.created_at = CREATED_AT
hayes_valley_assignment.save()
return hayes_valley_assignment
@pytest.fixture
def second_cambridge_assignment(cambridge_leader_user, california_prospect):
cambridge_assignment = baker.make(
"VolProspectAssignment", user=cambridge_leader_user, person=california_prospect
)
cambridge_assignment.created_at = CREATED_AT
cambridge_assignment.save()
return cambridge_assignment
@pytest.fixture
def expired_assignment(cambridge_leader_user, somerville_prospect):
cambridge_assignment = baker.make(
"VolProspectAssignment", user=cambridge_leader_user, person=somerville_prospect
)
cambridge_assignment.created_at = CREATED_AT
cambridge_assignment.expired_at = EXPIRED_AT
cambridge_assignment.save()
return cambridge_assignment
DEFAULT_TEMPLATE_DATA = {
"assignment_count": "",
"email": "",
"expiration_date": "",
"switchboard_login_url": settings.SUPPORTAL_BASE_URL,
"first_name": "",
"last_name": "",
}
def make_payload(assignment_count, email, expiration, first_name, last_name):
return {
"assignment_count": assignment_count,
"email": email,
"expiration_date": expiration.strftime("%a %b %d, %Y"),
"switchboard_login_url": settings.SUPPORTAL_BASE_URL,
"first_name": first_name,
"last_name": last_name,
}
def check_email_sends(user, assignment_count, expiration, single_call_mock=None):
assert EmailSend.objects.filter(user=user).count() == 1
email_sent = EmailSend.objects.get(user=user)
assert email_sent.template_name == "expiring_contacts_email"
assert email_sent.payload == {
"assignment_count": assignment_count,
"email": user.email,
"expiration_date": expiration.strftime("%a %b %d, %Y"),
"switchboard_login_url": settings.SUPPORTAL_BASE_URL,
"first_name": user.first_name,
"last_name": user.last_name,
}
if single_call_mock:
single_call_mock.return_value.send_bulk_email.assert_called_once_with(
configuration_set_name="organizing_emails",
default_template_data=DEFAULT_TEMPLATE_DATA,
from_email=settings.FROM_EMAIL,
payload_array=[
make_payload(
assignment_count,
user.email,
expiration,
user.first_name,
user.last_name,
)
],
reply_to_email=settings.REPLY_TO_EMAIL,
template="expiring_contacts_email",
application_name="supportal",
)
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_uncontacted_assignments(
first_cambridge_assignment, expired_assignment
):
out = StringIO()
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
first_cambridge_assignment.refresh_from_db()
assert EmailSend.objects.all().count() == 1
check_email_sends(
first_cambridge_assignment.user, 1, EXPIRED_AT, email_service_mock
)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dryrun(first_cambridge_assignment, expired_assignment):
out = StringIO()
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out)
first_cambridge_assignment.refresh_from_db()
assert EmailSend.objects.all().count() == 0
assert first_cambridge_assignment.user.email in out.getvalue()
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(DAY_BEFORE_EXPIRE)
def test_dont_email_outside_of_two_days(first_cambridge_assignment, expired_assignment):
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 0
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_assignments(
first_cambridge_assignment, second_cambridge_assignment, expired_assignment
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 1
check_email_sends(
first_cambridge_assignment.user, 2, EXPIRED_AT, email_service_mock
)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users(
first_cambridge_assignment,
hayes_assignment,
hayes_cambrdige_assignment,
expired_assignment,
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 2
check_email_sends(first_cambridge_assignment.user, 1, EXPIRED_AT)
check_email_sends(hayes_assignment.user, 2, EXPIRED_EARLIER)
email_service_mock.return_value.send_bulk_email.assert_called_once_with(
configuration_set_name="organizing_emails",
default_template_data=DEFAULT_TEMPLATE_DATA,
from_email=settings.FROM_EMAIL,
payload_array=[
make_payload(
1,
first_cambridge_assignment.user.email,
EXPIRED_AT,
first_cambridge_assignment.user.first_name,
first_cambridge_assignment.user.last_name,
),
make_payload(
2,
hayes_assignment.user.email,
EXPIRED_EARLIER,
hayes_assignment.user.first_name,
hayes_assignment.user.last_name,
),
],
reply_to_email=settings.REPLY_TO_EMAIL,
template="expiring_contacts_email",
application_name="supportal",
)
assert "Found 2 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users_send_all_to_flag(
first_cambridge_assignment,
hayes_assignment,
hayes_cambrdige_assignment,
expired_assignment,
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(
stdout=out, send=True, send_all_to="<EMAIL>"
)
assert EmailSend.objects.all().count() == 0
email_service_mock.return_value.send_bulk_email.assert_called_once_with(
configuration_set_name="organizing_emails",
default_template_data=DEFAULT_TEMPLATE_DATA,
from_email=settings.FROM_EMAIL,
payload_array=[
make_payload(
1,
"<EMAIL>",
EXPIRED_AT,
first_cambridge_assignment.user.first_name,
first_cambridge_assignment.user.last_name,
),
make_payload(
2,
"<EMAIL>",
EXPIRED_EARLIER,
hayes_assignment.user.first_name,
hayes_assignment.user.last_name,
),
],
reply_to_email=settings.REPLY_TO_EMAIL,
template="expiring_contacts_email",
application_name="supportal",
)
assert "Found 2 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_with_two_users_limit_flag(
first_cambridge_assignment,
hayes_assignment,
hayes_cambrdige_assignment,
expired_assignment,
):
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, limit=1, send=True)
assert EmailSend.objects.all().count() == 1
check_email_sends(first_cambridge_assignment.user, 1, EXPIRED_AT)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_unsuccessfully_contacted_assignments(
first_cambridge_assignment, expired_assignment
):
first_cambridge_assignment.create_contact_event(
result=CanvassResult.UNAVAILABLE_LEFT_MESSAGE
)
first_cambridge_assignment.save()
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 1
check_email_sends(
first_cambridge_assignment.user, 1, EXPIRED_AT, email_service_mock
)
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dont_email_unsubscribed_user(first_cambridge_assignment, expired_assignment):
first_cambridge_assignment.user.unsubscribed_at = datetime.now(tz=timezone.utc)
first_cambridge_assignment.user.save()
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 0
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 0
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_dont_email_user_who_was_emailed_recently(
first_cambridge_assignment, expired_assignment
):
EmailSend.objects.create(
user=first_cambridge_assignment.user,
template_name=EmailSend.EXPIRING_PROSPECTS,
payload={},
)
assert first_cambridge_assignment.user.unsubscribed_at is None
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 1
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 1
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_email_user_who_was_invited_recently(
first_cambridge_assignment, expired_assignment
):
EmailSend.objects.create(
user=first_cambridge_assignment.user,
template_name=EmailSend.INVITE_EMAIL,
payload={},
)
assert first_cambridge_assignment.user.unsubscribed_at is None
out = StringIO()
with mock.patch(
"supportal.app.management.commands.base_email_command.EmailService"
) as email_service_mock:
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 2
assert EmailSend.objects.filter(user=first_cambridge_assignment.user).count() == 2
assert "Found 1 users to email." in out.getvalue()
@pytest.mark.django_db
@freezegun.freeze_time(TWO_DAY_BEFORE_EXPIRE)
def test_successfully_contacted_dont_email(
first_cambridge_assignment, expired_assignment
):
# Make sure that having a previous unsuccessful contact event doesn't cause
# the contact to get expired.
first_cambridge_assignment.create_contact_event(
result=CanvassResult.UNAVAILABLE_LEFT_MESSAGE
)
first_cambridge_assignment.create_contact_event(
result=CanvassResult.SUCCESSFUL_CANVASSED
)
first_cambridge_assignment.save()
out = StringIO()
email_expiring_users(stdout=out, send=True)
first_cambridge_assignment.refresh_from_db()
assert EmailSend.objects.all().count() == 0
assert "Found 0 users to email." in out.getvalue()
@pytest.mark.django_db
def test_expire_zero_assignments():
out = StringIO()
email_expiring_users(stdout=out, send=True)
assert EmailSend.objects.all().count() == 0
assert "Found 0 users to email." in out.getvalue()
|
24556
|
import unittest
from pyhmmer.easel import Alphabet
from pyhmmer.errors import UnexpectedError, AllocationError, EaselError, AlphabetMismatch
class TestErrors(unittest.TestCase):
def test_unexpected_error(self):
err = UnexpectedError(1, "p7_ReconfigLength")
self.assertEqual(repr(err), "UnexpectedError(1, 'p7_ReconfigLength')")
self.assertEqual(str(err), "Unexpected error occurred in 'p7_ReconfigLength': eslFAIL (status code 1)")
def test_allocation_error(self):
err = AllocationError("ESL_SQ", 16)
self.assertEqual(repr(err), "AllocationError('ESL_SQ', 16)")
self.assertEqual(str(err), "Could not allocate 16 bytes for type ESL_SQ")
err2 = AllocationError("float", 4, 32)
self.assertEqual(repr(err2), "AllocationError('float', 4, 32)")
self.assertEqual(str(err2), "Could not allocate 128 bytes for an array of 32 float")
def test_easel_error(self):
err = EaselError(1, "failure")
self.assertEqual(repr(err), "EaselError(1, 'failure')")
self.assertEqual(str(err), "Error raised from C code: failure, eslFAIL (status code 1)")
def test_alphabet_mismatch(self):
err = AlphabetMismatch(Alphabet.dna(), Alphabet.rna())
self.assertEqual(repr(err), "AlphabetMismatch(Alphabet.dna(), Alphabet.rna())")
self.assertEqual(str(err), "Expected Alphabet.dna(), found Alphabet.rna()")
self.assertNotEqual(err, 1)
err2 = AlphabetMismatch(Alphabet.dna(), Alphabet.rna())
self.assertEqual(err, err)
self.assertEqual(err, err2)
err3 = AlphabetMismatch(Alphabet.dna(), Alphabet.amino())
self.assertNotEqual(err, err3)
|
24557
|
import base64
STANDARD_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
CUSTOM_ALPHABET = 'abcdefghjkmnprstuvwxyz0123456789'
ENCODE_TRANS = str.maketrans(STANDARD_ALPHABET, CUSTOM_ALPHABET)
DECODE_TRANS = str.maketrans(CUSTOM_ALPHABET, STANDARD_ALPHABET)
PADDING_LETTER = '='
def encode(buffer):
    assert isinstance(buffer, (bytes, bytearray)), "please pass bytes or a bytearray"
    b32encoded = base64.b32encode(buffer)  # encode bytes with standard base32
    b32str = b32encoded.decode().replace(PADDING_LETTER, "")  # strip the padding chars
    return b32str.translate(ENCODE_TRANS)  # translate to the custom alphabet
def decode(b32str):
    assert isinstance(b32str, str), "please pass a str"
# pad to 8's multiple with '='
b32len = len(b32str)
if b32len % 8 > 0:
padded_len = b32len + (8 - b32len % 8)
b32str = b32str.ljust(padded_len, PADDING_LETTER)
# translate and decode
return base64.b32decode(b32str.translate(DECODE_TRANS))
def decode_to_words(b32str):
result = bytearray()
for c in b32str:
result.append(CUSTOM_ALPHABET.index(c))
return result
def encode_words(words):
result = ""
for v in words:
result += CUSTOM_ALPHABET[v]
return result
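# --- Illustrative usage (not part of the original module) ---
# A round-trip sketch using the helpers above; the sample payload is arbitrary.
if __name__ == "__main__":
    token = encode(b"hello world")           # custom-alphabet base32, no padding
    assert decode(token) == b"hello world"   # decodes back to the raw bytes
    assert encode_words(decode_to_words(token)) == token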
|
24591
|
import os
import re
import csv
import sys
import json
import yaml
import time
import socket
import connexion
import postgresql as psql
from flask import current_app
from urllib.parse import urlencode
from hashlib import md5
from bokeh.embed import server_document
from .processes import fetch_process, is_running, process_info
from .utils import column_filter
float_pattern = re.compile(r'^\d*\.\d+$')
int_pattern = re.compile(r'^-?\d+$')
NA_pattern = re.compile(r'^NA$')
queryfilters = re.compile(r'(.+)(<=?|>=?|!=|==)(.+)')
def init_column_mapping(row, schema):
"""Generate initial estimates of column data types"""
defs = {column_filter(col): 'text' for col in row}
# Apply predefined table schema
defs.update({k: v for (k, v) in schema.items() if k in defs})
for (col, val) in row.items():
col = column_filter(col)
if col not in schema:
if int_pattern.match(val):
try:
int(val)
print("Assigning int to", col, "based on", val)
defs[col] = 'integer'
except ValueError:
print("ERROR: Int mismatch:", val)
elif float_pattern.match(val):
try:
float(val)
print("Assigning float to", col, "based on", val)
defs[col] = 'decimal'
except ValueError:
print("ERROR: Float mismatch:", val)
mapping = {}
for (col, val) in defs.items():
if 'int' in val:
mapping[col] = int
elif val == 'decimal':
mapping[col] = float
else:
mapping[col] = str
return (mapping, defs)
def column_mapping(row, mapping, schema):
"""Apply filtering to the current row.
Detect if column data types need to be changed"""
output = {}
changes = {}
for (col, val) in row.items():
col = column_filter(col)
        if val is None or NA_pattern.match(str(val)):
output[col] = None
continue
if col not in schema and mapping[col] == str:
if int_pattern.match(val):
try:
int(val)
print("Assigning int to", col, "based on", val)
mapping[col] = int
changes[col] = int
except ValueError:
print("ERROR: Int mismatch:", val)
elif float_pattern.match(val):
try:
float(val)
print("Assigning float to", col, "based on", val)
mapping[col] = float
changes[col] = float
except ValueError:
print("ERROR: Float mismatch:", val)
try:
output[col] = mapping[col](val)
except ValueError:
output[col] = None
return (mapping, output, changes)
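# --- Illustrative sketch (not part of the original module) ---
# How the two helpers above cooperate, assuming an empty predefined schema and
# that column_filter leaves simple lowercase names unchanged: the first row
# seeds the type estimates, and later rows promote columns as better values appear.
def _type_inference_demo():
    mapping, defs = init_column_mapping({"gene": "TP53", "start": "7571720", "score": "NA"}, {})
    # 'start' is now 'integer'; 'score' stayed 'text' because "NA" matched neither pattern
    mapping, formatted, changes = column_mapping({"gene": "EGFR", "start": "55086714", "score": "0.93"}, mapping, {})
    return formatted, changes  # 'score' is promoted to float on this row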
def old_file_read(db, CREATE_TABLE, tablekey, column_names, reader, mapping):
with db.xact():
db.execute(CREATE_TABLE)
# table marked for insertion during original attempt, so don't need to here
# prepare the insertion query
insert = db.prepare("INSERT INTO %s (%s) VALUES (%s)" % (
tablekey,
','.join(column_names),
','.join('$%d' % i for (_, i) in zip(
column_names, range(1, sys.maxsize)
))
))
update = "ALTER TABLE %s " % tablekey
for row in reader:
# process each row
# We format the data in the row and update column data types, if
# necessary
(mapping, formatted, changes) = column_mapping(row, mapping, current_app.config['schema'])
if len(changes):
#Generate a query to alter the table schema, if any changes are required
alter_cols = []
for (k, v) in changes.items():
# if there were any changes to the data type, update the table
# since we only ever update a text column to int/decimal, then
# it's okay to nullify the data
typ = ''
if v == int:
typ = 'bigint' if k in {'start', 'stop'} else 'integer'
elif v == float:
typ = 'decimal'
alter_cols.append(
"ALTER COLUMN %s SET DATA TYPE %s USING %s::%s" % (
k, typ, k, typ
)
)
# Re-generate the insert statement since the data types changed
print("Alter:", update + ','.join(alter_cols))
db.execute(update + ','.join(alter_cols))
insert = db.prepare("INSERT INTO %s (%s) VALUES (%s)" % (
tablekey,
','.join(column_names),
','.join('$%d' % i for (_, i) in zip(
column_names, range(1, sys.maxsize)
))
))
# insert the row
insert(*[formatted[column] for column in column_names])
def table_transaction(file_permissions, db, CREATE_TABLE, tablekey, all_tablecolumns, raw_reader, column_names, mapping):
with db.xact():
db.execute(CREATE_TABLE)
db.prepare("LOCK TABLE %s IN ACCESS EXCLUSIVE MODE" % (tablekey))
copy_query = "COPY %s (%s) FROM '%s' WITH FREEZE NULL 'NA' DELIMITER E'\t' CSV HEADER" % (tablekey, all_tablecolumns, raw_reader.name)
#copy_query may result in psql.exceptions.InsufficientPrivilegeError when run; workaround attempted below
if file_permissions:
#mark the table for deletion when the server shuts down
#don't need to mark table for deletion during second attempt
if 'db-clean' not in current_app.config:
current_app.config['db-clean'] = [tablekey]
else:
current_app.config['db-clean'].append(tablekey)
#attempt file copy
db.execute(copy_query)
else:
import subprocess
filedest = "/tmp/"+os.path.basename(raw_reader.name)
subprocess.run(["mktemp", filedest], stdout=subprocess.DEVNULL)
subprocess.run(["cp", raw_reader.name, filedest])
subprocess.run(["chmod", "666", filedest])
copy_query = "COPY %s (%s) FROM '%s' WITH FREEZE NULL 'NA' DELIMITER E'\t' CSV HEADER" % (tablekey, all_tablecolumns, filedest)
try:
db.execute(copy_query)
print("...Success")
finally:
subprocess.run(["rm", filedest])
col_val_query = "SELECT "
for col_name in column_names:
col_val_query += "(select %s from %s where %s is not null limit 1), "%(col_name, tablekey, col_name)
col_val_query = col_val_query[:-2]
col_values = db.prepare(col_val_query)
values = col_values()[0]
update = "ALTER TABLE %s " % tablekey
row = dict(zip(col_values.column_names, values))
(mapping, formatted, changes) = column_mapping(row, mapping, current_app.config['schema'])
if len(changes):
#Generate a query to alter the table schema, if any changes are required
alter_cols = []
for (k, v) in changes.items():
# if there were any changes to the data type, update the table
# since we only ever update a text column to int/decimal, then
# it's okay to nullify the data
typ = ''
if v == int:
typ = 'bigint' if k in {'start', 'stop'} else 'integer'
elif v == float:
typ = 'decimal'
alter_cols.append(
"ALTER COLUMN %s SET DATA TYPE %s USING %s::%s" % (
k, typ, k, typ
)
)
print("Alter:", update + ','.join(alter_cols))
db.execute(update + ','.join(alter_cols))
def create_table(parentID, fileID, data, tablekey, db):
# Open a reader to cache the file in the database
if parentID != -1:
process = fetch_process(parentID, data, current_app.config['storage']['children'])
if not process[0]:
return (
{
"code": 400,
"message": "The requested process (%d) does not exist" % parentID,
"fields": "parentID"
}, 400
)
if is_running(process):
return (
{
"code": 400,
"message": "The requested process (%d) is still running" % parentID,
"fields": "parentID"
}, 400
)
if str(fileID) not in process[0]['files']:
return (
{
"code": 400,
"message": "The requested fileID (%s) does not exist for this process (%d)" % (fileID, parentID),
"fields": "fileID"
}, 400
)
raw_reader = open(process[0]['files'][fileID]['fullname'])
else:
if str(fileID) not in data['visualize']:
return (
{
"code": 400,
"message": "The requested fileID (%s) does not exist in the visualize" % fileID,
"fields": "fileID"
}, 400
)
raw_reader = open(data['visualize'][str(fileID)]['fullname'])
if not raw_reader.name.endswith('.tsv'):
ext = os.path.splitext(raw_reader.name)[1].lower()
if len(ext) and ext[0] == '.':
ext = ext[1:]
return serve_as(raw_reader, ext)
reader = csv.DictReader(raw_reader, delimiter='\t')
tmp_reader = open(raw_reader.name)
tmp = csv.DictReader(tmp_reader, delimiter='\t')
try:
init = next(tmp)
except StopIteration:
return []
tmp_reader.close()
# Get an initial estimate of column datatypes from the first row
(mapping, column_names) = init_column_mapping(init, current_app.config['schema'])
tablecolumns = "\n".join( # use the estimated types to create the table
"%s %s," % (colname, column_names[colname])
for colname in column_names
)[:-1]
CREATE_TABLE = "CREATE TABLE %s (\
rowid SERIAL PRIMARY KEY NOT NULL,\
%s\
)" % (tablekey, tablecolumns)
all_tablecolumns = ', '.join(column_filter(col) for col in reader.fieldnames)
try:
table_transaction(True, db, CREATE_TABLE, tablekey, all_tablecolumns, raw_reader, column_names, mapping)
except psql.exceptions.UniqueError: #If another transaction already created specified table, pass
pass
except psql.exceptions.InsufficientPrivilegeError as e:
#can occur when postgres user unable to open file due to permissions; specifically for travis-ci tests
#check if resulting from postgres user permissions
if e.args[0].startswith("must be superuser"):
print("WARNING: Postgres user is not a super user; visualization time may be slow")
old_file_read(db, CREATE_TABLE, tablekey, column_names, reader, mapping) #use inefficient file-read-to-db method
else:
#attempt to resolve by copying file to /tmp/, changing its permissions, and accessing it there
try:
print("InsufficientPrivilegeError raised in accessing file.\nAttempting workaround...")
table_transaction(False, db, CREATE_TABLE, tablekey, all_tablecolumns, raw_reader, column_names, mapping)
except psql.exceptions.InsufficientPrivilegeError:
print("Postgres could not access file. Check to make sure that both the "
"file and your current postgres user has the appropriate permissions.")
raise
raw_reader.close()
def filterfile(parentID, fileID, count, page, filters, sort, direction):
"""Gets the file ID belonging to the parent.\
For result files, the parentID is the process ID that spawned them.\
For visualize files, the parentID is -1"""
data = current_app.config['storage']['loader']()
# first, generate the key
tablekey = "data_%s_%s" % (
(parentID if parentID >= 0 else 'visualize'),
fileID
)
# check if the table exists:
db = psql.open("localhost/pvacseq")
fileID = str(fileID)
with db.xact():
query = db.prepare("SELECT 1 FROM information_schema.tables WHERE table_name = $1")
response = query(tablekey)
if not len(response): # table does not exist
table_errors = create_table(parentID, fileID, data, tablekey, db)
if table_errors != None:
return table_errors
#with db.synchronizer:
# test_query = db.prepare("SELECT 1 FROM information_schema.tables WHERE table_name = $1")
# test_response = query(tablekey)
with db.xact():
typequery = db.prepare(
"SELECT column_name, data_type FROM information_schema.columns WHERE table_name = $1"
)
column_defs = typequery(tablekey)
column_maps = {}
for (col, typ) in column_defs:
if 'int' in typ:
column_maps[col] = int
            elif typ == 'numeric' or typ == 'decimal':
column_maps[col] = float
else:
column_maps[col] = str
formatted_filters = []
for i in range(len(filters)):
f = filters[i].strip()
if not len(f):
continue
result = queryfilters.match(f)
if not result:
return ({
"code": 400,
"message": "Encountered an invalid filter (%s)" % f,
"fields": "filters"
}, 400)
colname = column_filter(result.group(1))
if colname not in column_maps:
return ({
"code": 400,
"message": "Unknown column name %s" % result.group(1),
"fields": "filters"
}, 400)
op = result.group(2)
typ = column_maps[colname]
val = None
try:
val = column_maps[colname](
result.group(3)
)
except ValueError:
return ({
"code": 400,
"message": "Value %s cannot be formatted to match the type of column %s (%s)" % (
result.group(3),
result.group(1),
typ
)
}, 400)
if typ == str and (op in {'==', '!='}):
formatted_filters.append(
json.dumps(colname) + (' not ' if '!' in op else ' ') + "LIKE '%s'" % (
json.dumps(val)[1:-1]
)
)
else: # type is numerical
op = op.replace('==', '=')
formatted_filters.append(
'%s %s %s' % (
json.dumps(colname),
op,
json.dumps(val)
)
)
raw_query = "SELECT %s FROM %s" % (
','.join([k[0] for k in column_defs]),
tablekey
)
if len(formatted_filters):
raw_query += " WHERE " + " AND ".join(formatted_filters)
if sort:
if column_filter(sort) not in column_maps:
return ({
'code': 400,
'message': 'Invalid column name %s' % sort,
'fields': 'sort'
}, 400)
raw_query += " ORDER BY %s" % (column_filter(sort))
if direction:
raw_query += " " + direction
if count:
raw_query += " LIMIT %d" % count
if page:
raw_query += " OFFSET %d" % (page * count)
print("Query:", raw_query)
import decimal
with db.xact('SERIALIZABLE', 'READ ONLY DEFERRABLE'):
query = db.prepare(raw_query)
decimalizer = lambda x: (float(x) if type(x) == decimal.Decimal else x)
result = [
{
colname: decimalizer(value) for (colname, value) in zip(
[k[0] for k in column_defs],
[val for val in row]
)
} for row in query.rows()
]
db.close()
return result
def fileschema(parentID, fileID):
data = current_app.config['storage']['loader']()
tablekey = "data_%s_%s" % (
(parentID if parentID >= 0 else 'visualize'),
fileID
)
# check if the table exists:
db = psql.open("localhost/pvacseq")
with db.xact():
query = db.prepare("SELECT 1 FROM information_schema.tables WHERE table_name = $1")
if not len(query(tablekey)): # table does not exist
return ({
'code': 400,
'message': "The requested file has not been loaded into the Postgres database",
'fields': "fileID"
}, 400)
typequery = db.prepare("SELECT column_name, data_type FROM information_schema.columns WHERE table_name = $1")
result = {
key: val for (key, val) in typequery(tablekey)
}
db.close()
return result
def serve_as(reader, filetype):
if filetype == 'json':
return {
'filetype':'json',
'content':json.load(reader)
}
elif filetype == 'yaml' or filetype == 'yml':
return {
'filetype':'yaml',
            'content':yaml.safe_load(reader.read())
}
elif filetype == 'log':
return {
'filetype':'log',
'content':[line.rstrip() for line in reader.readlines()]
}
else:
return {
'filetype':'raw',
'content':reader.read()
}
def visualize(parentID, fileID):
vis = visualize_script(parentID, fileID)
    return '<html><head></head><body>%s</body></html>'%(vis if type(vis)!=tuple else vis[0])
def visualize_script(parentID, fileID):
"""Return an HTML document containing the requested table visualization"""
from .files import results_getcols
data = current_app.config['storage']['loader']()
#first call filterfile to load the table if it's not loaded already
result = filterfile(parentID, fileID, 1, 0, '', 'rowid', 'ASC')
if type(result) != list:
return (
{
'code':400,
'message':json.dumps(result),
'fields':'unknown',
},
400
)
if len(result) == 0 or type(result) == dict:
return (
'Results file contains no data - cannot visualize'
)
cols = results_getcols(parentID, fileID)
if type(cols) != dict:
return (
{
'code':400,
'message':json.dumps(cols),
'fields':'unknown'
},
400
)
proc_data = process_info(parentID)
if type(proc_data)==dict and 'parameters' in proc_data and 'sample_name' in proc_data['parameters']:
sample = proc_data['parameters']['sample_name']
elif parentID == -1:
sample = data['visualize'][str(fileID)]['display_name'].rsplit(".", 1)[0]
else:
sample = 'Unknown Sample'
if current_app.PROXY_IP_ADDRESS is not None:
IP = current_app.PROXY_IP_ADDRESS
else:
IP = current_app.IP_ADDRESS
return (
server_document(
url="http://" + IP + ":5006/visualizations",
arguments={
'target-process': parentID,
'target-file': fileID,
'cols': json.dumps(cols),
'samplename': sample
}
)
)
|
24592
|
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable
from chainer import optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
class normalNN(Chain):
def __init__(self, dim):
super().__init__(
l1=L.Linear(dim, 100),
l2=L.Linear(100, 1),
)
self.af = F.relu
def __call__(self, x):
h = self.l1(x)
h = self.af(h)
h = self.l2(h)
return h
class MultiLayerPerceptron(Chain):
def __init__(self, dim):
super(MultiLayerPerceptron, self).__init__(
l1=L.Linear(dim, 300, nobias=True),
b1=L.BatchNormalization(300),
l2=L.Linear(300, 300, nobias=True),
b2=L.BatchNormalization(300),
l3=L.Linear(300, 300, nobias=True),
b3=L.BatchNormalization(300),
l4=L.Linear(300, 300, nobias=True),
b4=L.BatchNormalization(300),
l5=L.Linear(300, 1))
self.af = F.relu
def __call__(self, x):
h = self.l1(x)
h = self.b1(h)
h = self.af(h)
h = self.l2(h)
h = self.b2(h)
h = self.af(h)
h = self.l3(h)
h = self.b3(h)
h = self.af(h)
h = self.l4(h)
h = self.b4(h)
h = self.af(h)
h = self.l5(h)
return h
class CNN(Chain):
def __init__(self, dim):
super(CNN, self).__init__(
conv1=L.Convolution2D(3, 96, 3, pad=1),
conv2=L.Convolution2D(96, 96, 3, pad=1),
conv3=L.Convolution2D(96, 96, 3, pad=1, stride=2),
conv4=L.Convolution2D(96, 192, 3, pad=1),
conv5=L.Convolution2D(192, 192, 3, pad=1),
conv6=L.Convolution2D(192, 192, 3, pad=1, stride=2),
conv7=L.Convolution2D(192, 192, 3, pad=1),
conv8=L.Convolution2D(192, 192, 1),
conv9=L.Convolution2D(192, 10, 1),
b1=L.BatchNormalization(96),
b2=L.BatchNormalization(96),
b3=L.BatchNormalization(96),
b4=L.BatchNormalization(192),
b5=L.BatchNormalization(192),
b6=L.BatchNormalization(192),
b7=L.BatchNormalization(192),
b8=L.BatchNormalization(192),
b9=L.BatchNormalization(10),
fc1=L.Linear(None, 1000),
fc2=L.Linear(1000, 1000),
fc3=L.Linear(1000, 1),
)
self.af = F.relu
def __call__(self, x):
h = self.conv1(x)
h = self.b1(h)
h = self.af(h)
h = self.conv2(h)
h = self.b2(h)
h = self.af(h)
h = self.conv3(h)
h = self.b3(h)
h = self.af(h)
h = self.conv4(h)
h = self.b4(h)
h = self.af(h)
h = self.conv5(h)
h = self.b5(h)
h = self.af(h)
h = self.conv6(h)
h = self.b6(h)
h = self.af(h)
h = self.conv7(h)
h = self.b7(h)
h = self.af(h)
h = self.conv8(h)
h = self.b8(h)
h = self.af(h)
h = self.conv9(h)
h = self.b9(h)
h = self.af(h)
h = self.fc1(h)
h = self.af(h)
h = self.fc2(h)
h = self.af(h)
h = self.fc3(h)
return h
|
24596
|
import json
import requests
from typing import List
from konlpy.tag import Okt
from requests.models import Response
class OktTokenizer:
"""
    A POS-tagger-based tokenizer functor. These tokenizers are just examples; the Okt `phrases` method usually gives better results than a plain POS tokenizer.
Example:
tokenizer: OktTokenizer = OktTokenizer()
tokens: List[str] = tokenizer(your_text_here)
"""
okt: Okt = Okt()
def __call__(self, text: str) -> List[str]:
tokens: List[str] = self.okt.phrases(text)
return tokens
class ApiTokenizer:
"""
    An API-based tokenizer functor. It assumes the response body is a JSON string containing a list of `str` tokens.
Example:
tokenizer: ApiTokenizer = ApiTokenizer()
tokens: List[str] = tokenizer(your_text_here)
"""
def __init__(self, endpoint: str) -> None:
self.endpoint: str = endpoint
def __call__(self, text: str) -> List[str]:
body: bytes = text.encode('utf-8')
res: Response = requests.post(self.endpoint, data=body)
tokens: List[str] = json.loads(res.text)
return tokens
|
24608
|
from .alexnet import alexnet_V2
import tensorflow.compat.v1 as tf
import tensorflow.contrib.slim as slim
from utils import montage_tf
from .lci_nets import patch_inpainter, patch_discriminator
import tensorflow.contrib as contrib
# Average pooling params for imagenet linear classifier experiments
AVG_POOL_PARAMS = {'conv_1': (6, 6, 'SAME'), 'conv_2': (4, 4, 'VALID'), 'conv_3': (3, 3, 'SAME'),
'conv_4': (3, 3, 'SAME'), 'conv_5': (2, 2, 'VALID')}
class TRCNet:
def __init__(self, batch_size, im_shape, n_tr_classes=6, lci_patch_sz=64, lci_crop_sz=80, ae_dim=48, n_layers_lci=5,
tag='default', feats_ids=None, feat_pool='AVG', enc_params=None):
if enc_params is None:
enc_params = {}
self.name = 'TRNet_{}'.format(tag)
self.n_tr_classes = n_tr_classes
self.batch_size = batch_size
self.im_shape = im_shape
self.feats_IDs = feats_ids
self.feat_pool = feat_pool
self.enc_params = enc_params
self.lci_patch_sz = lci_patch_sz
self.lci_crop_sz = lci_crop_sz
self.num_LCI_layers = n_layers_lci
self.ae_model = patch_inpainter
self.class_model = alexnet_V2
self.disc_model = patch_discriminator
self.ae_dim = ae_dim
def lci(self, img, enc_scope, dec_scope):
# Extract random patch
patch, jit_x, jit_y = random_crop(img, crop_sz=(self.lci_crop_sz, self.lci_crop_sz))
# Erase the center of the patch
patch_erased, mask_erase = patch_erase(patch, patch_sz=(self.lci_patch_sz, self.lci_patch_sz))
tf.summary.image('imgs/patch_erased', montage_tf(patch_erased, 4, 8), max_outputs=1)
# Perform inpainting/autoencoding
net_in = tf.concat([patch, patch_erased], 0)
net_out, _ = self.ae_model(net_in, depth=self.ae_dim, num_layers=self.num_LCI_layers,
encoder_scope=enc_scope, decoder_scope=dec_scope)
patch_ae, patch_ip = tf.split(net_out, 2)
# Paste inpainted patches
pasted_patch_inpaint, patch_mask = paste_crop(img, patch_ip, jit_x, jit_y)
pasted_patch_ae, _ = paste_crop(img, patch_ae, jit_x, jit_y)
img_lci = img * (1. - patch_mask) + pasted_patch_inpaint
img_patchae = img * (1. - patch_mask) + pasted_patch_ae
return patch_ip, patch_ae, mask_erase, tf.ones_like(mask_erase), patch, img_lci, img_patchae
def ssl_net(self, net, reuse=None, training=True, scope='encoder'):
return self.class_model(net, self.n_tr_classes, reuse, training, scope, **self.enc_params)
def net(self, img, reuse=tf.AUTO_REUSE, training=True):
preds, _ = self.ssl_net(img, reuse, training, scope='features')
return preds
def linear_classifiers(self, img, num_classes, training, reuse=None):
_, feats = self.ssl_net(img, training=False, scope='features')
preds_list = []
with tf.variable_scope('classifier', reuse=reuse):
for feats_id in self.feats_IDs:
p = AVG_POOL_PARAMS[feats_id]
if self.feat_pool == 'AVG':
class_in = slim.avg_pool2d(feats[feats_id], p[0], p[1], p[2])
elif self.feat_pool == 'None':
class_in = feats[feats_id]
print('{} linear classifier input shape: {}'.format(feats_id, class_in.get_shape().as_list()))
preds = linear_classifier(class_in, num_classes, reuse, training, scope=feats_id, wd=5e-4)
preds_list.append(preds)
return preds_list
def patch_disc(self, input, update_collection, disc_scope):
in_1, in_2 = tf.split(input, 2)
input = tf.concat([in_1, in_2], -1)
model, _ = self.disc_model(input,
update_collection=update_collection,
num_layers=self.num_LCI_layers - 1,
scope=disc_scope)
return model
def linear_class_loss(self, scope, preds, labels):
total_loss = 0.
for pred, f_id in zip(preds, self.feats_IDs):
loss = tf.losses.softmax_cross_entropy(labels, pred, scope=scope)
tf.summary.scalar('losses/SCE_{}'.format(f_id), loss)
total_loss += loss
# Compute accuracy
predictions = tf.argmax(pred, 1)
tf.summary.scalar('accuracy/train_accuracy_{}'.format(f_id),
slim.metrics.accuracy(predictions, tf.argmax(labels, 1)))
loss_wd = tf.add_n(tf.losses.get_regularization_losses(), name='loss_wd')
tf.summary.scalar('losses/loss_wd', loss_wd)
total_loss = total_loss + loss_wd
return total_loss
def inpainter_loss(self, preads_fake, imgs, recs_erase, mask_erase, recs_orig, mask_orig):
loss_fake = -tf.reduce_mean(preads_fake)
tf.summary.scalar('losses/generator_fake_loss', loss_fake)
loss_ae_erase = tf.losses.mean_squared_error(imgs, recs_erase, weights=50. * mask_erase)
loss_ae_orig = tf.losses.mean_squared_error(imgs, recs_orig, weights=50. * mask_orig)
tf.summary.scalar('losses/loss_ae_erase', loss_ae_erase)
tf.summary.scalar('losses/loss_ae_orig', loss_ae_orig)
return loss_fake + loss_ae_erase + loss_ae_orig
def discriminator_loss(self, preds_fake, preds_real):
loss_real = tf.reduce_mean(tf.nn.relu(1. - preds_real))
loss_fake = tf.reduce_mean(tf.nn.relu(1. + preds_fake))
loss = loss_real + loss_fake
tf.summary.scalar('losses/disc_fake_loss', loss_fake)
tf.summary.scalar('losses/disc_real_loss', loss_real)
tf.summary.scalar('losses/disc_total_loss', loss)
return loss
def loss_ssl(self, preds, labels):
# Define the loss
loss = tf.losses.softmax_cross_entropy(labels, preds)
tf.summary.scalar('losses/SCE', loss)
# Compute accuracy
predictions = tf.argmax(preds, 1)
tf.summary.scalar('accuracy/train_accuracy',
slim.metrics.accuracy(predictions, tf.argmax(labels, 1)))
bs = self.batch_size
tf.summary.scalar('accuracy/train_accuracy_real_noae',
slim.metrics.accuracy(predictions[:bs // 2], tf.argmax(labels[:bs // 2], 1)))
tf.summary.scalar('accuracy/train_accuracy_real_ae',
slim.metrics.accuracy(predictions[bs // 2:bs], tf.argmax(labels[bs // 2:bs], 1)))
tf.summary.scalar('accuracy/train_accuracy_lci',
slim.metrics.accuracy(predictions[bs:2 * bs], tf.argmax(labels[bs:2 * bs], 1)))
tf.summary.scalar('accuracy/train_accuracy_rot',
slim.metrics.accuracy(predictions[2 * bs:-bs], tf.argmax(labels[2 * bs:-bs], 1)))
tf.summary.scalar('accuracy/train_accuracy_warp',
slim.metrics.accuracy(predictions[-bs:], tf.argmax(labels[-bs:], 1)))
return loss
def loss_lci_adv(self, preds, labels_tf):
loss = tf.losses.softmax_cross_entropy(labels_tf, preds)
return loss
def linear_classifier(net, num_out, reuse=None, training=True, scope='classifier', wd=5e-4):
with tf.variable_scope(scope, reuse=reuse):
net = slim.batch_norm(net, decay=0.975, is_training=training, fused=True, center=False, scale=False)
net = slim.flatten(net)
net = slim.fully_connected(net, num_out,
weights_initializer=contrib.layers.variance_scaling_initializer(),
weights_regularizer=slim.l2_regularizer(wd),
activation_fn=None, normalizer_fn=None)
return net
def patch_erase(img, patch_sz=(16, 16)):
im_shape = img.get_shape()
pad_sz = [im_shape[1] - patch_sz[0], im_shape[2] - patch_sz[1]]
patch_mask = tf.ones([im_shape[0], patch_sz[0], patch_sz[1], im_shape[3]])
patch_mask = tf.pad(patch_mask,
[[0, 0], [pad_sz[0] // 2, pad_sz[0] // 2], [pad_sz[1] // 2, pad_sz[1] // 2], [0, 0]])
return img * (1. - patch_mask) + 0.1 * patch_mask * tf.random_normal(im_shape), 1. - patch_mask
def random_crop(img, crop_sz=(20, 20)):
im_shape = img.get_shape().as_list()
bsz = im_shape[0]
dx = (im_shape[1] - crop_sz[0]) // 2
dy = (im_shape[2] - crop_sz[1]) // 2
base = tf.constant(
[1, 0, 0, 0, 1, 0, 0, 0], shape=[1, 8], dtype=tf.float32
)
base = tf.tile(base, [bsz, 1])
mask_x = tf.constant(
[0, 0, 1, 0, 0, 0, 0, 0], shape=[1, 8], dtype=tf.float32
)
mask_x = tf.tile(mask_x, [bsz, 1])
mask_y = tf.constant(
[0, 0, 0, 0, 0, 1, 0, 0], shape=[1, 8], dtype=tf.float32
)
mask_y = tf.tile(mask_y, [bsz, 1])
jit_x = tf.random_uniform([bsz, 8], minval=-dx + 1, maxval=dx, dtype=tf.int32)
jit_x = tf.cast(jit_x, tf.float32)
jit_y = tf.random_uniform([bsz, 8], minval=-dy + 1, maxval=dy, dtype=tf.int32)
jit_y = tf.cast(jit_y, tf.float32)
xforms = base + jit_x * mask_x + jit_y * mask_y
processed_data = contrib.image.transform(
images=img, transforms=xforms
)
cropped_data = processed_data[:, dx:dx + crop_sz[0], dy:dy + crop_sz[1], :]
return cropped_data, jit_x, jit_y
def paste_crop(img, crop, jit_x, jit_y):
im_shape = tf.shape(img)
crop_shape = tf.shape(crop)
bsz = im_shape[0]
dx_1 = (im_shape[1] - crop_shape[1]) // 2
dy_1 = (im_shape[2] - crop_shape[2]) // 2
dx_2 = im_shape[1] - crop_shape[1] - dx_1
dy_2 = im_shape[2] - crop_shape[2] - dy_1
patch_mask = tf.ones_like(crop)
crop = tf.pad(crop, [[0, 0], [dx_1, dx_2], [dy_1, dy_2], [0, 0]])
patch_mask = tf.pad(patch_mask, [[0, 0], [dx_1, dx_2], [dy_1, dy_2], [0, 0]])
base = tf.constant(
[1, 0, 0, 0, 1, 0, 0, 0], shape=[1, 8], dtype=tf.float32
)
base = tf.tile(base, [bsz, 1])
mask_x = tf.constant(
[0, 0, 1, 0, 0, 0, 0, 0], shape=[1, 8], dtype=tf.float32
)
mask_x = tf.tile(mask_x, [bsz, 1])
mask_y = tf.constant(
[0, 0, 0, 0, 0, 1, 0, 0], shape=[1, 8], dtype=tf.float32
)
mask_y = tf.tile(mask_y, [bsz, 1])
xforms = base - jit_x * mask_x - jit_y * mask_y
transformed_crop = contrib.image.transform(
images=crop, transforms=xforms
)
transformed_mask = contrib.image.transform(
images=patch_mask, transforms=xforms
)
return transformed_crop, transformed_mask
|
24614
|
import tvm
from functools import reduce
from ..utils import to_int, to_int_or_None
def get_need_tile(need_tile):
return [True if x.value == 1 else False for x in need_tile]
def get_factors(split_factor_entities):
return [[x.value for x in factors.factors] for factors in split_factor_entities]
def tile_axis(stage, axis, factors, inner_to_outer=False):
ret = []
if inner_to_outer:
factors = list(reversed(factors))
for f in factors[:-1]:
axis, inner = stage.split(axis, f)
ret.append(inner)
ret.append(axis)
ret = list(reversed(ret))
else:
for f in factors[:-1]:
outer, axis = stage.split(axis, nparts=f)
ret.append(outer)
ret.append(axis)
return ret
def tile_axes(sch, op, axes, need_tile, split_factors, inner_to_outer=False):
"""Tile axes according to need_tile and split_factors
"""
axis_map = {}
count_axis = 0
split_axis_list = []
split_factor_list = []
for axis, need_tile, factors in zip(axes, need_tile, split_factors):
if need_tile:
split_axis = tile_axis(sch[op], axis, factors, inner_to_outer=inner_to_outer)
split_axis_list.append(split_axis)
split_factor_list.append(factors)
axis_map[count_axis] = split_axis
else:
axis_map[count_axis] = axis
count_axis += 1
return axis_map, split_axis_list, split_factor_list
def get_bind_spec(binding_entity):
ret = []
for b in binding_entity:
tmp = []
for bb in b:
tmp.append([bb[0].value, bb[1].value])
ret.append(tmp)
return ret
def bind_axes(sch, op, axis_map, bind, to_bind, already_bind=None, factors=None, extents=None):
"""The bind function will fuse some axes,
which is dangerous because this is not updated
to the schedule state. For now it shouldn't be
a problem because the fusion should only happen
on blockIdx.z
"""
ret = []
for part in bind:
to_fuse = []
to_fuse_extent = 1
for ele in part:
if ele[1] < 0:
axis = axis_map[ele[0]]
if already_bind is not None:
to_fuse_extent *= extents[ele[0]]
else:
axis = axis_map[ele[0]][ele[1]]
if already_bind is not None:
to_fuse_extent *= factors[ele[0]][ele[1]]
to_fuse.append(axis)
if len(to_fuse) > 1:
sch[op].reorder(*to_fuse)
fused_axis = sch[op].fuse(*to_fuse)
else:
fused_axis = to_fuse[0]
ret.append(fused_axis)
sch[op].bind(fused_axis, to_bind)
if already_bind is not None:
already_bind["extent"] = to_fuse_extent
return ret
def get_move_to_inner(move):
return [x.value for x in move]
def reorder_spatial_and_reduce_axes(sch, op, axis_map, split_axis_list, reduce_split_axis_list, extents_info=None):
"""Reorder spatial and reduce axes
"""
pre = []
ones = []
for k, v in axis_map.items():
if not isinstance(v, (list, tuple)):
if v.dom is None:
ext = None
else:
ext = to_int_or_None(v.dom.extent)
if ext is None:
if v in extents_info:
ext = extents_info[v]
else:
ERROR("Can't decide extent for %s" % (str(v)))
if ext > 1:
pre.append(v)
else:
ones.append(v)
# perform local reorder
num_axis_parts = len(split_axis_list[0]) if len(split_axis_list) > 0 else 0
num_reduce_axis_parts = len(reduce_split_axis_list[0]) if len(reduce_split_axis_list) > 0 else 0
leveled_axes = []
reduce_leveled_axes = []
local_order = []
def _inner(axis_list, leveled, nparts):
for i in range(nparts):
leveled.append([])
for part in axis_list:
for i, axis in enumerate(part):
leveled[i].append(axis)
_inner(split_axis_list, leveled_axes, num_axis_parts)
_inner(reduce_split_axis_list, reduce_leveled_axes, num_reduce_axis_parts)
if len(leveled_axes) >= 1:
# GPU specific reorder choice
# put the inner part as inner-most axes
local_order = list(reduce(lambda x, y: x + y, leveled_axes[:-1], []))
local_order += list(reduce(lambda x, y: x + y, reduce_leveled_axes, []))
local_order += leveled_axes[-1]
else:
local_order += list(reduce(lambda x, y: x + y, reduce_leveled_axes, []))
if len(local_order) > 0:
sch[op].reorder(*ones, *pre, *local_order)
return leveled_axes, reduce_leveled_axes
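# --- Illustrative sketch (not part of the original module) ---
# How tile_axis is meant to be used on a TVM schedule: split one spatial axis
# into outer/middle/inner IterVars with factors [4, 8, 32] (values arbitrary;
# the demo function name is hypothetical).
def _tile_axis_demo():
    A = tvm.te.placeholder((1024,), name="A")
    B = tvm.te.compute((1024,), lambda i: A[i] + 1.0, name="B")
    sch = tvm.te.create_schedule(B.op)
    parts = tile_axis(sch[B.op], B.op.axis[0], [4, 8, 32])
    return sch, parts  # parts == [outer, middle, inner]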
|
24645
|
import io
from PIL import Image
from torchvision import models
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import urllib
import os
def get_model_from_global_agent():
global_model = models.squeezenet1_1(pretrained=True)
global_model.classifier[1] = nn.Conv2d(512, 5, kernel_size=(1,1), stride=(1,1))
global_model.num_classes = 5
global_model.to(torch.device('cpu'))
map_location=torch.device('cpu')
model_weights_link = 'https://drive.google.com/uc?id=11pb2yJKXgyYC9XnB9cd6HlNCFNxnlY1D'
model_weights_path = './model/squeezenet_0.pt'
urllib.request.urlretrieve(model_weights_link, model_weights_path)
    global_model.load_state_dict(torch.load(model_weights_path, map_location=torch.device('cpu')))
os.remove(model_weights_path)
global_model.eval()
return global_model
def transform_image(image_bytes):
apply_transform = transforms.Compose([transforms.Resize(265),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
return apply_transform(image).unsqueeze(0)
# change to DR dataset format
def format_class_name(class_name):
class_name = class_name.replace('_', ' ')
class_name = class_name.title()
return class_name
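# Hedged usage sketch (not part of the original module): ties the helpers above
# together for a single prediction. The image path is an illustrative assumption,
# and the 5 output classes follow the classifier set in get_model_from_global_agent.
def example_predict(image_path='sample_image.jpg'):
    model = get_model_from_global_agent()
    with open(image_path, 'rb') as f:
        tensor = transform_image(f.read())
    with torch.no_grad():
        probabilities = F.softmax(model(tensor), dim=1)
    predicted_class = int(probabilities.argmax(dim=1))
    return predicted_class, probabilities.squeeze(0).tolist()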
|
24659
|
import torch
from torch import nn
from configs import ANCHOR_SIZES
class PostRes(nn.Module):
def __init__(self, n_in, n_out, stride=1):
super(PostRes, self).__init__()
self.conv1 = nn.Conv3d(n_in, n_out, kernel_size=3, stride=stride, padding=1)
self.bn1 = nn.BatchNorm3d(n_out)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv3d(n_out, n_out, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm3d(n_out)
if stride != 1 or n_out != n_in:
self.shortcut = nn.Sequential(
nn.Conv3d(n_in, n_out, kernel_size=1, stride=stride),
nn.BatchNorm3d(n_out))
else:
self.shortcut = None
def forward(self, x):
residual = x
if self.shortcut is not None:
residual = self.shortcut(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.preBlock = nn.Sequential(
nn.Conv3d(1, 24, kernel_size=3, padding=1),
nn.BatchNorm3d(24),
nn.ReLU(inplace=True),
nn.Conv3d(24, 24, kernel_size=3, padding=1),
nn.BatchNorm3d(24),
nn.ReLU(inplace=True))
num_blocks_forw = [2, 2, 3, 3]
num_blocks_back = [3, 3]
self.featureNum_forw = [24, 32, 64, 64, 64]
self.featureNum_back = [128, 64, 64]
for i in range(len(num_blocks_forw)):
blocks = []
for j in range(num_blocks_forw[i]):
if j == 0:
blocks.append(PostRes(self.featureNum_forw[i], self.featureNum_forw[i + 1]))
else:
blocks.append(PostRes(self.featureNum_forw[i + 1], self.featureNum_forw[i + 1]))
setattr(self, 'forw' + str(i + 1), nn.Sequential(*blocks))
for i in range(len(num_blocks_back)):
blocks = []
for j in range(num_blocks_back[i]):
if j == 0:
if i == 0:
addition = 3
else:
addition = 0
blocks.append(PostRes(self.featureNum_back[i + 1] + self.featureNum_forw[i + 2] + addition,
self.featureNum_back[i]))
else:
blocks.append(PostRes(self.featureNum_back[i], self.featureNum_back[i]))
setattr(self, 'back' + str(i + 2), nn.Sequential(*blocks))
self.maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.unmaxpool1 = nn.MaxUnpool3d(kernel_size=2, stride=2)
self.unmaxpool2 = nn.MaxUnpool3d(kernel_size=2, stride=2)
self.path1 = nn.Sequential(
nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True))
self.path2 = nn.Sequential(
nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True))
self.drop = nn.Dropout3d(p=0.5, inplace=False)
self.output = nn.Sequential(nn.Conv3d(self.featureNum_back[0], 64, kernel_size=1),
nn.ReLU(),
nn.Conv3d(64, 5 * len(ANCHOR_SIZES), kernel_size=1))
def forward(self, x, coord):
        out = self.preBlock(x)  # 24 channels
out_pool, indices0 = self.maxpool1(out)
out1 = self.forw1(out_pool) # 32
out1_pool, indices1 = self.maxpool2(out1)
out2 = self.forw2(out1_pool) # 64
out2_pool, indices2 = self.maxpool3(out2)
        out3 = self.forw3(out2_pool)  # 64 channels
        out3_pool, indices3 = self.maxpool4(out3)
        out4 = self.forw4(out3_pool)  # 64 channels
        rev3 = self.path1(out4)
        comb3 = self.back3(torch.cat((rev3, out3), 1))  # 64 + 64 input channels
        rev2 = self.path2(comb3)
        comb2 = self.back2(torch.cat((rev2, out2, coord), 1))  # 64 + 64 + 3 (coord) input channels
comb2 = self.drop(comb2)
out = self.output(comb2)
size = out.size()
out = out.view(out.size(0), out.size(1), -1)
out = out.transpose(1, 2).contiguous().view(size[0], size[2], size[3], size[4], len(ANCHOR_SIZES), 5)
return out
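# Hedged shape-check sketch (not part of the original module). The 32^3 patch size
# and the 1/4-resolution, 3-channel coordinate grid are assumptions chosen to match
# the four stride-2 poolings and the two stride-2 transposed convolutions above.
def example_forward_shapes():
    net = Net().eval()
    x = torch.randn(1, 1, 32, 32, 32)   # single-channel 3D patch
    coord = torch.randn(1, 3, 8, 8, 8)  # coordinate grid at 1/4 of the input resolution
    with torch.no_grad():
        out = net(x, coord)
    # Expected shape: (1, 8, 8, 8, len(ANCHOR_SIZES), 5)
    return out.shape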
|
24672
|
from tir import Webapp
import unittest
from tir.technologies.apw_internal import ApwInternal
import datetime
import time
DateSystem = datetime.datetime.today().strftime('%d/%m/%Y')
DateVal = datetime.datetime(2120, 5, 17)
"""-------------------------------------------------------------------
/*/{Protheus.doc} PLSA809TestCase
TIR - Casos de testes da rotina Indicacao de Prestador via CallCenter
@author <NAME>
@since 10/2020
@version 12
-------------------------------------------------------------------"""
class PLSA809(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGAPLS","13/10/2020","T1","M SP 01","33")
inst.oHelper.Program("PLSA809")
inst.oHelper.AddParameter("MV_PLCALPG","" , "2")
inst.oHelper.AddParameter("MV_PL809VL","" , ".F.")
inst.oHelper.SetParameters()
def test_PLSA809_001(self):
        # INCLUDE (create a new record)
self.oHelper.SetButton("Incluir")
self.oHelper.SetBranch("M SP 01 ")
self.oHelper.SetValue("B9Y_CARTEI","00010100000001024", check_value = False)
self.oHelper.SetValue("B9Y_CRMCGC","41226834671", check_value = False)
time.sleep(10)
self.oHelper.SetValue("B9Y_NOME","PLS DSAUPC TIR INCLUSAO")
self.oHelper.SetValue("B9Y_EMAIL","<EMAIL>")
self.oHelper.SetValue("B9Y_TEL","11332220000", check_value = False)
self.oHelper.SetValue("B9Y_TIPOAT", "3 - Ambos")
self.oHelper.SetValue("B9Y_OBS", "TESTE 2 TIR INCLUSAO")
        # Addresses grid
self.oHelper.ClickGridCell("Cód Logr",row=1, grid_number=1)
self.oHelper.SetKey("Enter", grid=True, grid_number=1)
self.oHelper.SetValue("B9V_CODLOG","008")
self.oHelper.ClickGridCell("Endereço",row=1, grid_number=1)
self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(10)
self.oHelper.SetValue("B9V_ENDER","ALBERT BARTHOLOME")
self.oHelper.ClickGridCell("Nº",row=1, grid_number=1)
self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(10)
self.oHelper.SetValue("B9V_NUMERO","434")
#self.oHelper.ClickGridCell("Complemento",row=1, grid_number=1)
#self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(30)
#self.oHelper.SetValue("B9V_COMEND","SALA 10")
#self.oHelper.ClickGridCell("Bairro",row=1, grid_number=1)
#self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(30)
#self.oHelper.SetValue("B9V_BAIRRO","BUTANTA")
#self.oHelper.ClickGridCell("Cód Cidade",row=1, grid_number=1)
#self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(30)
#self.oHelper.SetValue("B9V_CODCID","3550308")
#self.oHelper.ClickGridCell("CEP",row=1, grid_number=1)
#self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(30)
#self.oHelper.SetValue("B9V_CEP","05541000", check_value = False)
        # Specialties grid
self.oHelper.ClickGridCell("Cod Espec",row=1, grid_number=2)
self.oHelper.SetKey("Enter", grid=True, grid_number=2)
time.sleep(10)
self.oHelper.SetValue("B9Q_CODESP","002")
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton("Fechar") # "O beneficiário não possui email cadastrado na base de dados, favor informar o protocolo a ele para que seja possível acompanhar a indicação feita"
self.oHelper.SetButton("Fechar") # "Registro inserido com sucesso."
        # VIEW
self.oHelper.SetButton("Visualizar")
self.oHelper.CheckResult("B9Y_CRMCGC","41226834671")
self.oHelper.SetButton("Fechar")
        # INSERT WITH THE SAME CRM/CNPJ
self.oHelper.SetButton("Incluir")
self.oHelper.SetBranch("M SP 01 ")
self.oHelper.SetValue("B9Y_CARTEI","00010100000001024", check_value = False)
self.oHelper.SetValue("B9Y_CRMCGC","41226834671", check_value = False)
time.sleep(10)
self.oHelper.SetValue("B9Y_NOME","PLS DSAUPC TIR INCLUSAO 2")
self.oHelper.SetValue("B9Y_EMAIL","<EMAIL>")
self.oHelper.SetValue("B9Y_TEL","11333331234", check_value = False)
self.oHelper.SetValue("B9Y_TIPOAT", "2 - Assistencial")
self.oHelper.SetValue("B9Y_OBS", "TESTE 2 TIR INCLUSAO COM MESMO CRM/CNPJ")
        # Specialties grid
self.oHelper.ClickGridCell("Indicar",row=1, grid_number=2)
self.oHelper.SetKey("Enter", grid=True, grid_number=2)
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton("Fechar") # "O beneficiário não possui email cadastrado na base de dados, favor informar o protocolo a ele para que seja possível acompanhar a indicação feita"
self.oHelper.SetButton("Fechar") # "Registro inserido com sucesso."
self.oHelper.SetButton('x')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
|
24707
|
import abc
from config import MODEL_DIR
class Network:
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self):
self.model = None
self.model_history = None
self.X_train = None
self.X_test = None
self.y_train = None
self.y_test = None
@abc.abstractmethod
def set_train_test_split(self):
pass
@abc.abstractmethod
def build_model(self):
pass
@abc.abstractmethod
def train_model(self):
pass
@abc.abstractmethod
def forecast_model(self, start_datetime, end_datetime, freq):
pass
@abc.abstractmethod
def visualize_output(self):
pass
def evaluate_model(self):
return self.model.evaluate(self.X_test, self.y_test)
def save_model(self, filename):
self.model.save_weights(MODEL_DIR + filename)
def load_model(self, filename):
self.model.load_weights(MODEL_DIR + filename)
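# Hedged sketch of a concrete subclass (not part of the original module). It only
# illustrates which hooks a subclass is expected to fill in; every body below is a
# placeholder, not a real forecasting model.
class ExampleForecastNetwork(Network):
    def __init__(self):
        Network.__init__(self)
    def set_train_test_split(self):
        pass  # populate self.X_train / self.X_test / self.y_train / self.y_test
    def build_model(self):
        pass  # assign a model exposing evaluate()/save_weights()/load_weights() to self.model
    def train_model(self):
        pass  # fit self.model and keep the history in self.model_history
    def forecast_model(self, start_datetime, end_datetime, freq):
        return []  # return forecasts for the requested date range
    def visualize_output(self):
        pass  # plot predictions against self.y_test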
|
24715
|
import os
def remove_comments_and_crlf(inp_path, comment_string=';', overwrite=False):
tmpfilename = os.path.splitext(os.path.basename(inp_path))[0] + '_mod.inp'
tmpfilepath = os.path.join(os.path.dirname(inp_path), tmpfilename)
    with open(inp_path) as oldf:
        with open(tmpfilepath, 'w') as newf:
            for line in oldf:
                if comment_string in line:
                    # remove the comments
                    if line.strip().startswith(comment_string):
                        # comment-only line: skip it entirely
                        pass
                    else:
                        # keep only the text to the left of the comment
                        non_comment_line = line.split(comment_string)[0]
                        newf.write(non_comment_line + '\n')
                elif line == '\n':
                    pass
                else:
                    newf.write(line)
if overwrite:
os.remove(inp_path)
os.rename(tmpfilepath, inp_path)
def line_by_line(path1, path2, outfile):
"""
given paths to two INP files, return a text file showing where differences
occur in line-by-line fashion. If the order of elements do not match, this
will be recorded as a difference.
ignores any spaces in a file such that lines with more or less white space
having the same non-whitespace will be considered equal.
"""
#outfile =r"P:\06_Tools\v_control\Testing\cleaned\linebyline.txt"
    with open(outfile, 'w') as diff_file:
        with open(path1) as f1:
            with open(path2) as f2:
                line1 = next(f1, '')
                line2 = next(f2, '')
                while line1 and line2:
                    # ignore whitespace so only actual content is compared
                    if line1.replace(" ", "") != line2.replace(" ", ""):
                        diff_file.write(line1)
                    line1 = next(f1, '')
                    line2 = next(f2, '')
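# Hedged usage sketch (not part of the original module); the .inp file names below
# are illustrative placeholders.
def example_usage():
    # strip comment-only lines, trailing comments and blank lines; writes model_a_mod.inp
    remove_comments_and_crlf('model_a.inp')
    # record every line of model_a.inp that differs from model_b.inp
    line_by_line('model_a.inp', 'model_b.inp', 'diff.txt')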
|
24717
|
import re
import sys
import pprint
pp = pprint.PrettyPrinter(indent=4)
from sys import version_info  # py3, for checking type of input
def combine_messages(messages):
""" Combines messages that have one or more integers in them, such as
"trial001" "trial002", into a single message like "trial# (#=1-2)".
This is to reduce the number of messages required to be displayed.
Operates by creating the following structure, named "ti" for "template info":
{
't2tn': {} - maps each template (containing "#") to a template number (tn)
'tn2t': [] - list of templates, indexed by the template number
'm2tns': {} - maps each message number (index in messages) to
array of template numbers (tns)
'tn2dm': {} - maps each template number to a dictionary that has as keys the digits
used to make the template, and with value the message number used to make the template
with those digits. i.e.:
{ tn1: {d1: m1, d2: m2}, tn2: {d3: m3, d4: m4}, tn2: { ...}}
where:
tn - template number
d: m - digits used to make template from message number m
    'tn2md': {} - maps each template number to a dictionary that has as keys the message numbers
and value the digits used to make the message. These reverse the key-values in 'tn2dm', e.g.:
{ tn1: {m1: d1, m2: d2}, tn2: {m3: d3, m4: d4}, tn2: { ...}}
where:
tn - template number
        m: d - message number m and the digits d used to make the template from it
This array is used to dynamically remove entries in 'tn2dm' as each message in a
template is displayed so that structure always has an accurate list of remaining messages.
'mout': [] - messages to display (output), formed by combining messages
'mfin': [] - set of message numbers "finished" (already included in mout).
}
This function works by first creating everything except mout and mfin, then
going through each message, finding the template numbers that have the most
digits, and using those to make the combined message.
"""
ti = {}
ti['t2tn'] = {}
ti['tn2t'] = []
ti['m2tns'] = {}
ti['tn2dm'] = {}
ti['tn2md'] = {}
# debug_msg = "/acquisition/timeseries/fov_15002_17/data"
# debug_mn = -1
for mn in range(len(messages)):
msg = messages[mn]
if version_info[0] > 2:
assert isinstance(msg, str), "in Python 3, messages must be str (unicode) type"
# if msg.startswith(debug_msg):
# debug_mn = mn
        found_nums = re.findall(r"\d+", msg)
if not found_nums:
# no numbers found, don't process
continue
# remove any duplicates
found_nums = list(set(found_nums))
for digits in found_nums:
pattern = "(?<!\d)%s(?!\d)" % digits # substitute only if digits not surrounded by other digits
template = re.sub(pattern, "#", msg) # make template for this message and digits
if template not in ti['t2tn']:
tn = len(ti['tn2t']) # template number
ti['tn2t'].append(template) # add template to list of templates
ti['t2tn'][template] = tn # add entry to map of template to template number
else:
tn = ti['t2tn'][template]
# save template number (tn) in 'm2tns'
if mn not in ti['m2tns']:
ti['m2tns'][mn] = [tn,]
else:
ti['m2tns'][mn].append(tn)
# save template number, digits and message number in 'tn2dm'
idigits = int(digits)
if tn not in ti['tn2dm']:
ti['tn2dm'][tn] = {idigits: mn}
ti['tn2md'][tn] = {mn: idigits}
else:
                if idigits in ti['tn2dm'][tn]:
print ("duplicate message found: %s" % msg)
break
ti['tn2dm'][tn][idigits] = mn
ti['tn2md'][tn][mn] = idigits
# done building needed structures. Now generate 'output' (i.e. ti['mfin'] and ti['mout']
ti['mout'] = []
ti['mfin'] = set([])
for mn in range(len(messages)):
# if mn == debug_mn:
# print ("found mn %i '%s'" % (debug_mn, debug_msg))
# import pdb; pdb.set_trace()
if mn in ti['mfin']:
# message has already been displayed (using a template)
continue
if mn not in ti['m2tns']:
# no digits found in this message, just display as is
ti['mout'].append(messages[mn])
ti['mfin'].add(mn)
continue
# this message has at least one pattern. Find template with largest number of other messages
# that have not been displayed yet
# build list of pairs, (a, b); a - template number, b - number of messages in template
tn_nm_pairs = [ (tn, len(ti['tn2dm'][tn])) for tn in ti['m2tns'][mn] ]
# get those pairs that have the largest number of messages
ltn_nm_pairs = largest_pairs(tn_nm_pairs)
# nmax = 0
# for tn in ti['m2tns'][mn]:
# dm = ti['tn2dm'][tn]
# num_messages = len(ti['tn2dm'][tn]) # num messages associated with this template
# if num_messages > nmax:
# max_tn = [tn]
# nmax = num_messages
# elif num_messages == nmax:
# # multiple templates have the same number of messages, will need to select
# # one in a deterministic way
# max_tn.append(tn)
# # if no other messages use pattern, just display as is
# if nmax == 1:
if ltn_nm_pairs[0][1] == 1:
# only one messages uses pattern, just display as is
ti['mout'].append(messages[mn])
ti['mfin'].add(mn)
continue
# if len(max_tn) > 1:
if len(ltn_nm_pairs) == 1:
# only one template found that has maximal number of messages. use it.
max_tn = ltn_nm_pairs[0][0]
else:
# multiple templates have the same maximal number of messages. Select the one
# with the rightmost position of '#' in the template
# build list of pairs, (a,b): a - template number, b - index of '#' in template
tn_ix_pairs = [ (ltn_nm_pairs[i][0], ti['tn2t'][ltn_nm_pairs[i][0]].index('#'))
for i in range(len(ltn_nm_pairs))]
tn_ix_pairs = largest_pairs(tn_ix_pairs)
if len(tn_ix_pairs) > 1:
# should never happen since templates made for the same message cannot have
# the same position for the '#'
sys.exit("found multiple templates with same maximal number of messages and same template")
# use the template found
max_tn = tn_ix_pairs[0][0]
# other messages use this template. Get list message numbers and digits that share this template
s_digits = list(ti['tn2dm'][max_tn].keys()) # shared digits
s_mns = list(ti['tn2dm'][max_tn].values()) # shared message numbers
# update tn2dm to remove messages that will be displayed shortly (in this template)
for mn in s_mns:
for tn in ti['m2tns'][mn]:
idigit = ti['tn2md'][tn][mn]
del ti['tn2dm'][tn][idigit]
# make new message by combining shared digits with template
template = ti['tn2t'][max_tn]
# convert digits from string to int
# i_digits = sorted([int(i) for i in s_digits])
i_digits = sorted(s_digits)
# make string representing ranges of digits
prevn = i_digits[0] # initialize previous number to first
sr = str(prevn) # string of ranges being generated
in_range = False
for i in range(1, len(i_digits)):
newn = i_digits[i]
if newn == prevn + 1:
# in a range
in_range = True
else:
# not in a range. But if was previously save end of previous range
if in_range:
sr = "%s-%i" % (sr, prevn)
in_range = False
# save new number
sr = "%s,%i" % (sr, newn)
prevn = newn
# append final number if in range
if in_range:
sr = "%s-%i" % (sr, newn)
new_message = template + " (#=%s)" % sr
ti['mout'].append(new_message)
# add all messages that share this template to ti['mfin'] so they are not displayed again
ti['mfin'].update(s_mns)
# return list of combined messages
return ti['mout']
def largest_pairs(pairs):
""""Input is a list of two-element tuples, e.g. [(5, 4), (2, 7), ...]
Output is list of those, which have the largest 2nd element, e.g. [(2,7)]"""
largest = -1
for pair in pairs:
a, b = pair
if b > largest:
largest = b
lpairs = [pair]
elif b == largest:
lpairs.append(pair)
return lpairs
def test_combine_messages():
""" tests combine_messages function"""
messages = [
"some prefix trial-none",
"some prefix trial23",
"some prefix trial23/timestamps",
"some prefix trial23 timestamps",
"some prefix trial23\ntimestamps",
"some prefix 32-bits, trial32",
"some prefix 32-bits, trial33",
"some prefix 32-bits, trial34",
"some prefix 32-bits, trial35",
"some prefix trial-11",
"some prefix trial23 and trial23 again",
"some prefix trial27",
"some prefix trial27/timestamps",
"some prefix trial27 timestamps",
"some prefix trial27\ntimestamps",
"some prefix 32-bits, trial27",
"some prefix trial27 and trial27 again"]
cm = combine_messages(messages)
pp.pprint(cm)
if __name__ == '__main__':
test_combine_messages()
|
24756
|
from . import BasicType
class UserProfilePhotos(BasicType):
fields = {
'total_count': int,
}
def __init__(self, obj=None):
super(UserProfilePhotos, self).__init__(obj)
from . import photosize
UserProfilePhotos.fields.update({
'photos': {
'class': photosize.PhotoSize,
'array_of_array': True
}
})
|
24780
|
import nengo
import pytest
from nengo_spinnaker.builder import Model
from nengo_spinnaker.builder.ports import OutputPort, InputPort
from nengo_spinnaker.node_io import ethernet as ethernet_io
from nengo_spinnaker.operators import SDPReceiver, SDPTransmitter
@pytest.mark.parametrize("transmission_period", [0.001, 0.002])
def test_Ethernet_init(transmission_period):
"""Test that the Ethernet initialisation creates a host network and stores
appropriate rates.
"""
# Create the EthernetIO
io = ethernet_io.Ethernet(transmission_period=transmission_period)
# Check that we stored the transmission period
assert io.transmission_period == transmission_period
# Check that there is a (empty) host network
assert io.host_network.all_objects == list()
assert io.host_network.all_connections == list()
assert io.host_network.all_probes == list()
# Check that the node input dictionary and lock are present
with io.node_input_lock:
assert io.node_input == dict()
def test_get_spinnaker_source_for_node():
"""Check that getting the SpiNNaker source for a Node returns an SDP Rx
operator as the source object with OutputPort.standard as the port. The
spec should indicate that the connection should be latching.
"""
with nengo.Network():
a = nengo.Node(lambda t: t**2, size_out=1)
b = nengo.Ensemble(100, 1)
a_b = nengo.Connection(a, b)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec = io.get_node_source(model, a_b)
assert isinstance(spec.target.obj, SDPReceiver)
assert spec.target.port is OutputPort.standard
assert spec.latching
assert model.extra_operators == [spec.target.obj]
def test_get_spinnaker_source_for_node_repeated():
"""Getting the source twice for the same Node should return the same
object.
"""
with nengo.Network():
a = nengo.Node(lambda t: t**2, size_out=1)
b = nengo.Ensemble(100, 1)
a_b0 = nengo.Connection(a, b)
a_b1 = nengo.Connection(a, b, transform=-0.5)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec0 = io.get_node_source(model, a_b0)
spec1 = io.get_node_source(model, a_b1)
assert spec0.target.obj is spec1.target.obj
assert model.extra_operators == [spec0.target.obj]
def test_get_spinnaker_sink_for_node():
"""Check that getting the SpiNNaker sink for a Node returns an SDP Tx
operator as the sink object with InputPort.standard as the port.
"""
with nengo.Network():
a = nengo.Ensemble(100, 1)
b = nengo.Node(lambda t, x: None, size_in=1)
a_b = nengo.Connection(a, b)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec = io.get_node_sink(model, a_b)
assert isinstance(spec.target.obj, SDPTransmitter)
assert spec.target.port is InputPort.standard
assert model.extra_operators == [spec.target.obj]
def test_get_spinnaker_sink_for_node_repeated():
"""Check that getting the SpiNNaker sink for a Node twice returns the same
target.
"""
with nengo.Network():
a = nengo.Ensemble(100, 1)
b = nengo.Node(lambda t, x: None, size_in=1)
a_b0 = nengo.Connection(a, b)
a_b1 = nengo.Connection(a, b, synapse=0.3)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec0 = io.get_node_sink(model, a_b0)
spec1 = io.get_node_sink(model, a_b1)
assert spec0.target.obj is spec1.target.obj
assert model.extra_operators == [spec0.target.obj]
|
24788
|
from rest_framework import serializers
from api.model.foodComment import FoodComment
from api.model.food import Food
from django.contrib.auth.models import User
from api.serializer.user import UserSerializer
class FoodCommentSerializer(serializers.ModelSerializer):
comment = serializers.CharField(max_length=255)
photo = serializers.CharField(max_length=255, allow_null=True, required=False)
user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
food = serializers.PrimaryKeyRelatedField(queryset=Food.objects.all())
class Meta:
model = FoodComment
fields = '__all__'
depth = 1
class FoodCommentReadSerializer(serializers.ModelSerializer):
user = UserSerializer()
class Meta:
model = FoodComment
fields = '__all__'
depth = 1
class FoodCommentPureSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
food = serializers.PrimaryKeyRelatedField(queryset=Food.objects.all())
class Meta:
model = FoodComment
fields = ('comment', 'user', 'food')
depth = 1
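# Hedged usage sketch (not part of the original module): shows how the write-time
# serializer validates incoming data. The primary-key values are illustrative and
# must reference existing User and Food rows.
def example_create_comment(data=None):
    data = data or {'comment': 'Tasty!', 'user': 1, 'food': 1}
    serializer = FoodCommentPureSerializer(data=data)
    if serializer.is_valid():
        return serializer.save()  # creates a FoodComment row
    return serializer.errors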
|
24823
|
import deepchem as dc
import numpy as np
import os
def test_numpy_dataset_get_shape():
"""Test that get_shape works for numpy datasets."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_shape_single_shard():
"""Test that get_shape works for disk dataset."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_shape_multishard():
"""Test that get_shape works for multisharded disk dataset."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
# Should now have 10 shards
dataset.reshard(shard_size=10)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_legacy_shape_single_shard():
"""Test that get_shape works for legacy disk dataset."""
# This is the shape of legacy_data
num_datapoints = 100
num_features = 10
num_tasks = 10
current_dir = os.path.dirname(os.path.abspath(__file__))
# legacy_dataset is a dataset in the legacy format kept around for testing
# purposes.
data_dir = os.path.join(current_dir, "legacy_dataset")
dataset = dc.data.DiskDataset(data_dir)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == (num_datapoints, num_features)
assert y_shape == (num_datapoints, num_tasks)
assert w_shape == (num_datapoints, num_tasks)
assert ids_shape == (num_datapoints,)
def test_disk_dataset_get_legacy_shape_multishard():
"""Test that get_shape works for multisharded legacy disk dataset."""
# This is the shape of legacy_data_reshard
num_datapoints = 100
num_features = 10
num_tasks = 10
# legacy_dataset_reshard is a sharded dataset in the legacy format kept
# around for testing
current_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(current_dir, "legacy_dataset_reshard")
dataset = dc.data.DiskDataset(data_dir)
# Should now have 10 shards
assert dataset.get_number_shards() == 10
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == (num_datapoints, num_features)
assert y_shape == (num_datapoints, num_tasks)
assert w_shape == (num_datapoints, num_tasks)
assert ids_shape == (num_datapoints,)
def test_get_shard_size():
"""
Test that using ids for getting the shard size does not break the method.
The issue arises when attempting to load a dataset that does not have a labels
column. The create_dataset method of the DataLoader class sets the y to None
in this case, which causes the existing implementation of the get_shard_size()
    method to fail, as it relies on the dataset having a non-None y column. This
consequently breaks all methods depending on this, like the splitters for
example.
Note
----
DiskDatasets without labels cannot be resharded!
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(current_dir, "reaction_smiles.csv")
featurizer = dc.feat.DummyFeaturizer()
loader = dc.data.CSVLoader(
tasks=[], feature_field="reactions", featurizer=featurizer)
dataset = loader.create_dataset(file_path)
assert dataset.get_shard_size() == 4
|
24856
|
import logging
import sys
import os
from logging.handlers import RotatingFileHandler
from multiprocessing.pool import ThreadPool
from optparse import OptionParser
import requests
from requests.packages import urllib3
urllib3.disable_warnings()
# Workers configurations
ASYNC_WORKERS_COUNT = 100 # How many threads will make http requests.
WORKERS_DECREMENTED_COUNT_ON_ERROR = 10 # Retry the fuzzing with x less workers, to decrease the load on the server.
STARTED_JOB_LOG_INTERVAL = 100 # Every x started jobs, a log will be written
# IO Configurations
DEFAULT_PATHS_LIST_FILE = 'words_lists/Filenames_or_Directories_Common.wordlist'
VALID_ENDPOINTS_FILE = 'endpoints.txt'
# HTTP Configuration
RESOURCE_EXISTS_STATUS_CODES = list(range(200, 300)) + [401, 402, 403]
DEFAULT_BASE_URL = 'https://www.example.com'
# Logging configurations
LOGS_DIRECTORY_FULL_NAME = 'logs'
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
LOGGING_LEVEL = logging.INFO
BACKUP_LOGS_FILES_COUNT = 5
FUZZING_LOGGER_NAME = 'fuzzing'
LOG_FILE_MAX_BYTES = 0.5 * 1000 * 1000 # 500 KB
class FilesFactory(object):
"""
Manage files and directories
"""
files = []
urls = []
def read_files_from_directory(self, user_path):
self.files = [os.path.join(user_path, f) for f in os.listdir(user_path) if os.path.isfile(os.path.join(user_path, f))]
def read_lines_from_files(self):
for l in self.files:
h = open(l, 'r')
self.urls += h.read().splitlines()
def __init__(self,user_path):
if os.path.isdir(user_path):
self.read_files_from_directory(user_path)
self.read_lines_from_files()
elif(os.path.isfile(user_path)):
self.files.append(user_path)
self.read_lines_from_files()
class LoggerFactory(object):
"""
Manages loggers
"""
loggers = {}
logging_level = LOGGING_LEVEL
logging.basicConfig(stream=sys.stdout, level=logging_level,
format=LOG_FORMAT)
# Modifying the logger's level to ERROR to prevent console spam
logging.getLogger('urllib3').setLevel(logging.WARNING)
@staticmethod
def get_logger(logger_name):
"""
        Gets a logger by its name. Creates the logger if it doesn't exist yet.
:param logger_name: The name of the logger (identifier).
:return: The logger instance.
:returns: Logger
"""
if logger_name not in LoggerFactory.loggers:
LoggerFactory.loggers[logger_name] = LoggerFactory._get_logger(logger_name)
return LoggerFactory.loggers[logger_name]
@staticmethod
def _get_logger(logger_name, logs_directory_path=LOGS_DIRECTORY_FULL_NAME):
"""
Creates a logger with rolling file handler,
Or returns the logger if it already exists.
:param logger_name: The name of the logger
:param logs_directory_path: The path of the directory that the logs will be written to.
:return: An initialized logger instance.
returns: Logger
"""
        # Create the logs folder if it doesn't exist
if not os.path.exists(logs_directory_path):
os.mkdir(logs_directory_path)
logger = logging.getLogger(logger_name)
formatter = logging.Formatter(LOG_FORMAT)
# Adding a rotating file handler
rotating_file_handler = RotatingFileHandler(
os.path.join(logs_directory_path, '{0}.log'.format(logger_name)), maxBytes=LOG_FILE_MAX_BYTES,
backupCount=BACKUP_LOGS_FILES_COUNT)
rotating_file_handler.setFormatter(formatter)
rotating_file_handler.setLevel(LOGGING_LEVEL)
logger.addHandler(rotating_file_handler)
return logger
class AsyncURLFuzzer(object):
"""
An asynchronous http(s) website endpoint locator.
Discovers active endpoints in websites, based on a list of common URLS.
"""
def __init__(self, base_url=DEFAULT_BASE_URL, list_file=DEFAULT_PATHS_LIST_FILE,
async_workers_count=ASYNC_WORKERS_COUNT,
output_file=VALID_ENDPOINTS_FILE, resource_exists_status_codes=RESOURCE_EXISTS_STATUS_CODES):
"""
Initializes a new member of this class.
:param base_url: The base url of the website.
:type base_url: str
:param list_file: The path of a file, containing the paths to check.
:type list_file: str
:param async_workers_count: How many workers (threads) to use.
:type async_workers_count: int
:param output_file: The name of the active endpoints output file.
:type output_file: str
:param resource_exists_status_codes: A list of HTTP status codes to consider as valid.
:type resource_exists_status_codes: list
"""
self._logger = LoggerFactory.get_logger(FUZZING_LOGGER_NAME)
self._base_url = base_url
self._list_file_path = list_file
self._async_workers_count = async_workers_count
self._output_file_path = output_file
self._resource_exists_status_codes = resource_exists_status_codes
self._active_paths_status_codes = {}
self._checked_endpoints = {}
self._endpoints_total_count = 0
self._session = requests.session()
def start(self):
"""
Starts the fuzzing with the initialized parameters.
"""
self._get_website_endpoints()
def _get_website_endpoints(self, async_workers_count=ASYNC_WORKERS_COUNT):
"""
Requests asynchronously for all the resources with a number of workers (threads).
If it fails for HTTP overloads reasons, it retries with less workers, because it's probably a DDOS
protection mechanism.
:param async_workers_count: How many workers (threads) to use.
:type async_workers_count: int
"""
self._load_paths_list()
self._logger.info(
'Getting the endpoints of the website {0} with list file "{1}" and {2} async workers.'.format(
self._base_url,
self._list_file_path,
async_workers_count))
if 0 >= async_workers_count:
self._logger.error('Seems like the site does not support fuzzing, as it has a DDOS protection engine.')
return
pool = ThreadPool(async_workers_count)
try:
tasks = []
self._logger.debug('Preparing the workers...')
for i, path in enumerate(self._paths):
self._logger.debug('Started a worker for the endpoint {0}'.format(path))
                if i > 0 and i % STARTED_JOB_LOG_INTERVAL == 0:
self._logger.info('Started {0} workers'.format(i))
path = path.strip()
full_path = '/'.join([self._base_url, path])
tasks.append(pool.apply_async(self.request_head, (full_path, path)))
for t in tasks:
status_code, full_path, path = t.get()
self._checked_endpoints[path] = path
if self._is_valid_status_code(status_code):
self._active_paths_status_codes[path] = status_code
self._logger.info(
'Fetched {0}/{1}; {2}; {3}'.format(len(self._checked_endpoints), self._endpoints_total_count,
status_code,
full_path))
self._save_output_log()
except requests.ConnectionError as e:
pool.terminate()
self._logger.error(e)
            self._logger.warning('An error occurred while fuzzing.'
' Retrying with less async workers to reduce the server load.')
retry_workers_count = async_workers_count - WORKERS_DECREMENTED_COUNT_ON_ERROR
self._get_website_endpoints(retry_workers_count)
def _is_valid_status_code(self, status_code):
"""
        Checks whether an HTTP status code implies that the resource exists.
        :param status_code: The HTTP status code to check.
        :return: True if the status code implies that the resource exists, False otherwise.
"""
return status_code in self._resource_exists_status_codes
def _save_output_log(self):
"""
Saves the results to an output file.
"""
full_status_codes = {'/'.join([self._base_url, p]): code for p, code in self._active_paths_status_codes.items()}
output_lines = ['{0} : {1}'.format(path, code) for path, code in full_status_codes.items()]
        if not output_lines:
self._logger.warning(
'There were no discovered endpoints. consider using a different file from "words_list" directory')
self._logger.info('The following endpoints are active:{0}{1}'.format(os.linesep, os.linesep.join(output_lines)))
with open(self._output_file_path, 'a+') as output_file:
output_lines.sort()
output_file.write(os.linesep.join(output_lines))
self._logger.info('The endpoints were exported to "{0}"'.format(self._output_file_path))
def _load_paths_list(self):
"""
Loads the list of paths from the configured status.
"""
if not os.path.exists(self._list_file_path):
raise FileNotFoundError('The file "{0}" does not exist.'.format(self._list_file_path))
with open(self._list_file_path) as paths_file:
paths = [p.strip().lstrip('/').rstrip('/') for p in paths_file.readlines()]
paths = [p for p in paths if p not in self._active_paths_status_codes]
if not self._endpoints_total_count:
self._endpoints_total_count = len(paths)
self._paths = paths
def request_head(self, url, path):
"""
Executes a http HEAD request to a url.
:param url: The full url to contact.
:param path: The uri of the request.
:return: A tuple of 3 variables:
the recieved status code (int),
the url argument (str),
the path argument (str).
"""
if url != '':
res = self._session.head(url, verify=False, allow_redirects=True)
return res.status_code, url, path
if __name__ == '__main__':
# Parsing the parameters.
parser = OptionParser(description=
'An Asynchronous, robust websites endpoint discovery tool with smart error handling. '
'Locates resources in websites based on a list of paths. '
                          'Check out the "words_list" directory for list examples.',
usage='%prog -u https://example.com/', version='%prog 0.1')
parser.add_option('-u', '--url', dest='base_url', help='The target website to scan.', default=DEFAULT_BASE_URL)
parser.add_option('-l', '--list', dest='list_file', help='A file containing the paths to check (separated with lines).',
default=DEFAULT_PATHS_LIST_FILE)
(options, args) = parser.parse_args()
list_file = options.list_file
base_url = options.base_url
if base_url is None:
parser.print_help()
sys.exit()
# Suspending warning logs from requests and urllib3
logging.getLogger("urllib3").setLevel(logging.ERROR)
logging.getLogger("requests").setLevel(logging.ERROR)
if (os.path.isdir(base_url) or os.path.isfile(base_url)):
FilesFactory(base_url)
for u in FilesFactory.urls:
fuzzer = AsyncURLFuzzer(u, list_file)
fuzzer.start()
else:
fuzzer = AsyncURLFuzzer(base_url, list_file)
fuzzer.start()
|
24906
|
from __future__ import division, print_function
from .. import __version__
from ._global_imports import *
try:
import h5py
except ImportError:
print('Install h5py to enable signal caching.')
raise
class _Cache(object):
""" Cache numerical model objects computed during likelihood evaluation.
:param str filename:
Filename of cache.
:param str cache_dir:
Directory to write cache to.
:param bool read_only:
Do not write to cache file?
:param bool archive:
If not read-only, then archive an existing cache file found at the
same path?
"""
def __init__(self, filename, cache_dir='./',
read_only=False, archive=True):
if isinstance(filename, _six.string_types):
if filename[-3:] != '.h5':
self._filename = filename + '.h5'
else:
self._filename = filename
self._cache_dir = cache_dir
self._path = _os.path.join(self._cache_dir, self._filename)
self._read_only = read_only
self._archive_if_incompatible = archive
def __enter__(self):
return self
def __exit__(self, exc, exc_value, traceback):
if exc:
print('Encountered problem whilst caching:')
def _open(self, mode='r'):
""" Get the :mod:`h5py` context manager. """
if self._read_only and mode != 'r':
raise RuntimeError('The cache is in read-only mode.')
return h5py.File(self._path, mode)
def cache(self, data):
""" Cache the computational data. """
with self._open('r+') as f:
g = f['data']
            for key, value in data.items():
if isinstance(value, tuple) or isinstance(value, list):
if key not in g.keys():
shape = [f.attrs['n'], len(value)]
shape += [s for s in value[0].shape]
g.create_dataset(key, shape=shape, dtype='float64')
for j, v in enumerate(value):
g[key][self.i,j,...] = v
else:
if key not in g.keys():
shape = [f.attrs['n']] + [s for s in value.shape]
g.create_dataset(key, shape=shape, dtype='float64')
g[key][self.i,...] = value
self.i += 1
def reset_iterator(self):
""" Reset the counter for the cache iterator. """
self.i = 0
def __iter__(self):
self.reset_iterator()
return self
def __next__(self):
""" Read from the cache. """
cached = {}
with self._open('r') as f:
g = f['data']
for key in g.keys():
cached[key] = g[key][self.i,...]
self.i += 1
return cached
def next(self):
""" Python 2.x compatibility. """
return self.__next__()
@make_verbose('Checking whether an existing cache can be read:',
'Cache state determined')
def do_caching(self, samples, force=False):
""" Check whether a new cache is required or whether an exising
cache can be read without additional computation.
:return: Boolean indicating whether to read (``False``) or write.
"""
if force:
self._new(samples)
return True
try: # try reading file and checking keys
with self._open('r') as f:
if 'thetas' not in f.keys():
self._new(samples)
return True
except IOError: # create new cache file
self._new(samples)
return True
else: # can be read, so check if samples array are matching
if self._changed(samples):
self._new(samples)
return True
else:
return False
@make_verbose('Creating new cache file', 'Cache file created')
def _new(self, samples):
""" Prepare a new cache file. """
if not _os.path.isdir(self._cache_dir):
_os.mkdir(self._cache_dir)
if self._archive_if_incompatible:
try:
with self._open('r'):
pass
except IOError:
self._initialise(samples)
else:
self._archive()
self._initialise(samples)
else:
self._initialise(samples)
@make_verbose('Initialising cache file', 'Cache file initialised')
def _initialise(self, samples):
""" Initialise the cache. """
with self._open('w') as f:
f.attrs['version'] = __version__
f.attrs['n'] = samples.shape[0]
f.create_dataset('thetas', data=samples)
f.create_group('/data')
self.reset_iterator()
def _changed(self, samples):
""" Check whether software version or sample set has changed. """
with self._open('r') as f:
if f.attrs['version'] != __version__:
return True
if not _np.array_equal(f['thetas'], samples):
return True
return False
@make_verbose('Attempting to archive existing cache file in '
'a subdirectory')
def _archive(self):
""" Archive an existing cache file. """
# to archive the existing cache file
archive_dir = _os.path.join(self._cache_dir, 'archive')
try:
if not _os.path.isdir(archive_dir):
_os.mkdir(archive_dir)
except OSError:
yield ('Archiving failed... cache file %s will be '
'overwritten.' % self._filename)
yield
else:
yield 'Targeting subdirectory: %s.' % archive_dir
try:
from datetime import datetime
except ImportError:
yield ('Archiving failed... cache file %s will be '
'overwritten.' % self._filename)
yield
else:
name_archived = self._filename[:-3] + '__archive__'
name_archived += 'xpsi_version_%s__' % __version__
obj = datetime.now()
name_archived += 'datetime__%i.%i.%i__%i.%i.%i' % (obj.day,
obj.month,
obj.year,
obj.hour,
obj.minute,
obj.second)
try:
_os.rename(self._filename,
_os.path.join(archive_dir, name_archived + '.h5'))
except OSError:
yield ('Archiving failed... cache file %s will be '
'overwritten.' % self._filename)
else:
                    yield ('Existing cache file archived in '
'subdirectory %s.' % archive_dir)
yield None
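# Hedged usage sketch (not part of the original module). It assumes `samples` is a
# 2-D array of parameter vectors, `signals` is a list (one entry per sample) of dicts
# mapping names to numpy arrays, and that make_verbose passes return values through.
def example_cache_roundtrip(samples, signals, filename='example_cache'):
    with _Cache(filename, cache_dir='./', read_only=False) as cache:
        if cache.do_caching(samples):
            for data in signals:        # write pass: one dict of arrays per sample
                cache.cache(data)
        else:
            cache.reset_iterator()      # read pass: pull cached entries back in order
            return [next(cache) for _ in range(len(signals))]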
|
24962
|
from django import template
register = template.Library()
@register.inclusion_tag('quiz/correct_answer.html', takes_context=True)
def correct_answer_for_all(context, question):
"""
processes the correct answer based on a given question object
if the answer is incorrect, informs the user
"""
answers = question.get_answers()
incorrect_list = context.get('incorrect_questions', [])
if question.id in incorrect_list:
user_was_incorrect = True
else:
user_was_incorrect = False
return {'previous': {'answers': answers},
'user_was_incorrect': user_was_incorrect}
@register.filter
def answer_choice_to_string(question, answer):
return question.answer_choice_to_string(answer)
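# Hedged template usage sketch (not part of the original module); assumes the quiz
# templates load this tag library under the name `quiz_tags`:
#   {% load quiz_tags %}
#   {% correct_answer_for_all question %}
#   {{ question|answer_choice_to_string:answer }}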
|
24992
|
import re
import string
import numpy as np
from tqdm import tqdm
from typing import List
from docqa.triviaqa.read_data import TriviaQaQuestion
from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score
from docqa.utils import flatten_iterable, split
"""
Tools for turning the aliases and answer strings from TriviaQA into labelled spans
"""
class ExactMatchDetector(object):
def __init__(self):
self.answer_tokens = None
def set_question(self, normalized_aliases):
self.answer_tokens = normalized_aliases
def any_found(self, para):
words = [x.lower() for x in flatten_iterable(para)]
occurances = []
for answer_ix, answer in enumerate(self.answer_tokens):
word_starts = [i for i, w in enumerate(words) if answer[0] == w]
n_tokens = len(answer)
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token] == next:
ans_token += 1
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
class NormalizedAnswerDetector(object):
""" Try to labels tokens sequences, such that the extracted sequence would be evaluated as 100% correct
by the official trivia-qa evaluation script """
def __init__(self):
self.answer_tokens = None
def set_question(self, normalized_aliases):
self.answer_tokens = normalized_aliases
def any_found(self, para):
words = [normalize_answer(w) for w in flatten_iterable(para)]
occurances = []
for answer_ix, answer in enumerate(self.answer_tokens):
word_starts = [i for i, w in enumerate(words) if answer[0] == w]
n_tokens = len(answer)
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token] == next:
ans_token += 1
end += 1
elif next == "":
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
class FastNormalizedAnswerDetector(object):
""" almost twice as fast and very,very close to NormalizedAnswerDetector's output """
def __init__(self):
# These come from the TrivaQA official evaluation script
self.skip = {"a", "an", "the", ""}
self.strip = string.punctuation + "".join([u"‘", u"’", u"´", u"`", "_"])
self.answer_tokens = None
def set_question(self, normalized_aliases):
self.answer_tokens = normalized_aliases
def any_found(self, para):
# Normalize the paragraph
words = [w.lower().strip(self.strip) for w in flatten_iterable(para)]
occurances = []
for answer_ix, answer in enumerate(self.answer_tokens):
# Locations where the first word occurs
word_starts = [i for i, w in enumerate(words) if answer[0] == w]
n_tokens = len(answer)
# Advance forward until we find all the words, skipping over articles
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token] == next:
ans_token += 1
end += 1
elif next in self.skip:
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
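# Hedged usage sketch (not part of the original module); the alias and paragraph
# tokens are made-up examples.
def example_fast_detector_usage():
    detector = FastNormalizedAnswerDetector()
    detector.set_question([["barack", "obama"]])
    paragraph = [["Barack", "Obama", "was", "the", "44th", "president", "."]]
    # Returns (start, end) token spans over the flattened paragraph, here [(0, 2)].
    return detector.any_found(paragraph)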
class CarefulAnswerDetector(object):
"""
There are some common false negatives in the above answer detection, in particular plurals of answers are
often not found (nor are counted correct by the official script). This detector makes a stronger effort to
    find them, although it's unclear if training with these additional answers would hurt/help our overall score
since I never got around to trying it.
"""
def __init__(self):
self.skip = {"a", "an", "the", "&", "and", "-", "\u2019", "\u2018", "\"", ";", "'",
"(", ")", "'s'", "s", ":", ",", "."}
self.answer_regex = None
self.aliases = None
def set_question(self, normalized_aliases):
answer_regex = []
self.aliases = normalized_aliases
for answer in normalized_aliases:
tokens = []
for token in answer:
if len(token) > 1:
tokens.append(token + "s?")
else:
tokens.append(token)
if tokens[-1] == "s":
tokens[-1] = "s?"
answer_regex.append([re.compile(x, re.IGNORECASE) for x in tokens])
self.answer_regex = answer_regex
def any_found(self, para):
words = flatten_iterable(para)
occurances = []
for answer_ix, answer in enumerate(self.answer_regex):
word_starts = [i for i, w in enumerate(words) if answer[0].fullmatch(w)]
n_tokens = len(answer)
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token].match(next):
ans_token += 1
end += 1
elif next in self.skip:
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
def evaluate_question_detector(questions, corpus, word_tokenize, detector, reference_detector=None, compute_f1s=False):
""" Just for debugging """
n_no_docs = 0
answer_per_doc = []
answer_f1s = []
for question_ix, q in enumerate(tqdm(questions)):
tokenized_aliases = [word_tokenize(x) for x in q.answer.normalized_aliases]
detector.set_question(tokenized_aliases)
for doc in q.all_docs:
doc = corpus.get_document(doc.doc_id)
if doc is None:
n_no_docs += 1
continue
output = []
for i, para in enumerate(doc):
for s,e in detector.any_found(para):
output.append((i, s, e))
if len(output) == 0 and reference_detector is not None:
if reference_detector is not None:
reference_detector.set_question(tokenized_aliases)
detected = []
for i, para in enumerate(doc):
for s, e in reference_detector.any_found(para):
detected.append((i, s, e))
if len(detected) > 0:
print("Found a difference")
print(q.answer.normalized_aliases)
print(tokenized_aliases)
for p, s, e in detected:
token = flatten_iterable(doc[p])[s:e]
print(token)
answer_per_doc.append(output)
if compute_f1s:
f1s = []
for p, s, e in output:
token = flatten_iterable(doc[p])[s:e]
answer = normalize_answer(" ".join(token))
f1 = 0
for gt in q.answer.normalized_aliases:
f1 = max(f1, f1_score(answer, gt))
f1s.append(f1)
answer_f1s.append(f1s)
n_answers = sum(len(x) for x in answer_per_doc)
print("Found %d answers (av %.4f)" % (n_answers, n_answers/len(answer_per_doc)))
print("%.4f docs have answers" % np.mean([len(x) > 0 for x in answer_per_doc]))
if len(answer_f1s) > 0:
print("Average f1 is %.4f" % np.mean(flatten_iterable(answer_f1s)))
def compute_answer_spans(questions: List[TriviaQaQuestion], corpus, word_tokenize,
detector):
for i, q in enumerate(questions):
if i % 500 == 0:
print("Completed question %d of %d (%.3f)" % (i, len(questions), i/len(questions)))
q.question = word_tokenize(q.question)
if q.answer is None:
continue
tokenized_aliases = [word_tokenize(x) for x in q.answer.all_answers]
if len(tokenized_aliases) == 0:
raise ValueError()
detector.set_question(tokenized_aliases)
for doc in q.all_docs:
text = corpus.get_document(doc.doc_id)
if text is None:
raise ValueError()
spans = []
offset = 0
for para_ix, para in enumerate(text):
for s, e in detector.any_found(para):
spans.append((s+offset, e+offset-1)) # turn into inclusive span
offset += sum(len(s) for s in para)
if len(spans) == 0:
spans = np.zeros((0, 2), dtype=np.int32)
else:
spans = np.array(spans, dtype=np.int32)
doc.answer_spans = spans
def _compute_answer_spans_chunk(questions, corpus, tokenizer, detector):
# We use tokenize_paragraph since some questions can have multiple sentences,
# but we still store the results as a flat list of tokens
word_tokenize = tokenizer.tokenize_paragraph_flat
compute_answer_spans(questions, corpus, word_tokenize, detector)
return questions
def compute_answer_spans_par(questions: List[TriviaQaQuestion], corpus,
tokenizer, detector, n_processes: int):
if n_processes == 1:
word_tokenize = tokenizer.tokenize_paragraph_flat
compute_answer_spans(questions, corpus, word_tokenize, detector)
return questions
from multiprocessing import Pool
with Pool(n_processes) as p:
chunks = split(questions, n_processes)
questions = flatten_iterable(p.starmap(_compute_answer_spans_chunk,
[[c, corpus, tokenizer, detector] for c in chunks]))
return questions
def main():
    from docqa.triviaqa.build_span_corpus import TriviaQaWebDataset
    from docqa.data_processing.text_utils import NltkAndPunctTokenizer
dataset = TriviaQaWebDataset()
qs = dataset.get_train()
qs = np.random.RandomState(0).choice(qs, 1000, replace=False)
evaluate_question_detector(qs, dataset.evidence, NltkAndPunctTokenizer().tokenize_paragraph_flat,
FastNormalizedAnswerDetector())
if __name__ == "__main__":
main()
|
25004
|
from django.shortcuts import render, redirect, reverse
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db.models import Q
from django.views.generic import View
from .models import OrgInfo, CityInfo, TeacherInfo
from operations.models import UserLove
# Create your views here.
class OrgList(View):
"""
    org_list: organization list page
"""
@staticmethod
def get(request):
all_orgs = OrgInfo.objects.all()
all_citys = CityInfo.objects.all()
sort_orgs = all_orgs.order_by('-love_num')[:3]
        # Global keyword filter (fuzzy search)
keyword = request.GET.get('keyword', '')
if keyword:
all_orgs = all_orgs.filter(Q(name__icontains=keyword)|Q(desc__icontains=keyword)|Q(detail__icontains=keyword))
        # Filter by organization category
category = request.GET.get('cat', '')
if category:
all_orgs = all_orgs.filter(org_category=category)
        # Filter by city
city_id = request.GET.get('city', '')
if city_id:
all_orgs = all_orgs.filter(city_id=int(city_id))
        # Sorting
sort = request.GET.get('sort', '')
if sort:
if sort == 'course_num':
pass
else:
all_orgs = all_orgs.order_by('-'+sort)
        # Pagination
page = request.GET.get('page')
pa = Paginator(all_orgs, 2)
try:
pages = pa.page(page)
except PageNotAnInteger:
pages = pa.page(1)
except EmptyPage:
pages = pa.page(pa.num_pages)
return render(request, 'orgs/org-list.html', {
'all_orgs': all_orgs,
'all_citys': all_citys,
'sort_orgs': sort_orgs,
'pages': pages,
'category': category,
'city_id': city_id,
'sort': sort,
'keyword': keyword,
})
class OrgDetail(View):
"""
    org_detail: organization detail page
    Clicking into the detail page increments the organization's click count by 1.
    When the user is logged in, their favorite status for this organization is shown.
    @params org_id: organization id, used to look up the organization to display
"""
@staticmethod
def get(request, org_id):
if org_id:
org = OrgInfo.objects.filter(id=int(org_id))[0]
            # Increment the organization's click count
org.click_num += 1
org.save()
            # Include the user's favorite status for this organization in the page data
love_status = False
if request.user.is_authenticated:
love = UserLove.objects.filter(love_man=request.user, love_id=int(org_id), love_type=1, love_status=True)
if love:
love_status = True
return render(request, 'orgs/org-detail-homepage.html', {
'org': org,
'detail_type': 'home',
'love_status': love_status,
})
class OrgDetailCourse(View):
"""
    org_detail_course: organization detail page - courses
"""
def get(self, request, org_id):
if org_id:
org = OrgInfo.objects.filter(id=int(org_id))[0]
love_status = False
if request.user.is_authenticated:
love = UserLove.objects.filter(love_man=request.user, love_id=int(org_id), love_type=1, love_status=True)
if love:
love_status = True
return render(request, 'orgs/org-detail-course.html', {
'org': org,
'detail_type': 'course',
'love_status': love_status,
})
class OrgDetailDesc(View):
"""
    org_detail_desc: organization detail page - description
"""
def get(self, request, org_id):
if org_id:
org = OrgInfo.objects.filter(id=int(org_id))[0]
love_status = False
if request.user.is_authenticated:
love = UserLove.objects.filter(love_man=request.user, love_id=int(org_id), love_type=1, love_status=True)
if love:
love_status = True
return render(request, 'orgs/org-detail-desc.html', {
'org': org,
'detail_type': 'desc',
'love_status': love_status,
})
class OrgDetailTeacher(View):
"""
    org_detail_teacher: organization detail page - teachers
"""
def get(self, request, org_id):
if org_id:
org = OrgInfo.objects.filter(id=int(org_id))[0]
love_status = False
if request.user.is_authenticated:
love = UserLove.objects.filter(love_man=request.user, love_id=int(org_id), love_type=1, love_status=True)
if love:
love_status = True
return render(request, 'orgs/org-detail-teachers.html', {
'org': org,
'detail_type': 'teacher',
'love_status': love_status,
})
class TeacherList(View):
"""
    teacher_list: teacher list page
"""
@staticmethod
def get(request):
all_teachers = TeacherInfo.objects.all()
recommend = all_teachers.order_by('-love_num')[:2]
        # Global keyword filter (fuzzy search)
keyword = request.GET.get('keyword', '')
if keyword:
all_teachers = all_teachers.filter(Q(name__icontains=keyword))
        # Sorting
sort = request.GET.get('sort', '')
if sort:
all_teachers = all_teachers.order_by('-' + sort)
        # Pagination
page = request.GET.get('page')
pa = Paginator(all_teachers, 2)
try:
pages = pa.page(page)
except PageNotAnInteger:
pages = pa.page(1)
except EmptyPage:
pages = pa.page(pa.num_pages)
return render(request, 'orgs/teachers-list.html', {
'all_teachers': all_teachers,
'pages': pages,
'recommend': recommend,
'sort': sort,
'keyword': keyword,
})
class TeacherDetail(View):
"""
    teacher_detail: teacher detail page
"""
def get(self, request, teacher_id):
if teacher_id:
teacher_list = TeacherInfo.objects.filter(id=teacher_id)
if teacher_list:
teacher = teacher_list[0]
teacher.click_num += 1
teacher.save()
                # Teacher ranking
recommend = TeacherInfo.objects.all().order_by('-click_num')[:3]
return render(request, 'orgs/teacher-detail.html', {
'teacher': teacher,
'recommend': recommend,
})
|
25008
|
from sys import path
path.append('..')
from armoryengine import *
TheBDM.setBlocking(True)
TheBDM.setOnlineMode(True)
if not os.path.exists('testmultiblock'):
os.mkdir('testmultiblock')
fout = []
fout.append([0, 101, 'testmultiblock/blk00000.dat'])
fout.append([0, 102, 'testmultiblock/blk00000_test1.dat']) # Add 1 block
fout.append([0, 105, 'testmultiblock/blk00000_test2.dat']) # Add 3 blocks
fout.append([106, 106, 'testmultiblock/blk00001_test3.dat']) # Just block split
fout.append([107, 109, 'testmultiblock/blk00002_test4.dat']) # Another block split 3 blks
fout.append([107, 110, 'testmultiblock/blk00002_test5.dat']) # Add block
fout.append([110, 113, 'testmultiblock/blk00003_test5.dat']) # and split
for start,end,theFile in fout:
if os.path.exists(theFile):
os.remove(theFile)
lastLocation = [0]*len(fout)
openfiles = [[trip[0], trip[1], open(trip[2],'wb')] for trip in fout]
# Assume we are only reading into blk000000.dat, no split
for h in range(120):
head = TheBDM.getHeaderByHeight(h)
blk = head.serializeWholeBlock(MAGIC_BYTES, True)
for i,trip in enumerate(openfiles):
start,end,theFile = trip
if (start <= h <= end):
theFile.write(blk)
lastLocation[i] += len(blk)
for start,end,opnfil in openfiles:
opnfil.close()
for i,trip in enumerate(fout):
start,end,theFile = trip
sz = os.path.getsize(theFile)
f = open(theFile,'ab')
if i<3:
f.write('\x00'*(22000-sz))
else:
f.write('\x00'*(1000-sz))
f.close()
print 'Blocks written out:'
for start,end,fn in fout:
if end-start==0:
print '\t%d in file: %s' % (end,fn)
else:
print '\t%d-%d in file: %s' % (start,end,fn)
|
25062
|
from django.urls import reverse
from rest_framework import status
from conf_site.api.tests import ConferenceSiteAPITestCase
class ConferenceSiteAPIConferenceTestCase(ConferenceSiteAPITestCase):
def test_conference_api_anonymous_user(self):
response = self.client.get(reverse("conference-detail"))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
"title": self.conference.title,
"start_date": self.conference.start_date.strftime("%Y-%m-%d"),
"end_date": self.conference.end_date.strftime("%Y-%m-%d"),
})
|
25087
|
from tests.common.devices.base import AnsibleHostBase
class VMHost(AnsibleHostBase):
"""
@summary: Class for VM server
For running ansible module on VM server
"""
def __init__(self, ansible_adhoc, hostname):
AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
@property
def external_port(self):
if not hasattr(self, "_external_port"):
vm = self.host.options["variable_manager"]
im = self.host.options["inventory_manager"]
hostvars = vm.get_vars(host=im.get_host(self.hostname), include_delegate_to=False)
setattr(self, "_external_port", hostvars["external_port"])
return getattr(self, "_external_port")
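# Illustrative usage sketch (not part of the original module); "vm_host_1" and the
# ansible_adhoc fixture are hypothetical names used only for illustration:
#     vm = VMHost(ansible_adhoc, "vm_host_1")
#     print(vm.external_port)  # lazily read from the host's inventory variables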
|
25098
|
from __future__ import absolute_import, division, print_function
import pytest
import telnyx
TEST_RESOURCE_ID = "f1486bae-f067-460c-ad43-73a92848f902"
class TestPortingOrder(object):
def test_is_listable(self, request_mock):
resources = telnyx.PortingOrder.list()
request_mock.assert_requested("get", "/v2/porting_orders")
assert isinstance(resources.data, list)
assert isinstance(resources.data[0], telnyx.PortingOrder)
def test_is_retrievable(self, request_mock):
resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
request_mock.assert_requested("get", "/v2/porting_orders/%s" % TEST_RESOURCE_ID)
assert isinstance(resource, telnyx.PortingOrder)
def test_is_creatable(self, request_mock):
resource = telnyx.PortingOrder.create(
phone_numbers=["13035550000", "13035550001", "13035550002"],
)
request_mock.assert_requested("post", "/v2/porting_orders")
assert isinstance(resource.data[0], telnyx.PortingOrder)
def test_is_saveable(self, request_mock):
porting_order = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
porting_order.webhook_event = "https://update.com"
porting_order.customer_reference = "updated name"
resource = porting_order.save()
request_mock.assert_requested(
"patch", "/v2/porting_orders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.PortingOrder)
assert resource is porting_order
def test_is_modifiable(self, request_mock):
resource = telnyx.PortingOrder.modify(
TEST_RESOURCE_ID,
webhook_event="https://update.com",
customer_reference="updated name",
)
request_mock.assert_requested(
"patch", "/v2/porting_orders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.PortingOrder)
def test_is_deletable(self, request_mock):
resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
resource.delete()
request_mock.assert_requested(
"delete", "/v2/porting_orders/%s" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.PortingOrder)
def test_can_confirm_porting_order_action(self, request_mock):
resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
resource.confirm()
request_mock.assert_requested(
"post", "/v2/porting_orders/%s/actions/confirm" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.PortingOrder)
@pytest.mark.skip(reason="PDF endpoint not supported by mock currently")
def test_can_get_loa_template(self, request_mock):
resource = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
resource.loaTemplate()
request_mock.assert_requested(
"get", "/v2/porting_orders/%s/loa_template" % TEST_RESOURCE_ID
)
assert isinstance(resource, telnyx.PortingOrder)
def test_can_list_porting_phone_numbers(self, request_mock):
resource = telnyx.PortingPhoneNumber.list()
request_mock.assert_requested("get", "/v2/porting_phone_numbers")
assert isinstance(resource.data, list)
assert isinstance(resource.data[0], telnyx.PortingPhoneNumber)
|
25217
|
from bumblebee.bigquery_service import BigqueryService
from datetime import datetime
from abc import ABC
from abc import abstractmethod
from bumblebee.config import LoadMethod
class BaseLoader(ABC):
@abstractmethod
def load(self, query):
pass
class PartitionLoader(BaseLoader):
def __init__(self, bigquery_service, destination: str, load_method: LoadMethod, partition: datetime):
self.bigquery_service = bigquery_service
self.destination_name = destination
self.load_method = load_method
self.partition_date = partition
def load(self, query):
partition_date_str = self.partition_date.strftime("%Y%m%d")
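        # "<table>$<YYYYMMDD>" is BigQuery's partition decorator syntax: the load
        # below targets only that single date partition of the destination table.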
load_destination = "{}${}".format(self.destination_name, partition_date_str)
write_disposition = self.load_method.write_disposition
return self.bigquery_service.transform_load(query=query,
write_disposition=write_disposition,
destination_table=load_destination)
class TableLoader(BaseLoader):
def __init__(self, bigquery_service, destination: str, load_method: LoadMethod):
self.bigquery_service = bigquery_service
self.full_table_name = destination
self.load_method = load_method
def load(self, query):
return self.bigquery_service.transform_load(query=query,
write_disposition=self.load_method.write_disposition,
destination_table=self.full_table_name)
class DMLLoader(BaseLoader):
    def __init__(self, bigquery_service: BigqueryService, destination: str):
        self.bigquery_service = bigquery_service
        self.full_table_name = destination
    def load(self, query):
return self.bigquery_service.execute_query(query)
|
25241
|
from distutils.version import StrictVersion as SV
import unittest
import minecraft
class VersionTest(unittest.TestCase):
def test_module_version_is_a_valid_pep_386_strict_version(self):
SV(minecraft.__version__)
def test_protocol_version_is_an_int(self):
for version in minecraft.SUPPORTED_PROTOCOL_VERSIONS:
self.assertTrue(type(version) is int)
|
25258
|
import datetime
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
try:
from django.contrib.gis.utils import LayerMapping
except ImportError:
print("gdal is required")
sys.exit(1)
from tigerline.models import County
def county_import(county_shp, year):
if year == "2010":
county_mapping = {
'state_fips_code': 'STATEFP10',
'fips_code': 'COUNTYFP10',
'county_identifier': 'GEOID10',
'name': 'NAME10',
'name_and_description': 'NAMELSAD10',
'legal_statistical_description': 'LSAD10',
'fips_55_class_code': 'CLASSFP10',
'feature_class_code': 'MTFCC10',
'functional_status': 'FUNCSTAT10',
'mpoly': 'POLYGON',
}
else:
county_mapping = {
'state_fips_code': 'STATEFP',
'fips_code': 'COUNTYFP',
'county_identifier': 'GEOID',
'name': 'NAME',
'name_and_description': 'NAMELSAD',
'legal_statistical_description': 'LSAD',
'fips_55_class_code': 'CLASSFP',
'feature_class_code': 'MTFCC',
'functional_status': 'FUNCSTAT',
'mpoly': 'POLYGON',
}
lm = LayerMapping(County, county_shp, county_mapping, encoding='LATIN1')
lm.save(verbose=True)
class Command(BaseCommand):
help = 'Installs the 2010-2016 tigerline files for counties'
def add_arguments(self, parser):
parser.add_argument('--path', default='', dest='path',
help='The directory where the county data is stored.'
)
def handle(self, *args, **kwargs):
path = kwargs['path']
# With DEBUG on this will DIE.
settings.DEBUG = False
# figure out which path we want to use.
years = ["2016", "2015", "2014", "2013", "2012", "2011", "2010"]
directories = [('tl_%s_us_county' % year, year) for year in years]
tiger_file = ""
for (directory, year) in directories:
if year == "2010":
directory = directory + "10"
if os.path.exists(os.path.join(path, directory)):
print('Found %s files.' % year)
tiger_file = os.path.join(path, directory + "/" + directory + ".shp")
break
if not tiger_file:
print('Could not find files.')
            sys.exit(1)
print("Start Counties: %s" % datetime.datetime.now())
county_import(tiger_file, year)
print("End Counties: %s" % datetime.datetime.now())
|
25264
|
from __future__ import print_function
import os
import time
import tensorflow as tf
import numpy as np
import sys
from zoneout_wrapper import ZoneoutWrapper
class SequencePredictor():
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors
"""
self.inputs_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length), name="x")
self.labels_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length), name="y")
self.dropout_placeholder = tf.placeholder(tf.float32)
def create_feed_dict(self, inputs_batch, labels_batch=None, initial_state=None, keep_prob=1.0):
"""Creates the feed_dict for the model.
NOTE: You do not have to do anything here.
"""
feed_dict = {
self.inputs_placeholder: inputs_batch,
self.dropout_placeholder: keep_prob,
}
if labels_batch is not None:
feed_dict[self.labels_placeholder] = labels_batch
if initial_state is not None:
feed_dict[self.in_state] = initial_state
return feed_dict
def add_embedding(self):
""" Creates one-hot encoding for the input. No embedding is used as of now
"""
embedding = tf.one_hot(self.inputs_placeholder, self.config.num_classes)
return embedding
def add_prediction_op(self):
""" Get the input from the embedding layer
"""
x = self.add_embedding()
""" Create a RNN first & define a placeholder for the initial state
"""
if self.config.model_type == "gru":
cell = tf.nn.rnn_cell.GRUCell(self.config.hidden_size)
elif self.config.model_type == "rnn":
cell = tf.nn.rnn_cell.BasicRNNCell(self.config.hidden_size)
else:
raise Exception("Unsuppoprted model type...")
if self.config.regularization == "dropout":
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.dropout_placeholder)
elif self.config.regularization == "zoneout":
cell = ZoneoutWrapper(cell, zoneout_prob=self.dropout_placeholder)
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.config.num_layers, state_is_tuple=False)
batch_size = tf.shape(x)[0]
dynamic_max_length = tf.shape(x)[1]
zero_state = cell.zero_state(batch_size, tf.float32)
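        # placeholder_with_default lets callers thread the previous out_state back in
        # as the next initial state (see create_feed_dict); when nothing is fed, the
        # RNN simply starts from the all-zeros state.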
self.in_state = tf.placeholder_with_default(zero_state, [None, cell.state_size])
""" First find the sequence length and then use it to run the model
"""
#length = tf.reduce_sum(tf.reduce_max(tf.sign(x), 2), 1)
output, self.out_state = tf.nn.dynamic_rnn(cell, x, initial_state=self.in_state)
output = tf.reshape(output, shape=[-1, self.config.hidden_size])
""" Pass it through a linear + Softmax layer to get the predictions
"""
xavier_init = tf.contrib.layers.xavier_initializer()
W = tf.get_variable("W", shape=[self.config.hidden_size, self.config.num_classes], initializer=xavier_init )
b1 = tf.get_variable("b1", shape=[self.config.num_classes], initializer=xavier_init )
preds = tf.add(tf.matmul(output,W),b1)
preds = tf.reshape(preds, shape=[batch_size,dynamic_max_length, self.config.num_classes])
return preds
def add_loss_op(self, preds):
loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels_placeholder, logits=preds) )
scaled_loss = loss/np.log(2)
        tf.summary.scalar('loss', scaled_loss)
return scaled_loss
def add_training_op(self, loss):
"""Sets up the training Ops.
"""
global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(self.config.lr)
train_op = optimizer.minimize(loss, global_step=global_step)
return global_step, train_op
def loss_on_batch(self, sess, inputs_batch, labels_batch, initial_state=None):
feed = self.create_feed_dict(inputs_batch=inputs_batch, labels_batch=labels_batch, initial_state=initial_state, keep_prob=1.0)
loss, out_state = sess.run([self.loss,self.out_state], feed_dict=feed)
return loss, out_state
def train_on_batch(self, sess, inputs_batch, labels_batch, initial_state=None, dropout=1.0):
feed = self.create_feed_dict(inputs_batch=inputs_batch, labels_batch=labels_batch, initial_state=initial_state, keep_prob=dropout)
_, loss,out_state,_step, summary = sess.run([self.train_op, self.loss, self.out_state, self.global_step, self.merged_summaries], feed_dict=feed)
return loss, out_state, _step, summary
def build(self):
self.add_placeholders()
self.pred = self.add_prediction_op()
self.loss = self.add_loss_op(self.pred)
self.global_step, self.train_op = self.add_training_op(self.loss)
self.merged_summaries = tf.summary.merge_all()
def __init__(self, config):
self.config = config
self.build()
|
25276
|
from z3 import *
H = Int('H')
s = Solver()
t = 4 + 4 * (((H - 1) / 2) / 2)
s.add(H % 4 == 0)
s.check()
m = s.model()
def subterms(t):
seen = {}
def subterms_rec(t):
if is_app(t):
for ch in t.children():
if ch in seen:
continue
seen[ch] = True
yield ch
for sub in subterms_rec(ch):
yield sub
return { s for s in subterms_rec(t) }
def are_equal(s, t1, t2):
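    # t1 and t2 are provably equal under the solver's current assertions
    # iff asserting t1 != t2 is unsatisfiable.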
s.push()
s.add(t1 != t2)
r = s.check()
s.pop()
return r == unsat
def simplify(slv, mdl, t):
subs = subterms(t)
values = { s : mdl.eval(s) for s in subs }
values[t] = mdl.eval(t)
def simplify_rec(t):
subs = subterms(t)
for s in subs:
if s.sort().eq(t.sort()) and values[s].eq(values[t]) and are_equal(slv, s, t):
return simplify_rec(s)
chs = [simplify_rec(ch) for ch in t.children()]
return t.decl()(chs)
return simplify_rec(t)
print(t, "-->", simplify(s, m, t))
|
25278
|
from abc import ABC
class ProbeConfig(object):
def __init__(self):
self.directives = {}
def add_directive(self, directive):
name = directive.keyword
if name not in self.directives:
self.directives[name] = []
self.directives[name].append(directive)
def get_directives(self, name):
return self.directives.get(name)
def has_directive(self, name):
return name in self.directives
def get_directive(self, name):
return self.directives.get(name)[0]
def __str__(self):
return ' '.join([s for s in self.directives])
class Directive(ABC):
"""
Represents a directive type.
See https://nmap.org/book/vscan-fileformat.html
"""
def __init__(self, keyword, param_count, raw):
self.keyword = keyword
self.raw = raw
self.parameters = raw.split(" ", param_count)[1:]
def validate(self):
pass
class Exclude(Directive):
"""
    This directive excludes the listed ports from version detection
    (allowed only once, near the top of the file)
"""
def __init__(self, raw):
super().__init__('exclude', 1, raw)
# This will need to be parsed into proper port format later
self.ports = self.parameters[0]
class Probe(Directive):
"""
This directive describes what nmap will send to fingerprint this service
"""
def __init__(self, raw):
super().__init__('probe', 3, raw)
self.protocol = self.parameters[0]
self.probename = self.parameters[1]
self.probestring = self.parameters[2]
def validate(self):
assert self.protocol == 'TCP' or self.protocol == 'UDP', \
'Invalid protocol {} found, expected "UDP" or "TCP"'.format(self.protocol)
class Match(Directive):
"""
    This directive describes the response nmap is expecting to receive for a service
"""
def __init__(self, raw):
super().__init__('match', 2, raw)
self.service = self.parameters[0]
self.raw_pattern = self.parameters[1]
self.pattern = None
self.flags = []
self.version_info = []
class SoftMatch(Match):
"""
    Similar to match, but after a softmatch, nmap will only send probes matching the given service.
This is intended to eventually lead to a 'hard' match that will provide more version info
"""
def __init__(self, raw):
super().__init__(raw)
self.service = self.parameters[0]
self.raw_pattern = self.parameters[1]
self.keyword = 'softmatch'
class Ports(Directive):
"""
This line tells nmap what ports identified by the probe are found on
(only once per section)
"""
def __init__(self, raw):
super().__init__('ports', 1, raw)
# This will need to be parsed into proper port format later
self.ports = self.parameters[0]
class SslPorts(Ports):
"""
Same as Ports, but wrapped in ssl
"""
def __init__(self, raw):
super().__init__(raw)
self.keyword = 'sslports'
class Rarity(Directive):
"""
Determines how frequently a probe returns useful results. The higher the number, the rarer the probe is
https://nmap.org/book/vscan-technique.html#vscan-selection-and-rarity
"""
def __init__(self, raw):
super().__init__('rarity', 1, raw)
self.rarity = int(self.parameters[0])
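# Illustrative sketch (not part of the original module): parsing one hypothetical
# nmap-service-probes line into a Probe directive and validating its protocol.
#     probe = Probe('Probe TCP GetRequest q|GET / HTTP/1.0\r\n\r\n|')
#     probe.probename    # 'GetRequest'
#     probe.validate()   # passes, protocol is 'TCP'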
|
25324
|
import pytest
from flake8.exceptions import ExecutionError
from flake8_adjustable_complexity.config import DEFAULT_CONFIG
@pytest.mark.parametrize(
('args', 'max_mccabe_complexity'),
[
(['--max-mccabe-complexity=5'], 5),
(['--max-adjustable-complexity=10'], 10),
([], DEFAULT_CONFIG.max_mccabe_complexity),
],
)
def test_parse_max_mccabe_complexity(parse_options, args, max_mccabe_complexity):
config = parse_options(args)
assert config.max_mccabe_complexity == max_mccabe_complexity
@pytest.mark.parametrize(
('args', 'max_complexity_per_path'),
[
(
[
'--per-path-max-adjustable-complexity',
'foo.py:10,bar.py:20',
],
{
'foo.py': 10,
'bar.py': 20,
},
),
([], DEFAULT_CONFIG.max_complexity_per_path),
],
)
def test_parse_max_complexity_per_path(parse_options, args, max_complexity_per_path):
config = parse_options(args)
assert config.max_complexity_per_path == max_complexity_per_path
def test_parse_max_complexity_per_path_error(parse_options):
args = [
'--per-path-max-adjustable-complexity',
'foo.py:invalid-complexity',
]
with pytest.raises(ExecutionError) as excinfo:
parse_options(args)
assert "Couldn\'t parse --per-path-adjustable-max-complexity" in str(excinfo.value)
@pytest.mark.parametrize(
('args', 'var_names_blacklist'),
[
(
['--var-names-extra-blacklist=my_obj,my_var'],
DEFAULT_CONFIG.var_names_blacklist | {'my_obj', 'my_var'},
),
(
['--var-names-whitelist=var,result'],
DEFAULT_CONFIG.var_names_blacklist - {'var', 'result'},
),
(
[
'--var-names-extra-blacklist=my_obj,my_var',
'--var-names-whitelist=var,result',
],
(DEFAULT_CONFIG.var_names_blacklist | {'my_obj', 'my_var'}) - {'var', 'result'},
),
([], DEFAULT_CONFIG.var_names_blacklist),
],
)
def test_parse_var_names_blacklist(parse_options, args, var_names_blacklist):
config = parse_options(args)
assert config.var_names_blacklist == var_names_blacklist
|
25333
|
from django.utils.translation import get_language
def django_settings(request):
return {
"LANGUAGE": get_language(),
}
|
25351
|
import math
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest
import numba.cuda.random
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
from numba.cuda.random import \
xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, \
xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64
from numba.core import config
# Distributions
UNIFORM = 1
NORMAL = 2
@cuda.jit
def rng_kernel_float32(states, out, count, distribution):
thread_id = cuda.grid(1)
for i in range(count):
if distribution == UNIFORM:
out[thread_id * count + i] = xoroshiro128p_uniform_float32(states, thread_id)
elif distribution == NORMAL:
out[thread_id * count + i] = xoroshiro128p_normal_float32(states, thread_id)
@cuda.jit
def rng_kernel_float64(states, out, count, distribution):
thread_id = cuda.grid(1)
for i in range(count):
if distribution == UNIFORM:
out[thread_id * count + i] = xoroshiro128p_uniform_float64(states, thread_id)
elif distribution == NORMAL:
out[thread_id * count + i] = xoroshiro128p_normal_float64(states, thread_id)
class TestCudaRandomXoroshiro128p(CUDATestCase):
def test_create(self):
states = cuda.random.create_xoroshiro128p_states(10, seed=1)
s = states.copy_to_host()
self.assertEqual(len(np.unique(s)), 10)
def test_create_subsequence_start(self):
states = cuda.random.create_xoroshiro128p_states(10, seed=1)
s1 = states.copy_to_host()
states = cuda.random.create_xoroshiro128p_states(10, seed=1,
subsequence_start=3)
s2 = states.copy_to_host()
# Starting seeds should match up with offset of 3
np.testing.assert_array_equal(s1[3:], s2[:-3])
def test_create_stream(self):
stream = cuda.stream()
states = cuda.random.create_xoroshiro128p_states(10, seed=1, stream=stream)
s = states.copy_to_host()
self.assertEqual(len(np.unique(s)), 10)
def check_uniform(self, kernel_func, dtype):
states = cuda.random.create_xoroshiro128p_states(32 * 2, seed=1)
        out = np.zeros(2 * 32 * 32, dtype=dtype)
kernel_func[2, 32](states, out, 32, UNIFORM)
self.assertAlmostEqual(out.min(), 0.0, delta=1e-3)
self.assertAlmostEqual(out.max(), 1.0, delta=1e-3)
self.assertAlmostEqual(out.mean(), 0.5, delta=1.5e-2)
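        # 1/(2*sqrt(3)) == 1/sqrt(12), the standard deviation of U(0, 1)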
self.assertAlmostEqual(out.std(), 1.0/(2*math.sqrt(3)), delta=6e-3)
def test_uniform_float32(self):
self.check_uniform(rng_kernel_float32, np.float32)
@skip_on_cudasim('skip test for speed under cudasim')
def test_uniform_float64(self):
self.check_uniform(rng_kernel_float64, np.float64)
def check_normal(self, kernel_func, dtype):
states = cuda.random.create_xoroshiro128p_states(32 * 2, seed=1)
out = np.zeros(2 * 32 * 32, dtype=dtype)
kernel_func[2, 32](states, out, 32, NORMAL)
self.assertAlmostEqual(out.mean(), 0.0, delta=4e-3)
self.assertAlmostEqual(out.std(), 1.0, delta=2e-3)
def test_normal_float32(self):
self.check_normal(rng_kernel_float32, np.float32)
@skip_on_cudasim('skip test for speed under cudasim')
def test_normal_float64(self):
self.check_normal(rng_kernel_float64, np.float64)
if __name__ == '__main__':
unittest.main()
|
25431
|
import os.path
import time
from moler.config import load_config
from moler.device.device import DeviceFactory
from moler.util.moler_test import MolerTest
def outage_callback(device_name, ping_times):
MolerTest.info("Network outage on {}".format(device_name))
ping_times["lost_connection_time"] = time.time()
def ping_is_on_callback(ping_times):
MolerTest.info("Ping works")
if ping_times["lost_connection_time"] > 0: # ping operable AFTER any net loss
if ping_times["reconnection_time"] == 0:
ping_times["reconnection_time"] = time.time()
outage_time = ping_times["reconnection_time"] - ping_times["lost_connection_time"]
MolerTest.info("Network outage time is {}".format(outage_time))
def test_network_outage():
load_config(config=os.path.abspath('config/my_devices.yml'))
unix1 = DeviceFactory.get_device(name='MyMachine1')
unix2 = DeviceFactory.get_device(name='MyMachine2')
# test setup
ping_times = {"lost_connection_time": 0,
"reconnection_time": 0}
# ensure network is up before running test
net_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
sudo_ensure_net_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "<PASSWORD>", "cmd_object": net_up})
sudo_ensure_net_up()
# run event observing "network down/up"
no_ping = unix1.get_event(event_name="ping_no_response")
no_ping.add_event_occurred_callback(callback=outage_callback,
callback_params={'device_name': 'MyMachine1',
'ping_times': ping_times})
no_ping.start()
ping_is_on = unix1.get_event(event_name="ping_response")
ping_is_on.add_event_occurred_callback(callback=ping_is_on_callback,
callback_params={'ping_times': ping_times})
ping_is_on.start()
# run test
ping = unix1.get_cmd(cmd_name="ping", cmd_params={"destination": "localhost", "options": "-O"})
ping.start(timeout=120)
time.sleep(3)
ifconfig_down = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo down"})
sudo_ifconfig_down = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "<PASSWORD>", "cmd_object": ifconfig_down})
sudo_ifconfig_down()
time.sleep(5)
ifconfig_up = unix2.get_cmd(cmd_name="ifconfig", cmd_params={"options": "lo up"})
sudo_ifconfig_up = unix2.get_cmd(cmd_name="sudo", cmd_params={"password": "<PASSWORD>", "cmd_object": ifconfig_up})
sudo_ifconfig_up()
time.sleep(3)
# test teardown
ping.cancel()
no_ping.cancel()
if __name__ == '__main__':
test_network_outage()
"""
copy this file into workshop1/network_outage.py
*** calculating network outage time ***
1. run it
2. see logs - look for "Network outage" and "Ping works"
   - be careful when analysing the logs - what's wrong?
3. fix incorrect calculation by exchanging:
no_ping = unix1.get_event(event_name="ping_no_response")
into:
no_ping = unix1.get_event(event_name="ping_no_response", event_params={"till_occurs_times": 1})
"""
|
25449
|
from config import args
from utils import delete_existing, get_img, get_img_files
import tensorbayes as tb
import tensorflow as tf
import numpy as np
import os
def push_to_buffer(buf, data_files):
files = np.random.choice(data_files, len(buf), replace=False)
for i, f in enumerate(files):
buf[i] = get_img(f, (256, 256, 3))
def train(M):
delete_existing(args.log_dir)
train_writer = tf.summary.FileWriter(args.log_dir)
train_files = get_img_files(args.train_dir)
validation_files = get_img_files(args.validation_dir)
iterep = args.iter_visualize
with M.graph.as_default():
M.sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
batch = np.zeros((args.batch_size, 256, 256, 3), dtype='float32')
for i in xrange(len(train_files) * args.n_epochs):
push_to_buffer(batch, train_files)
summary, _ = M.sess.run(M.ops_main, {M.x: batch})
train_writer.add_summary(summary, i + 1)
train_writer.flush()
message='i={:d}'.format(i + 1)
end_viz, _ = tb.utils.progbar(i, iterep, message)
if (i + 1) % args.iter_visualize == 0:
for f, op in zip(validation_files, M.ops_images):
img = np.expand_dims(get_img(f), 0)
summary = M.sess.run(op, {M.x_test: img})
train_writer.add_summary(summary, i + 1)
if (i + 1) % args.iter_save == 0:
path = saver.save(M.sess, os.path.join(args.model_dir, 'model'),
global_step=i + 1)
print "Saving model to {:s}".format(path)
|
25503
|
import humps
import pytest
from django import test
from django.contrib.auth.models import User
from django.urls import reverse
def test_profile_updates_correctly(
profile_admin_client: test.Client, user: User, update_profile_params
):
url = f"{reverse('admin_update_profile')}?email={user.email}"
res = profile_admin_client.patch(url, humps.camelize(update_profile_params))
assert res.status_code == 200
user.refresh_from_db()
profile = user.profile
for key, val in update_profile_params.items():
assert getattr(profile, key) == val
@pytest.mark.parametrize(
argnames="method, status",
argvalues=[("get", 400), ("put", 400), ("post", 405), ("patch", 400)],
)
def test_requires_query_param(
profile_admin_client: test.Client, method: str, status: int
):
request_method = getattr(profile_admin_client, method)
url = f"{reverse('admin_update_profile')}"
res = request_method(url)
assert res.status_code == status
def test_missing_profile_returns_404(profile_admin_client: test.Client):
url = f"{reverse('admin_update_profile')}?email=abc"
res = profile_admin_client.get(url)
assert res.status_code == 404
@pytest.mark.parametrize(
argnames="method, status", argvalues=[("get", 200), ("post", 405), ("patch", 200)]
)
def test_staff_user_has_access(
authed_admin_client: test.Client, user: User, method: str, status: int
):
request_method = getattr(authed_admin_client, method)
url = f"{reverse('admin_update_profile')}?email={user.email}"
res = request_method(url)
assert res.status_code == status
@pytest.mark.parametrize(
argnames="method, status",
argvalues=[("get", 403), ("put", 403), ("post", 405), ("patch", 403)],
)
def test_view_requires_profile_admin_group(
authed_client: test.Client, user: User, method: str, status: int
):
request_method = getattr(authed_client, method)
url = f"{reverse('admin_update_profile')}?email={user.email}"
res = request_method(url)
assert res.status_code == status
|
25549
|
import time
import json
from wptserve.utils import isomorphic_decode, isomorphic_encode
def main(request, response):
headers = [(b'Content-Type', b'application/javascript'),
(b'Cache-Control', b'max-age=86400'),
(b'Last-Modified', isomorphic_encode(time.strftime(u"%a, %d %b %Y %H:%M:%S GMT", time.gmtime())))]
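    # max-age=86400 deliberately marks the generated worker script as fresh for 24
    # hours, while Last-Modified is regenerated on every request.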
test = request.GET[b'test']
body = u'''
const mainTime = {time:8f};
const testName = {test};
importScripts('update-max-aged-worker-imported-script.py');
addEventListener('message', event => {{
event.source.postMessage({{
mainTime,
importTime,
test: {test}
}});
}});
'''.format(
time=time.time(),
test=json.dumps(isomorphic_decode(test))
)
return headers, body
|
25561
|
import asyncio
import time
from collections import namedtuple
from http import HTTPStatus
import pytest
from aiojenkins.exceptions import JenkinsError
from aiojenkins.jenkins import Jenkins
from tests import CreateJob, get_host, get_login, get_password, is_ci_server
@pytest.mark.asyncio
async def test_invalid_host(jenkins):
with pytest.raises(JenkinsError):
jenkins = Jenkins('@#$')
await jenkins.get_version()
@pytest.mark.asyncio
async def test_get_status(jenkins):
await jenkins.get_status()
@pytest.mark.asyncio
async def test_quiet_down(jenkins):
await jenkins.quiet_down()
server_status = await jenkins.get_status()
assert server_status['quietingDown'] is True
await jenkins.cancel_quiet_down()
server_status = await jenkins.get_status()
assert server_status['quietingDown'] is False
@pytest.mark.asyncio
async def test_restart(jenkins):
if not is_ci_server():
pytest.skip('takes too much time +40 seconds')
await jenkins.safe_restart()
await asyncio.sleep(5)
await jenkins.wait_until_ready()
assert (await jenkins.is_ready()) is True
await jenkins.restart()
await jenkins.wait_until_ready()
assert (await jenkins.is_ready()) is True
@pytest.mark.asyncio
async def test_tokens(jenkins):
version = await jenkins.get_version()
if not (version.major >= 2 and version.minor >= 129):
        pytest.skip("Version doesn't support API tokens")
async with CreateJob(jenkins) as job_name:
token_value, token_uuid = await jenkins.generate_token('')
token_name = str(time.time())
token_value, token_uuid = await jenkins.generate_token(token_name)
await jenkins.nodes.enable('master')
        # instance authenticated with the generated API token instead of the password
jenkins_tokened = Jenkins(get_host(), get_login(), token_value)
await jenkins_tokened.builds.start(job_name)
await jenkins.revoke_token(token_uuid)
with pytest.raises(JenkinsError):
await jenkins_tokened.builds.start(job_name)
@pytest.mark.asyncio
async def test_run_groovy_script(jenkins):
# TC: compare with expected result
text = 'test'
response = await jenkins.run_groovy_script('print("{}")'.format(text))
assert response == text
# TC: invalid script
response = await jenkins.run_groovy_script('xxx')
assert 'No such property' in response
@pytest.mark.asyncio
async def test_retry_client(monkeypatch):
attempts = 0
async def text():
return 'error'
async def request(*args, **kwargs):
nonlocal attempts
attempts += 1
response = namedtuple(
'response', ['status', 'cookies', 'text', 'json']
)
if attempts == 1:
raise asyncio.TimeoutError
elif attempts < 3:
response.status = HTTPStatus.INTERNAL_SERVER_ERROR
else:
response.status = HTTPStatus.OK
response.text = text
response.json = text
return response
retry = dict(total=5, statuses=[HTTPStatus.INTERNAL_SERVER_ERROR])
try:
jenkins = Jenkins(get_host(), get_login(), get_password(), retry=retry)
await jenkins.get_status()
monkeypatch.setattr('aiohttp.client.ClientSession.request', request)
await jenkins.get_status()
finally:
await jenkins.close()
@pytest.mark.asyncio
async def test_retry_validation():
retry = dict(attempts=5, statuses=[HTTPStatus.INTERNAL_SERVER_ERROR])
with pytest.raises(JenkinsError):
jenkins = Jenkins(get_host(), get_login(), get_password(), retry=retry)
await jenkins.get_status()
def test_session_close():
def do():
Jenkins(
get_host(),
get_login(),
get_password(),
retry=dict(enabled=True)
)
do()
# just check for no exceptions
import gc
gc.collect()
|
25572
|
from flax.geometry import Blob, Point, Rectangle, Size, Span
def test_blob_create():
rect = Rectangle(origin=Point(0, 0), size=Size(5, 5))
blob = Blob.from_rectangle(rect)
assert blob.area == rect.area
assert blob.height == rect.height
def test_blob_math_disjoint():
# These rectangles look like this:
# xxx
# xxx
# xxx xxx
# xxx
# xxx
rect1 = Rectangle(origin=Point(0, 0), size=Size(3, 3))
rect2 = Rectangle(origin=Point(6, 2), size=Size(3, 3))
blob1 = Blob.from_rectangle(rect1)
blob2 = Blob.from_rectangle(rect2)
union_blob = blob1 + blob2
assert union_blob.area == blob1.area + blob2.area
assert union_blob.area == rect1.area + rect2.area
assert union_blob.height == 5
left_blob = blob1 - blob2
from pprint import pprint
pprint(blob1.spans)
pprint(blob2.spans)
pprint(left_blob.spans)
assert left_blob.area == blob1.area
assert left_blob == blob1
right_blob = blob2 - blob1
from pprint import pprint
pprint(blob1.spans)
pprint(blob2.spans)
pprint(right_blob.spans)
assert right_blob.area == blob2.area
assert right_blob == blob2
def test_blob_math_overlap():
# These rectangles look like this:
# xxx
# x##x
# x##x
# xxx
rect1 = Rectangle(origin=Point(0, 0), size=Size(3, 3))
rect2 = Rectangle(origin=Point(1, 1), size=Size(3, 3))
blob1 = Blob.from_rectangle(rect1)
blob2 = Blob.from_rectangle(rect2)
union_blob = blob1 + blob2
assert union_blob.area == 14
left_blob = blob1 - blob2
assert left_blob.area == 5
assert left_blob.height == 3
assert left_blob.spans == {
0: (Span(0, 2),),
1: (Span(0, 0),),
2: (Span(0, 0),),
}
right_blob = blob2 - blob1
assert right_blob.area == 5
assert right_blob.height == 3
assert right_blob.spans == {
1: (Span(3, 3),),
2: (Span(3, 3),),
3: (Span(1, 3),),
}
def test_blob_math_contain():
# These rectangles look like this:
# xxxxx
# x###x
# x###x
# x###x
# xxxxx
rect1 = Rectangle(origin=Point(0, 0), size=Size(5, 5))
rect2 = Rectangle(origin=Point(1, 1), size=Size(3, 3))
blob1 = Blob.from_rectangle(rect1)
blob2 = Blob.from_rectangle(rect2)
union_blob = blob1 + blob2
assert union_blob.area == blob1.area
assert union_blob.height == blob1.height
left_blob = blob1 - blob2
assert left_blob.area == 16
assert left_blob.height == 5
assert left_blob.spans == {
0: (Span(0, 4),),
1: (Span(0, 0), Span(4, 4)),
2: (Span(0, 0), Span(4, 4)),
3: (Span(0, 0), Span(4, 4)),
4: (Span(0, 4),),
}
right_blob = blob2 - blob1
assert right_blob.area == 0
assert right_blob.height == 0
assert right_blob.spans == {}
def test_blob_math_fuzzer():
pass
|
25577
|
from resolwe.flow.models import Data
from resolwe.test import tag_process
from resolwe_bio.utils.filter import filter_vcf_variable
from resolwe_bio.utils.test import BioProcessTestCase
class CheMutWorkflowTestCase(BioProcessTestCase):
@tag_process("workflow-chemut")
def test_chemut_workflow(self):
with self.preparation_stage():
inputs = {
"src": "chemut_genome.fasta.gz",
"species": "Dictyostelium discoideum",
"build": "dd-05-2009",
}
ref_seq = self.run_process("upload-fasta-nucl", inputs)
bwa_index = self.run_process("bwa-index", {"ref_seq": ref_seq.id})
inputs = {"src1": ["AX4_mate1.fq.gz"], "src2": ["AX4_mate2.fq.gz"]}
parental_reads = self.run_process("upload-fastq-paired", inputs)
inputs = {"src1": ["CM_mate1.fq.gz"], "src2": ["CM_mate2.fq.gz"]}
mut_reads = self.run_process("upload-fastq-paired", inputs)
inputs = {"genome": bwa_index.id, "reads": parental_reads.id}
align_parental = self.run_process("alignment-bwa-mem", inputs)
inputs = {"genome": bwa_index.id, "reads": mut_reads.id}
align_mut = self.run_process("alignment-bwa-mem", inputs)
self.run_process(
"workflow-chemut",
{
"analysis_type": "snv",
"parental_strains": [align_parental.id],
"mutant_strains": [align_mut.id],
"genome": ref_seq.id,
"Vc": {"stand_emit_conf": 15, "stand_call_conf": 35, "rf": True},
"Vf": {"read_depth": 7},
},
)
for data in Data.objects.all():
self.assertStatus(data, Data.STATUS_DONE)
variants = Data.objects.last()
self.assertFile(
variants,
"vcf",
"chemut.vcf.gz",
file_filter=filter_vcf_variable,
compression="gzip",
)
|
25628
|
from hearthbreaker.cards.base import SpellCard
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY
from hearthbreaker.tags.base import BuffUntil, Buff
from hearthbreaker.tags.event import TurnStarted
from hearthbreaker.tags.status import Stealth, Taunt, Frozen
import hearthbreaker.targeting
class TheCoin(SpellCard):
def __init__(self):
super().__init__("The Coin", 0, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False)
def use(self, player, game):
super().use(player, game)
if player.mana < 10:
player.mana += 1
class ArmorPlating(SpellCard):
def __init__(self):
super().__init__("Armor Plating", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.increase_health(1)
class EmergencyCoolant(SpellCard):
def __init__(self):
super().__init__("Emergency Coolant", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.add_buff(Buff(Frozen()))
class FinickyCloakfield(SpellCard):
def __init__(self):
super().__init__("Finicky Cloakfield", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.add_buff(BuffUntil(Stealth(), TurnStarted()))
class ReversingSwitch(SpellCard):
def __init__(self):
super().__init__("Reversing Switch", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
temp_attack = self.target.calculate_attack()
temp_health = self.target.health
if temp_attack == 0:
self.target.die(None)
else:
self.target.set_attack_to(temp_health)
self.target.set_health_to(temp_attack)
class RustyHorn(SpellCard):
def __init__(self):
super().__init__("Rusty Horn", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.add_buff(Buff(Taunt()))
class TimeRewinder(SpellCard):
def __init__(self):
super().__init__("Time Rewinder", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.bounce()
class WhirlingBlades(SpellCard):
def __init__(self):
super().__init__("Whirling Blades", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.change_attack(1)
spare_part_list = [ArmorPlating(), EmergencyCoolant(), FinickyCloakfield(), TimeRewinder(), ReversingSwitch(),
RustyHorn(), WhirlingBlades()]
class GallywixsCoin(SpellCard):
def __init__(self):
super().__init__("Gallywix's Coin", 0, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False)
def use(self, player, game):
super().use(player, game)
if player.mana < 10:
player.mana += 1
|
25644
|
import subprocess
import time
import sys
import signal
from testutils import assert_raises
is_unix = not sys.platform.startswith("win")
if is_unix:
def echo(text):
return ["echo", text]
def sleep(secs):
return ["sleep", str(secs)]
else:
def echo(text):
return ["cmd", "/C", f"echo {text}"]
def sleep(secs):
# TODO: make work in a non-unixy environment (something with timeout.exe?)
return ["sleep", str(secs)]
p = subprocess.Popen(echo("test"))
time.sleep(0.1)
assert p.returncode is None
assert p.poll() == 0
assert p.returncode == 0
p = subprocess.Popen(sleep(2))
assert p.poll() is None
with assert_raises(subprocess.TimeoutExpired):
assert p.wait(1)
p.wait()
assert p.returncode == 0
p = subprocess.Popen(echo("test"), stdout=subprocess.PIPE)
p.wait()
assert p.stdout.read().strip() == b"test"
p = subprocess.Popen(sleep(2))
p.terminate()
p.wait()
if is_unix:
assert p.returncode == -signal.SIGTERM
else:
assert p.returncode == 1
p = subprocess.Popen(sleep(2))
p.kill()
p.wait()
if is_unix:
assert p.returncode == -signal.SIGKILL
else:
assert p.returncode == 1
p = subprocess.Popen(echo("test"), stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
assert stdout.strip() == b"test"
p = subprocess.Popen(sleep(5), stdout=subprocess.PIPE)
with assert_raises(subprocess.TimeoutExpired):
p.communicate(timeout=1)
|
25654
|
from . import get_main_movies_base_data
from . import get_main_movies_full_data
from . import get_celebrities_full_data
from . import down_video_images
from . import down_celebrity_images
|
25661
|
import random
from precise.skaters.managerutil.managertesting import manager_test_run
from precise.skaters.managers.equalmanagers import equal_daily_long_manager, equal_long_manager
from precise.skaters.managers.equalmanagers import equal_weekly_long_manager, equal_weekly_buy_and_hold_long_manager
from precise.skatertools.data.equityhistorical import random_cached_equity_dense
from numpy.testing import assert_array_almost_equal
def test_random_manager():
from precise.skaters.managers.allmanagers import LONG_MANAGERS
mgr = random.choice(LONG_MANAGERS)
manager_test_run(mgr=mgr)
def test_daily_equal():
assert_equal_managing(equal_long_manager, equal_daily_long_manager)
def test_weekly_equal():
assert_equal_managing(equal_weekly_long_manager, equal_weekly_buy_and_hold_long_manager)
def assert_equal_managing(mgr1,mgr2):
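    # Feed the same simulated return series through both managers and require
    # that they produce (almost) identical portfolio weights at every step.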
ys = random_cached_equity_dense(k=1, n_obs=50, n_dim=3, as_frame=False)
s1 = {}
s2 = {}
for y in ys:
w1, s1 = mgr1(y=y, s=s1)
w2, s2 = mgr2(y=y, s=s2)
assert_array_almost_equal(w1,w2, err_msg='managers are not the same')
if __name__=='__main__':
test_daily_equal()
test_weekly_equal()
|
25672
|
def main():
seed = 0x1234
e = [0x62d5, 0x7b27, 0xc5d4, 0x11c4, 0x5d67, 0xa356, 0x5f84,
0xbd67, 0xad04, 0x9a64, 0xefa6, 0x94d6, 0x2434, 0x0178]
flag = ""
for index in range(14):
for i in range(0x7f-0x20):
c = chr(0x20+i)
res = encode(c, index, seed)
if res == e[index]:
print(c)
flag += c
seed = encode(c, index, seed)
print("Kosen{%s}" % flag)
def encode(p1, p2, p3):
p1 = ord(p1) & 0xff
p2 = p2 & 0xffffffff
p3 = p3 & 0xffffffff
result = (((p1 >> 4) | (p1 & 0xf) << 4) + 1) ^ ((p2 >> 4) |
(~p2 << 4)) & 0xff | (p3 >> 4) << 8 ^ ((p3 >> 0xc) | (p3 << 4)) << 8
return result & 0xffff
if __name__ == "__main__":
main()
|
25706
|
import arcpy, os
#walk through all subdirectories and change mxd to store relative paths
for root, dirs, files in os.walk(r"Q:\Geodata\shape"):
for f in files:
if f.endswith(".mxd"):
filepath = root + '\\' + f
print filepath
try:
mxd = arcpy.mapping.MapDocument(filepath)
#set relative paths property
mxd.relativePaths = True
mxd.save()
except:
print filepath + ' failed'
pass
|
25721
|
from django.contrib.auth.models import User
from django.test import TestCase
from adminlte_log.models import AdminlteLogType, AdminlteLog
class AdminlteLogTest(TestCase):
def setUp(self):
AdminlteLogType.objects.create(name='test', code='test')
self.user = User.objects.create_user(username='bohan')
def test_log(self):
log = AdminlteLog.info('test', user=self.user, sort_desc='This is a log', foo='bar')
self.assertEqual(log.id, 1)
|
25778
|
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from rl.dataset import ReplayBuffer, RandomSampler
from rl.base_agent import BaseAgent
from rl.policies.mlp_actor_critic import MlpActor, MlpCritic
from util.logger import logger
from util.mpi import mpi_average
from util.pytorch import optimizer_cuda, count_parameters, \
compute_gradient_norm, compute_weight_norm, sync_networks, sync_grads, \
obs2tensor, to_tensor
from env.action_spec import ActionSpec
class MetaPPOAgent(BaseAgent):
""" Meta policy class. """
def __init__(self, config, ob_space):
super().__init__(config, ob_space)
if config.meta is None:
logger.warn('Creating a dummy meta policy.')
return
# parse body parts and skills
if config.subdiv:
# subdiv = ob1,ob2-ac1/ob3,ob4-ac2/...
clusters = config.subdiv.split('/')
clusters = [cluster.split('-')[1].split(',') for cluster in clusters]
else:
clusters = [ob_space.keys()]
if config.subdiv_skills:
subdiv_skills = config.subdiv_skills.split('/')
subdiv_skills = [skills.split(',') for skills in subdiv_skills]
else:
subdiv_skills = [['primitive']] * len(clusters)
self.subdiv_skills = subdiv_skills
assert len(subdiv_skills) == len(clusters), \
'subdiv_skills and clusters have different # subdivisions'
if config.meta == 'hard':
ac_space = ActionSpec(size=0)
for cluster, skills in zip(clusters, subdiv_skills):
ac_space.add(','.join(cluster), 'discrete', len(skills), 0, 1)
self.ac_space = ac_space
if config.diayn:
ob_clusters = config.subdiv.split('/')
ob_clusters = [cluster.split('-')[0].split(',') for cluster in ob_clusters]
for cluster, skills in zip(ob_clusters, subdiv_skills):
self.ac_space.add(','.join(cluster) + '_diayn', 'continuous', config.z_dim, 0, 1)
# build up networks
self._actor = MlpActor(config, ob_space, ac_space, tanh_policy=False)
self._old_actor = MlpActor(config, ob_space, ac_space, tanh_policy=False)
self._critic = MlpCritic(config, ob_space)
self._network_cuda(config.device)
self._actor_optim = optim.Adam(self._actor.parameters(), lr=config.lr_actor)
self._critic_optim = optim.Adam(self._critic.parameters(), lr=config.lr_critic)
sampler = RandomSampler()
self._buffer = ReplayBuffer(['ob', 'ac', 'done', 'rew', 'ret', 'adv',
'ac_before_activation', 'log_prob'],
config.buffer_size,
sampler.sample_func)
if config.is_chef:
logger.warn('Creating a meta PPO agent')
logger.info('The actor has %d parameters', count_parameters(self._actor))
logger.info('The critic has %d parameters', count_parameters(self._critic))
def store_episode(self, rollouts):
""" Stores @rollouts to replay buffer. """
self._compute_gae(rollouts)
self._buffer.store_episode(rollouts)
def _compute_gae(self, rollouts):
""" Computes GAE from @rollouts. """
T = len(rollouts['done'])
ob = rollouts['ob']
ob = self.normalize(ob)
ob = obs2tensor(ob, self._config.device)
vpred = self._critic(ob).detach().cpu().numpy()[:,0]
assert len(vpred) == T + 1
done = rollouts['done']
rew = rollouts['rew']
        adv = np.empty((T,), 'float32')
lastgaelam = 0
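        # GAE recursion: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t) and
        # A_t = delta_t + gamma * lambda * A_{t+1}, with both terms masked to
        # zero across episode boundaries via `nonterminal`.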
for t in reversed(range(T)):
nonterminal = 1 - done[t]
delta = rew[t] + self._config.discount_factor * vpred[t + 1] * nonterminal - vpred[t]
adv[t] = lastgaelam = delta + self._config.discount_factor * self._config.gae_lambda * nonterminal * lastgaelam
ret = adv + vpred[:-1]
assert np.isfinite(adv).all()
assert np.isfinite(ret).all()
# update rollouts
if adv.std() == 0:
rollouts['adv'] = (adv * 0).tolist()
else:
rollouts['adv'] = ((adv - adv.mean()) / adv.std()).tolist()
rollouts['ret'] = ret.tolist()
def state_dict(self):
if self._config.meta is None:
return {}
return {
'actor_state_dict': self._actor.state_dict(),
'critic_state_dict': self._critic.state_dict(),
'actor_optim_state_dict': self._actor_optim.state_dict(),
'critic_optim_state_dict': self._critic_optim.state_dict(),
'ob_norm_state_dict': self._ob_norm.state_dict(),
}
def load_state_dict(self, ckpt):
if self._config.meta is None:
return
self._actor.load_state_dict(ckpt['actor_state_dict'])
self._critic.load_state_dict(ckpt['critic_state_dict'])
self._ob_norm.load_state_dict(ckpt['ob_norm_state_dict'])
self._network_cuda(self._config.device)
self._actor_optim.load_state_dict(ckpt['actor_optim_state_dict'])
self._critic_optim.load_state_dict(ckpt['critic_optim_state_dict'])
optimizer_cuda(self._actor_optim, self._config.device)
optimizer_cuda(self._critic_optim, self._config.device)
def _network_cuda(self, device):
self._actor.to(device)
self._old_actor.to(device)
self._critic.to(device)
def sync_networks(self):
sync_networks(self._actor)
sync_networks(self._critic)
def train(self):
self._copy_target_network(self._old_actor, self._actor)
for _ in range(self._config.num_batches):
transitions = self._buffer.sample(self._config.batch_size)
train_info = self._update_network(transitions)
self._buffer.clear()
train_info.update({
'actor_grad_norm': compute_gradient_norm(self._actor),
'actor_weight_norm': compute_weight_norm(self._actor),
'critic_grad_norm': compute_gradient_norm(self._critic),
'critic_weight_norm': compute_weight_norm(self._critic),
})
return train_info
def _update_network(self, transitions):
info = {}
# pre-process observations
o = transitions['ob']
o = self.normalize(o)
bs = len(transitions['done'])
_to_tensor = lambda x: to_tensor(x, self._config.device)
o = _to_tensor(o)
ac = _to_tensor(transitions['ac'])
z = _to_tensor(transitions['ac_before_activation'])
ret = _to_tensor(transitions['ret']).reshape(bs, 1)
adv = _to_tensor(transitions['adv']).reshape(bs, 1)
old_log_pi = _to_tensor(transitions['log_prob']).reshape(bs, 1)
log_pi, ent = self._actor.act_log(o, z)
if (log_pi - old_log_pi).max() > 20:
print('(log_pi - old_log_pi) is too large', (log_pi - old_log_pi).max())
import ipdb; ipdb.set_trace()
# the actor loss
entropy_loss = self._config.entropy_loss_coeff * ent.mean()
ratio = torch.exp(torch.clamp(log_pi - old_log_pi, -20, 20))
surr1 = ratio * adv
surr2 = torch.clamp(ratio, 1.0 - self._config.clip_param,
1.0 + self._config.clip_param) * adv
actor_loss = -torch.min(surr1, surr2).mean()
if not np.isfinite(ratio.cpu().detach()).all() or not np.isfinite(adv.cpu().detach()).all():
import ipdb; ipdb.set_trace()
info['entropy_loss'] = entropy_loss.cpu().item()
info['actor_loss'] = actor_loss.cpu().item()
actor_loss += entropy_loss
discriminator_loss = self._actor.discriminator_loss()
if discriminator_loss is not None:
actor_loss += discriminator_loss * self._config.discriminator_loss_weight
info['discriminator_loss'] = discriminator_loss.cpu().item()
# the q loss
value_pred = self._critic(o)
value_loss = self._config.value_loss_coeff * (ret - value_pred).pow(2).mean()
info['value_target'] = ret.mean().cpu().item()
info['value_predicted'] = value_pred.mean().cpu().item()
info['value_loss'] = value_loss.cpu().item()
# update the actor
self._actor_optim.zero_grad()
actor_loss.backward()
sync_grads(self._actor)
self._actor_optim.step()
# update the critic
self._critic_optim.zero_grad()
value_loss.backward()
sync_grads(self._critic)
self._critic_optim.step()
# include info from policy
info.update(self._actor.info)
return mpi_average(info)
def act(self, ob, is_train=True):
"""
Returns a set of actions and the actors' activations given an observation @ob.
"""
if self._config.meta:
ob = self.normalize(ob)
return self._actor.act(ob, is_train, return_log_prob=True)
else:
return [0], None, None
|
25793
|
from rest_framework import generics, permissions
from rest_framework import filters as filters_rf
from django_filters import rest_framework as filters
from allauth.socialaccount.models import SocialAccount, SocialApp, SocialToken
from .serializers import SocialAppSerializer, SocialAppExtendedSerializer, SocialAccountSerializer, \
SocialAccountExtendedSerializer, SocialTokenSerializer, SocialTokenExtendedSerializer
class SocialAppListApi(generics.ListAPIView):
"""Список всех SocialApp"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialApp.objects.all()
serializer_class = SocialAppExtendedSerializer
filter_backends = [filters.DjangoFilterBackend,
filters_rf.SearchFilter,
filters_rf.OrderingFilter]
filter_fields = ('id', 'provider', 'sites')
search_fields = ['name', 'client_id', 'id']
ordering = ['id']
class SocialAppRetrieveDeleteUpdateApi(generics.RetrieveUpdateDestroyAPIView):
"""Просмотр, изменение и удаления приложения соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialApp.objects.all()
lookup_field = 'id'
serializer_class = SocialAppSerializer
class SocialAppCreateApi(generics.CreateAPIView):
"""Добавление приложения соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialApp.objects.none()
serializer_class = SocialAppSerializer
class SocialAccountListApi(generics.ListAPIView):
"""Список всех аккаунтов соц. сетей"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialAccount.objects.all()
serializer_class = SocialAccountExtendedSerializer
filter_backends = [filters.DjangoFilterBackend,
filters_rf.SearchFilter,
filters_rf.OrderingFilter]
filter_fields = ('id', 'user', 'provider')
search_fields = ['user__username']
ordering = ['id']
class SocialAccountRetrieveDeleteUpdateApi(generics.RetrieveUpdateDestroyAPIView):
"""Просмотр, изменение и удаления аккаунта в соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialAccount.objects.all()
serializer_class = SocialAccountSerializer
lookup_field = 'id'
class SocialAccountCreateApi(generics.CreateAPIView):
"""Добавление аккаунта соц. сети"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialAccount.objects.none()
serializer_class = SocialAccountSerializer
class SocialTokenListApi(generics.ListAPIView):
"""Список токенов"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialToken.objects.all()
serializer_class = SocialTokenExtendedSerializer
filter_backends = [filters.DjangoFilterBackend,
filters_rf.SearchFilter,
filters_rf.OrderingFilter]
filter_fields = ('id', 'app', 'account')
search_fields = ['account__user__username', 'token', 'id']
ordering = ['id']
class SocialTokenRetrieveDeleteUpdateApi(generics.RetrieveUpdateDestroyAPIView):
"""Просмотр, изменение и удаления токенов"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialToken.objects.all()
serializer_class = SocialTokenSerializer
lookup_field = 'id'
class SocialTokenCreateApi(generics.CreateAPIView):
"""Добавление токена"""
permission_classes = [permissions.DjangoModelPermissions]
queryset = SocialToken.objects.none()
serializer_class = SocialTokenSerializer
|
25798
|
import os
import json
import re
import sys
import logging
import hashlib
import uuid
import jsonschema
import tempfile
import controller
import anchore_utils
import anchore_auth
from anchore.util import contexts
_logger = logging.getLogger(__name__)
default_policy_version = '1_0'
default_whitelist_version = '1_0'
default_bundle_version = '1_0'
supported_whitelist_versions = [default_whitelist_version]
supported_bundle_versions = [default_bundle_version]
supported_policy_versions = [default_bundle_version]
# interface operations
def check():
if not load_policymeta():
return (False, "policys are not initialized: please run 'anchore policys sync' and try again")
return (True, "success")
def sync_policymeta(bundlefile=None, outfile=None):
ret = {'success': False, 'text': "", 'status_code': 1}
policyurl = contexts['anchore_config']['policy_url']
policy_timeout = contexts['anchore_config']['policy_conn_timeout']
policy_maxretries = contexts['anchore_config']['policy_max_retries']
policymeta = {}
if bundlefile:
if not os.path.exists(bundlefile):
ret['text'] = "no such file ("+str(bundlefile)+")"
return(False, ret)
try:
with open(bundlefile, 'r') as FH:
policymeta = json.loads(FH.read())
except Exception as err:
ret['text'] = "synced policy bundle cannot be read/is not valid JSON: exception - " +str(err)
return(False, ret)
else:
record = anchore_auth.anchore_auth_get(contexts['anchore_auth'], policyurl, timeout=policy_timeout, retries=policy_maxretries)
if record['success']:
try:
bundleraw = json.loads(record['text'])
policymeta = bundleraw['bundle']
except Exception as err:
ret['text'] = 'failed to parse bundle response from service - exception: ' + str(err)
return(False, ret)
else:
_logger.debug("failed to download policybundle: message from server - " + str(record))
themsg = "unspecificied failure while attempting to download bundle from anchore.io"
try:
if record['status_code'] == 404:
themsg = "no policy bundle found on anchore.io - please create and save a policy using the policy editor in anchore.io and try again"
elif record['status_code'] == 401:
themsg = "cannot download a policy bundle from anchore.io - current user does not have access rights to download custom policies"
except Exception as err:
themsg = "exception while inspecting response from server - exception: " + str(err)
ret['text'] = "failed to download policybundle: " + str(themsg)
return(False, ret)
if not verify_policy_bundle(bundle=policymeta):
_logger.debug("downloaded policy bundle failed to verify: " +str(policymeta))
ret['text'] = "input policy bundle does not conform to policy bundle schema"
return(False, ret)
if outfile:
if outfile != '-':
try:
with open(outfile, 'w') as OFH:
OFH.write(json.dumps(policymeta))
except Exception as err:
ret['text'] = "could not write downloaded policy bundle to specified file ("+str(outfile)+") - exception: " + str(err)
return(False, ret)
else:
if not contexts['anchore_db'].save_policymeta(policymeta):
ret['text'] = "cannot get list of policies from service\nMessage from server: " + record['text']
return (False, ret)
if policymeta:
ret['text'] = json.dumps(policymeta, indent=4)
return(True, ret)
def load_policymeta(policymetafile=None):
ret = {}
if policymetafile:
with open(policymetafile, 'r') as FH:
ret = json.loads(FH.read())
else:
ret = contexts['anchore_db'].load_policymeta()
if not ret:
# use the system default
default_policy_bundle_file = os.path.join(contexts['anchore_config'].config_dir, 'anchore_default_bundle.json')
try:
if os.path.exists(default_policy_bundle_file):
with open(default_policy_bundle_file, 'r') as FH:
ret = json.loads(FH.read())
else:
raise Exception("no such file: " + str(default_policy_bundle_file))
except Exception as err:
_logger.warn("could not load default bundle (" + str(default_policy_bundle_file) + ") - exception: " + str(err))
raise err
return(ret)
def save_policymeta(policymeta):
return(contexts['anchore_db'].save_policymeta(policymeta))
# bundle
# Convert
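# Example usage (mirrors the __main__ test at the bottom of this module; the paths are
# illustrative placeholders, not requirements):
#   bundle = convert_to_policy_bundle(name='default',
#                                     policy_file='/root/.anchore/conf/anchore_gate.policy',
#                                     whitelist_files=['/root/wl0'])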
def convert_to_policy_bundle(name="default", version=default_bundle_version, policy_file=None, policy_version=default_policy_version, whitelist_files=[], whitelist_version=default_whitelist_version):
policies = {}
p = read_policy(name=str(uuid.uuid4()), file=policy_file)
policies.update(p)
whitelists = {}
for wf in whitelist_files:
w = read_whitelist(name=str(uuid.uuid4()), file=wf)
whitelists.update(w)
mappings = []
m = create_mapping(map_name="default", policy_name=policies.keys()[0], whitelists=whitelists.keys(), repotagstring='*/*:*')
mappings.append(m)
bundle = create_policy_bundle(name='default', policies=policies, policy_version=policy_version, whitelists=whitelists, whitelist_version=whitelist_version, mappings=mappings)
if not verify_policy_bundle(bundle=bundle):
return({})
return(bundle)
# C
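# create_policy_bundle returns a dict of the shape (field values illustrative):
#   {'id': '<uuid4>', 'name': name, 'version': '1_0',
#    'policies':   [{'version': '1_0', 'id': <policy name>, 'name': <policy name>, 'rules': [...]}, ...],
#    'whitelists': [{'version': '1_0', 'id': <wl name>, 'name': <wl name>, 'items': [...]}, ...],
#    'mappings':   [...]}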
def create_policy_bundle(name=None, version=default_bundle_version, policies={}, policy_version=default_policy_version, whitelists={}, whitelist_version=default_whitelist_version, mappings=[]):
ret = {
'id': str(uuid.uuid4()),
'name':name,
'version':version,
'policies':[],
'whitelists':[],
'mappings':[]
}
for f in policies:
el = {
'version':policy_version,
'id':f,
'name':f,
'rules':[]
}
el['rules'] = unformat_policy_data(policies[f])
ret['policies'].append(el)
for f in whitelists:
el = {
'version':whitelist_version,
'id':f,
'name':f,
'items':[]
}
el['items'] = unformat_whitelist_data(whitelists[f])
ret['whitelists'].append(el)
for m in mappings:
ret['mappings'].append(m)
_logger.debug("created bundle: ("+str(name)+") : " + json.dumps(ret.keys(), indent=4))
return(ret)
# R
def read_policy_bundle(bundle_file=None):
ret = {}
with open(bundle_file, 'r') as FH:
ret = json.loads(FH.read())
cleanstr = json.dumps(ret).encode('utf8')
ret = json.loads(cleanstr)
if not verify_policy_bundle(bundle=ret):
raise Exception("input bundle does not conform to bundle schema")
return(ret)
# V
def verify_policy_bundle(bundle={}):
bundle_schema = {}
try:
bundle_schema_file = os.path.join(contexts['anchore_config']['pkg_dir'], 'schemas', 'anchore-bundle.schema')
except:
from pkg_resources import Requirement, resource_filename
bundle_schema_file = os.path.join(resource_filename("anchore", ""), 'schemas', 'anchore-bundle.schema')
try:
if os.path.exists(bundle_schema_file):
with open (bundle_schema_file, "r") as FH:
bundle_schema = json.loads(FH.read())
except Exception as err:
_logger.error("could not load bundle schema: " + str(bundle_schema_file))
return(False)
if not bundle_schema:
_logger.error("could not load bundle schema: " + str(bundle_schema_file))
return(False)
else:
try:
jsonschema.validate(bundle, schema=bundle_schema)
except Exception as err:
_logger.error("could not validate bundle against schema: " + str(err))
return(False)
return(True)
# U
def update_policy_bundle(bundle={}, name=None, policies={}, whitelists={}, mappings={}):
if not verify_policy_bundle(bundle=bundle):
raise Exception("input bundle is incomplete - cannot update bad bundle: " + json.dumps(bundle, indent=4))
ret = {}
ret.update(bundle)
new_bundle = create_policy_bundle(name=name, policies=policies, whitelists=whitelists, mappings=mappings)
for key in ['name', 'policies', 'whitelists', 'mappings']:
if new_bundle[key]:
ret[key] = new_bundle.pop(key, ret[key])
return(ret)
# SAVE
def write_policy_bundle(bundle_file=None, bundle={}):
if not verify_policy_bundle(bundle=bundle):
raise Exception("cannot verify input policy bundle, skipping write: " + str(bundle_file))
with open(bundle_file, 'w') as OFH:
OFH.write(json.dumps(bundle))
return(True)
# mapping
# C
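# create_mapping returns a dict of the shape below; registry/repository/tag are parsed
# out of repotagstring via anchore_utils.get_all_image_info:
#   {'name': map_name, 'policy_id': policy_name, 'whitelist_ids': [...],
#    'registry': <registry>, 'repository': <repo>,
#    'image': {'type': 'tag', 'value': <tag>}, 'id': '<uuid4>'}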
def create_mapping(map_name=None, policy_name=None, whitelists=[], repotagstring=None):
ret = {}
ret['name'] = map_name
ret['policy_id'] = policy_name
ret['whitelist_ids'] = whitelists
image_info = anchore_utils.get_all_image_info(repotagstring)
registry = image_info.pop('registry', "N/A")
repo = image_info.pop('repo', "N/A")
tag = image_info.pop('tag', "N/A")
imageId = image_info.pop('imageId', "N/A")
digest = image_info.pop('digest', "N/A")
ret['registry'] = registry
ret['repository'] = repo
ret['image'] = {
'type':'tag',
'value':tag
}
ret['id'] = str(uuid.uuid4())
return(ret)
# policy/wl
# V
def verify_whitelist(whitelistdata=[], version=default_whitelist_version):
ret = True
if not isinstance(whitelistdata, list):
ret = False
if version in supported_whitelist_versions:
# do 1_0 format/checks
pass
return(ret)
# R
def read_whitelist(name=None, file=None, version=default_whitelist_version):
if not name:
raise Exception("bad input: " + str(name) + " : " + str(file))
if file:
if not os.path.exists(file):
raise Exception("input file does not exist: " + str(file))
wdata = anchore_utils.read_plainfile_tolist(file)
if not verify_whitelist(whitelistdata=wdata, version=version):
raise Exception("cannot verify whitelist data read from file as valid")
else:
wdata = []
ret = {}
ret[name] = wdata
return(ret)
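# Whitelist files contain one "<gate> <triggerId>" pair per line; lines whose first token
# starts with '#' are ignored. For example (gate/trigger values illustrative), the line
#   "ANCHORESEC CVE-2017-1000367+bash"
# is structured as ["ANCHORESEC", "CVE-2017-1000367+bash"].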
def structure_whitelist(whitelistdata):
ret = []
for item in whitelistdata:
try:
(k,v) = re.match("([^\s]*)\s*([^\s]*)", item).group(1,2)
if not re.match("^\s*#.*", k):
ret.append([k, v])
except Exception as err:
pass
return(ret)
def unformat_whitelist_data(wldata):
ret = []
whitelists = structure_whitelist(wldata)
for wlitem in whitelists:
gate, triggerId = wlitem
el = {
'gate':gate,
'trigger_id':triggerId,
'id':str(uuid.uuid4())
}
ret.append(el)
return(ret)
def format_whitelist_data(wldata):
ret = []
version = wldata['version']
if wldata['version'] == default_whitelist_version:
for item in wldata['items']:
ret.append(' '.join([item['gate'], item['trigger_id']]))
else:
raise Exception ("detected whitelist version format in bundle not supported: " + str(version))
return(ret)
def extract_whitelist_data(bundle, wlid):
for wl in bundle['whitelists']:
if wlid == wl['id']:
return(format_whitelist_data(wl))
# R
def read_policy(name=None, file=None, version=default_bundle_version):
if not name or not file:
raise Exception("input error")
if not os.path.exists(file):
raise Exception("input file does not exist: " + str(file))
pdata = anchore_utils.read_plainfile_tolist(file)
if not verify_policy(policydata=pdata, version=version):
raise Exception("cannot verify policy data read from file as valid")
ret = {}
ret[name] = pdata
return(ret)
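# Raw policy lines have the form "GATE:TRIGGER:ACTION[:name1=value1 name2=value2 ...]"
# (see format_policy_data below), e.g. "SOMEGATE:SOMETRIGGER:WARN:minval=1" with
# illustrative gate/trigger/parameter names and one of the GO/WARN/STOP actions.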
def structure_policy(policydata):
policies = {}
for l in policydata:
l = l.strip()
patt = re.compile('^\s*#')
if (l and not patt.match(l)):
polinput = l.split(':')
module = polinput[0]
check = polinput[1]
action = polinput[2]
modparams = ""
if (len(polinput) > 3):
modparams = ':'.join(polinput[3:])
if module not in policies:
policies[module] = {}
if check not in policies[module]:
policies[module][check] = {}
if 'aptups' not in policies[module][check]:
policies[module][check]['aptups'] = []
aptup = [action, modparams]
if aptup not in policies[module][check]['aptups']:
policies[module][check]['aptups'].append(aptup)
policies[module][check]['action'] = action
policies[module][check]['params'] = modparams
return(policies)
# return a given policyId's policy from a bundle in raw poldata format
def extract_policy_data(bundle, polid):
for pol in bundle['policies']:
if polid == pol['id']:
return(format_policy_data(pol))
# convert from policy bundle policy format to raw poldata format
def format_policy_data(poldata):
ret = []
version = poldata['version']
if poldata['version'] == default_policy_version:
for item in poldata['rules']:
polline = ':'.join([item['gate'], item['trigger'], item['action'], ""])
if 'params' in item:
for param in item['params']:
polline = polline + param['name'] + '=' + param['value'] + " "
ret.append(polline)
else:
raise Exception ("detected policy version format in bundle not supported: " + str(version))
return(ret)
# convert from raw poldata format to bundle format
def unformat_policy_data(poldata):
ret = []
policies = structure_policy(poldata)
for gate in policies.keys():
try:
for trigger in policies[gate].keys():
action = policies[gate][trigger]['action']
params = policies[gate][trigger]['params']
el = {
'gate':gate,
'trigger':trigger,
'action':action,
'params':[]
}
for p in params.split():
(k,v) = p.split("=")
el['params'].append({'name':k, 'value':v})
ret.append(el)
except Exception as err:
print str(err)
pass
return(ret)
# V
def verify_policy(policydata=[], version=default_policy_version):
ret = True
if not isinstance(policydata, list):
ret = False
if version in supported_policy_versions:
# do 1_0 format/checks
pass
return(ret)
def run_bundle(anchore_config=None, bundle={}, image=None, matchtags=[], stateless=False, show_whitelisted=True, show_triggerIds=True):
retecode = 0
if not anchore_config or not bundle or not image:
raise Exception("input error")
if not verify_policy_bundle(bundle=bundle):
raise Exception("input bundle does not conform to bundle schema")
imageId = anchore_utils.discover_imageId(image)
digests = []
if not matchtags:
matchtags = [image]
evalmap = {}
evalresults = {}
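# Evaluations are cached by evalhash (md5 of the policy name plus the sorted whitelist
# names), so identical policy/whitelist combinations are only evaluated once even when
# several matchtags map to the same rule.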
for matchtag in matchtags:
_logger.info("evaluating tag: " + str(matchtag))
mapping_results = get_mapping_actions(image=matchtag, imageId=imageId, in_digests=digests, bundle=bundle)
for pol,wl,polname,wlnames,mapmatch,match_json,evalhash in mapping_results:
evalmap[matchtag] = evalhash
_logger.debug("attempting eval: " + evalhash + " : " + matchtag)
if evalhash not in evalresults:
fnames = {}
try:
if stateless:
policies = structure_policy(pol)
whitelists = structure_whitelist(wl)
rc = execute_gates(imageId, policies)
result, fullresult = evaluate_gates_results(imageId, policies, {}, whitelists)
eval_result = structure_eval_results(imageId, fullresult, show_whitelisted=show_whitelisted, show_triggerIds=show_triggerIds, imageName=matchtag)
gate_result = {}
gate_result[imageId] = eval_result
else:
con = controller.Controller(anchore_config=anchore_config, imagelist=[imageId], allimages=contexts['anchore_allimages'], force=True)
for (fname, data) in [('tmppol', pol), ('tmpwl', wl)]:
fh, thefile = tempfile.mkstemp(dir=anchore_config['tmpdir'])
fnames[fname] = thefile
try:
with open(thefile, 'w') as OFH:
for l in data:
OFH.write(l + "\n")
except Exception as err:
raise err
finally:
os.close(fh)
gate_result = con.run_gates(policy=fnames['tmppol'], global_whitelist=fnames['tmpwl'], show_triggerIds=show_triggerIds, show_whitelisted=show_whitelisted)
evalel = {
'results': list(),
'policy_name':"N/A",
'whitelist_names':"N/A",
'policy_data':list(),
'whitelist_data':list(),
'mapmatch':"N/A",
'matched_mapping_rule': {}
}
evalel['results'] = gate_result
evalel['policy_name'] = polname
evalel['whitelist_names'] = wlnames
evalel['policy_data'] = pol
evalel['whitelist_data'] = wl
evalel['mapmatch'] = mapmatch
evalel['matched_mapping_rule'] = match_json
_logger.debug("caching eval result: " + evalhash + " : " + matchtag)
evalresults[evalhash] = evalel
ecode = result_get_highest_action(gate_result)
if ecode == 1:
retecode = 1
elif retecode == 0 and ecode > retecode:
retecode = ecode
except Exception as err:
_logger.error("policy evaluation error: " + str(err))
finally:
for f in fnames.keys():
if os.path.exists(fnames[f]):
os.remove(fnames[f])
else:
_logger.debug("skipping eval, result already cached: " + evalhash + " : " + matchtag)
ret = {}
for matchtag in matchtags:
ret[matchtag] = {}
ret[matchtag]['bundle_name'] = bundle['name']
try:
evalresult = evalresults[evalmap[matchtag]]
ret[matchtag]['evaluations'] = [evalresult]
except Exception as err:
raise err
return(ret, retecode)
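# Exit-code convention used by result_get_highest_action and propagated by run_bundle:
# 0 = GO (nothing stopped or warned), 1 = STOP, 2 = WARN; run_bundle returns 1 as soon
# as any evaluation STOPs, otherwise the highest code seen.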
def result_get_highest_action(results):
highest_action = 0
for k in results.keys():
action = results[k]['result']['final_action']
if action == 'STOP':
highest_action = 1
elif highest_action == 0 and action == 'WARN':
highest_action = 2
return(highest_action)
def get_mapping_actions(image=None, imageId=None, in_digests=[], bundle={}):
"""
Given an image, image_id, digests, and a bundle, determine which policies and whitelists to evaluate.
:param image: Image obj
:param imageId: image id string
:param in_digests: candidate digests
:param bundle: bundle dict to evaluate
:return: tuple of (policy_data, whitelist_data, policy_name, whitelist_names, matchstring, mapping_rule_json obj, evalhash)
"""
if not image or not bundle:
raise Exception("input error")
if not verify_policy_bundle(bundle=bundle):
raise Exception("input bundle does not conform to bundle schema")
ret = []
image_infos = []
image_info = anchore_utils.get_all_image_info(image)
if image_info and image_info not in image_infos:
image_infos.append(image_info)
for m in bundle['mappings']:
polname = m['policy_id']
wlnames = m['whitelist_ids']
for image_info in image_infos:
#_logger.info("IMAGE INFO: " + str(image_info))
ii = {}
ii.update(image_info)
registry = ii.pop('registry', "N/A")
repo = ii.pop('repo', "N/A")
tags = []
fulltag = ii.pop('fulltag', "N/A")
if fulltag != 'N/A':
tinfo = anchore_utils.parse_dockerimage_string(fulltag)
if 'tag' in tinfo and tinfo['tag']:
tag = tinfo['tag']
for t in [image, fulltag]:
tinfo = anchore_utils.parse_dockerimage_string(t)
if 'tag' in tinfo and tinfo['tag'] and tinfo['tag'] not in tags:
tags.append(tinfo['tag'])
digest = ii.pop('digest', "N/A")
digests = [digest]
for d in image_info['digests']:
dinfo = anchore_utils.parse_dockerimage_string(d)
if 'digest' in dinfo and dinfo['digest']:
digests.append(dinfo['digest'])
p_ids = []
p_names = []
for p in bundle['policies']:
p_ids.append(p['id'])
p_names.append(p['name'])
wl_ids = []
wl_names = []
for wl in bundle['whitelists']:
wl_ids.append(wl['id'])
wl_names.append(wl['name'])
if polname not in p_ids:
_logger.info("policy not in bundle: " + str(polname))
continue
skip=False
for wlname in wlnames:
if wlname not in wl_ids:
_logger.info("whitelist not in bundle" + str(wlname))
skip=True
if skip:
continue
mname = m['name']
mregistry = m['registry']
mrepo = m['repository']
if m['image']['type'] == 'tag':
mtag = m['image']['value']
mdigest = None
mimageId = None
elif m['image']['type'] == 'digest':
mdigest = m['image']['value']
mtag = None
mimageId = None
elif m['image']['type'] == 'id':
mimageId = m['image']['value']
mtag = None
mdigest = None
else:
mtag = mdigest = mimageId = None
mregistry_rematch = mregistry
mrepo_rematch = mrepo
mtag_rematch = mtag
try:
matchtoks = []
for tok in mregistry.split("*"):
matchtoks.append(re.escape(tok))
mregistry_rematch = "^" + '(.*)'.join(matchtoks) + "$"
matchtoks = []
for tok in mrepo.split("*"):
matchtoks.append(re.escape(tok))
mrepo_rematch = "^" + '(.*)'.join(matchtoks) + "$"
matchtoks = []
for tok in mtag.split("*"):
matchtoks.append(re.escape(tok))
mtag_rematch = "^" + '(.*)'.join(matchtoks) + "$"
except Exception as err:
_logger.error("could not set up regular expression matches for mapping check - exception: " + str(err))
_logger.debug("matchset: " + str([mregistry_rematch, mrepo_rematch, mtag_rematch]) + " : " + str([mregistry, mrepo, mtag]) + " : " + str([registry, repo, tag, tags]))
if registry == mregistry or mregistry == '*' or re.match(mregistry_rematch, registry):
_logger.debug("checking mapping for image ("+str(image_info)+") match.")
if repo == mrepo or mrepo == '*' or re.match(mrepo_rematch, repo):
doit = False
matchstring = mname + ": N/A"
if tag:
if False and (mtag == tag or mtag == '*' or mtag in tags or re.match(mtag_rematch, tag)):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mtag])
doit = True
else:
for t in tags:
if re.match(mtag_rematch, t):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mtag])
doit = True
break
if not doit and (digest and (mdigest == digest or mdigest in in_digests or mdigest in digests)):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mdigest])
doit = True
if not doit and (imageId and (mimageId == imageId)):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mimageId])
doit = True
matchstring = matchstring.encode('utf8')
if doit:
_logger.debug("match found for image ("+str(image_info)+") matchstring ("+str(matchstring)+")")
wldata = []
wldataset = set()
for wlname in wlnames:
wldataset = set(list(wldataset) + extract_whitelist_data(bundle, wlname))
wldata = list(wldataset)
poldata = extract_policy_data(bundle, polname)
wlnames.sort()
evalstr = ','.join([polname] + wlnames)
evalhash = hashlib.md5(evalstr).hexdigest()
ret.append( ( poldata, wldata, polname,wlnames, matchstring, m, evalhash) )
return(ret)
else:
_logger.debug("no match found for image ("+str(image_info)+") match.")
else:
_logger.debug("no match found for image ("+str(image_info)+") match.")
return(ret)
def execute_gates(imageId, policies, refresh=True):
import random
success = True
anchore_config = contexts['anchore_config']
imagename = imageId
gatesdir = '/'.join([anchore_config["scripts_dir"], "gates"])
workingdir = '/'.join([anchore_config['anchore_data_dir'], 'querytmp'])
outputdir = workingdir
_logger.info(imageId + ": evaluating policies...")
for d in [outputdir, workingdir]:
if not os.path.exists(d):
os.makedirs(d)
imgfile = '/'.join([workingdir, "queryimages." + str(random.randint(0, 99999999))])
anchore_utils.write_plainfile_fromstr(imgfile, imageId)
try:
gmanifest, failedgates = anchore_utils.generate_gates_manifest()
if failedgates:
_logger.error("some gates failed to run - check the gate(s) modules for errors: " + str(','.join(failedgates)))
success = False
else:
success = True
for gatecheck in policies.keys():
# get all commands that match the gatecheck
gcommands = []
for gkey in gmanifest.keys():
if gmanifest[gkey]['gatename'] == gatecheck:
gcommands.append(gkey)
# assemble the params from the input policy for this gatecheck
params = []
for trigger in policies[gatecheck].keys():
if 'params' in policies[gatecheck][trigger] and policies[gatecheck][trigger]['params']:
params.append(policies[gatecheck][trigger]['params'])
if not params:
params = ['all']
if gcommands:
for command in gcommands:
cmd = [command] + [imgfile, anchore_config['image_data_store'], outputdir] + params
_logger.debug("running gate command: " + str(' '.join(cmd)))
(rc, sout, cmdstring) = anchore_utils.run_command(cmd)
if rc:
_logger.error("FAILED")
_logger.error("\tCMD: " + str(cmdstring))
_logger.error("\tEXITCODE: " + str(rc))
_logger.error("\tOUTPUT: " + str(sout))
success = False
else:
_logger.debug("")
_logger.debug("\tCMD: " + str(cmdstring))
_logger.debug("\tEXITCODE: " + str(rc))
_logger.debug("\tOUTPUT: " + str(sout))
_logger.debug("")
else:
_logger.warn("WARNING: gatecheck ("+str(gatecheck)+") line in policy, but no gates were found that match this gatecheck")
except Exception as err:
_logger.error("gate evaluation failed - exception: " + str(err))
finally:
if imgfile and os.path.exists(imgfile):
try:
os.remove(imgfile)
except:
_logger.error("could not remove tempfile: " + str(imgfile))
if success:
report = generate_gates_report(imageId)
contexts['anchore_db'].save_gates_report(imageId, report)
_logger.info(imageId + ": evaluated.")
return(success)
def generate_gates_report(imageId):
# this routine reads the results of image gates and generates a formatted report
report = {}
outputs = contexts['anchore_db'].list_gate_outputs(imageId)
for d in outputs:
report[d] = contexts['anchore_db'].load_gate_output(imageId, d)
return(report)
def evaluate_gates_results(imageId, policies, image_whitelist, global_whitelist):
ret = list()
fullret = list()
final_gate_action = 'GO'
for m in policies.keys():
gdata = contexts['anchore_db'].load_gate_output(imageId, m)
for l in gdata:
(k, v) = re.match('(\S*)\s*(.*)', l).group(1, 2)
imageId = imageId
check = m
trigger = k
output = v
triggerId = hashlib.md5(''.join([check,trigger,output])).hexdigest()
# if the output is structured (i.e. decoded as an
# anchore compatible json string) then extract the
# elements for display
try:
json_output = json.loads(output)
if 'id' in json_output:
triggerId = str(json_output['id'])
if 'desc' in json_output:
output = str(json_output['desc'])
except:
pass
if k in policies[m]:
trigger = k
action = policies[check][trigger]['action']
r = {'imageId':imageId, 'check':check, 'triggerId':triggerId, 'trigger':trigger, 'output':output, 'action':action}
# this is where whitelist check should go
whitelisted = False
whitelist_type = "none"
if global_whitelist and ([m, triggerId] in global_whitelist):
whitelisted = True
whitelist_type = "global"
elif image_whitelist and 'ignore' in image_whitelist and (r in image_whitelist['ignore']):
whitelisted = True
whitelist_type = "image"
else:
# look for prefix wildcards
try:
for [gmod, gtriggerId] in global_whitelist:
if gmod == m:
# special case for backward compat
try:
if gmod == 'ANCHORESEC' and not re.match(".*\*.*", gtriggerId) and re.match("^CVE.*|^RHSA.*", gtriggerId):
gtriggerId = gtriggerId + "*"
except Exception as err:
_logger.warn("problem with backward compat modification of whitelist trigger - exception: " + str(err))
matchtoks = []
for tok in gtriggerId.split("*"):
matchtoks.append(re.escape(tok))
rematch = "^" + '(.*)'.join(matchtoks) + "$"
_logger.debug("checking regexp wl<->triggerId for match: " + str(rematch) + " : " + str(triggerId))
if re.match(rematch, triggerId):
_logger.debug("found wildcard whitelist match")
whitelisted = True
whitelist_type = "global"
break
except Exception as err:
_logger.warn("problem with prefix wildcard match routine - exception: " + str(err))
fullr = {}
fullr.update(r)
fullr['whitelisted'] = whitelisted
fullr['whitelist_type'] = whitelist_type
fullret.append(fullr)
if not whitelisted:
if policies[m][k]['action'] == 'STOP':
final_gate_action = 'STOP'
elif final_gate_action != 'STOP' and policies[m][k]['action'] == 'WARN':
final_gate_action = 'WARN'
ret.append(r)
else:
# whitelisted, skip evaluation
pass
ret.append({'imageId':imageId, 'check':'FINAL', 'trigger':'FINAL', 'output':"", 'action':final_gate_action})
fullret.append({'imageId':imageId, 'check':'FINAL', 'trigger':'FINAL', 'output':"", 'action':final_gate_action, 'whitelisted':False, 'whitelist_type':"none", 'triggerId':"N/A"})
return(ret, fullret)
def structure_eval_results(imageId, evalresults, show_triggerIds=False, show_whitelisted=False, imageName=None):
if not imageName:
imageName = imageId
record = {}
record['result'] = {}
record['result']['header'] = ['Image_Id', 'Repo_Tag']
if show_triggerIds:
record['result']['header'].append('Trigger_Id')
record['result']['header'] += ['Gate', 'Trigger', 'Check_Output', 'Gate_Action']
if show_whitelisted:
record['result']['header'].append('Whitelisted')
record['result']['rows'] = list()
for m in evalresults:
id = imageId
name = imageName
gate = m['check']
trigger = m['trigger']
output = m['output']
triggerId = m['triggerId']
action = m['action']
row = [id[0:12], name]
if show_triggerIds:
row.append(triggerId)
row += [gate, trigger, output, action]
if show_whitelisted:
row.append(m['whitelist_type'])
if not m['whitelisted'] or show_whitelisted:
record['result']['rows'].append(row)
if gate == 'FINAL':
record['result']['final_action'] = action
return(record)
# small test
if __name__ == '__main__':
from anchore.configuration import AnchoreConfiguration
config = AnchoreConfiguration(cliargs={})
anchore_utils.anchore_common_context_setup(config)
policies = {}
whitelists = {}
mappings = []
pol0 = read_policy(name=str(uuid.uuid4()), file='/root/.anchore/conf/anchore_gate.policy')
pol1 = read_policy(name=str(uuid.uuid4()), file='/root/.anchore/conf/anchore_gate.policy')
policies.update(pol0)
policies.update(pol1)
gl0 = read_whitelist(name=str(uuid.uuid4()))
wl0 = read_whitelist(name=str(uuid.uuid4()), file='/root/wl0')
whitelists.update(gl0)
whitelists.update(wl0)
map0 = create_mapping(map_name="default", policy_name=policies.keys()[0], whitelists=whitelists.keys(), repotagstring='*/*:*')
mappings.append(map0)
bundle = create_policy_bundle(name='default', policies=policies, policy_version=default_policy_version, whitelists=whitelists, whitelist_version=default_whitelist_version, mappings=mappings)
print "CREATED BUNDLE: " + json.dumps(bundle, indent=4)
rc = write_policy_bundle(bundle_file="/tmp/bun.json", bundle=bundle)
newbun = read_policy_bundle(bundle_file="/tmp/bun.json")
if newbun != bundle:
print "BUNDLE RESULT DIFFERENT AFTER SAVE/LOAD"
thebun = convert_to_policy_bundle(name='default', policy_file='/root/.anchore/conf/anchore_gate.policy', policy_version=default_policy_version, whitelist_files=['/root/wl0'], whitelist_version=default_whitelist_version)
rc = write_policy_bundle(bundle_file="/tmp/bun1.json", bundle=thebun)
pol0 = read_policy(name="meh", file='/root/.anchore/conf/anchore_gate.policy')
policies = structure_policy(pol0['meh'])
#rc = execute_gates("4a415e3663882fbc554ee830889c68a33b3585503892cc718a4698e91ef2a526", policies)
result, image_ecode = run_bundle(anchore_config=config, image='alpine', matchtags=[], bundle=thebun)
with open("/tmp/a", 'w') as OFH:
OFH.write(json.dumps(result, indent=4))
try:
result, image_ecode = run_bundle_stateless(anchore_config=config, image='alpine', matchtags=[], bundle=thebun)
with open("/tmp/b", 'w') as OFH:
OFH.write(json.dumps(result, indent=4))
except Exception as err:
import traceback
traceback.print_exc()
print str(err)
|
25805
|
import ROOT,sys
from larlite import larlite as fmwk1
from larcv import larcv as fmwk2
from ROOT import handshake
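# Rough sketch of intent, inferred from the calls below: step through larlite events and
# larcv entries in lockstep and let HandShaker fill the larlite PFParticle/Vertex/Shower/
# Track/Cluster/Hit/Association products from the larcv PGraph/Pixel2D products.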
io1=fmwk1.storage_manager(fmwk1.storage_manager.kBOTH)
io1.add_in_filename(sys.argv[1])
io1.set_out_filename('boke.root')
io1.open()
io2=fmwk2.IOManager(fmwk2.IOManager.kREAD)
io2.add_in_file(sys.argv[2])
io2.initialize()
hs=handshake.HandShaker()
ctr=0
while io1.next_event() and io2.read_entry(ctr):
ev_pfpart = io1.get_data(fmwk1.data.kPFParticle, "dl")
ev_vertex = io1.get_data(fmwk1.data.kVertex, "dl")
ev_shower = io1.get_data(fmwk1.data.kShower, "dl")
ev_track = io1.get_data(fmwk1.data.kTrack, "dl")
ev_cluster = io1.get_data(fmwk1.data.kCluster, "dl")
ev_hit = io1.get_data(fmwk1.data.kHit, "dl")
ev_ass = io1.get_data(fmwk1.data.kAssociation,"dl")
ev_hit_in = io1.get_data(fmwk1.data.kHit, "gaushit")
ev_pgraph = io2.get_data(fmwk2.kProductPGraph,'test')
ev_pixel2d = io2.get_data(fmwk2.kProductPixel2D,'test_ctor')
hs.pixel_distance_threshold(1.)
hs.set_larlite_pointers(ev_pfpart, ev_vertex,
ev_shower, ev_track,
ev_cluster, ev_hit,
ev_ass)
hs.construct(ev_pgraph, ev_pixel2d, ev_hit_in)
io1.set_id(io1.run_id(), io1.subrun_id(), io1.event_id())
#io1.next_event()
#io1.go_to()
#io2.read_entry()
#io1.save_entry()
ctr+=1
io1.close()
io2.finalize()
|
25826
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = Flask(__name__)
app.config[
'SQLALCHEMY_DATABASE_URI'] = 'postgres://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
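# Typical Flask-Script/Flask-Migrate workflow (illustrative invocations; the script name
# is a placeholder):
#   python app.py db init && python app.py db migrate && python app.py db upgrade
#   python app.py runserver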
class UserData(db.Model):
__tablename__ = 'UserData'
Id = db.Column(db.Integer, primary_key=True)
Name = db.Column(db.String(64))
Description = db.Column(db.String(256))
CreateDate = db.Column(db.DateTime)
def __init__(self, Name, Description, CreateDate):
self.Name = Name
self.Description = Description
self.CreateDate = CreateDate
if __name__ == '__main__':
manager.run()
|
25837
|
from django.conf import settings
from django.conf.urls import url
from django.urls import LocalePrefixPattern, URLResolver, get_resolver, path
from TWLight.i18n.views import set_language
# Direct rip from django.conf.urls.i18n, but imports our local set_language
# from GitHub
def i18n_patterns(*urls, prefix_default_language=True):
"""
Add the language code prefix to every URL pattern within this function.
This may only be used in the root URLconf, not in an included URLconf.
"""
if not settings.USE_I18N:
return list(urls)
return [
URLResolver(
LocalePrefixPattern(prefix_default_language=prefix_default_language),
list(urls),
)
]
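# Illustrative root-URLconf usage (the view name is a placeholder):
#   urlpatterns += i18n_patterns(path("about/", about_view))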
urlpatterns = [path("setlang/", set_language, name="set_language")]
|
25840
|
from __future__ import absolute_import
import chainer
import chainer.functions as F
from .convolution import ConvolutionND
def _pair(x, ndim=2):
if hasattr(x, '__getitem__'):
return x
return [x]*ndim
class PixelShuffleUpsamplerND(chainer.Chain):
"""Pixel Shuffler for the super resolution.
This upsampler is effective upsampling method compared with the deconvolution.
The deconvolution has a problem of the checkerboard artifact.
A detail of this problem shows the following.
http://distill.pub/2016/deconv-checkerboard/
See also:
https://arxiv.org/abs/1609.05158
"""
def __init__(self, ndim, in_channels, out_channels, resolution,
ksize=None, stride=1, pad=0, pad_mode='reflect', nobias=False,
initialW=None, initial_bias=None):
super(PixelShuffleUpsamplerND, self).__init__()
self.ndim = ndim
self.resolution = resolution
self.in_channels = in_channels
self.out_channels = out_channels
self.pad = _pair(pad, self.ndim)
self.pad_mode = pad_mode
with self.init_scope():
m = self.resolution ** self.ndim
self.conv = ConvolutionND(
ndim, in_channels, out_channels * m,
ksize, stride, self.pad, self.pad_mode, nobias,
initialW, initial_bias)
def __call__(self, x):
r = self.resolution
out = self.conv(x)
batchsize = out.shape[0]
in_channels = out.shape[1]
out_channels = self.out_channels
in_shape = out.shape[2:]
out_shape = tuple(s * r for s in in_shape)
r_tuple = tuple(self.resolution for _ in range(self.ndim))
out = F.reshape(out, (batchsize, out_channels,) + r_tuple + in_shape)
out = F.transpose(out, self.make_transpose_indices())
out = F.reshape(out, (batchsize, out_channels, ) + out_shape)
return out
def make_transpose_indices(self):
si = [0, 1]
si.extend([2 * (i + 1) + 1 for i in range(self.ndim)])
si.extend([2 * (i + 1) for i in range(self.ndim)])
return si
class PixelShuffleUpsampler2D(PixelShuffleUpsamplerND):
def __init__(self, in_channels, out_channels, resolution,
ksize=None, stride=1, pad=0, pad_mode='reflect', nobias=False,
initialW=None, initial_bias=None):
super(PixelShuffleUpsampler2D, self).__init__(
2, in_channels, out_channels, resolution,
ksize, stride, pad, pad_mode, nobias,
initialW, initial_bias)
class PixelShuffleUpsampler3D(PixelShuffleUpsamplerND):
def __init__(self, in_channels, out_channels, resolution,
ksize=None, stride=1, pad=0, pad_mode='reflect', nobias=False,
initialW=None, initial_bias=None):
super(PixelShuffleUpsampler3D, self).__init__(
3, in_channels, out_channels, resolution,
ksize, stride, pad, pad_mode, nobias,
initialW, initial_bias)
|
25899
|
import alignments
import re
import read
import binaryIO
import math
import os
import preprocess
import time
class Compressor:
aligned = None
# 0 - zlib
# 1 - lzma
# 2 - bz2
compressMethod = 0
covSize = 0
totalSize = 0
def __init__(self, frag_len_cutoff):
if self.compressMethod == 0:
self.zlib = __import__('zlib')
elif self.compressMethod == 1:
self.lzma = __import__('lzma')
elif self.compressMethod == 2:
self.bz2 = __import__('bz2')
if frag_len_cutoff:
    print('Set fragment length cutoff to %d' % frag_len_cutoff)
# always set the attribute so later checks of self.frag_len_cutoff do not raise
self.frag_len_cutoff = frag_len_cutoff
def compress(self, samFilename, compressedFilename, gtf, min_filename, frag_len_z_cutoff, split_diff_strands, split_discordant):
''' Compress the alignments from samFilename into compressedFilename,
optionally writing an intermediate SAM file (min_filename)
'''
self.p = preprocess.Preprocessor(samFilename, frag_len_z_cutoff, split_diff_strands)
if not self.frag_len_cutoff:
self.frag_len_cutoff = self.p.frag_len_cutoff
print('Using fragment length cutoff of ' + str(self.frag_len_cutoff))
if split_diff_strands:
print('Splitting mates on different strands')
else:
print('Not splitting mates on different strands')
if split_discordant:
print('Splitting discordant')
else:
print('Not splitting discordant')
# Reads on different strands that should be unpaired
self.diff_strand_unpaired = self.p.unpaired
del self.p
# Read header
header = ''
with open(samFilename, 'r') as f:
for line in f:
if line[0] == '@':
header += line
else:
break
self.chromosomes = self.parseSAMHeader(header)
self.aligned = alignments.Alignments(self.chromosomes, self.frag_len_cutoff, split_discordant)
if gtf:
self.aligned.gtf_exons = self.parseGTF(gtf, self.aligned.chromOffsets)
self.compressByBundle(samFilename, compressedFilename, min_filename)
#print('%d unmatched' % self.aligned.numUnmatched)
print('Approximately %d / %d = %f%% of compressed file is coverage' % (self.covSize, self.totalSize, 100.0*float(self.covSize)/float(self.totalSize)))
print('Finished compressing')
def compressByBundle(self, input_name, compressed_name, intermediate_name=None):
'''
Read a sorted SAM file and compress in segments determined by clusters of reads
:param filename:
:return:
'''
# If coverage is 0 for at least this many bases, treat it as the end of a potential gene (bundle)
overlapRadius = 50
spliced_index = []
bundles = []
first = True
bundle_id = 0
read_id = 0
diff_strand_unpaired_id = 0
num_diff_strand_unpaired = len(self.diff_strand_unpaired)
firstR = None
with open(input_name, 'r') as filehandle:
id = 0
start_id = 0
for line in filehandle:
# Check if header line
if line[0] == '@':
continue
row = line.strip().split('\t')
if row[2] == '*':
# HISAT includes unmapped reads at the end of the file; we just skip them
continue
if not row[2] in self.chromosomes[0]:
print('Error! Chromosome ' + str(row[2]) + ' not found!')
exit()
# Starting position of this read
start = self.aligned.chromOffsets[row[2]] + int(row[3])
if self.aligned.gene_bounds and start > (self.aligned.gene_bounds[-1] + overlapRadius):
# Compress most recent bundle
self.aligned.finalizeExons()
self.aligned.finalizeUnmatched()
self.aligned.finalize_cross_bundle_reads()
#if self.aligned.gene_bounds[0] < 100480943 and self.aligned.gene_bounds[1] > 100478955:
# print(bundle_id)
# print(self.aligned.gene_bounds)
# print(self.aligned.exons)
# print(self.aligned.gene_bounds[0] - self.aligned.chromOffsets['X'])
# print(self.aligned.gene_bounds[1] - self.aligned.chromOffsets['X'])
# exit()
bundle_id += 1
start_id = id
bundles.append(self.aligned.exons)
# Write to intermediate file
if intermediate_name:
if first:
# If it's the first bundle, write the header as well
with open(intermediate_name, 'w') as f1:
read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, True, False, read_id)
else:
with open(intermediate_name, 'a') as f1:
read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, False, False, read_id)
junctions, maxReadLen = self.aligned.computeBuckets()
self.sortedJuncs = sorted(junctions.keys())
# Compress bundle to temporary file
if first:
mode = 'wb'
else:
mode = 'ab'
with open('temp.bin', mode) as f:
l = self.compressBundle(junctions, maxReadLen, f)
spliced_index.append(l)
# Start new bundle
self.aligned.resetBundle()
self.aligned.exons.add(start)
first = False
# Process read
if row[5] == '*':
# HISAT occasionally prints * as the cigar string when it is identical to its mate
#print('No cigar string')
#print(row[0])
#exit()
exons = None
else:
exons = self.parseCigar(row[5], int(row[3]))
# find XS (strand) and NH values
strand = None
NH = 1
for r in row[11 : len(row)]:
if r[0:5] == 'XS:A:' or r[0:5] == 'XS:a:':
strand = r[5]
elif r[0:3] == 'NH:':
NH = int(r[5:])
flags = int(row[1])
if flags & 4:
# Read is unmapped
continue
r = read.Read(row[2], int(row[3]), exons, strand, NH)
#r.name = row[0]
if row[6] == '*' or (flags & 8):
paired = False
elif diff_strand_unpaired_id < num_diff_strand_unpaired and id == self.diff_strand_unpaired[diff_strand_unpaired_id]:
#if not row[6] == '*':
# print('\t'.join(row))
paired = False
diff_strand_unpaired_id += 1
else:
paired = True
r.bundle = bundle_id
r.pairOffset = int(row[7])
if row[6] == '=':
r.pairChrom = row[2]
else:
r.pairChrom = row[6]
self.aligned.processRead(row[0], r, paired)
id += 1
# Compress final cluster
self.aligned.finalizeExons()
self.aligned.finalizeUnmatched()
self.aligned.finalize_cross_bundle_reads()
bundle_id += 1
bundles.append(self.aligned.exons)
# Write to intermediate file
if intermediate_name:
if first:
# If it's the first bundle, write the header as well
with open(intermediate_name, 'w') as f1:
read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, True, False, read_id)
first = False
else:
with open(intermediate_name, 'a') as f1:
read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, False, False, read_id)
junctions, maxReadLen = self.aligned.computeBuckets()
self.sortedJuncs = sorted(junctions.keys())
# Compress bundle to temporary file
if first:
mode = 'wb'
else:
mode = 'ab'
with open('temp.bin', mode) as f:
l = self.compressBundle(junctions, maxReadLen, f)
spliced_index.append(l)
leftovers = 0
for k,v in self.aligned.cross_bundle_reads.items():
#if len(v) > 0:
# print(k)
# print(v)
# exit()
leftovers += len(v)
print('%d cross-bundle reads unmatched' % leftovers)
bundle_lens = [c[-1]-c[0] for c in bundles]
print('Minimum bundle length: %d' % min(bundle_lens))
print('Maximum bundle length: %d' % max(bundle_lens))
print('Average bundle length: %d'% (sum(bundle_lens) / len(bundle_lens)))
# Write index information and append spliced and unspliced files
with open(compressed_name, 'wb') as f:
s = binaryIO.writeChroms(self.chromosomes)
s += binaryIO.writeClusters(bundles)
s += binaryIO.writeList(spliced_index)
f.write(s)
# Compress bundle-spanning buckets
self.compressCrossBundle(self.aligned.cross_bundle_buckets, self.aligned.max_cross_bundle_read_len, bundle_id, f)
# Move contents of temporary file to output file
with open('temp.bin', 'rb') as f2:
f.write(f2.read())
os.remove('temp.bin')
def compressBundle(self, junctions, maxReadLen, filehandle):
# Determine the number of bytes for read lengths
readLenBytes = binaryIO.findNumBytes(maxReadLen)
cluster = binaryIO.valToBinary(1, readLenBytes)
cluster += binaryIO.writeJunctionsList(self.sortedJuncs, 2)
self.totalSize += len(cluster)
# TODO: No need for junc_lens?
junc_lens = []
junc_string = b''
for j in self.sortedJuncs:
#if self.aligned.exons[0] == 100476370 and j == [2, None, 1]:
#
s, c, t = binaryIO.writeJunction(readLenBytes, junctions[j])
self.covSize += c
self.totalSize += t
junc_lens.append(len(s))
junc_string += s
#cluster += binaryIO.writeList(junc_lens)
cluster += junc_string
# Write to file
start = filehandle.tell()
filehandle.write(self.compressString(cluster))
# return length of cluster in file
return filehandle.tell() - start
def compressCrossBundle(self, cross_bundle_buckets, maxReadLen, num_bundles, filehandle):
'''
Compress the bundle-spanning buckets
'''
readLenBytes = binaryIO.findNumBytes(maxReadLen)
bundleIdBytes = binaryIO.findNumBytes(num_bundles)
buckets_sorted = sorted(cross_bundle_buckets.keys())
if len(buckets_sorted) > 0:
print('%d cross-bundle buckets' % len(buckets_sorted))
pos = filehandle.tell()
chunk_size = 20
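# Buckets are serialized in chunks of chunk_size; each chunk is compressed independently
# and its compressed length recorded in chunk_lens (written into the index below),
# presumably so a reader can locate a chunk without decompressing everything before it.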
num_chunks = math.ceil(len(buckets_sorted) / chunk_size)
chunk_lens = [0] * num_chunks
index = binaryIO.valToBinary(4, len(buckets_sorted))
index += binaryIO.valToBinary(2, chunk_size)
index += binaryIO.valToBinary(1, readLenBytes)
index += binaryIO.writeCrossBundleBucketNames(bundleIdBytes, cross_bundle_buckets, buckets_sorted)
self.totalSize += len(index)
main = b''
chunk = b''
chunk_id = 0
for i in range(len(buckets_sorted)):
b = buckets_sorted[i]
ch, c, t = binaryIO.writeCrossBundleBucket(readLenBytes, cross_bundle_buckets[b])
chunk += ch
self.covSize += c
self.totalSize += t
if (i+1) % chunk_size == 0:
compressed = self.compressString(chunk)
chunk_lens[chunk_id] = len(compressed)
chunk_id += 1
main += compressed
chunk = b''
if len(chunk) > 0:
compressed = self.compressString(chunk)
chunk_lens[chunk_id] = len(compressed)
main += compressed
index += binaryIO.writeList(chunk_lens)
index = self.compressString(index)
length = len(index)
numBytes = binaryIO.findNumBytes(length)
binaryIO.writeVal(filehandle, 1, numBytes)
binaryIO.writeVal(filehandle, numBytes, length)
filehandle.write(index)
filehandle.write(main)
print('Compressed size: %d' % (filehandle.tell() - pos))
else:
binaryIO.writeVal(filehandle, 1, 1)
binaryIO.writeVal(filehandle, 1, 0)
def parseCigar(self, cigar, offset):
''' Parse the cigar string starting at the given index of the genome
Returns a list of offsets for each exonic region of the read [(start1, end1), (start2, end2), ...]
'''
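# Worked example: parseCigar('10M5N20M', 100) -> [[100, 110], [115, 135]]
# ('N' separates exons, 'M' starts or extends one, 'D' only extends an open exon,
# and 'S' soft-clips are skipped without advancing the genome offset).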
exons = []
newExon = True
# Parse cigar string
match = re.search("\D", cigar)
while match:
index = match.start()
length = int(''.join(cigar[:index]))
if cigar[index] == 'N':
# Separates contiguous exons, so set boolean to start a new one
newExon = True
elif cigar[index] == 'M':
# If in the middle of a contiguous exon, append the length to it, otherwise start a new exon
if newExon:
exons.append([offset, offset+length])
newExon = False
else:
exons[-1][1] += length
elif cigar[index] == 'D':
# If in the middle of a contiguous exon, append the deleted length to it
if not newExon:
exons[-1][1] += length
# Skip soft clipping
if not cigar[index] == 'S':
offset += length
cigar = cigar[index+1:]
match = re.search("\D", cigar)
return exons
def parseSAMHeader(self, header):
# In the order they appear in the header
chromNames = []
chromLens = []
# Parse the @SQ header lines to collect chromosome names and lengths
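# e.g. an "@SQ\tSN:chr1\tLN:248956422" header line (values illustrative) contributes
# 'chr1' to chromNames and 248956422 to chromLens.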
for line in header.split('\n'):
if line[0:3] == '@SQ':
row = line.strip().split('\t')
chromNames.append(row[1][3:])
chromLens.append(int(row[2][3:]))
return [chromNames, chromLens]
def parseGTF(self, gtf, chromOffsets):
exons = set()
with open(gtf, 'r') as f:
for line in f:
row = line.rstrip().split('\t')
if row[2] == 'exon':
exons.add(int(row[3]) + chromOffsets[row[0]])
exons.add(int(row[4]) + chromOffsets[row[0]])
return sorted(list(exons))
def compressString(self, s):
''' Use a predefined python library to compress the given string.
Return the compressed string '''
if self.compressMethod == 0:
return self.zlib.compress(s)
elif self.compressMethod == 1:
return self.lzma.compress(s)
elif self.compressMethod == 2:
return self.bz2.compress(s)
|
25914
|
import inspect
from collections import defaultdict
from typing import Dict, List, NamedTuple
from .core import PluginFinder, PluginSpec
from .discovery import PackagePathPluginFinder
class EntryPoint(NamedTuple):
name: str
value: str
group: str
EntryPointDict = Dict[str, List[str]]
def discover_entry_points(finder: PluginFinder) -> EntryPointDict:
"""
Creates a dictionary for the entry_points attribute of setuptools' setup(), where keys are
stevedore plugin namespaces, and values are lists of "name = module:object" pairs.
:return: an entry_point dictionary
"""
return to_entry_point_dict([spec_to_entry_point(spec) for spec in finder.find_plugins()])
def to_entry_point_dict(eps: List[EntryPoint]) -> EntryPointDict:
result = defaultdict(list)
names = defaultdict(set) # book-keeping to check duplicates
for ep in eps:
if ep.name in names[ep.group]:
raise ValueError("Duplicate entry point %s %s" % (ep.group, ep.name))
result[ep.group].append("%s=%s" % (ep.name, ep.value))
names[ep.group].add(ep.name)
return result
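# Example result shape (namespace/name/module are illustrative):
#   {"my.plugin.namespace": ["my_plugin=my_pkg.plugins:create_my_plugin"]}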
def spec_to_entry_point(spec: PluginSpec) -> EntryPoint:
module = inspect.getmodule(spec.factory).__name__
name = spec.factory.__name__
path = f"{module}:{name}"
return EntryPoint(group=spec.namespace, name=spec.name, value=path)
def find_plugins(where=".", exclude=(), include=("*",)) -> EntryPointDict:
"""
Utility for setup.py that collects all plugins from the specified path, and creates a dictionary for entry_points.
For example:
setup(
entry_points=find_plugins()
)
"""
return discover_entry_points(
PackagePathPluginFinder(where=where, exclude=exclude, include=include)
)
|
25986
|
from transformers import model_paths
def test_candy_model():
assert model_paths.CANDY_FAST_NEURAL_TRANSFER_MODEL == "models/candy.t7"
def test_feathers_model():
assert model_paths.FEATHERS_FAST_NEURAL_TRANSFER_MODEL == "models/feathers.t7"
def test_mosaic_model():
assert model_paths.MOSAIC_FAST_NEURAL_TRANSFER_MODEL == "models/mosaic.t7"
def test_the_scream_model():
assert model_paths.THE_SCREAM_FAST_NEURAL_TRANSFER_MODEL == "models/the_scream.t7"
def test_udnie_model():
assert model_paths.UDNIE_FAST_NEURAL_TRANSFER_MODEL == "models/udnie.t7"
def test_celeba_distill_model():
assert model_paths.CELEBA_DISTILL_ANIME_GAN == "models/celeba_distill.pt"
def test_face_paint_model():
assert model_paths.FACE_PAINT_ANIME_GAN == "models/face_paint_512_v2.pt"
def test_paprika_model():
assert model_paths.PAPRIKA_ANIME_GAN == "models/paprika.pt"
|
26021
|
from watchFaceParser.elements.basicElements.imageSet import ImageSet
class Icon:
definitions = {
1: { 'Name': 'Images', 'Type': ImageSet},
2: { 'Name': 'NoWeatherImageIndex', 'Type': 'long'},
}
|
26028
|
from data_importers.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
council_id = "SCE"
addresses_name = "2021-11-10T10:12:49.277177/polling_station_export-2021-11-10.csv"
stations_name = "2021-11-10T10:12:49.277177/polling_station_export-2021-11-10.csv"
elections = ["2021-11-25"]
def address_record_to_dict(self, record):
if record.housepostcode in [
"YO21 1SU",
"YO12 5DB",
"YO14 9EW",
"YO21 3JU",
"YO21 3FP",
"YO11 3PQ",
]:
return None
if record.uprn == "10023875937": # 3 POSTGATE WAY, UGTHORPE, WHITBY
return None
return super().address_record_to_dict(record)
|
26044
|
from beem.utils import formatTimeString, resolve_authorperm, construct_authorperm, addTzInfo
from beem.nodelist import NodeList
from beem.comment import Comment
from beem import Steem
from beem.account import Account
from beem.instance import set_shared_steem_instance
from beem.blockchain import Blockchain
import time
import json
import os
import math
import dataset
import random
from datetime import date, datetime, timedelta
from dateutil.parser import parse
from beem.constants import STEEM_100_PERCENT
from steemrewarding.post_storage import PostsTrx
from steemrewarding.command_storage import CommandsTrx
from steemrewarding.vote_rule_storage import VoteRulesTrx
from steemrewarding.pending_vote_storage import PendingVotesTrx
from steemrewarding.config_storage import ConfigurationDB
from steemrewarding.vote_storage import VotesTrx
from steemrewarding.vote_log_storage import VoteLogTrx
from steemrewarding.failed_vote_log_storage import FailedVoteLogTrx
from steemrewarding.broadcast_vote_storage import BroadcastVoteTrx
from steemrewarding.utils import isfloat, upvote_comment, valid_age, upvote_comment_without_check
from steemrewarding.version import version as rewardingversion
from steemrewarding.account_storage import AccountsDB
from steemrewarding.version import version as rewarding_version
if __name__ == "__main__":
config_file = 'config.json'
if not os.path.isfile(config_file):
raise Exception("config.json is missing!")
else:
with open(config_file) as json_data_file:
config_data = json.load(json_data_file)
# print(config_data)
databaseConnector = config_data["databaseConnector"]
wallet_password = config_data["wallet_password"]
posting_auth_acc = config_data["posting_auth_acc"]
voting_round_sec = config_data["voting_round_sec"]
start_prep_time = time.time()
db = dataset.connect(databaseConnector)
# Create keyStorage
print("Start upvote_post_comments_timebased.py")
nobroadcast = False
# nobroadcast = True
postTrx = PostsTrx(db)
votesTrx = VotesTrx(db)
voteRulesTrx = VoteRulesTrx(db)
confStorage = ConfigurationDB(db)
pendingVotesTrx = PendingVotesTrx(db)
voteLogTrx = VoteLogTrx(db)
failedVoteLogTrx = FailedVoteLogTrx(db)
accountsTrx = AccountsDB(db)
broadcastVoteTrx = BroadcastVoteTrx(db)
conf_setup = confStorage.get()
# last_post_block = conf_setup["last_post_block"]
nodes = NodeList()
# nodes.update_nodes(weights={"block": 1})
try:
nodes.update_nodes()
except:
print("could not update nodes")
node_list = nodes.get_nodes(exclude_limited=False)
stm = Steem(node=node_list, num_retries=5, call_num_retries=3, timeout=15, nobroadcast=nobroadcast)
stm.wallet.unlock(wallet_password)
last_voter = None
print("Start apply new timebased votes")
voter_counter = 0
delete_pending_votes = []
rc_sp_to_low_account_list = []
vote_counter = 0
vote_count = 0
for pending_vote in pendingVotesTrx.get_command_list_timed():
settings = None
voter_acc = None
author, permlink = resolve_authorperm(pending_vote["authorperm"])
if pending_vote["voter"] in rc_sp_to_low_account_list:
continue
age_min = (datetime.utcnow() - pending_vote["comment_timestamp"]).total_seconds() / 60
maximum_vote_delay_min = pending_vote["maximum_vote_delay_min"]
if age_min < pending_vote["vote_delay_min"] - voting_round_sec / 2.0 / 60 - 3:
# print("%s is not ready yet - %.2f min should be %.2f" % (pending_vote["authorperm"], age_min, pending_vote["vote_delay_min"]))
continue
if settings is None:
settings = accountsTrx.get(pending_vote["voter"])
if settings is None:
voter_acc = Account(pending_vote["voter"], steem_instance=stm)
print("update %s - did not exists" % pending_vote["voter"])
posting_auth = False
for a in voter_acc["posting"]["account_auths"]:
if a[0] == posting_auth_acc:
posting_auth = True
if pending_vote["voter"] == posting_auth_acc:
posting_auth = True
accountsTrx.upsert({"name": pending_vote["voter"], "vp_update":datetime.utcnow(), "vp": voter_acc.vp, "down_vp": voter_acc.get_downvoting_power(),
"sp": voter_acc.sp, "rc": voter_acc.get_rc_manabar()["current_mana"] / 1e9, "last_update": datetime.utcnow(),
"posting_auth_acc": posting_auth})
pause_votes_below_vp = 0
settings = accountsTrx.get(pending_vote["voter"])
elif settings["sp"] is None or settings["vp"] is None or settings["last_update"] is None or settings["rc"] is None or settings["posting_auth_acc"] is None:
print("update %s - None" % pending_vote["voter"])
voter_acc = Account(pending_vote["voter"], steem_instance=stm)
posting_auth = False
for a in voter_acc["posting"]["account_auths"]:
if a[0] == posting_auth_acc:
posting_auth = True
if pending_vote["voter"] == posting_auth_acc:
posting_auth = True
accountsTrx.upsert({"name": pending_vote["voter"], "vp_update":datetime.utcnow(), "vp": voter_acc.vp, "down_vp": voter_acc.get_downvoting_power(),
"sp": voter_acc.sp, "rc": voter_acc.get_rc_manabar()["current_mana"] / 1e9, "last_update": datetime.utcnow(),
"posting_auth_acc": posting_auth})
settings = accountsTrx.get(pending_vote["voter"])
elif (datetime.utcnow() - settings["last_update"]).total_seconds() / 60 > 1:
print("update %s - last update was before %f s" % (pending_vote["voter"], (datetime.utcnow() - settings["last_update"]).total_seconds()))
voter_acc = Account(pending_vote["voter"], steem_instance=stm)
posting_auth = False
for a in voter_acc["posting"]["account_auths"]:
if a[0] == posting_auth_acc:
posting_auth = True
if pending_vote["voter"] == posting_auth_acc:
posting_auth = True
accountsTrx.upsert({"name": pending_vote["voter"], "vp_update":datetime.utcnow(), "vp": voter_acc.vp, "down_vp": voter_acc.get_downvoting_power(),
"sp": voter_acc.sp, "rc": voter_acc.get_rc_manabar()["current_mana"] / 1e9, "last_update": datetime.utcnow(),
"posting_auth_acc": posting_auth})
settings = accountsTrx.get(pending_vote["voter"])
if pending_vote["vote_weight"] > 0:
pause_votes_below_vp = settings["pause_votes_below_vp"]
vp = settings["vp"]
else:
pause_votes_below_vp = settings["pause_down_votes_below_down_vp"]
vp = settings["down_vp"]
vp_update = settings["last_update"]
if vp_update is not None:
diff_in_seconds = ((datetime.utcnow()) - (vp_update)).total_seconds()
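# Voting power regenerates linearly from 0 to 100 % over 5 days (432000 s); 10000 here is
# STEEM_100_PERCENT, so this yields the percentage points of VP regained since the last update.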
regenerated_vp = diff_in_seconds * 10000 / 432000 / 100
vp = vp + regenerated_vp
#down_vp = down_vp + regenerated_vp
if vp > 100:
vp = 100
#if down_vp > 100:
# down_vp = 100
if vp < pause_votes_below_vp:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Voting is paused (VP = %.2f %%, which below pause_votes_below_vp of %.2f %%)" % (vp, pause_votes_below_vp),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
# print("time vote %.2f s - %d votes" % (time.time() - start_prep_time, vote_count))
if (pending_vote["vote_weight"] is None or pending_vote["vote_weight"] == 0) and (pending_vote["vote_sbd"] is None or float(pending_vote["vote_sbd"]) <= 0):
# voter_acc = Account(pending_vote["voter"], steem_instance=stm)
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "vote_weight was set to zero. (%s %% and %s $)" % (pending_vote["vote_weight"], pending_vote["vote_sbd"]),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
if maximum_vote_delay_min < 0:
maximum_vote_delay_min = 9360  # 6.5 days (156 h)
if age_min > maximum_vote_delay_min + voting_round_sec / 60:
# voter_acc = Account(pending_vote["voter"], steem_instance=stm)
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "post is older than %.2f min." % (maximum_vote_delay_min),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
voter_counter += 1
# voter_acc = Account(pending_vote["voter"], steem_instance=stm)
if settings["sp"] < 0.1:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Could not vot %s, as Steem Power is almost zero." % (pending_vote["authorperm"]),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
print("Could not process %s - sp < 0.1" % pending_vote["authorperm"])
rc_sp_to_low_account_list.append(pending_vote["voter"])
continue
if settings["rc"] < 0.5:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Could not vot %s, as RC is almost zero." % (pending_vote["authorperm"]),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
print("Could not process %s - rc to low" % pending_vote["authorperm"])
rc_sp_to_low_account_list.append(pending_vote["voter"])
continue
vote_weight = pending_vote["vote_weight"]
if vote_weight is None or vote_weight == 0:
voter_acc = Account(pending_vote["voter"], steem_instance=stm)
vote_weight = voter_acc.get_vote_pct_for_SBD(float(pending_vote["vote_sbd"])) / 100.
if vote_weight > 100:
vote_weight = 100
elif vote_weight < 0.01:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "vote_weight was set to zero.",
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": voter_acc.vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
age_hour = ((datetime.utcnow()) - pending_vote["created"]).total_seconds() / 60 / 60
if age_hour > 156:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "post is older than 6.5 days.",
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
if vp < pending_vote["min_vp"]:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Voting power is %.2f %%, which is to low. (min_vp is %.2f %%)" % (vp, pending_vote["min_vp"]),
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
if pending_vote["max_votes_per_day"] > -1:
if settings is None:
settings = accountsTrx.get(pending_vote["voter"])
if settings is not None:
sliding_time_window = settings["sliding_time_window"]
else:
sliding_time_window = True
votes_24h_before = voteLogTrx.get_votes_per_day(pending_vote["voter"], author, sliding_time_window)
if votes_24h_before >= pending_vote["max_votes_per_day"]:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The author was already upvoted %d in the last 24h (max_votes_per_day is %d)." % (votes_24h_before, pending_vote["max_votes_per_day"]),
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
if pending_vote["max_votes_per_week"] > -1:
if settings is None:
settings = accountsTrx.get(pending_vote["voter"])
if settings is not None:
sliding_time_window = settings["sliding_time_window"]
else:
sliding_time_window = True
votes_168h_before = voteLogTrx.get_votes_per_week(pending_vote["voter"], author, sliding_time_window)
if votes_168h_before >= pending_vote["max_votes_per_week"]:
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The author was already upvoted %d in the last 7 days (max_votes_per_week is %d)." % (votes_168h_before, pending_vote["max_votes_per_week"]),
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"],"vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
if pending_vote["vp_scaler"] > 0:
vote_weight *= 1 - ((100 - vp) / 100 * pending_vote["vp_scaler"])
if abs(vote_weight) < 0.02:
error_msg = "Vote weight is zero or below zero (%.2f %%)" % vote_weight
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": error_msg,
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": settings["vp"], "down_vp": settings["down_vp"],"vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
cnt = 0
c = None
while c is None and cnt < 5:
cnt += 1
try:
c = Comment(pending_vote["authorperm"], use_tags_api=True, steem_instance=stm)
c.refresh()
except:
nodelist = NodeList()
nodelist.update_nodes()
stm = Steem(node=nodelist.get_nodes(), num_retries=5, call_num_retries=3, timeout=15, nobroadcast=nobroadcast)
time.sleep(1)
if cnt == 5:
print("Could not read %s" % (pending_vote["authorperm"]))
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "Could not process %s" % (pending_vote["authorperm"]),
"timestamp": datetime.utcnow(), "vote_weight": pending_vote["vote_weight"], "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
print("Could not process %s" % pending_vote["authorperm"])
continue
votes_list = votesTrx.get_authorperm_votes(pending_vote["authorperm"])
try:
if pending_vote["max_net_votes"] >= 0 and pending_vote["max_net_votes"] < len(votes_list):
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The number of post/comment votes (%d) is higher than max_net_votes (%d)." % (len(votes_list), pending_vote["max_net_votes"]),
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
except:
continue
if False and pending_vote["max_pending_payout"] >= 0 and pending_vote["max_pending_payout"] < float(c["pending_payout_value"]):
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": "The pending payout of post/comment votes (%.2f) is higher than max_pending_payout (%.2f)." % (float(c["pending_payout_value"]), pending_vote["max_pending_payout"]),
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
# check for max votes per day/week
already_voted = False
for v in votes_list:
if pending_vote["voter"] == v["voter"]:
already_voted = True
if not settings["posting_auth_acc"] or already_voted:
if already_voted:
error_msg = "already voted."
else:
error_msg = "posting authority is missing"
failedVoteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "error": error_msg,
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"min_vp": pending_vote["min_vp"], "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"main_post": pending_vote["main_post"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
continue
# success = upvote_comment(c, pending_vote["voter"], vote_weight)
if False:
reply_message = upvote_comment_without_check(c, pending_vote["voter"], vote_weight)
if reply_message is not None:
vote_count += 1
if pending_vote["leave_comment"]:
try:
if settings is None:
settings = accountsTrx.get(pending_vote["voter"])
if settings is not None and "upvote_comment" in settings and settings["upvote_comment"] is not None:
json_metadata = {'app': 'rewarding/%s' % (rewarding_version)}
reply_body = settings["upvote_comment"]
reply_body = reply_body.replace("{{name}}", "@%s" % c["author"] ).replace("{{voter}}", "@%s" % pending_vote["voter"])
c.reply(reply_body, author=pending_vote["voter"], meta=json_metadata)
except:
print("Could not leave comment!")
voteLogTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "author": c["author"],
"timestamp": datetime.utcnow(), "vote_weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"],
"voted_after_min": age_min, "vp": vp, "vote_when_vp_reached": pending_vote["vote_when_vp_reached"],
"trail_vote": pending_vote["trail_vote"], "main_post": pending_vote["main_post"],
"voter_to_follow": pending_vote["voter_to_follow"]})
expiration = formatTimeString(reply_message["expiration"]).replace(tzinfo=None)
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
else:
expiration = datetime.utcnow()
broadcastVoteTrx.add({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"],
"weight": vote_weight, "vote_delay_min": pending_vote["vote_delay_min"], "min_vp": pending_vote["min_vp"],
"vote_when_vp_reached": pending_vote["vote_when_vp_reached"], "main_post": pending_vote["main_post"],
"author": c["author"], "voted_after_min": 0, "created": datetime.utcnow(), "vp": settings["vp"], "down_vp": settings["down_vp"],
"maximum_vote_delay_min": pending_vote["maximum_vote_delay_min"], "comment_timestamp": pending_vote["comment_timestamp"],
"trail_vote": pending_vote["trail_vote"], "voter_to_follow": pending_vote["voter_to_follow"], "leave_comment": pending_vote["leave_comment"],
"vote_timestamp": pending_vote["comment_timestamp"] + timedelta(seconds=pending_vote["vote_delay_min"]/60),
"max_votes_per_day": pending_vote["max_votes_per_day"], "max_votes_per_week": pending_vote["max_votes_per_week"]})
delete_pending_votes.append({"authorperm": pending_vote["authorperm"], "voter": pending_vote["voter"], "vote_when_vp_reached": pending_vote["vote_when_vp_reached"]})
for pending_vote in delete_pending_votes:
pendingVotesTrx.delete(pending_vote["authorperm"], pending_vote["voter"], pending_vote["vote_when_vp_reached"])
delete_pending_votes = []
print("%d voter have been checked!" % voter_counter)
print("time vote %.2f s - %d votes" % (time.time() - start_prep_time, vote_count))
|
26064
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..decoder import ConvDecoder
from ..encoder import build_encoder
from ..modules import conv, deconv
from ..similarity import CorrelationLayer
from ..utils import warp
from .build import MODEL_REGISTRY
@MODEL_REGISTRY.register()
class PWCNet(nn.Module):
"""
Implementation of the paper
`PWC-Net: CNNs for Optical Flow Using Pyramid, Warping, and Cost Volume <https://arxiv.org/abs/1709.02371>`_
Parameters
----------
cfg : :class:`CfgNode`
Configuration for the model
"""
def __init__(self, cfg):
super(PWCNet, self).__init__()
self.cfg = cfg
self.encoder = build_encoder(cfg.ENCODER)
self.correlation_layer = CorrelationLayer(
pad_size=cfg.SIMILARITY.PAD_SIZE,
max_displacement=cfg.SIMILARITY.MAX_DISPLACEMENT,
)
search_range = (2 * cfg.SIMILARITY.MAX_DISPLACEMENT + 1) ** 2
self.decoder_layers = nn.ModuleList()
decoder_cfg = cfg.DECODER.CONFIG
self.up_feature_layers = nn.ModuleList()
for i in range(len(decoder_cfg)):
if i == 0:
concat_channels = search_range
else:
concat_channels = (
search_range + decoder_cfg[i] + cfg.SIMILARITY.MAX_DISPLACEMENT
)
self.decoder_layers.append(
ConvDecoder(
config=decoder_cfg,
to_flow=True,
concat_channels=concat_channels,
)
)
self.up_feature_layers.append(
deconv(
concat_channels + sum(decoder_cfg),
2,
kernel_size=4,
stride=2,
padding=1,
)
)
self.deconv_layers = nn.ModuleList()
for i in range(len(decoder_cfg)):
self.deconv_layers.append(deconv(2, 2, kernel_size=4, stride=2, padding=1))
self.dc_conv = nn.ModuleList(
[
conv(
search_range
+ cfg.SIMILARITY.MAX_DISPLACEMENT
+ decoder_cfg[-1]
+ sum(decoder_cfg),
128,
kernel_size=3,
stride=1,
padding=1,
dilation=1,
),
]
)
self.dc_conv.append(
conv(
decoder_cfg[0],
decoder_cfg[0],
kernel_size=3,
stride=1,
padding=2,
dilation=2,
)
)
padding = 4
dilation = 4
for i in range(len(decoder_cfg) - 2):
self.dc_conv.append(
conv(
decoder_cfg[i],
decoder_cfg[i + 1],
kernel_size=3,
stride=1,
padding=padding,
dilation=dilation,
)
)
padding *= 2
dilation *= 2
self.dc_conv.append(
conv(
decoder_cfg[3],
decoder_cfg[4],
kernel_size=3,
stride=1,
padding=1,
dilation=1,
)
)
self.dc_conv.append(
nn.Conv2d(32, 2, kernel_size=3, stride=1, padding=1, bias=True)
)
self.dc_conv = nn.Sequential(*self.dc_conv)
self._init_weights()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
nn.init.kaiming_normal_(m.weight.data, mode="fan_in")
if m.bias is not None:
m.bias.data.zero_()
def _corr_relu(self, features1, features2):
corr = self.correlation_layer(features1, features2)
return F.leaky_relu(corr, negative_slope=0.1)
def forward(self, img1, img2):
"""
Performs forward pass of the network
Parameters
----------
img1 : torch.Tensor
Image to predict flow from
img2 : torch.Tensor
Image to predict flow to
Returns
-------
torch.Tensor
Flow from img1 to img2
"""
H, W = img1.shape[-2:]
feature_pyramid1 = self.encoder(img1)
feature_pyramid2 = self.encoder(img2)
up_flow, up_features = None, None
up_flow_scale = 0.625
flow_preds = []
for i in range(len(self.decoder_layers)):
if i == 0:
corr = self._corr_relu(feature_pyramid1[i], feature_pyramid2[i])
concatenated_features = corr
else:
warped_features = warp(feature_pyramid2[i], up_flow * up_flow_scale)
up_flow_scale *= 2
corr = self._corr_relu(feature_pyramid1[i], warped_features)
concatenated_features = torch.cat(
[corr, feature_pyramid1[i], up_flow, up_features], dim=1
)
flow, features = self.decoder_layers[i](concatenated_features)
flow_preds.append(flow)
up_flow = self.deconv_layers[i](flow)
up_features = self.up_feature_layers[i](features)
flow_preds.reverse()
flow_preds[0] += self.dc_conv(features)
if self.training:
return flow_preds
else:
flow = flow_preds[0]
if self.cfg.INTERPOLATE_FLOW:
H_, W_ = flow.shape[-2:]
flow = F.interpolate(
flow, img1.shape[-2:], mode="bilinear", align_corners=True
)
flow_u = flow[:, 0, :, :] * (W / W_)
flow_v = flow[:, 1, :, :] * (H / H_)
flow = torch.stack([flow_u, flow_v], dim=1)
if self.cfg.FLOW_SCALE_FACTOR is not None:
flow *= self.cfg.FLOW_SCALE_FACTOR
return flow
|
26129
|
import FWCore.ParameterSet.Config as cms
import TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi
#PropagatorWithMaterialESProducer
oppositeToMomElePropagator = TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi.OppositeMaterialPropagator.clone(
Mass = 0.000511,
ComponentName = 'oppositeToMomElePropagator'
)
|
26130
|
from rdflib import plugin
from rdflib import store
plugin.register(
"SQLAlchemy",
store.Store,
"rdflib_sqlalchemy.store",
"SQLAlchemy",
)
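# Hedged usage sketch (illustration only, not part of the original registration
# module): after the register() call above, the store can be fetched by name.
# The identifier and the in-memory SQLite URL below are example values and
# require rdflib-sqlalchemy plus SQLAlchemy to be installed.
if __name__ == "__main__":
    from rdflib import Graph, Literal, URIRef

    ident = URIRef("rdflib_sqlalchemy_example")
    sa_store = plugin.get("SQLAlchemy", store.Store)(identifier=ident)
    graph = Graph(sa_store, identifier=ident)
    graph.open(Literal("sqlite://"), create=True)
    graph.close()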
|
26132
|
import difflib
import discord
from discord.ext import commands
from discord.ext.commands import CommandNotFound
intents = discord.Intents.all()
client = commands.Bot(command_prefix="+", intents=intents, help_command=None)
@client.event
async def on_ready():
print("Bot Online")
@client.event
async def on_command_error(ctx: commands.Context, exc):
if isinstance(exc, CommandNotFound):
await send_command_suggestion(ctx, ctx.invoked_with)
else:
pass
async def send_command_suggestion(ctx: commands.Context, command_name: str) -> None:
"""Sends user similar commands if any can be found."""
raw_commands = []
for cmd in client.walk_commands():
if not cmd.hidden:
raw_commands += (cmd.name, *cmd.aliases)
if similar_command_data := difflib.get_close_matches(command_name, raw_commands, 1):
similar_command_name = similar_command_data[0]
similar_command = client.get_command(similar_command_name)
if not similar_command:
return
try:
if not await similar_command.can_run(ctx):
return
except commands.errors.CommandError:
return
misspelled_content = ctx.message.content
e = discord.Embed()
e.set_author(name="Did you mean:")
e.description = misspelled_content.replace(
command_name, similar_command_name, 1
)
await ctx.send(embed=e, delete_after=10.0)
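# Hedged illustration (comments only; the names below are made up): the
# suggestion logic above boils down to difflib.get_close_matches over the
# visible command names, e.g.
#   difflib.get_close_matches("hlep", ["help", "ping", "play"], 1)  # -> ['help']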
client.run("TOKEN")
|
26133
|
import asyncio
import logging
import time
from typing import Optional, List
from hummingbot.core.data_type.user_stream_tracker_data_source import \
UserStreamTrackerDataSource
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book import BitfinexOrderBook
from hummingbot.connector.exchange.bitfinex.bitfinex_websocket import BitfinexWebsocket
from hummingbot.connector.exchange.bitfinex.bitfinex_auth import BitfinexAuth
from hummingbot.connector.exchange.bitfinex.bitfinex_order_book_message import \
BitfinexOrderBookMessage
class BitfinexAPIUserStreamDataSource(UserStreamTrackerDataSource):
MESSAGE_TIMEOUT = 30.0
_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def __init__(self, bitfinex_auth: BitfinexAuth, trading_pairs: Optional[List[str]] = None):
if trading_pairs is None:
trading_pairs = []
self._bitfinex_auth: BitfinexAuth = bitfinex_auth
self._trading_pairs = trading_pairs
self._current_listen_key = None
self._listen_for_user_stream_task = None
self._last_recv_time: float = 0
super().__init__()
@property
def order_book_class(self):
return BitfinexOrderBook
@property
def last_recv_time(self) -> float:
return self._last_recv_time
async def listen_for_user_stream(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue):
while True:
try:
ws = await BitfinexWebsocket(self._bitfinex_auth).connect()
await ws.authenticate()
async for msg in ws.messages():
transformed_msg: BitfinexOrderBookMessage = self._transform_message_from_exchange(msg)
if transformed_msg is None:
continue
else:
output.put_nowait(transformed_msg)
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error with Bitfinex WebSocket connection. " "Retrying after 30 seconds...",
exc_info=True,
)
await asyncio.sleep(self.MESSAGE_TIMEOUT)
def _transform_message_from_exchange(self, msg) -> Optional[BitfinexOrderBookMessage]:
order_book_message: BitfinexOrderBookMessage = BitfinexOrderBook.diff_message_from_exchange(msg, time.time())
if any([
order_book_message.type_heartbeat,
order_book_message.event_auth,
order_book_message.event_info,
]):
# skip unneeded events and types
return
return order_book_message
|
26174
|
import csv
def read_regressor_examples(num_of_features, num_of_decisions, file_path):
xs = []
ys = []
with open(file_path, mode='r', encoding='utf-8') as file:
reader = csv.reader(file, delimiter=' ')
for row in reader:
x = [float(value) for value in row[0 : num_of_features]]
y = [float(value) for value in row[num_of_features : num_of_features + num_of_decisions]]
xs.append(x)
ys.append(y)
return {
'x': xs,
'y': ys
}
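# Hedged usage sketch (illustration only; the temporary file and the numbers in
# it are made up). Each row holds `num_of_features` inputs followed by
# `num_of_decisions` outputs, separated by single spaces, which is the layout
# the reader above expects.
if __name__ == '__main__':
    import os
    import tempfile

    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
        tmp.write('1.0 2.0 3.0 0.5\n4.0 5.0 6.0 1.5\n')
        sample_path = tmp.name
    examples = read_regressor_examples(num_of_features=3, num_of_decisions=1,
                                       file_path=sample_path)
    print(examples['x'])  # [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    print(examples['y'])  # [[0.5], [1.5]]
    os.remove(sample_path)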
|
26207
|
class T:
WORK_REQUEST = 1
WORK_REPLY = 2
REDUCE = 3
BARRIER = 4
TOKEN = 7
class Tally:
total_dirs = 0
total_files = 0
total_filesize = 0
total_stat_filesize = 0
total_symlinks = 0
total_skipped = 0
total_sparse = 0
max_files = 0
total_nlinks = 0
total_nlinked_files = 0
total_0byte_files = 0
devfile_cnt = 0
devfile_sz = 0
spcnt = 0 # stripe cnt account per process
# ZFS
total_blocks = 0
class G:
ZERO = 0
ABORT = -1
WHITE = 50
BLACK = 51
NONE = -99
TERMINATE = -100
MSG = 99
MSG_VALID = True
MSG_INVALID = False
fmt1 = '%(asctime)s - %(levelname)s - %(rank)s:%(filename)s:%(lineno)d - %(message)s'
fmt2 = '%(asctime)s - %(rank)s:%(filename)s:%(lineno)d - %(message)s'
bare_fmt = '%(name)s - %(levelname)s - %(message)s'
mpi_fmt = '%(name)s - %(levelname)s - %(rank)s - %(message)s'
bare_fmt2 = '%(message)s'
str = {WHITE: "white", BLACK: "black", NONE: "not set", TERMINATE: "terminate",
ABORT: "abort", MSG: "message"}
KEY = "key"
VAL = "val"
logger = None
logfile = None
loglevel = "warn"
use_store = False
fix_opt = False
preserve = False
DB_BUFSIZE = 10000
memitem_threshold = 100000
tempdir = None
total_chunks = 0
rid = None
chk_file = None
chk_file_db = None
totalsize = 0
src = None
dest = None
args_src = None
args_dest = None
resume = None
reduce_interval = 30
reduce_enabled = False
verbosity = 0
am_root = False
copytype = 'dir2dir'
# Lustre file system
fs_lustre = None
lfs_bin = None
stripe_threshold = None
b0 = 0
b4k = 4 * 1024
b8k = 8 * 1024
b16k = 16 * 1024
b32k = 32 * 1024
b64k = 64 * 1024
b128k = 128 * 1024
b256k = 256 * 1024
b512k = 512 * 1024
b1m = 1024 * 1024
b2m = 2 * b1m
b4m = 4 * b1m
b8m = 8 * b1m
b16m = 16 * b1m
b32m = 32 * b1m
b64m = 64 * b1m
b128m = 128 * b1m
b256m = 256 * b1m
b512m = 512 * b1m
b1g = 1024 * b1m
b4g = 4 * b1g
b16g = 16 * b1g
b64g = 64 * b1g
b128g = 128 * b1g
b256g = 256 * b1g
b512g = 512 * b1g
b1tb = 1024 * b1g
b4tb = 4 * b1tb
FSZ_BOUND = 64 * b1tb
# 25 bins
bins = [b0, b4k, b8k, b16k, b32k, b64k, b128k, b256k, b512k,
b1m, b2m, b4m, b16m, b32m, b64m, b128m, b256m, b512m,
b1g, b4g, b64g, b128g, b256g, b512g, b1tb, b4tb]
# 17 bins, the last bin is special
# This is error-prone, to be refactored.
# bins_fmt = ["B1_000k_004k", "B1_004k_008k", "B1_008k_016k", "B1_016k_032k", "B1_032k_064k", "B1_064k_256k",
# "B1_256k_512k", "B1_512k_001m",
# "B2_001m_004m", "B2_m004_016m", "B2_016m_512m", "B2_512m_001g",
# "B3_001g_100g", "B3_100g_256g", "B3_256g_512g",
# "B4_512g_001t",
# "B5_001t_up"]
# GPFS
gpfs_block_size = ("256k", "512k", "b1m", "b4m", "b8m", "b16m", "b32m")
gpfs_block_cnt = [0, 0, 0, 0, 0, 0, 0]
gpfs_subs = (b256k/32, b512k/32, b1m/32, b4m/32, b8m/32, b16m/32, b32m/32)
dev_suffixes = [".C", ".CC", ".CU", ".H", ".CPP", ".HPP", ".CXX", ".F", ".I", ".II",
".F90", ".F95", ".F03", ".FOR", ".O", ".A", ".SO", ".S",
".IN", ".M4", ".CACHE", ".PY", ".PYC"]
|
26256
|
def _init():
from som.vm.universe import create_universe
return create_universe()
current_universe = _init()
|
26261
|
import re
# crawl roster
faulty_prof = {
'Francis,J)' : 'Francis,J (jdf2)',
'Glathar,E)' : 'Glathar,E',
'Cady,B)' : 'Cady,B'
}
section_types = set()
day_pattern = {
'M': 1,
'T': 1<<1,
'W': 1<<2,
'R': 1<<3,
'F': 1<<4,
'S': 1<<5,
'U': 1<<6
}
def to_bool(s):
return s == 'Y'
def to_list(node):
return [a.text.strip() for a in node]
def set_if_truthy(obj, idx, value):
if value:
obj[idx] = value
def convert_crosslist(c):
if c is None:
return None
if len(c) > 0:
return [c.find('subject').text, int(c.find('catalog_nbr').text)]
return None
def get_s(node):
if node is None:
return None
return node.text
def maybe_float(s):
if s.find('.') > -1:
return float(s)
return int(s)
def convert_units(s):
return [maybe_float(a) for a in s.split('-')]
class CourseParser(object):
def __init__(self):
self.courses = []
self.profs = set()
def parse(self, node):
raise NotImplementedError()
class CourseParserJson(CourseParser):
def __init__(self):
super(CourseParserJson, self).__init__()
self.sessions = set()
self.locations = set()
self.facility = set()
@staticmethod
def crosslist(d):
if d:
return [d['subject'], int(d['catalogNbr']), d['type']]
return None
def convert_meeting(self, node, parent=None):
obj = {}
pattern = 0
pattern_desc = node.get('pattern', '').replace('Su', 'U')
if pattern_desc != 'TBA':
for c in pattern_desc:
pattern |= day_pattern[c]
set_if_truthy(obj, 'ptn', pattern)
facility = node.get('facilityDescrshort')
if facility and facility != 'TBA':
set_if_truthy(obj, 'bldg', facility[:3])
set_if_truthy(obj, 'rm', facility[3:])
set_if_truthy(obj, 'loc', node.get('facilityDescr'))
set_if_truthy(obj, 'st', node.get('timeStart'))
set_if_truthy(obj, 'et', node.get('timeEnd'))
set_if_truthy(obj, 'sd', node.get('startDt'))
set_if_truthy(obj, 'ed', node.get('endDt'))
set_if_truthy(obj, 'profs', [s['netid'] for s in node.get('instructors', [])])
set_if_truthy(obj, 'topic', node.get('meetingTopicDescription'))
return obj
def convert_section(self, node, parent=None):
comp = node.get('ssrComponent')
obj = {}
obj['nbr'] = int(node.get('classNbr'))
obj['sec'] = node.get('section')
# obj['loc'] = node.get('location')
# obj['campus'] = node.get('campus')
set_if_truthy(obj, 'topic', node.get('topicDescription'))
self.locations.add((node.get('location'), node.get('locationDescr'), node.get('campus'), node.get('campusDescr')))
set_if_truthy(obj, 'mt', [self.convert_meeting(s, node) for s in node.get('meetings', [])])
return comp, obj
def parse(self, node):
obj = {}
obj['sub'] = node.get('subject')
obj['nbr'] = int(node.get('catalogNbr'))
obj['title'] = node.get('titleLong')
for group in node.get('enrollGroups', []):
course = obj.copy()
if group['unitsMinimum'] == group['unitsMaximum']:
course['unit'] = [group['unitsMaximum']]
else:
course['unit'] = [group['unitsMinimum'], group['unitsMaximum']]
set_if_truthy(course, 'optcomp', group['componentsOptional'])
set_if_truthy(course, 'session', group['sessionCode'])
set_if_truthy(course, 'crosslists', [self.crosslist(d) for d in group.get('simpleCombinations', [])])
secs = {}
for sec in group['classSections']:
comp, sec = self.convert_section(sec, group)
if comp not in secs:
secs[comp] = []
secs[comp].append(sec)
course['secs'] = secs
self.courses.append(course)
self.sessions.add((group['sessionCode'], group['sessionBeginDt'], group['sessionEndDt'], group['sessionLong']))
class CourseParserXML(CourseParser):
def __init__(self):
self.courses = []
self.profs = set()
def parse(self, node):
self.courses.append(self.convert_course(node))
def parse_prof(self, name):
if name in faulty_prof:
name = faulty_prof[name]
result = re.search(r'\((.+)\)', name)
if result is None:
print "warning: %s dont have netid" % name
return name
else:
netid = result.group(1)
self.profs.add(netid)
return netid
def convert_meeting(self, node):
obj = {}
pattern = 0
pattern_desc = node.find('meeting_pattern_sdescr').text
if pattern_desc != 'TBA':
for c in pattern_desc:
pattern |= day_pattern[c]
set_if_truthy(obj, 'ptn', pattern)
set_if_truthy(obj, 'bldg', node.find('building_code').text)
set_if_truthy(obj, 'rm', node.find('room').text)
set_if_truthy(obj, 'st', node.find('start_time').text)
set_if_truthy(obj, 'et', node.find('end_time').text)
set_if_truthy(obj, 'sd', node.find('start_date').text)
set_if_truthy(obj, 'ed', node.find('end_date').text)
set_if_truthy(obj, 'profs', [self.parse_prof(s) for s in to_list(node.find('instructors') or [])])
return obj
def convert_section(self, node):
comp = node.get('ssr_component')
obj = {}
obj['nbr'] = int(node.get('class_number'))
obj['sec'] = node.get('class_section')
section_types.add(comp)
set_if_truthy(obj, 'consent', get_s(node.find('consent_ldescr')))
set_if_truthy(obj, 'note', get_s(node.find('notes')))
set_if_truthy(obj, 'mt', [self.convert_meeting(s) for s in node.findall('meeting')])
return comp, obj
def convert_course(self, node):
obj = {}
obj['sub'] = node.get('subject')
obj['nbr'] = int(node.get('catalog_nbr'))
obj['unit'] = convert_units(node.find('units').text)
obj['title'] = node.find('course_title').text
set_if_truthy(obj, 'topics', to_list(node.find('topics')))
set_if_truthy(obj, 'crosslists', [convert_crosslist(a) for a in node.find('crosslists') or []])
set_if_truthy(obj, 'comeetings', [convert_crosslist(a) for a in node.find('comeetings') or []])
secs = {}
for sec in node.find('sections'):
comp, sec = self.convert_section(sec)
if comp not in secs:
secs[comp] = []
secs[comp].append(sec)
obj['secs'] = secs
return obj
|
26291
|
import time
from typing import Any, List
import pytest
from yarl import URL
from neuro_sdk import Action, FileStatus, FileStatusType
from neuro_sdk.storage import DiskUsageInfo
from neuro_cli.formatters.storage import (
BaseFilesFormatter,
BSDAttributes,
BSDPainter,
DiskUsageFormatter,
FilesSorter,
GnuIndicators,
GnuPainter,
LongFilesFormatter,
NonePainter,
SimpleFilesFormatter,
VerticalColumnsFilesFormatter,
get_painter,
)
class TestNonePainter:
def test_simple(self, rich_cmp: Any) -> None:
painter = NonePainter()
file = FileStatus(
"File1",
2048,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/user/File1"),
)
rich_cmp(painter.paint(file.name, file.type))
class TestGnuPainter:
def test_color_parsing_simple(self) -> None:
painter = GnuPainter("rs=1;0;1")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
painter = GnuPainter(":rs=1;0;1")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
painter = GnuPainter("rs=1;0;1:")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
painter = GnuPainter("rs=1;0;1:fi=32;42")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
assert painter.color_indicator[GnuIndicators.FILE] == "32;42"
painter = GnuPainter("rs=1;0;1:fi")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
assert painter.color_indicator[GnuIndicators.FILE] == ""
painter = GnuPainter("rs=1;0;1:fi=")
assert painter.color_indicator[GnuIndicators.RESET] == "1;0;1"
assert painter.color_indicator[GnuIndicators.FILE] == ""
@pytest.mark.parametrize(
"escaped,result",
[
("\\a", "\a"),
("\\b", "\b"),
("\\e", chr(27)),
("\\f", "\f"),
("\\n", "\n"),
("\\r", "\r"),
("\\t", "\t"),
("\\v", "\v"),
("\\?", chr(127)),
("\\_", " "),
("a\\n", "a\n"),
("a\\tb", "a\tb"),
("a\\t\\rb", "a\t\rb"),
("a\\=b", "a=b"),
],
)
def test_color_parsing_escaped_simple(self, escaped: str, result: str) -> None:
painter = GnuPainter("rs=" + escaped)
assert painter.color_indicator[GnuIndicators.RESET] == result
painter = GnuPainter(escaped + "=1;2")
assert painter.color_ext_type[result] == "1;2"
painter = GnuPainter(escaped + "=" + escaped)
assert painter.color_ext_type[result] == result
@pytest.mark.parametrize(
"escaped,result",
[
("\\7", chr(7)),
("\\8", "8"),
("\\10", chr(8)),
("a\\2", "a" + chr(2)),
("a\\2b", "a" + chr(2) + "b"),
],
)
def test_color_parsing_escaped_octal(self, escaped: str, result: str) -> None:
painter = GnuPainter("rs=" + escaped)
assert painter.color_indicator[GnuIndicators.RESET] == result
painter = GnuPainter(escaped + "=1;2")
assert painter.color_ext_type[result] == "1;2"
painter = GnuPainter(escaped + "=" + escaped)
assert painter.color_ext_type[result] == result
@pytest.mark.parametrize(
"escaped,result",
[
("\\x7", chr(0x7)),
("\\x8", chr(0x8)),
("\\x10", chr(0x10)),
("\\XaA", chr(0xAA)),
("a\\x222", "a" + chr(0x22) + "2"),
("a\\x2z", "a" + chr(0x2) + "z"),
],
)
def test_color_parsing_escaped_hex(self, escaped: str, result: str) -> None:
painter = GnuPainter("rs=" + escaped)
assert painter.color_indicator[GnuIndicators.RESET] == result
painter = GnuPainter(escaped + "=1;2")
assert painter.color_ext_type[result] == "1;2"
painter = GnuPainter(escaped + "=" + escaped)
assert painter.color_ext_type[result] == result
@pytest.mark.parametrize(
"escaped,result",
[
("^a", chr(1)),
("^?", chr(127)),
("^z", chr(26)),
("a^Z", "a" + chr(26)),
("a^Zb", "a" + chr(26) + "b"),
],
)
def test_color_parsing_carret(self, escaped: str, result: str) -> None:
painter = GnuPainter("rs=" + escaped)
assert painter.color_indicator[GnuIndicators.RESET] == result
painter = GnuPainter(escaped + "=1;2")
assert painter.color_ext_type[result] == "1;2"
painter = GnuPainter(escaped + "=" + escaped)
assert painter.color_ext_type[result] == result
@pytest.mark.parametrize("escaped", [("^1"), ("^"), ("^" + chr(130))])
def test_color_parsing_carret_incorrect(self, escaped: str) -> None:
with pytest.raises(EnvironmentError):
GnuPainter("rs=" + escaped)
with pytest.raises(EnvironmentError):
GnuPainter(escaped + "=1;2")
@pytest.mark.parametrize(
"ls_colors",
[
"di=32;41:fi=0;44:no=0;46",
"di=32;41:no=0;46",
"no=0;46",
"*.text=0;46",
"*.txt=0;46",
],
)
def test_coloring(self, rich_cmp: Any, ls_colors: str) -> None:
file = FileStatus(
"test.txt",
1024,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/usertest.txt"),
)
folder = FileStatus(
"tmp",
0,
FileStatusType.DIRECTORY,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.WRITE,
uri=URL("storage://default/usertmp"),
)
painter = GnuPainter(ls_colors)
rich_cmp(painter.paint(file.name, file.type), index=0)
rich_cmp(painter.paint(folder.name, folder.type), index=1)
class TestBSDPainter:
def test_color_parsing(self) -> None:
painter = BSDPainter("exfxcxdxbxegedabagacad")
assert painter._colors[BSDAttributes.DIRECTORY] == "ex"
@pytest.mark.parametrize(
"ls_colors", ["exfxcxdxbxegedabagacad", "Eafxcxdxbxegedabagacad"]
)
def test_coloring(self, ls_colors: str, rich_cmp: Any) -> None:
file = FileStatus(
"test.txt",
1024,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/usertest.txt"),
)
folder = FileStatus(
"tmp",
0,
FileStatusType.DIRECTORY,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.WRITE,
uri=URL("storage://default/usertmp"),
)
painter = BSDPainter(ls_colors)
rich_cmp(painter.paint(file.name, file.type), index=0)
rich_cmp(painter.paint(folder.name, folder.type), index=1)
class TestPainterFactory:
def test_detection(self, monkeypatch: Any) -> None:
monkeypatch.setenv("LS_COLORS", "")
monkeypatch.setenv("LSCOLORS", "")
painter = get_painter(True)
assert isinstance(painter, NonePainter)
monkeypatch.setenv("LSCOLORS", "exfxcxdxbxegedabagacad")
monkeypatch.setenv("LS_COLORS", "di=32;41:fi=0;44:no=0;46")
painter_without_color = get_painter(False)
painter_with_color = get_painter(True)
assert isinstance(painter_without_color, NonePainter)
assert not isinstance(painter_with_color, NonePainter)
monkeypatch.setenv("LSCOLORS", "")
monkeypatch.setenv("LS_COLORS", "di=32;41:fi=0;44:no=0;46")
painter = get_painter(True)
assert isinstance(painter, GnuPainter)
monkeypatch.setenv("LSCOLORS", "exfxcxdxbxegedabagacad")
monkeypatch.setenv("LS_COLORS", "")
painter = get_painter(True)
assert isinstance(painter, BSDPainter)
class TestFilesFormatter:
files = [
FileStatus(
"File1",
2048,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-01-01 03:00:00", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/userFile1"),
),
FileStatus(
"File2",
1024,
FileStatusType.FILE,
int(time.mktime(time.strptime("2018-10-10 13:10:10", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/userFile2"),
),
FileStatus(
"File3 with space",
1_024_001,
FileStatusType.FILE,
int(time.mktime(time.strptime("2019-02-02 05:02:02", "%Y-%m-%d %H:%M:%S"))),
Action.READ,
uri=URL("storage://default/userFile 3 with space"),
),
]
folders = [
FileStatus(
"Folder1",
0,
FileStatusType.DIRECTORY,
int(time.mktime(time.strptime("2017-03-03 06:03:03", "%Y-%m-%d %H:%M:%S"))),
Action.MANAGE,
uri=URL("storage://default/userFolder11"),
),
FileStatus(
"1Folder with space",
0,
FileStatusType.DIRECTORY,
int(time.mktime(time.strptime("2017-03-03 06:03:02", "%Y-%m-%d %H:%M:%S"))),
Action.MANAGE,
uri=URL("storage://default/user1Folder with space"),
),
]
files_and_folders = files + folders
@pytest.mark.parametrize(
"formatter",
[
(SimpleFilesFormatter(color=False)),
(VerticalColumnsFilesFormatter(width=100, color=False)),
(LongFilesFormatter(human_readable=False, color=False)),
],
)
def test_formatter_with_files_and_folders(
self, formatter: BaseFilesFormatter, rich_cmp: Any
) -> None:
rich_cmp(formatter(self.files_and_folders))
@pytest.mark.parametrize(
"formatter",
[
(SimpleFilesFormatter(color=False)),
(VerticalColumnsFilesFormatter(width=100, color=False)),
(LongFilesFormatter(human_readable=False, color=False)),
],
)
def test_formatter_with_empty_files(
self, formatter: BaseFilesFormatter, rich_cmp: Any
) -> None:
files: List[FileStatus] = []
rich_cmp(formatter(files))
def test_sorter(self) -> None:
sorter = FilesSorter.NAME
files = sorted(self.files_and_folders, key=sorter.key())
assert files == [
self.folders[1],
self.files[0],
self.files[1],
self.files[2],
self.folders[0],
]
sorter = FilesSorter.SIZE
files = sorted(self.files_and_folders, key=sorter.key())
assert files[2:5] == [self.files[1], self.files[0], self.files[2]]
sorter = FilesSorter.TIME
files = sorted(self.files_and_folders, key=sorter.key())
assert files == [
self.folders[1],
self.folders[0],
self.files[0],
self.files[1],
self.files[2],
]
class TestUsageFormatter:
def test_formatter(self, rich_cmp: Any) -> None:
usage = DiskUsageInfo(
total=100000, used=80000, free=20000, cluster_name="default"
)
formatter = DiskUsageFormatter()
rich_cmp(formatter(usage))
|
26306
|
import time
import asyncio
import random
import pyee
import logging
from plugins.input_fsx import fsx_pb2
from hexi.service import event
_logger = logging.getLogger(__name__)
class UDPServer(asyncio.DatagramProtocol):
def __init__(self, manager, token):
super().__init__()
self.manager = manager
self.token = token
self.sn = 0
def datagram_received(self, data, addr):
try:
# Note: there is no length prefix in UDP packets
msg = fsx_pb2.UdpResponseMessage()
msg.ParseFromString(data)
if msg.token != self.token:
_logger.warn('A message is discarded because of incorrect token')
self.manager.ee.emit('udp_discarded_message')
return
if msg.serialNumber <= self.sn:
_logger.warn('A message is discarded because a newer message was already received')
self.manager.ee.emit('udp_discarded_message')
return
self.sn = msg.serialNumber
self.manager.ee.emit('udp_received_message', msg)
except Exception as e:
_logger.warn(e)
self.manager.ee.emit('udp_discarded_message')
def connection_lost(self, exc):
self.manager.ee.emit('udp_closed')
class TCPClientManager(object):
def __init__(self, channel, host, port, retry_sec=2):
self.channel = channel
self.host = host
self.port = port
self.retry_sec = retry_sec
self.work_future = None
self.heartbeat_future = None
self.connect_future = None
self.reconnect_future = None
self.reader = None
self.writer = None
self.state = 'idle'
self.ee = channel.ee
async def connect_async(self):
while self.state in ['connecting', 'reconnecting']:
try:
future = asyncio.open_connection(self.host, self.port)
reader, writer = await asyncio.wait_for(future, timeout=3)
_logger.info('Telemetry connected')
self.reader = reader
self.writer = writer
self.state = 'connected'
self.work_future = asyncio.ensure_future(self.work_async())
self.work_future.add_done_callback(self.on_work_done)
self.heartbeat_future = asyncio.ensure_future(self.heartbeat_async())
self.heartbeat_future.add_done_callback(self.on_heartbeat_done)
self.ee.emit('tcp_connected')
break
except (OSError, asyncio.TimeoutError):
#print('Server not connected, retry in {0} seconds'.format(self.retry_sec))
await asyncio.sleep(self.retry_sec)
def connect(self):
assert(self.state in ['idle', 'disconnected'])
assert(self.connect_future == None)
self.state = 'connecting'
self.connect_future = asyncio.ensure_future(self.connect_async())
self.connect_future.add_done_callback(self.on_connect_done)
return self.connect_future
def on_connect_done(self, future):
self.connect_future = None
async def heartbeat_async(self):
while True:
await asyncio.sleep(10)
msg = fsx_pb2.TcpRequestMessage()
msg.msgType = fsx_pb2.TcpRequestMessage.MSG_TYPE_PING
msg.pingBody.timeStamp = int(time.time())
self.write_message(msg)
def on_heartbeat_done(self, future):
self.heartbeat_future = None
async def work_async(self):
try:
while True:
size_buffer = await self.reader.readexactly(4)
size = int.from_bytes(size_buffer, byteorder='little')
body_buffer = await self.reader.readexactly(size)
msg = fsx_pb2.TcpResponseMessage()
msg.ParseFromString(body_buffer)
self.ee.emit('tcp_received_message', msg)
except (asyncio.IncompleteReadError, ConnectionResetError, ConnectionAbortedError):
pass
def on_work_done(self, future):
_logger.info('Telemetry connection lost')
self.work_future = None
if self.heartbeat_future != None:
self.heartbeat_future.cancel()
self.reader = None
self.writer = None
if self.state != 'disconnected':
self.reconnect()
async def reconnect_async(self):
await self.connect_async()
def reconnect(self):
assert(self.state == 'connected')
assert(self.reconnect_future == None)
_logger.info('Telemetry reconnecting')
self.state = 'reconnecting'
self.reconnect_future = asyncio.ensure_future(self.reconnect_async())
self.reconnect_future.add_done_callback(self.on_reconnect_done)
return self.reconnect_future
def on_reconnect_done(self, f):
self.reconnect_future = None
def disconnect(self):
assert(self.state in ['connecting', 'connected', 'reconnecting'])
self.state = 'disconnected'
if self.connect_future != None:
self.connect_future.cancel()
if self.reconnect_future != None:
self.reconnect_future.cancel()
if self.work_future != None:
self.work_future.cancel()
if self.heartbeat_future != None:
self.heartbeat_future.cancel()
if self.writer != None:
self.writer.close()
def write_message(self, msg):
data = msg.SerializeToString()
data = len(data).to_bytes(4, byteorder = 'little') + data
self.writer.write(data)
class UDPServerManager(object):
def __init__(self, channel, token, host, port):
self.channel = channel
self.token = token
self.host = host
self.port = port
self.transport = None
self.protocol = None
self.state = 'idle'
self.ee = channel.ee
def protocol_factory(self):
return UDPServer(self, self.token)
async def create_endpoint_async(self):
assert(self.state in ['idle', 'closed'])
self.state = 'opening'
loop = asyncio.get_event_loop()
transport, protocol = await loop.create_datagram_endpoint(
self.protocol_factory, local_addr=(self.host, self.port))
self.transport = transport
self.protocol = protocol
self.state = 'opened'
_logger.info('Telemetry receiver listening at {0}:{1}'.format(self.host, self.port))
def close(self):
assert(self.state in ['opening', 'opened'])
_logger.info('Telemetry receiver is closing')
self.state = 'closed'
if self.transport != None:
self.transport.close()
self.transport = None
self.protocol = None
class DataChannel(object):
def __init__(self, udp_port, tcp_host, tcp_port):
self.ee = pyee.EventEmitter()
self.udp_token = random.randint(0, 0x6FFFFFFF)
self.udp_port = udp_port
self.tcp = TCPClientManager(self, tcp_host, tcp_port)
self.udp = UDPServerManager(self, self.udp_token, '0.0.0.0', udp_port)
self.udp_receive_counter = 0
self.udp_discard_counter = 0
self.ee.on('tcp_connected', self.on_tcp_connected)
self.ee.on('tcp_received_message', self.on_tcp_received_message)
self.ee.on('udp_received_message', self.on_udp_received_message)
self.ee.on('udp_discarded_message', self.on_udp_discarded_message)
async def udp_analytics_async(self):
last_receive = 0
last_discard = 0
while True:
await asyncio.sleep(1)
delta_receive = self.udp_receive_counter - last_receive
delta_discard = self.udp_discard_counter - last_discard
last_receive = self.udp_receive_counter
last_discard = self.udp_discard_counter
self.ee.emit('udp_analytics_tick', {
'receive_all': last_receive,
'discard_all': last_discard,
'receive_tick': delta_receive,
'discard_tick': delta_discard})
def on_udp_analytics_done(self, future):
self.udp_analytics_future = None
async def start_async(self):
_logger.info('Starting telemetry channel')
self.udp_analytics_future = asyncio.ensure_future(self.udp_analytics_async())
self.udp_analytics_future.add_done_callback(self.on_udp_analytics_done)
await self.udp.create_endpoint_async()
await self.tcp.connect()
_logger.info('Telemetry channel started')
def stop(self):
_logger.info('Stopping telemetry channel')
if self.udp_analytics_future != None:
self.udp_analytics_future.cancel()
self.tcp.disconnect()
self.udp.close()
def on_tcp_connected(self):
self.udp.protocol.sn = 0
msg = fsx_pb2.TcpRequestMessage()
msg.msgType = fsx_pb2.TcpRequestMessage.MSG_TYPE_SET_CONFIG
msg.setConfigBody.udpPort = self.udp_port
msg.setConfigBody.udpToken = self.udp_token
self.tcp.write_message(msg)
def on_tcp_received_message(self, msg):
if msg.success != True:
_logger.error('Telemetry command failed')
def on_udp_received_message(self, msg):
self.udp_receive_counter = self.udp_receive_counter + 1
def on_udp_discarded_message(self):
self.udp_discard_counter = self.udp_discard_counter + 1
|
26311
|
import argparse
from collections import defaultdict
import pickle
import re
import lightgbm as lgb
import pandas as pd
import numpy as np
import xgboost as xgb
from ..data_utils import SEG_FP, get_encoded_classes
from ..utils import print_metrics
from ..metric import get_metrics
from .blend import (
score_predictions_by_image_id, submission_from_predictions_by_image_id)
def main():
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('detailed_then_features', nargs='+',
help='detailed dataframes and the features in the same order')
arg('--use-xgb', type=int, default=1)
arg('--use-lgb', type=int, default=1)
arg('--num-boost-round', type=int, default=400)
arg('--lr', type=float, default=0.05, help='for lightgbm')
arg('--eta', type=float, default=0.15, help='for xgboost')
arg('--save-model')
arg('--load-model')
arg('--output')
arg('--n-folds', type=int, default=5)
arg('--seg-fp-adjust', type=float)
args = parser.parse_args()
if len(args.detailed_then_features) % 2 != 0:
parser.error('number of detailed and features must be equal')
n = len(args.detailed_then_features) // 2
detailed_paths, feature_paths = (args.detailed_then_features[:n],
args.detailed_then_features[n:])
if args.output:
if not args.load_model:
parser.error('--output needs --load-model')
elif len(feature_paths) == 1:
parser.error('need more than one feature df for train/valid split')
print('\n'.join(
f'{f} | {d}' for f, d in zip(detailed_paths, feature_paths)))
detailed_dfs = [pd.read_csv(path) for path in detailed_paths]
feature_dfs = [pd.read_csv(path) for path in feature_paths]
valid_df = feature_dfs[0]
assert valid_df.columns[0] == 'item'
assert valid_df.columns[-1] == 'y'
feature_cols = [
col for col in valid_df.columns[1:-1] if col not in {
'width', 'height', 'aspect',
'candidate_count', 'candidate_count_on_page',
'candidate_freq_on_page',
}]
top_cls_re = re.compile(r'^top_\d+_cls')
def build_features(df):
df = df[feature_cols].copy()
for col in feature_cols:
if top_cls_re.match(col):
df[f'{col}_is_candidate'] = df[col] == df['candidate_cls']
# del df[col]
print(' '.join(df.columns))
return df
classes = get_encoded_classes()
cls_by_idx = {idx: cls for cls, idx in classes.items()}
cls_by_idx[-1] = SEG_FP
y_preds = []
all_metrics = []
for fold_num in range(args.n_folds):
print(f'fold {fold_num}')
detailed = (detailed_dfs[fold_num if len(detailed_dfs) != 1 else 0]
.copy())
valid_df = feature_dfs[fold_num if len(feature_dfs) != 1 else 0].copy()
valid_features = build_features(valid_df)
xgb_valid_data = xgb.DMatrix(valid_features, label=valid_df['y'])
fold_path = lambda path, kind: f'{path}.{kind}.fold{fold_num}'
if args.load_model:
lgb_load_path = (fold_path(args.load_model, 'lgb')
if args.use_lgb else None)
xgb_load_path = (fold_path(args.load_model, 'xgb')
if args.use_xgb else None)
print(f'loading from {lgb_load_path}, {xgb_load_path}')
if lgb_load_path:
lgb_model = lgb.Booster(model_file=lgb_load_path)
if xgb_load_path:
with open(xgb_load_path, 'rb') as f:
xgb_model = pickle.load(f)
else:
train_df = pd.concat([df for i, df in enumerate(feature_dfs)
if i != fold_num])
train_features = build_features(train_df)
if args.use_lgb:
lgb_model = train_lgb(
train_features, train_df['y'],
valid_features, valid_df['y'],
lr=args.lr,
num_boost_round=args.num_boost_round)
if args.use_xgb:
xgb_model = train_xgb(
train_features, train_df['y'],
valid_features, valid_df['y'],
eta=args.eta,
num_boost_round=args.num_boost_round)
if args.save_model:
lgb_save_path = (fold_path(args.save_model, 'lgb')
if args.use_lgb else None)
xgb_save_path = (fold_path(args.save_model, 'xgb')
if args.use_xgb else None)
print(f'saving to {lgb_save_path}, {xgb_save_path}')
if lgb_save_path:
lgb_model.save_model(
lgb_save_path, num_iteration=lgb_model.best_iteration)
if xgb_save_path:
with open(xgb_save_path, 'wb') as f:
pickle.dump(xgb_model, f)
print('prediction')
predictions = []
if args.use_lgb:
predictions.append(lgb_model.predict(
valid_features, num_iteration=lgb_model.best_iteration))
if args.use_xgb:
predictions.append(xgb_model.predict(
xgb_valid_data, ntree_limit=xgb_model.best_ntree_limit))
valid_df['y_pred'] = np.mean(predictions, axis=0)
if args.seg_fp_adjust:
valid_df.loc[valid_df['candidate_cls'] == -1, 'y_pred'] += \
args.seg_fp_adjust
y_preds.append(valid_df['y_pred'].values)
max_by_item = get_max_by_item(valid_df)
print('scoring')
detailed['pred'] = \
max_by_item['candidate_cls'].apply(cls_by_idx.__getitem__)
print(f'SEG_FP ratio: {(detailed["pred"] == SEG_FP).mean():.5f}')
predictions_by_image_id = get_predictions_by_image_id(detailed)
if not args.output:
metrics = {
'accuracy': (detailed["pred"] == detailed["true"]).mean(),
}
metrics.update(
score_predictions_by_image_id(predictions_by_image_id))
print_metrics(metrics)
all_metrics.append(metrics)
if args.output:
valid_df['y_pred'] = np.mean(y_preds, axis=0)
max_by_item = get_max_by_item(valid_df)
detailed['pred'] = \
max_by_item['candidate_cls'].apply(cls_by_idx.__getitem__)
predictions_by_image_id = get_predictions_by_image_id(detailed)
submission = submission_from_predictions_by_image_id(
predictions_by_image_id)
submission.to_csv(args.output, index=False)
else:
print('\nAll folds:')
print_metrics(get_metrics(all_metrics))
def train_lgb(train_features, train_y, valid_features, valid_y, *,
lr, num_boost_round):
train_data = lgb.Dataset(train_features, train_y)
valid_data = lgb.Dataset(valid_features, valid_y, reference=train_data)
params = {
'objective': 'binary',
'metric': 'binary_logloss',
'learning_rate': lr,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'feature_fraction': 0.9,
'min_data_in_leaf': 20,
'num_leaves': 41,
'scale_pos_weight': 1.2,
'lambda_l2': 1,
}
print(params)
return lgb.train(
params=params,
train_set=train_data,
num_boost_round=num_boost_round,
early_stopping_rounds=20,
valid_sets=[valid_data],
verbose_eval=10,
)
def train_xgb(train_features, train_y, valid_features, valid_y, *,
eta, num_boost_round):
train_data = xgb.DMatrix(train_features, label=train_y)
valid_data = xgb.DMatrix(valid_features, label=valid_y)
params = {
'eta': eta,
'objective': 'binary:logistic',
'gamma': 0.01,
'max_depth': 8,
}
print(params)
eval_list = [(valid_data, 'eval')]
return xgb.train(
params, train_data, num_boost_round, eval_list,
early_stopping_rounds=20,
verbose_eval=10,
)
def get_max_by_item(df):
return (df.iloc[df.groupby('item')['y_pred'].idxmax()]
.reset_index(drop=True))
def get_predictions_by_image_id(detailed):
predictions_by_image_id = defaultdict(list)
for item in detailed.itertuples():
if item.pred != SEG_FP:
predictions_by_image_id[item.image_id].append({
'cls': item.pred,
'center': (item.x + item.w / 2, item.y + item.h / 2),
})
return predictions_by_image_id
if __name__ == '__main__':
main()
|
26319
|
from anuga.utilities import plot_utils as util
from matplotlib import pyplot as pyplot
import numpy
verbose= True
swwfile = 'merewether_1m.sww'
p=util.get_output(swwfile)
p2=util.get_centroids(p)
# Time index at last time
tindex = len(p2.time)-1
if verbose: print('calculating experimental transect')
x_data = [ 0.0, 3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0, 30.0, 33.0]
#vel = [ 0.0, 0.0, 1.1, 3.2, 3.4, 2.4, 3.2, 3.2, 3.7, 3.1, 0.4, 0.0]
vel_data = [ 0.0, 0.4, 3.1, 3.7, 3.2, 3.2, 2.4, 3.4, 3.2, 1.1, 0.0, 0.0]
#depth = [ 0.0, 0.0, 0.1, 0.5, 0.45, 0.4, 0.55, 0.1, 0.1, 0.05, 0.04, 0.0]
depth_data = [ 0.0, 0.04, 0.05, 0.1, 0.1, 0.55, 0.4, 0.45, 0.5, 0.1, 0.0, 0.0]
from scipy import interpolate
fvel = interpolate.interp1d(x_data, vel_data)
fdepth = interpolate.interp1d(x_data, depth_data)
if verbose: print('calculating model heights at observation points')
# Get nearest wet points to 'point observations'
point_observations = numpy.genfromtxt(
'Observations/ObservationPoints.csv',
delimiter=",",skip_header=1)
nearest_points = point_observations[:,0]*0. - 1
for i in range(len(nearest_points)):
# Compute distance of ANUGA points to observation, and
# if the ANUGA point is dry then add a large value
# Then find index of minimum
n = ( (p2.x+p2.xllcorner-point_observations[i,0])**2 + \
(p2.y+p2.yllcorner-point_observations[i,1])**2 + \
(p2.stage[tindex,:] <= p2.elev)*1.0e+06).argmin()
nearest_points[i] = n
f = open('Stage_point_comparison.csv','w')
f.writelines( 'Field, ANUGA, TUFLOW, ANUGA minus Field, ANUGA minus TUFLOW \n' )
if verbose: print(nearest_points.tolist())
for i in range(len(nearest_points)):
po = point_observations[i,-2]
tu = point_observations[i,-1]
anuga_data = p2.stage[tindex, nearest_points.tolist()[i]]
newline = str(round(po,2)) + ', ' + str(round(anuga_data,2)) + ', ' + str(tu) + ', ' + \
str(round(anuga_data - po,2)) + ', ' + str(round(anuga_data - tu,2)) + '\n'
f.writelines(newline)
f.flush()
f.close()
if verbose: print('Plot transect')
## Plot transect 1 [need to guess appropriate end points as these are not so
## clear from the report]
xx=util.near_transect(p2,[103, 100.], [130.,80.],tol=0.5)
xx2=xx[0]
pyplot.clf()
pyplot.figure(figsize=(16,10.5))
pyplot.subplot(121)
pyplot.scatter(p2.x, p2.y, c=p2.elev,edgecolors='none')
# Add nice elevation data
colVals = numpy.maximum(numpy.minimum(p2.elev, 25.), 19.)
util.plot_triangles(p, values = colVals, edgecolors='none')
pyplot.gca().set_aspect('equal')
pyplot.scatter(p2.x[xx2],p2.y[xx2],color='green')
pyplot.xlim( (40., 160.))
pyplot.ylim( (0.,140.))
pyplot.title('Transect points in green')
pyplot.subplot(222)
pyplot.scatter(xx[1],p2.vel[tindex,xx[0]],color='green',label='model')
pyplot.scatter(xx[1],fvel(xx[1]),color='blue',label='data')
pyplot.legend(loc='upper left')
#pyplot.xlim(0,25)
pyplot.title('Final flow speed along the transect')
pyplot.subplot(224)
pyplot.scatter(xx[1],p2.stage[tindex,xx[0]]-p2.elev[xx[0]],color='green',label='model')
pyplot.scatter(xx[1],fdepth(xx[1]),color='blue',label='data')
pyplot.legend(loc='upper left')
#pyplot.xlim(0,25)
pyplot.title('Final depth along the transect')
pyplot.savefig('Transect1.png', bbox_inches='tight')
if verbose: print('Plot velocity field')
pyplot.clf()
# Velocity vector plot
pyplot.figure(figsize=(16,22))
pyplot.scatter(p2.x,p2.y,c=(p2.elev>24.),edgecolors='none', s=0.2)
pyplot.gca().set_aspect('equal')
pyplot.xlim((100,180))
pyplot.ylim((100,210))
#k=range(0,len(p2.x),2) # Thin out the vectors for easier viewing
colVals = numpy.maximum(numpy.minimum(p2.elev, 25.), 19.)
util.plot_triangles(p, values = colVals, edgecolors='white')
k = range(len(p2.x))
# Thin out the triangles
#k = (((10.*(p2.x - p2.x.round())).round()%2 == 0.0)*((10.*(p2.y - p2.y.round())).round()%2 == 0.0)).nonzero()[0]
pyplot.quiver(p2.x[k],p2.y[k],p2.xvel[tindex,k], p2.yvel[tindex,k],
scale_units='xy',units='xy',width=0.1,
color='black',scale=1.0)
pyplot.savefig('velocity_stationary.png',dpi=100, bbox_inches='tight')
## Froude number plot
if verbose: print('Plot Froude number plot')
pyplot.clf()
pyplot.figure(figsize=(6,8))
froude_number = p2.vel[tindex]/(numpy.maximum(p2.height[tindex], 1.0e-03)*9.8)**0.5
froude_category = (froude_number>1.).astype(float) + (froude_number > 0.).astype(float)
pyplot.scatter(p2.x,p2.y,edgecolors='none', s=0.2)
## Fake additions to plot to hack matplotlib legend
pyplot.scatter(0.,0., color='FireBrick',label='>1', marker='s')
pyplot.scatter(0.,0., color='PaleGreen',label='0-1', marker='s')
pyplot.scatter(0.,0., color='blue',label='0',marker='s')
pyplot.gca().set_aspect('equal')
util.plot_triangles(p, values = froude_category, edgecolors='none')
pyplot.xlim((p.x.min(), p.x.max()))
pyplot.ylim((p.y.min(), p.y.max()))
pyplot.title("Froude Number zones: 0, (0,1], or >1")
import matplotlib.patches as mpatches
#red_patch = mpatches.Patch(color='red', label='>1')
#green_patch = mpatches.Patch(color='green', label='(0-1]')
#blue_patch = mpatches.Patch(color='blue', label='0.')
#pyplot.legend(handles=[red_patch, green_patch, blue_patch], labels=['>1', '(0-1]', '0.'], loc='best')
pyplot.legend(loc='upper left')
pyplot.savefig('froudeNumber.png',dpi=100,bbox_inches='tight')
|
26344
|
import time
from typing import Callable
from functools import wraps
def timeit(metric_callback: Callable, **labels):
def wrapper(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
metric_callback(time.time() - start, labels=labels)
return result
return func_wrapper
return wrapper
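# Hedged usage sketch (illustration only; the callback and function below are
# made-up examples). `timeit` measures the wrapped call and forwards the
# elapsed seconds plus the extra labels to the metric callback.
def _print_metric(value, labels):
    print("took %.6f s, labels=%s" % (value, labels))

@timeit(_print_metric, endpoint="demo")
def _slow_add(a, b):
    time.sleep(0.01)
    return a + b

if __name__ == "__main__":
    _slow_add(1, 2)  # prints the elapsed time with labels={'endpoint': 'demo'}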
|
26363
|
from myhdl import Signal, intbv, always, always_comb, block, instances
from hdmi.cores.primitives import dram16xn
@block
def convert_30_to_15(reset, clock, clockx2, data_in, tmds_data2, tmds_data1, tmds_data0):
"""
The block converts the 30-bit data into 15-bit data.
Args:
reset: The reset signal
clock: The pixel clock
clockx2: The clock with twice the frequency of pixel clock
data_in: The input 30-bit data
tmds_data2: 5 bits of the output data (output[15:10])
tmds_data1: 5 bits of the output data (output[10:5])
tmds_data0: 5 bits of the output data (output[5:0])
Returns:
myhdl.instances() : A list of myhdl instances.
"""
    # RAM Address
    write_addr, _write_addr, read_addr, _read_addr = [Signal(intbv(0)[4:0]) for _ in range(4)]
    data_int = Signal(intbv(0)[30:0])

    @always(write_addr)
    def case_wa():
        # Write-address counter, wraps after 15
        if write_addr < 15:
            _write_addr.next = write_addr + 1
        else:
            _write_addr.next = 0

    @always(clock.posedge, reset.posedge)
    def fdc():
        if reset:
            write_addr.next = 0
        else:
            write_addr.next = _write_addr

    o_data_out = Signal(intbv(0)[30:0])  # unused RAM output; only data_int is read below
    fifo_u = dram16xn(data_in, write_addr, read_addr, Signal(True), clock, o_data_out, data_int)

    @always(read_addr)
    def case_ra():
        # Read-address counter, wraps after 15
        if read_addr < 15:
            _read_addr.next = read_addr + 1
        else:
            _read_addr.next = 0

    reset_sync, _reset_sync, reset_p = [Signal(bool(0)) for _ in range(3)]
    sync = Signal(bool(0))

    @always(clockx2.posedge, reset.posedge)
    def fdp():
        if reset:
            reset_sync.next = 1
        else:
            reset_sync.next = reset

    @always(clockx2.posedge)
    def fdr():
        if reset_p:
            sync.next = 0
        else:
            sync.next = not sync

    @always(clockx2.posedge)
    def fdre():
        if reset_p:
            read_addr.next = 0
        elif sync:
            read_addr.next = _read_addr

    db = Signal(intbv(0)[30:0])

    @always(clockx2.posedge)
    def fde():
        if sync:
            db.next = data_int

    mux = Signal(intbv(0)[15:0])

    @always_comb
    def mux_logic():
        # Select the lower or upper 15-bit half of the buffered word
        if not sync:
            mux.next = db[15:0]
        else:
            mux.next = db[30:15]

    @always(clockx2.posedge)
    def fd():
        _reset_sync.next = reset_sync
        reset_p.next = _reset_sync
        tmds_data0.next = mux[5:0]
        tmds_data1.next = mux[10:5]
        tmds_data2.next = mux[15:10]

    return instances()
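# --- Hypothetical wiring sketch (added for illustration, not part of the original source) ---
# Shows how the converter block might be instantiated; the `example_wiring` helper is
# illustrative only, with signal widths taken from the docstring above (30-bit input,
# three 5-bit TMDS outputs). Clock/reset generation and stimulus are omitted.
def example_wiring():
    reset, clock, clockx2 = [Signal(bool(0)) for _ in range(3)]
    data_in = Signal(intbv(0)[30:])
    tmds_data2, tmds_data1, tmds_data0 = [Signal(intbv(0)[5:]) for _ in range(3)]
    return convert_30_to_15(reset, clock, clockx2, data_in,
                            tmds_data2, tmds_data1, tmds_data0)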
|
26468
|
from __future__ import division

import numpy as np
from sklearn.utils import shuffle
from sklearn.metrics import roc_auc_score

"""
Module with different fitness functions to be used by the CRO algorithm.
Each function must take a single individual (coral) as its only argument and return its fitness (a number).
If a fitness function needs additional arguments, fixing them with functools.partial is a good option
(see the usage sketch after feature_selection below).
"""
def max_ones(coral):
    """
    Description: Returns the percentage of 1's in the coral. This function assumes 'coral'
                 is a list; it could be made faster if coral were a numpy array.
    Input:
        - coral
    Output:
        - fitness
    """
    return 100*(sum(coral) / len(coral))
def feature_selection(coral, X, y, model,
                      get_prediction=lambda model, X: model.predict(X),
                      metric=roc_auc_score, random_seed=None):
    """
    Description: Returns the fitness (given by metric) of the features selected by coral,
                 when X and y are used to train the given model.
    Input:
        - coral: an individual (binary feature mask)
        - X: Data input
        - y: Data output
        - model: instance of the model to be trained
        - get_prediction: function that accepts the model and X and outputs the vector
                          that will be used in the metric (predictions, scores...)
        - metric: metric that will be used as fitness
        - random_seed: seed used when shuffling the data
    Output:
        - fitness
    """
    # Use the first 90% of the shuffled data for training and the rest for testing
    offset = int(X.shape[0] * 0.9)
    Xs, ys = shuffle(X, y, random_state=random_seed)
    # Zero out the columns of the features not selected by the coral
    Xs = np.multiply(Xs, coral)
    X_train, y_train = Xs[:offset], ys[:offset]
    X_test, y_test = Xs[offset:], ys[offset:]
    # Train the model on the masked training data
    model.fit(X_train, y_train)
    # Compute the metric on the held-out data
    y_pred = get_prediction(model, X_test)
    fitness = metric(y_test, y_pred)
    return fitness
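# --- Hypothetical usage sketch (added for illustration, not part of the original source) ---
# Illustrates the functools.partial pattern mentioned in the module docstring: all
# arguments except the coral are fixed in advance, yielding a one-argument fitness
# function. The toy dataset, classifier and mask below are invented for this example.
if __name__ == '__main__':
    from functools import partial
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X_demo, y_demo = make_classification(n_samples=200, n_features=10, random_state=0)
    fitness_fn = partial(feature_selection, X=X_demo, y=y_demo,
                         model=LogisticRegression(max_iter=1000),
                         get_prediction=lambda m, X: m.predict_proba(X)[:, 1],
                         metric=roc_auc_score, random_seed=0)
    coral_demo = [1, 0, 1, 1, 0, 1, 0, 0, 1, 1]  # one candidate binary feature mask
    print(fitness_fn(coral_demo))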
|