# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import logging
import tensorflow as tf
class AutoEncoder:
"""
Autoencoder implemented with tensorflow
"""
def __init__(self, encoder_dims, decoder_dims=None, lr=1e-05, logger=None):
"""
Constructor of the model
:param encoder_dims:
encoder's dimensions list.
ex) [4, 2] => x -> 4dim -> 2dim(z)
:param decoder_dims:
decoder's dimensions list.
ex) None, encoder_dims: [4, 2] => z -> 4dim
ex) [2, 4]: z -> 2dim -> 4dim
:param lr: learning rate
"""
self.encoder_dims = encoder_dims
if decoder_dims:
self.decoder_dims = decoder_dims
else:
self.decoder_dims = list(reversed(encoder_dims))[1:]
self.learning_rate = lr
if not logger:
self.logger = logging.getLogger(__name__)
self.logger.setLevel('DEBUG')
else:
self.logger = logger
def build_graph(self, x_ph):
"""
Build graph
:param x_ph: the placeholder of input
:return: None
"""
with tf.name_scope('regularizer'):
regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)
with tf.variable_scope('encoder'):
self.x = x_ph
self.logger.debug('x: {}'.format(self.x))
self.z = self.encoder(self.x, regularizer=regularizer)
self.logger.debug('z: {}'.format(self.z))
with tf.variable_scope('decoder'):
self.x_ = self.decoder(self.z, regularizer=regularizer)
self.logger.debug('x_: {}'.format(self.x_))
with tf.name_scope('train'):
self.loss = tf.losses.mean_squared_error(self.x, self.x_)
self.logger.debug('loss: {}'.format(self.loss))
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.logger.debug('optimizer: {}'.format(self.optimizer))
self.train = self.optimizer.minimize(self.loss)
self.logger.debug('train: {}'.format(self.train))
def encoder(self, x, regularizer=None):
"""
Encoder
activation: leaky_relu for test
:param x: input
:return: encoded_x
"""
result_x = x
for dim in self.encoder_dims:
try:
result_x = tf.layers.dense(result_x, dim,
activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer,
reuse=tf.get_variable_scope().reuse)
except ValueError:
raise ValueError('use tensorflow 1.8.0 or dtype=tf.float32')
self.logger.debug('result_x: {}'.format(result_x))
return result_x
def decoder(self, z, regularizer=None):
"""
Decoder
activation: leaky_relu for test
:param z: latent variables
:return: decoded(x_hat)
"""
result_z = z
for dim in self.decoder_dims + [self.x.shape[-1]]:
try:
result_z = tf.layers.dense(result_z, dim,
activation=tf.nn.leaky_relu,
kernel_regularizer=regularizer,
reuse=tf.get_variable_scope().reuse)
except ValueError:
raise ValueError('use tensorflow 1.8.0 or dtype=tf.float32')
self.logger.debug('result_z: {}'.format(result_z))
        return result_z
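# Minimal usage sketch (not part of the original module; assumes TensorFlow 1.x,
# e.g. 1.8.0 as hinted at in the error messages above, and a toy 4-feature input):
if __name__ == '__main__':
    import numpy as np
    x_ph = tf.placeholder(tf.float32, shape=[None, 4])
    model = AutoEncoder(encoder_dims=[4, 2])
    model.build_graph(x_ph)
    data = np.random.rand(16, 4).astype(np.float32)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(10):
            _, loss = sess.run([model.train, model.loss], feed_dict={x_ph: data})
        print('reconstruction loss:', loss)
|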
import pandas as pd
import argparse
from imblearn.over_sampling import SVMSMOTE
import numpy as np
import os
import shutil
import json
from json_manager import JsonManager
import pipeline_constants as constants
def process_command_line_args():
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--config", required = True, help = "Full path to json config file, relative paths work as well")
ap.add_argument("-o", "--outputlog", required = True, help = "Path to log file")
args = vars(ap.parse_args())
return args["config"], args["outputlog"]
def run_upsample(json_file_path, fmt_file_path):
json_manager = JsonManager(json_file_path)
if json_manager.get_upsample_status() == True:
print(f"Upsampling started using {json_file_path} and {fmt_file_path}")
upsampled_path = json_manager.get_upsampled_path()
constants.remove_folder_if_exists(\
constants.UPSAMPLED_CSV_FOLDER_NAME, upsampled_path)
print("Upsampling")
hot_encoded_folder = os.fsdecode(os.path.join(\
json_manager.get_hot_encoded_path(), \
constants.HOT_ENCODED_CSV_FOLDER_NAME))
hot_encoded_file = os.fsdecode(os.path.join(\
hot_encoded_folder, \
constants.HOT_ENCODED_CSV_FILENAME))
hotEncoded_data = pd.read_csv(hot_encoded_file)
features_data = pd.read_csv(hot_encoded_file, \
usecols = list(hotEncoded_data.columns)[:-1]) # everything except label
labels_data = pd.read_csv(hot_encoded_file, \
usecols = [list(hotEncoded_data.columns)[-1]]) # label
sm = SVMSMOTE(random_state = json_manager.get_random_state())
X_res, y_res = sm.fit_resample(features_data, labels_data)
csv_ready = np.append(X_res, y_res, axis = constants.COLUMN_AXIS)
upsampled_folder = constants.add_folder_to_directory(\
constants.UPSAMPLED_CSV_FOLDER_NAME, upsampled_path)
upsampled_file_path = os.fsdecode(os.path.join(\
upsampled_folder, constants.UPSAMPLED_CSV_FILENAME))
if os.path.exists(upsampled_file_path):
os.remove(upsampled_file_path)
        with open(fmt_file_path, "r") as f:
            fmt = f.readline()
header = ','.join(str(i) for i in hotEncoded_data.columns)
np.savetxt(upsampled_file_path, csv_ready, \
fmt = fmt, \
delimiter = constants.CSV_DELIMITER, \
header = header, \
comments='')
print(f"Upsampling finished, results in {upsampled_file_path}")
def main():
json_file_path, fmt_file_path = process_command_line_args()
run_upsample(json_file_path, fmt_file_path)
if __name__ == '__main__':
main()
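# Example invocation (illustrative file names; the --outputlog argument is the file
# whose first line run_upsample() reads as the np.savetxt format string):
#   python upsample.py --config pipeline_config.json --outputlog format.txt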
|
from subprocess import call
import time
import os
import selenium
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from config import EMAIL
def click_option(driver, menu, text):
select = Select(driver.find_element_by_name(menu))
select.select_by_visible_text(text)
def run(rejected, background, folder='.', name=''):
"""
run webgestalt GO-enrichment analysis with given rejected and background
    files. Specify folder to save results to a location other than the current directory.
>>> rejected = 'test/rej.txt'
>>> background = 'test/background.txt'
>>> run(rejected, background, folder='test', name='results')
"""
rejected = os.path.abspath(rejected)
background = os.path.abspath(background)
folder = os.path.abspath(folder)
driver = selenium.webdriver.Firefox()
main_window = driver.window_handles[0]
driver.implicitly_wait(5)
driver.get('http://bioinfo.vanderbilt.edu/webgestalt')
start = driver.find_element_by_link_text('START')
start.click()
# LOGIN
login_form = driver.find_element_by_id('loginForm')
login_form_email = login_form.find_element_by_id('email')
login_form_email.send_keys(EMAIL)
time.sleep(1)
login_form_submit = login_form.find_element_by_name('submit')
login_form_submit.click()
# INPUT
click_option(driver, 'organism', 'hsapiens')
time.sleep(4)
click_option(driver, 'idtype', 'hsapiens__entrezgene')
time.sleep(4)
inputfile = driver.find_element_by_name('inputfile')
inputfile.send_keys(rejected)
enter = driver.find_element_by_css_selector('[value=ENTER]')
enter.click()
# ANALYSIS
id_info = driver.find_element_by_link_text('[Download]')
call(['wget', '-O', os.path.join(folder, '{}_idinfo.tab'.format(name)),
id_info.get_attribute('href')])
analysis_menu = driver.find_element_by_class_name('dropdown')
analysis_menu.click() # makes options visible
analysis = driver.find_element_by_link_text('GO Analysis')
analysis.click()
refsetfile = driver.find_element_by_name('refsetfile')
refsetfile.send_keys(background)
click_option(driver, 'upload_idtype', 'hsapiens__entrezgene')
submit = driver.find_element_by_css_selector('[value="Run Enrichment Analysis"]')
submit.click()
# RESULTS
driver.switch_to_window(driver.window_handles[1])
element = WebDriverWait(driver, 100).until(
EC.presence_of_element_located((By.LINK_TEXT, "View results"))
)
element.click()
driver.switch_to_window(driver.window_handles[2])
img = driver.find_element_by_tag_name('img')
img_src = img.get_attribute('src')
call(['wget', '-O', os.path.join(folder, '{}.gif'.format(name)), img_src])
for window in driver.window_handles:
driver.switch_to_window(window)
driver.close()
|
from __future__ import absolute_import
import pytest
import numpy as np
from astropy import units as u
from astropy.coordinates import Longitude, Latitude, Angle
from sunpy.physics.transforms.differential_rotation import diff_rot, _sun_pos, _calc_P_B0_SD, rot_hpc
from sunpy.tests.helpers import assert_quantity_allclose
#pylint: disable=C0103,R0904,W0201,W0212,W0232,E1103
# Please note the numbers in these tests are not checked for physical
# accuracy, only that they are the values the function was outputting upon
# implementation.
@pytest.fixture
def seconds_per_day():
return 24 * 60 * 60.0 * u.s
def test_single(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg)
assert rot == 136.8216 * u.deg
def test_array(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, np.linspace(-70, 70, 2) * u.deg)
assert_quantity_allclose(rot, Longitude(np.array([110.2725, 110.2725]) * u.deg))
def test_synodic(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='howard', frame_time='synodic')
assert rot == 126.9656 * u.deg
def test_sidereal(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='howard', frame_time='sidereal')
assert rot == 136.8216 * u.deg
def test_howard(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='howard')
assert rot == 136.8216 * u.deg
def test_allen(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='allen')
assert rot == 136.9 * u.deg
def test_snodgrass(seconds_per_day):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='snodgrass')
assert rot == 135.4232 * u.deg
def test_fail(seconds_per_day):
with pytest.raises(ValueError):
rot = diff_rot(10 * seconds_per_day, 30 * u.deg, rot_type='garbage')
def test_sunpos():
result = _sun_pos('2013-05-14')
assertion = {'obliq': (23.4358, Angle, u.deg),
'app_long': (53.3683, Longitude, u.deg),
'dec': (18.6125, Latitude, u.deg),
'ra': (50.9796, Longitude, u.deg),
'longitude': (53.3705, Longitude, u.deg)}
for k in assertion:
np.testing.assert_almost_equal(result[k].to(u.deg).value, assertion[k][0], decimal=4)
isinstance(result[k], assertion[k][1])
result[k].unit == assertion[k][2]
def test_calc_P_B0_SD():
result = _calc_P_B0_SD('2012-12-14')
assertion = {'p': (10.4868, Angle, u.deg),
'b0': (-0.8127, Angle, u.deg),
'l0': (0.0000, Angle, u.deg),
'sd': (16.2364 / 60.0, Angle, u.arcmin)}
for k in assertion:
np.testing.assert_almost_equal(result[k].to(u.degree).value,
assertion[k][0], decimal=4)
# Test that the correct astropy Quantity objects are returned and
# that they have the expected units.
isinstance(result[k], assertion[k][1])
result[k].unit == assertion[k][2]
def test_rot_hpc():
# testing along the Sun-Earth line, observer is on the Earth
x, y = rot_hpc(451.4 * u.arcsec, -108.9 * u.arcsec,
'2012-06-15', '2012-06-15 16:05:23')
np.testing.assert_almost_equal(x.to(u.arcsec).value, 574.2, decimal=1)
np.testing.assert_almost_equal(y.to(u.arcsec).value, -108.4, decimal=1)
# Test that astropy Angles are returned and that they have the expected
# units
isinstance(x, Angle)
x.unit == u.arcsec
isinstance(y, Angle)
y.unit == u.arcsec
|
import sys
import os
import pytest
root_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
if root_path not in sys.path:
sys.path.append(root_path)
from common import base
# common_path = os.path.join(os.getcwd(), r'../../common')
#
# print(common_path)
# if common_path not in sys.path:
# sys.path.append(common_path)
#
# import base
|
import torch
from typing import Union, Optional
from colossalai.tensor import ColoTensor
GeneralTensor = Union[ColoTensor, torch.Tensor]
Number = Union[int, float]
def convert_to_colo_tensor(tensor: Optional[GeneralTensor]) -> Optional[ColoTensor]:
if tensor is not None and not isinstance(tensor, ColoTensor):
tensor = ColoTensor.from_torch_tensor(tensor)
return tensor
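# Illustrative behaviour (not from the original file): plain torch.Tensors are wrapped,
# while None and existing ColoTensors pass through unchanged.
#   colo = convert_to_colo_tensor(torch.ones(2, 2))   # -> ColoTensor
#   same = convert_to_colo_tensor(colo)               # returned unchanged
#   none = convert_to_colo_tensor(None)               # -> None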
|
# pyskel
import time
from random import random
has_legs = False
class Simulation(object):
def __init__(self, pars):
self.pars = pars
self.pd = None
def mod_run(sim, p):
_, p2 = sim.pars
sim.pars = p, p2
return run(sim)
def run(sim):
pd = []
p1, p2 = sim.pars
for ctr in range(3):
pd.append([p1, p2])
time.sleep(random() * 5)
print(pd)
return "done"
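# Minimal usage sketch (illustrative parameter values; run() sleeps up to a few
# seconds per iteration because of the random delay above):
if __name__ == '__main__':
    sim = Simulation((0.1, 0.2))   # pars is a (p1, p2) pair
    print(mod_run(sim, 0.5))       # replace p1 with 0.5, run the simulation, print pd, return "done"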
|
import numpy as np
import pytest
from alitra import AlignFrames, Euler, FrameTransform, PointList, Translation
@pytest.mark.parametrize(
"eul_rot, ref_translations, p_robot,rotation_axes",
[
(
Euler(psi=np.pi * -0.0, from_="robot", to_="asset"),
Translation(x=200030, y=10000, from_="robot", to_="asset"),
PointList.from_array(
np.array(
[
[10, 1, 0],
[20, 2, 0],
[30, 7, 0],
[40, 5, 0],
]
),
frame="robot",
),
"z",
),
(
Euler(psi=np.pi / 2, from_="robot", to_="asset"),
Translation(x=10, y=0, from_="robot", to_="asset"),
PointList.from_array(
np.array([[5, 0, 0], [5, 2, 0], [7, 5, 0], [3, 5, 0]]),
frame="robot",
),
"z",
),
(
Euler(theta=np.pi * 0.9, from_="robot", to_="asset"),
Translation(x=1, y=10, from_="robot", to_="asset"),
PointList.from_array(
np.array([[10, 0, 0], [5, 2, 0], [7, 5, 0], [3, 5, 0]]), frame="robot"
),
"x",
),
(
Euler(phi=1 * 0.2, theta=1, psi=0.4, from_="robot", to_="asset"),
Translation(x=0, y=10, z=2, from_="robot", to_="asset"),
PointList.from_array(
np.array(
[
[0, 1, 2],
[5, 2, 6],
[7, 5, 0],
[3, 5, 0],
[3, 5, 10],
[3, 5, 11],
]
),
frame="robot",
),
"xyz",
),
(
Euler(psi=np.pi / 4, from_="robot", to_="asset"),
Translation(x=1, y=0, from_="robot", to_="asset"),
PointList.from_array(np.array([[1, 1, 0], [10, 1, 0]]), frame="robot"),
"z",
),
(
Euler(phi=np.pi * 0.2, from_="robot", to_="asset"),
Translation(x=1, y=10, z=2, from_="robot", to_="asset"),
PointList.from_array(
np.array([[0, 1, 2], [5, 2, 0], [7, 5, 0], [3, 5, 0]]), frame="robot"
),
"y",
),
],
)
def test_align_frames(eul_rot, ref_translations, p_robot, rotation_axes):
rotations_c2to_c1 = eul_rot.as_np_array()
c_frame_transform = FrameTransform(
euler=eul_rot,
translation=ref_translations,
from_=eul_rot.from_,
to_=eul_rot.to_,
)
ref_translation_array = ref_translations.as_np_array()
p_asset = c_frame_transform.transform_point(p_robot, from_="robot", to_="asset")
transform = AlignFrames.align_frames(p_robot, p_asset, rotation_axes)
assert np.allclose(
transform.transform.translation.as_np_array(), ref_translation_array
)
assert np.allclose(transform.transform.euler.as_np_array(), rotations_c2to_c1)
p_robot_noisy = PointList.from_array(
p_robot.as_np_array()
+ np.clip(
np.random.normal(np.zeros(p_robot.as_np_array().shape), 0.1), -0.1, 0.1
),
frame="robot",
)
p_asset_noisy = PointList.from_array(
p_asset.as_np_array()
+ np.clip(
np.random.normal(np.zeros(p_asset.as_np_array().shape), 0.1), -0.1, 0.1
),
frame="asset",
)
transform_noisy = AlignFrames.align_frames(
p_robot_noisy, p_asset_noisy, rotation_axes
)
translation_arr_noise = transform_noisy.transform.translation.as_np_array()
euler_arr_noise = transform_noisy.transform.euler.as_np_array()
rotations = np.absolute(euler_arr_noise - rotations_c2to_c1)
translations = np.absolute(translation_arr_noise - ref_translation_array)
    assert not np.any(rotations > 0.3)
    assert not np.any(translations > 0.4)
@pytest.mark.parametrize(
"p_asset, p_robot,rotation_frame",
[
(
PointList.from_array(np.array([[10, 0, 0], [5, 2, 0], [7, 5, 0]]), "asset"),
PointList.from_array(np.array([[12, 0, 0], [5, 2, 0], [7, 5, 0]]), "robot"),
"z",
),
(
PointList.from_array(np.array([[10, 0, 0], [5, 2, 0]]), "asset"),
PointList.from_array(np.array([[13, 2, 0], [7, 4, 0]]), "robot"),
"z",
),
(
PointList.from_array(np.array([[10, 0, 0], [5, 2, 0]]), "robot"),
PointList.from_array(np.array([[11, 0, 0], [6, 2, 0]]), "asset"),
"z",
),
(
PointList.from_array(
np.array([[10, 0, 0], [10, 0, 0], [5, 2, 0], [7, 5, 0]]), "asset"
),
PointList.from_array(
np.array([[11, 0, 0], [11, 0, 0], [5, 2, 0], [7, 5, 0]]), "robot"
),
"z",
),
(
PointList.from_array(np.array([[10, 0, 0], [5, 2, 0]]), "asset"),
PointList.from_array(np.array([[11, 0, 0], [6, 2, 0]]), "robot"),
"xyz",
),
],
)
def test_align_frames_exceptions(p_robot, p_asset, rotation_frame):
with pytest.raises(ValueError):
AlignFrames.align_frames(p_1=p_robot, p_2=p_asset, rot_axes=rotation_frame)
|
# Copyright (C) 2018 Hatching B.V.
# This file is licensed under the MIT License, see also LICENSE.
import base64
import datetime
import logging
import os.path
import yaml
from arbiter.const import MINIMUM_STAKE_DEFAULT
log = logging.getLogger(__name__)
def repr_timedelta(dumper, data):
# Fun for days
r = {}
for k in ("days", "seconds", "microseconds", "milliseconds",
"minutes", "hours", "weeks"):
v = getattr(data, k, 0)
if v != 0:
r[k] = v
return dumper.represent_data(r)
yaml.add_representer(datetime.timedelta, repr_timedelta)
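# With this representer registered, timedeltas dump as mappings of their non-zero
# components, e.g. yaml.dump({"expires": datetime.timedelta(days=5)}) renders the
# value as a "days: 5" mapping (the "expires" default below is such a timedelta).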
class ConfigFile(object):
defaults = {
"bind": ":9080",
"url": "http://localhost:9080",
"polyproxy": "",
"polyswarmd": "polyswarmd.polyswarm.io",
"apikey": "a"*32,
"addr": "",
"addr_privkey": "",
"chain": "side",
"minimum_stake": MINIMUM_STAKE_DEFAULT,
"dashboard_password": "",
"api_secret": "",
"artifacts": "~/.artifacts",
"dburi": "postgresql://arbiter:arbiter@localhost/arbiter",
"expires": datetime.timedelta(days=5),
"analysis_backends": {},
"trusted_experts": [],
"testing_mode": False,
"monitor_bind": "10.1.0.12:12333",
}
def __init__(self, path=None):
if not path:
self.properties = {}
self.properties.update(self.defaults)
else:
self.properties = yaml.safe_load(open(path, "rb")) or {}
for k, v in self.defaults.items():
if k not in self.properties:
self.properties[k] = v
for k in ("dashboard_password", "api_secret"):
if not self.properties.get(k):
                log.warning("Please configure `%s`! Creating random secret...", k)
pw = base64.b64encode(os.urandom(16)).decode("utf8").rstrip("=")
self.properties[k] = pw
def __getattr__(self, name):
if name in self.properties:
return self.properties[name]
if name in self.defaults:
return self.defaults[name]
raise AttributeError(name)
@property
def api_secret(self):
return self.__getattr__("api_secret").encode("utf8")
@property
def artifacts(self):
return os.path.expanduser(self.__getattr__("artifacts"))
@property
def expires(self):
exp = self.__getattr__("expires")
if isinstance(exp, dict):
return datetime.timedelta(**exp)
if isinstance(exp, int):
return datetime.timedelta(hours=int(exp))
return exp
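# Minimal usage sketch (not part of the original file): with no YAML path given,
# every property falls back to `defaults`, and the empty secrets are replaced by
# random values with a warning.
if __name__ == "__main__":
    cfg = ConfigFile()
    print(cfg.bind, cfg.polyswarmd)   # ':9080' and 'polyswarmd.polyswarm.io' from defaults
    print(cfg.expires)                # timedelta of 5 days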
|
from .sphere import Sphere
|
# -*- coding: utf-8 -*-
"""Uniform Resource Identifier
.. seealso:: :rfc:`3986`
"""
# TODO: parse HTTP/1.0-';'-params?
import re
from socket import AF_INET, AF_INET6, error as SocketError, inet_ntop, inet_pton
from httoop.exceptions import InvalidURI
from httoop.six import int2byte, iterbytes, with_metaclass
from httoop.uri.percent_encoding import Percent
from httoop.uri.query_string import QueryString
from httoop.uri.type import URIType
from httoop.util import Unicode, _, integer
class URI(with_metaclass(URIType)):
u"""Uniform Resource Identifier"""
__slots__ = ('scheme', 'username', 'password', 'host', '_port', 'path', 'query_string', 'fragment')
slots = __slots__
SCHEMES = {}
SCHEME = None
PORT = None
encoding = 'UTF-8'
@property
def query(self):
return tuple(QueryString.decode(self.query_string.encode(self.encoding), self.encoding))
@query.setter
def query(self, query):
self.query_string = QueryString.encode(query, self.encoding).decode(self.encoding)
@property
def path_segments(self):
return [Unicode.replace(p, u'%2f', u'/') for p in self.path.split(u'/')]
@path_segments.setter
def path_segments(self, path):
self.path = u'/'.join(seq.replace(u'/', u'%2f') for seq in path)
@property
def hostname(self):
host = self.host
if host.startswith(u'[v') and host.endswith(u']') and u'.' in host and host[2:-1].split(u'.', 1)[0].isdigit():
return host[2:-1].split(u'.', 1)[1]
return host.rstrip(u']').lstrip(u'[').lower()
@property
def port(self):
return self._port or self.PORT
@port.setter
def port(self, port):
port = port or self.PORT
if port:
try:
port = integer(port)
if not 0 < integer(port) <= 65535:
raise ValueError
except ValueError:
raise InvalidURI(_(u'Invalid port: %r'), port) # TODO: TypeError
self._port = port
def __init__(self, uri=None, *args, **kwargs):
self.set(kwargs or args or uri or b'')
def join(self, other=None, *args, **kwargs):
u"""Join a URI with another absolute or relative URI"""
relative = URI(other or args or kwargs)
joined = URI()
current = URI(self)
if relative.scheme:
current = relative
current.normalize()
return current
joined.scheme = current.scheme
if relative.host:
current = relative
joined.username = current.username
joined.password = current.password
joined.host = current.host
joined.port = current.port
if relative.path:
current = relative
joined.path = current.path
if relative.path and not relative.path.startswith(u'/'):
joined.path = u'%s%s%s' % (self.path, u'' if self.path.endswith(u'/') else u'/../', relative.path)
if relative.query_string:
current = relative
joined.query_string = current.query_string
if relative.fragment:
current = relative
joined.fragment = current.fragment
joined.normalize()
return joined
def normalize(self):
        u"""Normalize the URI to make it comparable.
.. seealso:: :rfc:`3986#section-6`
"""
self.scheme = self.scheme.lower()
self.host = self.host.lower()
if not self.port:
self.port = self.PORT
self.abspath()
if not self.path.startswith(u'/') and self.host and self.scheme and self.path:
self.path = u'/%s' % (self.path, )
def abspath(self):
"""Clear out any '..' and excessive slashes from the path
>>> dangerous = (u'/./', u'/../', u'./', u'/.', u'../', u'/..', u'//')
>>> uris = (URI(b'/foo/./bar/../baz//blah/.'), )
>>> _ = [uri.abspath() for uri in uris]
>>> all(all(d not in uri.path for d in dangerous) for uri in uris)
True
>>> u = URI(b'/foo/../bar/.'); u.abspath(); u.path == u'/bar/'
True
"""
path = re.sub(u'\\/{2,}', u'/', self.path) # remove //
if not path:
return
unsplit = []
directory = False
for part in path.split(u'/'):
if part == u'..' and (not unsplit or unsplit.pop() is not None):
directory = True
elif part != u'.':
unsplit.append(part)
directory = False
else:
directory = True
if directory:
unsplit.append(u'')
self.path = u'/'.join(unsplit) or u'/'
def set(self, uri):
if isinstance(uri, Unicode):
uri = uri.encode(self.encoding) # FIXME: remove?
if isinstance(uri, bytes):
self.parse(uri)
elif isinstance(uri, URI):
self.tuple = uri.tuple
elif isinstance(uri, tuple):
self.tuple = uri
elif isinstance(uri, dict):
self.dict = uri
else:
raise TypeError('URI must be bytes/unicode/tuple/dict not %r' % (type(uri).__name__, ))
@property
def dict(self):
slots = (key.lstrip('_') for key in self.slots)
return dict((key, getattr(self, key)) for key in slots)
@dict.setter
def dict(self, uri):
for key in self.slots:
key = key.lstrip('_')
setattr(self, key, uri.get(key, u''))
@property
def tuple(self):
return tuple(getattr(self, key) for key in self.slots)
@tuple.setter
def tuple(self, tuple_):
(self.scheme, self.username, self.password, self.host,
self.port, self.path, self.query_string, self.fragment) = tuple_
def parse(self, uri):
r"""Parses a well formed absolute or relative URI.
foo://example.com:8042/over/there?name=ferret#nose
\_/ \______________/\_________/ \_________/ \__/
| | | | |
scheme authority path query fragment
| _____________________|__
/ \ / \
urn:example:animal:ferret:nose
https://username:password@[::1]:8090/some/path?query#fragment
<scheme>://<username>:<password>@<host>:<port>/<path>?<query>#<fragment>
[<scheme>:][//[<username>[:<password>]@][<host>][:<port>]/]<path>[?<query>][#<fragment>]
"""
if type(self) is URI and b':' in uri:
self.scheme = uri.split(b':', 1)[0].lower()
if type(self) is not URI:
return self.parse(uri)
if uri and uri.strip(b'0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'):
raise InvalidURI(_(u'Invalid URI: must consist of printable ASCII characters without whitespace.'))
uri, __, fragment = uri.partition(b'#')
uri, __, query_string = uri.partition(b'?')
scheme, authority_exists, uri = uri.rpartition(b'://')
if not authority_exists and uri.startswith(b'//'):
uri = uri[2:]
authority_exists = True
if not authority_exists and b':' in uri:
scheme, __, uri = uri.partition(b':')
authority, path = b'', uri
if authority_exists:
authority, __, path = uri.partition(b'/')
path = b'%s%s' % (__, path)
userinfo, __, hostport = authority.rpartition(b'@')
username, __, password = userinfo.partition(b':')
if b':' in hostport and not hostport.endswith(b']'):
host, __, port = hostport.rpartition(b':')
else:
host, port = hostport, b''
unquote = self.unquote
path = u'/'.join([unquote(seq).replace(u'/', u'%2f') for seq in path.split(b'/')])
try:
scheme = scheme.decode('ascii').lower()
except UnicodeDecodeError: # pragma: no cover
raise InvalidURI(_(u'Invalid scheme: must be ASCII.'))
if scheme and scheme.strip(u'abcdefghijklmnopqrstuvwxyz0123456789.-+'):
raise InvalidURI(_(u'Invalid scheme: must only contain alphanumeric letters or plus, dash, dot.'))
if query_string:
query_string = QueryString.encode(QueryString.decode(query_string, self.encoding), self.encoding)
self.tuple = (
scheme,
unquote(username),
unquote(password),
self._unquote_host(host),
port,
path,
query_string.decode(self.encoding),
unquote(fragment)
)
def _unquote_host(self, host):
# IPv6 / IPvFuture
if host.startswith(b'[') and host.endswith(b']'):
host = host[1:-1]
try:
host = inet_ntop(AF_INET6, inet_pton(AF_INET6, host.decode('ascii')))
if isinstance(host, bytes): # Python 2
host = host.decode('ascii')
return u'[%s]' % (host, )
except (SocketError, UnicodeDecodeError):
# IPvFuture
if host.startswith(b'v') and b'.' in host and host[1:].split(b'.', 1)[0].isdigit():
try:
return u'[%s]' % host.decode('ascii')
except UnicodeDecodeError: # pragma: no cover
raise InvalidURI(_('Invalid IPvFuture address: must be ASCII.'))
raise InvalidURI(_('Invalid IP address in URI.'))
# IPv4
if all(x.isdigit() for x in host.split(b'.')):
try:
host = inet_ntop(AF_INET, inet_pton(AF_INET, host.decode('ascii')))
if isinstance(host, bytes): # Python 2
host = host.decode('ascii')
return host
except (SocketError, UnicodeDecodeError):
raise InvalidURI(_('Invalid IPv4 address in URI.'))
if host.strip(Percent.UNRESERVED + Percent.SUB_DELIMS + b'%'):
raise InvalidURI(_('Invalid URI host.'))
# DNS hostname
host = self.unquote(host)
try:
return host.encode('ascii').decode('idna').lower()
except UnicodeError: # pragma: no cover
raise InvalidURI(_('Invalid host.'))
def compose(self):
return b''.join(self._compose_absolute_iter())
def _compose_absolute_iter(self):
u"""composes the whole URI"""
scheme, username, password, host, port, path, _, fragment = self.tuple
if scheme:
yield self.quote(scheme, Percent.SCHEME)
yield b':'
authority = b''.join(self._compose_authority_iter())
if authority:
yield b'//'
yield authority
yield b''.join(self._compose_relative_iter())
def _compose_authority_iter(self):
if not self.host:
return
username, password, host, port, quote = self.username, self.password, self.host, self.port, self.quote
if username:
yield quote(username, Percent.USERINFO)
if password:
yield b':'
yield quote(password, Percent.USERINFO)
yield b'@'
yield host.encode('idna')
if port and integer(port) != self.PORT:
yield b':%d' % integer(port)
def _compose_relative_iter(self):
u"""Composes the relative URI beginning with the path"""
scheme, path, query_string, quote, fragment = self.scheme, self.path, self.query_string, self.quote, self.fragment
PATH = Percent.PATH
if not scheme and not path.startswith(u'/'):
PATH = b''.join({int2byte(c) for c in iterbytes(PATH)} - {b':', b'@'})
yield b'/'.join(quote(x, PATH) for x in path.split(u'/'))
if query_string:
yield b'?'
yield query_string.encode(self.encoding)
if fragment:
yield b'#'
yield quote(fragment, Percent.FRAGMENT)
def unquote(self, data):
return Percent.unquote(bytes(data)).decode(self.encoding)
def quote(self, data, charset):
return Percent.quote(Unicode(data).encode(self.encoding), charset)
def __eq__(self, other):
u"""Compares the URI with another string or URI
.. seealso:: :rfc:`2616#section-3.2.3`
.. seealso:: :rfc:`3986#section-6`
>>> u1 = URI(b'http://abc.com:80/~smith/home.html')
>>> u2 = b'http://ABC.com/%7Esmith/home.html'
>>> u3 = URI(b'http://ABC.com:/%7esmith/home.html')
>>> u1 == u2 == u3
True
"""
cls = type(self)
self_, other = cls(self), cls(other)
self_.normalize()
other.normalize()
return self_.tuple == other.tuple
def __setattr__(self, name, value):
if name.startswith('_'):
return super(URI, self).__setattr__(name, value)
if name == 'scheme' and value:
self.__class__ = self.SCHEMES.get(value if isinstance(value, bytes) else value.encode(), URI)
if name in self.slots:
if isinstance(value, bytes):
try:
value = value.decode('UTF-8')
except UnicodeDecodeError:
value = value.decode('ISO8859-1')
if value is None:
pass
elif not isinstance(value, Unicode):
raise TypeError('%r must be string, not %s' % (name, type(value).__name__))
super(URI, self).__setattr__(name, value)
def __repr__(self):
return '<URI(%s)>' % bytes(self)
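# Minimal usage sketch (mirrors the doctests in abspath() and __eq__ above):
if __name__ == '__main__':
    uri = URI(b'http://abc.com:80/~smith/home.html')
    print(uri.scheme, uri.host, uri.port, uri.path)
    print(uri == b'http://ABC.com/%7Esmith/home.html')  # True (see __eq__)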
|
# coding=utf-8
#pylint:disable=no-name-in-module
"""
Module for video source acquisition.
Classes capture data from a video source into a numpy array.
"""
import logging
import datetime
import cv2
import numpy as np
import sksurgerycore.utilities.validate_file as vf
import sksurgeryimage.utilities.camera_utilities as cu
LOGGER = logging.getLogger(__name__)
class TimestampedVideoSource:
"""
Capture and store data from camera/file source.
    Augments cv2.VideoCapture() so that camera dimensions can be passed to the
    constructor and frame data is stored on the object.
"""
def __init__(self, source_num_or_file, dims=None):
"""
Constructs a TimestampedVideoSource.
:param source_num_or_file: integer camera number or file path
:param dims: optional (width, height) as a pair of integers
"""
self.source = cv2.VideoCapture(source_num_or_file)
self.timestamp = None
if not self.source.isOpened():
raise RuntimeError("Failed to open Video camera:"
+ str(source_num_or_file))
self.source_name = source_num_or_file
LOGGER.info("Adding input from source: %s", self.source_name)
if dims:
width, height = dims
if not isinstance(width, int):
raise TypeError("Width must be an integer")
if not isinstance(height, int):
raise TypeError("Height must be an integer")
if width < 1:
raise ValueError("Width must be >= 1")
if height < 1:
raise ValueError("Height must be >= 1")
self.set_resolution(width, height)
else:
width = int(self.source.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(self.source.get(cv2.CAP_PROP_FRAME_HEIGHT))
LOGGER.info("Source dimensions %s %s", width, height)
self.frame = np.empty((height, width, 3), dtype=np.uint8)
self.ret = None
def set_resolution(self, width: int, height: int):
"""Set the resolution of the input source.
:param width: Width
:type width: int
:param height: Height
:type height: int
:raises ValueError: If resolution is not supported.
"""
self.source.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.source.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
set_w = self.source.get(cv2.CAP_PROP_FRAME_WIDTH)
set_h = self.source.get(cv2.CAP_PROP_FRAME_HEIGHT)
if set_w != width or set_h != height:
raise ValueError(f"Tried to set width/height to {width} x {height} \
but failed. Width and height set to {set_w} x {set_h}")
def grab(self):
"""
Call the cv2.VideoCapture grab function and get a timestamp.
"""
self.ret = self.source.grab()
self.timestamp = datetime.datetime.now()
return self.ret
def retrieve(self):
"""
Call the cv2.VideoCapture retrieve function and
store the returned frame.
"""
self.ret, self.frame = self.source.retrieve()
return self.ret, self.frame
def read(self):
"""
Do a grab(), then retrieve() operation.
"""
self.grab()
self.retrieve()
return self.ret, self.frame
def isOpened(self):
""" Call the cv2.VideoCapture isOpened function.
"""
# pylint: disable=invalid-name
# using isOpened to be consistent with OpenCV function name
return self.source.isOpened()
def release(self):
"""
Release the cv2.VideoCapture source.
"""
self.source.release()
class VideoSourceWrapper:
"""
Wrapper for multiple TimestampedVideoSource objects.
"""
def __init__(self):
self.sources = []
self.frames = []
self.timestamps = []
self.save_timestamps = True
self.num_sources = 0
def add_camera(self, camera_number, dims=None):
"""
Create VideoCapture object from camera and add it to the list
of sources.
:param camera_number: integer camera number
:param dims: (width, height) as integer numbers of pixels
"""
cu.validate_camera_input(camera_number)
self.add_source(camera_number, dims)
def add_file(self, filename, dims=None):
"""
        Create VideoCapture object from file and add it to the list of sources.
:param filename: a string containing a valid file path
:param dims: (width, height) as integer numbers of pixels
"""
vf.validate_is_file(filename)
self.add_source(filename, dims)
def add_source(self, camera_num_or_file, dims=None):
"""
Add a video source (camera or file) to the list of sources.
:param camera_num_or_file: either an integer camera number or filename
:param dims: (width, height) as integer numbers of pixels
"""
video_source = TimestampedVideoSource(camera_num_or_file, dims)
self.sources.append(video_source)
self.num_sources = len(self.sources)
def are_all_sources_open(self):
"""
Check all input sources are active/open.
"""
for source in self.sources:
if not source.isOpened():
return False
return True
def release_all_sources(self):
"""
Close all camera/file sources.
"""
logging.info("Releasing video sources")
for source in self.sources:
source.release()
def get_next_frames(self):
"""
Do a grab() operation for each source,
followed by a retrieve().
"""
self.grab()
self.retrieve()
def grab(self):
"""
Perform a grab() operation for each source
"""
if self.are_all_sources_open():
for source in self.sources:
source.grab()
def retrieve(self):
"""
Perform a retrieve operation for each source.
Should only be run after a grab() operation.
:returns list of views on frames
"""
self.frames = []
for source in self.sources:
source.retrieve()
self.frames.append(source.frame)
return self.frames
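# Minimal usage sketch (assumes a webcam is available at index 0; the file name is
# illustrative):
if __name__ == "__main__":
    wrapper = VideoSourceWrapper()
    wrapper.add_camera(0)              # or wrapper.add_file("video.avi")
    if wrapper.are_all_sources_open():
        wrapper.get_next_frames()      # grab() then retrieve() on every source
        frames = wrapper.frames        # list of numpy frame arrays, one per source
    wrapper.release_all_sources()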
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import functools
import math
import numpy as np
from collections import OrderedDict
import veriloggen as vg
import nngen.basic_types as bt
import nngen.util as util
class slice_(bt._Operator):
"""
Create a sliced tensor with a similar API to the numpy slice.
"""
input_chainable = False
output_chainable = False
def __sub_str__(self):
begins = str(self.begins)
ends = str(self.ends)
strides = str(self.strides)
par = ' par:%d' % self.par if self.par > 1 else ''
value_ram_size = (' value_ram_size:%d' % self.value_ram_size
if self.value_ram_size is not None else '')
out_ram_size = (' out_ram_size:%d' % self.out_ram_size
if self.out_ram_size is not None else '')
return (' begins:%s ends:%s strides:%s %s%s%s' %
(begins, ends, strides, par, value_ram_size, out_ram_size))
def __init__(self, value, begins, ends, strides,
dtype=None, name=None, par=1,
value_ram_size=None, out_ram_size=None):
if not isinstance(begins, (tuple, list)):
raise TypeError('begins must be tuple or list.')
if not isinstance(ends, (tuple, list)):
raise TypeError('ends must be tuple or list.')
if not isinstance(strides, (tuple, list)):
raise TypeError('strides must be tuple or list.')
if len(value.shape) != len(begins):
raise ValueError('length mismatch between value.shape and begins: %d != %d' %
(len(value.shape), len(begins)))
if len(value.shape) != len(ends):
raise ValueError('length mismatch between value.shape and ends: %d != %d' %
(len(value.shape), len(ends)))
if len(value.shape) != len(strides):
raise ValueError('length mismatch between value.shape and strides: %d != %d' %
(len(value.shape), len(strides)))
for begin in begins:
begin = int(begin)
if not isinstance(begin, int):
raise TypeError('values of begins must be int, not %s' % str(type(begin)))
for end in ends:
end = int(end)
if not isinstance(end, int):
raise TypeError('values of ends must be int, not %s' % str(type(end)))
for stride in strides:
stride = int(stride)
if not isinstance(stride, int):
raise TypeError('values of strides must be int, not %s' % str(type(stride)))
if strides[-1] != 1 and par != 1:
raise ValueError("par must be 1 when strides[-1] is not 1")
if value_ram_size is not None and value_ram_size < 1:
raise ValueError('value_ram_size must be greater than 0')
if out_ram_size is not None and out_ram_size < 1:
raise ValueError('out_ram_size must be greater than 0')
# delegate a shape calculation to numpy
slices = to_slices(begins, ends, strides)
shape = np.zeros(value.shape)[slices].shape
bt._Operator.__init__(self, value,
dtype=dtype, shape=shape, name=name, par=par)
self.begins = tuple(begins)
self.ends = tuple(ends)
self.strides = tuple(strides)
# attribute
self.value_ram_size = value_ram_size
self.out_ram_size = out_ram_size
slice_.attribute(self, par, value_ram_size, out_ram_size)
def attribute(self, par=None, value_ram_size=None, out_ram_size=None):
if par is not None:
if (par - 1) & par != 0:
raise ValueError('par must be power of 2.')
self.par = par
for arg in self.args:
arg.add_alignment_request(self.par)
self.add_alignment_request(self.par)
if value_ram_size is not None:
if value_ram_size < 1:
raise ValueError('value_ram_size must be greater than 0')
self.value_ram_size = value_ram_size
if out_ram_size is not None:
if out_ram_size < 1:
raise ValueError('out_ram_size must be greater than 0')
self.out_ram_size = out_ram_size
def get_required_rams(self):
act = self.args[0]
act_shape = act.get_aligned_shape()
out_shape = self.get_aligned_shape()
input_min_size = ((act_shape[-1] // self.par) *
(act_shape[-2] if len(act_shape) > 1 else 1) * 2)
if self.value_ram_size is not None and input_min_size < self.value_ram_size:
input_min_size = self.value_ram_size
input_width = act.get_ram_width() * self.par
output_min_size = ((out_shape[-1] // self.par) *
(out_shape[-2] if len(out_shape) > 1 else 1) * 2)
if self.out_ram_size is not None and output_min_size < self.out_ram_size:
output_min_size = self.out_ram_size
output_width = self.get_ram_width() * self.par
inputs = []
inputs.append((input_width, input_min_size))
outputs = []
outputs.append((output_width, output_min_size))
temps = []
return inputs, outputs, temps
def get_stream_hash(self):
base = bt._Operator.get_stream_hash(self)
rank = len(self.shape)
return (base, rank, self.par)
def get_stream_func(self):
def func(strm):
arg = self.args[0]
datawidth = arg.get_op_width()
vec_datawidth = datawidth * self.par
point = arg.get_op_point()
signed = arg.get_signed()
vec_act_var = strm.source(datawidth=vec_datawidth, signed=False)
strm.sink(vec_act_var)
return func
def get_control_param_values(self):
act = self.args[0]
act_shape = act.get_aligned_shape()
act_num_ch = act_shape[-1]
out_shape = self.get_aligned_shape()
out_num_ch = out_shape[-1]
act_offset_base = bt.to_byte(act_num_ch * act.get_ram_width())
act_offset_begins = []
act_offset_strides = []
for i, (begin, stride) in enumerate(zip(reversed(self.begins[:-2]), reversed(self.strides[:-2]))):
mul = functools.reduce(lambda x, y: x * y, act_shape[-i - 2:-1], 1)
act_offset_begin = act_offset_base * mul * begin
act_offset_begins.append(act_offset_begin)
act_offset_stride = act_offset_base * mul * stride
act_offset_strides.append(act_offset_stride)
act_offset_begins.reverse()
act_offset_strides.reverse()
act_read_size = ((act_num_ch // self.par) *
(act_shape[-2] if len(act_shape) > 1 else 1))
out_offset_base = bt.to_byte(out_num_ch * self.get_ram_width())
out_offset_strides = []
for i in range(len(out_shape) - 2):
mul = functools.reduce(lambda x, y: x * y, out_shape[-i - 2:-1], 1)
out_offset_stride = out_offset_base * mul
out_offset_strides.append(out_offset_stride)
out_offset_strides.reverse()
out_write_size = ((out_num_ch // self.par) *
(out_shape[-2] if len(out_shape) > 1 else 1))
stream_size = out_num_ch // self.par
if len(self.strides) > 1:
stream_stride = self.strides[-2] * (act_num_ch // self.par)
stream_local = self.begins[-2] * (act_num_ch // self.par) + self.begins[-1]
else:
stream_stride = 0
stream_local = self.begins[-1]
return OrderedDict([('act_shape', act_shape),
('out_shape', out_shape),
('act_begins', self.begins),
('act_strides', self.strides),
('act_offset_begins', act_offset_begins),
('act_offset_strides', act_offset_strides),
('act_read_size', act_read_size),
('out_offset_strides', out_offset_strides),
('out_write_size', out_write_size),
('stream_size', stream_size),
('stream_stride', stream_stride),
('stream_local', stream_local)])
def control_sequence(self, fsm):
act_ram = self.input_rams[0]
out_ram = self.output_rams[0]
act_base_offset = self.m.Wire(self._name('act_base_offset'),
self.maxi.addrwidth, signed=True)
act_offsets = [self.m.Reg(self._name('act_offset_%d' % i),
self.maxi.addrwidth, initval=0, signed=True)
for i, _ in enumerate(self.act_shape[:-2])]
if act_offsets:
v = act_offsets[0]
for act_offset in act_offsets[1:]:
v += act_offset
act_base_offset.assign(v)
else:
act_base_offset.assign(0)
out_base_offset = self.m.Wire(self._name('out_base_offset'),
self.maxi.addrwidth, signed=True)
out_offsets = [self.m.Reg(self._name('out_offset_%d' % i),
self.maxi.addrwidth, initval=0, signed=True)
for i, _ in enumerate(self.out_shape[:-2])]
if out_offsets:
v = out_offsets[0]
for out_offset in out_offsets[1:]:
v += out_offset
out_base_offset.assign(v)
else:
out_base_offset.assign(0)
counts = [self.m.Reg(self._name('count_%d' % i),
self.maxi.addrwidth, initval=0)
for i, _ in enumerate(self.act_shape[:-2])]
prev_counts = [self.m.Reg(self._name('prev_count_%d' % i),
self.maxi.addrwidth, initval=0)
for i, _ in enumerate(self.act_shape[:-2])]
stream_act_local = self.m.Reg(self._name('stream_act_local'),
self.maxi.addrwidth, initval=0)
stream_out_local = self.m.Reg(self._name('stream_out_local'),
self.maxi.addrwidth, initval=0)
comp_count = self.m.Reg(self._name('comp_count'),
self.maxi.addrwidth, initval=0)
out_count = self.m.Reg(self._name('out_count'),
self.maxi.addrwidth, initval=0)
act_page = self.m.Reg(self._name('act_page'), initval=0)
act_page_comp_offset = self.m.Reg(self._name('act_page_comp_offset'),
self.maxi.addrwidth, initval=0)
act_page_dma_offset = self.m.Reg(self._name('act_page_dma_offset'),
self.maxi.addrwidth, initval=0)
out_page = self.m.Reg(self._name('out_page'), initval=0)
out_page_comp_offset = self.m.Reg(self._name('out_page_comp_offset'),
self.maxi.addrwidth, initval=0)
out_page_dma_offset = self.m.Reg(self._name('out_page_dma_offset'),
self.maxi.addrwidth, initval=0)
act_page_size = act_ram.length // 2
out_page_size = out_ram.length // 2
skip_read_act = self.m.Reg(self._name('skip_read_act'), initval=0)
skip_comp = self.m.Reg(self._name('skip_comp'), initval=0)
skip_write_out = self.m.Reg(self._name('skip_write_out'), initval=0)
# --------------------
# initialization phase
# --------------------
# ReadAct: offset
for act_offset, act_offset_begin in zip(act_offsets, self.act_offset_begins):
fsm(
act_offset(act_offset_begin)
)
# ReadAct: double buffer control
fsm(
act_page(0),
act_page_comp_offset(0),
act_page_dma_offset(0)
)
# WriteOutput: offset
for out_offset in out_offsets:
fsm(
out_offset(0)
)
out_offset = out_base_offset
# WriteOutput: double buffer control
fsm(
out_page(0),
out_page_comp_offset(0),
out_page_dma_offset(0)
)
# counter
fsm(
[count(0) for count in counts],
[prev_count(0) for prev_count in prev_counts]
)
# double buffer control
fsm(
skip_read_act(0),
skip_comp(0),
skip_write_out(1)
)
fsm(
out_count(0)
)
state_init = fsm.current
fsm.goto_next()
# --------------------
# ReadAct phase
# --------------------
state_read_act = fsm.current
act_gaddr = self.arg_objaddrs[0] + act_base_offset
bt.bus_lock(self.maxi, fsm)
act_laddr = act_page_dma_offset
begin_state_read = fsm.current
fsm.goto_next()
bt.dma_read(self.maxi, fsm, act_ram, act_laddr,
act_gaddr, self.act_read_size, port=1)
end_state_read = fsm.current
# --------------------
# Comp phase
# --------------------
state_comp = fsm.current
# Stream Control FSM
comp_fsm = vg.FSM(self.m, self._name('comp_fsm'), self.clk, self.rst)
comp_state_init = comp_fsm.current
comp_fsm.If(fsm.state == state_comp, vg.Not(skip_comp)).goto_next()
fsm.If(comp_fsm.state == comp_state_init).goto_next()
# waiting for previous DMA write completion
bt.dma_wait_write_idle(self.maxi, comp_fsm)
# local address
comp_fsm(
stream_act_local(self.stream_local),
stream_out_local(0)
)
act_page_comp_offset_buf = self.m.Reg(self._name('act_page_comp_offset_buf'),
self.maxi.addrwidth, initval=0)
out_page_comp_offset_buf = self.m.Reg(self._name('out_page_comp_offset_buf'),
self.maxi.addrwidth, initval=0)
comp_fsm(
act_page_comp_offset_buf(act_page_comp_offset),
out_page_comp_offset_buf(out_page_comp_offset)
)
comp_fsm.goto_next()
# busy check
self.stream.source_join(comp_fsm)
# set_source
name = list(self.stream.sources.keys())[0]
local = stream_act_local + act_page_comp_offset_buf
if len(self.out_shape) > 1:
pat = ((self.stream_size, self.act_strides[-1]),
(self.out_shape[-2], self.stream_stride))
else:
pat = ((self.stream_size, self.act_strides[-1]),)
self.stream.set_source_pattern(comp_fsm, name, act_ram,
local, pat)
comp_fsm.set_index(comp_fsm.current - 1)
# set_sink
name = list(self.stream.sinks.keys())[0]
local = stream_out_local + out_page_comp_offset_buf
if len(self.out_shape) > 1:
pat = ((self.stream_size, 1),
(self.out_shape[-2], self.stream_size))
else:
pat = ((self.stream_size, 1),)
self.stream.set_sink_pattern(comp_fsm, name, out_ram,
local, pat)
# stream run (async)
self.stream.run(comp_fsm)
comp_fsm.goto_init()
# sync with WriteOut control
comp_fsm.seq.If(fsm.state == state_init)(
comp_count(0)
)
comp_fsm.seq.If(self.stream.source_stop)(
comp_count.inc()
)
# --------------------
# WriteOut phase
# --------------------
state_write_out = fsm.current
# sync with Comp control
fsm.If(comp_count > out_count).goto_next()
out_laddr = out_page_dma_offset
out_gaddr = self.objaddr + out_offset
bt.bus_lock(self.maxi, fsm)
bt.dma_write(self.maxi, fsm, out_ram, out_laddr,
out_gaddr, self.out_write_size, port=1, use_async=True)
bt.bus_unlock(self.maxi, fsm)
fsm(
out_count.inc()
)
fsm.goto_next()
state_write_out_end = fsm.current
fsm.If(skip_write_out).goto_from(state_write_out, state_write_out_end)
# --------------------
# update for next iteration
# --------------------
# ReadAct: count
cond = None
for size, count in zip(reversed(self.out_shape[:-2]), reversed(counts)):
fsm.If(cond)(
count.inc()
)
fsm.If(cond, count >= size - 1)(
count(0)
)
if cond is not None:
cond = vg.Ands(cond, count >= size - 1)
else:
cond = count >= size - 1
# ReadAct: offset
cond = None
for size, count, act_offset, act_offset_stride in zip(
reversed(self.out_shape[:-2]), reversed(counts),
reversed(act_offsets), reversed(self.act_offset_strides)):
fsm.If(cond)(
act_offset.add(act_offset_stride)
)
fsm.If(cond, count >= size - 1)(
act_offset(0)
)
if cond is not None:
cond = vg.Ands(cond, count >= size - 1)
else:
cond = count >= size - 1
# ReadAct and Comp: double buffer
fsm.If(vg.Not(act_page))(
act_page_comp_offset(act_page_size),
act_page_dma_offset(act_page_size),
act_page(1)
)
fsm.If(act_page)(
act_page_comp_offset(0),
act_page_dma_offset(0),
act_page(0)
)
# WriteOut: offset
cond = vg.Not(skip_write_out)
for size, prev_count, out_offset, out_offset_stride in zip(
reversed(self.out_shape[:-2]), reversed(prev_counts),
reversed(out_offsets), reversed(self.out_offset_strides)):
fsm.If(cond)(
out_offset.add(out_offset_stride)
)
fsm.If(cond, prev_count >= size - 1)(
out_offset(0)
)
cond = vg.Ands(cond, prev_count >= size - 1)
# WriteOut and Comp: double buffer
fsm.If(vg.Not(out_page))(
out_page_comp_offset(out_page_size),
out_page_dma_offset(0),
out_page(1)
)
fsm.If(out_page)(
out_page_comp_offset(0),
out_page_dma_offset(out_page_size),
out_page(0)
)
# ReadAct and WriteOut: prev
for count, prev_count in zip(counts, prev_counts):
fsm(
prev_count(count)
)
# ReadAct, Comp, WriteOut: skip
cond_skip_read_act = None
cond_skip_comp = None
for size, count in zip(reversed(self.out_shape[:-2]), reversed(counts)):
if cond_skip_read_act is not None:
cond_skip_read_act = vg.Ands(cond_skip_read_act, count >= size - 1)
else:
cond_skip_read_act = count >= size - 1
cond_skip_comp = cond_skip_read_act
cond_cancel_write_out = None
for size, prev_count in zip(reversed(self.out_shape[:-2]), reversed(prev_counts)):
if cond_cancel_write_out is not None:
cond_cancel_write_out = vg.Ands(cond_cancel_write_out, prev_count == 0)
else:
cond_cancel_write_out = prev_count == 0
cond_done = None
for size, prev_count in zip(reversed(self.out_shape[:-2]), reversed(prev_counts)):
if cond_done is not None:
cond_done = vg.Ands(cond_done, prev_count >= size - 1)
else:
cond_done = prev_count >= size - 1
fsm.If(cond_skip_read_act)(
skip_read_act(1)
)
fsm.If(cond_skip_comp)(
skip_comp(1)
)
fsm.If(skip_write_out,
cond_cancel_write_out)(
skip_write_out(0)
)
fsm.goto(state_read_act)
fsm.If(vg.Not(skip_write_out), cond_done).goto_next()
# wait for last DMA write
bt.dma_wait_write(self.maxi, fsm)
def eval(self, memo, input_dict, **kwargs):
if id(self) in memo:
return memo[id(self)]
import nngen.verify as verify
name = self.__class__.__name__
args = [arg.eval(memo, input_dict)
for arg in self.args]
value = args[0]
kwargs['begins'] = self.begins
kwargs['ends'] = self.ends
kwargs['strides'] = self.strides
kwargs['dtype'] = self.dtype
kwargs['name'] = self.name
kwargs['par'] = self.par
method = self.get_eval_method()
ret = method(value, **kwargs)
memo[id(self)] = ret
return ret
def to_slices(begins, ends, strides):
slices = []
for begin, end, stride in zip(begins, ends, strides):
slices.append(slice(begin, end, stride))
return tuple(slices)
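# Example (plain-Python helper above): to_slices((0, 1), (4, 5), (1, 2)) returns
# (slice(0, 4, 1), slice(1, 5, 2)), the numpy-style index that slice_.__init__ uses
# to delegate the output-shape calculation to numpy.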
|
# before changing tasks, task, activity_type to task
from django.views.generic.list import ListView
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import TemplateView
from django import forms
from avs.models import *
from avs.forms import *
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.forms.models import inlineformset_factory
from django.db.models import Sum, Max, Min, Q
import datetime
from datetime import timedelta
import time
import re
from datetime import datetime as from_datetime
from django.template import RequestContext
from django.views.decorators.csrf import csrf_protect
# Summary - Fire
@login_required
def firesummary(request, str_date_to=None, str_date_from=None):
state = ''
state_type = ''
table = ''
header = ''
if request.method == 'POST': # If the form has been submitted...
drForm = DateRangeForm(request.POST) # A form bound to the POST data
if drForm.is_valid(): # All validation rules pass
str_date_from = request.POST['date_from']
str_date_to = request.POST['date_to']
# Get Filtered Aircraft Flight Logs
if str_date_to is None:
date_to = from_datetime.strptime(
"30/06/" + str(datetime.date.today().year), "%d/%m/%Y")
else:
date_to = from_datetime.strptime(str_date_to, "%d/%m/%Y")
if str_date_from is None:
date_from = from_datetime.strptime(
"01/07/" + str(datetime.date.today().year - 1), "%d/%m/%Y")
else:
date_from = from_datetime.strptime(str_date_from, "%d/%m/%Y")
str_date_to = date_to.strftime("%d/%m/%Y")
str_date_from = date_from.strftime("%d/%m/%Y")
data = {'date_to': str_date_to, 'date_from': str_date_from}
drForm = DateRangeForm(data)
if date_from > date_to:
state = 'Date From must be less than Date To'
state_type = 'Warning'
else:
fire_flightlogs = AircraftFlightLogDetail.objects.exclude(
fire_number='').exclude(
fire_number=None).filter(
aircraft_flight_log__date__gte=date_from).filter(
aircraft_flight_log__date__lte=date_to)
logs_2000 = AircraftFlightLogDetail.objects
header = '<th>Flight Log Number</th><th>Date</th><th>Task</th><th>Fire Number</th><th>Job Number</th><th>Datcon</th>'
table = ''
for fl in fire_flightlogs:
table = table + '<tr>'
table = table + '<td>' + '<a href="' + str(fl.aircraft_flight_log.get_absolute_url(
)) + '">' + str(fl.aircraft_flight_log.flight_log_number) + '</a>' + '</td>'
#table = table + '<td>' + str(fl.aircraft_flight_log.flight_log_number) + '</td>'
table = table + '<td>' + \
str(fl.aircraft_flight_log.date.strftime("%d/%m/%Y")) + '</td>'
if fl.task is None:
table = table + '<td></td>'
else:
table = table + '<td>' + fl.task.name + '</td>'
table = table + '<td>' + fl.fire_number + '</td>'
table = table + '<td>' + fl.job_number + '</td>'
table = table + '<td>' + str(fl.datcon) + '</td>'
table = table + '</tr>'
return render_to_response("summaryfire.html",
{'table': table,
'header': header,
'drForm': drForm,
'state': state,
'state_type': state_type,
'pagetitle': 'Datcon Fire Summary'},
context_instance=RequestContext(request))
def createSummaryFlightHTML(aircraft_post, task_post, pilot_post):
task = task_post.order_by("name")
# ----- Create Header Row --------
header = "<th>Year</th>"
footer = '<th></th>'
header_size = 0
for a in task:
header = header + "<th>" + a.name + "</th>"
footer = footer + '<th></th>'
header_size = header_size + 1
header = header + "<th>Total</th>"
header_size = header_size + 2
footer = footer + '<th></th>'
table = ''
# -------------
'''
logs_2000 = AircraftFlightLogDetail.objects.filter(aircraft_flight_log__date__gte=date_from).filter(aircraft_flight_log__date__lte=date_to) \
.filter(Q(pilot_in_command__in = pilot_ids) | Q(pilot_in_command_under_supervision__in = pilot_ids))
'''
flightlogs = AircraftFlightLogDetail.objects.filter(
aircraft_flight_log__aircraft__in=aircraft_post)
flightlogs = flightlogs.filter(Q(pilot_in_command__in=pilot_post) | Q(
pilot_in_command_under_supervision__in=pilot_post))
#flightlogs = AircraftFlightLogDetail.objects.extra(where=["1=0"])
# if (flightlogs) = 0:
# if len(aircraft) != 0 and len(flightlogs) != 0:
if flightlogs:
# Determine start and finish date for years
maxYear = flightlogs.aggregate(Max('aircraft_flight_log__date'))[
'aircraft_flight_log__date__max'].year
minYear = flightlogs.aggregate(Min('aircraft_flight_log__date'))[
'aircraft_flight_log__date__min'].year
# Create Table with Sum of months
table = ''
year = minYear - 1
while year <= maxYear:
table = table + '<tr>'
table += '<td>' + str(year) + '/' + str(year + 1) + '</td>'
date_from = from_datetime.strptime(
"01/07/" + str(year), "%d/%m/%Y")
date_to = from_datetime.strptime(
"30/06/" + str(year + 1), "%d/%m/%Y")
fl_year = flightlogs.filter(
aircraft_flight_log__date__gte=date_from).filter(
aircraft_flight_log__date__lte=date_to)
total = 0
for a in task:
sum_datcon = 0
fl_year_task = fl_year.filter(task=a)
sum_datcon = fl_year_task.aggregate(Sum('datcon'))[
'datcon__sum']
if sum_datcon is None:
sum_datcon = 0
total += sum_datcon
table = table + '<td>' + str(sum_datcon) + '</td>'
table = table + '<td>' + str(total) + '</td>'
table = table + '</tr>'
year += 1
#header_size = header_size + 1
return (header, table, footer, header_size)
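# Illustrative call (the Aircraft/Task/Pilot model names are assumptions inferred from
# the filters above; any iterable of the corresponding model instances works):
#   header, table, footer, header_size = createSummaryFlightHTML(
#       Aircraft.objects.all(), Task.objects.all(), Pilot.objects.all())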
# Summary - Flight Task
@login_required
def flightsummary(request):
aircraft_post = []
task_post = []
pilot_post = []
if request.method == 'POST': # If the form has been submitted...
drForm = FieldFilterForm(request.POST) # A form bound to the POST data
if drForm.is_valid(): # All validation rules pass
aircraft_post = drForm.cleaned_data['aircraft']
task_post = drForm.cleaned_data['task']
pilot_post = drForm.cleaned_data['pilot']
aircraft_ids = []
for x in aircraft_post:
aircraft_ids.append(x.id)
task_ids = []
for x in task_post:
task_ids.append(x.id)
pilot_ids = []
for x in pilot_post:
pilot_ids.append(x.id)
if len(aircraft_ids) == 0 or len(pilot_ids) == 0 or len(task_ids) == 0:
header = ''
table = ''
footer = ''
header_size = 0
state = 'Please select a minimum of 1 item from each select list.'
state_type = 'Warning'
else:
#header, table, footer, header_size = createSummaryPilotHTML(pilot_post, task_post, logs_2000, 'command')
#header, table, footer, header_size = createSummaryAircraftHTML(logs_2000, aircraft_post, task_post, )
header, table, footer, header_size = createSummaryFlightHTML(
aircraft_post, task_post, pilot_post)
state = ''
state_type = ''
default_data = {
'aircraft': aircraft_ids,
'pilot': pilot_ids,
'task': task_ids}
drForm = FieldFilterForm(default_data)
#header, table, footer, header_size = createSummaryFlightHTML(aircraft_post)
return render_to_response("summaryflight.html",
{'pagetitle': 'Datcon Flight Summary',
'table': table,
'header': header,
'footer': footer,
'header_size': header_size,
'drForm': drForm,
'state': state,
'state_type': state_type},
context_instance=RequestContext(request))
def createSummaryPilotHTML(pilots, tasks, flightlogs, pilot_type):
time_start = time.time()
#tasks = Task.objects.all().order_by("name")
# ----- Create Header Row --------
header = "<th>Pilot</th>"
footer = '<th></th>'
header_size = 0
for a in tasks:
header = header + "<th>" + a.name + "</th>"
footer = footer + '<th></th>'
header_size = header_size + 1
# Add 2 more onto header size. 1 because javascript counts from 0 and
# another for the total on the end.
header_size = header_size + 2
header = header + "<th>Total</th>"
footer = footer + '<th></th>'
# -------------
table = ''
time_middle = time.time()
time_total = time_middle - time_start
# print 'Create Header: ' +str(time_total)
for p in pilots:
time_section = time.time()
# Start New Row
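        # Snapshot the table so this row can be discarded later if the pilot
        # has no hours for any of the selected tasks.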
temp_table = table
table = table + '<tr>'
if pilot_type == 'command':
logs_2000_pilot = flightlogs.filter(pilot_in_command=p)
elif pilot_type == 'training':
logs_2000_pilot = flightlogs.filter(
pilot_in_command_under_supervision=p)
table = table + '<td>' + p.first_name + " " + p.last_name + '</td>'
total = 0
for a in tasks:
sum_datcon = 0
logs_2000_task = logs_2000_pilot.filter(task=a)
#total += logs_2000_task.count()
sum_datcon = logs_2000_task.aggregate(Sum('datcon'))['datcon__sum']
# print sum_datcon
if sum_datcon is None:
sum_datcon = 0
total += sum_datcon
# print a.name + ": " + str(logs_2000_task.count())
table = table + '<td>' + str(sum_datcon) + '</td>'
if total == 0:
table = temp_table
else:
table = table + '<td>' + str(total) + '</td>'
# End Row
table = table + '</tr>'
time_middle = time.time()
time_total = time_middle - time_section
# print 'Create Row: ' + p.first_name + ' - ' +str(time_total)
time_total = time_middle - time_start
# print 'Create Row: ' + p.first_name + ' - ' +str(time_total)
time_end = time.time()
time_total = time_end - time_start
# print 'Total: ' +str(time_total)
return (header, table, footer, header_size)
# Summary - Training Pilot Task
@login_required
def trainingpilotsummary(request, str_date_to=None, str_date_from=None,
aircraft_post=None, pilot_post=None, task_post=None):
# print '+++++++++++++++'
#time_start = time.time()
state = ''
state_type = ''
if request.method == 'POST': # If the form has been submitted...
# A form bound to the POST data
drForm = FieldFilterDRForm(request.POST)
if drForm.is_valid(): # All validation rules pass
str_date_from = request.POST['date_from']
str_date_to = request.POST['date_to']
aircraft_post = drForm.cleaned_data['aircraft']
pilot_post = drForm.cleaned_data['pilot']
task_post = drForm.cleaned_data['task']
# Get Filtered Aircraft Flight Logs
if str_date_to is None:
date_to = from_datetime.strptime(
"30/06/" + str(datetime.date.today().year), "%d/%m/%Y")
else:
date_to = from_datetime.strptime(str_date_to, "%d/%m/%Y")
if str_date_from is None:
date_from = from_datetime.strptime(
"01/07/" + str(datetime.date.today().year - 1), "%d/%m/%Y")
else:
date_from = from_datetime.strptime(str_date_from, "%d/%m/%Y")
str_date_to = date_to.strftime("%d/%m/%Y")
str_date_from = date_from.strftime("%d/%m/%Y")
table = ''
logs_2000 = AircraftFlightLogDetail.objects.extra(where=["1=0"])
aircraft_ids = []
if aircraft_post is not None:
logs_2000 = AircraftFlightLogDetail.objects.filter(
aircraft_flight_log__date__gte=date_from).filter(
aircraft_flight_log__date__lte=date_to) .filter(
aircraft_flight_log__aircraft__in=aircraft_post)
for x in aircraft_post:
aircraft_ids.append(x.id)
pilot_ids = []
if pilot_post is not None:
for p in pilot_post:
pilot_ids.append(p.id)
task_ids = []
if task_post is not None:
for a in task_post:
task_ids.append(a.id)
data = {
'date_to': str_date_to,
'date_from': str_date_from,
'aircraft': aircraft_ids,
'pilot': pilot_ids,
'task': task_ids}
drForm = FieldFilterDRForm(data)
if date_from < date_to:
if len(aircraft_ids) == 0 or len(pilot_ids) == 0 or len(task_ids) == 0:
header = ''
table = ''
footer = ''
header_size = 0
state = 'Please select a minimum of 1 item from each select list.'
state_type = 'Warning'
else:
header, table, footer, header_size = createSummaryPilotHTML(
pilot_post, task_post, logs_2000, 'training')
else:
header = ''
table = ''
footer = ''
header_size = 0
state = 'Date From must be less than Date To'
state_type = 'Warning'
#time_end = time.time()
#time_total = time_end - time_start
# print 'Total Time: ' +str(time_total)
return render_to_response(
"summarytrainingpilot.html",
{
'pagetitle': 'Datcon Pilot in Command Under Supervision Summary',
'table': table,
'drForm': drForm,
'header': header,
'footer': footer,
'header_size': header_size,
"state": state,
"state_type": state_type},
context_instance=RequestContext(request))
# Summary - Time
@login_required
def timesummary(request):
state = ''
state_type = ''
aircraft_post = []
task_post = []
pilot_post = []
if request.method == 'POST': # If the form has been submitted...
drForm = FieldFilterForm(request.POST) # A form bound to the POST data
if drForm.is_valid(): # All validation rules pass
aircraft_post = drForm.cleaned_data['aircraft']
task_post = drForm.cleaned_data['task']
pilot_post = drForm.cleaned_data['pilot']
aircraft_ids = []
task_ids = []
pilot_ids = []
for x in aircraft_post:
aircraft_ids.append(int(x.id))
for x in task_post:
task_ids.append(int(x.id))
for x in pilot_post:
pilot_ids.append(int(x.id))
flightlogs = AircraftFlightLogDetail.objects.extra(where=["1=0"])
if len(aircraft_ids) == 0 or len(pilot_ids) == 0 or len(task_ids) == 0:
header = ''
table = ''
footer = ''
header_size = 0
state = 'Please select a minimum of 1 item from each select list.'
state_type = 'Warning'
else:
flightlogs = AircraftFlightLogDetail.objects.filter(
aircraft_flight_log__aircraft__in=aircraft_ids)
flightlogs = flightlogs.filter(task__in=task_ids)
flightlogs = flightlogs.filter(Q(pilot_in_command__in=pilot_ids) | Q(
pilot_in_command_under_supervision__in=pilot_ids))
go = 1
    # Build the month list in financial-year order: July-December then
    # January-June, e.g. [(7, 'Jul'), (8, 'Aug'), ...]
month_choices = []
for i in range(7, 13):
month_choices.append((i, datetime.date(2008, i, 1).strftime('%b')))
for i in range(1, 7):
month_choices.append((i, datetime.date(2008, i, 1).strftime('%b')))
footer = '<th></th>'
# Create Table Header with Months
header = ''
header += '<th>Year</th>'
for month in month_choices:
header += '<th>' + month[1] + '</th>'
footer = footer + '<th></th>'
header += '<th>Total</th>'
footer = footer + '<th></th>'
table = ''
if flightlogs:
# print len(flightlogs)
# Determine start and finish date for years
maxYear = flightlogs.aggregate(Max('aircraft_flight_log__date'))[
'aircraft_flight_log__date__max'].year
minYear = flightlogs.aggregate(Min('aircraft_flight_log__date'))[
'aircraft_flight_log__date__min'].year
# print maxYear
# print minYear
# Create Table with Sum of months
year = minYear - 1
while year <= maxYear:
# print 'Row: ' + str(year)
table = table + '<tr>'
total = 0
table += '<td>' + str(year) + '/' + str(year + 1) + '</td>'
# July to Dec
fl_year = flightlogs.filter(
aircraft_flight_log__date__year=year)
# print len(fl_year)
counter = 0
while counter < 6:
fl_month = fl_year.filter(
aircraft_flight_log__date__month=month_choices[counter][0])
sum_datcon = fl_month.aggregate(
Sum('datcon'))['datcon__sum']
# print str(year) + " : " + str(month_choices[counter][0])
# + " : " + str(sum_datcon)
if sum_datcon is None:
sum_datcon = 0
table = table + '<td>' + str(sum_datcon) + '</td>'
total += sum_datcon
counter += 1
# Jan to June
fl_year = flightlogs.filter(
aircraft_flight_log__date__year=year + 1)
# print len(fl_year)
while counter < 12:
fl_month = fl_year.filter(
aircraft_flight_log__date__month=month_choices[counter][0])
sum_datcon = fl_month.aggregate(
Sum('datcon'))['datcon__sum']
# print str(year) + " : " + str(month_choices[counter][0])
# + " : " + str(sum_datcon)
if sum_datcon is None:
sum_datcon = 0
table = table + '<td>' + str(sum_datcon) + '</td>'
total += sum_datcon
counter += 1
table = table + '<td>' + str(total) + '</td>'
table = table + '</tr>'
year += 1
default_data = {
'aircraft': aircraft_ids,
'task': task_ids,
'pilot': pilot_ids}
drForm = FieldFilterForm(default_data)
return render_to_response("summarytime.html",
{'pagetitle': 'Datcon Time Summary',
'table': table,
'header': header,
'footer': footer,
'drForm': drForm,
'state': state,
'state_type': state_type},
context_instance=RequestContext(request))
def createSummaryAircraftHTML(flightlogs, aircraft_post, task_post):
# print flightlogs
task = task_post.order_by("name")
# ----- Create Header Row --------
header = '<th style="width: 80px;">Aircraft</th>'
footer = '<th></th>'
header_size = 0
for a in task:
header = header + "<th>" + a.name + "</th>"
footer = footer + '<th></th>'
header_size = header_size + 1
header = header + "<th>Total</th>"
footer = footer + '<th></th>'
# -------------
table = ''
for ac in aircraft_post:
# Start New Row
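        # Snapshot the table so this row can be discarded later if the
        # aircraft has no hours for any of the selected tasks.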
temp_table = table
table = table + '<tr>'
logs_2000_aircraft = flightlogs.filter(
aircraft_flight_log__aircraft=ac)
table = table + '<td>' + ac.name + '</td>'
total = 0
for a in task:
sum_datcon = 0
logs_2000_task = logs_2000_aircraft.filter(task=a)
sum_datcon = logs_2000_task.aggregate(Sum('datcon'))['datcon__sum']
if sum_datcon is None:
sum_datcon = 0
total += sum_datcon
# print a.name + ": " + str(logs_2000_task.count())
table = table + '<td>' + str(sum_datcon) + '</td>'
if total == 0:
table = temp_table
#table = table + '<td>'+ str(total) + '</td>'
else:
table = table + '<td>' + str(total) + '</td>'
# End Row
table = table + '</tr>'
# add 2, 1 because javascript starts at 0 and 1 for the total column
header_size = header_size + 2
return (header, table, footer, header_size)
# Summary - Aircraft Task
@login_required
def aircraftsummary(request, str_date_to=None, str_date_from=None):
pilot_post = []
task_post = []
aircraft_post = []
pilot_ids = []
aircraft_ids = []
task_ids = []
if request.method == 'POST': # If the form has been submitted...
# A form bound to the POST data
drForm = FieldFilterDRForm(request.POST)
if drForm.is_valid(): # All validation rules pass
str_date_from = request.POST['date_from']
str_date_to = request.POST['date_to']
#aircraft_post = request.POST['aircraft']
pilot_post = drForm.cleaned_data['pilot']
task_post = drForm.cleaned_data['task']
aircraft_post = drForm.cleaned_data['aircraft']
# Get Filtered Aircraft Flight Logs
if str_date_to is None:
date_to = from_datetime.strptime(
"30/06/" + str(datetime.date.today().year), "%d/%m/%Y")
else:
date_to = from_datetime.strptime(str_date_to, "%d/%m/%Y")
if str_date_from is None:
date_from = from_datetime.strptime(
"01/07/" + str(datetime.date.today().year - 1), "%d/%m/%Y")
else:
date_from = from_datetime.strptime(str_date_from, "%d/%m/%Y")
str_date_to = date_to.strftime("%d/%m/%Y")
str_date_from = date_from.strftime("%d/%m/%Y")
if date_from > date_to:
header = ''
table = ''
footer = ''
header_size = 0
state = 'Date From must be less than Date To'
state_type = 'Warning'
else:
logs_2000 = AircraftFlightLogDetail.objects.extra(where=["1=0"])
for x in pilot_post:
pilot_ids.append(x.id)
for x in aircraft_post:
aircraft_ids.append(x.id)
for x in task_post:
task_ids.append(x.id)
'''
logs_2000 = AircraftFlightLogDetail.objects.filter(aircraft_flight_log__date__gte=date_from).filter(aircraft_flight_log__date__lte=date_to) \
.filter(Q(pilot_in_command__in = pilot_post) | Q(pilot_in_command_under_supervision__in = pilot_post) | \
Q(task__in = task_post) | Q(aircraft_flight_log__aircraft__in = aircraft_post) )
'''
logs_2000 = AircraftFlightLogDetail.objects.filter(
aircraft_flight_log__date__gte=date_from).filter(
aircraft_flight_log__date__lte=date_to) .filter(
Q(
pilot_in_command__in=pilot_ids) | Q(
pilot_in_command_under_supervision__in=pilot_ids))
if len(aircraft_ids) == 0 or len(pilot_ids) == 0 or len(task_ids) == 0:
header = ''
table = ''
footer = ''
header_size = 0
state = 'Please select a minimum of 1 item from each select list.'
state_type = 'Warning'
else:
#header, table, footer, header_size = createSummaryPilotHTML(pilot_post, task_post, logs_2000, 'command')
header, table, footer, header_size = createSummaryAircraftHTML(
logs_2000, aircraft_post, task_post)
state = ''
state_type = ''
data = {
'date_to': str_date_to,
'date_from': str_date_from,
'pilot': pilot_ids,
'aircraft': aircraft_ids,
'task': task_ids}
drForm = FieldFilterDRForm(data)
#header, table, footer, header_size = createSummaryAircraftHTML(logs_2000, aircraft, task)
return render_to_response("summaryaircraft.html",
{'pagetitle': 'Datcon Aircraft Summary',
'table': table,
'drForm': drForm,
'header': header,
'footer': footer,
'header_size': header_size,
"state": state,
"state_type": state_type},
context_instance=RequestContext(request))
# Summary - Command Pilot Task
@login_required
def commandpilotsummary(request, str_date_to=None, str_date_from=None,
aircraft_post=None, pilot_post=None, task_post=None):
# print '+++++++++++++++'
#time_start = time.time()
if request.method == 'POST': # If the form has been submitted...
# A form bound to the POST data
drForm = FieldFilterDRForm(request.POST)
if drForm.is_valid(): # All validation rules pass
str_date_from = request.POST['date_from']
str_date_to = request.POST['date_to']
aircraft_post = drForm.cleaned_data['aircraft']
pilot_post = drForm.cleaned_data['pilot']
task_post = drForm.cleaned_data['task']
# Get Filtered Aircraft Flight Logs
if str_date_to is None:
date_to = from_datetime.strptime(
"30/06/" + str(datetime.date.today().year), "%d/%m/%Y")
else:
date_to = from_datetime.strptime(str_date_to, "%d/%m/%Y")
if str_date_from is None:
date_from = from_datetime.strptime(
"01/07/" + str(datetime.date.today().year - 1), "%d/%m/%Y")
else:
date_from = from_datetime.strptime(str_date_from, "%d/%m/%Y")
str_date_to = date_to.strftime("%d/%m/%Y")
str_date_from = date_from.strftime("%d/%m/%Y")
table = ''
logs_2000 = AircraftFlightLogDetail.objects.extra(where=["1=0"])
aircraft_ids = []
if aircraft_post is not None:
logs_2000 = AircraftFlightLogDetail.objects.filter(
aircraft_flight_log__date__gte=date_from).filter(
aircraft_flight_log__date__lte=date_to) .filter(
aircraft_flight_log__aircraft__in=aircraft_post)
for x in aircraft_post:
aircraft_ids.append(x.id)
pilot_ids = []
if pilot_post is not None:
for p in pilot_post:
pilot_ids.append(p.id)
task_ids = []
if task_post is not None:
for a in task_post:
task_ids.append(a.id)
data = {
'date_to': str_date_to,
'date_from': str_date_from,
'aircraft': aircraft_ids,
'pilot': pilot_ids,
'task': task_ids}
drForm = FieldFilterDRForm(data)
state = ''
state_type = ''
if date_from < date_to:
if len(aircraft_ids) == 0 or len(pilot_ids) == 0 or len(task_ids) == 0:
header = ''
table = ''
footer = ''
header_size = 0
state = 'Please select a minimum of 1 item from each select list.'
state_type = 'Warning'
else:
header, table, footer, header_size = createSummaryPilotHTML(
pilot_post, task_post, logs_2000, 'command')
else:
header = ''
table = ''
footer = ''
header_size = 0
state = 'Date From must be less than Date To'
state_type = 'Warning'
#time_end = time.time()
#time_total = time_end - time_start
# print 'Total Time: ' +str(time_total)
return render_to_response("summarycommandpilot.html",
{'pagetitle': 'Datcon Pilot in Command Summary',
'table': table,
'drForm': drForm,
'header': header,
'footer': footer,
'header_size': header_size,
"state": state,
"state_type": state_type},
context_instance=RequestContext(request))
# Pilots
@login_required
def pilotlist(request, state='', state_type=''):
queryset = Pilot.objects.all()
# return object_list(request, queryset = queryset, template_name =
# 'pilotlist.html', extra_context={'title':'Pilot List','pagetitle':'Pilot
    # List', 'state':state, 'state_type':state_type}) deprecated
view = ListView.as_view(template_name='pilotlist.html', queryset=queryset)
return view(
request,
extra_context={
'title': 'Pilot List',
'pagetitle': 'Pilot List',
'state': state,
'state_type': state_type})
@login_required
def pilotadd(request):
state = ''
state_type = ''
# print request.user
if request.method == 'POST':
form = PilotForm(data=request.POST)
if form.is_valid():
new_pilot = form.save(commit=False)
new_pilot.creator = request.user
new_pilot.modifer = request.user
new_pilot.save()
return redirect('pilotlist_saved')
else:
state = 'Warning - Pilot not valid'
state_type = 'Warning'
# return create_object(request, template_name = 'lookupadd.html',
# form_class = PilotForm, extra_context={'pagetitle':'Add
# Pilot','title':'Add Pilot','state':state, 'state_type':state_type})
    # deprecated
view = CreateView.as_view(
template_name='lookupadd.html',
form_class=PilotForm)
return view(
request,
extra_context={
'pagetitle': 'Add Pilot',
'title': 'Add Pilot',
'state': state,
'state_type': state_type})
class PilotUpdate(UpdateView):
# look at
    # https://ccbv.co.uk/projects/Django/1.4/django.views.generic.edit.UpdateView/
state = ''
state_type = ''
model = Pilot
form_class = PilotForm
template_name = 'lookupupdate.html'
extra_context = {
'pagetitle': 'Update Pilot',
'title': 'Update Pilot',
'state': state,
'state_type': state_type}
def get_success_url(self):
return reverse('pilotlist_saved')
def get_context_data(self, **kwargs):
context = super(PilotUpdate, self).get_context_data(**kwargs)
context.update(self.extra_context)
return context
    def form_invalid(self, form):
        context = self.get_context_data(form=form)
        context['state'] = 'Warning - Pilot not valid'
        context['state_type'] = 'Warning'
        return self.render_to_response(context)
@login_required
def pilotupdate(request, id):
state = ''
state_type = ''
pilot = Pilot.objects.get(id=id)
if request.method == 'POST':
form = PilotForm(data=request.POST, instance=pilot)
if form.is_valid():
new_pilot = form.save(commit=False)
new_pilot.creator = request.user
new_pilot.modifer = request.user
new_pilot.save()
return redirect('pilotlist_saved')
else:
state = 'Warning - Pilot not valid'
state_type = 'Warning'
    # return update_object(request, object_id = id, model = Pilot,
    # template_name = 'lookupupdate.html', form_class = PilotForm,
    # extra_context={'pagetitle':'Update Pilot','title':'Update
    # Pilot','state':state, 'state_type':state_type}) deprecated
    view = PilotUpdate.as_view()
    return view(request, pk=id)
# Aircraft
@login_required
def aircraftlist(request, state='', state_type=''):
queryset = Aircraft.objects.all()
# return object_list(request, queryset = queryset, template_name =
# 'lookuplist.html', extra_context={'title':'Aircraft List',
# 'state':state, 'state_type':state_type,'pagetitle':'Aircraft List'})
    # deprecated
view = ListView.as_view(template_name='lookuplist.html', queryset=queryset)
return view(
request,
extra_context={
'title': 'Aircraft List',
'state': state,
'state_type': state_type,
'pagetitle': 'Aircraft List'})
@login_required
def aircraftadd(request):
# print request.user
state = ''
state_type = ''
if request.method == 'POST':
form = AircraftForm(data=request.POST)
if form.is_valid():
new_aircraft = form.save(commit=False)
new_aircraft.creator = request.user
new_aircraft.modifer = request.user
new_aircraft.save()
return redirect('aircraftlist_saved')
else:
state = 'Warning - Aircraft not valid'
state_type = 'Warning'
# return create_object(request, template_name = 'lookupadd.html',
# form_class = AircraftForm, extra_context={'title':'Add Aircraft',
# 'state':state, 'state_type':state_type, 'pagetitle':'Add Aircraft'})
    # deprecated
view = CreateView.as_view(
template_name='lookupadd.html',
form_class=AircraftForm)
return view(
request,
extra_context={
'title': 'Add Aircraft',
'state': state,
'state_type': state_type,
'pagetitle': 'Add Aircraft'})
class AircraftUpdate(UpdateView):
# look at
    # https://ccbv.co.uk/projects/Django/1.4/django.views.generic.edit.UpdateView/
state = ''
state_type = ''
model = Aircraft
form_class = AircraftForm
template_name = 'lookupupdate.html'
extra_context = {
'pagetitle': 'Update Aircraft',
'title': 'Update Aircraft',
'state': state,
'state_type': state_type}
def get_success_url(self):
return reverse('aircraftlist_saved')
def get_context_data(self, **kwargs):
context = super(AircraftUpdate, self).get_context_data(**kwargs)
context.update(self.extra_context)
return context
    def form_invalid(self, form):
        context = self.get_context_data(form=form)
        context['state'] = 'Warning - Aircraft not valid'
        context['state_type'] = 'Warning'
        return self.render_to_response(context)
@login_required
def aircraftupdate(request, id):
state = ''
state_type = ''
aircraft = Aircraft.objects.get(id=id)
if request.method == 'POST':
form = AircraftForm(data=request.POST, instance=aircraft)
if form.is_valid():
new_aircraft = form.save(commit=False)
new_aircraft.creator = request.user
new_aircraft.modifer = request.user
new_aircraft.save()
return redirect('aircraftlist_saved')
else:
state = 'Warning - Aircraft not valid'
state_type = 'Warning'
    # return update_object(request, object_id = id, model = Aircraft,
    # template_name = 'lookupupdate.html', form_class = AircraftForm,
    # extra_context={'pagetitle':'Update Aircraft','title':'Update Aircraft',
    # 'state':state, 'state_type':state_type}) deprecated
    view = AircraftUpdate.as_view()
    return view(request, pk=id)
# Task
@login_required
def tasklist(request, state='', state_type=''):
queryset = Task.objects.all()
# return object_list(request, queryset = queryset, template_name =
# 'lookuplist.html', extra_context={'pagetitle':'Task List','title':'Task
# List','pagetitle':'Task List', 'state':state, 'state_type':state_type,})
    # deprecated
view = ListView.as_view(template_name='lookuplist.html', queryset=queryset)
return view(
request,
extra_context={
            'pagetitle': 'Task List',
            'title': 'Task List',
'state': state,
'state_type': state_type,
})
@login_required
def taskadd(request):
# print request.user
state = ''
state_type = ''
if request.method == 'POST':
form = TaskForm(data=request.POST)
if form.is_valid():
new_task = form.save(commit=False)
new_task.creator = request.user
new_task.modifer = request.user
new_task.save()
return redirect('tasklist_saved')
else:
state = 'Warning - Task not valid'
state_type = 'Warning'
# return create_object(request, template_name = 'lookupadd.html',
# form_class = TaskForm, extra_context={'pagetitle':'Add
# Task','title':'Add Task', 'state':state, 'state_type':state_type})
    # deprecated
view = CreateView.as_view(
template_name='lookupadd.html',
form_class=TaskForm)
return view(
request,
extra_context={
'pagetitle': 'Add Task',
'title': 'Add Task',
'state': state,
'state_type': state_type})
class TaskUpdate(UpdateView):
# look at
    # https://ccbv.co.uk/projects/Django/1.4/django.views.generic.edit.UpdateView/
state = ''
state_type = ''
model = Task
form_class = TaskForm
template_name = 'lookupupdate.html'
extra_context = {
'pagetitle': 'Task Update',
'title': 'Update Task',
'state': state,
'state_type': state_type}
def get_success_url(self):
return reverse('tasklist_saved')
def get_context_data(self, **kwargs):
context = super(TaskUpdate, self).get_context_data(**kwargs)
context.update(self.extra_context)
return context
    def form_invalid(self, form):
        context = self.get_context_data(form=form)
        context['state'] = 'Warning - Task not valid'
        context['state_type'] = 'Warning'
        return self.render_to_response(context)
@login_required
def taskupdate(request, pk):
state = ''
state_type = ''
task = Task.objects.get(id=pk)
if request.method == 'POST':
form = TaskForm(data=request.POST, instance=task)
if form.is_valid():
new_task = form.save(commit=False)
new_task.creator = request.user
new_task.modifer = request.user
new_task.save()
return redirect('tasklist_saved')
else:
state = 'Warning - Task not valid'
state_type = 'Warning'
    # return update_object(request, object_id = id, model = Task,
    # template_name = 'lookupupdate.html', form_class = TaskForm,
    # extra_context={'pagetitle':'Task Update','title':'Update Task',
    # 'state':state, 'state_type':state_type}) deprecated
    # look at
    # https://ccbv.co.uk/projects/Django/1.4/django.views.generic.edit.UpdateView/
    view = TaskUpdate.as_view()
    return view(request, pk=pk)
# Aircraft Flight Log Report - Detailed
@login_required
@csrf_protect
def aircraftflightloglistdetailed(
request, str_date_to=None, str_date_from=None):
# print '-------------------'
time_start = time.time()
# print 'time start: ' + str(time_start)
aircraft = ''
aircraft_ids = []
task = ''
task_ids = []
pilot = ''
pilot_ids = []
flight_log_number = ''
fire_number = ''
job_number = ''
if request.method == 'POST': # If the form has been submitted...
# drForm = DateRangeForm(request.POST) # A form bound to the POST data
# A form bound to the POST data
drForm = FlightLogFieldSearch(request.POST)
# FlightLogFieldSearch
if drForm.is_valid(): # All validation rules pass
str_date_from = request.POST['date_from']
str_date_to = request.POST['date_to']
aircraft = drForm.cleaned_data['aircraft']
task = drForm.cleaned_data['task']
pilot = drForm.cleaned_data['pilot']
flight_log_number = request.POST['flight_log_number']
fire_number = request.POST['fire_number']
job_number = request.POST['job_number']
for a in aircraft:
aircraft_ids.append(a.id)
for t in task:
task_ids.append(t.id)
for p in pilot:
pilot_ids.append(p.id)
else:
aircraft_qs = Aircraft.objects.all()
for a in aircraft_qs:
aircraft_ids.append(a.id)
task_qs = Task.objects.all()
for t in task_qs:
task_ids.append(t.id)
pilot_qs = Pilot.objects.all()
for p in pilot_qs:
pilot_ids.append(p.id)
# Create Headers for Table
table = ''
# Get Filtered Aircraft Flight Logs
if str_date_to is None:
date_to = datetime.date.today()
else:
date_to = from_datetime.strptime(str_date_to, "%d/%m/%Y")
if str_date_from is None:
diff = datetime.timedelta(days=7)
date_from = datetime.date.today() - diff
else:
date_from = from_datetime.strptime(str_date_from, "%d/%m/%Y")
str_date_to = date_to.strftime("%d/%m/%Y")
str_date_from = date_from.strftime("%d/%m/%Y")
data = {
'date_to': str_date_to,
'date_from': str_date_from,
'aircraft': aircraft_ids,
'task': task_ids,
'pilot': pilot_ids,
'flight_log_number': flight_log_number,
'fire_number': fire_number,
'job_number': job_number}
drForm = FlightLogFieldSearch(data)
# Filter on Flight Log Fields
queryset = AircraftFlightLog.objects.filter(
date__gte=date_from).filter(
date__lte=date_to)
queryset = queryset.filter(aircraft__in=aircraft_ids)
if len(flight_log_number) != 0:
# print flight_log_number
queryset = queryset.filter(
flight_log_number__icontains=flight_log_number)
    # Filter on Flight Log Detail Fields
queryset_details = AircraftFlightLogDetail.objects.filter(
aircraft_flight_log__in=queryset)
if len(fire_number) != 0:
queryset_details = queryset_details.filter(
fire_number__icontains=fire_number)
if len(job_number) != 0:
queryset_details = queryset_details.filter(
job_number__icontains=job_number)
queryset_details = queryset_details.filter(task__in=task_ids)
queryset_details = queryset_details.filter(Q(pilot_in_command__in=pilot_ids) | Q(
pilot_in_command_under_supervision__in=pilot_ids))
'''
queryset_details = AircraftFlightLogDetail.objects.filter(aircraft_flight_log__in=queryset)
kwargs = {}
if len(fire_number) != 0:
kwargs['fire_number__icontains'] = fire_number
if len(job_number) != 0:
kwargs['job_number__icontains'] = job_number
kwargs['task__in'] = task_ids
args = ( Q(pilot_in_command__in = pilot_ids) | Q(pilot_in_command_under_supervision__in = pilot_ids), )
queryset_details = queryset_details.filter(*args, **kwargs)
'''
# Cycle Through Flight Logs
for detail in queryset_details:
# Start Row
table = table + '<tr>'
        # Aircraft Flight Log
table = table + '<td>' + '<a href="' + str(detail.aircraft_flight_log.get_absolute_url(
)) + '">' + str(detail.aircraft_flight_log.flight_log_number) + '</a>' + '</td>'
# Date
table = table + '<td>' + \
str(detail.aircraft_flight_log.date.strftime("%d/%m/%Y")) + '</td>'
# Aircraft
table = table + '<td>' + \
str(detail.aircraft_flight_log.aircraft.name) + '</td>'
# FDI
fdi = detail.aircraft_flight_log.fire_danger_index
if fdi is None:
table = table + '<td></td>'
else:
table = table + '<td>' + str(fdi) + '</td>'
# Datcon
datcon = detail.datcon
if datcon is None:
            datcon = '0'
table = table + '<td>' + str(datcon) + '</td>'
# WST Out
time_out = detail.time_out.strftime("%H:%M")
table = table + '<td>' + time_out + '</td>'
# Task
table = table + '<td>' + detail.task.name + '</td>'
# Fuel Added
fuel_added = detail.fuel_added
if fuel_added is None:
fuel_added = ''
table = table + '<td>' + str(fuel_added) + '</td>'
# Landings
landings = detail.landings
if landings is None:
landings = ''
table = table + '<td>' + str(landings) + '</td>'
# Fire Number
fire_number = detail.fire_number
if fire_number is None:
fire_number = ''
table = table + '<td>' + fire_number + '</td>'
# Job Number
job_number = detail.job_number
if job_number is None:
job_number = ''
table = table + '<td>' + job_number + '</td>'
# Pilot in Command
pilot_in_command = detail.pilot_in_command.first_name + \
' ' + detail.pilot_in_command.last_name
table = table + '<td>' + pilot_in_command + '</td>'
# Pilot in Command Under Super
try:
pilot_in_command_under_supervision = detail.pilot_in_command_under_supervision.first_name + \
' ' + detail.pilot_in_command_under_supervision.last_name
table = table + '<td>' + pilot_in_command_under_supervision + '</td>'
        except AttributeError:
table = table + '<td>' + '' + '</td>'
# End Row
table = table + '</tr>'
state = ''
state_type = ''
if date_from > date_to:
state = 'Date From must be less than Date To'
state_type = 'Warning'
time_finish = time.time()
# print 'time finish: ' + str(time_finish)
total_time = time_finish - time_start
# print 'total time: ' + str(total_time)
return render_to_response('aircraftflightloglistdetailed.html',
{'table': table,
'drForm': drForm,
'state': state,
'state_type': state_type,
'pagetitle': 'Aircraft Flight Log Report - Detailed'},
context_instance=RequestContext(request))
# Aircraft Flight Log Report - Summary
@login_required
@csrf_protect
def aircraftflightloglist(request, str_date_to=None, str_date_from=None):
aircraft = ''
aircraft_ids = []
task = ''
task_ids = []
pilot = ''
pilot_ids = []
flight_log_number = ''
fire_number = ''
job_number = ''
if request.method == 'POST': # If the form has been submitted...
# drForm = DateRangeForm(request.POST) # A form bound to the POST data
# A form bound to the POST data
drForm = FlightLogFieldSearch(request.POST)
# FlightLogFieldSearch
if drForm.is_valid(): # All validation rules pass
str_date_from = request.POST['date_from']
str_date_to = request.POST['date_to']
aircraft = drForm.cleaned_data['aircraft']
task = drForm.cleaned_data['task']
pilot = drForm.cleaned_data['pilot']
flight_log_number = request.POST['flight_log_number']
fire_number = request.POST['fire_number']
job_number = request.POST['job_number']
for a in aircraft:
aircraft_ids.append(a.id)
for t in task:
task_ids.append(t.id)
for p in pilot:
pilot_ids.append(p.id)
else:
aircraft_qs = Aircraft.objects.all()
for a in aircraft_qs:
aircraft_ids.append(a.id)
task_qs = Task.objects.all()
for t in task_qs:
task_ids.append(t.id)
pilot_qs = Pilot.objects.all()
for p in pilot_qs:
pilot_ids.append(p.id)
# Create Headers for Table
table = ''
# Get Filtered Aircraft Flight Logs
if str_date_to is None:
date_to = datetime.date.today()
else:
date_to = from_datetime.strptime(str_date_to, "%d/%m/%Y")
if str_date_from is None:
diff = datetime.timedelta(days=7)
date_from = datetime.date.today() - diff
else:
date_from = from_datetime.strptime(str_date_from, "%d/%m/%Y")
str_date_to = date_to.strftime("%d/%m/%Y")
str_date_from = date_from.strftime("%d/%m/%Y")
data = {
'date_to': str_date_to,
'date_from': str_date_from,
'aircraft': aircraft_ids,
'task': task_ids,
'pilot': pilot_ids,
'flight_log_number': flight_log_number,
'fire_number': fire_number,
'job_number': job_number}
drForm = FlightLogFieldSearch(data)
# Filter on Flight Log Fields
queryset = AircraftFlightLog.objects.filter(
date__gte=date_from).filter(
date__lte=date_to)
queryset = queryset.filter(aircraft__in=aircraft_ids)
if len(flight_log_number) != 0:
# print flight_log_number
queryset = queryset.filter(
flight_log_number__icontains=flight_log_number)
flight_ids_master = []
for y in queryset:
flight_ids_master.append(y.id)
    # Filter on Flight Log Detail Fields
queryset_details = AircraftFlightLogDetail.objects.filter(
aircraft_flight_log__in=queryset)
flight_ids_detail = []
for y in queryset_details:
flight_ids_detail.append(y.aircraft_flight_log_id)
s = set(flight_ids_detail)
# gets difference between 2 lists. flight_ids_master - flight_ids_detail.
# Get list of flight logs without detail children
no_detail_logs = [x for x in flight_ids_master if x not in s]
# print 'difference'
# print no_detail_logs
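    # Flight logs without detail rows can only appear in the report when no
    # detail-level filter is applied, so no_detail_logs is cleared whenever a
    # fire number, job number, task or pilot filter narrows the details.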
if len(fire_number) != 0:
queryset_details = queryset_details.filter(
fire_number__icontains=fire_number)
no_detail_logs = []
if len(job_number) != 0:
queryset_details = queryset_details.filter(
job_number__icontains=job_number)
no_detail_logs = []
task_total = Task.objects.all()
if len(task_ids) != len(task_total):
no_detail_logs = []
queryset_details = queryset_details.filter(task__in=task_ids)
pilot_total = Pilot.objects.all()
if len(pilot_ids) != len(pilot_total):
no_detail_logs = []
queryset_details = queryset_details.filter(Q(pilot_in_command__in=pilot_ids) | Q(
pilot_in_command_under_supervision__in=pilot_ids))
flight_ids = []
for y in queryset_details:
flight_ids.append(y.aircraft_flight_log_id)
flight_ids.extend(no_detail_logs)
queryset = queryset.filter(id__in=flight_ids)
# Cycle Through Flight Logs
for flightlog in queryset:
# Start Row
table = table + '<tr>'
        # Aircraft Flight Log
table = table + '<td>' + '<a href="' + \
str(flightlog.get_absolute_url()) + '">' + \
str(flightlog.flight_log_number) + '</a>' + '</td>'
# Date
table = table + '<td>' + \
str(flightlog.date.strftime("%d/%m/%Y")) + '</td>'
# WST Out
job_num = None
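        # The summary row shows the earliest detail record (ordered by
        # time_out) for the day's time out, job number, pilots and task.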
if flightlog.aircraftflightlogdetail_set.all():
details = flightlog.aircraftflightlogdetail_set.all().order_by('time_out')
# print details[0].time_out.strftime("%H:%M")
table = table + '<td>' + \
details[0].time_out.strftime("%H:%M") + '</td>'
job_num = details[0].job_number
#table = table + '<td>' + str(details[0].time_out) + '</td>'
else:
table = table + '<td></td>'
# Aircraft
table = table + '<td>' + str(flightlog.aircraft.name) + '</td>'
# VDO Time
datcon_sum = flightlog.aircraftflightlogdetail_set.all().aggregate(Sum('datcon'))
datcon_sum = datcon_sum['datcon__sum']
if datcon_sum is None:
datcon_sum = '0'
table = table + '<td>' + str(datcon_sum) + '</td>'
# Job Number
if job_num is None:
table = table + '<td></td>'
else:
table = table + '<td>' + job_num + '</td>'
#Pilot in Command
if flightlog.aircraftflightlogdetail_set.all():
details = flightlog.aircraftflightlogdetail_set.all().order_by('time_out')
table = table + '<td>' + \
details[0].pilot_in_command.first_name + ' ' + \
details[0].pilot_in_command.last_name + '</td>'
if details[0].pilot_in_command_under_supervision:
table = table + '<td>' + details[0].pilot_in_command_under_supervision.first_name + ' ' + details[
0].pilot_in_command_under_supervision.last_name + '</td>'
else:
table = table + '<td></td>'
'''
if details[0].task: table = table + '<td>' + details[0].task.name + '</td>'
else: table = table + '<td></td>'
'''
if details[0].task:
table = table + '<td>' + details[0].task.name + '</td>'
else:
table = table + '<td></td>'
'''
if details[0].activity_type: table = table + '<td>' + details[0].activity_type.name + '</td>'
else: table = table + '<td></td>'
'''
else:
table = table + '<td></td>'
table = table + '<td></td>'
table = table + '<td></td>'
# End Row
table = table + '</tr>'
state = ''
state_type = ''
if date_from > date_to:
state = 'Date From must be less than Date To'
state_type = 'Warning'
return render_to_response('aircraftflightloglist.html',
{'table': table,
'drForm': drForm,
'state': state,
'state_type': state_type,
'pagetitle': 'Aircraft Flight Log Report'},
context_instance=RequestContext(request))
@login_required
def aircraftflightlogadd(request):
# print request.user
state = ''
state_type = ''
if request.method == 'POST':
form = AircraftFlightLogForm(data=request.POST)
if form.is_valid():
new_aircraftflightlog = form.save(commit=False)
new_aircraftflightlog.creator = request.user
new_aircraftflightlog.modifer = request.user
new_aircraftflightlog.save()
# print new_aircraftflightlog.id
state = 'Saved'
state_type = 'OK'
return redirect(reverse('aircraftflightlog_saved',
kwargs={'id': new_aircraftflightlog.id}))
else:
state = 'Warning - Flight Log is not valid.'
state_type = 'Warning'
# return create_object(request, template_name =
# 'aircraftflightlogadd.html', form_class = AircraftFlightLogForm,
# extra_context ={'state':state,'state_type':state_type,'pagetitle':'Add
    # Aircraft Flight Log'}) deprecated
view = CreateView.as_view(
template_name='aircraftflightlogadd.html',
form_class=AircraftFlightLogForm)
return view(
request,
extra_context={
'state': state,
'state_type': state_type,
'pagetitle': 'Add Aircraft Flight Log'})
@login_required
@csrf_protect
def aircraftflightlogdetailsadd(request, id, state='', state_type=''):
# print request.user
#state = ''
#state_type = ''
flightlog = AircraftFlightLog.objects.get(pk=id)
# filter the select lists
flightlogdetails = flightlog.aircraftflightlogdetail_set.all()
    # aircraft select list equals all active aircraft + aircraft that are
# already selected.
try:
aircraft_array = []
aircraft_array.append(flightlog.aircraft_id)
aircraft_qs = Aircraft.objects.filter(id__in=aircraft_array)
aircraft_qs = aircraft_qs | Aircraft.objects.filter(
effective_to__exact=None)
except IndexError as e:
# Default list of no child records found
aircraft_qs = Aircraft.objects.filter(effective_to__exact=None)
# task select list equals all active tasks + inactive tasks that are
# already selected.
try:
task_array = []
for detail in flightlogdetails:
task_array.append(detail.task_id)
task_qs = Task.objects.filter(id__in=task_array)
task_qs = task_qs | Task.objects.filter(effective_to__exact=None)
except IndexError as e:
# Default list of no child records found
task_qs = Task.objects.filter(effective_to__exact=None)
    # pilot in command select list equals all active pilots + inactive pilots
    # that are already selected.
try:
pilot_array = []
for detail in flightlogdetails:
pilot_array.append(detail.pilot_in_command_id)
pilot_qs = Pilot.objects.filter(id__in=pilot_array)
pilot_qs = pilot_qs | Pilot.objects.filter(effective_to__exact=None)
except IndexError as e:
# Default list of no child records found
pilot_qs = Pilot.objects.filter(effective_to__exact=None)
    # pilot in command under supervision select list equals all active pilots
    # + inactive pilots that are already selected.
try:
pilot_array = []
for detail in flightlogdetails:
pilot_array.append(detail.pilot_in_command_under_supervision_id)
pilot_under_qs = Pilot.objects.filter(id__in=pilot_array)
pilot_under_qs = pilot_under_qs | Pilot.objects.filter(
effective_to__exact=None)
except IndexError as e:
# Default list of no child records found
        pilot_under_qs = Pilot.objects.filter(effective_to__exact=None)
form_master = AircraftFlightLogForm(instance=flightlog)
# Overrides Form Defaults
form_master.fields['aircraft'].queryset = aircraft_qs
# Make Form
details_form = AircraftFlightLogDetailForm
# Overrides Form Defaults
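    # Restrict the task and pilot choices to the querysets built above
    # (active records plus any inactive ones already referenced).
    # Note: declared_fields is a class-level attribute, so these overrides
    # apply to every request that uses AircraftFlightLogDetailForm.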
details_form.declared_fields['task'] = forms.ModelChoiceField(
queryset=task_qs,
empty_label="",
widget=forms.Select(
attrs={
'class': 'chzn-select-task',
'style': 'width:200px',
}))
details_form.declared_fields['pilot_in_command'] = forms.ModelChoiceField(
queryset=pilot_qs,
empty_label="",
widget=forms.Select(
attrs={
'class': 'chzn-select-command',
'style': 'width:200px',
}))
details_form.declared_fields['pilot_in_command_under_supervision'] = forms.ModelChoiceField(
queryset=pilot_under_qs,
empty_label="",
required=False,
widget=forms.Select(
attrs={
'class': 'chzn-select-super',
'style': 'width:200px',
}))
print "-=-=-=---=-=-=-=-=-"
FlightLogDetailInlineFormSet = inlineformset_factory(
AircraftFlightLog, AircraftFlightLogDetail, extra=6, exclude=(
'creator', 'modifier'), can_delete=False, form=details_form)
if request.method == 'POST':
form = AircraftFlightLogForm(data=request.POST, instance=flightlog)
formset = FlightLogDetailInlineFormSet(
request.POST, request.FILES, instance=flightlog)
#formset = FlightLogDetailInlineFormSet(request.POST)
if form.is_valid():
new_aircraftflightlog = form.save(commit=False)
new_aircraftflightlog.creator = request.user
new_aircraftflightlog.modifer = request.user
# print formset
# print 'HHHHHHHHHHHH'
if formset.is_valid():
#instances = formset.save(commit=False)
# for f in formset:
# print 'Datcon' + str(f['datcon'])
return_time_last = 0
counter = 1
error = 0
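                # Validate the rows in departure order: parse each row's
                # Datcon (hours with tenths) and Time Out (HHMM) out of the
                # rendered widget HTML, convert both to minutes, and flag an
                # error if a row departs before the previous row has returned.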
for f in formset:
# print 'Datcon' + str(f['datcon'])
if error == 0:
datcon_html = str(f['datcon'])
datcon_array = datcon_html.split("\"")
if len(datcon_array) == 11:
datcon = datcon_array[7]
# print 'datcon: ' + datcon
try:
datcon_hour = int(datcon.split(".")[0])
                            except ValueError:
datcon = "0" + datcon
datcon_hour = int(datcon.split(".")[0])
datcon_24h = datcon_hour * 60
try:
datcon_minute = int(datcon.split(".")[1])
                            except (IndexError, ValueError):
datcon_minute = 0
datcon_min = datcon_minute * 6
total_datcon_minutes = datcon_24h + datcon_min
# print 'time Out' + str(f['time_out'])
timeout_html = str(f['time_out'])
timeout_array = timeout_html.split("\"")
# if len(timeout_array) == 13:
timeout_str = timeout_array[5]
if len(timeout_str) == 4:
timeout_hh = int(timeout_str[:2])
timeout_mm = int(timeout_str[2:])
else:
timeout_hh = int(timeout_str[:1])
timeout_mm = int(timeout_str[1:])
#timeout_int = int(timeout_str)
timeout_total_minutes = (
int(timeout_hh) * 60) + int(timeout_mm)
return_time_minutes = total_datcon_minutes + timeout_total_minutes
'''
print 'datcon: ' + str(datcon)
print 'datcon in minutes: ' + str(total_datcon_minutes)
print 'time out: ' + str(timeout_str)
print 'time out in minutes: ' + str(timeout_total_minutes)
print 'return time in minutes: ' + str(return_time_minutes)
print 'return time last: ' + str(return_time_last)
'''
if return_time_last > timeout_total_minutes:
state = 'Warning (Rows ' + str(counter - 1) + ", " + str(
counter) + ') - Aircraft leaving before it has returned. See Datcon and Time Out.'
state_type = 'Warning'
error = 1
return_time_last = return_time_minutes
counter = counter + 1
# f.save()
if error == 0:
new_aircraftflightlog.save()
formset.save()
state = 'Saved'
state_type = 'OK'
formset = FlightLogDetailInlineFormSet(instance=flightlog)
form = form_master
else:
state = 'Warning - Flight Log Details are not valid.'
state_type = 'Warning'
else:
state = 'Warning - Flight Log is not valid.'
state_type = 'Warning'
# return
# render_to_response('aircraftflightlogdetailsadd.html',{"formset":
# formset,"form":form}, context_instance=RequestContext(request))
else:
#state = ''
#state_type = ''
formset = FlightLogDetailInlineFormSet(instance=flightlog)
form = form_master
#form = AircraftFlightLogForm(instance=flightlog)
return render_to_response("aircraftflightlogdetailsadd.html",
{"formset": formset,
"form": form,
"state": state,
"state_type": state_type,
'pagetitle': 'Aircraft Flight Log Details'},
context_instance=RequestContext(request))
# Duty Time
@login_required
@csrf_protect
def dutytimeadd(request):
# print '+++++++++++++++'
time_start = time.time()
state = ''
state_type = ''
pilots = Pilot.objects.filter(
effective_to__exact=None).order_by('last_name')
table = ''
# print 'pilots'
# print pilots
for pilot in pilots:
# print "Pilot Name: " + pilot.first_name
# print "Pilot ID: " + str(pilot.id)
table += '<tr>'
table += '<td>'
try:
table += '<a href="' + \
str(pilot.dutytime_set.all()[0].get_absolute_url()) + '">'
table += '<input type="image" src="/static/img/page_white_edit.png" name="edit" width="24" height="24" alt="Edit">'
table += '</a>'
except IndexError as e:
table += '<img type="image" src="/static/img/cross.png" name="edit" width="24" height="24" alt="No Duty Time Records">'
table += '</td>'
table += '<td>'
try:
table += '<a href="../' + str(pilot.id) + '/hours">'
table += '<input type="image" src="/static/img/page_white_edit.png" name="edit" width="24" height="24" alt="Edit">'
table += '</a>'
except IndexError as e:
table += '<img type="image" src="/static/img/cross.png" name="edit" width="24" height="24" alt="No Duty Time Records">'
table += '</td>'
table += '<td style="text-align:center" >'
table += '<input type="radio" name="rdio" value="' + \
str(pilot.id) + '">'
table += '</td>'
table += '<td>'
table += str(pilot.first_name)
table += '</td>'
table += '<td>'
table += pilot.last_name
table += '</td>'
table += '<td id="date_' + str(pilot.id) + '">'
# print '---------'
try:
dt_date = pilot.dutytime_set.order_by(
'-date')[0].date.strftime("%d/%m/%Y")
# print dt_date
except IndexError as e:
# print pilot.first_name + ' ' + pilot.last_name + ' has no Last
# Date.'
dt_date = ''
table += dt_date
table += '</td>'
table += '</tr>'
if request.method == 'POST':
# Validate Dates
# print '^^^^^^^^^^^^^^^^^^^^^'
# print request.POST['pilot_id']
# Check if pilot id is sent back
if request.POST['pilot_id'] != '':
pilot = Pilot.objects.get(id=int(request.POST['pilot_id']))
# print pilot
# print pilot.id
# Check if both dates have been chosen
if request.POST['date_from'] != '' and request.POST[
'date_to'] != '':
date_from = from_datetime.strptime(
request.POST['date_from'], "%d/%m/%Y")
date_to = from_datetime.strptime(
request.POST['date_to'], "%d/%m/%Y")
# print date_from
# print date_to
# Check date range is valid
if date_to >= date_from:
# Make one day
oneday = datetime.timedelta(days=1)
# While date_change is less than date_to - create day
# records
date_change = date_from
while (date_change <= date_to):
# print date_change
dt = DutyTime(date=date_change, pilot=pilot)
dt.creator = request.user
dt.modifer = request.user
dt.save()
date_change = date_change + oneday
# print date_change
state = 'Saved'
state_type = 'OK'
return redirect(
reverse(
'dutytimeaddset_saved',
kwargs={
'id': pilot.id}))
else:
# No dates. Send user message.
state = 'Warning - Enter values for both date fields'
state_type = 'Warning'
else:
# No pilot id. Send user message.
state = "Warning - No pilot selected"
state_type = "Warning"
drForm = DateRangeForm()
time_end = time.time()
time_total = time_end - time_start
# print 'Total Time: ' +str(time_total)
return render_to_response("dutytimeadd.html",
{'pagetitle': 'Duty Times',
"drForm": drForm,
'pilots': table,
"state": state,
"state_type": state_type},
context_instance=RequestContext(request))
@login_required
@csrf_protect
def dutytimeaddset(request, id, str_date_to=None,
str_date_from=None, state='', state_type=''):
time_start = time.time()
state = ''
state_type = ''
pilot = Pilot.objects.get(pk=id)
name = pilot.first_name + ' ' + pilot.last_name
# inlineformset
DutyTimeInlineFormSet = inlineformset_factory(Pilot, DutyTime, exclude=(
'creator', 'modifier'), can_delete=False, form=DutyTimeForm, extra=0)
#dt_formset = formset_factory(DutyTimeForm, extra=2)
# Do this if something submitted
if request.method == "POST":
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "enter post: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# if duty times are saved do this
if request.POST['type'] == 'Save':
# print request.POST
formset = DutyTimeInlineFormSet(
request.POST, request.FILES, instance=pilot)
#formset = DutyTimeInlineFormSet(request.POST, request.FILES)
# print formset
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "after formset get: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
if len(formset) == 0:
date_to = pilot.dutytime_set.order_by('-date')[0].date
# get date using last entered date - 14 days
date_from = date_to - timedelta(days=13)
# Create formset
sort = 'A'
state = 'Warning - No Records Submitted To Save'
state_type = 'Warning'
else:
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "Before Date Range: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
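                # Work out the submitted date range by scanning the rendered
                # formset HTML for dd/mm/yyyy values; date_from/date_to start
                # at sentinel extremes and are narrowed to the min/max found.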
formsetstring = str(formset)
formsetdates = re.findall(r"\d{2}/\d{2}/\d{4}", formsetstring)
date_from = from_datetime.strptime("01/01/2050", "%d/%m/%Y")
date_to = from_datetime.strptime("01/01/1900", "%d/%m/%Y")
for formdate in formsetdates:
thedate = from_datetime.strptime(formdate, "%d/%m/%Y")
if thedate > date_to:
date_to = thedate
if thedate < date_from:
date_from = thedate
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "After Date Range: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
try:
if from_datetime.strptime(
formsetdates[0], "%d/%m/%Y") > from_datetime.strptime(formsetdates[1], "%d/%m/%Y"):
sort = 'D'
else:
sort = 'A'
                except (IndexError, ValueError):
sort = 'A'
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "After Order Calc: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
if formset.is_valid():
error = 0
counter = 0
for f in formset:
counter = counter + 1
ontime = str(f['datetime_on_first'])
offtime = str(f['datetime_off_first'])
thedate = str(f['date'])
# print thedate
day = thedate.split("\"")[3]
ontime_arr = ontime.split("\"")
offtime_arr = offtime.split("\"")
if len(ontime_arr) == 11 and len(offtime_arr) == 11:
ontime = int(ontime_arr[7])
offtime = int(offtime_arr[7])
if ontime >= offtime:
state = 'Warning - Duty Time is not valid (' + \
day + '). Time On must be less than Time Off'
state_type = 'Warning'
error = 1
elif len(ontime_arr) == 11 and len(offtime_arr) == 9:
state = 'Warning - Duty Time is not valid (' + \
day + '). Missing Time Off value.'
state_type = 'Warning'
error = 1
elif len(ontime_arr) == 9 and len(offtime_arr) == 11:
state = 'Warning - Duty Time is not valid (' + \
day + '). Missing Time On value.'
state_type = 'Warning'
error = 1
# print "Counter (rows): " + str(counter)
if error == 0:
formset.save()
state = 'Saved'
state_type = 'OK'
# ^^^^^^^^^^^^^^^
time_mid = time.time()
time_total = time_mid - time_start
# print "After Formset Saved: " + str(time_total)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
else:
state = 'Warning - Duty Time is not valid.'
state_type = 'Warning'
# if date filter submitted do this
elif request.POST['type'] == 'Go':
drForm = DateRangeForm(request.POST)
# print 'Date Range Submitted'
if drForm.is_valid(): # All validation rules pass
# get date from POST
str_date_from = request.POST['date_from']
str_date_to = request.POST['date_to']
# convert date from string to date object
date_from = from_datetime.strptime(str_date_from, "%d/%m/%Y")
date_to = from_datetime.strptime(str_date_to, "%d/%m/%Y")
sort = request.POST['sort']
                if sort == 'D':
                    order = '-date'
                else:
                    order = 'date'
#formset = DutyTimeInlineFormSet(instance=pilot, queryset=DutyTime.objects.filter(date__gte=date_from).filter(date__lte=date_to.order_by(order)))
formset = DutyTimeInlineFormSet(
instance=pilot, queryset=DutyTime.objects.filter(
date__gte=date_from).filter(
date__lte=date_to).order_by(order))
state = 'Duty Times Sorted'
state_type = 'OK'
else:
state = 'Warning - Sorting Failed'
state_type = 'Warning'
# Do this if nothing submitted
else:
# do default date query here
# get the last entered date for the pilot
date_to = pilot.dutytime_set.order_by('-date')[0].date
        # get date using last entered date - 14 days
date_from = date_to - timedelta(days=13)
# Create formset
sort = 'A'
#formset = DutyTimeInlineFormSet(instance=pilot, queryset=DutyTime.objects.filter(date__gte=date_from).filter(date__lte=date_to.order_by('-date')))
formset = DutyTimeInlineFormSet(
instance=pilot, queryset=DutyTime.objects.filter(
date__gte=date_from).filter(
date__lte=date_to).order_by('date'))
# convert dates to strings
str_date_to = date_to.strftime("%d/%m/%Y")
str_date_from = date_from.strftime("%d/%m/%Y")
# make dictionary to put in form
data = {'date_to': str_date_to, 'date_from': str_date_from, 'sort': sort}
# Create form
drForm = DateRangeSortForm(data)
if date_from > date_to:
state = 'Date From must be less than Date To'
state_type = 'Warning'
time_end = time.time()
time_total = time_end - time_start
# print "total time: " + str(time_total)
return render_to_response("dutytimeaddset.html",
{'pagetitle': 'Edit Duty Times / ' + name,
"formset": formset,
'state': state,
'state_type': state_type,
"name": name,
"drForm": drForm},
context_instance=RequestContext(request))
@login_required
@csrf_protect
def dutytimehours(request, id, str_date_to=None,
str_date_from=None, str_start_point=None):
# print '+++++++++++++++'
time_start = time.time()
pilot = Pilot.objects.get(pk=id)
name = pilot.first_name + ' ' + pilot.last_name
# ------- Change to date filtered. Takes too long to get all
flightlogs_command = pilot.pilot_in_command.all()
# ------- Change to date filtered. Takes too long to get all
flightlogs_supervised = pilot.aircraftflightlogdetail_set.all()
if request.method == 'POST': # If the form has been submitted...
drForm = DateRangeForm(request.POST) # A form bound to the POST data
if drForm.is_valid(): # All validation rules pass
str_date_from = request.POST['date_from']
str_date_to = request.POST['date_to']
str_start_point = request.POST['start_point']
# Create Headers for Table
table = ''
# Get Filtered Aircraft Flight Logs
if str_date_to is None:
date_to = from_datetime.today()
else:
date_to = from_datetime.strptime(str_date_to, "%d/%m/%Y")
if str_date_from is None:
diff = datetime.timedelta(days=59)
date_from = from_datetime.today() - diff
else:
date_from = from_datetime.strptime(str_date_from, "%d/%m/%Y")
if str_start_point is None:
start_point = from_datetime.strptime("15/01/2011", "%d/%m/%Y")
else:
start_point = from_datetime.strptime(str_start_point, "%d/%m/%Y")
date_from = date_from.date()
date_to = date_to.date()
start_point = start_point.date()
str_date_to = date_to.strftime("%d/%m/%Y")
str_date_from = date_from.strftime("%d/%m/%Y")
str_start_point = start_point.strftime("%d/%m/%Y")
data = {
'date_to': str_date_to,
'date_from': str_date_from,
'start_point': str_start_point}
drForm = DutyRangeForm(data)
dutytimes = pilot.dutytime_set.filter(
date__gt=(
date_from -
datetime.timedelta(
days=365))).filter(
date__lte=date_to).order_by('date')
    # Walk day by day from a year before date_from through to date_to
counter = 0
date_counter = date_from - datetime.timedelta(days=365)
# print 'date_from: ' + str(date_from)
# print 'date_to: ' + str(date_to)
this_total = 0
dutytime_arr = []
datcon_arr = []
while date_counter <= date_to:
# print '###'
if date_counter >= date_from:
time_section_start = time.time()
# print date_counter
# Start Row
table += '<tr>'
# Pilot
table += '<td>'
table += name
table += '</td>'
# Date
table += '<td>'
table += date_counter.strftime("%d/%m/%Y")
table += '</td>'
date_diff = start_point - date_counter
int_diff = int(date_diff.days)
            if int_diff % 28 == 0:
                day = date_counter.strftime("%A") + ' (7/14/28 Day Reset)'
            elif int_diff % 14 == 0:
                day = date_counter.strftime("%A") + ' (7/14 Day Reset)'
            elif int_diff % 7 == 0:
                day = date_counter.strftime("%A") + ' (7 Day Reset)'
            else:
                day = date_counter.strftime("%A")
table += '<td>'
table += day
table += '</td>'
time_section_2_start = time.time()
# get a duty time record if there is one
try:
dtime = dutytimes.filter(date=date_counter)[0]
except IndexError as e:
# print 'IndexError'
dtime = None
if dtime:
# Date On
table += '<td>'
if dtime.datetime_on_first:
table += dtime.datetime_on_first.strftime("%H:%M")
table += '</td>'
# Date Off
table += '<td>'
if dtime.datetime_off_first:
table += dtime.datetime_off_first.strftime("%H:%M")
table += '</td>'
#
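                # Convert the on/off times to decimal hours and take the
                # difference as the day's duty total (rounded to one decimal).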
if dtime.datetime_on_first:
on_hour = dtime.datetime_on_first.hour
on_min = float(dtime.datetime_on_first.minute)
on_min = on_min / 60
# print on_min
on_time = float(on_hour) + on_min
# print str(on_time)
if dtime.datetime_off_first:
off_hour = dtime.datetime_off_first.hour
off_min = float(dtime.datetime_off_first.minute)
off_min = off_min / 60
# print off_min
off_time = float(off_hour) + off_min
# print off_time
diff = 0.0
diff = off_time - on_time
diff = round(diff, 1)
else:
diff = 0.00
else:
diff = 0.00
dutytime_arr.append(diff)
# Daily Total
table += '<td>'
table += str("%.1f" % diff)
#table+= str(diff)
table += '</td>'
else:
# Date On
table += '<td>'
table += ''
table += '</td>'
# Date Off
table += '<td>'
table += ''
table += '</td>'
# Daily
table += '<td>'
table += '0.00'
table += '</td>'
dutytime_arr.append(0)
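            # Rolling duty totals: the modulo arithmetic against start_point
            # works out how many days have passed since the last 7, 14 or 28
            # day reset and slices that many trailing daily totals out of
            # dutytime_arr for the sum.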
# 7 Days
date_diff = start_point - date_counter
int_diff = int(date_diff.days)
mod_diff = int_diff % 7
mod_changed = (7 - mod_diff)
arr_slice = (mod_changed + 1) * -1
if arr_slice == -8:
arr_slice = -1
table += '<td>'
table += str("%.1f" % sum(dutytime_arr[arr_slice:]))
#table+= str(sum(dutytime_arr[arr_slice:]))
table += '</td>'
# 14 Days
date_diff = start_point - date_counter
int_diff = int(date_diff.days)
mod_diff = int_diff % 14
mod_changed = (14 - mod_diff)
arr_slice = (mod_changed + 1) * -1
if arr_slice == -15:
arr_slice = -1
table += '<td>'
table += str("%.1f" % sum(dutytime_arr[arr_slice:]))
table += '</td>'
# 28 Days
date_diff = start_point - date_counter
int_diff = int(date_diff.days)
mod_diff = int_diff % 28
mod_changed = (28 - mod_diff)
arr_slice = (mod_changed + 1) * -1
if arr_slice == -29:
arr_slice = -1
table += '<td>'
table += str("%.1f" % sum(dutytime_arr[arr_slice:]))
table += '</td>'
# Travel
dtime = dutytimes.filter(date=date_counter)
travel = '0'
if dtime.count() > 0:
if dtime[0].travel_km:
travel = str(dtime[0].travel_km)
table += '<td>'
table += travel
table += '</td>'
# Daily
# Command
flightlogs_comm = flightlogs_command.filter(
aircraft_flight_log__date=date_counter)
time_total_comm = 0.0
time_total_super = 0.0
if flightlogs_comm.count() > 0:
# print flightlogs_comm
for f in flightlogs_comm:
# print 'command time: ' + str(f.datcon)
time_total_comm += float(f.datcon)
# print 'time total: ' + str(time_total)
# Supervised
flightlogs_super = flightlogs_supervised.filter(
aircraft_flight_log__date=date_counter)
if flightlogs_super.count() > 0:
# print flightlogs_super
for f in flightlogs_super:
# print 'super time: ' + str(f.datcon)
time_total_super += float(f.datcon)
time_total_both = time_total_comm + time_total_super
datcon_arr.append(time_total_both)
table += '<td>'
table += str("%.1f" % time_total_both)
table += '</td>'
# 7 Day
# Command
# seven_days
table += '<td>'
table += str(sum(datcon_arr[-7:]))
table += '</td>'
# 30 Days
table += '<td>'
table += str(sum(datcon_arr[-30:]))
table += '</td>'
# 365 Day
table += '<td>'
table += str(sum(datcon_arr[-365:]))
table += '</td>'
# WST Out
flightlogs_comm = flightlogs_command.filter(
aircraft_flight_log__date=date_counter)
flightlogs_super = flightlogs_supervised.filter(
aircraft_flight_log__date=date_counter)
wst_time = ''
flight_time = 0.0
if flightlogs_comm.count() > 0 and flightlogs_super.count() > 0:
min_time = flightlogs_comm[0].time_out
for fl in flightlogs_comm:
new_time = fl.time_out
if new_time < min_time:
min_time = new_time
flight_time += float(fl.datcon)
for fl in flightlogs_super:
new_time = fl.time_out
if new_time < min_time:
min_time = new_time
flight_time += float(fl.datcon)
wst_time = min_time.strftime("%H:%M")
flight_time = str("%.1f" % flight_time)
elif flightlogs_comm.count() > 0:
min_time = flightlogs_comm[0].time_out
for fl in flightlogs_comm:
new_time = fl.time_out
if new_time < min_time:
min_time = new_time
flight_time += float(fl.datcon)
flight_time = str("%.1f" % flight_time)
wst_time = min_time.strftime("%H:%M")
elif flightlogs_super.count() > 0:
min_time = flightlogs_super[0].time_out
for fl in flightlogs_super:
new_time = fl.time_out
if new_time < min_time:
min_time = new_time
flight_time += float(fl.datcon)
wst_time = min_time.strftime("%H:%M")
flight_time = str("%.1f" % flight_time)
else:
flight_time = ''
# WST Out
table += '<td>'
table += wst_time
table += '</td>'
# Time - Day Total
table += '<td>'
table += flight_time
table += '</td>'
# Fuel - Day Total
table += '<td>'
table += '0'
table += '</td>'
# End Row
table += '</tr>'
else:
# Calc Duty Day Total
try:
dtime = dutytimes.filter(date=date_counter)[0]
except IndexError as e:
# print 'IndexError'
dtime = None
if dtime:
if dtime.datetime_on_first:
on_hour = dtime.datetime_on_first.hour
on_min = float(dtime.datetime_on_first.minute)
on_min = on_min / 60
# print on_min
on_time = float(on_hour) + on_min
# print str(on_time)
if dtime.datetime_off_first:
off_hour = dtime.datetime_off_first.hour
off_min = float(dtime.datetime_off_first.minute)
off_min = off_min / 60
# print off_min
off_time = float(off_hour) + off_min
# print off_time
diff = 0.00
diff = off_time - on_time
else:
diff = 0.00
else:
diff = 0.00
x = diff
else:
x = 0
dutytime_arr.append(x)
# Calc Datcon Daily Total
flightlogs_comm = flightlogs_command.filter(
aircraft_flight_log__date=date_counter)
time_total_comm = 0.0
time_total_super = 0.0
if flightlogs_comm.count() > 0:
# print flightlogs_comm
for f in flightlogs_comm:
# print 'command time: ' + str(f.datcon)
time_total_comm += float(f.datcon)
# print 'time total: ' + str(time_total)
# Supervised
flightlogs_super = flightlogs_supervised.filter(
aircraft_flight_log__date=date_counter)
if flightlogs_super.count() > 0:
# print flightlogs_super
for f in flightlogs_super:
# print 'super time: ' + str(f.datcon)
time_total_super += float(f.datcon)
time_total_both = time_total_comm + time_total_super
y = time_total_both
datcon_arr.append(y)
date_counter += datetime.timedelta(days=1)
state = ''
state_type = ''
if date_from > date_to:
state = 'Date From must be less than Date To'
state_type = 'Warning'
return render_to_response("dutytimehours.html",
{'pagetitle': 'Duty Times + Flights - Report / ' + name,
'name': name,
'table': table,
'drForm': drForm,
'state': state,
'state_type': state_type},
context_instance=RequestContext(request))
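# Minimal sketch (not part of the original view): the on/off duty-time
# calculation repeated above, expressed as a standalone helper. The argument
# names are illustrative; they correspond to the DutyTime datetime fields
# used in the view.
def _duty_hours(datetime_on, datetime_off):
    """Return the duty period in decimal hours, rounded to one place."""
    if not (datetime_on and datetime_off):
        return 0.0
    on_time = datetime_on.hour + datetime_on.minute / 60.0
    off_time = datetime_off.hour + datetime_off.minute / 60.0
    return round(off_time - on_time, 1)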
# This is required so that the extra context can be added to the view.
class ExtraContextTemplateView(TemplateView):
extra_context = None
def get_context_data(self, *args, **kwargs):
context = super(
ExtraContextTemplateView,
self).get_context_data(
*args,
**kwargs)
if self.extra_context:
context.update(self.extra_context)
return context
|
# coding: utf-8
import unittest
from getsub.util import extract_name
class TestExtractName(unittest.TestCase):
def test_all_en(self):
name = "Young.Sheldon.S01.1080p.WEB-DL.DD5.1.H.264-YFN[v].rar"
new_name = extract_name(name)
self.assertEqual(new_name, name)
def test_mixed(self):
name1 = "[SPS辛普森一家字幕组].[丑陋的美国人.第一季].Ugly.Americans.S01E01.rmvb"
name2 = (
"行尸走肉 第10季第15集【本季终】.The.Walking.Dead.[WEB.1080P]中英文字幕【YYeTs字幕组 简繁英双语字幕】"
"The.Walking.Dead.S10E15.The.Tower.720p/1080p.AMZN.WEB-DL.DD+5.1.H.264-CasStudio"
)
result = (extract_name(name1), extract_name(name2))
self.assertEqual(
result,
(
"Ugly.Americans.S01E01.rmvb",
"The.Walking.Dead.S10E15.The.Tower.720p/1080p.AMZN.WEB-DL.DD+5.1.H.264-CasStudio",
),
)
def test_most_en(self):
name = "少年谢尔顿 第一季(第22集-简繁英双语字幕)Young.Sheldon.S01E22.720p.HDTV.rar"
new_name = extract_name(name)
self.assertEqual(new_name, "Young.Sheldon.S01E22.720p.HDTV.rar")
def test_most_ch(self):
name = "少年谢尔顿 第一季(第22集-简繁英双语字幕)Young.Sheldon.S01E22.720p.rar"
new_name = extract_name(name)
self.assertEqual(new_name, "少年谢尔顿 第一季(第22集-简繁英双语字幕).rar")
def test_force_en(self):
name = "少年谢尔顿 第一季(第22集-简繁英双语字幕)Young.Sheldon.S01E22.720p.rar"
new_name = extract_name(name, en=True)
self.assertEqual(new_name, "Young.Sheldon.S01E22.720p.rar")
|
#!/usr/bin/env python
# Use joystick input to launch behavioral nodes in jackal
#
# Intro to Robotics - EE5531 - Spring 2018
# Final Project
#
# Group #1
# Narendra
# Alex
# Shivam
#
# version: v1.5
# define imports
import rospy
import roslaunch
import sys
import time
import os
import numpy as np
from sensor_msgs.msg import Joy, LaserScan
# class to read joystick messages and launch node
class joy_control(object):
# define self routine
def __init__(self):
# define subscriber
rospy.Subscriber("/bluetooth_teleop/joy", Joy, self.joy_callback)
rospy.Subscriber("/scan",LaserScan, self.laser_callback)
rate = rospy.Rate(5)
rospy.loginfo('started joystick routine..')
# define and init variables
self.person_following_start = False
self.person_following_stop = False
self.wall_following_start = False
self.wall_following_stop = False
self.lane_following_start = False
self.lane_following_stop = False
self.stop_line_start = False
self.stop_line_stop = False
# configure node roslaunch api
package = 'quad_pkg'
executable_person_following = 'person_following3.py'
node_person_following = roslaunch.core.Node(package, executable_person_following)
executable_wall_following = 'wall_following.py'
node_wall_following = roslaunch.core.Node(package, executable_wall_following)
executable_stop_line = 'stop_line2.py'
node_stop_line = roslaunch.core.Node(package, executable_stop_line)
executable_lane_following = 'lane_following2.py'
node_lane_following = roslaunch.core.Node(package, executable_lane_following)
launch = roslaunch.scriptapi.ROSLaunch()
launch.start()
while not rospy.is_shutdown():
# if start flag set: launch main launch-file
if self.person_following_start:
person_following_process = launch.launch(node_person_following)
# if stop flag set: shutdown main launch-file
if self.person_following_stop:
if 'person_following_process' in locals():
person_following_process.stop()
# if start flag set: launch main launch-file
if self.wall_following_start:
wall_following_process = launch.launch(node_wall_following)
# if stop flag set: shutdown main launch-file
if self.wall_following_stop:
if 'wall_following_process' in locals():
wall_following_process.stop()
# if start flag set: launch main launch-file
if self.lane_following_start:
lane_following_process = launch.launch(node_lane_following)
# if stop flag set: shutdown main launch-file
if self.lane_following_stop:
if 'lane_following_process' in locals():
lane_following_process.stop()
# if start flag set: launch main launch-file
if self.stop_line_start:
stop_line_process = launch.launch(node_stop_line)
# if stop flag set: shutdown main launch-file
if self.stop_line_stop:
if 'stop_line_process' in locals():
stop_line_process.stop()
# reset trigger
self.person_following_start = False
self.person_following_stop = False
self.wall_following_start = False
self.wall_following_stop = False
self.lane_following_start = False
self.lane_following_stop = False
self.stop_line_start = False
self.stop_line_stop = False
rate.sleep()
# joystick callback routine
def joy_callback(self, data):
# define joystick buttons
x, circ, sq, tri, L1, R1, share, options, p4, L3, R3, DL, DR, DU, DD = data.buttons
llr, lud, L2, rlr, rud, R2 = data.axes
# Start person following
if (tri == 1) and (self.person_following_start == False):
rospy.loginfo("Starting the person following routine...")
# set the start flag
self.person_following_start = True
# Start wall following
if (sq == 1) and (self.wall_following_start == False):
rospy.loginfo("Starting the wall following routine...")
# set the start flag
self.wall_following_start = True
# Start lane following
if (circ == 1) and (self.lane_following_start == False):
rospy.loginfo("Starting the lane following routine...")
# set the start flag
self.lane_following_start = True
# Start stop line
if (x == 1) and (self.stop_line_start == False):
rospy.loginfo("Starting the stop line routine...")
# set the start flag
self.stop_line_start = True
# Terminate everything running and return to manual
if (R1 == 1):
rospy.loginfo("Terminating the predator routine...")
# set stop flag
self.person_following_stop = True
self.wall_following_stop = True
self.lane_following_stop = True
self.stop_line_stop = True
def laser_callback(self,data):
        if (self.stop_line_stop == False) and (np.min(np.asarray(data.ranges[250:450])) <= 0.5):
rospy.loginfo("Terminating stop line")
self.stop_line_stop = True
if __name__ == "__main__":
try:
rospy.init_node("joy_start", anonymous=False)
run = joy_control()
except rospy.ROSInterruptException:
rospy.loginfo("joy_start node terminated.")
|
#!/usr/bin/python
"""
Test the function that maps from EOS
"""
import numpy as np
import pylab as P
import scipy.integrate
import scipy.interpolate
import radiofisher as rf
from radiofisher.experiments import cosmo
C = 3e5
ax1 = P.subplot(111)
def old_eos_fisher_matrix_derivs(cosmo, cosmo_fns):
"""
Pre-calculate derivatives required to transform (aperp, apar) into dark
energy parameters (Omega_k, Omega_DE, w0, wa, h, gamma).
Returns interpolation functions for d(f,a_perp,par)/d(DE params) as fn. of a.
"""
w0 = cosmo['w0']; wa = cosmo['wa']
om = cosmo['omega_M_0']; ol = cosmo['omega_lambda_0']
ok = 1. - om - ol
# Omega_DE(a) and E(a) functions
omegaDE = lambda a: ol * np.exp(3.*wa*(a - 1.)) / a**(3.*(1. + w0 + wa))
E = lambda a: np.sqrt( om * a**(-3.) + ok * a**(-2.) + omegaDE(a) )
# Derivatives of E(z) w.r.t. parameters
#dE_omegaM = lambda a: 0.5 * a**(-3.) / E(a)
if np.abs(ok) < 1e-7: # Effectively zero
dE_omegak = lambda a: 0.5 * a**(-2.) / E(a)
else:
dE_omegak = lambda a: 0.5 * a**(-2.) / E(a) * (1. - 1./a)
dE_omegaM = lambda a: 0.5 * a**(-3.) / E(a)
dE_omegaDE = lambda a: 0.5 / E(a) * (1. - 1./a**3.)
dE_w0 = lambda a: -1.5 * omegaDE(a) * np.log(a) / E(a)
dE_wa = lambda a: -1.5 * omegaDE(a) * (np.log(a) + 1. - a) / E(a)
# Bundle functions into list (for performing repetitive operations with them)
fns = [dE_omegak, dE_omegaDE, dE_w0, dE_wa]
# Set sampling of scale factor, and precompute some values
HH, rr, DD, ff = cosmo_fns
aa = np.linspace(1., 1e-4, 500)
zz = 1./aa - 1.
EE = E(aa); fz = ff(aa)
gamma = cosmo['gamma']; H0 = 100. * cosmo['h']; h = cosmo['h']
# Derivatives of apar w.r.t. parameters
derivs_apar = [f(aa)/EE for f in fns]
# Derivatives of f(z) w.r.t. parameters
f_fac = -gamma * fz / EE
df_domegak = f_fac * (EE/om + dE_omegak(aa))
df_domegaDE = f_fac * (EE/om + dE_omegaDE(aa))
df_w0 = f_fac * dE_w0(aa)
df_wa = f_fac * dE_wa(aa)
df_dh = np.zeros(aa.shape)
df_dgamma = fz * np.log(rf.omegaM_z(zz, cosmo))
derivs_f = [df_domegak, df_domegaDE, df_w0, df_wa, df_dh, df_dgamma]
# Calculate comoving distance (including curvature)
r_c = scipy.integrate.cumtrapz(1./(aa**2. * EE)) # FIXME!
r_c = np.concatenate(([0.], r_c))
if ok > 0.:
r = C/(H0*np.sqrt(ok)) * np.sinh(r_c * np.sqrt(ok))
elif ok < 0.:
r = C/(H0*np.sqrt(-ok)) * np.sin(r_c * np.sqrt(-ok))
else:
r = C/H0 * r_c
# Perform integrals needed to calculate derivs. of aperp
# FIXME: No factor of 2!
print "*"*190
derivs_aperp = [(C/H0)/r[1:] * scipy.integrate.cumtrapz( f(aa)/(aa * EE)**2.)
for f in fns] # FIXME
# Add additional term to curvature integral (idx 1)
# N.B. I think Pedro's result is wrong (for fiducial Omega_k=0 at least),
# so I'm commenting it out
#derivs_aperp[1] -= (H0 * r[1:] / C)**2. / 6.
# Add initial values (to deal with 1/(r=0) at origin)
inivals = [0.5, 0.0, 0., 0.] # FIXME: Are these OK?
derivs_aperp = [ np.concatenate(([inivals[i]], derivs_aperp[i]))
for i in range(len(derivs_aperp)) ]
# Add (h, gamma) derivs to aperp,apar
derivs_aperp += [np.ones(aa.shape)/h, np.zeros(aa.shape)]
derivs_apar += [np.ones(aa.shape)/h, np.zeros(aa.shape)]
# Construct interpolation functions
interp_f = [scipy.interpolate.interp1d(aa[::-1], d[::-1],
kind='linear', bounds_error=False) for d in derivs_f]
interp_apar = [scipy.interpolate.interp1d(aa[::-1], d[::-1],
kind='linear', bounds_error=False) for d in derivs_apar]
interp_aperp = [scipy.interpolate.interp1d(aa[::-1], d[::-1],
kind='linear', bounds_error=False) for d in derivs_aperp]
return [interp_f, interp_aperp, interp_apar]
def eos_fisher_matrix_derivs(cosmo, cosmo_fns):
"""
Pre-calculate derivatives required to transform (aperp, apar) into dark
energy parameters (Omega_k, Omega_DE, w0, wa, h, gamma).
Returns interpolation functions for d(f,a_perp,par)/d(DE params) as fn. of a.
"""
w0 = cosmo['w0']; wa = cosmo['wa']
om = cosmo['omega_M_0']; ol = cosmo['omega_lambda_0']
ok = 1. - om - ol
# Omega_DE(a) and E(a) functions
omegaDE = lambda a: ol * np.exp(3.*wa*(a - 1.)) / a**(3.*(1. + w0 + wa))
E = lambda a: np.sqrt( om * a**(-3.) + ok * a**(-2.) + omegaDE(a) )
# Derivatives of E(z) w.r.t. parameters
#dE_omegaM = lambda a: 0.5 * a**(-3.) / E(a)
if np.abs(ok) < 1e-7: # Effectively zero
dE_omegak = lambda a: 0.5 * a**(-2.) / E(a)
else:
dE_omegak = lambda a: 0.5 * a**(-2.) / E(a) * (1. - 1./a)
dE_omegaM = lambda a: 0.5 * a**(-3.) / E(a)
dE_omegaDE = lambda a: 0.5 / E(a) * (1. - 1./a**3.)
dE_w0 = lambda a: -1.5 * omegaDE(a) * np.log(a) / E(a)
dE_wa = lambda a: -1.5 * omegaDE(a) * (np.log(a) + 1. - a) / E(a)
# Bundle functions into list (for performing repetitive operations with them)
fns = [dE_omegak, dE_omegaDE, dE_w0, dE_wa]
# Set sampling of scale factor, and precompute some values
HH, rr, DD, ff = cosmo_fns
aa = np.linspace(1., 1e-4, 500)
zz = 1./aa - 1.
EE = E(aa); fz = ff(aa)
gamma = cosmo['gamma']; H0 = 100. * cosmo['h']; h = cosmo['h']
# Derivatives of apar w.r.t. parameters
derivs_apar = [f(aa)/EE for f in fns]
# Derivatives of f(z) w.r.t. parameters
f_fac = -gamma * fz / EE
df_domegak = f_fac * (EE/om + dE_omegak(aa))
df_domegaDE = f_fac * (EE/om + dE_omegaDE(aa))
df_w0 = f_fac * dE_w0(aa)
df_wa = f_fac * dE_wa(aa)
df_dh = np.zeros(aa.shape)
df_dgamma = fz * np.log(rf.omegaM_z(zz, cosmo)) # FIXME: rf.omegaM_z
derivs_f = [df_domegak, df_domegaDE, df_w0, df_wa, df_dh, df_dgamma]
# Calculate comoving distance (including curvature)
r_c = scipy.integrate.cumtrapz(1./(aa**2. * EE), aa) # FIXME!
r_c = np.concatenate(([0.], r_c))
if ok > 0.:
r = C/(H0*np.sqrt(ok)) * np.sinh(r_c * np.sqrt(ok))
elif ok < 0.:
r = C/(H0*np.sqrt(-ok)) * np.sin(r_c * np.sqrt(-ok))
else:
r = C/H0 * r_c
# Perform integrals needed to calculate derivs. of aperp
print "*"*190
derivs_aperp = [(C/H0)/r[1:] * scipy.integrate.cumtrapz( f(aa)/(aa * EE)**2., aa)
for f in fns] # FIXME
# Add additional term to curvature integral (idx 1)
# N.B. I think Pedro's result is wrong (for fiducial Omega_k=0 at least),
# so I'm commenting it out
#derivs_aperp[1] -= (H0 * r[1:] / C)**2. / 6.
# Add initial values (to deal with 1/(r=0) at origin)
inivals = [0.5, 0.0, 0., 0.] # FIXME: Are these OK?
derivs_aperp = [ np.concatenate(([inivals[i]], derivs_aperp[i]))
for i in range(len(derivs_aperp)) ]
# Add (h, gamma) derivs to aperp,apar
derivs_aperp += [np.ones(aa.shape)/h, np.zeros(aa.shape)]
derivs_apar += [np.ones(aa.shape)/h, np.zeros(aa.shape)]
# Construct interpolation functions
interp_f = [scipy.interpolate.interp1d(aa[::-1], d[::-1],
kind='linear', bounds_error=False) for d in derivs_f]
interp_apar = [scipy.interpolate.interp1d(aa[::-1], d[::-1],
kind='linear', bounds_error=False) for d in derivs_apar]
interp_aperp = [scipy.interpolate.interp1d(aa[::-1], d[::-1],
kind='linear', bounds_error=False) for d in derivs_aperp]
return [interp_f, interp_aperp, interp_apar]
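# Sketch (not part of the original script): the key difference between the two
# functions above is the call to cumtrapz. The old version omits the sample
# points, so the integral is taken with an implicit spacing of dx=1; the new
# version passes the scale-factor grid `aa`. The helper below illustrates the
# effect and is never called by this script.
def _cumtrapz_spacing_demo():
    a_demo = np.linspace(1.0, 0.5, 6)
    y_demo = 1.0 / a_demo**2.
    unit_spaced = scipy.integrate.cumtrapz(y_demo)          # old behaviour: assumes unit spacing
    grid_spaced = scipy.integrate.cumtrapz(y_demo, a_demo)  # new behaviour: uses the actual (descending) grid
    return unit_spaced, grid_spaced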
# Precompute cosmo functions
cosmo_fns = rf.background_evolution_splines(cosmo)
# OLD
old_f, old_aperp, old_apar = old_eos_fisher_matrix_derivs(cosmo, cosmo_fns)
# NEW
new_f, new_aperp, new_apar = eos_fisher_matrix_derivs(cosmo, cosmo_fns)
z = np.linspace(0., 7., 1000)
a = 1. / (1. + z)
# Plot results
P.subplot(111)
cols = ['r', 'g', 'b', 'y', 'c', 'm']
for i in range(len(new_f)):
P.plot(z, old_f[i](a), lw=1.5, color=cols[i], alpha=0.4)
P.plot(z, new_f[i](a), lw=1.5, color=cols[i], ls='dashed')
P.show()
|
import pyparsing as _pp
def _swap_infix(s, l, t):
t[0][0], t[0][1] = t[0][1], t[0][0]
_quoted = _pp.QuotedString('"', escChar='\\')
_plain_tag = _pp.Word(_pp.alphas + "_", _pp.alphanums + "_-")
_tag = (_quoted | _plain_tag).setParseAction(lambda s, l, t: [['$', t[0]]])
_const = _pp.Word('01', exact=1).setParseAction(lambda s, l, t: [[t[0]]])
_term = _tag | _const
_expr = _pp.infixNotation(
_term,
[
("!", 1, _pp.opAssoc.RIGHT),
("&", 2, _pp.opAssoc.RIGHT, _swap_infix),
("|", 2, _pp.opAssoc.RIGHT, _swap_infix),
]
)
def compile(expr):
try:
ast = _expr.parseString(expr, parseAll=True).asList()[0]
except _pp.ParseException as e:
raise RuntimeError("Error parsing tag expression at \"@@@\": {}".format(e.markInputline("@@@"))) from e
def evaluate(tags):
def recurse(ast):
if ast[0] == '$':
return ast[1] in tags
if ast[0] == '0':
return False
if ast[0] == '1':
return True
if ast[0] == '!':
return not recurse(ast[1])
if ast[0] == '&':
return recurse(ast[1]) and recurse(ast[2])
if ast[0] == '|':
return recurse(ast[1]) or recurse(ast[2])
assert 0
return recurse(ast)
return evaluate
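# Minimal usage sketch (hypothetical tag names, not part of the original
# module): compile() returns an evaluator that tests a set of tags against
# the boolean expression.
if __name__ == "__main__":
    matches = compile('linux & !("slow test" | wip)')
    print(matches({"linux", "fast"}))  # True: has linux, none of the excluded tags
    print(matches({"linux", "wip"}))   # False: wip is excluded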
|
# Generated by Django 2.2.6 on 2019-10-14 20:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('backend', '0007_connection_guacdserver'),
]
operations = [
migrations.AddField(
model_name='connection',
name='credentials',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='backend.Credentials', verbose_name='Credentials'),
),
]
|
#!/usr/bin/env python -u
"""py65mon -- interact with a simulated 6502-based system
Usage: %s [options]
Options:
-h, --help : Show this message
-m, --mpu <device> : Choose which MPU device (default is 6502)
-l, --load <file> : Load a file at address 0
-r, --rom <file> : Load a rom at the top of address space and reset into it
-g, --goto <address> : Perform a goto command after loading any files
-i, --input <address> : define location of getc (default $f004)
-o, --output <address> : define location of putc (default $f001)
"""
import cmd
import getopt
import os
import re
import shlex
import sys
from asyncore import compact_traceback
from py65.devices.mpu6502 import MPU as NMOS6502
from py65.devices.mpu65c02 import MPU as CMOS65C02
from py65.devices.mpu65org16 import MPU as V65Org16
from py65.disassembler import Disassembler
from py65.assembler import Assembler
from py65.utils.addressing import AddressParser
from py65.utils import console
from py65.utils.conversions import itoa
from py65.memory import ObservableMemory
try:
from urllib2 import urlopen
except ImportError: # Python 3
from urllib.request import urlopen
class Monitor(cmd.Cmd):
Microprocessors = {'6502': NMOS6502, '65C02': CMOS65C02,
'65Org16': V65Org16}
def __init__(self, argv=None, stdin=None, stdout=None,
mpu_type=NMOS6502, memory=None,
putc_addr=0xF001, getc_addr=0xF004):
self.mpu_type = mpu_type
self.memory = memory
self.putc_addr = putc_addr
self.getc_addr = getc_addr
self._breakpoints = []
self._width = 78
self.prompt = "."
self._add_shortcuts()
# Save the current system input mode so it can be restored after
        # processing commands and before exiting.
console.save_mode(sys.stdin)
# Attempt to get a copy of stdin that is unbuffered on systems
# that support it. This allows for immediate response to
# typed input as well as pasted input. If unable to get an
# unbuffered version of stdin, the original version is returned.
self.unbuffered_stdin = console.get_unbuffered_stdin(stdin)
cmd.Cmd.__init__(self, stdin=self.unbuffered_stdin, stdout=stdout)
# Check for any exceptions thrown during __init__ while
# processing the arguments.
try:
if argv is None:
argv = sys.argv
load, rom, goto = self._parse_args(argv)
self._reset(self.mpu_type, self.getc_addr, self.putc_addr)
if load is not None:
self.do_load("%r" % load)
if goto is not None:
self.do_goto(goto)
if rom is not None:
# load a ROM and run from the reset vector
self.do_load("%r top" % rom)
physMask = self._mpu.memory.physMask
reset = self._mpu.RESET & physMask
dest = self._mpu.memory[reset] + \
(self._mpu.memory[reset + 1] << self.byteWidth)
self.do_goto("$%x" % dest)
except:
# Restore input mode on any exception and then rethrow the
# exception.
console.restore_mode()
raise
def __del__(self):
try:
# Restore the input mode.
console.restore_mode()
# Close the unbuffered input file handle, if it exists.
if self.unbuffered_stdin != None:
if self.unbuffered_stdin != sys.stdin:
self.unbuffered_stdin.close()
except:
pass
def _parse_args(self, argv):
try:
shortopts = 'hi:o:m:l:r:g:'
longopts = ['help', 'mpu=', 'input=', 'output=', 'load=', 'rom=', 'goto=']
options, args = getopt.getopt(argv[1:], shortopts, longopts)
except getopt.GetoptError as exc:
self._output(exc.args[0])
self._usage()
self._exit(1)
load, rom, goto = None, None, None
for opt, value in options:
if opt in ('-i', '--input'):
self.getc_addr = int(value, 16)
if opt in ('-o', '--output'):
self.putc_addr = int(value, 16)
if opt in ('-m', '--mpu'):
mpu_type = self._get_mpu(value)
if mpu_type is None:
mpus = sorted(self.Microprocessors.keys())
msg = "Fatal: no such MPU. Available MPUs: %s"
self._output(msg % ', '.join(mpus))
sys.exit(1)
self.mpu_type = mpu_type
if opt in ("-h", "--help"):
self._usage()
self._exit(0)
if opt in ('-l', '--load'):
load = value
if opt in ('-r', '--rom'):
rom = value
if opt in ('-g', '--goto'):
goto = value
return load, rom, goto
def _usage(self):
usage = __doc__ % sys.argv[0]
self._output(usage)
def onecmd(self, line):
line = self._preprocess_line(line)
result = None
try:
result = cmd.Cmd.onecmd(self, line)
except KeyboardInterrupt:
self._output("Interrupt")
except Exception:
(file, fun, line), t, v, tbinfo = compact_traceback()
error = 'Error: %s, %s: file: %s line: %s' % (t, v, file, line)
self._output(error)
if not line.startswith("quit"):
self._output_mpu_status()
# Switch back to the previous input mode.
console.restore_mode()
return result
def _reset(self, mpu_type, getc_addr=0xF004, putc_addr=0xF001):
self._mpu = mpu_type(memory=self.memory)
self.addrWidth = self._mpu.ADDR_WIDTH
self.byteWidth = self._mpu.BYTE_WIDTH
self.addrFmt = self._mpu.ADDR_FORMAT
self.byteFmt = self._mpu.BYTE_FORMAT
self.addrMask = self._mpu.addrMask
self.byteMask = self._mpu.byteMask
if getc_addr and putc_addr:
self._install_mpu_observers(getc_addr, putc_addr)
self._address_parser = AddressParser()
self._disassembler = Disassembler(self._mpu, self._address_parser)
self._assembler = Assembler(self._mpu, self._address_parser)
def _add_shortcuts(self):
self._shortcuts = {'EOF': 'quit',
'~': 'tilde',
'a': 'assemble',
'ab': 'add_breakpoint',
'al': 'add_label',
'd': 'disassemble',
'db': 'delete_breakpoint',
'dl': 'delete_label',
'exit': 'quit',
'f': 'fill',
'>': 'fill',
'g': 'goto',
'h': 'help',
'?': 'help',
'l': 'load',
'm': 'mem',
'q': 'quit',
'r': 'registers',
'ret': 'return',
'rad': 'radix',
's': 'save',
'shb': 'show_breakpoints',
'shl': 'show_labels',
'x': 'quit',
'z': 'step'}
def _preprocess_line(self, line):
# line comments
quoted = False
for pos, char in enumerate(line):
if char in ('"', "'"):
quoted = not quoted
if (not quoted) and (char == ';'):
line = line[:pos]
break
# whitespace & leading dots
line = line.strip(' \t').lstrip('.')
# special case for vice compatibility
if line.startswith('~'):
line = self._shortcuts['~'] + ' ' + line[1:]
# command shortcuts
for shortcut, command in self._shortcuts.items():
if line == shortcut:
line = command
break
            pattern = r'^%s\s+' % re.escape(shortcut)
matches = re.match(pattern, line)
if matches:
start, end = matches.span()
line = "%s %s" % (command, line[end:])
break
return line
def _get_mpu(self, name):
requested = name.lower()
mpu = None
for key, klass in self.Microprocessors.items():
if key.lower() == requested:
mpu = klass
break
return mpu
def _install_mpu_observers(self, getc_addr, putc_addr):
def putc(address, value):
try:
self.stdout.write(chr(value))
except UnicodeEncodeError: # Python 3
self.stdout.write("?")
self.stdout.flush()
def getc(address):
char = console.getch_noblock(self.stdin)
if char:
byte = ord(char)
else:
byte = 0
return byte
m = ObservableMemory(subject=self.memory, addrWidth=self.addrWidth)
m.subscribe_to_write([self.putc_addr], putc)
m.subscribe_to_read([self.getc_addr], getc)
self._mpu.memory = m
def _output_mpu_status(self):
self._output("\n" + repr(self._mpu))
def _output(self, stuff):
self.stdout.write("%s\n" % stuff)
def _exit(self, exitcode=0):
sys.exit(exitcode)
def do_help(self, args):
args = self._shortcuts.get(args.strip(), args)
return cmd.Cmd.do_help(self, args)
def help_version(self):
self._output("version\t\tDisplay Py65 version information.")
def do_version(self, args):
self._output("\nPy65 Monitor UCSB IEEE Edition")
def help_help(self):
self._output("help\t\tPrint a list of available actions.")
self._output("help <action>\tPrint help for <action>.")
def help_reset(self):
self._output("reset\t\tReset the microprocessor")
def do_reset(self, args):
klass = self._mpu.__class__
self._reset(mpu_type=klass)
def do_mpu(self, args):
def available_mpus():
mpus = list(self.Microprocessors.keys())
mpus.sort()
self._output("Available MPUs: %s" % ', '.join(mpus))
if args == '':
self._output("Current MPU is %s" % self._mpu.name)
available_mpus()
else:
new_mpu = self._get_mpu(args)
if new_mpu is None:
self._output("Unknown MPU: %s" % args)
available_mpus()
else:
self._reset(new_mpu,self.getc_addr,self.putc_addr)
self._output("Reset with new MPU %s" % self._mpu.name)
def help_mpu(self):
self._output("mpu\t\tPrint available microprocessors.")
self._output("mpu <type>\tSelect a new microprocessor.")
def do_quit(self, args):
self._output('')
return 1
def help_quit(self):
self._output("To quit, type ^D or use the quit command.")
def do_assemble(self, args):
splitted = args.split(None, 1)
if len(splitted) != 2:
return self._interactive_assemble(args)
statement = splitted[1]
try:
start = self._address_parser.number(splitted[0])
bytes = self._assembler.assemble(statement, start)
end = start + len(bytes)
self._mpu.memory[start:end] = bytes
self.do_disassemble(self.addrFmt % start)
except KeyError as exc:
self._output(exc.args[0]) # "Label not found: foo"
except OverflowError:
self._output("Overflow error: %s" % args)
except SyntaxError:
self._output("Syntax error: %s" % statement)
def help_assemble(self):
self._output("assemble\t\t\t"
"Start interactive assembly at the program counter.")
self._output("assemble <address>\t\t"
"Start interactive assembly at the address.")
self._output("assemble <address> <statement>\t"
"Assemble a statement at the address.")
def _interactive_assemble(self, args):
if args == '':
start = self._mpu.pc
else:
try:
start = self._address_parser.number(args)
except KeyError as exc:
self._output(exc.args[0]) # "Label not found: foo"
return
while True:
prompt = "\r$" + (self.addrFmt % start) + " " + \
(" " * int(1 + self.byteWidth / 4) * 3)
line = console.line_input(prompt,
stdin=self.stdin, stdout=self.stdout)
if not line.strip():
self.stdout.write("\n")
return
# assemble into memory
try:
bytes = self._assembler.assemble(line, pc=start)
numbytes = len(bytes)
end = start + numbytes
self._mpu.memory[start:end] = bytes
# print disassembly
_, disasm = self._disassembler.instruction_at(start)
fdisasm = self._format_disassembly(start, numbytes, disasm)
indent = ' ' * (len(prompt + line) + 5)
self.stdout.write("\r" + indent + "\r")
self.stdout.write(fdisasm + "\n")
# advance to next address
start += numbytes
if start >= (2 ** self._mpu.ADDR_WIDTH):
start = 0
except KeyError:
addr = self.addrFmt % start
self.stdout.write("\r$%s ?Label\n" % addr)
except OverflowError:
addr = self.addrFmt % start
self.stdout.write("\r$%s ?Overflow\n" % addr)
except SyntaxError:
addr = self.addrFmt % start
self.stdout.write("\r$%s ?Syntax\n" % addr)
def do_disassemble(self, args):
splitted = shlex.split(args)
if len(splitted) != 1:
return self.help_disassemble()
address_parts = splitted[0].split(":")
start = self._address_parser.number(address_parts[0])
if len(address_parts) > 1:
end = self._address_parser.number(address_parts[1])
else:
end = start
max_address = (2 ** self._mpu.ADDR_WIDTH) - 1
cur_address = start
needs_wrap = start > end
while needs_wrap or cur_address <= end:
length, disasm = self._disassembler.instruction_at(cur_address)
self._output(self._format_disassembly(cur_address, length, disasm))
remaining = length
while remaining:
remaining -= 1
cur_address += 1
if start > end and cur_address > max_address:
needs_wrap = False
cur_address = 0
def _format_disassembly(self, address, length, disasm):
cur_address = address
max_address = (2 ** self._mpu.ADDR_WIDTH) - 1
bytes_remaining = length
dump = ''
while bytes_remaining:
if cur_address > max_address:
cur_address = 0
dump += self.byteFmt % self._mpu.memory[cur_address] + " "
cur_address += 1
bytes_remaining -= 1
fieldwidth = 1 + int(1 + self.byteWidth / 4) * 3
fieldfmt = "%%-%ds" % fieldwidth
return "$" + self.addrFmt % address + " " + fieldfmt % dump + disasm
def help_disassemble(self):
self._output("disassemble <address_range>")
self._output("Disassemble instructions in the address range.")
self._output('Range is specified like "<start>:<end>".')
def help_step(self):
self._output("step")
self._output("Single-step through instructions.")
def do_step(self, args):
self._mpu.step()
self.do_disassemble(self.addrFmt % self._mpu.pc)
def help_return(self):
self._output("return")
self._output("Continues execution and returns to the monitor just")
self._output("before the next RTS or RTI is executed.")
def do_return(self, args):
returns = [0x60, 0x40] # RTS, RTI
self._run(stopcodes=returns)
def help_goto(self):
self._output("goto <address>")
self._output("Change the PC to address and continue execution.")
def do_goto(self, args):
if args == '':
return self.help_goto()
self._mpu.pc = self._address_parser.number(args)
stps = [0xdb] # STP
self._run(stopcodes=stps)
def _run(self, stopcodes):
stopcodes = set(stopcodes)
breakpoints = set(self._breakpoints)
mpu = self._mpu
mem = self._mpu.memory
# Switch to immediate (noncanonical) no-echo input mode on POSIX
# operating systems. This has no effect on Windows.
console.noncanonical_mode(self.stdin)
if not breakpoints:
while True:
if mem[mpu.pc] in stopcodes:
break
mpu.step()
else:
while True:
pc = mpu.pc
if mem[pc] in stopcodes:
break
if pc in breakpoints:
msg = "Breakpoint %d reached."
self._output(msg % self._breakpoints.index(pc))
break
mpu.step()
# Switch back to the previous input mode.
console.restore_mode()
def help_radix(self):
self._output("radix [H|D|O|B]")
self._output("Set default radix to hex, decimal, octal, or binary.")
self._output("With no argument, the current radix is printed.")
def help_cycles(self):
self._output("Display the total number of cycles executed.")
def do_cycles(self, args):
self._output(str(self._mpu.processorCycles))
def do_radix(self, args):
radixes = {'Hexadecimal': 16, 'Decimal': 10, 'Octal': 8, 'Binary': 2}
if args != '':
new = args[0].lower()
changed = False
for name, radix in radixes.items():
if name[0].lower() == new:
self._address_parser.radix = radix
changed = True
if not changed:
self._output("Illegal radix: %s" % args)
for name, radix in radixes.items():
if self._address_parser.radix == radix:
self._output("Default radix is %s" % name)
def help_tilde(self):
self._output("~ <number>")
self._output("Display a number in decimal, hex, octal, and binary.")
def do_tilde(self, args):
if args == '':
return self.help_tilde()
try:
num = self._address_parser.number(args)
self._output("+%u" % num)
self._output("$" + self.byteFmt % num)
self._output("%04o" % num)
self._output(itoa(num, 2).zfill(8))
except KeyError:
self._output("Bad label: %s" % args)
except OverflowError:
self._output("Overflow error: %s" % args)
def help_registers(self):
self._output("registers[<name>=<value> [, <name>=<value>]*]")
self._output("Assign respective registers. With no parameters,")
self._output("display register values.")
def do_registers(self, args):
if args == '':
return
        pairs = re.findall(r'([^=,\s]*)=([^=,\s]*)', args)
if pairs == []:
return self._output("Syntax error: %s" % args)
for register, value in pairs:
if register not in ('pc', 'sp', 'a', 'x', 'y', 'p'):
self._output("Invalid register: %s" % register)
else:
try:
intval = self._address_parser.number(value)
except KeyError as exc: # label not found
self._output(exc.args[0])
continue
except OverflowError as exc: # wider than address space
msg = "Overflow: %r too wide for register %r"
self._output(msg % (value, register))
continue
if register != 'pc':
if intval != (intval & self.byteMask):
msg = "Overflow: %r too wide for register %r"
self._output(msg % (value, register))
continue
setattr(self._mpu, register, intval)
def help_cd(self):
self._output("cd <directory>")
self._output("Change the working directory.")
def do_cd(self, args):
if args == '':
return self.help_cd()
try:
os.chdir(args)
except OSError as exc:
msg = "Cannot change directory: [%d] %s" % (exc.errno,
exc.strerror)
self._output(msg)
self.do_pwd()
def help_pwd(self):
self._output("Show the current working directory.")
def do_pwd(self, args=None):
cwd = os.getcwd()
self._output(cwd)
def help_load(self):
self._output("load <filename|url> <address|top>")
self._output("Load a file into memory at the specified address.")
self._output('An address of "top" loads into the top of memory.')
self._output("Commodore-style load address bytes are ignored.")
def do_load(self, args):
split = shlex.split(args)
if len(split) not in (1, 2):
self._output("Syntax error: %s" % args)
return
filename = split[0]
if "://" in filename:
try:
f = urlopen(filename)
bytes = f.read()
f.close()
except Exception as exc:
msg = "Cannot fetch remote file: %s" % str(exc)
self._output(msg)
return
else:
try:
f = open(filename, 'rb')
bytes = f.read()
f.close()
except (OSError, IOError) as exc:
msg = "Cannot load file: [%d] %s" % (exc.errno, exc.strerror)
self._output(msg)
return
if len(split) == 2:
if split[1] == "top":
# load a ROM to top of memory
top_address = self.addrMask
program_size = len(bytes) // (self.byteWidth // 8)
start = top_address - program_size + 1
else:
start = self._address_parser.number(split[1])
else:
start = self._mpu.pc
if self.byteWidth == 8:
if isinstance(bytes, str):
bytes = map(ord, bytes)
else: # Python 3
bytes = [ b for b in bytes ]
elif self.byteWidth == 16:
def format(msb, lsb):
if isinstance(bytes, str):
return (ord(msb) << 8) + ord(lsb)
else: # Python 3
return (msb << 8) + lsb
bytes = list(map(format, bytes[0::2], bytes[1::2]))
self._fill(start, start, bytes)
def help_save(self):
self._output("save \"filename\" <start> <end>")
self._output("Save the specified memory range as a binary file.")
self._output("Commodore-style load address bytes are not written.")
def do_save(self, args):
split = shlex.split(args)
if len(split) != 3:
self._output("Syntax error: %s" % args)
return
filename = split[0]
start = self._address_parser.number(split[1])
end = self._address_parser.number(split[2])
mem = self._mpu.memory[start:end + 1]
try:
f = open(filename, 'wb')
for m in mem:
                # output each octet from msb first
for shift in range(self.byteWidth - 8, -1, -8):
f.write(bytearray([(m >> shift) & 0xff]))
f.close()
except (OSError, IOError) as exc:
msg = "Cannot save file: [%d] %s" % (exc.errno, exc.strerror)
self._output(msg)
return
self._output("Saved +%d bytes to %s" % (len(mem), filename))
def help_fill(self):
self._output("fill <address_range> <data_list>")
self._output("Fill memory in the address range with the data in")
self._output("<data_list>. If the size of the address range is")
self._output("greater than the size of the data_list, the data_list ")
self._output("is repeated.")
def do_fill(self, args):
split = shlex.split(args)
if len(split) < 2:
return self.help_fill()
try:
start, end = self._address_parser.range(split[0])
filler = list(map(self._address_parser.number, split[1:]))
except KeyError as exc:
self._output(exc.args[0]) # "Label not found: foo"
else:
self._fill(start, end, filler)
def _fill(self, start, end, filler):
address = start
length, index = len(filler), 0
if start == end:
end = start + length - 1
if (end > self.addrMask):
end = self.addrMask
while address <= end:
address &= self.addrMask
self._mpu.memory[address] = (filler[index] & self.byteMask)
index += 1
if index == length:
index = 0
address += 1
fmt = (end - start + 1, start, end)
starttoend = "$" + self.addrFmt + " to $" + self.addrFmt
self._output(("Wrote +%d bytes from " + starttoend) % fmt)
def help_mem(self):
self._output("mem <address_range>")
self._output("Display the contents of memory.")
self._output('Range is specified like "<start:end>".')
def do_mem(self, args):
split = shlex.split(args)
if len(split) != 1:
return self.help_mem()
start, end = self._address_parser.range(split[0])
line = self.addrFmt % start + ":"
for address in range(start, end + 1):
byte = self._mpu.memory[address]
more = " " + self.byteFmt % byte
exceeded = len(line) + len(more) > self._width
if exceeded:
self._output(line)
line = self.addrFmt % address + ":"
line += more
self._output(line)
def help_add_label(self):
self._output("add_label <address> <label>")
self._output("Map a given address to a label.")
def do_add_label(self, args):
split = shlex.split(args)
if len(split) != 2:
self._output("Syntax error: %s" % args)
return self.help_add_label()
try:
address = self._address_parser.number(split[0])
except KeyError as exc:
self._output(exc.args[0]) # "Label not found: foo"
except OverflowError:
self._output("Overflow error: %s" % args)
else:
label = split[1]
self._address_parser.labels[label] = address
def help_show_labels(self):
self._output("show_labels")
self._output("Display current label mappings.")
def do_show_labels(self, args):
values = list(self._address_parser.labels.values())
keys = list(self._address_parser.labels.keys())
byaddress = list(zip(values, keys))
byaddress.sort()
for address, label in byaddress:
self._output(self.addrFmt % address + ": " + label)
def help_delete_label(self):
self._output("delete_label <label>")
self._output("Remove the specified label from the label tables.")
def do_delete_label(self, args):
if args == '':
return self.help_delete_label()
if args in self._address_parser.labels:
del self._address_parser.labels[args]
def do_width(self, args):
if args != '':
try:
new_width = int(args)
if new_width >= 10:
self._width = new_width
else:
self._output("Minimum terminal width is 10")
except ValueError:
self._output("Illegal width: %s" % args)
self._output("Terminal width is %d" % self._width)
def help_width(self):
self._output("width <columns>")
self._output("Set the width used by some commands to wrap output.")
self._output("With no argument, the current width is printed.")
def do_add_breakpoint(self, args):
split = shlex.split(args)
if len(split) != 1:
self._output("Syntax error: %s" % args)
return self.help_add_breakpoint()
address = self._address_parser.number(split[0])
if address in self._breakpoints:
self._output("Breakpoint already present at $%04X" % address)
else:
self._breakpoints.append(address)
msg = "Breakpoint %d added at $%04X"
self._output(msg % (len(self._breakpoints) - 1, address))
def help_add_breakpoint(self):
self._output("add_breakpoint <address|label>")
self._output("Add a breakpoint on execution at the given address or label")
def do_delete_breakpoint(self, args):
split = shlex.split(args)
if len(split) != 1:
self._output("Syntax error: %s" % args)
return self.help_delete_breakpoint()
number = None
try:
number = int(split[0])
            if number < 0 or number >= len(self._breakpoints):
                self._output("Invalid breakpoint number %d" % number)
return
except ValueError:
self._output("Illegal number: %s" % args)
return
if self._breakpoints[number] is not None:
self._breakpoints[number] = None
self._output("Breakpoint %d removed" % number)
else:
self._output("Breakpoint %d already removed" % number)
def help_delete_breakpoint(self):
self._output("delete_breakpoint <number>")
self._output("Delete the breakpoint on execution marked by the given number")
def do_show_breakpoints(self, args):
for i, address in enumerate(self._breakpoints):
if address is not None:
bpinfo = "Breakpoint %d: $%04X" % (i, address)
label = self._address_parser.label_for(address)
if label is not None:
bpinfo += " " + label
self._output(bpinfo)
def help_show_breakpoints(self):
self._output("show_breakpoints")
self._output("Lists the currently assigned breakpoints")
def main(args=None):
c = Monitor()
try:
import readline
readline = readline # pyflakes
except ImportError:
pass
try:
c.onecmd('version')
c.cmdloop()
except KeyboardInterrupt:
c._output('')
console.restore_mode()
if __name__ == "__main__":
main()
|
import asyncio
import multiprocessing
import queue
import sys
import time
import pytest
from aplex.executor import (SubmitItem, HandlerCommands,
WorkStates, WorkerStates)
from aplex.process import _ProcessWorker
from aplex.thread import _ThreadWorker
@pytest.fixture(scope='function', params=[
(_ProcessWorker, multiprocessing.SimpleQueue),
(_ThreadWorker, queue.Queue),
])
def worker(request):
Worker, ResultQueue = request.param
worker = Worker(ResultQueue())
worker.start()
yield worker
worker.close()
async def async_work(meta):
if isinstance(meta, Exception):
raise meta
return meta
def sync_work(meta):
if isinstance(meta, Exception):
raise meta
return meta
async def work_to_cancel():
await asyncio.sleep(999)
class TestReceive:
@pytest.mark.parametrize(('work_state', 'work_state_meta'), [
(WorkStates.SUCCESS, 'result'),
(WorkStates.EXCEPTION, Exception('exc')),
])
@pytest.mark.parametrize('work', [async_work, sync_work])
def test_receive_work(self, worker, work, work_state, work_state_meta):
item = SubmitItem.work_form(work=work,
args=(work_state_meta,),
kwargs={},
work_id=666,
load_balancing_meta=None)
worker.submit(item)
result_item = worker._result_queue.get()
assert result_item.work_state == work_state
if work_state == WorkStates.EXCEPTION:
returned_meta = result_item.work_state_meta
# Compare two exceptions.
assert (type(returned_meta) == type(work_state_meta) and
returned_meta.args == work_state_meta.args)
else:
assert result_item.work_state_meta == work_state_meta
@pytest.mark.parametrize(('command', 'command_meta'), [
(HandlerCommands.CANCEL, 666),
(HandlerCommands.CLOSE, None),
])
def test_receive_command(self, worker, command, command_meta):
work_id = 666
item = SubmitItem.work_form(work=work_to_cancel,
args=(),
kwargs={},
work_id=work_id,
load_balancing_meta=None)
worker.submit(item)
item = SubmitItem.command_form(command=command,
command_meta=command_meta)
worker.submit(item)
# Close command will also trigger cancellation.
result_item = worker._result_queue.get()
assert (result_item.work_id == work_id and
result_item.work_state == WorkStates.CANCELLED)
if command == HandlerCommands.CLOSE:
result_item = worker._result_queue.get()
assert (result_item.worker_state == WorkerStates.CLOSING and
result_item.worker_state_meta == worker.ident)
async def async_raise_base_exc():
raise BaseException('base_exception')
def sync_raise_base_exc():
raise BaseException('base_exception')
@pytest.mark.parametrize('work',
[async_raise_base_exc, sync_raise_base_exc])
def test_base_exception_raised(worker, work):
item = SubmitItem.work_form(work=work, args=(), kwargs={},
work_id=666, load_balancing_meta=None)
worker.submit(item)
item = worker._result_queue.get()
assert item.worker_state == WorkerStates.ERROR
time.sleep(2)
assert not worker.is_alive()
@pytest.mark.parametrize('loop_factory', [
asyncio.SelectorEventLoop,
pytest.param('asyncio.ProactorEventLoop',
marks=pytest.mark.skipif(sys.platform != 'win32',
reason='Windows only.')),
pytest.param('uvloop.Loop',
marks=pytest.mark.skipif(sys.platform == 'win32',
reason='Not support Windows.')),
])
@pytest.mark.parametrize('worker_factory', [_ProcessWorker, _ThreadWorker])
def test_loop_factory(worker_factory, loop_factory):
if loop_factory == 'asyncio.ProactorEventLoop':
loop_factory = asyncio.ProactorEventLoop
elif loop_factory == 'uvloop.Loop':
import uvloop
loop_factory = uvloop.Loop
if worker_factory == _ProcessWorker:
q = multiprocessing.SimpleQueue()
else:
q = queue.Queue()
worker = worker_factory(q, loop_factory)
worker.start()
time.sleep(3)
assert worker.is_alive()
worker.close()
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Package for analyzing elastic tensors and properties.
"""
from .elastic import * # noqa
from .strain import * # noqa
from .stress import * # noqa
|
import time
from requests import request, ConnectionError
from ..utils import SSLHttpAdapter, module_member, parse_qs, user_agent
from ..exceptions import AuthFailed
class BaseAuth:
"""A authentication backend that authenticates the user based on
the provider response"""
name = '' # provider name, it's stored in database
supports_inactive_user = False # Django auth
ID_KEY = None
EXTRA_DATA = None
GET_ALL_EXTRA_DATA = False
REQUIRES_EMAIL_VALIDATION = False
SEND_USER_AGENT = False
SSL_PROTOCOL = None
def __init__(self, strategy, redirect_uri=None):
self.strategy = strategy
self.redirect_uri = redirect_uri
self.data = self.strategy.request_data()
self.redirect_uri = self.strategy.absolute_uri(
self.redirect_uri
)
def setting(self, name, default=None):
"""Return setting value from strategy"""
return self.strategy.setting(name, default=default, backend=self)
def start(self):
if self.uses_redirect():
return self.strategy.redirect(self.auth_url())
else:
return self.strategy.html(self.auth_html())
def complete(self, *args, **kwargs):
return self.auth_complete(*args, **kwargs)
def auth_url(self):
"""Must return redirect URL to auth provider"""
raise NotImplementedError('Implement in subclass')
def auth_html(self):
"""Must return login HTML content returned by provider"""
raise NotImplementedError('Implement in subclass')
def auth_complete(self, *args, **kwargs):
"""Completes login process, must return user instance"""
raise NotImplementedError('Implement in subclass')
def process_error(self, data):
"""Process data for errors, raise exception if needed.
Call this method on any override of auth_complete."""
pass
def authenticate(self, *args, **kwargs):
"""Authenticate user using social credentials
Authentication is made if this is the correct backend, backend
verification is made by kwargs inspection for current backend
name presence.
"""
# Validate backend and arguments. Require that the Social Auth
# response be passed in as a keyword argument, to make sure we
# don't match the username/password calling conventions of
# authenticate.
if 'backend' not in kwargs or kwargs['backend'].name != self.name or \
'strategy' not in kwargs or 'response' not in kwargs:
return None
self.strategy = kwargs.get('strategy') or self.strategy
self.redirect_uri = kwargs.get('redirect_uri') or self.redirect_uri
self.data = self.strategy.request_data()
kwargs.setdefault('is_new', False)
pipeline = self.strategy.get_pipeline(self)
args, kwargs = self.strategy.clean_authenticate_args(*args, **kwargs)
return self.pipeline(pipeline, *args, **kwargs)
def pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
out = self.run_pipeline(pipeline, pipeline_index, *args, **kwargs)
if not isinstance(out, dict):
return out
user = out.get('user')
if user:
user.social_user = out.get('social')
user.is_new = out.get('is_new')
return user
def disconnect(self, *args, **kwargs):
pipeline = self.strategy.get_disconnect_pipeline(self)
kwargs['name'] = self.name
kwargs['user_storage'] = self.strategy.storage.user
return self.run_pipeline(pipeline, *args, **kwargs)
def run_pipeline(self, pipeline, pipeline_index=0, *args, **kwargs):
out = kwargs.copy()
out.setdefault('strategy', self.strategy)
out.setdefault('backend', out.pop(self.name, None) or self)
out.setdefault('request', self.strategy.request_data())
out.setdefault('details', {})
if not isinstance(pipeline_index, int) or \
pipeline_index < 0 or \
pipeline_index >= len(pipeline):
pipeline_index = 0
for idx, name in enumerate(pipeline[pipeline_index:]):
out['pipeline_index'] = pipeline_index + idx
func = module_member(name)
result = func(*args, **out) or {}
if not isinstance(result, dict):
return result
out.update(result)
return out
def extra_data(self, user, uid, response, details=None, *args, **kwargs):
"""Return default extra data to store in extra_data field"""
data = {
            # store the last time authentication took place
'auth_time': int(time.time())
}
extra_data_entries = []
if self.GET_ALL_EXTRA_DATA or self.setting('GET_ALL_EXTRA_DATA', False):
extra_data_entries = response.keys()
else:
extra_data_entries = (self.EXTRA_DATA or []) + self.setting('EXTRA_DATA', [])
for entry in extra_data_entries:
if not isinstance(entry, (list, tuple)):
entry = (entry,)
size = len(entry)
if size >= 1 and size <= 3:
if size == 3:
name, alias, discard = entry
elif size == 2:
(name, alias), discard = entry, False
elif size == 1:
name = alias = entry[0]
discard = False
value = response.get(name) or details.get(name) or details.get(alias)
if discard and not value:
continue
data[alias] = value
return data
def auth_allowed(self, response, details):
"""Return True if the user should be allowed to authenticate, by
default check if email is whitelisted (if there's a whitelist)"""
emails = self.setting('WHITELISTED_EMAILS', [])
domains = self.setting('WHITELISTED_DOMAINS', [])
email = details.get('email')
allowed = True
if email and (emails or domains):
domain = email.split('@', 1)[1]
allowed = email in emails or domain in domains
return allowed
def get_user_id(self, details, response):
"""Return a unique ID for the current user, by default from server
response."""
return response.get(self.ID_KEY)
def get_user_details(self, response):
"""Must return user details in a know internal struct:
{'username': <username if any>,
'email': <user email if any>,
'fullname': <user full name if any>,
'first_name': <user first name if any>,
'last_name': <user last name if any>}
"""
raise NotImplementedError('Implement in subclass')
def get_user_names(self, fullname='', first_name='', last_name=''):
# Avoid None values
fullname = fullname or ''
first_name = first_name or ''
last_name = last_name or ''
if fullname and not (first_name or last_name):
try:
first_name, last_name = fullname.split(' ', 1)
except ValueError:
first_name = first_name or fullname or ''
last_name = last_name or ''
fullname = fullname or ' '.join((first_name, last_name))
return fullname.strip(), first_name.strip(), last_name.strip()
def get_user(self, user_id):
"""
Return user with given ID from the User model used by this backend.
This is called by django.contrib.auth.middleware.
"""
return self.strategy.get_user(user_id)
def continue_pipeline(self, partial):
"""Continue previous halted pipeline"""
return self.strategy.authenticate(self,
pipeline_index=partial.next_step,
*partial.args,
**partial.kwargs)
def auth_extra_arguments(self):
"""Return extra arguments needed on auth process. The defaults can be
overridden by GET parameters."""
extra_arguments = self.setting('AUTH_EXTRA_ARGUMENTS', {}).copy()
extra_arguments.update((key, self.data[key]) for key in extra_arguments
if key in self.data)
return extra_arguments
def uses_redirect(self):
"""Return True if this provider uses redirect url method,
otherwise return false."""
return True
def request(self, url, method='GET', *args, **kwargs):
kwargs.setdefault('headers', {})
if self.setting('PROXIES') is not None:
kwargs.setdefault('proxies', self.setting('PROXIES'))
if self.setting('VERIFY_SSL') is not None:
kwargs.setdefault('verify', self.setting('VERIFY_SSL'))
kwargs.setdefault('timeout', self.setting('REQUESTS_TIMEOUT') or
self.setting('URLOPEN_TIMEOUT'))
if self.SEND_USER_AGENT and 'User-Agent' not in kwargs['headers']:
kwargs['headers']['User-Agent'] = self.setting('USER_AGENT') or \
user_agent()
try:
if self.SSL_PROTOCOL:
session = SSLHttpAdapter.ssl_adapter_session(self.SSL_PROTOCOL)
response = session.request(method, url, *args, **kwargs)
else:
response = request(method, url, *args, **kwargs)
except ConnectionError as err:
raise AuthFailed(self, str(err))
response.raise_for_status()
return response
def get_json(self, url, *args, **kwargs):
return self.request(url, *args, **kwargs).json()
def get_querystring(self, url, *args, **kwargs):
return parse_qs(self.request(url, *args, **kwargs).text)
def get_key_and_secret(self):
"""Return tuple with Consumer Key and Consumer Secret for current
service provider. Must return (key, secret), order *must* be respected.
"""
return self.setting('KEY'), self.setting('SECRET')
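# Sketch (hypothetical subclass, not part of the original module): the three
# entry forms consumed by BaseAuth.extra_data() above. The backend name and
# field names below are illustrative only.
class _ExampleBackend(BaseAuth):
    name = 'example'
    EXTRA_DATA = [
        'expires',                                  # store response['expires'] under 'expires'
        ('id', 'user_id'),                          # store response['id'] under the alias 'user_id'
        ('refresh_token', 'refresh_token', True),   # discard the entry when the value is empty
    ]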
|
#! /usr/bin/env python
# --coding:utf-8--
# coding: utf-8
# ━━━━━━神兽出没━━━━━━
# ┏┓ ┏┓
# ┏┛┻━━━┛┻┓
# ┃ ┃
# ┃ ━ ┃
# ┃ ┳┛ ┗┳ ┃
# ┃ ┃
# ┃ ┻ ┃
# ┃ ┃
# ┗━┓ ┏━┛
# ┃ ┃神兽保佑, 永无BUG!
# ┃ ┃Code is far away from bug with the animal protecting
# ┃ ┗━━━┓
# ┃ ┣┓
# ┃ ┏┛
# ┗┓┓┏━┳┓┏┛
# ┃┫┫ ┃┫┫
# ┗┻┛ ┗┻┛
# ━━━━━━感觉萌萌哒━━━━━━
# Module Desc:clover
# User: z.mm | [email protected]
# Date: 2016/1/1
# Time: 15:06
from com.common.BaseLoggingObj import BaseLoggingObj
from com.common.BaseLoggingObj import logger
import json
import psutil
import os
__author__ = 'Administrator'
class DiskModule(BaseLoggingObj, object):
def __init__(self):
BaseLoggingObj.__init__(self)
self.logging.info("DiskModule added")
def list(self):
return {"type": "disk", "items": [{"name": "存储信息", "function": "getDiskInfo"}]}
def __getDiskPartitionInfo(self):
return psutil.disk_partitions()
def __getDiskPartitionUsageInfo(self, path):
return psutil.disk_usage(path)
def getDiskInfo(self):
diskInfo = []
partitions = self.__getDiskPartitionInfo()
for partition in partitions:
try:
diskInfo.append({"partition": partition[1], "detail": self.__getDiskPartitionUsageInfo(partition[1])})
            except Exception:
                # skip partitions whose usage cannot be read (e.g. empty removable drives)
                continue
return json.dumps(diskInfo)
def getPathDetail(self, path):
        '''The given path must be an absolute path.'''
data = json.dumps(os.listdir(path), encoding="utf-8")
return data
|
from abc import abstractmethod
from typing import Dict, Tuple
from federate_learning.common.parameters import NDArrayList
from federate_learning.common.metrics import Metrics
from federate_learning.orchestrator.control_strategy import ControlStrategy
class Model:
def __init__(self,
name: str,
framework: str,
control_strategy: ControlStrategy = None,
model=None,
logger=None):
self.name = name
self.framework = framework
self.logger = logger
self.model = model
self.weights = None
self.metrics = Metrics()
if control_strategy:
self.control_strategy = control_strategy
self.control_strategy.metrics = self.metrics
self.control_strategy.logger = self.logger
@abstractmethod
def get_weights(self) -> NDArrayList:
"""
:return:
"""
@abstractmethod
def fit(self,
weights: NDArrayList,
config: Dict[str, str]) -> Tuple[NDArrayList, int, float, float]:
"""
:param weights:
:param config:
:return:
"""
@abstractmethod
def evaluate(self,
weights: NDArrayList,
config: Dict[str, str]) -> Tuple[int, float, float]:
"""
:param weights:
:param config:
:return:
"""
|
from django.contrib import admin
from apps.commons import models as commons_models
from import_export.admin import ImportExportModelAdmin
class OwnershipTypeAdmin(ImportExportModelAdmin):
search_fields = ("name",)
admin.site.register(commons_models.OwnershipType, OwnershipTypeAdmin)
|
"""Tests for logger: model User."""
from django.test import TestCase
from geokey.core.models import LoggerHistory
from geokey.users.tests.model_factories import UserFactory
class LogUserTest(TestCase):
"""Test model User."""
def setUp(self):
"""Set up test."""
self.user = UserFactory.create()
def test_log_create(self):
"""Test when user gets created."""
log_count_init = LoggerHistory.objects.count()
UserFactory.create()
self.assertEqual(LoggerHistory.objects.count(), log_count_init)
def test_log_update_display_name(self):
"""Test when display name changes."""
log_count_init = LoggerHistory.objects.count()
self.user.display_name = '%s UPDATED' % self.user.display_name
self.user.save()
self.assertEqual(LoggerHistory.objects.count(), log_count_init)
|
"""Tests runner module."""
import os
import random
import time
import uuid
import webbrowser
from collections import OrderedDict
from schema import Schema, Or, Use
from testplan import defaults
from testplan.common.config import ConfigOption
from testplan.common.entity import Entity, RunnableConfig, RunnableStatus, \
RunnableResult, Runnable
from testplan.common.exporters import BaseExporter, ExporterResult
from testplan.common.utils.path import default_runpath
from testplan.exporters import testing as test_exporters
from testplan.logger import log_test_status, TEST_INFO, TESTPLAN_LOGGER
from testplan.testing.base import TestResult
from testplan.report import TestReport
from testplan.report.testing import TestGroupReport, Status
from testplan.report.testing.styles import Style
from testplan.testing import listing, filtering, ordering, tagging
from .runners.base import Executor
from .runners.pools.tasks import Task, TaskResult
def get_default_exporters(config):
"""
Instantiate certain exporters if related cmdline argument (e.g. --pdf)
is passed but there aren't any exporter declarations.
"""
result = []
if config.pdf_path:
result.append(test_exporters.PDFExporter())
if config.report_tags or config.report_tags_all:
result.append(test_exporters.TagFilteredPDFExporter())
if config.json_path:
result.append(test_exporters.JSONExporter())
if config.xml_dir:
result.append(test_exporters.XMLExporter())
return result
def get_exporters(values):
"""
Validation function for exporter declarations.
:param values: Single or a list of exporter declaration(s).
:return: List of initialized exporter objects.
"""
def get_exporter(value):
if isinstance(value, BaseExporter):
return value
elif isinstance(value, tuple):
exporter_cls, params = value
return exporter_cls(**params)
raise TypeError('Invalid exporter value: {}'.format(value))
if isinstance(values, list):
return [get_exporter(v) for v in values]
return [get_exporter(values)]
def result_for_failed_task(original_result):
"""
Create a new result entry for invalid result retrieved from a resource.
"""
result = TestResult()
result.report = TestGroupReport(name=original_result.task.name)
attrs = [attr for attr in original_result.task.all_attrs]
result_lines = ['{}: {}'.format(attr, getattr(original_result.task, attr))\
if getattr(original_result.task, attr, None) else ''\
for attr in attrs]
result.report.logger.error(
os.linesep.join([line for line in result_lines if line]))
result.report.logger.error(original_result.reason)
result.report.status_override = Status.ERROR
return result
class TestRunnerConfig(RunnableConfig):
"""
Configuration object for
:py:class:`~testplan.runnable.TestRunner` runnable object.
"""
def configuration_schema(self):
"""
Schema for options validation and assignment of default values.
"""
overrides = Schema({
'name': str,
ConfigOption('logger_level', default=TEST_INFO): int,
ConfigOption('runpath', default=default_runpath):
Or(None, str, lambda x: callable(x)),
ConfigOption('path_cleanup', default=True): bool,
ConfigOption('all_tasks_local', default=False): bool,
ConfigOption('shuffle', default=[]): list, # list of string choices
ConfigOption(
'shuffle_seed', default=float(random.randint(1, 9999))): float,
ConfigOption(
'exporters', default=None): Use(get_exporters),
ConfigOption(
'stdout_style',
default=defaults.STDOUT_STYLE): Style,
ConfigOption('report_dir', default=defaults.REPORT_DIR): str,
ConfigOption('xml_dir', default=None): Or(str, None),
ConfigOption('pdf_path', default=None): Or(str, None),
ConfigOption('json_path', default=None): Or(str, None),
ConfigOption(
'pdf_style',
default=defaults.PDF_STYLE): Style,
ConfigOption('report_tags', default=[]):
[Use(tagging.validate_tag_value)],
ConfigOption('report_tags_all', default=[]):
[Use(tagging.validate_tag_value)],
ConfigOption('browse', default=False): bool,
ConfigOption(
'test_filter', default=filtering.Filter()):
filtering.BaseFilter,
ConfigOption(
'test_sorter', default=ordering.NoopSorter()):
ordering.BaseSorter,
# Test lister is None by default, otherwise Testplan would
# list tests, not run them
ConfigOption('test_lister', default=None):
Or(None, listing.BaseLister)
}, ignore_extra_keys=True)
return self.inherit_schema(overrides, super(TestRunnerConfig, self))
class TestRunnerStatus(RunnableStatus):
"""
Status of a
:py:class:`TestRunner <testplan.runnable.TestRunner>` runnable object.
"""
class TestRunnerResult(RunnableResult):
"""
Result object of a
:py:class:`TestRunner <testplan.runnable.TestRunner>` runnable object.
"""
def __init__(self):
        super(TestRunnerResult, self).__init__()
self.test_results = OrderedDict()
self.exporter_results = []
self.test_report = None
@property
def report(self):
"""Tests report."""
return self.test_report
@property
def success(self):
"""Run was successful."""
return self.test_report.passed and all(
[exporter_result.success
for exporter_result in self.exporter_results])
class TestRunner(Runnable):
"""
Adds tests to test
:py:class:`executor <testplan.runners.base.Executor>` resources
and invoke report
:py:class:`exporter <testplan.exporters.testing.base.Exporter>` objects
to create the
:py:class:`~testplan.runnable.TestRunnerResult`.
:param name: Name of test runner.
:type name: ``str``
:param logger_level: Logger level.
:type logger_level: ``int``
:param runpath: Input runpath.
:type runpath: ``str`` or ``callable``
:param path_cleanup: Clean previous runpath entries.
:type path_cleanup: ``bool``
:param all_tasks_local: TODO
:type all_tasks_local: ``bool``
:param shuffle: Shuffle strategy.
:type shuffle: ``list`` of ``str``
:param shuffle_seed: Shuffle seed.
:type shuffle_seed: ``float``
:param exporters: Exporters for reports creation.
:type exporters: ``list``
:param stdout_style: Styling output options.
:type stdout_style: :py:class:`Style <testplan.report.testing.styles.Style>`
:param report_dir: Report directory.
:type report_dir: ``str``
:param xml_dir: XML output directory.
:type xml_dir: ``str``
:param pdf_path: PDF output path ..path/*.pdf.
:type pdf_path: ``str``
:param json_path: JSON output path ..path/*.json.
:type json_path: ``str``
:param pdf_style: PDF creation styling options.
:type pdf_style: :py:class:`Style <testplan.report.testing.styles.Style>`
:param report_tags: Matches tests marked with any of the given tags.
:type report_tags: ``list``
:param report_tags_all: Match tests marked with all of the given tags.
:type report_tags_all: ``list``
:param test_filter: Tests filtering class.
:type test_filter: Subclass of
:py:class:`BaseFilter <testplan.testing.filtering.BaseFilter>`
:param test_sorter: Tests sorting class.
:type test_sorter: Subclass of
:py:class:`BaseSorter <testplan.testing.ordering.BaseSorter>`
:param test_lister: Tests listing class.
:type test_lister: Subclass of
:py:class:`BaseLister <testplan.testing.listing.BaseLister>`
Also inherits all
:py:class:`~testplan.common.entity.base.Runnable` options.
"""
CONFIG = TestRunnerConfig
STATUS = TestRunnerStatus
RESULT = TestRunnerResult
def __init__(self, **options):
super(TestRunner, self).__init__(**options)
self._tests = OrderedDict() # uid to resource
self._result.test_report = TestReport(name=self.cfg.name)
@property
def report(self):
"""Tests report."""
return self._result.test_report
def add_resource(self, resource, uid=None):
"""
Adds a test
:py:class:`executor <testplan.runners.base.Executor>`
resource in the test runner environment.
:param resource: Test executor to be added.
:type resource: Subclass of :py:class:`~testplan.runners.base.Executor`
:param uid: Optional input resource uid.
:type uid: ``str``
:return: Resource uid assigned.
:rtype: ``str``
"""
resource.cfg.parent = self.cfg
resource.parent = self
return self.resources.add(
resource, uid=uid or getattr(resource, 'uid', uuid.uuid4)())
def schedule(self, task=None, resource=None, uid=None, **options):
"""
Schedules a serializable
:py:class:`~testplan.runners.pools.tasks.base.Task` in a task runner
:py:class:`~testplan.runners.pools.base.Pool` executor resource.
:param task: Input task.
:param task: :py:class:`~testplan.runners.pools.tasks.base.Task`
:param resource: Target pool resource.
:param resource: :py:class:`~testplan.runners.pools.base.Pool`
:param uid: Optional uid for task.
:param uid: ``str``
:param options: Task input options.
:param options: ``dict``
:return uid: Assigned uid for task.
:rtype: ``str``
"""
return self.add(task or Task(uid=uid, **options),
resource=resource, uid=uid)
def add(self, runnable, resource=None, uid=None):
"""
Adds a
:py:class:`runnable <testplan.common.entity.base.Runnable>` tests entity
to an :py:class:`~testplan.runners.base.Executor` resource.
:param runnable: Test runner entity.
:type runnable: :py:class:`~testplan.common.entity.base.Runnable`
:param resource: Test executor resource.
:type resource: :py:class:`~testplan.runners.base.Executor`
:param uid: Optional test uid.
:type uid: ``str``
:return: Assigned uid for test.
:rtype: ``str``
"""
uid = uid or getattr(runnable, 'uid', uuid.uuid4)()
if uid in self._tests:
self.logger.error(
'Skip adding {} with uid {}.. already added.'.format(
runnable, uid))
return uid
if isinstance(runnable, Entity):
runnable.cfg.parent = self.cfg
runnable.parent = self
elif isinstance(runnable, Task):
pass
elif callable(runnable):
runnable.parent_cfg = self.cfg
runnable.parent = self
# Check if test should not be added only when a filter is used.
if type(self.cfg.test_filter) is not filtering.Filter or\
self.cfg.test_lister is not None:
if not self.should_be_added(runnable):
return None
if resource is None:
resource = self.resources.first()
if resource not in self.resources:
raise RuntimeError('Resource "{}" does not exist.'.format(resource))
self.resources[resource].add(runnable, uid)
self._tests[uid] = resource
return uid
def should_be_added(self, runnable):
"""Determines if a test runnable should be added for execution."""
if isinstance(runnable, Task):
target = runnable.materialize()
target.cfg.parent = self.cfg
target.parent = self
elif callable(runnable):
target = runnable()
target.cfg.parent = runnable.parent_cfg
target.parent = runnable.parent
else:
target = runnable
should_run = target.should_run()
# --list always returns False
if should_run and self.cfg.test_lister is not None:
self.cfg.test_lister.log_test_info(target)
return False
return should_run
def _add_step(self, step, *args, **kwargs):
if self.cfg.test_lister is None:
super(TestRunner, self)._add_step(step, *args, **kwargs)
def _record_start(self):
self.report.timer.start('run')
def _record_end(self):
self.report.timer.end('run')
def pre_resource_steps(self):
"""Steps to be executed before resources started."""
# self._add_step(self._runpath_initialization)
self._add_step(self._record_start)
self._add_step(self.make_runpath_dirs)
def main_batch_steps(self):
"""Steps to be executed while resources are running."""
self._add_step(self._wait_ongoing)
def post_resource_steps(self):
"""Steps to be executed after resources stopped."""
self._add_step(self._create_result)
self._add_step(self._log_test_status)
self._add_step(self._record_end) # needs to happen before export
self._add_step(self._invoke_exporters)
self._add_step(self._post_exporters)
def _wait_ongoing(self):
self.logger.info('{} runpath: {}'.format(self, self.runpath))
if self.resources.start_exceptions:
self.abort()
return
while self.active:
ongoing = False
for resource in self.resources:
if resource.ongoing:
# Maybe print periodically ongoing resource
ongoing = True
if ongoing is False:
break
time.sleep(self.cfg.active_loop_sleep)
def _create_result(self):
step_result = True
test_results = self._result.test_results
for uid, resource in self._tests.items():
if not isinstance(self.resources[resource], Executor):
continue
resource_result = self.resources[resource].results[uid]
if isinstance(resource_result, TaskResult):
if resource_result.status is False:
test_results[uid] = result_for_failed_task(resource_result)
else:
test_results[uid] = resource_result.result
else:
test_results[uid] = resource_result
self._result.test_report.append(test_results[uid].report)
step_result = step_result and test_results[uid].run
return step_result
def uid(self):
"""Entity uid."""
return self.cfg.name
def _log_test_status(self):
log_test_status(
name=self.cfg.name,
passed=self._result.test_report.passed
)
def _invoke_exporters(self):
# Add this logic into a ReportExporter(Runnable)
# that will return a result containing errors
if self.cfg.exporters is None:
exporters = get_default_exporters(self.cfg)
else:
exporters = self.cfg.exporters
for exporter in exporters:
if hasattr(exporter, 'cfg'):
exporter.cfg.parent = self.cfg
if isinstance(exporter, test_exporters.Exporter):
exp_result = ExporterResult.run_exporter(
exporter=exporter,
source=self._result.test_report,
type='test',
)
if not exp_result.success:
TESTPLAN_LOGGER.error(exp_result.traceback)
self._result.exporter_results.append(exp_result)
else:
raise NotImplementedError(
'Exporter logic not'
' implemented for: {}'.format(type(exporter)))
def _post_exporters(self):
if self.cfg.browse:
# Open exporter url to browse.
for result in self._result.exporter_results:
if result.exporter.url is not None:
webbrowser.open(result.exporter.url)
break
def aborting(self):
"""Suppressing not implemented debug log from parent class."""
pass
|
# Twowaits Problem
# Function to print words which can be created
# using given set of characters
def charCount(word):
dict = {}
for i in word:
dict[i] = dict.get(i, 0) + 1
return dict
def possible_words(lwords, charSet):
for word in lwords:
flag = 1
chars = charCount(word)
for key in chars:
if key not in charSet:
flag = 0
else:
if charSet.count(key) != chars[key]:
flag = 0
if flag == 1:
print(word)
if __name__ == "__main__":
input = ['goo', 'bat', 'me', 'eat', 'goal', 'boy', 'run']
charSet = ['e', 'o', 'b', 'a', 'm', 'g', 'l']
possible_words(input, charSet)
|
import urllib.request,json
from .models import Article,Source
# Getting api key
api_key = None
# Getting the movie base url
base_url = None
# all_articles = api_key.get_everything(sort_by='source')
def configure_request(app):
global api_key,base_url
api_key = app.config['NEWS_API_KEY']
base_url = app.config['NEWS_API_BASE_URL']
|
from mne import events_from_annotations, annotations_from_events
def transform_event_id(raw, transform_dic=None, description_transform=None):
"""Transform the description of Raw.
Parameters
----------
raw : mne.Raw
Raw instance.
transform_dic : None | dic
Dictionary holds the new id required for conversion.
Which key is the old id and the value is the new id.
description_transform : None | callable
Function use raw as input and return new_events and new_event_id.
Returns
-------
None
Notes
-----
"""
if description_transform:
all_events, all_event_id = description_transform(raw)
else:
all_events, all_event_id = events_from_annotations(raw)
if transform_dic:
new_all_event_id = _transform_from_dict(all_event_id, transform_dic)
else:
new_all_event_id = {v: k for k, v in all_event_id.items()}
annotation_new = annotations_from_events(all_events, raw.info['sfreq'],
new_all_event_id)
raw.set_annotations(annotation_new)
def _transform_from_dict(dic1, dic2):
"""Transform dic1's key from dic2's value.
"""
dic_new = {}
for key, value in dic1.items():
key_new = dic2[key] if key in dic2 else key
dic_new[value] = key_new
return dic_new
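# Illustrative usage (a sketch; the file name and the description mapping are
# assumptions, not part of this module):
#
#   import mne
#   raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)
#   # rename annotation descriptions, e.g. 'old/label' -> 'new/label'
#   transform_event_id(raw, transform_dic={'old/label': 'new/label'})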
|
from django.core.mail import EmailMessage
from django.dispatch import receiver
from django.template.loader import render_to_string
from django.urls import reverse
from django.conf import settings
from django_rest_passwordreset.signals import reset_password_token_created
@receiver(reset_password_token_created)
def password_reset_token_created(sender, instance, reset_password_token, *args, **kwargs):
"""
Handles password reset tokens
When a token is created, an e-mail needs to be sent to the user
:param sender: View Class that sent the signal
:param instance: View Instance that sent the signal
:param reset_password_token: Token Model Object
:param args:
:param kwargs:
:return:
"""
# send an e-mail to the user
context = {
"current_user": reset_password_token.user,
"username": reset_password_token.user.username,
"email": reset_password_token.user.email,
"reset_password_url": "{}/password_reset/{}".format(settings.CURRENT_DOMAIN, reset_password_token.key),
}
# render email text
email_html_message = render_to_string("email/user_reset_password.html", context)
email_plaintext_message = render_to_string("email/user_reset_password.txt", context)
msg = EmailMessage(
"Password Reset for Care", email_html_message, settings.DEFAULT_FROM_EMAIL, (reset_password_token.user.email,)
)
msg.content_subtype = "html" # Main content is now text/html
msg.send()
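# Settings this handler relies on (illustrative placeholder values):
#
#   # settings.py
#   CURRENT_DOMAIN = "https://care.example.com"
#   DEFAULT_FROM_EMAIL = "[email protected]"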
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributions as D
class SimpleNet(nn.Module):
def __init__(self, s_dim, a_dim):
super(SimpleNet, self).__init__()
self.s_dim = s_dim
self.a_dim = a_dim
self.a1 = nn.Linear(s_dim, 100)
self.mu = nn.Linear(100, 1)
self.sigma = nn.Linear(100, 1)
self.c1 = nn.Linear(s_dim, 100)
self.v = nn.Linear(100, 1)
layers = [self.a1, self.mu, self.sigma, self.c1, self.v]
for layer in layers:
            nn.init.normal_(layer.weight, mean=0., std=0.1)
            nn.init.constant_(layer.bias, 0.1)
def forward(self, s):
a1 = F.relu(self.a1(s))
# mu = 2 * F.tanh(self.mu(a1))
mu = F.tanh(self.mu(a1))
# sigma = F.softplus(self.sigma(a1)) + 0.001 # avoid 0
sigma = F.relu(self.sigma(a1)) + 0.001
c1 = F.relu(self.c1(s))
value = self.v(c1)
return mu, sigma, value
def choose_action(self, s):
mu, sigma, _ = self.forward(s)
# m = self.distribution(mean=mu.view(1, ).data, std=sigma.view(1, ).data)
# gauss = D.Normal(mu.data, sigma.data)
gauss = D.Normal(mu, sigma)
return gauss.sample().data.numpy()
def loss_fn(self, s, a, v_td):
# critic loss
mu, sigma, value = self.forward(s)
td_error = v_td - value
critic_loss = td_error.pow(2)
# policy gradient loss for actor
gauss = D.Normal(mu, sigma)
log_prob = gauss.log_prob(a)
entropy = torch.log(gauss.std) # exploration
# classic actor gradient * reward
actor_loss = -(log_prob * td_error.detach() + 0.001 * entropy)
# combine together
return (critic_loss + actor_loss).mean() |
from setuptools import setup, find_packages
setup(
name="aiogh",
version="0.0.1dev",
packages=find_packages(),
install_requires=["aiohttp", "aiohttp_session", "cryptography"],
)
|
import numpy as np
import yaml
import sys
import os
import platform
from pathlib import Path
from setuptools import setup, find_packages
from pybind11.setup_helpers import Pybind11Extension, build_ext
script_dir = Path(os.path.abspath(os.path.dirname(__file__)))
with open(script_dir / 'environment.yml') as f:
required = yaml.safe_load(f.read())['dependencies'][-1]['pip']
is_arm = (platform.machine() == "arm64") # Apple Silicon or ARM?
if is_arm:
# workaround for cpufeature not available on Apple M1 as of 2021/09.
required = list(filter(lambda s: not s.startswith("cpufeature"), required))
src_path = (script_dir / 'pyalign' / 'algorithm').resolve()
assert src_path.exists()
sources = [
src_path / 'module.cpp'
]
include_dirs = [Path(np.get_include()), script_dir]
if sys.platform == 'darwin':
cc = os.environ.get("CC")
if cc and cc.startswith("gcc"):
import pybind11.setup_helpers
pybind11.setup_helpers.MACOS = False
def mk_ext(name, arch=None, cpu=None):
extra_compile_args = []
extra_link_args = []
is_sanitize = os.environ.get('PYALIGN_SANITIZE_ADDRESS', False)
if is_sanitize:
is_debug_build = True
else:
is_debug_build = os.environ.get("PYALIGN_DEBUG_BUILD", False)
if os.name == 'nt':
pass
else:
if is_debug_build:
extra_compile_args.append("-O0")
extra_compile_args.append("-g")
else:
extra_compile_args.append("-O3")
extra_compile_args.extend([
"-ftemplate-backtrace-limit=0"])
if arch is not None:
extra_compile_args.append(f"-march={arch}")
if cpu is not None:
extra_compile_args.append(f"-mcpu={cpu}")
if is_sanitize:
extra_compile_args.append('-fsanitize=address')
extra_compile_args.append('-fno-omit-frame-pointer')
extra_compile_args.append('-fno-optimize-sibling-calls')
extra_link_args.append('-fsanitize=address')
return Pybind11Extension(
f'pyalign.algorithm.{name}.algorithm',
[str(x) for x in sorted(sources)],
cxx_std=17,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
include_dirs=[str(x) for x in include_dirs],
)
ext_modules = []
if os.environ.get("PYALIGN_PREBUILT_MARCH"):
ext_modules.append(mk_ext('generic', None))
if is_arm:
ext_modules.append(mk_ext('apple_m1', cpu='apple-m1'))
else:
ext_modules.append(mk_ext('intel_avx2', arch='haswell'))
elif is_arm:
ext_modules.append(mk_ext('generic', None))
else:
ext_modules.append(mk_ext('native', arch='native'))
with open(script_dir / 'README.md') as f:
long_description = f.read()
exec(open("pyalign/_version.py").read())
setup(
name='pyalign',
version=__version__,
packages=find_packages(include=[
'pyalign',
'pyalign.algorithm',
'pyalign.problems',
'pyalign.gaps',
'pyalign.io',
'pyalign.tests']),
python_requires='>=3.8',
license='GPLv2',
author='Bernhard Liebl',
author_email='[email protected]',
ext_modules=ext_modules,
cmdclass={"build_ext": build_ext},
install_requires=required,
test_suite='nose.collector',
tests_require=['nose'],
description='Fast and Versatile Alignments for Python',
long_description=long_description,
long_description_content_type='text/markdown',
#include_package_data=True,
)
|
###############################
# - Set the fileToAverage name:
fileToAverage = "SetTheFileName.txt"
###############################
entriesToAverage = input("How many entries to average [int]?")
outputFile = "ave_" + str(entriesToAverage) + "_" + fileToAverage
current_list = open(fileToAverage, 'r').readlines()
output_list = []
print
print
print
print "Input file:", fileToAverage
print "Averaging input file with lenght", len(current_list), "..."
i=0
while i < len(current_list):
j=0
temp_sum = 0
while j < entriesToAverage:
        try:
            # accumulate inside the try so a malformed or missing line is
            # skipped instead of re-adding the previous value
            temp_sum += float(current_list[i+j].split()[0])
        except (IndexError, ValueError):
            pass
        j+=1
entry = temp_sum / float(entriesToAverage)
output_list.append(entry)
i+=entriesToAverage
print "Output file lenght:", len(output_list)
print "Saving to file:", outputFile
saveFile = open(outputFile, 'w')
i=0
while i < len(output_list):
saveFile.write(str(output_list[i]))
saveFile.write('000')
saveFile.write('\n')
i+=1
saveFile.close()
print "- Done!"
print |
import os
import glob
import numpy as np
from sklearn.manifold import TSNE
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
from pyLZJD import digest, sim
#First, let's check if we have the t5 corpus
if not (os.path.exists("t5-corpus.zip") or os.path.exists("t5") ):
print("Downloading t5 dataset")
import urllib.request
urllib.request.urlretrieve('http://roussev.net/t5/t5-corpus.zip', 't5-corpus.zip')
import zipfile
with zipfile.ZipFile("t5-corpus.zip","r") as zip_ref:
zip_ref.extractall(".")
#Let's collect all the files in the t5 corpus. It's organized as one big folder, and the extension of each file tells us what kind of file it is.
X_paths = glob.glob("t5/*")
labels_true = list(set([ x[x.find(".")+1:] for x in X_paths]))
print("Labels:", labels_true)
#Label every file based on which file type it was
Y = np.asarray([ labels_true.index(x[x.find(".")+1:]) for x in X_paths])
#Let's hash all the files now! We have a list of paths; pyLZJD can take that directly and convert it to hashes
X_hashes = digest(X_paths, processes=-1, mode="sh")
print("Done hashing!")
#We are going to use some tools from scikit-learn. It needs a distance function between data stored as a list of vectors.
#So we will create a list of 1-D vectors, where each vector stores the index of its hash in X_hashes
X = [ [i] for i in range(len(X_hashes))]
#Now we define a distance function between two vectors in X. It accesses the index value, and computes the LZJD distance
def lzjd_dist(a, b):
a_i = X_hashes[int(a[0])]
b_i = X_hashes[int(b[0])]
return 1.0-sim(a_i, b_i)
knn_model = KNeighborsClassifier(n_neighbors=5, algorithm='brute', metric=lzjd_dist)
scores = cross_val_score(knn_model, X, Y, cv=5)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
X_embedded = TSNE(n_components=2, perplexity=5, metric=lzjd_dist).fit_transform(X)
colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, len(labels_true))]
for k, col in zip([z for z in range(len(labels_true))], colors):
if k == -1:
# Black used for noise.
col = [0, 0, 0, 1]
class_member_mask = (Y == k)
xy = X_embedded[class_member_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col), markeredgecolor='k', markersize=5, label=labels_true[k])
plt.title('TSNE Visualization')
plt.legend(loc='upper left')
plt.show()
|
import json
import logging
import random
import numpy as np
import tflearn
from django.conf import settings
from django.utils import timezone
from classifier.models import Sentence, Train, Category
from classifier.utils import init_network, get_tokenized_words, get_categories, get_words
logger = logging.getLogger(__name__)
def train():
# get a list of all categories to train for
# Check if there unprocessed categories or sentences
if not Category.objects.filter(processed=False).exists() and not Sentence.objects.filter(processed=False).exists():
logger.info('No unprocessed categories or sentences')
return
# Mark all as processed now
categories_qs = Category.objects.all()
categories_qs.filter(processed=False).update(processed=True)
sentences_qs = Sentence.objects.all()
sentences_qs.filter(processed=False).update(processed=True)
# Use same QuerySets to train models
categories = get_categories(qs=categories_qs)
words = get_words(qs=sentences_qs)
train_obj = Train.objects.create(
started=timezone.now(),
data=json.dumps(dict(
words=words,
categories=categories
), indent=2)
)
# a list of tuples with words in the sentence and category name
docs = []
for sentence in Sentence.objects.all():
docs.append((get_tokenized_words(sentence.text), sentence.category.title))
# create our training data
training = []
# create an empty array for our output
output_empty = [0] * len(categories)
for doc in docs:
# initialize our bag of words for each document in the list
# list of tokenized words for the pattern
token_words = doc[0]
# Create bag of words array
bag_of_words = [1 if word in token_words else 0 for word in words]
output_row = list(output_empty)
output_row[categories.index(doc[1])] = 1
    # Our training set will contain the bag of words model
# and the output row that tells which category that bag of words belongs to.
training.append([bag_of_words, output_row])
# Shuffle our features and turn into np.array as TensorFlow takes in numpy array
random.shuffle(training)
training = np.array(training)
# x_inputs contains the Bag of words and y_targets contains the category
x_inputs = list(training[:, 0])
y_targets = list(training[:, 1])
# Define model and setup TensorBoard
x_size = len(x_inputs[0])
y_size = len(y_targets[0])
network = init_network(x_size, y_size)
model = tflearn.DNN(network)
# Start training (apply gradient descent algorithm)
model.fit(x_inputs, y_targets, n_epoch=1000, batch_size=8, show_metric=True)
model.save(settings.CLASSIFIER_MODEL_PATH)
train_obj.finished = timezone.now()
train_obj.save()
|
from .mlp import MLP
from .affine_transform import AffineTransform
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import pandas as pd
import plotly.figure_factory as ff
import numpy as np
import plotly.graph_objects as go
import os
import networkx as nx
import matplotlib.pyplot as plt
def plot_fig(node_list, edge_list, save_name):
# G = nx.random_geometric_graph(40, 0.125)
G = nx.Graph()
node_len = len(node_list)
node_label = [i.replace('the ', '').replace('The ', '') for i in node_list]
edge_pair = edge_list
# edge_weight = np.sqrt(np.array([i[1] for i in edge_pair]))
person_node = list(set([i[0] for i in edge_pair]))
node = np.linspace(1, node_len, node_len, dtype=int) - 1
node_color = ['LightSkyBlue' if i in person_node else 'DarkSlateGrey' for i in node]
G.add_nodes_from(node)
G.add_weighted_edges_from(edge_pair)
# pos = nx.kamada_kawai_layout(G)
pos = nx.spring_layout(G, k=5/np.sqrt(node_len))
# pos = nx.spiral_layout(G)
# pos = nx.multipartite_layout(G)
# pos = nx.spectral_layout(G)
# pos = nx.random_layout(G)
# pos = nx.bipartite_layout(G, person_node)
edge_x = []
edge_y = []
node_size = []
for edge in G.edges():
# x0, y0 = G.nodes[edge[0]]['pos']
# x1, y1 = G.nodes[edge[1]]['pos']
x0, y0 = pos[edge[0]]
x1, y1 = pos[edge[1]]
edge_x.append(x0)
edge_x.append(x1)
edge_x.append(None)
edge_y.append(y0)
edge_y.append(y1)
edge_y.append(None)
edge_trace = go.Scatter(
x=edge_x, y=edge_y,
line=dict(width=0.5, color='#888'),
hoverinfo='none',
mode='lines'
)
node_x = []
node_y = []
for node in G.nodes():
# x, y = G.nodes[node]['pos']
x, y = pos[node]
node_x.append(x)
node_y.append(y)
node_trace = go.Scatter(
x=node_x, y=node_y,
mode='markers+text',
text=node_label,
textposition='top center',
hoverinfo='text',
marker=dict(
showscale=False,
# colorscale options
#'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
#'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
#'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
# colorscale='YlGnBu',
reversescale=True,
opacity = 0.9,
color=node_color,
size=10,
# colorbar=dict(
# thickness=15,
# title='Node Connections',
# xanchor='left',
# titleside='right'
# ),
line_width=2))
node_adjacencies = np.zeros(node_len)
node_text = node_label
for i in edge_pair:
node_adjacencies[i[0]] += i[2]
node_adjacencies[i[1]] += i[2]
node_adjacencies = node_adjacencies
# for node, adjacencies in enumerate(G.adjacency()):
# node_adjacencies.append(len(adjacencies[1]))
# node_text.append('# of connections: '+str(len(adjacencies[1])))
# node_trace.marker.color = node_adjacencies
node_trace.marker.size = node_adjacencies
node_trace.text = node_text
year_num = save_name[:4]
q_num = int((int(save_name[5:7]) - 1) / 3 + 1)
fig = go.Figure(data=[edge_trace, node_trace],
layout=go.Layout(
                    title='IRTM NBA News Relation Visualization: {0} Q{1} Human-Team Relations'.format(year_num, q_num),
titlefont_size=35,
showlegend=False,
hovermode='closest',
margin=dict(b=20,l=5,r=5,t=40),
annotations=[ dict(
text="{0} Q{1} Human-Team Relations based on NBA News Report".format(year_num, q_num),
showarrow=False,
xref="paper", yref="paper",
x=0.005, y=-0.002 ) ],
xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
yaxis=dict(showgrid=False, zeroline=False, showticklabels=False))
)
fig.update_layout(
autosize=False,
width=1500,
height=1500,
margin=dict(
l=50,
r=50,
b=100,
t=100,
pad=4
),
)
if not os.path.exists("images"):
os.mkdir("images")
fig.write_image("images/{}_spring_layout.png".format(save_name))
return fig |
n, k = map(int, input().split())
numbers = map(int,input().split())
remainder_counts = [0] * k
for number in numbers:
remainder_counts[number % k] += 1
result = min(remainder_counts[0], 1)
for i in range(1, k//2 + 1):
if i != k - i:
result += max(
remainder_counts[i],
remainder_counts[k-i]
)
if k % 2 == 0:
    # at most one element with remainder k/2 can be taken, and only if one exists
    result += min(remainder_counts[k//2], 1)
print(result)
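# Worked example (illustrative): k = 4, numbers = [1, 2, 3, 4, 5, 6, 7]
# remainder counts: {0: 1, 1: 2, 2: 2, 3: 2}
#   remainder 0: at most one element        -> 1
#   pair (1, 3): max(2, 2)                  -> +2
#   remainder 2 (= k/2): at most one        -> +1
# result = 4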
|
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os
import pandas as pd
def main(txt_file, output_flag, check_all_frames, t_res, imgfmt, layer,
no_stop):
"""Report if all the inputs OR outputs exist"""
idx_table = {'video': 0, 'f-init': 1}
df = pd.read_csv(txt_file, sep=' ', header=None)
check_fn = os.path.isdir
if output_flag:
check_fn = os.path.isfile
for i, row in df.iterrows():
video = row[idx_table['video']]
if output_flag:
video = video + layer
if not check_fn(video):
print(video)
if not no_stop:
break
if check_all_frames and not output_flag:
f_init = row[idx_table['f-init']]
for j in range(f_init, f_init + t_res):
imgfile = os.path.join(video, imgfmt.format(j))
if not os.path.isfile(imgfile):
print(imgfile)
break
if __name__ == '__main__':
description = 'Check missing inputs (frames) or outputs (features)'
p = ArgumentParser(description=description,
formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument('-i', '--txt-file', required=True,
help='Input/Output list given to extract features')
p.add_argument('-o', '--output-flag', action='store_true',
help='txt-file is an output list')
p.add_argument('-c', '--check-all-frames', action='store_false',
help='Ensure all frames requested by a line are there')
p.add_argument('-l', '--t-res', default=16, type=int,
help='temporal length of the clips')
p.add_argument('-f', '--imgfmt', default='{0:06d}.png',
help='Image format')
p.add_argument('-of', '--layer', default='.fc6-1',
help='Extracted layer')
p.add_argument('-ns', '--no-stop', action='store_true',
                   help='Do not stop at the first error')
main(**vars(p.parse_args()))
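# Example invocations (illustrative; the script and list file names are hypothetical):
#   python check_io.py -i input_clips.txt              # report missing frame dirs
#   python check_io.py -i output_features.txt -o       # report missing feature files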
|
import random
def main():
print("Welcome to Hi-Low")
the_number = random.randint(1, 100)
guess = get_guess()
while guess != the_number:
if guess > the_number:
print("Sorry, too high")
elif guess < the_number:
print("Sorry too low")
guess = get_guess()
print("Nice work, you got it!")
print("done")
def get_guess():
guess_text = input('Enter a number between 1 and 100: ')
guess = int(guess_text)
return guess
print("Module name is {}".format(__name__))
if __name__ == '__main__':
main()
|
import torch
import torch.nn as nn
import random
class MeanPredictor(nn.Module):
def __init__(self, data):
super(MeanPredictor, self).__init__()
self._throwaway_param = nn.Linear(1, 1)
self.mean = sum(datum.y for datum in data.train) / float(len(data.train))
print('we gonna predict {}'.format(self.mean))
def remove_refs(self, arg):
pass
def forward(self, datum):
return torch.tensor([self.mean]).squeeze()
class RandomPredictor(nn.Module):
def __init__(self, data):
super(RandomPredictor, self).__init__()
self._throwaway_param = nn.Linear(1, 1)
self.data = data
def remove_refs(self, arg):
pass
def forward(self, datum):
guess = random.choice(self.data.train).y
return torch.tensor([guess]).squeeze()
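if __name__ == "__main__":
    # Minimal smoke test (illustrative only): build a tiny dataset object with
    # the attributes these predictors expect (``data.train`` holding items
    # that each expose ``.y``).
    from types import SimpleNamespace
    data = SimpleNamespace(train=[SimpleNamespace(y=1.0), SimpleNamespace(y=3.0)])
    print(MeanPredictor(data)(None))    # tensor(2.)
    print(RandomPredictor(data)(None))  # tensor(1.) or tensor(3.)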
|
"""Test the doctr-versions-menu CLI interface."""
import json
import logging
import platform
import subprocess
import sys
from distutils.dir_util import copy_tree
from pathlib import Path
from click.testing import CliRunner
from pkg_resources import parse_version
import doctr_versions_menu
from doctr_versions_menu.cli import main as doctr_versions_menu_command
def test_version():
"""Test ``doctr-versions-menu --version``."""
runner = CliRunner()
result = runner.invoke(doctr_versions_menu_command, ['--version'])
assert result.exit_code == 0
normalized_version = str(parse_version(doctr_versions_menu.__version__))
assert normalized_version in result.output
def test_bad_config():
"""Test ``doctr-versions-menu --config for non-existing config``."""
runner = CliRunner()
result = runner.invoke(
doctr_versions_menu_command, ['--debug', '--config', 'xxx']
)
assert result.exit_code != 0
if sys.platform.startswith('win'):
# Windows might have slightly different messages
return
msg = "Cannot read configuration file: File 'xxx' does not exist"
if platform.python_version().startswith('3.5'):
# Python 3.5 hits the IOError earlier, resulting in a different message
msg = "No such file or directory"
assert msg in result.stdout
def get_staged_files():
"""Return output of `git ls-files` as list of Path objects."""
proc = subprocess.run(
['git', 'ls-files'],
check=True,
universal_newlines=True,
stdout=subprocess.PIPE,
)
return [Path(file) for file in proc.stdout.split("\n")]
def test_default_run(caplog):
"""Test doctr-versions-menu "default" run."""
root = Path(__file__).with_suffix('') / 'gh_pages_default'
runner = CliRunner()
caplog.set_level(logging.DEBUG)
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(doctr_versions_menu_command)
assert result.exit_code == 0
staged = get_staged_files()
expected_files = [
'index.html',
'.nojekyll',
'versions.json',
'versions.py',
]
for file in expected_files:
assert (cwd / file).is_file()
assert Path(file) in staged
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data['folders'] == ['master', 'v0.1.0', 'v1.0.0']
assert versions_data['versions'] == ['master', 'v1.0.0', 'v0.1.0']
assert versions_data['labels'] == {
'master': 'master',
'v0.1.0': 'v0.1.0',
'v1.0.0': 'v1.0.0 (latest)',
}
assert 'outdated' in versions_data['warnings']['v0.1.0']
assert versions_data['latest'] == 'v1.0.0'
assert versions_data['downloads']['master'] == [
['pdf', '/master/master.pdf'],
['zip', '/master/master.zip'],
['epub', '/master/master.epub'],
]
assert versions_data['downloads']['v1.0.0'] == [
['pdf', 'https://host/v1.0.0/v1.0.0.pdf'],
['html', 'https://host/v1.0.0/v1.0.0.zip'],
['epub', 'https://host/v1.0.0/v1.0.0.epub'],
]
index_html = (cwd / 'index.html').read_text()
# fmt: off
assert '<meta http-equiv="Refresh" content="0; url=v1.0.0" />' in index_html
assert '<p>Go to the <a href="v1.0.0">default documentation</a>.</p>' in index_html
# fmt: on
def test_many_releases(caplog):
"""Test doctr-versions-menu run for project with many releases."""
root = Path(__file__).with_suffix('') / 'gh_pages_many_releases'
runner = CliRunner()
caplog.set_level(logging.DEBUG)
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(doctr_versions_menu_command)
assert result.exit_code == 0
assert (cwd / 'index.html').is_file()
assert (cwd / '.nojekyll').is_file()
assert (cwd / 'versions.json').is_file()
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data == {
'downloads': {
'doc-testing': [],
'master': [
['pdf', '/master/master.pdf'],
['zip', '/master/master.zip'],
['epub', '/master/master.epub'],
],
'testing': [],
'v0.1.0': [
['pdf', '/v0.1.0/v0.1.0.pdf'],
['html', '/v0.1.0/v0.1.0.zip'],
['epub', '/v0.1.0/v0.1.0.epub'],
],
'v0.2.0': [
['pdf', '/v0.2.0/v0.2.0.pdf'],
['html', '/v0.2.0/v0.2.0.zip'],
['epub', '/v0.2.0/v0.2.0.epub'],
],
'v1.0.0': [
['pdf', 'https://host/v1.0.0/v1.0.0.pdf'],
['html', 'https://host/v1.0.0/v1.0.0.zip'],
['epub', 'https://host/v1.0.0/v1.0.0.epub'],
],
'v1.0.0+dev': [],
'v1.0.0-dev0': [],
'v1.0.0-post1': [
['pdf', 'https://host/v1.0.0/v1.0.0.pdf'],
['html', 'https://host/v1.0.0/v1.0.0.zip'],
['epub', 'https://host/v1.0.0/v1.0.0.epub'],
],
'v1.0.0-rc1': [],
'v1.1.0-rc1': [],
},
'folders': [
'doc-testing',
'master',
'testing',
'v0.1.0',
'v0.2.0',
'v1.0.0',
'v1.0.0+dev',
'v1.0.0-dev0',
'v1.0.0-post1',
'v1.0.0-rc1',
'v1.1.0-rc1',
],
'labels': {
'doc-testing': 'doc-testing',
'master': 'master',
'testing': 'testing',
'v0.1.0': 'v0.1.0',
'v0.2.0': 'v0.2.0',
'v1.0.0': 'v1.0.0',
'v1.0.0+dev': 'v1.0.0+dev',
'v1.0.0-dev0': 'v1.0.0-dev0',
'v1.0.0-post1': 'v1.0.0-post1 (latest)',
'v1.0.0-rc1': 'v1.0.0-rc1',
'v1.1.0-rc1': 'v1.1.0-rc1',
},
'latest': 'v1.0.0-post1',
'versions': [
'master',
'v1.1.0-rc1',
'v1.0.0-post1',
'v1.0.0+dev',
'v1.0.0',
'v1.0.0-rc1',
'v1.0.0-dev0',
'v0.2.0',
'v0.1.0',
'testing',
'doc-testing',
],
'warnings': {
'doc-testing': ['unreleased'],
'master': ['unreleased'],
'testing': ['unreleased'],
'v0.1.0': ['outdated'],
'v0.2.0': ['outdated'],
'v1.0.0': ['outdated'],
'v1.0.0+dev': ['outdated', 'unreleased'],
'v1.0.0-dev0': ['outdated', 'prereleased'],
'v1.0.0-post1': [],
'v1.0.0-rc1': ['outdated', 'prereleased'],
'v1.1.0-rc1': ['prereleased'],
},
}
def test_no_release(caplog):
"""Test doctr-versions-menu for when there is no "latest public release".
"""
root = Path(__file__).with_suffix('') / 'gh_pages_no_release'
runner = CliRunner()
caplog.set_level(logging.DEBUG)
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(doctr_versions_menu_command)
assert result.exit_code == 0
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data['latest'] is None
assert versions_data['warnings'] == {
'master': ['unreleased'],
'v1.0.0-rc1': ['prereleased'],
}
index_html = (cwd / 'index.html').read_text()
# fmt: off
assert '<meta http-equiv="Refresh" content="0; url=master" />' in index_html
assert '<p>Go to the <a href="master">default documentation</a>.</p>' in index_html
# fmt: on
def test_custom_index_html(caplog):
"""Test using a custom index.html."""
root = Path(__file__).with_suffix('') / 'gh_pages_custom_index'
runner = CliRunner()
caplog.set_level(logging.DEBUG)
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(doctr_versions_menu_command)
assert result.exit_code == 0
assert (cwd / 'index.html').is_file()
assert (cwd / '.nojekyll').is_file()
assert (cwd / 'versions.json').is_file()
msg = "This is the index.html for the gh_pages_custom_index test."
assert msg in (cwd / 'index.html').read_text()
if sys.platform.startswith('win'):
# Windows might have slightly different messages
return
assert 'Using index.html template from index.html_t' in caplog.messages
def test_custom_downloads_file(caplog):
"""Test using a custom downloads_file."""
root = Path(__file__).with_suffix('') / 'gh_pages_custom_downloads'
runner = CliRunner()
caplog.set_level(logging.DEBUG)
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(doctr_versions_menu_command, ['--debug'])
assert result.exit_code == 0
assert (cwd / 'versions.json').is_file()
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data['folders'] == ['master', 'v0.1.0', 'v1.0.0']
assert versions_data['downloads']['master'] == [
['pdf', '/master/master.pdf'],
['zip', '/master/master.zip'],
]
assert versions_data['downloads']['v1.0.0'] == [
['pdf', 'https://host/v1.0.0/v1.0.0.pdf'],
['html', 'https://host/v1.0.0/v1.0.0.zip'],
['epub', 'https://host/v1.0.0/v1.0.0.epub'],
]
if sys.platform.startswith('win'):
# Windows might have slightly different messages
return
assert 'Processing downloads_file master/downloads.md' in caplog.messages
assert 'INVALID URL: ./master/master.epub' in caplog.messages
def test_no_downloads_file(caplog):
"""Test using ``--no-downloads-file``."""
root = Path(__file__).with_suffix('') / 'gh_pages_custom_downloads'
runner = CliRunner()
caplog.set_level(logging.DEBUG)
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(
doctr_versions_menu_command, ['--no-downloads-file', '--debug']
)
assert result.exit_code == 0
assert (cwd / 'versions.json').is_file()
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data['folders'] == ['master', 'v0.1.0', 'v1.0.0']
assert versions_data['downloads']['master'] == []
assert versions_data['downloads']['v1.0.0'] == []
assert 'Disable download links (downloads_file is None)' in caplog.messages
def test_no_downloads_file_config(caplog):
"""Test using ``downloads_file = False`` in config."""
root = Path(__file__).with_suffix('') / 'gh_pages_no_downloads'
runner = CliRunner()
caplog.set_level(logging.DEBUG)
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(doctr_versions_menu_command, ['--debug'])
assert result.exit_code == 0
assert (cwd / 'versions.json').is_file()
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data['folders'] == ['master', 'v0.1.0', 'v1.0.0']
assert versions_data['downloads']['master'] == []
assert versions_data['downloads']['v1.0.0'] == []
assert 'Disable download links (downloads_file is None)' in caplog.messages
def test_custom_suffix(caplog):
"""Test using a custom suffixes for latest versions.
Also tests the the -c / --config flag.
"""
root = Path(__file__).with_suffix('') / 'gh_pages_custom_suffix'
runner = CliRunner()
caplog.set_level(logging.DEBUG)
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(doctr_versions_menu_command, ['-c', 'config'])
assert result.exit_code == 0
assert (cwd / 'versions.json').is_file()
assert not (cwd / 'versions.py').is_file()
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data['labels'] == {
'master': 'master',
'v0.1.0': 'v0.1.0',
'v1.0.0': 'v1.0.0 [latest]',
}
def test_custom_envvars(caplog):
"""Test using environment variables for configuration. """
root = Path(__file__).with_suffix('') / 'gh_pages_envvars'
env = {
'DOCTR_VERSIONS_MENU_LATEST': 'master',
'DOCTR_VERSIONS_MENU_DEBUG': "true",
'DOCTR_VERSIONS_MENU_VERSIONS': "<branches>, <releases>",
'DOCTR_VERSIONS_MENU_SUFFIX_LATEST': " [latest]",
'DOCTR_VERSIONS_MENU_WRITE_VERSIONS_PY': 'false',
'DOCTR_VERSIONS_MENU_WRITE_INDEX_HTML': 'false',
'DOCTR_VERSIONS_MENU_ENSURE_NO_JEKYLL': 'false',
'DOCTR_VERSIONS_MENU_DOWNLOADS_FILE': '',
'DOCTR_VERSIONS_MENU_WARNING': "post: <post-releases>; outdated: (<releases> < 0.2); prereleased:",
'DOCTR_VERSIONS_MENU_LABEL': "<releases>: {{ folder | replace('v', '', 1) }}; doc-testing: doc; testing: {{ folder }} (latest dev branch)",
}
runner = CliRunner()
caplog.set_level(logging.DEBUG)
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(doctr_versions_menu_command, env=env)
assert result.exit_code == 0
assert (cwd / 'versions.json').is_file()
assert not (cwd / 'versions.py').is_file()
assert not (cwd / 'index.html').is_file()
assert not (cwd / '.nojekyll').is_file()
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data == {
'downloads': {
'doc-testing': [],
'master': [],
'testing': [],
'v0.1.0': [],
'v0.2.0': [],
'v1.0.0': [],
'v1.0.0+dev': [],
'v1.0.0-dev0': [],
'v1.0.0-post1': [],
'v1.0.0-rc1': [],
'v1.1.0-rc1': [],
},
'folders': [
'doc-testing',
'master',
'testing',
'v0.1.0',
'v0.2.0',
'v1.0.0',
'v1.0.0+dev',
'v1.0.0-dev0',
'v1.0.0-post1',
'v1.0.0-rc1',
'v1.1.0-rc1',
],
'labels': {
'v0.1.0': '0.1.0',
'v0.2.0': '0.2.0',
'v1.0.0-dev0': '1.0.0-dev0',
'v1.0.0-rc1': '1.0.0-rc1',
'v1.0.0': '1.0.0',
'v1.0.0+dev': '1.0.0+dev',
'v1.0.0-post1': '1.0.0-post1',
'v1.1.0-rc1': '1.1.0-rc1',
'doc-testing': 'doc',
'master': 'master [latest]',
'testing': 'testing (latest dev branch)',
},
'latest': 'master',
'versions': [
'v1.1.0-rc1',
'v1.0.0-post1',
'v1.0.0+dev',
'v1.0.0',
'v1.0.0-rc1',
'v1.0.0-dev0',
'v0.2.0',
'v0.1.0',
'testing',
'master',
'doc-testing',
],
'warnings': {
'doc-testing': ['unreleased'],
'master': ['unreleased'],
'testing': ['unreleased'],
'v0.1.0': ['outdated'],
'v0.2.0': [],
'v1.0.0': [],
'v1.0.0+dev': ['unreleased'],
'v1.0.0-dev0': [],
'v1.0.0-post1': ['post'],
'v1.0.0-rc1': [],
'v1.1.0-rc1': [],
},
}
def test_custom_labels_warnings(caplog):
"""Test custom versions, labels, and warnings."""
root = Path(__file__).with_suffix('') / 'gh_pages_custom_labels_warnings'
runner = CliRunner()
caplog.set_level(logging.DEBUG)
expected_versions_data = {
'downloads': {
'doc-testing': [],
'master': [],
'testing': [],
'v0.1.0': [],
'v0.2.0': [],
'v1.0.0': [],
'v1.0.0+dev': [],
'v1.0.0-dev0': [],
'v1.0.0-post1': [],
'v1.0.0-rc1': [],
'v1.1.0-rc1': [],
},
'folders': [
'doc-testing',
'master',
'testing',
'v0.1.0',
'v0.2.0',
'v1.0.0',
'v1.0.0+dev',
'v1.0.0-dev0',
'v1.0.0-post1',
'v1.0.0-rc1',
'v1.1.0-rc1',
],
'labels': {
'doc-testing': 'doc',
'master': 'master (latest dev branch)',
'testing': 'testing',
'v0.1.0': '0.1.0',
'v0.2.0': '0.2.0',
'v1.0.0': '1.0.0 (stable)',
'v1.0.0+dev': '1.0.0+dev',
'v1.0.0-dev0': '1.0.0-dev0',
'v1.0.0-post1': '1.0.0-post1',
'v1.0.0-rc1': '1.0.0-rc1',
'v1.1.0-rc1': '1.1.0-rc1',
},
'latest': 'v1.0.0',
'versions': [
'doc-testing',
'testing',
'v0.1.0',
'v0.2.0',
'v1.0.0-dev0',
'v1.0.0-rc1',
'v1.0.0',
'v1.0.0+dev',
'v1.0.0-post1',
'v1.1.0-rc1',
'master',
],
'warnings': {
'doc-testing': ['unreleased'],
'master': ['unreleased'],
'testing': ['unreleased'],
'v0.1.0': ['outdated'],
'v0.2.0': [],
'v1.0.0': [],
'v1.0.0+dev': ['unreleased'],
'v1.0.0-dev0': [],
'v1.0.0-post1': ['post'],
'v1.0.0-rc1': [],
'v1.1.0-rc1': [],
},
}
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(doctr_versions_menu_command)
assert result.exit_code == 0
assert (cwd / 'index.html').is_file()
assert (cwd / '.nojekyll').is_file()
assert (cwd / 'versions.json').is_file()
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data == expected_versions_data
with runner.isolated_filesystem():
cwd = Path.cwd()
subprocess.run(['git', 'init'], check=True)
copy_tree(str(root), str(cwd))
result = runner.invoke(
doctr_versions_menu_command,
[
'-c',
'noconf',
'--suffix-latest= (stable)',
'--versions',
'((<branches> != master), <releases>, master)[::-1]',
'--no-write-versions-py',
'--warning',
'post',
'<post-releases>',
'--warning',
'outdated',
'(<releases> < 0.2)',
'--warning',
'prereleased',
'',
'--latest=v1.0.0',
'--label',
'<releases>',
"{{ folder | replace('v', '', 1) }}",
'--label',
'doc-testing',
'doc',
'--label',
'master',
'{{ folder }} (latest dev branch)',
],
)
assert result.exit_code == 0
assert (cwd / 'index.html').is_file()
assert (cwd / '.nojekyll').is_file()
assert (cwd / 'versions.json').is_file()
with (cwd / 'versions.json').open() as versions_json:
versions_data = json.load(versions_json)
assert versions_data == expected_versions_data
|
# body = {"email": "[email protected]", "name": "John Smith", "id": 3}
from flask import Flask
from flask_restful import Resource, Api
from flask_restful.reqparse import RequestParser
app = Flask(__name__)
api = Api(app, prefix="/api/v1")
users = [
{"email": "[email protected]",
"name": "Masnun",
"id": 1,
"price": 12.99
}
]
subscriber_request_parser = RequestParser(bundle_errors=True)
subscriber_request_parser.add_argument("name", type=str, required=True, help="Name has to be valid string")
subscriber_request_parser.add_argument("email", required=True)
subscriber_request_parser.add_argument("id", type=int, required=True, help="Please enter valid integer as ID")
subscriber_request_parser.add_argument("price", type=float, required=True, help="Please enter valid price")
class SubscriberCollection(Resource):
def get(self):
return users
def post(self):
args = subscriber_request_parser.parse_args()
#print(args.get("price"))
print(args["price"])
users.append(args)
return {"msg": "Subscriber added", "subscriber_data": args}
class Subscriber(Resource):
def get(self, id):
return {"msg": "Details about user id {}".format(id)}
def put(self, id):
return {"msg": "Update user id {}".format(id)}
def delete(self, id):
return {"msg": "Delete user id {}".format(id)}
api.add_resource(SubscriberCollection, '/subscribers')
api.add_resource(Subscriber, '/subscribers/<int:id>')
if __name__ == '__main__':
app.run(debug=True)
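# Example requests (illustrative; assumes the development server on the default
# http://127.0.0.1:5000):
#   curl http://127.0.0.1:5000/api/v1/subscribers
#   curl -X POST http://127.0.0.1:5000/api/v1/subscribers \
#        -d "name=Jane Doe" -d "[email protected]" -d "id=2" -d "price=9.99"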
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pyotherside
def read(path):
try:
with open(path, 'r') as f:
read_data = f.read()
pyotherside.send('log', "read file {0}".format(path))
return read_data
except IOError:
pyotherside.send('ioerror', "{0} file not readable".format(path))
if not os.path.exists(os.path.dirname(path)):
pyotherside.send('ioerror', "{0} dir does not exist".format(os.path.dirname(path)))
elif not os.path.isfile(path):
pyotherside.send('ioerror', "{0} file does not exist".format(path))
return ""
def write(path, content):
try:
with open(path, 'w') as f:
f.write(content)
pyotherside.send('log', "content saved")
except IOError:
pyotherside.send('ioerror', "{0} file not writeable".format(path))
if not os.path.exists(os.path.dirname(path)):
pyotherside.send('ioerror', "{0} dir does not exist".format(os.path.dirname(path)))
if not os.path.isfile(path):
pyotherside.send('ioerror', "{0} file does not exist".format(path))
def create(path):
try:
with open(path, 'w+') as f:
pyotherside.send('log', "file {0} created.".format(path))
except IOError:
pyotherside.send('ioerror', "{0} file not writeable".format(path))
if not os.path.exists(os.path.dirname(path)):
pyotherside.send('ioerror', "{0} dir does not exist".format(os.path.dirname(path)))
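# Illustrative usage (a sketch; this module is meant to be loaded from QML via
# pyotherside, and the path below is a placeholder):
#
#   create('/home/user/notes.txt')
#   write('/home/user/notes.txt', 'hello')
#   read('/home/user/notes.txt')    # -> 'hello', emits a 'log' signal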
|
import re
import contextlib
import testing  # provides testing.expect used below (assumed available in the test framework)
instance = None
frontend = None
RE_REVIEW_URL = re.compile(r"^remote:\s+http://.*/r/(\d+)\s*$")
@contextlib.contextmanager
def settings(user, settings, repository=None):
data = { "settings": [{ "item": item, "value": value }
for item, value in settings.items()] }
if repository:
data["repository"] = repository
# Set requested settings.
with frontend.signin(user):
frontend.operation("savesettings", data=data)
try:
yield
finally:
data = { "settings": [{ "item": item }
for item, value in settings.items()] }
if repository:
data["repository"] = repository
# Reset settings back to the default.
with frontend.signin(user):
frontend.operation("savesettings", data=data)
def createReviewViaPush(work, owner, commit="HEAD"):
with settings(owner, { "review.createViaPush": True }):
remote_url = instance.repository_url(owner)
output = work.run(["push", remote_url, "HEAD"], TERM="dumb")
for line in output.splitlines():
match = RE_REVIEW_URL.match(line)
if match:
return int(match.group(1))
else:
testing.expect.check("<review URL in 'git push' output>",
"<no review URL found>")
|
# Generated by Django 2.2.10 on 2020-05-05 15:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('controlled_vocabulary', '0004_remove_controlledvocabulary_test'),
('core', '0025_resourcelanguage'),
]
operations = [
migrations.AlterUniqueTogether(
name='resourcelanguage',
unique_together={('resource', 'language')},
),
]
|
# -*- coding: utf-8 -*-
#*****************************************************************************
# Copyright (C) 2003-2006 Gary Bishop.
# Copyright (C) 2006 Jorgen Stenarson. <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
from __future__ import print_function, unicode_literals, absolute_import
import System
from .common import validkey, KeyPress, make_KeyPress_from_keydescr
c32 = System.ConsoleKey
Shift = System.ConsoleModifiers.Shift
Control = System.ConsoleModifiers.Control
Alt = System.ConsoleModifiers.Alt
# table for translating virtual keys to X windows key symbols
code2sym_map = {#c32.CANCEL: 'Cancel',
c32.Backspace: 'BackSpace',
c32.Tab: 'Tab',
c32.Clear: 'Clear',
c32.Enter: 'Return',
# c32.Shift: 'Shift_L',
# c32.Control: 'Control_L',
# c32.Menu: 'Alt_L',
c32.Pause: 'Pause',
# c32.Capital: 'Caps_Lock',
c32.Escape: 'Escape',
# c32.Space: 'space',
c32.PageUp: 'Prior',
c32.PageDown: 'Next',
c32.End: 'End',
c32.Home: 'Home',
c32.LeftArrow: 'Left',
c32.UpArrow: 'Up',
c32.RightArrow: 'Right',
c32.DownArrow: 'Down',
c32.Select: 'Select',
c32.Print: 'Print',
c32.Execute: 'Execute',
# c32.Snapshot: 'Snapshot',
c32.Insert: 'Insert',
c32.Delete: 'Delete',
c32.Help: 'Help',
c32.F1: 'F1',
c32.F2: 'F2',
c32.F3: 'F3',
c32.F4: 'F4',
c32.F5: 'F5',
c32.F6: 'F6',
c32.F7: 'F7',
c32.F8: 'F8',
c32.F9: 'F9',
c32.F10: 'F10',
c32.F11: 'F11',
c32.F12: 'F12',
c32.F13: 'F13',
c32.F14: 'F14',
c32.F15: 'F15',
c32.F16: 'F16',
c32.F17: 'F17',
c32.F18: 'F18',
c32.F19: 'F19',
c32.F20: 'F20',
c32.F21: 'F21',
c32.F22: 'F22',
c32.F23: 'F23',
c32.F24: 'F24',
# c32.Numlock: 'Num_Lock,',
# c32.Scroll: 'Scroll_Lock',
# c32.Apps: 'VK_APPS',
# c32.ProcesskeY: 'VK_PROCESSKEY',
# c32.Attn: 'VK_ATTN',
# c32.Crsel: 'VK_CRSEL',
# c32.Exsel: 'VK_EXSEL',
# c32.Ereof: 'VK_EREOF',
# c32.Play: 'VK_PLAY',
# c32.Zoom: 'VK_ZOOM',
# c32.Noname: 'VK_NONAME',
# c32.Pa1: 'VK_PA1',
c32.OemClear: 'VK_OEM_CLEAR',
c32.NumPad0: 'NUMPAD0',
c32.NumPad1: 'NUMPAD1',
c32.NumPad2: 'NUMPAD2',
c32.NumPad3: 'NUMPAD3',
c32.NumPad4: 'NUMPAD4',
c32.NumPad5: 'NUMPAD5',
c32.NumPad6: 'NUMPAD6',
c32.NumPad7: 'NUMPAD7',
c32.NumPad8: 'NUMPAD8',
c32.NumPad9: 'NUMPAD9',
c32.Divide: 'Divide',
c32.Multiply: 'Multiply',
c32.Add: 'Add',
c32.Subtract: 'Subtract',
c32.Decimal: 'VK_DECIMAL'
}
# function to handle the mapping
def make_keysym(keycode):
try:
sym = code2sym_map[keycode]
except KeyError:
sym = ''
return sym
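# Reverse lookup: lower-cased key symbol name -> ConsoleKey code.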
sym2code_map = {}
for code,sym in code2sym_map.items():
sym2code_map[sym.lower()] = code
def key_text_to_keyinfo(keytext):
'''Convert a GNU readline style textual description of a key to keycode with modifiers'''
if keytext.startswith('"'): # "
return keyseq_to_keyinfo(keytext[1:-1])
else:
return keyname_to_keyinfo(keytext)
def char_to_keyinfo(char, control=False, meta=False, shift=False):
vk = (ord(char))
if vk & 0xffff == 0xffff:
print('VkKeyScan("%s") = %x' % (char, vk))
raise ValueError('bad key')
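    # VkKeyScan-style modifier flags in the high byte: 0x100 = Shift, 0x200 = Control, 0x400 = Alt.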
if vk & 0x100:
shift = True
if vk & 0x200:
control = True
if vk & 0x400:
meta = True
return (control, meta, shift, vk & 0xff)
def keyname_to_keyinfo(keyname):
control = False
meta = False
shift = False
while 1:
lkeyname = keyname.lower()
if lkeyname.startswith('control-'):
control = True
keyname = keyname[8:]
elif lkeyname.startswith('ctrl-'):
control = True
keyname = keyname[5:]
elif lkeyname.startswith('meta-'):
meta = True
keyname = keyname[5:]
elif lkeyname.startswith('alt-'):
meta = True
keyname = keyname[4:]
elif lkeyname.startswith('shift-'):
shift = True
keyname = keyname[6:]
else:
if len(keyname) > 1:
return (control, meta, shift, sym2code_map.get(keyname.lower()," "))
else:
return char_to_keyinfo(keyname, control, meta, shift)
def keyseq_to_keyinfo(keyseq):
res = []
control = False
meta = False
shift = False
while 1:
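        # Consume GNU readline escape prefixes one at a time: \C- (control), \M- (meta), \e (escape).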
if keyseq.startswith('\\C-'):
control = True
keyseq = keyseq[3:]
elif keyseq.startswith('\\M-'):
meta = True
keyseq = keyseq[3:]
elif keyseq.startswith('\\e'):
res.append(char_to_keyinfo('\033', control, meta, shift))
control = meta = shift = False
keyseq = keyseq[2:]
elif len(keyseq) >= 1:
res.append(char_to_keyinfo(keyseq[0], control, meta, shift))
control = meta = shift = False
keyseq = keyseq[1:]
else:
return res[0]
def make_keyinfo(keycode, state):
control = False
    meta = False
shift = False
return (control, meta, shift, keycode)
def make_KeyPress(char, state, keycode):
shift = bool(int(state) & int(Shift))
control = bool(int(state) & int(Control))
meta = bool(int(state) & int(Alt))
keyname = code2sym_map.get(keycode, "").lower()
if control and meta: #equivalent to altgr so clear flags
control = False
meta = False
elif control:
char = str(keycode)
return KeyPress(char, shift, control, meta, keyname)
|
import unittest
import torch
import gpytorch
from gpytorch.distributions import MultivariateNormal
from gpytorch.lazy import CholLazyTensor, TriangularLazyTensor
from gpytorch.variational import NaturalVariationalDistribution, TrilNaturalVariationalDistribution
class Float64Test(unittest.TestCase):
def setUp(self):
self.prev_type = torch.get_default_dtype()
torch.set_default_dtype(torch.float64)
def tearDown(self):
torch.set_default_dtype(self.prev_type)
class TestNatVariational(Float64Test):
def test_invertible_init(self, D=5):
mu = torch.randn(D)
cov = torch.randn(D, D).tril_()
dist = MultivariateNormal(mu, CholLazyTensor(TriangularLazyTensor(cov)))
v_dist = NaturalVariationalDistribution(D, mean_init_std=0.0)
v_dist.initialize_variational_distribution(dist)
out_dist = v_dist()
assert torch.allclose(out_dist.mean, dist.mean, rtol=1e-04, atol=1e-06)
assert torch.allclose(out_dist.covariance_matrix, dist.covariance_matrix)
def test_natgrad(self, D=5):
mu = torch.randn(D)
cov = torch.randn(D, D).tril_()
dist = MultivariateNormal(mu, CholLazyTensor(TriangularLazyTensor(cov)))
sample = dist.sample()
v_dist = NaturalVariationalDistribution(D)
v_dist.initialize_variational_distribution(dist)
mu = v_dist().mean.detach()
v_dist().log_prob(sample).squeeze().backward()
eta1 = mu.clone().requires_grad_(True)
eta2 = (mu[:, None] * mu + cov @ cov.t()).requires_grad_(True)
L = torch.cholesky(eta2 - eta1[:, None] * eta1)
dist2 = MultivariateNormal(eta1, CholLazyTensor(TriangularLazyTensor(L)))
dist2.log_prob(sample).squeeze().backward()
assert torch.allclose(v_dist.natural_vec.grad, eta1.grad)
assert torch.allclose(v_dist.natural_mat.grad, eta2.grad)
def test_optimization_optimal_error(self, num_inducing=16, num_data=32, D=2):
inducing_points = torch.randn(num_inducing, D)
class SVGP(gpytorch.models.ApproximateGP):
def __init__(self):
v_dist = NaturalVariationalDistribution(num_inducing)
v_strat = gpytorch.variational.UnwhitenedVariationalStrategy(self, inducing_points, v_dist)
super().__init__(v_strat)
self.mean_module = gpytorch.means.ZeroMean()
self.covar_module = gpytorch.kernels.RBFKernel()
def forward(self, x):
return MultivariateNormal(self.mean_module(x), self.covar_module(x))
model = SVGP().train()
likelihood = gpytorch.likelihoods.GaussianLikelihood().train()
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data)
X = torch.randn((num_data, D))
y = torch.randn(num_data)
def loss():
return -mll(model(X), y)
optimizer = torch.optim.SGD(
model.variational_strategy._variational_distribution.parameters(), lr=float(num_data)
)
optimizer.zero_grad()
loss().backward()
optimizer.step() # Now we should be at the optimum
optimizer.zero_grad()
loss().backward()
natgrad_natural_vec2, natgrad_natural_mat2 = (
model.variational_strategy._variational_distribution.natural_vec.grad.clone(),
model.variational_strategy._variational_distribution.natural_mat.grad.clone(),
)
# At the optimum, the (natural) gradients are zero:
assert torch.allclose(natgrad_natural_vec2, torch.zeros(()))
assert torch.allclose(natgrad_natural_mat2, torch.zeros(()))
class TestTrilNatVariational(Float64Test):
def test_invertible_init(self, D=5):
mu = torch.randn(D)
cov = torch.randn(D, D).tril_()
dist = MultivariateNormal(mu, CholLazyTensor(TriangularLazyTensor(cov)))
v_dist = TrilNaturalVariationalDistribution(D, mean_init_std=0.0)
v_dist.initialize_variational_distribution(dist)
out_dist = v_dist()
assert torch.allclose(out_dist.mean, dist.mean)
assert torch.allclose(out_dist.covariance_matrix, dist.covariance_matrix)
def test_natgrad(self, D=5):
mu = torch.randn(D)
cov = torch.randn(D, D)
cov = cov @ cov.t()
dist = MultivariateNormal(mu, CholLazyTensor(TriangularLazyTensor(cov.cholesky())))
sample = dist.sample()
v_dist = TrilNaturalVariationalDistribution(D, mean_init_std=0.0)
v_dist.initialize_variational_distribution(dist)
v_dist().log_prob(sample).squeeze().backward()
dout_dnat1 = v_dist.natural_vec.grad
dout_dnat2 = v_dist.natural_tril_mat.grad
# mean_init_std=0. because we need to ensure both have the same distribution
v_dist_ref = NaturalVariationalDistribution(D, mean_init_std=0.0)
v_dist_ref.initialize_variational_distribution(dist)
v_dist_ref().log_prob(sample).squeeze().backward()
dout_dnat1_noforward_ref = v_dist_ref.natural_vec.grad
dout_dnat2_noforward_ref = v_dist_ref.natural_mat.grad
def f(natural_vec, natural_tril_mat):
"Transform natural_tril_mat to L"
Sigma = torch.inverse(-2 * natural_tril_mat)
mu = natural_vec
return mu, Sigma.cholesky().inverse().tril()
(mu_ref, natural_tril_mat_ref), (dout_dmu_ref, dout_dnat2_ref) = jvp(
f,
(v_dist_ref.natural_vec.detach(), v_dist_ref.natural_mat.detach()),
(dout_dnat1_noforward_ref, dout_dnat2_noforward_ref),
)
assert torch.allclose(natural_tril_mat_ref, v_dist.natural_tril_mat), "Sigma transformation"
assert torch.allclose(dout_dnat2_ref, dout_dnat2), "Sigma gradient"
assert torch.allclose(mu_ref, v_dist.natural_vec), "mu transformation"
assert torch.allclose(dout_dmu_ref, dout_dnat1), "mu gradient"
def jvp(f, x, v):
"Simulate forward-mode AD using two reverse-mode AD"
x = tuple(xx.requires_grad_(True) for xx in x)
v = tuple(vv.requires_grad_(True) for vv in v)
y = f(*x)
grad_x = torch.autograd.grad(y, x, v, create_graph=True)
jvp_val = torch.autograd.grad(grad_x, v, v)
return y, jvp_val
|
# -*- #################
# ---------------------------------------------------------------------------
# Export_JSON_To_Shapefile.py
# Created on: 2016-10-19 15:40:33.00000
# Usage: Export_JSON_To_Shapefile <Point_Input_JSON> <Line_Input_JSON> <Polygon_Input_JSON>
# Description:
# ---------------------------------------------------------------------------
# Import arcpy module
import arcpy
import json
import os, random
import zipfile
# Script arguments
Point_Input_JSON = arcpy.GetParameterAsText(0)
Line_Input_JSON = arcpy.GetParameterAsText(1)
Polygon_Input_JSON = arcpy.GetParameterAsText(2)
# local variables
scratch_ws = arcpy.env.scratchWorkspace
Features_By_Shape = {}
#
# main process
#
if Point_Input_JSON is None or len(Point_Input_JSON.strip()) == 0:
arcpy.AddMessage("empty point data")
else:
try:
Features_By_Shape["Point"] = json.loads(Point_Input_JSON)
except Exception as e:
arcpy.AddError("ignore point data. %s" % e.message)
pass
if Line_Input_JSON is None or len(Line_Input_JSON.strip()) == 0:
arcpy.AddMessage("empty line data")
else:
try:
Features_By_Shape["Line"] = json.loads(Line_Input_JSON)
except Exception as e:
arcpy.AddError("ignore line data. %s" % e.message)
pass
if Polygon_Input_JSON is None or len(Polygon_Input_JSON.strip()) == 0:
arcpy.AddMessage("empty polygon data")
else:
try:
Features_By_Shape["Polygon"] = json.loads(Polygon_Input_JSON)
except Exception as e:
arcpy.AddError("ignore polygon data. %s" % e.message)
pass
# create the staging folders
stg_json_folder = os.path.join(scratch_ws, "json")
os.mkdir(stg_json_folder)
stg_shp_folder = os.path.join(scratch_ws, "shape")
os.mkdir(stg_shp_folder)
# convert json to shapefile
for shape_type in Features_By_Shape.keys():
if len(Features_By_Shape[shape_type]) > 0:
# Process: JSON To Features
with open(os.path.join(stg_json_folder, shape_type+".json"), "w") as json_file:
json_file.write(json.dumps(Features_By_Shape[shape_type]))
featureClass = "in_memory\\" + shape_type
arcpy.JSONToFeatures_conversion(json_file.name, featureClass)
# Process: Feature Class To Shapefile (multiple)
arcpy.FeatureClassToShapefile_conversion(featureClass, stg_shp_folder)
Export_File_Path = os.path.join(scratch_ws, "export.zip")
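# Package every shapefile component written to the staging folder into a single flat zip archive.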
with zipfile.ZipFile(Export_File_Path, "w", zipfile.ZIP_DEFLATED) as zf:
for dirname, subdirs, files in os.walk(stg_shp_folder):
for filename in files:
zf.write(os.path.join(dirname, filename), filename)
arcpy.SetParameter(3, Export_File_Path)
arcpy.AddMessage("export completed")
|
# MIT License
#
# Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
import gym, random, re, timeit, sys, ray, torch
import glob, os, argparse, json
from matplotlib import pyplot as plt
import dill
from collections import defaultdict
from smarts.core.utils.episodes import episodes
from ultra.scenarios.common.visualization import (
draw_intersection,
convert_to_gif,
profile_vehicles,
)
from smarts.core.agent import AgentSpec, Agent
from smarts.core.controllers import ActionSpaceType
from smarts.core.agent_interface import (
AgentInterface,
AgentType,
NeighborhoodVehicles,
)
from ultra.scenarios.common.social_vehicle_definitions import get_social_vehicle_color
from ultra.scenarios.generate_scenarios import get_direction
from ultra.scenarios.analysis.base_analysis import BaseAnalysis
num_gpus = 1 if torch.cuda.is_available() else 0
@ray.remote(num_gpus=num_gpus / 2)
class BehaviorAnalysis(BaseAnalysis):
def __init__(self):
super(BaseAnalysis, self).__init__()
self.analysis = []
def draw_plots(self, save_dir, failure_time=None):
start = 0
end = 600
behaviors = {}
for scenario in self.analysis:
for behavior in scenario.keys():
if behavior not in behaviors:
behaviors[behavior] = {
"avg_speed_junction": [],
"min_speed_junction": [],
"max_speed_junction": [],
"avg_accel_junction": [],
"min_accel_junction": [],
"max_accel_junction": [],
"total_steps_junction": [],
"exceeds_speed_limit": 0,
}
s, e = scenario[behavior]["in_junction"][0]
behaviors[behavior]["avg_speed_junction"].append(
np.mean(scenario[behavior]["speeds"][s:e])
)
behaviors[behavior]["min_speed_junction"].append(
scenario[behavior]["min_speed_junction"][0]
)
behaviors[behavior]["max_speed_junction"].append(
scenario[behavior]["max_speed_junction"][0]
)
behaviors[behavior]["avg_accel_junction"].append(
np.mean(scenario[behavior]["accels"][s:e])
)
behaviors[behavior]["min_accel_junction"].append(
scenario[behavior]["min_accel_junction"][0]
)
behaviors[behavior]["max_accel_junction"].append(
scenario[behavior]["max_accel_junction"][0]
)
behaviors[behavior]["total_steps_junction"].append(
scenario[behavior]["steps_in_junction"][0]
)
for key in [
"min_speed_junction",
"max_speed_junction",
"avg_speed_junction",
"min_accel_junction",
"max_accel_junction",
"avg_accel_junction",
"total_steps_junction",
]:
plt.figure()
if "speed" in key:
bins = [i for i in range(40)]
elif "accel" in key:
bins = [i for i in range(-15, 15)]
else:
bins = [i for i in range(1000)]
for behavior, data in behaviors.items():
n, bins, patches = plt.hist(
x=data[key], bins=bins, color="#0504aa", alpha=0.7, rwidth=0.85
)
plt.grid(axis="y", alpha=0.75)
plt.xlabel(key)
plt.title(behavior)
plt.ylabel("Frequency")
maxfreq = n.max()
# Set a clean upper y-axis limit.
plt.ylim(
ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10
)
plt.savefig(f"{save_dir}/{behavior}_{key}.png")
plt.close()
def analyze(self, episode_time, stopwatcher_max_steps, simulation_data):
analysis = {}
for v_id, v_state in self.social_vehicles_states.items():
if "stopwatcher" in v_state["behavior"]:
_, behavior = v_state["behavior"].split("_")
                if behavior not in analysis:
analysis[behavior] = {
"max_steps": [],
"steps_in_junction": [],
"max_road_speed": None,
"speeds": [],
"accels": [],
"min_speed_junction": [],
"max_speed_junction": [],
"min_accel_junction": [],
"max_accel_junction": [],
"in_junction": [],
"steps_in_junction": [],
}
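                # Slice the speed/acceleration traces to the steps spent inside the junction.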
in_junction = v_state["in_junction"]
speed_in_junction = v_state["speeds"][in_junction[0] : in_junction[1]]
accel_in_junction = v_state["accels"][in_junction[0] : in_junction[1]]
analysis[behavior]["in_junction"].append(in_junction)
analysis[behavior]["min_speed_junction"].append(min(speed_in_junction))
analysis[behavior]["max_speed_junction"].append(max(speed_in_junction))
analysis[behavior]["min_accel_junction"].append(min(accel_in_junction))
analysis[behavior]["max_accel_junction"].append(max(accel_in_junction))
print("in junction", in_junction)
analysis[behavior]["max_steps"].append(v_state["steps"])
analysis[behavior]["steps_in_junction"].append(
int(in_junction[1]) - int(in_junction[0])
)
analysis[behavior][
"max_road_speed"
] = 19.44 # road_speads[v_state["route"]]
analysis[behavior]["speeds"] = v_state["speeds"]
analysis[behavior]["accels"] = v_state["accels"]
self.analysis.append(analysis)
def save_data(self, save_dir):
with open(f"{save_dir}/analysis.pkl", "wb") as handle:
dill.dump(self.analysis, handle)
print(f"saved data to {save_dir}/analysis.pkl")
def load_data(self, path):
with open(path, "rb") as handle:
self.analysis = dill.load(handle)
print(f"loaded data {len(self.analysis)} from {path}")
def run(self, **params):
super().run(**params, analyze_func=self.analyze)
|
import base
from sys import argv
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import operator
from collections import defaultdict
THRESHOLD = 0.4
GRAPHFILE='per-char-analysis.png'
if len(argv) != 2:
    print("Usage: python h5_analyzer.py <db.h5>")
    exit(1)
X, Y = base.H5Dataset(argv[1], "Conjunto").get_XY()
total = X.shape[0]
cant = defaultdict(int)
for y in Y:
cant[chr(base.to_categorical(y)+32)] += 1
cant['Spaces'] = cant[' ']
del cant[' ']
cant = sorted(cant.items(), key=operator.itemgetter(1))
for i,(c,q) in enumerate(cant):
print("(#{}) {}: {} ({:.2f}%)".format(len(cant)-i, c, q, q/float(total)*100.))
print('Total: {}'.format(total))
print(list(reversed(cant)))
print('Saving plot to {}...'.format(GRAPHFILE))
qotros = sum([q for _,q in cant if q/float(total)*100<THRESHOLD])
cant = [(c,q) for c,q in cant if q/float(total)*100>=THRESHOLD]
cant = sorted(cant + [('Others', qotros)], key=operator.itemgetter(1))
cant = list(reversed(cant))
cant = [(c,q/float(total)*100.) for c,q in cant]
C, Q = zip(*cant)
#~ print(plt.rcParams["figure.figsize"])
#~ plt.rcParams["figure.figsize"] = [8, 15]
y_pos = np.arange(len(cant))
plt.barh(y_pos, Q, align='center', alpha=0.4)
plt.yticks(y_pos, C)
plt.xlabel('Percentage of the dataset represented')
plt.ylabel('Character')
plt.grid(True)
plt.title('')
for i, q in enumerate(Q):
plt.text(q + 0.01, i, "{:.2f}%".format(q), color='black', fontweight='bold')
plt.savefig(GRAPHFILE, bbox_inches='tight', dpi = 300, figsize=(100, 100))
plt.clf()
|
import unittest
import numpy as np
from itertools import product
from apal import Khachaturyan
from apal_cxx import PyKhachaturyan
from apal_cxx import pytest_functional_derivative
from apal_cxx import pytest_contract_tensors
from apal_cxx import pytest_B_tensor_element
from apal_cxx import pytest_strain_energy_sphere
from apal.tools import to_full_rank4
class TestKhacaturyan(unittest.TestCase):
K = 50.0
G = 26.0
def get_isotropic_tensor(self):
C = np.zeros((6, 6))
C[0, 0] = C[1, 1] = C[2, 2] = self.K + 4*self.G/3.0
C[0, 1] = C[0, 2] = \
C[1, 0] = C[1, 2] = \
C[2, 0] = C[2, 1] = self.K - 2.0*self.G/3.0
C[3, 3] = C[4, 4] = C[5, 5] = 2*self.G
return C
@property
def poisson(self):
return 0.5*(3*self.K - 2*self.G)/(3*self.K + self.G)
def isotropic_green_function(self, k):
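        # Analytic zeroth-order Green function of an elastically isotropic medium; k is assumed to be a unit wave vector.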
return np.eye(3)/self.G - 0.5*np.outer(k, k)/(self.G*(1.0 - self.poisson))
def get_sphere_voxels(self, N):
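        # Indicator function of a sphere centred in an N x N x N voxel grid.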
shape_func = np.zeros((N, N, N), dtype=np.uint8)
indx = np.array(range(N))
ix, iy, iz = np.meshgrid(indx, indx, indx)
r_sq = (ix-N/2)**2 + (iy-N/2)**2 + (iz-N/2)**2
r = N/8.0
shape_func[r_sq<r] = 1
return shape_func
def eff_stress(self, elastic, misfit):
return np.einsum('ijkl,kl->ij', elastic, misfit)
def get_plate_voxels(self, N):
shape_func = np.zeros((N, N, N), dtype=np.uint8)
width = int(N/4)
shape_func[:width, :width, :2] = 1
return shape_func
def get_needle_voxels(self, N):
shape_func = np.zeros((N, N, N), dtype=np.uint8)
width = int(N/4)
shape_func[:width, :2, :2] = 1
return shape_func
def test_isotropic(self):
misfit = np.eye(3)*0.05
strain = Khachaturyan(elastic_tensor=self.get_isotropic_tensor(),
misfit_strain=misfit)
k = np.array([5.0, -2.0, 7.0])
khat = k/np.sqrt(k.dot(k))
zeroth = strain.zeroth_order_green_function(khat)
self.assertTrue(np.allclose(zeroth, self.isotropic_green_function(khat)))
def eshelby_strain_energy_sphere(self, misfit):
return 2*(1+self.poisson)*self.G*misfit**2/(1-self.poisson)
def eshelby_strain_energy_plate(self, misfit):
return 2*(1+self.poisson)*self.G*misfit**2/(1-self.poisson)
def eshelby_strain_energy_needle(self, misfit):
return 2*(1+self.poisson)*self.G*misfit**2/(1-self.poisson)
def test_green_function_cpp(self):
misfit = np.eye(3)*0.05
elastic = to_full_rank4(self.get_isotropic_tensor())
pykhach = PyKhachaturyan(3, elastic, misfit)
k = np.array([-1.0, 3.0, 2.5])
k /= np.sqrt(k.dot(k))
gf = pykhach.green_function(k)
self.assertTrue(np.allclose(gf, self.isotropic_green_function(k)))
def test_frequency(self):
misfit = np.eye(3)*0.05
ft = np.zeros((8, 8, 8))
elastic = to_full_rank4(self.get_isotropic_tensor())
pykhach = PyKhachaturyan(3, elastic, misfit)
freq = np.fft.fftfreq(ft.shape[0])
for i in range(ft.shape[0]):
indx = np.array([i, 0, 0])
self.assertAlmostEqual(freq[i], pykhach.wave_vector(indx, ft.shape[0])[0])
def test_sphere(self):
eps = 0.05
misfit = np.eye(3)*eps
strain = Khachaturyan(elastic_tensor=self.get_isotropic_tensor(),
misfit_strain=misfit)
sph = self.get_sphere_voxels(256)
E = strain.strain_energy_voxels(sph)
E_eshelby = self.eshelby_strain_energy_sphere(eps)
self.assertAlmostEqual(E, E_eshelby, places=3)
def test_sphere_pure_python(self):
eps = 0.05
misfit = np.eye(3)*eps
strain = Khachaturyan(elastic_tensor=self.get_isotropic_tensor(),
misfit_strain=misfit)
sph = self.get_sphere_voxels(32)
E = strain.strain_energy_voxels(sph)
E_eshelby = self.eshelby_strain_energy_sphere(eps)
self.assertAlmostEqual(E, E_eshelby, places=3)
def test_plate_voxels(self):
eps = 0.05
misfit = np.eye(3)*eps
strain = Khachaturyan(elastic_tensor=self.get_isotropic_tensor(),
misfit_strain=misfit)
plate = self.get_plate_voxels(256)
E = strain.strain_energy_voxels(plate)
E_eshelby = self.eshelby_strain_energy_plate(eps)
self.assertAlmostEqual(E, E_eshelby, places=3)
def test_needle_voxels(self):
eps = 0.05
misfit = np.eye(3)*eps
strain = Khachaturyan(elastic_tensor=self.get_isotropic_tensor(),
misfit_strain=misfit)
needle = self.get_needle_voxels(256)
E = strain.strain_energy_voxels(needle)
E_eshelby = self.eshelby_strain_energy_needle(eps)
self.assertAlmostEqual(E, E_eshelby, places=3)
def test_effective_stress(self):
eps = 0.05
misfit = np.eye(3)*eps
misfit[0, 1] = 0.01
misfit[0, 2] = -0.2
misfit[1, 2] = 0.1
misfit = 0.5*(misfit + misfit.T)
elastic = to_full_rank4(self.get_isotropic_tensor())
stress = self.eff_stress(elastic, misfit)
khac = PyKhachaturyan(3, elastic, misfit)
stress_cpp = khac.effective_stress()
self.assertTrue(np.allclose(stress, stress_cpp))
def test_contract_tensors(self):
t1 = [[0.1, 0.2, 0.1],
[0.1, 5.0, -0.2],
[0.1, -0.2, -2.0]]
t2 = [[-0.11, 2.0, 3.0],
[2.0, 4.0, 0.2],
[3.0, 0.2, -1.0]]
cpp_contract = pytest_contract_tensors(t1, t2)
pycontract = np.einsum("ij,ij", t1, t2)
self.assertAlmostEqual(cpp_contract, pycontract)
def test_B_tensor_element(self):
gf = [[0.5, 0.2, 0.1],
[0.2, -0.2, 0.3],
[0.1, 0.3, 1.0]]
t1 = [[0.1, 0.2, 0.1],
[0.1, 5.0, -0.2],
[0.1, -0.2, -2.0]]
t2 = [[-0.11, 2.0, 3.0],
[2.0, 4.0, 0.2],
[3.0, 0.2, -1.0]]
direction = np.array([0.3, 0.4, -0.1])
direction /= np.sqrt(direction.dot(direction))
cpp_element = pytest_B_tensor_element(direction, gf, t1, t2)
py_elem = np.einsum("i,ij,jk,kl,l", direction, t1, gf, t2, direction)
self.assertAlmostEqual(cpp_element, py_elem)
def test_functional_derivative_one_field(self):
elastic = to_full_rank4(self.get_isotropic_tensor())
misfit = np.eye(3)
misfit[0, 0] = 0.05
misfit[1, 1] = -0.02
misfit[2, 2] = 0.0
init_field = np.zeros((128, 128))
init_field[:15, :15] = 0.8
try:
result = pytest_functional_derivative(elastic, misfit, init_field.ravel())
except RuntimeError as exc:
            # If it fails, make sure that it failed for the right reason
self.assertTrue("The package was compiled without FFTW!" in str(exc))
return
func_deriv = result["func_deriv"]
        # Test 1: make sure that all entries outside the 15x15 block are zero
self.assertTrue(np.allclose(init_field[15:, 15:], 0.0))
init_field /= 0.8
# Make sure field was passed correctly
self.assertTrue(np.allclose(init_field, result["shape_squared_in"]))
ft = np.fft.fft2(init_field**2)
freq = np.fft.fftfreq(ft.shape[0])
# Make sure that the real part of FFT match
self.assertTrue(np.allclose(np.real(ft), result["ft_shape_real"]))
V = 15*15
stress = self.eff_stress(elastic, misfit)
        # Analytical calculation
for indx in product(range(ft.shape[0]), repeat=2):
kvec = np.array([freq[indx[0]], freq[indx[1]], 0.0])
k = np.sqrt(kvec.dot(kvec))
if k < 1E-6:
continue
unit_vec = kvec/k
G = self.isotropic_green_function(unit_vec)
B = np.einsum('i,ij,jk,kl,l', unit_vec, stress, G, stress, unit_vec)
ft[indx] *= B
# Set the origin to the average value of the neighbours
ft[0, 0] = 0.25*(ft[0, 1] + ft[1, 0] + ft[-1, 0] + ft[0, -1])
self.assertTrue(np.allclose(np.real(ft), result["b_tensor_dot_ft_squared"]))
ift_full = np.fft.ifft2(ft)
self.assertTrue(np.allclose(np.imag(ift_full), 0.0))
ift = np.real(ift_full)
misfit_contrib = np.einsum('ijkl,ij,kl', elastic, misfit, misfit)*init_field**2
self.assertTrue(np.allclose(misfit_contrib, result["misfit_energy_contrib"]))
expect = 2*init_field*(misfit_contrib - ift)
self.assertTrue(np.allclose(func_deriv, expect))
def test_strain_field(self):
misfit = np.zeros((3, 3))
misfit[0, 0] = 0.05
khach = Khachaturyan(elastic_tensor=self.get_isotropic_tensor(),
misfit_strain=misfit)
shape = np.zeros((128, 128))
shape[:, :20] = 1.0
strain = khach.strain_field(shape)
# Compare with exact solution
self.assertTrue(np.allclose(strain[(0, 0)], 0.0))
self.assertTrue(np.allclose(strain[(2, 2)], 0.0))
self.assertTrue(np.allclose(strain[(0, 1)], 0.0))
self.assertTrue(np.allclose(strain[(0, 2)], 0.0))
self.assertTrue(np.allclose(strain[(1, 2)], 0.0))
# TODO: Confirm that strain[(1, 1)] also satisfies
# the solution. It seems to have the right structure
# at least...
def test_strain_energy(self):
misfit = np.zeros((3, 3))
misfit[0, 0] = misfit[1, 1] = misfit[2, 2] = 0.05
elastic = to_full_rank4(self.get_isotropic_tensor())
try:
result = pytest_strain_energy_sphere(elastic, misfit)
except RuntimeError as exc:
            # If it fails, make sure that it failed for the right reason
self.assertTrue("The package was compiled without FFTW!" in str(exc))
return
# Expected misfit contribution
vol = result["volume"]
expected_misfit = 0.5*np.einsum("ijkl,ij,kl", elastic, misfit, misfit)
self.assertAlmostEqual(expected_misfit, result["misfit_contrib"]/vol)
eshelby_energy = self.eshelby_strain_energy_sphere(misfit[0, 0])
self.assertAlmostEqual(eshelby_energy, result["energy"], places=2)
if __name__ == "__main__":
unittest.main()
|
from django.contrib import admin
from nano.badge.models import Badge
class BadgeAdmin(admin.ModelAdmin):
list_display = ('level', 'name', 'description')
list_filter = ('level',)
ordering = ('level', 'name',)
admin.site.register(Badge, BadgeAdmin)
|
import pandas as pd
#from datetime import datetime
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.pyplot as plt
from scipy import interpolate
# some processing stuff
df = pd.read_excel(
"data/cruise-arrivals.xlsx",
dtype= {"YEAR":int, "ARRIVED DATE": str, "TIME": str, "HOURS": str})
df = df[df["Year"] == 2018]
df["ARRIVAL"] = pd.to_datetime(df["ARRIVED DATE"] + " " + df["TIME"] )
df["DEPATURE"] = df["ARRIVAL"] + pd.to_timedelta(df['HOURS'],"h")
# create function to interpolate based on relative time length stay of ship
demand = np.array([12, 12, 10, 6, 4, 4, 4, 4, 6, 10, 12, 12])
timerel = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])/11
f = interpolate.interp1d(timerel, demand)
# create demand profiles for every arrived ships based on its duration
stays = {}
for _, row in df.iterrows():
    df_ = pd.DataFrame(index=pd.date_range(row["ARRIVAL"], row["DEPARTURE"], freq="h"))
df_["timerel"] = [i/len(df_) for i in range(1, len(df_)+1)]
df_["demand"] = df_.apply(lambda x: f(x["timerel"]), axis=1)
stays[_] = df_
# concat, resample and aggregated values,
demand_agg = pd.concat(stays.values(), axis=0).sort_index()
demand_agg_sum = demand_agg.resample("H").sum()
demand_agg_sum.sum()
# fix missing indices and fillna with 0
demand_agg_sum = demand_agg_sum.reindex(pd.date_range(start="2018", periods=8760, freq="H")).fillna(0)["demand"]
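# Normalise the hourly series so the profile sums to one and can be scaled by an annual demand later.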
cruise_profile = demand_agg_sum / demand_agg_sum.sum()
cruise_profile.to_csv("data/cruise_ship_profile.csv")
# plot to see how it looks :-)
ax = demand_agg_sum.plot()
ax.set_ylabel("Aggregated Cruise Ship Demand in MW")
plt.savefig("visualization/figures/input-cruise-ship-demand.pdf")
# to see the structure of the arrivals and departures
df["ARRIVAL TIME"] = pd.to_timedelta(df["TIME"]) / pd.offsets.Hour(1) #.astype("float")
df["HOURS OF STAY"] = pd.to_timedelta(df["HOURS"]) / pd.offsets.Hour(1) #.astype("float")
df.plot.scatter(x="ARRIVAL TIME", y="HOURS OF STAY")
plt.savefig("visualization/figures/input-arrival-and-stay.pdf")
load = pd.read_excel("scenarios/REF.xls", sheet_name="load", index_col=0)
df = pd.read_excel("scenarios/REF.xls", sheet_name="profiles", index_col=0, parse_dates=True)
profiles= df.iloc[:, 0:3]
amount = load["amount"].values
abs_profiles = profiles.multiply(amount)
abs_profiles["BB-Aggregated"] = abs_profiles.sum(axis=1)
ax = abs_profiles.iloc[4:168+4].plot(grid=True, color=["orange", "green", "skyblue", "darkred"])
#ax.set_ylim(0, 400)
ax.set_ylabel("Demand in MWh")
ax.set_xlabel("Hour")
handles, labels = ax.get_legend_handles_labels()
lgd = {k: v for k, v in dict(zip(handles, labels)).items()}
ax.set_ylabel("Demand in MW")
ax.grid(linestyle="--", lw=0.2)
lgd = ax.legend(
list(lgd.keys()),
["el-demand", "evcc-demand", "cruise-demand", "aggregated-demand"],
loc="lower left",
bbox_to_anchor=(0.1, -0.40),
ncol=2,
borderaxespad=0,
frameon=False,
)
inset = inset_axes(ax,
width="30%", # width = 30% of parent_bbox
height=1, # height : 1 inch
loc=1)
abs_profiles.iloc[:,2].plot(ax=inset, color="skyblue")
inset.set_title("Cruise Ships", backgroundcolor='w')
inset.set_ylabel("Demand in MW.", backgroundcolor='w')
inset.set_xlabel("Hour of year", backgroundcolor='w')
inset.set_xticklabels([""], backgroundcolor='w')
plt.savefig(
"visualization/figures/load-profiles-input.pdf",
#bbox_extra_artists=(lgd,),
bbox_inches="tight",
)
|
from django.contrib import admin
from .models import WikiArticleModel, WikiPermModel
# Register your models here.
# Wiki
@admin.register(WikiArticleModel)
class WikiArticleAdmin(admin.ModelAdmin):
    search_fields = ('title', 'level')
@admin.register(WikiPermModel)
class WikiPermAdmin(admin.ModelAdmin):
pass |
import unittest
import transaction
import random
from pyramid import testing
from pyramid.response import Response
import pytest
from mood_bot.models import (
User,
Sentiments,
get_tm_session,
)
from mood_bot.models.meta import Base
from faker import Faker
from passlib.apps import custom_app_context as context
@pytest.fixture(scope="session")
def configuration(request):
"""Set up a Configurator instance.
This Configurator instance sets up a pointer to the location of the
database.
It also includes the models from your app's model package.
Finally it tears everything down, including the in-memory SQLite database.
This configuration will persist for the entire duration of your PyTest run.
"""
config = testing.setUp(settings={
'sqlalchemy.url': 'postgres://localhost:5432/test_moodybot'
})
config.include("mood_bot.models")
config.include("mood_bot.routes")
config.include("mood_bot.security")
def teardown():
testing.tearDown()
request.addfinalizer(teardown)
return config
@pytest.fixture
def db_session(configuration, request):
"""Create a session for interacting with the test database.
This uses the dbsession_factory on the configurator instance to create a
new database session. It binds that session to the available engine
and returns a new session for every call of the dummy_request object.
"""
SessionFactory = configuration.registry["dbsession_factory"]
session = SessionFactory()
engine = session.bind
Base.metadata.create_all(engine)
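    # Seed the test database with a few fake users and randomly generated sentiment rows.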
FAKE_USER = [
{'username': 'kurtykurt', 'password': context.hash('kurtkurt')},
{'username': 'caseyisawesome', 'password': context.hash('casey')},
{'username': 'ajshorty', 'password': context.hash('shorty')},
{'username': 'annabanana', 'password': context.hash('banana')}
]
fake_data = Faker()
FAKE_DATA = [
{'body': fake_data.text(),
'negative_sentiment': fake_data.random.random(),
'positive_sentiment': fake_data.random.random(),
'user_id': random.randint(1, 3)}
for i in range(20)
]
faker_user = []
for fake in FAKE_USER:
even_newer_result = User(
username=fake['username'],
password=fake['password'],
)
faker_user.append(even_newer_result)
session.add_all(faker_user)
faker_models = []
for fake in FAKE_DATA:
newer_results = Sentiments(
body=fake['body'],
negative_sentiment=fake['negative_sentiment'],
positive_sentiment=fake['positive_sentiment'],
user_id=fake['user_id']
)
faker_models.append(newer_results)
session.add_all(faker_models)
def teardown():
session.transaction.rollback()
Base.metadata.drop_all(engine)
request.addfinalizer(teardown)
return session
@pytest.fixture
def post_request(dummy_request):
dummy_request.method = "POST"
return dummy_request
@pytest.fixture
def dummy_request(db_session):
"""Dummy request fixture."""
return testing.DummyRequest(dbsession=db_session)
def test_home_view_returns_response():
"""Home view returns a Response object."""
from mood_bot.views.default import home_view
request = testing.DummyRequest()
response = home_view(request)
assert isinstance(response, dict)
def test_login_view_returns_response():
"""Login view returns a Response object."""
from mood_bot.views.default import login
request = testing.DummyRequest()
response = login(request)
assert isinstance(response, dict)
def test_login_error(dummy_request):
"""Test error for login."""
from mood_bot.views.default import login
dummy_request.method = "POST"
data_dict = {'username': 'thisismylogin', 'password': 'notmypassword'}
dummy_request.POST = data_dict
response = login(dummy_request)
assert response == {'error': 'Invalid username or password.'}
# def test_login_redirects_to_home_view(post_request):
# """Test that login redirects to the home page after login."""
# from mood_bot.views.default import login
# from pyramid.httpexceptions import HTTPFound
# data_dict = {'username': 'kurtykurt', 'password': 'kurtkurt'}
# post_request.POST = data_dict
# response = login(post_request)
# assert response.status_code == 302
# assert isinstance(response, HTTPFound)
def test_about_view_returns_response():
"""About view returns a Response object."""
from mood_bot.views.default import about_view
request = testing.DummyRequest()
response = about_view(request)
assert isinstance(response, dict)
def test_register_view_returns_response():
"""Register view returns a Response object."""
from mood_bot.views.default import register
request = testing.DummyRequest()
response = register(request)
assert isinstance(response, dict)
# def test_register_user_for_login(dummy_request):
# """Test that checks for user login."""
# from mood_bot.views.default import register
# from pyramid.httpexceptions import HTTPFound
# data_dict = {'username': 'kurtykurt', 'password': 'kurtkurt', 'password-check': 'kurtkurt'}
# dummy_request.POST = data_dict
# response = register(dummy_request)
# assert response.status_code == 302
# assert isinstance(response, HTTPFound)
def test_register_error(dummy_request):
"""Test that login error raises for invalid registration."""
from mood_bot.views.default import register
data_dict = {'username': '', 'password': '', 'password-check': ''}
dummy_request.POST = data_dict
response = register(dummy_request)
assert response == {'error': 'Please provide a username and password.'}
def test_register_error_for_non_matching_password(dummy_request):
"""Test that login error raises for not matching password."""
from mood_bot.views.default import register
data_dict = {'username': 'kurtykurt', 'password': 'kurtkurt', 'password-check': 'kurt'}
dummy_request.POST = data_dict
response = register(dummy_request)
assert response == {'error': 'Passwords do not match.'}
def test_twitter_main_response_is_response():
"""Test that the main function in twitter returns response."""
from mood_bot.scripts.twitter import main
query = 'Dinosaur'
response = main(query)
assert response == response
def test_twitter_main_tweets_is_response():
"""Test the main function does the thing."""
from mood_bot.scripts.twitter import main
query = 'nhuntwalker'
response = main(query)
tweets = []
tweets.extend(response)
assert response == tweets
def test_twitter_view_does_the_thing():
"""About twitter returns a Response object."""
from mood_bot.views.default import twitter_view
request = testing.DummyRequest()
response = twitter_view(request)
assert isinstance(response, dict)
def test_twitter_view_post_request(post_request):
"""Test that twitter post request returns results."""
tweets = 'Dinosaur'
post_request.POST = tweets
response = post_request.POST
assert response == tweets
|
# Copyright 2018 Davide Spadini
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import platform
import sys
import psutil
from pydriller import RepositoryMining
from datetime import datetime
logging.basicConfig(level=logging.WARNING)
skip_remote = True
def test_memory(caplog):
if skip_remote:
return
caplog.set_level(logging.WARNING)
logging.warning("Starting with nothing...")
diff_with_nothing, all_commits_with_nothing = mine(0)
logging.warning("Starting with everything...")
diff_with_everything, all_commits_with_everything = mine(1)
logging.warning("Starting with metrics...")
diff_with_metrics, all_commits_with_metrics = mine(2)
max_values = [max(all_commits_with_nothing),
max(all_commits_with_everything),
max(all_commits_with_metrics)]
logging.warning("Max values are: {}".format(max_values))
minutes_with_everything = (diff_with_everything.seconds % 3600) // 60
minutes_with_metrics = (diff_with_metrics.seconds % 3600) // 60
logging.warning("TIME: With nothing: {}:{}:{} ({} commits/sec), "
"with everything: {}:{}:{} ({} commits/sec), "
"with metrics: {}:{}:{} ({} commits/sec)".format(
diff_with_nothing.seconds // 3600,
(diff_with_nothing.seconds % 3600) // 60,
diff_with_nothing.seconds % 60,
973 // diff_with_nothing.seconds if diff_with_nothing.seconds != 0 else 0,
diff_with_everything.seconds // 3600,
(diff_with_everything.seconds % 3600) // 60,
diff_with_everything.seconds % 60,
973 // diff_with_everything.seconds,
diff_with_metrics.seconds // 3600,
(diff_with_metrics.seconds % 3600) // 60,
diff_with_metrics.seconds % 60,
973 // diff_with_metrics.seconds
))
if any(val > 250 for val in max_values) or \
minutes_with_everything >= 1 or \
minutes_with_metrics >= 7:
# if to analyze 1000 commits requires more than 250MB of RAM,
# more than 1 minute without metrics or
# 7 minutes with metrics, print it
log(diff_with_nothing, all_commits_with_nothing,
diff_with_everything, all_commits_with_everything,
diff_with_metrics, all_commits_with_metrics)
assert 973 == len(all_commits_with_nothing) == len(all_commits_with_everything) == len(all_commits_with_metrics)
def log(diff_with_nothing, all_commits_with_nothing,
diff_with_everything, all_commits_with_everything,
diff_with_metrics, all_commits_with_metrics):
text = "*PYTHON V{}.{} - System: {}*\n" \
"*Max memory (MB)*\n" \
"With nothing: {}, with everything: {}, with metrics: {}\n" \
"*Min memory (MB)*\n" \
"With nothing: {}, with everything: {}, with metrics: {} \n" \
"*Time*\n" \
"With nothing: {}:{}:{}, with everything: {}:{}:{}, with metrics: {}:{}:{} \n" \
"*Total number of commits*: {}\n" \
"*Commits per second:*\n" \
"With nothing: {}, with everything: {}, with metrics: {}"
print(text.format(
sys.version_info[0], sys.version_info[1], platform.system(),
max(all_commits_with_nothing), max(all_commits_with_everything), max(all_commits_with_metrics),
min(all_commits_with_nothing), min(all_commits_with_everything), min(all_commits_with_metrics),
diff_with_nothing.seconds // 3600, (diff_with_nothing.seconds % 3600) // 60, diff_with_nothing.seconds % 60,
diff_with_everything.seconds // 3600, (diff_with_everything.seconds % 3600) // 60,
diff_with_everything.seconds % 60,
diff_with_metrics.seconds // 3600, (diff_with_metrics.seconds % 3600) // 60, diff_with_metrics.seconds % 60,
len(all_commits_with_nothing),
len(all_commits_with_nothing) / diff_with_nothing.seconds,
len(all_commits_with_everything) / diff_with_everything.seconds,
len(all_commits_with_metrics) / diff_with_metrics.seconds
))
def mine(_type):
p = psutil.Process(os.getpid())
dt1 = datetime(2017, 1, 1)
dt2 = datetime(2017, 7, 1)
all_commits = []
start = datetime.now()
for commit in RepositoryMining('test-repos-hadoop/hadoop',
since=dt1,
to=dt2).traverse_commits():
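        # Sample the resident set size (in MB) of this process after each processed commit.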
memory = p.memory_info()[0] / (2 ** 20)
all_commits.append(memory)
h = commit.author.name
if _type == 0:
continue
for mod in commit.modifications:
dd = mod.diff
if _type == 1:
continue
if mod.filename.endswith('.java'):
cc = mod.complexity
end = datetime.now()
diff = end - start
return diff, all_commits
|
#!/usr/bin/env python
"""Tests for `deep_translator` package."""
import pytest
from deep_translator import PonsTranslator, exceptions
@pytest.fixture
def pons():
return PonsTranslator(source="english", target="french")
def test_content(pons):
"""Sample pytest test function with the pytest fixture as an argument."""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
assert pons.translate(word="good") is not None
def test_inputs():
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
PonsTranslator(source="", target="")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
PonsTranslator(source="auto", target="")
with pytest.raises(exceptions.InvalidSourceOrTargetLanguage):
PonsTranslator(source="", target="en")
l1 = PonsTranslator("en", "fr")
l2 = PonsTranslator("english", "french")
assert l1._source == l2._source
assert l1._target == l2._target
def test_payload(pons):
with pytest.raises(exceptions.NotValidPayload):
pons.translate(123)
with pytest.raises(exceptions.NotValidPayload):
pons.translate({})
with pytest.raises(exceptions.NotValidPayload):
pons.translate([])
with pytest.raises(exceptions.NotValidLength):
pons.translate("a" * 51)
|
"""
Copyright 2021, Institute e-Austria, Timisoara, Romania
http://www.ieat.ro/
Developers:
* Gabriel Iuhasz, [email protected]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from edelogger import logger
import csv
import os
import io
from io import StringIO
from datetime import datetime
import time
import sys
import pandas as pd
import numpy as np
import glob
from util import csvheaders2colNames # TODO Check ARFF compatibility
from sklearn.feature_extraction import DictVectorizer
from sklearn.preprocessing import StandardScaler, OrdinalEncoder
# from sklearn.externals import joblib # if dump fails
import joblib
import importlib
from functools import reduce
import tqdm
# import weka.core.jvm as jvm
import warnings
# warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=FutureWarning)
class DataFormatter:
def __init__(self, dataDir):
self.dataDir = dataDir
self.fmHead = 0
self.scaler_mod = 'sklearn.preprocessing'
def getJson(self):
return 'load Json'
def getGT(self, data, gt='target'):
if gt is None:
logger.warning('[{}] : [WARN] Ground truth column not defined, fetching last column as target'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
features = data.columns[:-1]
X = data[features]
y = data.iloc[:, -1].values
else:
logger.info('[{}] : [INFO] Ground truth column set to {} '.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), gt))
y = data[gt].values
X = data.drop([gt], axis=1)
return X, y
def computeOnColumns(self, df,
operations,
remove_filtered=True):
if operations:
if 'STD' in list(operations.keys()):
std = operations['STD']
else:
std = None
if 'Mean' in list(operations.keys()):
mean = operations['Mean']
else:
mean = None
if 'Median' in list(operations.keys()):
median = operations['Median']
else:
median = None
all_processed_columns = []
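            # Track every source column used by an aggregation so it can optionally be dropped afterwards.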
            if std is not None:
for cl_std in std:
for ncol_n, fcol_n in cl_std.items():
df_std = self.filterColumns(df, lColumns=fcol_n)
logger.info('[{}] : [INFO] Computing standard deviation {} on columns {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ncol_n, fcol_n))
std_df = df_std.std(axis=1, skipna=True)
df[ncol_n] = std_df
for c in fcol_n:
all_processed_columns.append(c)
            if mean is not None:
for cl_mean in mean:
for ncol_n, fcol_n in cl_mean.items():
df_mean = self.filterColumns(df, lColumns=fcol_n)
logger.info('[{}] : [INFO] Computing mean {} on columns {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ncol_n, fcol_n))
mean_df = df_mean.mean(axis=1, skipna=True)
df[ncol_n] = mean_df
for c in fcol_n:
all_processed_columns.append(c)
            if median is not None:
for cl_median in median:
for ncol_n, fcol_n in cl_median.items():
df_median = self.filterColumns(df, lColumns=fcol_n)
logger.info('[{}] : [INFO] Computing median {} on columns {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), ncol_n, fcol_n))
median_df = df_median.median(axis=1, skipna=True)
df[ncol_n] = median_df
for c in fcol_n:
all_processed_columns.append(c)
if "Method" in list(operations.keys()):
df = self.__operationMethod(operations['Method'], data=df)
if remove_filtered:
unique_all_processed_columns = list(set(all_processed_columns))
                logger.warning('[{}] : [WARN] Dropping columns used for computation: {}'.format(
                    datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), unique_all_processed_columns))
self.dropColumns(df, unique_all_processed_columns, cp=False)
else:
logger.info('[{}] : [INFO] No data operations/augmentations defined'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
logger.info('[{}] : [INFO] Augmented data shape {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), df.shape))
return df
def filterColumns(self, df, lColumns):
'''
:param df: -> dataframe
:param lColumns: -> column names
:return: -> filtered df
'''
if not isinstance(lColumns, list):
logger.error('[%s] : [ERROR] Dataformatter filter method expects list of column names not %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(lColumns))
sys.exit(1)
        missing = [col for col in lColumns if col not in df.columns.values]
        if missing:
            logger.error('[%s] : [ERROR] Dataformatter filter method unknown columns %s',
                         datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), missing)
            sys.exit(1)
return df[lColumns]
def filterWildcard(self, df, wild_card, keep=False):
"""
:param df: dataframe to filer
:param wild_card: str wildcard of columns to be filtered
:param keep: if keep True, only cols with wildcard are kept, if False they will be deleted
:return: filtered dataframe
"""
filtr_list = []
mask = df.columns.str.contains(wild_card)
filtr_list.extend(list(df.loc[:, mask].columns.values))
logger.info('[%s] : [INFO] Columns to be filtered based on wildcard: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), filtr_list)
if keep:
df_wild = df[filtr_list]
else:
df_wild = df.drop(filtr_list, axis=1)
logger.info('[%s] : [INFO] Filtered shape: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), df_wild.shape)
# print("Columns of filtered data:")
# print(df_concat_filtered.columns)
return df_wild
def filterRows(self, df, ld, gd=0):
'''
:param df: -> dataframe
:param ld: -> less then key based timeframe in utc
:param gd: -> greter then key based timeframe in utc
:return: -> new filtered dataframe
'''
if gd:
try:
df = df[df.time > gd]
return df[df.time < ld]
except Exception as inst:
logger.error('[%s] : [ERROR] Dataformatter filter method row exited with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
sys.exit(1)
else:
try:
return df[df.time < ld]
except Exception as inst:
logger.error('[%s] : [ERROR] Dataformatter filter method row exited with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
sys.exit(1)
def dropColumns(self, df, lColumns, cp=True):
'''
Inplace true means the selected df will be modified
:param df: dataframe
:param lColumns: filtere clolumns
:param cp: create new df
'''
if cp:
try:
return df.drop(lColumns, axis=1)
except Exception as inst:
logger.error('[%s] : [ERROR] Dataformatter filter method drop columns exited with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
sys.exit(1)
else:
try:
df.drop(lColumns, axis=1, inplace=True)
except Exception as inst:
logger.error('[%s] : [ERROR] Dataformatter filter method drop columns exited with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
sys.exit(1)
return 0
def filterLowVariance(self, df):
logger.info('[{}] : [INFO] Checking low variance columns ...'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
uniques = df.apply(lambda x: x.nunique())
rm_columns = []
for uindex, uvalue in uniques.iteritems():
if uvalue == 1:
rm_columns.append(uindex)
logger.info('[{}] : [INFO] Found {} low variance columns removing ...'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), len(rm_columns)))
logger.debug('[{}] : [INFO] Found {} low variance columns: {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), len(rm_columns), rm_columns))
df.drop(rm_columns, inplace=True, axis=1)
def fillMissing(self, df):
logger.info('[{}] : [INFO] Filling in missing values with 0'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
df.fillna(0, inplace=True)
def dropMissing(self, df):
logger.info('[{}] : [INFO] Dropping columns with in missing values'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
df.dropna(axis=1, how='all', inplace=True)
def merge(self, csvOne, csvTwo, merged):
'''
:param csvOne: first csv to load
:param csvTwo: second csv to load
:param merged: merged file name
:return:
'''
fone = pd.read_csv(csvOne)
ftwo = pd.read_csv(csvTwo)
mergedCsv = fone.merge(ftwo, on='key')
mergedCsv.to_csv(merged, index=False)
logger.info('[%s] : [INFO] Merged %s and %s into %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'),
str(csvOne), str(csvTwo), str(merged))
def merge2(self, csvOne, csvTwo, merged):
'''
Second version
:param csvOne: first csv to load
:param csvTwo: second csv to load
:param merged: merged file name
:return:
'''
fone = pd.read_csv(csvOne)
ftwo = pd.read_csv(csvTwo)
mergedCsv = pd.concat([fone, ftwo], axis=1, keys='key')
mergedCsv.to_csv(merged, index=False)
def mergeall(self, datadir, merged):
'''
:param datadir: -> datadir lication
:param merged: -> name of merged file
:return:
'''
all_files = glob.glob(os.path.join(datadir, "*.csv"))
df_from_each_file = (pd.read_csv(f) for f in all_files)
concatDF = pd.concat(df_from_each_file, ignore_index=True)
concatDF.to_csv(merged)
def chainMerge(self, lFiles, colNames, iterStart=1):
'''
:param lFiles: -> list of files to be opened
:param colNames: -> dict with master column names
:param iterStart: -> start of iteration default is 1
:return: -> merged dataframe
'''
#Parsing colNames
slaveCol = {}
for k, v in colNames.items():
slaveCol[k] = '_'.join([v.split('_')[0], 'slave'])
dfList = []
if all(isinstance(x, str) for x in lFiles):
for f in lFiles:
df = pd.read_csv(f)
dfList.append(df)
elif all(isinstance(x, pd.DataFrame) for x in lFiles):
dfList = lFiles
else:
logger.error('[%s] : [ERROR] Cannot merge type %s ',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(type(dfList[0])))
sys.exit(1)
# Get first df and set as master
current = dfList[0].rename(columns=colNames)
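        # Merge each remaining frame on 'key', suffixing its columns with the running slave index.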
for i, frame in enumerate(dfList[1:], iterStart):
iterSlave = {}
for k, v in slaveCol.items():
iterSlave[k] = v+str(i)
current = current.merge(frame).rename(columns=iterSlave)
#current.to_csv(mergedFile)
# current.set_index('key', inplace=True)
return current
def chainMergeNR(self, interface=None, memory=None, load=None, packets=None):
'''
:return: -> merged dataframe System metrics
'''
if interface is None and memory is None and load is None and packets is None:
interface = os.path.join(self.dataDir, "Interface.csv")
memory = os.path.join(self.dataDir, "Memory.csv")
load = os.path.join(self.dataDir, "Load.csv")
packets = os.path.join(self.dataDir, "Packets.csv")
lFiles = [interface, memory, load, packets]
return self.listMerge(lFiles)
def chainMergeDFS(self, dfs=None, dfsfs=None, fsop=None):
'''
:return: -> merged dfs metrics
'''
if dfs is None and dfsfs is None and fsop is None:
dfs = os.path.join(self.dataDir, "DFS.csv")
dfsfs = os.path.join(self.dataDir, "DFSFS.csv")
fsop = os.path.join(self.dataDir, "FSOP.csv")
lFiles = [dfs, dfsfs, fsop]
return self.listMerge(lFiles)
def chainMergeCluster(self, clusterMetrics=None, queue=None, jvmRM=None):
'''
:return: -> merged cluster metrics
'''
if clusterMetrics is None and queue is None and jvmRM is None:
clusterMetrics = os.path.join(self.dataDir, "ClusterMetrics.csv")
queue = os.path.join(self.dataDir, "ResourceManagerQueue.csv")
jvmRM = os.path.join(self.dataDir, "JVM_RM.csv")
# jvmmrapp = os.path.join(self.dataDir, "JVM_MRAPP.csv")
lFiles = [clusterMetrics, queue, jvmRM]
return self.listMerge(lFiles)
def chainMergeNM(self, lNM=None, lNMJvm=None, lShuffle=None):
'''
:return: -> merged namemanager metrics
'''
# Read files
if lNM is None and lNMJvm is None and lShuffle is None:
allNM = glob.glob(os.path.join(self.dataDir, "NM_*.csv"))
allNMJvm = glob.glob(os.path.join(self.dataDir, "JVM_NM_*.csv"))
allShuffle = glob.glob(os.path.join(self.dataDir, "Shuffle_*.csv"))
else:
allNM =lNM
allNMJvm = lNMJvm
allShuffle = lShuffle
# Get column headers and gen dict with new col headers
colNamesNM = csvheaders2colNames(allNM[0], 'slave1')
df_NM = self.chainMerge(allNM, colNamesNM, iterStart=2)
colNamesJVMNM = csvheaders2colNames(allNMJvm[0], 'slave1')
df_NM_JVM = self.chainMerge(allNMJvm, colNamesJVMNM, iterStart=2)
colNamesShuffle = csvheaders2colNames(allShuffle[0], 'slave1')
df_Shuffle = self.chainMerge(allShuffle, colNamesShuffle, iterStart=2)
return df_NM, df_NM_JVM, df_Shuffle
def chainMergeDN(self, lDN=None):
'''
:return: -> merged datanode metrics
'''
# Read files
if lDN is None:
allDN = glob.glob(os.path.join(self.dataDir, "DN_*.csv"))
else:
allDN = lDN
# Get column headers and gen dict with new col headers
colNamesDN = csvheaders2colNames(allDN[0], 'slave1')
df_DN = self.chainMerge(allDN, colNamesDN, iterStart=2)
return df_DN
def chainMergeCassandra(self, lcassandra):
'''
:param lcassandra: -> list of cassandra dataframes
:return: -> merged Cassandra metrics
'''
# Read files
# Get column headers and gen dict with new col headers
colNamesCa = csvheaders2colNames(lcassandra[0], 'node1')
df_CA = self.chainMerge(lcassandra, colNamesCa, iterStart=2)
return df_CA
def chainMergeMongoDB(self, lmongo):
'''
:param lmongo: -> list of mongodb dataframes
:return: -> merged mongodb metrics
'''
# Read files
# Get column headers and gen dict with new col headers
colNamesMD = csvheaders2colNames(lmongo[0], 'node1')
df_MD = self.chainMerge(lmongo, colNamesMD, iterStart=2)
return df_MD
def listMerge(self, lFiles):
'''
:param lFiles: -> list of files
:return: merged dataframe
:note: Only use if dataframes have divergent headers
'''
dfList = []
if all(isinstance(x, str) for x in lFiles):
for f in lFiles:
if not f:
logger.warning('[%s] : [WARN] Found empty string instead of abs path ...',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
try:
df = pd.read_csv(f)
except Exception as inst:
logger.error('[%s] : [ERROR] Cannot load file at %s exiting',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), f)
sys.exit(1)
dfList.append(df)
elif all(isinstance(x, pd.DataFrame) for x in lFiles):
dfList = lFiles
else:
incomp = []
for el in lFiles:
if not isinstance(el, pd.DataFrame):
incomp.append(type(el))
logger.error('[%s] : [ERROR] Incompatible type detected for merging, cannot merge type %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(incomp))
# for d in dfList:
# if d.empty:
# logger.warning('[%s] : [INFO] Detected empty dataframe in final merge, removing ...',
# datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
#
# dfList.pop(dfList.index(d))
try:
current = reduce(lambda x, y: pd.merge(x, y, on='key'), dfList)
except Exception as inst:
logger.error('[%s] : [ERROR] Merge dataframes exception %s with args %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
logger.error('[%s] : [ERROR] Merge dataframes exception df list %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), dfList)
sys.exit(1)
# current.set_index('key', inplace=True)
return current
def df2csv(self, dataFrame, mergedFile):
'''
:param dataFrame: dataframe to save as csv
:param mergedFile: merged csv file name
:return:
'''
# dataFrame.set_index('key', inplace=True) -> if inplace it modifies all copies of df including
# in memory resident ones
if dataFrame.empty:
logger.error('[%s] : [ERROR] Received empty dataframe for %s ',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), mergedFile)
print("Received empty dataframe for %s " % mergedFile)
sys.exit(1)
if dataFrame.index.name == 'key':
kDF = dataFrame
else:
try:
kDF = dataFrame.set_index('key')
except Exception as inst:
logger.error('[%s] : [ERROR] Cannot write dataframe exception %s with arguments %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
print(dataFrame.index.name)
sys.exit(1)
kDF.to_csv(mergedFile)
def chainMergeSystem(self, linterface=None, lload=None, lmemory=None, lpack=None):
        logger.info('[%s] : [INFO] Starting system metrics merge .......',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
# Read files
if linterface is None and lload is None and lmemory is None and lpack is None:
allIterface = glob.glob(os.path.join(self.dataDir, "Interface_*.csv"))
allLoad = glob.glob(os.path.join(self.dataDir, "Load_*.csv"))
allMemory = glob.glob(os.path.join(self.dataDir, "Memory_*.csv"))
allPackets = glob.glob(os.path.join(self.dataDir, "Packets_*.csv"))
# Name of merged files
mergedInterface = os.path.join(self.dataDir, "Interface.csv")
mergedLoad = os.path.join(self.dataDir, "Load.csv")
mergedMemory = os.path.join(self.dataDir, "Memory.csv")
mergedPacket = os.path.join(self.dataDir, "Packets.csv")
ftd = 1
else:
allIterface = linterface
allLoad = lload
allMemory = lmemory
allPackets = lpack
ftd = 0
colNamesInterface = {'rx': 'rx_master', 'tx': 'tx_master'}
df_interface = self.chainMerge(allIterface, colNamesInterface)
logger.info('[%s] : [INFO] Interface metrics merge complete',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
colNamesPacket = {'rx': 'rx_master', 'tx': 'tx_master'}
df_packet = self.chainMerge(allPackets, colNamesPacket)
logger.info('[%s] : [INFO] Packet metrics merge complete',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
colNamesLoad = {'shortterm': 'shortterm_master', 'midterm': 'midterm_master', 'longterm': 'longterm_master'}
df_load = self.chainMerge(allLoad, colNamesLoad)
logger.info('[%s] : [INFO] Load metrics merge complete',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
colNamesMemory = {'cached': 'cached_master', 'buffered': 'buffered_master',
'used': 'used_master', 'free': 'free_master'}
df_memory = self.chainMerge(allMemory, colNamesMemory)
logger.info('[%s] : [INFO] Memory metrics merge complete',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
        logger.info('[%s] : [INFO] System metrics merge complete',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
if ftd:
self.df2csv(df_interface, mergedInterface)
self.df2csv(df_packet, mergedPacket)
self.df2csv(df_load, mergedLoad)
self.df2csv(df_memory, mergedMemory)
return 0
else:
return df_interface, df_load, df_memory, df_packet
def mergeFinal(self, dfs=None, cluster=None, nodeMng=None, jvmnodeMng=None, dataNode=None, jvmNameNode=None, shuffle=None, system=None):
if dfs is None and cluster is None and nodeMng is None and jvmnodeMng is None and dataNode is None and jvmNameNode is None and system is None and shuffle is None:
dfs = os.path.join(self.dataDir, "DFS_Merged.csv")
cluster = os.path.join(self.dataDir, "Cluster_Merged.csv")
nodeMng = os.path.join(self.dataDir, "NM_Merged.csv")
jvmnodeMng = os.path.join(self.dataDir, "JVM_NM_Merged.csv")
dataNode = os.path.join(self.dataDir, "NM_Shuffle.csv")
system = os.path.join(self.dataDir, "System.csv")
jvmNameNode = os.path.join(self.dataDir, "JVM_NN.csv")
shuffle = os.path.join(self.dataDir, "Merged_Shuffle.csv")
lFile = [dfs, cluster, nodeMng, jvmnodeMng, dataNode, jvmNameNode, shuffle, system]
merged_df = self.listMerge(lFile)
merged_df.sort_index(axis=1, inplace=True)
# merged_df.set_index('key', inplace=True)
#self.dropMissing(merged_df)
self.fillMissing(merged_df)
self.fmHead = list(merged_df.columns.values)
return merged_df
def dict2csv(self, response, query, filename, df=False):
'''
:param response: elasticsearch response
        :param query: elasticsearch query
:param filename: name of file
:param df: if set to true method returns dataframe and doesn't save to file.
:return: 0 if saved to file and dataframe if not
'''
requiredMetrics = []
logger.info('[%s] : [INFO] Started response to csv conversion',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
# print "This is the query _------------_-> %s" %query
# print "This is the response _------------_-> %s" %response
for key, value in response['aggregations'].items():
for k, v in value.items():
for r in v:
dictMetrics = {}
# print "This is the dictionary ---------> %s " % str(r)
for rKey, rValue in r.items():
if rKey == 'doc_count' or rKey == 'key_as_string':
pass
elif rKey == 'key':
logger.debug('[%s] : [DEBUG] Request has keys %s and values %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), rKey, rValue)
# print "%s -> %s"% (rKey, rValue)
dictMetrics['key'] = rValue
elif list(query['aggs'].values())[0].values()[1].values()[0].values()[0].values()[0] == 'type_instance.raw' \
or list(query['aggs'].values())[0].values()[1].values()[0].values()[0].values()[0] == 'type_instance':
logger.debug('[%s] : [DEBUG] Detected Memory type aggregation', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
# print "This is rValue ________________> %s" % str(rValue)
# print "Keys of rValue ________________> %s" % str(rValue.keys())
try:
for val in rValue['buckets']:
dictMetrics[val['key']] = val['1']['value']
except Exception as inst:
logger.error('[%s] : [ERROR] Failed to find key with %s and %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), rKey, rValue['value'])
sys.exit(1)
else:
# print "Values -> %s" % rValue
# print "rKey -> %s" % rKey
# print "This is the rValue ___________> %s " % str(rValue)
logger.debug('[%s] : [DEBUG] Request has keys %s and flattened values %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), rKey, rValue['value'])
dictMetrics[rKey] = rValue['value']
requiredMetrics.append(dictMetrics)
# print "Required Metrics -> %s" % requiredMetrics
csvOut = os.path.join(self.dataDir, filename)
cheaders = []
if list(query['aggs'].values())[0].values()[1].values()[0].values()[0].values()[0] == "type_instance.raw" or \
list(query['aggs'].values())[0].values()[1].values()[0].values()[0].values()[0] == 'type_instance':
logger.debug('[%s] : [DEBUG] Detected Memory type query', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
try:
cheaders = list(requiredMetrics[0].keys())
except IndexError:
                logger.error('[%s] : [ERROR] Empty response detected from DMon, stopping detection, check DMon.', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
                print("Empty response detected from DMon, stopping detection, check DMon")
sys.exit(1)
else:
kvImp = {}
for qKey, qValue in query['aggs'].items():
logger.info('[%s] : [INFO] Value aggs from query %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), qValue['aggs'])
for v, t in qValue['aggs'].items():
kvImp[v] = t['avg']['field']
cheaders.append(v)
cheaders.append('key')
for key, value in kvImp.items():
cheaders[cheaders.index(key)] = value
for e in requiredMetrics:
for krep, vrep in kvImp.items():
e[vrep] = e.pop(krep)
logger.info('[%s] : [INFO] Dict translator %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(kvImp))
logger.info('[%s] : [INFO] Headers detected %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(cheaders))
if not df:
try:
with open(csvOut, 'wb') as csvfile:
w = csv.DictWriter(csvfile, cheaders)
w.writeheader()
for metrics in requiredMetrics:
if set(cheaders) != set(metrics.keys()):
logger.error('[%s] : [ERROR] Headers different from required metrics: headers -> %s, metrics ->%s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(cheaders),
str(list(metrics.keys())))
diff = list(set(metrics.keys()) - set(cheaders))
print("Headers different from required metrics with %s " % diff)
print("Check qInterval setting for all metrics. Try increasing it!")
sys.exit(1)
w.writerow(metrics)
csvfile.close()
except EnvironmentError:
logger.error('[%s] : [ERROR] File %s could not be created', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), csvOut)
sys.exit(1)
logger.info('[%s] : [INFO] Finished csv %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), filename)
return 0
else:
df = pd.DataFrame(requiredMetrics)
# df.set_index('key', inplace=True)
logger.info('[%s] : [INFO] Created dataframe',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
return df
def prtoDF(self, data,
checkpoint=False,
verbose=False,
index=None,
detect=False):
"""
From PR backend to dataframe
        :param data: PR response JSON
        :param checkpoint: if True, persist the resulting dataframe to csv in dataDir
        :param verbose: if True, show a tqdm progress bar while parsing the response
        :param index: optional column name to set as the dataframe index
        :param detect: if True, use the detection csv file name when checkpointing
        :return: dataframe
"""
if not data:
logger.error('[{}] : [ERROR] PR query response is empty, exiting.'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
sys.exit(2)
df = pd.DataFrame()
df_time = pd.DataFrame()
if verbose:
dr = tqdm.tqdm(data['data']['result'])
else:
dr = data['data']['result']
for el in dr:
metric_name = el['metric']['__name__']
instance_name = el['metric']['instance']
new_metric = "{}_{}".format(metric_name, instance_name)
values = el['values']
proc_val = []
proc_time = []
for val in values:
proc_val.append(val[1])
proc_time.append(val[0])
df[new_metric] = proc_val
time_new_metric = "time_{}".format(new_metric)
df_time[time_new_metric] = proc_time
        # Calculate the mean timestamp across all metrics
        df_time['mean'] = df_time.mean(axis=1)
        # Round the mean timestamps up with np.ceil
        df_time['mean'] = df_time['mean'].apply(np.ceil)
        # Add the mean timestamp to the rest of the metrics
df['time'] = df_time['mean']
logger.info('[{}] : [INFO] PR query resulted in dataframe of size: {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), df.shape))
if index is not None:
df.set_index(index, inplace=True)
logger.warning('[{}] : [WARN] PR query dataframe index set to {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), index))
if checkpoint:
if detect:
pr = "pr_data_detect.csv"
else:
pr = "pr_data.csv"
pr_csv_loc = os.path.join(self.dataDir, pr)
df.to_csv(pr_csv_loc, index=True)
logger.info('[{}] : [INFO] PR query dataframe persisted to {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), self.dataDir))
return df
def df2dict(self, df):
kdf = df.set_index('key')
return kdf.to_dict()
# def dict2arff(self, fileIn, fileOut):
# '''
# :param fileIn: name of csv file
# :param fileOut: name of new arff file
# :return:
# '''
# dataIn = os.path.join(self.dataDir, fileIn)
# dataOut = os.path.join(self.dataDir, fileOut)
# logger.info('[%s] : [INFO] Starting conversion of %s to %s', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), dataIn, dataOut)
# try:
# jvm.start()
# convertCsvtoArff(dataIn, dataOut)
# except Exception as inst:
# pass
# finally:
# logger.error('[%s] : [ERROR] Exception occured while converting to arff with %s and %s', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
# jvm.stop()
# logger.info('[%s] : [INFO] Finished conversion of %s to %s', datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), dataIn, dataOut)
def normalize(self, dataFrame):
'''
:param dataFrame: dataframe to be normalized
:return: normalized data frame
'''
        dataFrame_norm = (dataFrame - dataFrame.mean()) / (dataFrame.max() - dataFrame.min())
return dataFrame_norm
def loadData(self, csvList=[]):
'''
:param csvList: list of CSVs
:return: list of data frames
'''
if csvList:
all_files = csvList
else:
all_files = glob.glob(os.path.join(self.dataDir, "*.csv"))
#df_from_each_file = (pd.read_csv(f) for f in all_files)
dfList = []
for f in all_files:
df = pd.read_csv(f)
dfList.append(df)
return dfList
def toDF(self, fileName):
'''
:param fileName: absolute path to file
:return: dataframe
'''
if not os.path.isfile(fileName):
print("File %s does not exist, cannot load data! Exiting ..." % str(fileName))
logger.error('[%s] : [ERROR] File %s does not exist',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(fileName))
sys.exit(1)
df = pd.read_csv(fileName)
return df
def dtoDF(self, dlist):
'''
:param dlist: list of dictionaries
:return: dataframe
'''
df = pd.DataFrame(dlist)
return df
def df2BytesIO(self, df):
out = io.BytesIO()
self.df2csv(df, out)
return out
def df2cStringIO(self, df):
out = StringIO.StringIO()
self.df2csv(df, out)
return out
def ohEncoding(self, data,
cols=None,
replace=True):
if cols is None:
cols = []
for el, v in data.dtypes.items():
if v == 'object':
if el == 'time':
pass
else:
cols.append(el)
logger.info('[%s] : [INFO] Categorical features not set, detected as categorical: %s',
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(cols))
logger.info('[{}] : [INFO] Categorical features now set to {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), str(cols)))
vec = DictVectorizer()
mkdict = lambda row: dict((col, row[col]) for col in cols)
vecData = pd.DataFrame(vec.fit_transform(data[cols].apply(mkdict, axis=1)).toarray())
vecData.columns = vec.get_feature_names()
vecData.index = data.index
if replace is True:
data = data.drop(cols, axis=1)
data = data.join(vecData)
return data, vecData, vec
def scale(self, data,
scaler_type=None,
rindex='time'): # todo, integrate
if not scaler_type:
logger.warning('[{}] : [WARN] No data scaling used!'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
return data
if scaler_type is None:
scaler_type = {"StandardScaler": {"copy": True, "with_mean": True, "with_std": True}}
            logger.warning('[{}] : [WARN] No user defined scaler, using default {}'.format(datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), scaler_type))
scaler_name = list(scaler_type.keys())[-1]
scaler_attr = list(scaler_type.values())[-1]
logger.info('[{}] : [INFO] Scaler set to {} with parameters {}.'.format(datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), scaler_name, scaler_attr))
try:
sc_mod = importlib.import_module(self.scaler_mod)
scaler_instance = getattr(sc_mod, scaler_name)
scaler = scaler_instance(**scaler_attr)
except Exception as inst:
logger.error('[{}] : [ERROR] Error while initializing scaler {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), scaler_name))
sys.exit(2)
# Fit and transform data
logger.info('[{}] : [INFO] Scaling data ...'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
scaled_data = scaler.fit_transform(data)
# Transform numpy array into dataframe, re-add columns to scaled numpyarray
df_scaled = pd.DataFrame(scaled_data, columns=data.columns)
df_scaled[rindex] = list(data.index)
df_scaled.set_index(rindex, inplace=True)
scaler_file = '{}.scaler'.format(scaler_name)
logger.info('[{}] : [INFO] Saving scaler instance {} ...'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), scaler_file))
scale_file_location = os.path.join(self.dataDir, scaler_file)
joblib.dump(scaler, filename=scale_file_location)
return df_scaled
def load_scaler(self, data,
scaler_loc,
rindex='time'):
scaler = joblib.load(scaler_loc)
sdata = scaler.transform(data)
# Transform numpy array into dataframe, re-add columns to scaled numpyarray
df_scaled = pd.DataFrame(sdata, columns=data.columns)
df_scaled[rindex] = list(data.index)
df_scaled.set_index(rindex, inplace=True)
return df_scaled
def __operationMethod(self, method,
data):
try:
logger.info('[{}] : [INFO] Loading user defined operation'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
data_op = method(data)
except Exception as inst:
logger.error('[{}] : [ERROR] Failed to load user operation with {} and {}'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args))
return data
logger.info('[{}] : [INFO] Finished user operation'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
return data_op
def labelEncoding(self, data_column):
logger.info('[{}] : [INFO] Label encoding ...'.format(
datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')))
enc = OrdinalEncoder()
enc.fit(data_column)
enc_data_column = enc.transform(data_column)
return enc_data_column
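# --- Illustrative usage sketch (not part of the original module) ---
# `formatter` is assumed to be an instance of the data-formatting class defined
# above, with `dataDir` pointing at a directory of collected metric CSVs; the
# file name "System.csv" is only an example.
def _example_format_pipeline(formatter):
    df = formatter.toDF(os.path.join(formatter.dataDir, "System.csv"))
    df, df_vec, vec = formatter.ohEncoding(df, replace=True)  # one-hot encode categorical columns
    return formatter.normalize(df)  # mean-centred min-max scaling as defined above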
|
import os
import sys
__version__ = "0.3.2"
from .spectrum import Spectrum
INSTDIR = os.path.join(sys.prefix, 'starfish_inst_data')
__all__ = [
"constants",
"emulator",
"grid_tools",
"models",
"samplers",
"spectrum",
"Spectrum",
"transforms",
"utils",
]
|
import numpy as np
import pandas as pd
from scipy.special import expit
from scipy.special import logit
from scipy.stats import lognorm
from itertools import product
import sys
from operator import mul
from functools import reduce
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern
# Functional form last
# Stationarity first
# Colinearity middle
# Sampling middle
# Measurement occasions
def random_offset(value):
if value<=0.0001:
return(value+np.random.uniform(low=0.0005, high=0.05))
elif value>=0.9999:
return(value-np.random.uniform(low=0.0005, high=0.05))
else:
return(value)
def piecewise_sample(stationarity_change_points,is_high,index,max_index):
high=stationarity_change_points[max_index]
low=0
if is_high:
high=stationarity_change_points[index]
if index>0:
low=stationarity_change_points[index-1]
else:
low=stationarity_change_points[index]
if index<max_index:
high=stationarity_change_points[index+1]
return(np.random.uniform(low=low, high=high))
def normalize_adjustment(feature_values):
mod_vals=np.ceil(feature_values)
vals=(mod_vals==1)
vals2=(mod_vals==2)*1.1
vals3=(mod_vals==3)*1.25
vals4=(mod_vals>3)*1.5
return(vals+vals2+vals3+vals4)
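# Worked example (not in the original source): feature magnitudes are bucketed by
# their ceiling, e.g. normalize_adjustment(np.array([0.5, 1.7, 2.3, 5.0]))
# returns approximately [1.0, 1.1, 1.25, 1.5].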
def get_sample_times(bucket,measurment_occaisons,feature_values,abnormal_ratio,measure_max,sampling_function=None):
if bucket == "equal":
return(np.linspace(0,((0.9999-abnormal_ratio)+abnormal_ratio*(measurment_occaisons/measure_max)),measurment_occaisons))
elif bucket == "random":
return(np.random.uniform(low=0.0, high=((1-abnormal_ratio)+abnormal_ratio*(measurment_occaisons/measure_max)), size=measurment_occaisons))
elif bucket == "not-random":
abnormal_x={}
either=[]
first=True
for feature in feature_values:
ab_x=np.abs(feature_values[feature])>=1
if first:
first=False
either=ab_x
else:
either=either+ab_x
neither=np.logical_not(either)
precent_ab=np.sum(either)/measurment_occaisons
step_size=(((1-abnormal_ratio)+abnormal_ratio*precent_ab*(measurment_occaisons/measure_max))/(measurment_occaisons-1))*np.ones(measurment_occaisons)
for feature in feature_values:
abnormal_x[feature]=(step_size/normalize_adjustment(np.abs(feature_values[feature])))*either
abnormal_x[feature]=np.insert(abnormal_x[feature],0,0)[0:-1]
neither=np.insert(neither,0,False)[0:-1]
return(np.cumsum(step_size*neither+np.minimum.reduce([abnormal_x[x] for x in abnormal_x])))
elif bucket == "custom-no-features":
return(sampling_function(measurment_occaisons,abnormal_ratio,measure_max))
elif bucket == "custom-feature-values":
return(sampling_function(measurment_occaisons,feature_values,abnormal_ratio,measure_max))
def split_between_cutpoints(times,entity_to_split,stationarity_change_points):
stationarity_index=0
i=0
model_count=0
return_dict={}
items_to_sort=len(times)
periods=len(stationarity_change_points)
while i < items_to_sort:
s_time=1
if stationarity_index < periods:
s_time=stationarity_change_points[stationarity_index]
if times[i] < s_time:
if model_count not in return_dict:
return_dict[model_count]=[entity_to_split[i]]
else:
return_dict[model_count].append(entity_to_split[i])
i+=1
else:
stationarity_index+=1
model_count+=1
return(return_dict)
def get_colinearity(bucket,count_vector):
if bucket == "low-low":
return((0.1,round(np.random.uniform(low=0.005, high=0.04),3),np.random.uniform(0.01,0.33)))
elif bucket == "low-moderate":
return((0.1,round(np.random.uniform(low=0.05, high=0.12),2),np.random.uniform(0.01,0.33)))
elif bucket == "low-high":
return((0.1,round(np.random.uniform(low=0.13, high=0.3),2),np.random.uniform(0.01,0.33)))
elif bucket == "moderate-high":
return((round(np.random.uniform(low=0.75, high=0.9),2),1.5,np.random.uniform(0.33,0.66)))
elif bucket == "high-high":
return((round(np.random.uniform(low=1.0, high=2.0),2),1.5,np.random.uniform(0.66,0.999)))
def get_relative_time(sorted_time_points):
offset_time=np.insert(sorted_time_points,0,sorted_time_points[0])
offset_time=np.delete(offset_time,-1)
return(sorted_time_points-offset_time)
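# Worked example (not in the original source): the first gap is defined as zero,
# e.g. get_relative_time(np.array([0.1, 0.4, 0.5])) -> approximately [0.0, 0.3, 0.1]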
def get_stationarity_change_points(stationarity_count):
return(np.random.uniform(low=0.0, high=1.0, size=stationarity_count))
def binary_y(arr_y,probability_threshold):
if probability_threshold==None:
draw=np.random.uniform(0,1,arr_y.size)
return((draw<=arr_y).astype(int))
else:
return((probability_threshold<=arr_y).astype(int))
def data_sorter(b_value,quantile_cutoff,quantile_sub,b_var):
if b_value>quantile_cutoff:
return(quantile_sub+np.random.uniform(-1*np.sqrt(b_var)/10,np.sqrt(b_var)/10))
else:
return(np.abs(b_value))
class patient_model():
def __init__(self,period_index,b_values,coefficient_values,link_fn,obs_error,times,first_obs_index,relative_time,period_features,period_extraneous_variables,stationarity_trend_bucket,probability_threshold):
self.b_values={}
self.coefficient_values={}
for b in b_values:
self.b_values[b]=b_values[b]
for coefficient in coefficient_values:
self.coefficient_values[coefficient]=coefficient_values[coefficient][period_index]
self.time_points=times
self.num_obs=len(times)
self.link_fn=link_fn
self.obs_index=np.array(range(first_obs_index,first_obs_index+self.num_obs))
self.features=period_features
self.extraneous_variables=period_extraneous_variables
self.obs_error=obs_error
self.relative_time=relative_time
self.stationarity_trend_bucket=stationarity_trend_bucket
self.probability_threshold=probability_threshold
def generate_data(self):
y=np.ones(self.num_obs)*self.coefficient_values["intercept"]
y+=np.multiply(np.ones(self.num_obs)*self.coefficient_values["time"],self.time_points)
if self.stationarity_trend_bucket=="quadratic":
y+=np.multiply(np.ones(self.num_obs)*self.coefficient_values["trend-time"],np.sqrt(self.time_points))
elif self.stationarity_trend_bucket=="seasonal":
y+=np.multiply(np.ones(self.num_obs)*self.coefficient_values["trend-time"],np.cos(np.multiply(np.pi*10,self.time_points)))
for feature in self.features:
y+=self.coefficient_values[feature]*np.array(self.features[feature])
for effect in self.b_values:
if effect=="intercept":
y+=np.ones(self.num_obs)*self.b_values[effect]
elif effect=="time":
y+=np.multiply(np.ones(self.num_obs)*self.b_values[effect],self.time_points)
elif effect=="trend-time":
if self.stationarity_trend_bucket=="quadratic":
y+=np.multiply(np.ones(self.num_obs)*self.b_values[effect],np.sqrt(self.time_points))
elif self.stationarity_trend_bucket=="seasonal":
y+=np.multiply(np.ones(self.num_obs)*self.b_values[effect],np.cos(np.multiply(np.pi*10,self.time_points)))
else:
                y+=self.b_values[effect]*self.features[effect]
if self.link_fn=="identity":
self.y=y+self.obs_error
elif self.link_fn=="log":
self.y=np.exp(y)+self.obs_error
elif self.link_fn=="logit":
self.y_prob=np.maximum(expit(y),self.obs_error)
self.y=binary_y(self.y_prob,self.probability_threshold)
elif self.link_fn=="inverse":
self.y=np.power(y,-1)+self.obs_error
def export_data_frame(self):
if self.link_fn=="logit":
df={"obs_index":list(self.obs_index),"y":list(self.y),"y_prob":list(self.y_prob),"time":list(self.time_points),"relative_time":list(self.relative_time), "unobserved_error":list(self.obs_error)}
for feature in self.features:
df[feature]=self.features[feature]
for variable in self.extraneous_variables:
df[variable]=self.extraneous_variables[variable]
return(df)
else:
df={"obs_index":list(self.obs_index),"y":list(self.y),"time":list(self.time_points),"relative_time":list(self.relative_time), "unobserved_error":list(self.obs_error)}
for feature in self.features:
df[feature]=self.features[feature]
for variable in self.extraneous_variables:
df[variable]=self.extraneous_variables[variable]
return(df)
class patient:
def __init__(self,pat_id,b_values,features,coefficient_values,extraneous_variables,colinearity,stationarity_change_points
,measurements,sampling_bucket,link_fn,sigma_e,stationarity_trend_bucket,sampling_function,probability_threshold
,abnormal_ratio,measure_max,random_effects_links):
self.b_values=b_values
self.id=pat_id
self.coefficient_values=coefficient_values
self.models=[]
self.sigma_e=sigma_e
self.stationarity_trend_bucket=stationarity_trend_bucket
if measurements<1:
self.measure_count=1
else:
self.measure_count=int(measurements)
obs_error=[]
if link_fn=="identity":
obs_error=np.random.normal(0,self.sigma_e,self.measure_count)
elif link_fn=="log":
obs_error=np.random.poisson(self.sigma_e,self.measure_count)
elif link_fn=="logit":
new_sigma_e=(1.0-np.sqrt(1.0-4.0*self.sigma_e**(3)/self.measure_count))/2.0
obs_error=np.random.binomial(self.measure_count,new_sigma_e,self.measure_count)
elif link_fn=="inverse":
obs_error=np.random.gamma(1,np.sqrt(self.sigma_e)/self.measure_count,self.measure_count)
periods=len(stationarity_change_points)+1
feature_values={}
extraneous_variable_values={}
b_factor=0
if "features" in random_effects_links:
if len(self.b_values) > 0:
if "intercept" in self.b_values:
b_factor=self.b_values["intercept"]
elif "time" in self.b_values:
b_factor=self.b_values["time"]
elif "trend-time" in self.b_values:
b_factor=self.b_values["trend-time"]
else:
for b in b_values:
b_factor=self.b_values[b]
break
if (sampling_bucket=="not-random") | (sampling_bucket=="custom-feature-values"):
total_length=len(features)+len(extraneous_variables)
x_cov_matrix=np.ones((total_length,total_length))*colinearity[2]
np.fill_diagonal(x_cov_matrix,1)
x=np.random.multivariate_normal(tuple(np.ones(total_length)*b_factor), x_cov_matrix, self.measure_count)
index=0
for feature in features:
feature_values[feature]=x[:,index]
index+=1
index=0
for variable in extraneous_variables:
extraneous_variable_values[variable]=x[:,index]
index+=1
time_points=get_sample_times(sampling_bucket,self.measure_count,feature_values,abnormal_ratio,measure_max,sampling_function)
time_points=np.sort(time_points)
if (sampling_bucket!="not-random") & (sampling_bucket!="custom-feature-values"):
kernel=(1.0+np.abs(b_factor)) * Matern(length_scale=colinearity[0], length_scale_bounds=(1e-5, 1e5), nu=colinearity[1])
gp = GaussianProcessRegressor(kernel=kernel)
y_samples = gp.sample_y(time_points[:, np.newaxis],len(features))
index=0
for feature in features:
feature_values[feature]=y_samples.T[index]
index+=1
index=0
for variable in extraneous_variables:
extraneous_variable_values[variable]=y_samples.T[index]
index+=1
relative_time=get_relative_time(time_points)
sorted_times=split_between_cutpoints(time_points,time_points,stationarity_change_points)
relative_time=split_between_cutpoints(time_points,relative_time,stationarity_change_points)
for feature in feature_values:
feature_values[feature]=split_between_cutpoints(time_points,feature_values[feature],stationarity_change_points)
for variable in extraneous_variable_values:
extraneous_variable_values[variable]=split_between_cutpoints(time_points,extraneous_variable_values[variable],stationarity_change_points)
obs_error=split_between_cutpoints(time_points,obs_error,stationarity_change_points)
period_index=0
obs_index=1
for key in sorted_times:
period_features={}
period_extraneous_variables={}
for feature in feature_values:
period_features[feature]=feature_values[feature][key]
for variable in extraneous_variable_values:
period_extraneous_variables[variable]=extraneous_variable_values[variable][key]
self.models.append(patient_model(period_index,self.b_values,self.coefficient_values,link_fn,np.array(obs_error[key]),sorted_times[key],obs_index,relative_time[key],period_features,period_extraneous_variables,stationarity_trend_bucket,probability_threshold))
period_index+=1
obs_index+=len(sorted_times[key])
def export_to_data_frame(self):
first=True
return_frame={}
for model in self.models:
model.generate_data()
if first:
return_frame=model.export_data_frame()
first=False
else:
new_data=model.export_data_frame()
for key in return_frame:
return_frame[key].extend(new_data[key])
if self.measure_count>0:
return_frame['pat_id']=list(np.ones(len(return_frame["time"]))*self.id)
return(return_frame)
class long_data_set:
def __init__(self,n=2000,measurement_distribution="log-normal",measurement_parameters={"loc":25,"scale":5},collinearity_bucket="low-low",trend_bucket="linear",sampling_bucket="random"
,sampling_function=None,b_colin=0.13,beta_var=1,b_var=1,time_importance_factor=3,sigma_e=0.05,num_features=2,num_extraneous_variables=0,link_fn="identity",num_piecewise_breaks=0
,random_effects=["intercept","time","trend-time"],coefficient_values={},time_breaks=[],probability_threshold=None,random_effects_links=["timespan","features","measurements"]
,percentile_sort_cutoff=1,percentile_sub=1):
self.num_of_patients=n
self.measurement_distribution=measurement_distribution
self.measurement_parameters=measurement_parameters
self.colinearity_bucket=collinearity_bucket
self.stationarity_trend_bucket=trend_bucket
self.num_piecewise_breaks=num_piecewise_breaks
self.sampling_bucket=sampling_bucket
self.link_fn=link_fn
self.inflation_factor=time_importance_factor
self.beta_var=beta_var
self.b_var=b_var
self.b_colin=b_colin
self.sigma_e=sigma_e
self.sampling_function=sampling_function
self.percentile_sort_cutoff=percentile_sort_cutoff
self.percentile_sub=percentile_sub
self.random_effects_links=random_effects_links
###############################
self.features=[]
self.probability_threshold=probability_threshold
for i in range(num_features):
self.features.append("x"+str(i+1))
self.extraneous_variables=[]
for i in range(num_extraneous_variables):
self.extraneous_variables.append("ext_"+str(i+1))
self.random_effects=random_effects
self.coefficient_values=coefficient_values
self.time_breaks=time_breaks
###############################
def create_data_set(self):
if len(self.time_breaks)>0:
if len(self.time_breaks)!=self.num_piecewise_breaks:
raise ValueError('Number of specific time_breaks do not match num_piecewise_breaks')
else:
self.change_points=np.sort(self.time_breaks)
else:
self.change_points=np.sort(get_stationarity_change_points(self.num_piecewise_breaks))
measures=[]
if self.measurement_distribution=="equal":
measures=np.ones(self.num_of_patients)*self.measurement_parameters['loc']
elif self.measurement_distribution=="poisson":
measures=np.random.poisson(lam=self.measurement_parameters['loc'], size=self.num_of_patients)
measures=np.sort(measures)
elif self.measurement_distribution=="normal":
measures=np.random.normal(loc=self.measurement_parameters['loc'], scale=self.measurement_parameters['scale'], size=self.num_of_patients)
measures=np.sort(np.round(measures,0))
elif self.measurement_distribution=="log-normal":
measures=lognorm.rvs(0.75,loc=self.measurement_parameters['loc'], scale=self.measurement_parameters['scale'], size=self.num_of_patients, random_state=None)
measures=np.sort(np.round(measures,0))
elif self.measurement_distribution=="gamma":
measures=np.random.gamma(shape=self.measurement_parameters['loc'], scale=self.measurement_parameters['scale'], size=self.num_of_patients)
measures=np.sort(np.round(measures,0))
ro_x=get_colinearity(self.colinearity_bucket,self.num_of_patients)
b_cov_matrix=np.zeros((len(self.random_effects),len(self.random_effects)))
b_cov_matrix.fill(np.sqrt(self.b_var)*self.b_colin)
np.fill_diagonal(b_cov_matrix,self.b_var)
b=np.random.multivariate_normal(tuple(np.zeros(len(self.random_effects))), b_cov_matrix, self.num_of_patients)
self.b_dict={}
b_index=0
for effect in self.random_effects:
self.b_dict[effect]=b[:,b_index]
b_index+=1
if "measurements" in self.random_effects_links:
b_df=pd.DataFrame(self.b_dict)
if len(self.random_effects) > 0:
if "intercept" in self.b_dict:
cut_off=np.quantile(b_df["intercept"],self.percentile_sort_cutoff)
sub_value=np.quantile(b_df["intercept"].abs(),self.percentile_sub)
b_df['sort_col']=b_df["intercept"].apply(lambda v: data_sorter(v,cut_off,sub_value,self.b_var))
elif "time" in self.b_dict:
cut_off=np.quantile(b_df["time"],self.percentile_sort_cutoff)
sub_value=np.quantile(b_df["time"].abs(),self.percentile_sub)
b_df['sort_col']=b_df["time"].apply(lambda v: data_sorter(v,cut_off,sub_value,self.b_var))
elif "trend-time" in self.b_dict:
cut_off=np.quantile(b_df["trend-time"],self.percentile_sort_cutoff)
sub_value=np.quantile(b_df["trend-time"].abs(),self.percentile_sub)
b_df['sort_col']=b_df["trend-time"].apply(lambda v: data_sorter(v,cut_off,sub_value,self.b_var))
else:
sort_col=b_df.columns.values[0]
cut_off=np.quantile(sort_col,self.percentile_sort_cutoff)
b_df['sort_col']=b_df[sort_col]
sub_value=np.quantile(b_df['sort_col'].abs(),self.percentile_sub)
b_df['sort_col']=b_df["sort_col"].apply(lambda v: data_sorter(v,cut_off,sub_value,self.b_var))
b_df=b_df.sort_values(by=['sort_col'])
b_df=b_df.reset_index(drop=True)
for effect in self.random_effects:
self.b_dict[effect]=b_df[effect].values
long_data=[]
first=True
periods=len(self.change_points)+1
if len(self.coefficient_values)==0:
self.coefficient_values={"intercept":np.zeros(periods),"time":np.zeros(periods),"trend-time":np.zeros(periods)}
for feature in self.features:
self.coefficient_values[feature]=np.zeros(periods)
for i in range(periods):
for feature in self.coefficient_values:
if feature=="time":
self.coefficient_values[feature][i]=np.random.normal(self.coefficient_values[feature][i-1],self.inflation_factor*self.beta_var)
elif feature=="trend-time":
self.coefficient_values[feature][i]=np.random.normal(self.coefficient_values[feature][i-1],self.inflation_factor*self.beta_var)
else:
self.coefficient_values[feature][i]=np.random.normal(self.coefficient_values[feature][i-1],self.beta_var)
abnormal_ratio=0
if "timespan" in self.random_effects_links:
for i in range(19):
ratio=1.0/float(i+2)
min_step=(1.0-ratio)/(np.min(measures)-1)
ab_step=0.91/(np.max(measures)-1)
if min_step > (ab_step):
if ratio > abnormal_ratio:
abnormal_ratio=ratio
ratio=1.0-(1.0/float(i+2))
ab_step=0.91/(np.max(measures)-1)
if min_step > ab_step:
if ratio > abnormal_ratio:
abnormal_ratio=ratio
self.abnormal_ratio=abnormal_ratio
for p_id in range(self.num_of_patients):
b_values={}
for b in self.b_dict:
b_values[b]=self.b_dict[b][p_id]
pat=patient(p_id,b_values,self.features,self.coefficient_values,self.extraneous_variables,ro_x,self.change_points,measures[p_id],self.sampling_bucket,self.link_fn,self.sigma_e,self.stationarity_trend_bucket,self.sampling_function,self.probability_threshold,abnormal_ratio,np.max(measures),self.random_effects_links)
if first:
first=False
long_data=pat.export_to_data_frame()
else:
new_data=pat.export_to_data_frame()
for key in long_data:
long_data[key].extend(new_data[key])
self.data_frame=pd.DataFrame(long_data)
def export_to_csv(self,path_name,file_name):
self.data_frame.to_csv(path_name+"data_"+file_name+".csv")
param_values={"piecewise_shifts":np.append(self.change_points,[1]),"cons_":self.coefficient_values["intercept"],"time_linear":self.coefficient_values["time"]}
if self.stationarity_trend_bucket=="quadratic":
param_values["sqrt_time"]=self.coefficient_values["trend-time"]
elif self.stationarity_trend_bucket=="seasonal":
param_values["cos_time"]=self.coefficient_values["trend-time"]
for feature in self.features:
param_values[feature]=self.coefficient_values[feature]
pd.DataFrame(param_values).to_csv(path_name+"params_"+file_name+".csv")
def transform_variable_feature(self,column_names,transformation_function):
comparison_change_points=[0]+list(self.change_points)+[1]
if "time" in column_names:
self.change_points=transformation_function(self.change_points)
new_y=[]
new_prob_y=[]
new_x=[]
self.data_frame=self.data_frame.sort_values(by=['time'])
if self.link_fn=="logit":
self.data_frame["new_y"]=logit(self.data_frame["y_prob"])
elif self.link_fn=="log":
self.data_frame["new_y"]=np.log(self.data_frame["y"]-self.data_frame["unobserved_error"])
elif self.link_fn=="inverse":
self.data_frame["new_y"]=np.power(self.data_frame["y"]-self.data_frame["unobserved_error"],-1)
else:
self.data_frame["new_y"]=self.data_frame["y"]-self.data_frame["unobserved_error"]
for column in column_names:
for period_index in range(1,len(comparison_change_points)):
period_data_frame=self.data_frame[(self.data_frame['time'] > comparison_change_points[period_index-1]) & (self.data_frame['time'] <= comparison_change_points[period_index])]
period_y=[]
period_x=[]
period_x=transformation_function(period_data_frame[column])
if column in self.features:
period_y=period_data_frame["new_y"]+(self.coefficient_values[column][period_index-1]*(period_x-period_data_frame[column]))
if column in self.b_dict:
period_y=period_data_frame["new_y"]+np.multiply(self.b_dict[column][period_index-1],(period_x-period_data_frame[column]))
if column == "time":
if (self.stationarity_trend_bucket=="quadratic") | (self.stationarity_trend_bucket=="seasonal"):
period_y=period_data_frame["new_y"]+(self.coefficient_values["trend-time"][period_index-1]*(period_x-period_data_frame[column]))
if "trend-time" in self.b_dict:
period_y=period_data_frame["new_y"]+np.multiply(self.b_dict["trend-time"][period_index-1],(period_x-period_data_frame[column]))
if len(period_y)>0:
if period_index==1:
new_x=period_x
new_y=period_y
else:
new_x=np.concatenate([new_x,period_x])
new_y=np.concatenate([new_y,period_y])
self.data_frame["new_"+column]=new_x
self.data_frame["new_y"]=new_y
if self.link_fn=="logit":
self.data_frame["new_y_prob"]=expit(self.data_frame["new_y"])
self.data_frame["new_y"]=binary_y(self.data_frame["new_y_prob"])
elif self.link_fn=="log":
self.data_frame["new_y"]=np.exp(self.data_frame["new_y"])+self.data_frame["unobserved_error"]
elif self.link_fn=="inverse":
self.data_frame["new_y"]=np.power(self.data_frame["new_y"],-1)+period_data_frame["unobserved_error"]
else:
self.data_frame["new_y"]=self.data_frame["new_y"]+self.data_frame["unobserved_error"]
|
import jax.numpy as jnp
from jax import random
import chex
from jsl.experimental.seql.utils import classification_loss, regression_loss
class SequentialDataEnvironment:
def __init__(self, X_train: chex.Array,
y_train: chex.Array,
X_test: chex.Array,
y_test: chex.Array,
train_batch_size: int,
test_batch_size: int,
classification: bool):
ntrain, nfeatures = X_train.shape
ntest, _ = X_test.shape
_, out = y_train.shape
# TODO: It will produce an error if ntrain % train_batch_size != 0
ntrain_batches = ntrain // train_batch_size
# TODO: It will produce an error if ntest % test_batch_size != 0
ntest_batches = ntest // test_batch_size
self.X_train = jnp.reshape(X_train, [ntrain_batches, train_batch_size, nfeatures])
self.y_train = jnp.reshape(y_train, [ntrain_batches, train_batch_size, out])
self.X_test = jnp.reshape(X_test, [ntest_batches, test_batch_size, nfeatures])
self.y_test =jnp.reshape(y_test, [ntest_batches, test_batch_size, out])
if classification:
self.loss_fn = classification_loss
else:
self.loss_fn = regression_loss
def get_data(self, t: int):
return self.X_train[t], self.y_train[t], self.X_test, self.y_test
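    # Illustrative sketch (comments only, not part of the original class): a
    # typical sequential loop over the training batches would look roughly like
    #     env = SequentialDataEnvironment(X_tr, y_tr, X_te, y_te,
    #                                     train_batch_size=32,
    #                                     test_batch_size=64,
    #                                     classification=False)
    #     for t in range(env.X_train.shape[0]):
    #         X_t, y_t, X_test, y_test = env.get_data(t)
    #         # ... update an agent on (X_t, y_t), then score predictions via reward()
    # where the agent and its update rule are assumptions, not part of this file.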
def reward(self, mu_pred: chex.Array,
sigma_pred: chex.Array,
y_test: chex.Array):
loss = self.loss_fn(y_test, mu_pred, sigma_pred)
return -loss
def shuffle_data(self, key: chex.PRNGKey):
train_key, test_key = random.split(key)
ntrain = len(self.X_train)
train_indices = jnp.arange(ntrain)
train_indices = random.shuffle(train_key, train_indices)
self.X_train = self.X_train[train_indices]
self.y_train = self.y_train[train_indices]
ntest = len(self.X_test)
test_indices = jnp.arange(ntest)
test_indices = random.shuffle(test_key, test_indices)
self.X_test = self.X_test[test_indices]
self.y_test = self.y_test[test_indices]
def reset(self, key: chex.PRNGKey):
ntrain_batches, train_batch_size, nfeatures = self.X_train.shape
ntest_batches, test_batch_size, out = self.y_test.shape
self.shuffle_data(key)
self.X_train = jnp.reshape(self.X_train, [ntrain_batches, train_batch_size, nfeatures])
self.y_train = jnp.reshape(self.y_train, [ntrain_batches, train_batch_size, out])
self.X_test = jnp.reshape(self.X_test, [ntest_batches, test_batch_size, nfeatures])
self.y_test =jnp.reshape(self.y_test, [ntest_batches, test_batch_size, out]) |
import random
from selenium.common.exceptions import NoSuchWindowException
from selenium.webdriver.common.by import By
from . import *
URL = "https://web.vconf.garr.it/webapp/conference"
logger = logging.getLogger(__name__)
warnings.filterwarnings("ignore", category=UserWarning)
def run(room='videodrone', y4m='./y4m', lifetime=360,
headless=1, pin=None, **kwargs):
drone_name = build_drone_name(**kwargs)
url = kwargs.get('url') or URL
browser = get_chrome_browser(y4m=y4m, headless=headless)
browser.get(f'{url}/{room}')
time.sleep(3)
browser.find_element_by_id('display-name-dialog-input').send_keys(drone_name)
time.sleep(1)
browser.find_element_by_id('display-name-dialog-ok').click()
time.sleep(3)
browser.find_element_by_id('dialog-pin-input').send_keys(pin)
time.sleep(1)
browser.find_element_by_id('conference-pin-btn').click()
time.sleep(lifetime)
# leave the room
try:
browser.close()
except NoSuchWindowException as e:
        logger.warning('Browser already closed.')
    logger.info('Drone says goodbye ... Destroyed.')
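# Illustrative call (not part of the original module); the room name, PIN and
# y4m directory below are placeholders:
#   run(room='myroom', pin='1234', y4m='./y4m', lifetime=60, headless=1)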
|
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(
name='gap_loader',
version='0.3',
description='Import hook for GAP files in SAGE math.',
long_description=readme(),
long_description_content_type='text/markdown',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
],
url='http://github.com/jorants/gap-loader',
author='Joran van Apeldoorn (Control-K)',
author_email='[email protected]',
license='MIT',
packages=['gap_loader'],
install_requires=[
'sage',
],
zip_safe=False)
|
import re
def nice_print(t):
s = ""
for l in t:
s += "".join(l[250:]) + "\n"
print(s)
xmin = 108 - 1
w = 537-108+3
ymin = 3
h = 1913-3+1
clay = [["." for _ in range(w)] for _ in range(h)]
while True:
try:
line = input()
except:
break
# (x1, y1, x2, y2)
a, b, c = map(int,re.match(r".=(\d*), .=(\d*)..(\d*)", line).groups())
if line[0] == "x": #vertical
for i in range(b, c+1):
clay[i-ymin][a-xmin] = "#"
else: #horizontal
for i in range(b, c+1):
clay[a-ymin][i-xmin] = "#"
clay[0][500-xmin] = "o"
alive = [(0, 500-xmin)]
for i in range(46423):
# while len(alive) > 0:
print(len(alive))
if i > 46420:
nice_print(clay)
print(len(alive))
#print(alive)
j, i = alive.pop()
if j + 1 == h:
continue
if clay[j+1][i] == "o" and clay[j][i+1] in "o#" and clay[j][i-1] in "o#":
continue
if clay[j+1][i] in "#o":
trapped = True
#to left and right
for k in [-1, 1]:
if clay[j][i+k] in "o#":
continue
d = 0
while True:
d += k
if clay[j][i+d] in ".o" and clay[j+1][i+d-k] in "#o":
clay[j][i+d] = "o"
elif clay[j+1][i+d] == ".":
alive.append((j, i+d-k))
trapped = False
break
else:
break
if trapped:
clay[j-1][i] = "o"
alive.append((j-1, i))
else:
clay[j+1][i] = "o"
alive.append((j+1, i))
nice_print(clay)
print(sum([x.count("o") for x in clay])) |
"""
Various constants and utility functions for lexing
"""
import re
from usfm_utils.usfm.tokens import Token
UNESCAPED_FLAG_PREFIX = "$"
FLAG_PREFIX = re.escape(UNESCAPED_FLAG_PREFIX)
WHITESPACE = " \r\t\n"
def make_flag(flag, boundary=True):
"""
A regex-compatible USFM flag
:param str flag: flag of USFM marker (i.e. "v" for verse numbers)
:param bool boundary: whether returned regex should assert that the flag doesn't
occur inside a larger word
:rtype: str
"""
assert(isinstance(flag, str) or isinstance(flag, unicode))
bound = r"\b" if boundary else ""
return r"{prefix}{flag}{b}".format(prefix=FLAG_PREFIX, flag=flag, b=bound)
def thunk(regex):
"""
    :param str regex: regular expression that the returned token rule should match
"""
def regex_inner(token):
token.value = Token.Builder(token.value)
# raise AssertionError()
return token
regex_inner.__doc__ = regex
return regex_inner
def standalone(flag):
return thunk(make_flag(flag))
def one_arg_regex(flag):
return r"{flag}\s+[^{prefix}\s]+".format(flag=make_flag(flag), prefix=FLAG_PREFIX)
def one_arg(flag):
def one_arg_inner(token):
token.value = Token.Builder(token.value.split()[1])
return token
one_arg_inner.__doc__ = one_arg_regex(flag)
return one_arg_inner
def open_token_regex(flag):
return r"{flag}[^\*]".format(flag=make_flag(flag))
def open_token(flag):
return thunk(open_token_regex(flag))
def close_token_regex(flag):
return r"{flag}\*".format(flag=make_flag(flag))
def close_token(flag):
return thunk(close_token_regex(flag))
def rest_of_line(flag):
def rest_of_line_inner(token):
line = token.value[:-1] # ignore newline
token.value = Token.Builder(drop_first_word(line))
return token
rest_of_line_inner.__doc__ = r"{flag}(\s[^\n]*)?\n".format(flag=make_flag(flag))
return rest_of_line_inner
def until_next_flag(flag):
def until_next_flag_inner(token):
text = token.value
token.value = Token.Builder(drop_first_word(text))
return token
until_next_flag_inner.__doc__ = r"{flag}\s[^{prefix}]*".format(flag=make_flag(flag), prefix=FLAG_PREFIX)
return until_next_flag_inner
def drop_first_word(line):
"""
:param str line:
:rtype: str
"""
match = re.match(r"[^\s]*[\s]*", line)
if match is None:
msg = "Could not drop first word from {}".format(repr(line))
raise ValueError(msg)
return line[match.end():]
def scale(flag):
def scale_inner(token):
text = token.value
match = re.search(r"[0-9]*$", text)
if match is None:
raise ValueError("Malformatted input: {}".format(token.value))
number_str = match.group()
number = 1 if len(number_str) == 0 else int(number_str)
token.value = Token.Builder(text).set_number(number)
return token
scale_inner.__doc__ = r"{flag}[0-9]*\b".format(flag=make_flag(flag, boundary=False))
return scale_inner
def scale_and_rest_of_line(flag):
def scale_and_rest_of_line_inner(token):
line = token.value[:-1]
rgx = r"{flag}([0-9]+)".format(flag=make_flag(flag, boundary=False))
number_match = re.match(rgx, line)
if number_match is None:
number = 1
else:
number = int(number_match.group(1))
rest = drop_first_word(line)
token.value = Token.Builder(rest).set_number(number)
return token
scale_and_rest_of_line_inner.__doc__ = r"{flag}[0-9]*([ \r\t][^\n]*)?\n"\
.format(flag=make_flag(flag, boundary=False))
return scale_and_rest_of_line_inner
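# Worked examples (not in the original source), assuming UNESCAPED_FLAG_PREFIX is "$":
#   make_flag("v")           -> r"\$v\b"
#   one_arg_regex("v")       -> r"\$v\b\s+[^\$\s]+"   (flag followed by a single argument)
#   close_token_regex("w")   -> r"\$w\b\*"
# thunk() and the helpers above store these patterns on the returned function's
# __doc__, the convention ply-style lexers use to attach a regex to a token rule.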
|
#!/usr/bin/env python
# encoding: utf-8
from distutils.core import setup
setup(name='clock_puzzle_solver',
description='Script for solving the "clock puzzle" mini-games present in the Final Fantasy XIII-2 role-playing game.',
author='Thomas Nyman',
author_email='[email protected]',
url='http://github.com/thomasnyman/clock_puzzle_solver',
scripts=['bin/clock_puzzle_solver']
)
|
import sys
import torch
import random
ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC=range(10)
def read_conllu(inp,max_sent=0,drop_tokens=True,drop_nulls=True):
comments=[]
sent=[]
yielded=0
for line in inp:
line=line.strip()
if line.startswith("#"):
comments.append(line)
elif not line:
if sent:
yield sent,comments
yielded+=1
if max_sent>0 and yielded==max_sent:
break
sent,comments=[],[]
else:
cols=line.split("\t")
if drop_tokens and "-" in cols[ID]:
continue
if drop_nulls and "." in cols[ID]:
continue
sent.append(cols)
else:
if sent:
yield sent,comments
def get_text(comments):
for c in comments:
if c.startswith("# text = "):
return c[len("# text = "):]
return None
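# Minimal usage sketch (not part of the original script); "example.conllu" is a
# placeholder file name.
def _example_print_texts(path="example.conllu"):
    with open(path, encoding="utf-8") as inp:
        for sent, comments in read_conllu(inp, max_sent=5):
            text = get_text(comments)
            print(text if text is not None else " ".join(cols[FORM] for cols in sent))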
if __name__=="__main__":
import torch
import transformers
transformers.BERT_PRETRAINED_MODEL_ARCHIVE_MAP["bert-base-finnish-cased"]="http://dl.turkunlp.org/finbert/torch-transformers/bert-base-finnish-cased/pytorch_model.bin"
transformers.BERT_PRETRAINED_CONFIG_ARCHIVE_MAP["bert-base-finnish-cased"]="http://dl.turkunlp.org/finbert/torch-transformers/bert-base-finnish-cased/config.json"
transformers.tokenization_bert.PRETRAINED_VOCAB_FILES_MAP["vocab_file"]["bert-base-finnish-cased"]="http://dl.turkunlp.org/finbert/torch-transformers/bert-base-finnish-cased/vocab.txt"
transformers.tokenization_bert.PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES["bert-base-finnish-cased"]=512
transformers.tokenization_bert.PRETRAINED_INIT_CONFIGURATION["bert-base-finnish-cased"]={'do_lower_case': False}
tokenizer = transformers.BertTokenizer.from_pretrained("bert-base-finnish-cased")
#print("done loading stuff")
# sent_examples=sent_examples_from_conllu(sys.stdin)
# for idx,x in enumerate(batch(sent_examples, tokenizer)):
# print(idx,end="\r")
#ones=torch.ones((7,),dtype=torch.long)
#print(sentence_example(ones+3,ones+4,tokenizer))
docs=doc_examples_from_plaintext(sys.stdin)
for doc in docs:
for b in document_batch(doc,tokenizer):
for x in b:
print(x.shape)
|
class _VolumeData(object):
def __init__(self,
size_x, size_y, size_z,
delta_x, delta_y, delta_z):
#byte array for speed.
#if we later speed up array serializer
#or make this an API, we should
#convert to float []
self._data = []
self._size_x = size_x
self._size_y = size_y
self._size_z = size_z
self._delta_x = delta_x
self._delta_y = delta_y
self._delta_z = delta_z
self._origin_x = 0.0
self._origin_y = 0.0
self._origin_z = 0.0 |
from os.path import join, dirname, isfile
from PySide2.QtWidgets import QDialog, QMessageBox, QLayout
from PySide2.QtCore import Qt, Signal
from logging import getLogger
from numpy import pi, array, array_equal
from .....GUI.Dialog.DMatLib.DMatSetup.Gen_DMatSetup import Gen_DMatSetup
from .....Classes.Material import Material
from .....Classes.ImportMatrixVal import ImportMatrixVal
from .....Classes.ImportMatrix import ImportMatrix
from .....Classes.ImportMatrixXls import ImportMatrixXls
from .....Functions.path_tools import rel_file_path
from .....loggers import GUI_LOG_NAME
class DMatSetup(Gen_DMatSetup, QDialog):
# Signal to DMatLib to update material treeview
saveNeededChanged = Signal() # Modified / Saved / Canceled (add/remove *)
materialToDelete = Signal() # Material will be deleted in DMatLib
materialToRename = Signal() # Material name/path has changed => rename in DMatLib
materialToRevert = Signal() # Revert reference from DMatLib
materialToSave = Signal() # Material to save (update reference/file/machine)
def __init__(self, parent=None, material=None):
"""Dialog for edit/show material properties
Parameters
----------
        parent : Widget
            Parent Widget (DMatLib)
        material : Material
            Material object to show/edit
"""
# Build the interface according to the .ui file
QDialog.__init__(self)
self.setupUi(self)
self.is_save_needed = False
self.init_name = None # Initial name of current Material (to revert rename)
        self.init_path = None  # Initial path of current Material (for rename)
self.mat = None # Current material being edited
# Set initial material
if material is not None:
self.set_material(material=material)
# === setup signals ===
# General
self.le_name.editingFinished.connect(self.set_name)
self.cb_material_type.currentIndexChanged.connect(self.set_is_isotropic)
# Elec
self.lf_rho_elec.editingFinished.connect(self.set_rho_elec)
# Magnetics
self.lf_mur_lin.editingFinished.connect(self.set_mur_lin)
self.lf_Brm20.editingFinished.connect(self.set_Brm20)
self.lf_alpha_Br.editingFinished.connect(self.set_alpha_Br)
self.lf_Wlam.editingFinished.connect(self.set_Wlam)
# Economical
self.lf_cost_unit.editingFinished.connect(self.set_cost_unit)
# Thermics
self.lf_Cp.editingFinished.connect(self.set_Cp)
self.lf_alpha.editingFinished.connect(self.set_alpha)
self.lf_L.editingFinished.connect(self.set_lambda)
self.lf_Lx.editingFinished.connect(self.set_lambda_x)
self.lf_Ly.editingFinished.connect(self.set_lambda_y)
self.lf_Lz.editingFinished.connect(self.set_lambda_z)
# Mechanics
self.lf_rho_meca.editingFinished.connect(self.set_rho_meca)
self.lf_E.editingFinished.connect(self.set_E)
self.lf_Ex.editingFinished.connect(self.set_Ex)
self.lf_Ey.editingFinished.connect(self.set_Ey)
self.lf_Ez.editingFinished.connect(self.set_Ez)
self.lf_G.editingFinished.connect(self.set_G)
self.lf_Gxy.editingFinished.connect(self.set_Gxy)
self.lf_Gxz.editingFinished.connect(self.set_Gxz)
self.lf_Gyz.editingFinished.connect(self.set_Gyz)
self.lf_nu.editingFinished.connect(self.set_nu)
self.lf_nu_xy.editingFinished.connect(self.set_nu_xy)
self.lf_nu_xz.editingFinished.connect(self.set_nu_xz)
self.lf_nu_yz.editingFinished.connect(self.set_nu_yz)
self.tab_values.saveNeeded.connect(self.set_table_values)
self.c_type_material.currentIndexChanged.connect(self.change_type_material)
# Connect buttons
self.b_delete.clicked.connect(lambda: self.materialToDelete.emit())
self.b_save.clicked.connect(lambda: self.materialToSave.emit())
self.b_cancel.clicked.connect(lambda: self.materialToRevert.emit())
def set_save_needed(self, is_save_needed=True):
"""Set if there are unsaved modifications within the object
Parameters
----------
self : DMatSetup
A DMatSetup object
is_save_needed : bool
New value for is_save_needed
"""
old = self.is_save_needed # Keep old values
self.is_save_needed = is_save_needed
self.b_save.setEnabled(is_save_needed)
self.b_cancel.setEnabled(is_save_needed)
if is_save_needed != old:
# Raise signal only if value is different
getLogger(GUI_LOG_NAME).debug("DMatSetup: Sending saveNeededChanged")
self.saveNeededChanged.emit()
def set_material(self, material, is_save_needed=False):
"""Update the current material and setup all the widgets
Parameters
----------
self : DMatSetup
A DMatSetup object
material : Material
The material to edit/show
is_save_needed : bool
True if the material is different from the reference
"""
old_mat = self.mat
self.mat = material
self.init_name = self.mat.name # Keep to revert rename
self.init_path = self.mat.path
getLogger(GUI_LOG_NAME).debug("DMatSetup: Setting material " + self.mat.name)
self.le_name.setText(self.mat.name)
if self.mat.is_isotropic:
self.cb_material_type.setCurrentIndex(1)
else:
self.cb_material_type.setCurrentIndex(0)
# === check material attribute and set values ===
# Elec
if self.mat.elec is None:
self.set_default("elec")
self.lf_rho_elec.setValue(self.mat.elec.rho)
# Economical
if self.mat.eco is None:
self.set_default("eco")
self.lf_cost_unit.setValue(self.mat.eco.cost_unit)
# Thermics
if self.mat.HT is None:
self.set_default("HT")
self.lf_Cp.setValue(self.mat.HT.Cp)
self.lf_alpha.setValue(self.mat.HT.alpha)
self.lf_L.setValue(self.mat.HT.lambda_x)
self.lf_Lx.setValue(self.mat.HT.lambda_x)
self.lf_Ly.setValue(self.mat.HT.lambda_y)
self.lf_Lz.setValue(self.mat.HT.lambda_z)
# Structural
if self.mat.struct is None:
self.set_default("struct")
self.lf_rho_meca.setValue(self.mat.struct.rho)
if self.mat.struct.Ex not in [0, None]:
self.lf_E.setValue(self.mat.struct.Ex / 1e9)
self.lf_Ex.setValue(self.mat.struct.Ex / 1e9)
else:
self.lf_E.setValue(self.mat.struct.Ex)
self.lf_Ex.setValue(self.mat.struct.Ex)
if self.mat.struct.Ey not in [0, None]:
self.lf_Ey.setValue(self.mat.struct.Ey / 1e9)
else:
self.lf_Ey.setValue(self.mat.struct.Ey)
if self.mat.struct.Ez not in [0, None]:
self.lf_Ez.setValue(self.mat.struct.Ez / 1e9)
else:
self.lf_Ez.setValue(self.mat.struct.Ez)
if self.mat.struct.Gxy not in [0, None]:
self.lf_G.setValue(self.mat.struct.Gxy / 1e9)
self.lf_Gxy.setValue(self.mat.struct.Gxy / 1e9)
else:
self.lf_G.setValue(self.mat.struct.Gxy)
self.lf_Gxy.setValue(self.mat.struct.Gxy)
if self.mat.struct.Gxz not in [0, None]:
self.lf_Gxz.setValue(self.mat.struct.Gxz / 1e9)
else:
self.lf_Gxz.setValue(self.mat.struct.Gxz)
if self.mat.struct.Gyz not in [0, None]:
self.lf_Gyz.setValue(self.mat.struct.Gyz / 1e9)
else:
self.lf_Gyz.setValue(self.mat.struct.Gyz)
self.lf_nu.setValue(self.mat.struct.nu_xy)
self.lf_nu_xy.setValue(self.mat.struct.nu_xy)
self.lf_nu_xz.setValue(self.mat.struct.nu_xz)
self.lf_nu_yz.setValue(self.mat.struct.nu_yz)
# Magnetical
if self.mat.mag is None:
self.set_default("mag")
self.lf_mur_lin.setValue(self.mat.mag.mur_lin)
self.lf_Brm20.setValue(self.mat.mag.Brm20)
self.lf_alpha_Br.setValue(self.mat.mag.alpha_Br)
self.lf_Wlam.setValue(self.mat.mag.Wlam)
# Setup tab values
if not isinstance(self.mat.mag.BH_curve, ImportMatrixVal):
self.g_BH_import.setChecked(False)
elif array_equal(self.mat.mag.BH_curve.value, array([[0, 0]])):
self.g_BH_import.setChecked(False)
else:
self.g_BH_import.setChecked(True)
self.tab_values.setWindowFlags(self.tab_values.windowFlags() & ~Qt.Dialog)
self.tab_values.title = self.g_BH_import.title()
self.tab_values.N_row_txt = "Nb of Points"
self.tab_values.shape_max = (None, 2)
self.tab_values.shape_min = (None, 2)
self.tab_values.col_header = ["H-curve(A/m)", "B-curve(T)"]
self.tab_values.unit_order = ["First column H", "First column B"]
self.tab_values.button_plot_title = "B(H)"
self.tab_values.si_col.hide()
self.tab_values.in_col.hide()
self.tab_values.b_close.hide()
self.tab_values.b_import.setHidden(False)
self.tab_values.b_export.setHidden(False)
if isinstance(self.mat.mag.BH_curve, ImportMatrixXls):
self.mat.mag.BH_curve = ImportMatrixVal(self.mat.mag.BH_curve.get_data())
self.tab_values.data = self.mat.mag.BH_curve.get_data()
elif not isinstance(self.mat.mag.BH_curve, ImportMatrixVal):
self.tab_values.data = array([[0, 0]])
elif self.mat.mag.BH_curve.get_data() is not None:
self.tab_values.data = self.mat.mag.BH_curve.get_data()
else:
self.tab_values.data = array([[0, 0]])
self.tab_values.update()
if isinstance(self.mat.mag.BH_curve, ImportMatrixVal) and not array_equal(
self.mat.mag.BH_curve.value, array([[0, 0]])
):
self.c_type_material.setCurrentIndex(2)
elif self.mat.mag.Brm20 != 0 and self.mat.mag.alpha_Br != 0:
self.c_type_material.setCurrentIndex(1)
else:
self.c_type_material.setCurrentIndex(0)
self.change_type_material()
# Hide useless widget
self.in_epsr.hide()
self.lf_epsr.hide()
self.unit_epsr.hide()
# Enable/Disable buttons
self.blockSignals(True)
self.set_save_needed(is_save_needed=is_save_needed)
self.blockSignals(False)
def set_default(self, attr):
"""When mat.elec or mat.mag are None, initialize with default values
Parameters
----------
self : DMatSetup
A DMatSetup widget
attr : str
name of the property to set
"""
setattr(self.mat, attr, type(getattr(Material(), attr))())
def set_name(self):
"""Signal to update the value of name according to the line edit
Parameters
----------
self : DMatSetup
A DMatSetup object
"""
file_name = str(self.le_name.text())
if file_name == self.init_name:
return # New name is the same as the previous one
# Check that the user wants to rename the materials
msg = self.tr(
"Do you want to rename your material to "
+ file_name
+ " ?\nAll current modifications (if any) on the material will be saved."
)
reply = QMessageBox.question(
self,
self.tr("Renaming material"),
msg,
QMessageBox.Yes,
QMessageBox.No,
)
self.qmessagebox_question = reply
if reply == QMessageBox.No:
# Revert name
self.le_name.blockSignals(True)
self.le_name.setText(self.init_name)
self.le_name.blockSignals(False)
return
# Check that new name is correct (doesn't exist)
filepath = rel_file_path(
join(dirname(self.mat.path), file_name + ".json"), "MATLIB_DIR"
)
if isfile(filepath):
QMessageBox().critical(
self,
self.tr("Error"),
self.tr(
"A material with the name "
+ file_name
+ " already exist!\nPlease enter another name."
),
)
# Revert name
self.le_name.blockSignals(True)
self.le_name.setText(self.init_name)
self.le_name.blockSignals(False)
return
# Update name and path
self.mat.name = file_name
self.le_name.setText(self.mat.name)
self.mat.path = rel_file_path(
join(dirname(self.mat.path), file_name + ".json"), "MATLIB_DIR"
)
self.set_save_needed(is_save_needed=False)
self.materialToRename.emit() # Update reference and treeview
def set_is_isotropic(self):
"""Signal to update the value of is_isotropic according to the checkbox
Parameters
----------
self :
A DMatSetup object
is_checked :
State of the checkbox
Returns
-------
None
"""
if self.cb_material_type.currentText() == "Isotropic":
self.mat.is_isotropic = True
self.nav_meca.setCurrentIndex(1)
self.nav_ther.setCurrentIndex(1)
elif self.cb_material_type.currentText() == "Orthotropic":
self.mat.is_isotropic = False
self.nav_meca.setCurrentIndex(0)
self.nav_ther.setCurrentIndex(0)
self.set_save_needed(is_save_needed=True)
def set_rho_elec(self):
"""Signal to update the value of rho_elec according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.elec.rho != self.lf_rho_elec.value():
self.mat.elec.rho = self.lf_rho_elec.value()
self.set_save_needed(is_save_needed=True)
def set_mur_lin(self):
"""Signal to update the value of mur_lin according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.mag.mur_lin != self.lf_mur_lin.value():
self.mat.mag.mur_lin = self.lf_mur_lin.value()
self.set_save_needed(is_save_needed=True)
def set_Brm20(self):
"""Signal to update the value of Brm20 according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.mag.Brm20 != self.lf_Brm20.value():
self.mat.mag.Brm20 = self.lf_Brm20.value()
self.set_save_needed(is_save_needed=True)
def set_alpha_Br(self):
"""Signal to update the value of alpha_Br according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.mag.alpha_Br != self.lf_alpha_Br.value():
self.mat.mag.alpha_Br = self.lf_alpha_Br.value()
self.set_save_needed(is_save_needed=True)
def set_Wlam(self):
"""Signal to update the value of Wlam according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.mag.Wlam != self.lf_Wlam.value():
self.mat.mag.Wlam = self.lf_Wlam.value()
self.set_save_needed(is_save_needed=True)
def set_cost_unit(self):
"""Signal to update the value of cost_unit according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.eco.cost_unit != self.lf_cost_unit.value():
self.mat.eco.cost_unit = self.lf_cost_unit.value()
self.set_save_needed(is_save_needed=True)
def set_Cp(self):
"""Signal to update the value of Cp according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.Cp != self.lf_Cp.value():
self.mat.HT.Cp = self.lf_Cp.value()
self.set_save_needed(is_save_needed=True)
def set_alpha(self):
"""Signal to update the value of alpha according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.alpha != self.lf_alpha.value():
self.mat.HT.alpha = self.lf_alpha.value()
self.set_save_needed(is_save_needed=True)
def set_lambda(self):
"""Signal to update the value of lambda according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.lambda_x != self.lf_L.value():
self.mat.HT.lambda_x = self.lf_L.value()
self.mat.HT.lambda_y = self.lf_L.value()
self.mat.HT.lambda_z = self.lf_L.value()
self.set_save_needed(is_save_needed=True)
def set_lambda_x(self):
"""Signal to update the value of lambda_x according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.lambda_x != self.lf_Lx.value():
self.mat.HT.lambda_x = self.lf_Lx.value()
self.set_save_needed(is_save_needed=True)
def set_lambda_y(self):
"""Signal to update the value of lambda_y according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.lambda_y != self.lf_Ly.value():
self.mat.HT.lambda_y = self.lf_Ly.value()
self.set_save_needed(is_save_needed=True)
def set_lambda_z(self):
"""Signal to update the value of lambda_z according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.HT.lambda_z != self.lf_Lz.value():
self.mat.HT.lambda_z = self.lf_Lz.value()
self.set_save_needed(is_save_needed=True)
def set_rho_meca(self):
"""Signal to update the value of rho_meca according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.rho != self.lf_rho_meca.value():
self.mat.struct.rho = self.lf_rho_meca.value()
self.set_save_needed(is_save_needed=True)
def set_E(self):
"""Signal to update the value of Ex according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.Ex != self.lf_E.value() * 1e9:
self.mat.struct.Ex = self.lf_E.value() * 1e9
self.mat.struct.Ey = self.lf_E.value() * 1e9
self.mat.struct.Ez = self.lf_E.value() * 1e9
self.set_save_needed(is_save_needed=True)
def set_Ex(self):
"""Signal to update the value of Ex according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.Ex != self.lf_Ex.value() * 1e9:
self.mat.struct.Ex = self.lf_Ex.value() * 1e9
self.set_save_needed(is_save_needed=True)
def set_Ey(self):
"""Signal to update the value of Ey according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.Ey != self.lf_Ey.value() * 1e9:
self.mat.struct.Ey = self.lf_Ey.value() * 1e9
self.set_save_needed(is_save_needed=True)
def set_Ez(self):
"""Signal to update the value of Ez according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.Ez != self.lf_Ez.value() * 1e9:
self.mat.struct.Ez = self.lf_Ez.value() * 1e9
self.set_save_needed(is_save_needed=True)
def set_G(self):
"""Signal to update the value of G according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.Gxy != self.lf_G.value() * 1e9:
self.mat.struct.Gxy = self.lf_G.value() * 1e9
self.mat.struct.Gxz = self.lf_G.value() * 1e9
self.mat.struct.Gyz = self.lf_G.value() * 1e9
self.set_save_needed(is_save_needed=True)
def set_Gxy(self):
"""Signal to update the value of Gxy according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.Gxy != self.lf_Gxy.value() * 1e9:
self.mat.struct.Gxy = self.lf_Gxy.value() * 1e9
self.set_save_needed(is_save_needed=True)
def set_Gxz(self):
"""Signal to update the value of Gxz according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.Gxz != self.lf_Gxz.value() * 1e9:
self.mat.struct.Gxz = self.lf_Gxz.value() * 1e9
self.set_save_needed(is_save_needed=True)
def set_Gyz(self):
"""Signal to update the value of Gyz according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.Gyz != self.lf_Gyz.value() * 1e9:
self.mat.struct.Gyz = self.lf_Gyz.value() * 1e9
self.set_save_needed(is_save_needed=True)
def set_nu(self):
"""Signal to update the value of nu_xy according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.nu_xy != self.lf_nu.value():
self.mat.struct.nu_xy = self.lf_nu.value()
self.mat.struct.nu_xz = self.lf_nu.value()
self.mat.struct.nu_yz = self.lf_nu.value()
self.set_save_needed(is_save_needed=True)
def set_nu_xy(self):
"""Signal to update the value of nu_xy according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.nu_xy != self.lf_nu_xy.value():
self.mat.struct.nu_xy = self.lf_nu_xy.value()
self.set_save_needed(is_save_needed=True)
def set_nu_xz(self):
"""Signal to update the value of nu_xz according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.nu_xz != self.lf_nu_xz.value():
self.mat.struct.nu_xz = self.lf_nu_xz.value()
self.set_save_needed(is_save_needed=True)
def set_nu_yz(self):
"""Signal to update the value of nu_yz according to the line edit
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.mat.struct.nu_yz != self.lf_nu_yz.value():
self.mat.struct.nu_yz = self.lf_nu_yz.value()
self.set_save_needed(is_save_needed=True)
def set_table_values(self):
"""Signal to update the value of the table according to the table
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if isinstance(self.mat.mag.BH_curve, ImportMatrixVal):
if not array_equal(self.mat.mag.BH_curve.value, self.tab_values.get_data()):
self.mat.mag.BH_curve.value = self.tab_values.get_data()
self.set_save_needed(is_save_needed=True)
elif isinstance(self.mat.mag.BH_curve, (ImportMatrixXls, ImportMatrix)):
self.mat.mag.BH_curve = ImportMatrixVal(self.tab_values.get_data())
self.set_save_needed(is_save_needed=True)
def change_type_material(self):
"""Hide or show units that need to be defined depending on the type of the material
Parameters
----------
self :
A DMatSetup object
Returns
-------
None
"""
if self.c_type_material.currentIndex() == 0: # Linear
self.in_Brm20.setHidden(True)
self.lf_Brm20.setHidden(True)
self.unit_Brm20.setHidden(True)
self.in_alpha_Br.setHidden(True)
self.lf_alpha_Br.setHidden(True)
self.unit_alpha_Br.setHidden(True)
self.nav_mag.setCurrentIndex(0)
elif self.c_type_material.currentIndex() == 1: # Magnetic
self.in_Brm20.setHidden(False)
self.lf_Brm20.setHidden(False)
self.unit_Brm20.setHidden(False)
self.in_alpha_Br.setHidden(False)
self.lf_alpha_Br.setHidden(False)
self.unit_alpha_Br.setHidden(False)
self.nav_mag.setCurrentIndex(0)
else: # Lamination
self.nav_mag.setCurrentIndex(1)
|
r = float(input('Enter the amount in reais you have in your wallet: R$ '))
cot = 4.85  # assumed BRL-to-USD exchange rate
d = r / cot
print(' x.x ' * 25)
print('With R$ {} in your wallet you can buy US$ {:.2f}!'.format(r, d))
|
from utils.batchManagers import MultiNLIBatchManager, MRPCBatchManager, PDBBatchManager, SICKBatchManager, IBMBatchManager
from math import sqrt
import random
random.seed(42)
class MultiTaskTrainLoader():
""" Custom batch manager for multi-task learning. Iterating over this object yields a batch from one of the datasets (randomly) """
def __init__(self, batch_size, device):
self.batch_size = batch_size
self.device = device
# batchmanagers we care about
self.batchmanagers = {
'NLI' : MultiNLIBatchManager(batch_size, device),
'PDB' : PDBBatchManager(batch_size, device),
'MRPC' : MRPCBatchManager(batch_size, device)
}
self.tasks = list(self.batchmanagers.keys())
# save the iterator of the dataloaders
# need to do so because dataloaders are not iterators directly
self.iter_dataloaders = {task:iter(bm.train_iter) for task, bm in self.batchmanagers.items()}
# this is used to sample the dataloaders. See function description
self.proportions = self._getProportions()
# task iterator (we shuffle every time)
random.shuffle(self.proportions)
self.task_iter = iter(self.proportions)
# total number of batches per one epoch
self.totalBatches = max((bm.task_size() for bm in self.batchmanagers.values())) // self.batch_size
# used to iterate.
self.counter = 0
def getTasksWithNClasses(self):
return {name: len(bm.classes()) for name, bm in self.batchmanagers.items()}
def _getProportions(self):
""" returns a list of strings, each string is the name of the task. The number of strings in this list are proportional to the sizes of the datasets (square rooted)
Returns:
(list(str)): list representing the proportions """
min_size = min((bm.task_size() for bm in self.batchmanagers.values()))
proportions = []
for name, bm in self.batchmanagers.items():
size = round(sqrt(bm.task_size() / min_size))
proportions += [name] * size
return proportions
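    # Illustration with made-up dataset sizes: if NLI had 400k examples, PDB 16k
    # and MRPC 4k, then min_size = 4k and the list would hold round(sqrt(100)) = 10
    # 'NLI', round(sqrt(4)) = 2 'PDB' and 1 'MRPC' entries, so larger tasks are
    # sampled more often, but only in square-root proportion to their size.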
def __len__(self):
return self.totalBatches
def __iter__(self):
return self
def __next__(self):
""" Iterator main function
Returns:
(str, object): name of the task and a batch """
# if we are out of index, stop the iteration...
# (but restart the counter for next time!)
if self.counter >= self.totalBatches:
self.counter = 0
raise StopIteration
else:
# augment the index
self.counter += 1
# pick a task (this is a string)
try:
task = next(self.task_iter)
except StopIteration:
random.shuffle(self.proportions)
self.task_iter = iter(self.proportions)
task = next(self.task_iter)
# get the corresponding dataloader-iterator
dataloader = self.iter_dataloaders[task]
try:
# get the next batch
batch = next(dataloader)
except StopIteration:
# if this did not work, restart the iterator.
self.iter_dataloaders[task] = iter(self.batchmanagers[task].train_iter)
dataloader = self.iter_dataloaders[task]
batch = next(dataloader)
return task, batch |
import datetime
n = int(input())
fmt = '%a %d %b %Y %H:%M:%S %z'
for i in range(n):
l = input()
m = input()
print(int(abs((datetime.datetime.strptime(l, fmt)-datetime.datetime.strptime(m, fmt)).total_seconds())))
|
# 284. Peeking Iterator
# Given an Iterator class interface with methods: next() and hasNext(),
# design and implement a PeekingIterator that support the peek() operation --
# it essentially peek() at the element that will be returned by the next call to next().
# Here is an example.
# Assume that the iterator is initialized to the beginning of the list: [1, 2, 3].
# Calling next() gets you 1, the first element in the list.
# Now you call peek() and it returns 2, the next element. Calling next() after that still returns 2.
# You call next() the final time and it returns 3, the last element.
# Calling hasNext() after that should return false.
# Hint:
# Think of "looking ahead". You want to cache the next element.
# Is one variable sufficient? Why or why not?
# Test your design with call order of peek() before next() vs next() before peek().
# For a clean implementation, check out Google's guava library source code.
# Follow up: How would you extend your design to be generic and work with all types, not just integer?
# Below is the interface for Iterator, which is already defined for you.
class Iterator(object):
def __init__(self, nums):
"""
Initializes an iterator object to the beginning of a list.
:type nums: List[int]
"""
        self.nums = nums
        self.index = 0
    def hasNext(self):
        """
        Returns true if the iteration has more elements.
        :rtype: bool
        """
        return self.index < len(self.nums)
    def next(self):
        """
        Returns the next element in the iteration.
        :rtype: int
        """
        val = self.nums[self.index]
        self.index += 1
        return val
# http://bookshadow.com/weblog/2015/09/21/leetcode-peeking-iterator/
# Introduce two extra variables, nextElement and peekFlag:
# nextElement holds the next element pre-fetched by a peek() call,
# peekFlag records whether a peek() has already been performed
class PeekingIterator(object):
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.iter = iterator
self.peekFlag = False
self.nextElement = None
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
if not self.peekFlag: # save next element and set flag to true
self.nextElement = self.iter.next()
self.peekFlag = True
return self.nextElement
def next(self):
"""
:rtype: int
"""
if not self.peekFlag: # return normal next
return self.iter.next()
nextElement = self.nextElement # save next element
self.peekFlag = False # reset attributes back
self.nextElement = None
return nextElement
def hasNext(self):
"""
:rtype: bool
"""
return self.peekFlag or self.iter.hasNext()
# Your PeekingIterator object will be instantiated and called as such:
nums = [1, 2, 3]
it = PeekingIterator(Iterator(nums))
val = it.peek()
print(val)
it.next()
val = it.peek()
print(val)
# while it.hasNext():
# val = it.peek() # Get the next element but not advance the iterator.
# it.next() # Should return the same value as [val].
|
"""Implements the base crawler which all the rules are based on.
Crawlers, crawl through the trees returned by the parser and
evaluate particular rules.
The intent is that it should be possible for the rules to be expressed
as simply as possible, with as much of the complexity abstracted away.
The evaluation function should take enough arguments that it can evaluate
the position of the given segment in relation to its neighbors, and that
the segment which finally "triggers" the error, should be the one that would
be corrected OR if the rule relates to something that is missing, then it
should flag on the segment FOLLOWING, the place that the desired element is
missing.
"""
import copy
import logging
from collections import namedtuple
from sqlfluff.core.parser import RawSegment, KeywordSegment, BaseSegment, SymbolSegment
from sqlfluff.core.errors import SQLLintError
# The ghost of a rule (mostly used for testing)
RuleGhost = namedtuple("RuleGhost", ["code", "description"])
# Instantiate the rules logger
rules_logger = logging.getLogger("sqlfluff.rules")
class RuleLoggingAdapter(logging.LoggerAdapter):
"""A LoggingAdapter for rules which adds the code of the rule to it."""
def process(self, msg, kwargs):
"""Add the code element to the logging message before emit."""
return "[%s] %s" % (self.extra["code"], msg), kwargs
class LintResult:
"""A class to hold the results of a crawl operation.
Args:
anchor (:obj:`BaseSegment`, optional): A segment which represents
            the *position* of the problem. NB: Each fix will also hold
its own reference to position, so this position is mostly for
alerting the user to where the *problem* is.
fixes (:obj:`list` of :obj:`LintFix`, optional): An array of any
fixes which would correct this issue. If not present then it's
            assumed that this issue will have to be fixed manually.
memory (:obj:`dict`, optional): An object which stores any working
memory for the crawler. The `memory` returned in any `LintResult`
will be passed as an input to the next segment to be crawled.
description (:obj:`str`, optional): A description of the problem
identified as part of this result. This will override the
description of the rule as what gets reported to the user
with the problem if provided.
"""
def __init__(self, anchor=None, fixes=None, memory=None, description=None):
# An anchor of none, means no issue
self.anchor = anchor
# Fixes might be blank
self.fixes = fixes or []
# When instantiating the result, we filter any fixes which are "trivial".
self.fixes = [f for f in self.fixes if not f.is_trivial()]
# Memory is passed back in the linting result
self.memory = memory
# store a description_override for later
self.description = description
def to_linting_error(self, rule):
"""Convert a linting result to a :exc:`SQLLintError` if appropriate."""
if self.anchor:
# Allow description override from the LintResult
description = self.description or rule.description
return SQLLintError(
rule=rule,
segment=self.anchor,
fixes=self.fixes,
description=description,
)
else:
return None
class LintFix:
"""A class to hold a potential fix to a linting violation.
Args:
edit_type (:obj:`str`): One of `create`, `edit`, `delete` to indicate
the kind of fix this represents.
anchor (:obj:`BaseSegment`): A segment which represents
the *position* that this fix should be applied at. For deletions
it represents the segment to delete, for creations it implies the
position to create at (with the existing element at this position
to be moved *after* the edit), for an `edit` it implies the segment
to be replaced.
edit (:obj:`BaseSegment`, optional): For `edit` and `create` fixes, this
            holds the segment, or iterable of segments, to create or replace at the
given `anchor` point.
"""
def __init__(self, edit_type, anchor, edit=None):
if edit_type not in ["create", "edit", "delete"]:
raise ValueError("Unexpected edit_type: {0}".format(edit_type))
self.edit_type = edit_type
self.anchor = anchor
# Coerce to list
if isinstance(edit, BaseSegment):
edit = [edit]
# Copy all the elements of edit to stop contamination.
# We're about to start stripping the position markers
# of some of the elements and we don't want to end up
# stripping the positions of the original elements of
# the parsed structure.
self.edit = copy.deepcopy(edit)
if self.edit:
# Strip position markers of anything enriched, otherwise things can get blurry
for seg in self.edit:
seg.pos_marker = seg.pos_marker.strip()
# Once stripped, we shouldn't replace any markers because
# later code may rely on them being accurate, which we
# can't guarantee with edits.
def is_trivial(self):
"""Return true if the fix is trivial.
Trivial edits are:
- Anything of zero length.
- Any edits which result in themselves.
Removing these makes the routines which process fixes much faster.
"""
if self.edit_type == "create":
if isinstance(self.edit, BaseSegment):
if len(self.edit.raw) == 0:
return True
elif all(len(elem.raw) == 0 for elem in self.edit):
return True
elif self.edit_type == "edit" and self.edit == self.anchor:
return True
return False
def __repr__(self):
if self.edit_type == "delete":
detail = "delete:{0!r}".format(self.anchor.raw)
elif self.edit_type in ("edit", "create"):
if hasattr(self.edit, "raw"):
new_detail = self.edit.raw
else:
new_detail = "".join(s.raw for s in self.edit)
if self.edit_type == "edit":
detail = "edt:{0!r}->{1!r}".format(self.anchor.raw, new_detail)
else:
detail = "create:{0!r}".format(new_detail)
else:
detail = ""
return "<LintFix: {0} @{1} {2}>".format(
self.edit_type, self.anchor.pos_marker, detail
)
def __eq__(self, other):
"""Compare equality with another fix.
A fix is equal to another if is in the same place (position), with the
same type and (if appropriate) the same edit values.
"""
if not self.edit_type == other.edit_type:
return False
if not self.anchor == other.anchor:
return False
if not self.edit == other.edit:
return False
return True
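# Illustrative sketch (not part of the module itself): inside a rule's _eval, a
# fix that inserts a single space before the segment under evaluation could be
# returned roughly like this (`segment` is whatever the rule is looking at):
#
#   ws = BaseCrawler.make_whitespace(raw=" ", pos_marker=segment.pos_marker)
#   return LintResult(anchor=segment, fixes=[LintFix("create", segment, [ws])])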
class BaseCrawler:
"""The base class for a crawler, of which all rules are derived from.
Args:
code (:obj:`str`): The identifier for this rule, used in inclusion
or exclusion.
description (:obj:`str`): A human readable description of what this
rule does. It will be displayed when any violations are found.
"""
_works_on_unparsable = True
def __init__(self, code, description, **kwargs):
self.description = description
self.code = code
# kwargs represents the config passed to the crawler. Add all kwargs as class attributes
# so they can be accessed in rules which inherit from this class
for key, value in kwargs.items():
self.__dict__[key] = value
# We also define a custom logger here, which also includes the code
# of the rule in the logging.
self.logger = RuleLoggingAdapter(rules_logger, {"code": code})
# Validate that declared configuration options exist
try:
for keyword in self.config_keywords:
if keyword not in kwargs.keys():
raise ValueError(
(
"Unrecognized config '{0}' for Rule {1}. If this "
"is a new option, please add it to "
"`default_config.cfg`"
).format(keyword, code)
)
except AttributeError:
self.logger.info("No config_keywords defined for {0}".format(code))
def _eval(self, **kwargs):
"""Evaluate this rule against the current context.
This should indicate whether a linting violation has occurred and/or
whether there is something to remember from this evaluation.
Note that an evaluate function should always accept `**kwargs`, but
if it relies on any available kwargs, it should explicitly call
them out at definition.
Returns:
:obj:`LintResult` or :obj:`None`.
The reason that this method is called :meth:`_eval` and not `eval` is
a bit of a hack with sphinx autodoc, to make it so that the rule
documentation auto-generates nicely.
"""
raise NotImplementedError(
(
"{0} has not had its `eval` function defined. This is a problem "
"with the rule setup."
).format(self.__class__.__name__)
)
def crawl(
self,
segment,
dialect,
parent_stack=None,
siblings_pre=None,
siblings_post=None,
raw_stack=None,
memory=None,
):
"""Recursively perform the crawl operation on a given segment.
Returns:
A tuple of (vs, raw_stack, fixes, memory)
"""
# parent stack should be a tuple if it exists
        # crawlers should evaluate on segments FIRST, before evaluating on their
# children. They should also return a list of violations.
parent_stack = parent_stack or ()
raw_stack = raw_stack or ()
siblings_post = siblings_post or ()
siblings_pre = siblings_pre or ()
memory = memory or {}
vs = []
fixes = []
# First, check whether we're looking at an unparsable and whether
# this rule will still operate on that.
if not self._works_on_unparsable and segment.is_type("unparsable"):
# Abort here if it doesn't. Otherwise we'll get odd results.
return vs, raw_stack, [], memory
# TODO: Document what options are available to the evaluation function.
try:
res = self._eval(
segment=segment,
parent_stack=parent_stack,
siblings_pre=siblings_pre,
siblings_post=siblings_post,
raw_stack=raw_stack,
memory=memory,
dialect=dialect,
)
# Any exception at this point would halt the linter and
# cause the user to get no results
except Exception as e:
self.logger.critical(
f"Applying rule {self.code} threw an Exception: {e}", exc_info=True
)
vs.append(
SQLLintError(
rule=self,
segment=segment,
fixes=[],
description=(
f"""Unexpected exception: {str(e)};
Could you open an issue at https://github.com/sqlfluff/sqlfluff/issues ?
You can ignore this exception for now, by adding '--noqa: {self.code}' at the end
of line {segment.pos_marker.line_no}
"""
),
)
)
return vs, raw_stack, fixes, memory
if res is None:
# Assume this means no problems (also means no memory)
pass
elif isinstance(res, LintResult):
# Extract any memory
memory = res.memory
lerr = res.to_linting_error(rule=self)
if lerr:
vs.append(lerr)
fixes += res.fixes
elif isinstance(res, list) and all(
isinstance(elem, LintResult) for elem in res
):
# Extract any memory from the *last* one, assuming
# it was the last to be added
memory = res[-1].memory
for elem in res:
lerr = elem.to_linting_error(rule=self)
if lerr:
vs.append(lerr)
fixes += elem.fixes
else:
raise TypeError(
"Got unexpected result [{0!r}] back from linting rule: {1!r}".format(
res, self.code
)
)
# The raw stack only keeps track of the previous raw segments
if len(segment.segments) == 0:
raw_stack += (segment,)
# Parent stack keeps track of all the parent segments
parent_stack += (segment,)
for idx, child in enumerate(segment.segments):
dvs, raw_stack, child_fixes, memory = self.crawl(
segment=child,
parent_stack=parent_stack,
siblings_pre=segment.segments[:idx],
siblings_post=segment.segments[idx + 1 :],
raw_stack=raw_stack,
memory=memory,
dialect=dialect,
)
vs += dvs
fixes += child_fixes
return vs, raw_stack, fixes, memory
# HELPER METHODS --------
@staticmethod
def filter_meta(segments, keep_meta=False):
"""Filter the segments to non-meta.
Or optionally the opposite if keep_meta is True.
"""
buff = []
for elem in segments:
if elem.is_meta is keep_meta:
buff.append(elem)
return tuple(buff)
@classmethod
def get_parent_of(cls, segment, root_segment):
"""Return the segment immediately containing segment.
NB: This is recursive.
Args:
segment: The segment to look for.
root_segment: Some known parent of the segment
we're looking for (although likely not the
direct parent in question).
"""
if segment in root_segment.segments:
return root_segment
elif root_segment.segments:
# try each of the subsegments
for sub in root_segment.segments:
p = cls.get_parent_of(segment, sub)
if p:
return p
# Not directly in the segment and
# no subsegments to check. Return None.
return None
@classmethod
def make_whitespace(cls, raw, pos_marker):
"""Make a whitespace segment."""
WhitespaceSegment = RawSegment.make(" ", name="whitespace", type="whitespace")
return WhitespaceSegment(raw=raw, pos_marker=pos_marker)
@classmethod
def make_newline(cls, pos_marker, raw=None):
"""Make a newline segment."""
# Default the newline to \n
raw = raw or "\n"
nls = RawSegment.make("\n", name="newline", type="newline")
return nls(raw=raw, pos_marker=pos_marker)
@classmethod
def make_keyword(cls, raw, pos_marker):
"""Make a keyword segment."""
# For the name of the segment, we force the string to lowercase.
kws = KeywordSegment.make(raw.lower())
# At the moment we let the rule dictate *case* here.
return kws(raw=raw, pos_marker=pos_marker)
@classmethod
def make_symbol(cls, raw, pos_marker, seg_type, name=None):
"""Make a symbol segment."""
# For the name of the segment, we force the string to lowercase.
symbol_seg = SymbolSegment.make(
raw.lower(), name=name or seg_type, type=seg_type
)
# At the moment we let the rule dictate *case* here.
return symbol_seg(raw=raw, pos_marker=pos_marker)
class RuleSet:
"""Class to define a ruleset.
A rule set is instantiated on module load, but the references
to each of its classes are instantiated at runtime. This means
that configuration values can be passed to those rules live
and be responsive to any changes in configuration from the
path that the file is in.
Rules should be fetched using the :meth:`get_rulelist` command which
also handles any filtering (i.e. whitelisting and blacklisting).
New rules should be added to the instance of this class using the
:meth:`register` decorator. That decorator registers the class, but also
performs basic type and name-convention checks.
The code for the rule will be parsed from the name, the description
    from the docstring. The eval function is expected to be overridden by the
    subclass; the parent class raises an error from this function if it is not
    overridden.
"""
def __init__(self, name, config_info):
self.name = name
self.config_info = config_info
self._register = {}
def _validate_config_options(self, config, rule=None):
"""Ensure that all config options are valid.
Config options can also be checked for a specific rule e.g L010.
"""
rule_config = config.get_section("rules")
for config_name, info_dict in self.config_info.items():
config_option = (
rule_config.get(config_name)
if not rule
else rule_config.get(rule).get(config_name)
)
valid_options = info_dict["validation"]
if config_option not in valid_options and config_option is not None:
raise ValueError(
(
"Invalid option '{0}' for {1} configuration. Must be one of {2}"
).format(
config_option,
config_name,
valid_options,
)
)
def register(self, cls):
"""Decorate a class with this to add it to the ruleset.
.. code-block:: python
@myruleset.register
class Rule_L001(BaseCrawler):
"Description of rule."
def eval(self, **kwargs):
return LintResult()
We expect that rules are defined as classes with the name `Rule_XXXX`
where `XXXX` is of the form `LNNN`, where L is a letter (literally L for
*linting* by default) and N is a three digit number.
If this receives classes by any other name, then it will raise a
:exc:`ValueError`.
"""
elems = cls.__name__.split("_")
# Validate the name
if len(elems) != 2 or elems[0] != "Rule" or len(elems[1]) != 4:
raise ValueError(
(
"Tried to register rule on set {0!r} with unexpected " "format: {1}"
).format(self.name, cls.__name__)
)
code = elems[1]
# If the docstring is multiline, then we extract just summary.
description = cls.__doc__.split("\n")[0]
# Keep track of the *class* in the register. Don't instantiate yet.
if code in self._register:
raise ValueError(
"Rule {0!r} has already been registered on RuleSet {1!r}!".format(
code, self.name
)
)
self._register[code] = dict(code=code, description=description, cls=cls)
# Make sure we actually return the original class
return cls
def get_rulelist(self, config):
"""Use the config to return the appropriate rules.
We use the config both for whitelisting and blacklisting, but also
for configuring the rules given the given config.
Returns:
:obj:`list` of instantiated :obj:`BaseCrawler`.
"""
# Validate all generic rule configs
self._validate_config_options(config)
# default the whitelist to all the rules if not set
whitelist = config.get("rule_whitelist") or list(self._register.keys())
blacklist = config.get("rule_blacklist") or []
whitelisted_unknown_rule_codes = [
r for r in whitelist if r not in self._register
]
if any(whitelisted_unknown_rule_codes):
rules_logger.warning(
"Tried to whitelist unknown rules: {0!r}".format(
whitelisted_unknown_rule_codes
)
)
blacklisted_unknown_rule_codes = [
r for r in blacklist if r not in self._register
]
if any(blacklisted_unknown_rule_codes):
rules_logger.warning(
"Tried to blacklist unknown rules: {0!r}".format(
blacklisted_unknown_rule_codes
)
)
keylist = sorted(self._register.keys())
# First we filter the rules
keylist = [r for r in keylist if r in whitelist and r not in blacklist]
# Construct the kwargs for instantiation before we actually do it.
rule_kwargs = {}
for k in keylist:
kwargs = {}
generic_rule_config = config.get_section("rules")
specific_rule_config = config.get_section(
("rules", self._register[k]["code"])
)
if generic_rule_config:
kwargs.update(generic_rule_config)
if specific_rule_config:
# Validate specific rule config before adding
self._validate_config_options(config, self._register[k]["code"])
kwargs.update(specific_rule_config)
kwargs["code"] = self._register[k]["code"]
# Allow variable substitution in making the description
kwargs["description"] = self._register[k]["description"].format(**kwargs)
rule_kwargs[k] = kwargs
# Instantiate in the final step
return [self._register[k]["cls"](**rule_kwargs[k]) for k in keylist]
def copy(self):
"""Return a copy of self with a separate register."""
new_ruleset = copy.copy(self)
new_ruleset._register = self._register.copy()
return new_ruleset
|
import copy
import re
import unicodedata as ud
from enum import Enum
from WiktionaryTags import WiktionaryTags
# State: Is the parser reading an etymology or a pronunciation entry? 0 if no, 1 if etymology, 2 if pronunciation
class State(Enum):
OTHER = 0
ETYM = 1
PRONOUNCE = 2
# Stores a single entry for the etymology dictionary
class WiktionaryEntry(object):
__slots__ = ('word', 'raw_text', 'iso_code', 'pos', 'ipa', 'root_lang', 'nonstandard_root_code', 'root_word',
'root_roman', 'root_ipa', 'derivation', 'etym_number', 'universal_pronunciation', 'other_entries',
'headers', 'TAGS', 'latin_letters')
def __init__(self, word, raw_text):
self.word = self.process_word(word)
self.raw_text = raw_text
self.iso_code = ''
self.pos = []
self.ipa = ''
self.root_lang = ''
self.nonstandard_root_code = ''
self.root_word = []
self.root_roman = ''
self.root_ipa = ''
self.derivation = ''
self.etym_number = 0
self.universal_pronunciation = False
self.other_entries = [] # Stores other etymologies for the same entry
self.headers = [] # To help with debugging
self.TAGS = WiktionaryTags()
self.latin_letters = {} # To ensure that romanizations are in Latin letters
self.parse()
def reinitialize(self, reinit_pos=True):
if reinit_pos:
self.pos = []
if not self.universal_pronunciation:
self.ipa = ''
self.root_lang = ''
self.nonstandard_root_code = ''
self.root_word = []
self.root_roman = ''
self.root_ipa = ''
self.derivation = ''
# Denote reconstructions with *
@staticmethod
def process_word(word):
        processed = re.sub(r'^Reconstruction:[\w ]+/', '*', word)
return processed
def process_src_word_var(self, word):
if type(word) is list:
word = self.combine_last_elements(word)
return word
@staticmethod
def split_key(line):
for i, char in enumerate(line):
if char == '=':
return [line[:i], line[i+1:]]
elif not char.isalpha():
break
return ['', line]
@staticmethod
def separate_pipes(line):
sections = []
start_idx = 0
depth = 0
i = 0
while i + 1 < len(line):
char = line[i]
next_char = line[i+1]
if char == '{' and next_char == '{':
depth += 1
i += 1
elif char == '}' and next_char == '}':
depth -= 1
i += 1
elif char == '|' and depth == 0:
sections.append(line[start_idx:i])
start_idx = i + 1
i += 1
sections.append(line[start_idx:])
return sections
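    # Illustrative example (not from the test file): pipes nested inside {{...}}
    # templates do not split, only top-level pipes do, e.g.
    #   separate_pipes('der|id|poz-pro|*halas|t={{l|jv|alas}}')
    #   -> ['der', 'id', 'poz-pro', '*halas', 't={{l|jv|alas}}']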
@staticmethod
def get_curly_braces(line):
sections = []
compound_flag = False
paren_flag = False
start_idx = None
depth = 0
i = 0
while i + 1 < len(line):
char = line[i]
next_char = line[i+1]
if char == '(':
paren_flag = True
elif paren_flag:
if char == ')':
paren_flag = False
elif char == '{' and next_char == '{':
if depth == 0:
start_idx = i + 2
depth += 1
i += 1
elif char == '}' and next_char == '}':
depth -= 1
i += 1
if depth == 0:
if compound_flag:
if len(sections):
if type(sections[-1]) is not list:
sections[-1] = [sections[-1]]
sections[-1].append(line[start_idx:i-1])
compound_flag = False
else:
sections.append(line[start_idx:i-1])
elif char == '+' and depth == 0: # Compound
compound_flag = True
i += 1
return sections
def get_base_list(self, l):
if len(l) == 1 and type(l[0]) is list:
return self.get_base_list(l[0])
return l
def process_sub_braces(self, e):
sub = []
children = self.separate_pipes(e)
for i, c in enumerate(children):
key, val = self.split_key(c)
e_child = self.get_curly_braces(val)
if len(e_child):
dub_sub = []
for e_c in self.get_base_list(e_child):
dub_sub.append(self.separate_pipes(e_c))
sub.append([key, dub_sub])
else:
sub.append([key, val])
return sub
def get_all_braces(self, line):
output = []
entries = self.get_curly_braces(line)
for e in entries:
if type(e) is list:
sub = []
for cmp in e:
compound_sub = self.process_sub_braces(cmp)
sub.append(compound_sub)
else:
sub = self.process_sub_braces(e)
output.append(sub)
return output
@staticmethod
def is_header(line):
if line[0] == '=':
return True
return False
@staticmethod
def get_header_depth(header_line):
i = 0
while header_line[i] == '=':
i += 1
return i
@staticmethod
def is_lang(line):
if line[:18] == '{{wikipedia||lang=':
return True
return False
def create_other_entry(self, reinit_pos=True, increment_etym=False):
new_entry = copy.copy(self)
# Only have "self" contain list of other entries (prevent useless deeper recursion)
self.other_entries = new_entry.other_entries
new_entry.other_entries = []
self.other_entries.append(new_entry)
self.reinitialize(reinit_pos)
if increment_etym:
self.etym_number += 1
def process_header(self, line):
header_depth = self.get_header_depth(line)
header = line[header_depth:-header_depth].strip()
self.headers.append(header + '-' + str(header_depth))
lang_depth = 2
if header_depth == lang_depth and header in self.TAGS.lang2iso:
self.iso_code = self.TAGS.lang2iso[header]
elif header == 'Pronunciation':
return State.PRONOUNCE
elif header_depth == 3:
if header[:9] == 'Etymology':
if len(header) > 9 and header != 'Etymology 1': # Create another Entry if multiple etymologies given
self.create_other_entry(increment_etym=True)
return State.ETYM
if header in self.TAGS.POS:
self.pos.append(header)
return State.OTHER
def process_lang(self, line):
lang_id = line[18:-2]
self.iso_code = self.iso2full_iso(lang_id)
def set_derivation(self, der):
if der in self.TAGS.save_ety_tags:
self.derivation = self.TAGS.save_ety_tags[der]
return True
return False
def set_root_lang(self, lang_id):
self.root_lang = self.iso2full_iso(lang_id)
if len(self.root_lang):
return True
return False
def set_root_word(self, src_words, rom, ipa):
result = False
for word in src_words:
if len(word) and word != '-':
self.root_word.append(word)
result = True
if len(rom):
self.root_roman = re.sub(' ', '+', rom)
result = True
if len(ipa):
self.root_ipa = ipa
result = True
return result
@staticmethod
def process_src_word(src_word):
        src_word = re.sub(r'\([^)]*\)', '', src_word)
        src_word = re.sub(r'\[', '', src_word)
src_word = re.sub(']', '', src_word)
# If there is a list of words, split the words into a list
splits = re.search(', ', src_word)
if splits is not None:
words = [src_word[:splits.start()], src_word[splits.end():]]
else:
            words = [re.sub(r'\s+', '+', src_word.strip())]
return words
def set_nonstandard_root(self, lang_id):
self.nonstandard_root_code = lang_id
if lang_id in self.TAGS.nonstandard2standard:
self.root_lang = self.TAGS.nonstandard2standard[lang_id]
def parse_etymology(self, etym):
# Check if compound
if type(etym[0][0]) is list:
cmp_der, cmp_src_lang_id, cmp_src_word, cmp_rom, cmp_ipa = [], [], [], [], []
for cmp in etym:
der, src_lang_id, src_word, rom, ipa = self.get_etym_vars(cmp)
cmp_der.append(der)
cmp_src_lang_id.append(src_lang_id)
if len(src_word):
cmp_src_word.append(src_word)
if len(rom):
cmp_rom.append(rom)
if len(ipa):
cmp_ipa.append(ipa)
der = cmp_der[0]
src_lang_id = cmp_src_lang_id[0]
src_word = '+'.join(cmp_src_word)
rom = '+'.join(cmp_rom)
ipa = '+'.join(cmp_ipa)
else:
der, src_lang_id, src_word, rom, ipa = self.get_etym_vars(etym)
result = True
result *= self.set_derivation(der)
if not self.set_root_lang(src_lang_id):
self.set_nonstandard_root(src_lang_id)
self.set_root_word(self.process_src_word(src_word), rom, ipa)
if not result: # If any of the steps failed, delete the other parts
self.reinitialize(reinit_pos=False)
def compare_entry(self, der, lang_id, src_word, entry):
if der in self.TAGS.save_ety_tags:
der_tag = self.TAGS.save_ety_tags[der]
iso = self.iso2full_iso(lang_id)
if not len(iso):
if lang_id != entry.nonstandard_root_code:
return False
if entry.derivation == der_tag and src_word in entry.root_word:
return True
return False
def iso2full_iso(self, lang_id):
if lang_id not in self.TAGS.iso2lang:
return ''
lang = self.TAGS.iso2lang[lang_id]
iso = self.TAGS.lang2iso[lang]
return iso
def combine_last_elements(self, l):
combined = []
for e in l:
combined.append(e[-1])
return '+'.join(combined)
def is_latin(self, uchr):
try:
return self.latin_letters[uchr]
except KeyError:
return self.latin_letters.setdefault(uchr, 'LATIN' in ud.name(uchr))
def only_roman_chars(self, unistr):
return all(self.is_latin(uchr) for uchr in unistr if uchr.isalpha()) # isalpha suggested by John Machin
def process_tr(self, string):
if type(string) is list:
string = self.combine_last_elements(string)
if not self.only_roman_chars(string):
return ''
        processed = re.sub(r'[\[\]]', '', string)
processed = re.sub(r'<sub>\w*</sub>', '', processed)
processed = re.sub(r'<sup>\w*</sup>\.?', '', processed)
processed = re.sub(r'<sub>\w*</sub>', '', processed)
processed = re.sub(r'<sup>\w*</sup>\.?', '', processed)
return processed
def get_etym_vars(self, etym):
der = src_lang_id = src_word = romanized = ipa = ''
for i, (key, var) in enumerate(etym):
if key == 'tr':
romanized = self.process_tr(var)
elif key == 'ts':
ipa = '/{}/'.format(self.process_tr(var))
elif key == 'sort':
# TODO: implement. Should this accept the next entry as the root word?
pass
elif not len(key):
if i == 0:
der = var
if der not in self.TAGS.save_ety_tags and der != 'm':
break
elif der in ['cognate', 'cog', 'm']:
if i == 1:
src_lang_id = var
elif i == 2:
src_word = self.process_src_word_var(var)
elif i == 2:
src_lang_id = var
elif i == 3:
src_word = self.process_src_word_var(var)
return der, src_lang_id, src_word, romanized, ipa
def process_etymologies(self, line):
etyms = self.get_all_braces(line)
for i, etym in enumerate(etyms):
self.create_other_entry(reinit_pos=False)
self.parse_etymology(etym)
# Some Middle Chinese words are split across two etymology tags
if self.root_lang == 'ltc' and not len(self.root_word):
if i + 1 < len(etyms):
next_etym = etyms[i+1]
if len(next_etym) >= 2 and next_etym[0][1] == 'ltc-l':
                        word = re.sub(r'[\[\]\(\)]', '', next_etym[1][1])
self.root_word.append(word)
def is_compound_pair(self, pair):
if len(pair) > 1 and type(pair[1]) is list:
return True
return False
def get_pron_vars(self, pron):
lbl = phonemic = phonetic = ''
for i, pair in enumerate(pron):
if self.is_compound_pair(pair):
continue
key, val = pair
if len(key):
continue
elif i == 0:
lbl = val
if lbl not in ['IPA', 'IPAchar']:
break
elif i > 1 and len(val):
if val[0] == '/' and not len(phonemic):
phonemic = val
break
elif val[0] == '[' and not len(phonetic):
phonetic = val
break
return lbl, phonemic, phonetic
def parse_pronunciation(self, pron, accent=''):
self.universal_pronunciation = True
for header in self.headers:
if header[:9] == 'Etymology':
self.universal_pronunciation = False
break
_, phonemic, phonetic = self.get_pron_vars(pron)
if not len(self.ipa):
if len(phonemic):
self.ipa = phonemic
elif len(phonetic):
self.ipa = phonetic
elif self.ipa[0] == '[' and len(phonemic):
self.ipa = phonemic
def process_pronunciation(self, line):
pronunciations = self.get_all_braces(line)
accent = ''
for i, pron in enumerate(pronunciations):
if len(pron) == 2 and pron[0][1] == 'a':
accent = pron[1][1]
else:
self.parse_pronunciation(pron, accent)
for entry in self.other_entries:
if entry.etym_number == self.etym_number:
entry.ipa = self.ipa
def parse(self):
state = State.OTHER
for line in self.raw_text:
if self.is_header(line):
state = self.process_header(line)
elif self.is_lang(line):
self.process_lang(line)
elif state is State.ETYM:
self.process_etymologies(line)
elif state is State.PRONOUNCE:
self.process_pronunciation(line)
def to_list(self, dist):
output = []
if not len(self.root_word): # If only a transcription or romanization is available
output.append([self.word, self.iso_code, self.pos, self.ipa, self.root_lang, self.nonstandard_root_code, '',
self.root_roman, self.root_ipa, self.derivation, dist, self.etym_number])
for word in self.root_word:
output.append([self.word, self.iso_code, self.pos, self.ipa, self.root_lang, self.nonstandard_root_code, word,
self.root_roman, self.root_ipa, self.derivation, dist, self.etym_number])
return output
def to_full_list(self):
full_list = []
dist = 0
prev_etym_number = self.etym_number
for entry in self.other_entries:
if entry.etym_number != prev_etym_number:
dist = 0
prev_etym_number = entry.etym_number
if len(entry.root_lang) or len(entry.nonstandard_root_code):
if entry.derivation in ['cognate', 'cog', 'm']: # Don't set a dist for words that aren't ancestors
if len(entry.root_word) or len(entry.root_roman) or len(entry.root_ipa):
full_list.extend(entry.to_list(dist=0))
else:
dist += 1
full_list.extend(entry.to_list(dist))
if len(self.root_lang) or len(self.nonstandard_root_code):
if self.derivation in ['cognate', 'cog', 'm']: # Don't set a dist for words that aren't ancestors
if len(self.root_word) or len(self.root_roman) or len(self.root_ipa):
full_list.extend(self.to_list(0))
else:
dist += 1
full_list.extend(self.to_list(dist))
return full_list
@staticmethod
def combine_first(l1, l2, idx):
return l1[idx]
@staticmethod
def combine_pos(l1, l2, idx):
pos1 = l1[idx]
pos2 = l2[idx]
return list(set(pos1) | set(pos2))
@staticmethod
def combine_ipa(l1, l2, idx):
ipa1 = l1[idx]
ipa2 = l2[idx]
if not len(ipa1):
return ipa2
if not len(ipa2):
return ipa1
if ipa1[0] == '/' and ipa2[0] != '/':
return ipa1
if ipa1[0] != '/' and ipa2[0] == '/':
return ipa2
if len(ipa1) >= len(ipa2):
return ipa1
return ipa2
@staticmethod
def combine_by_greater_length(l1, l2, idx):
val1 = l1[idx]
val2 = l2[idx]
if len(val1) >= len(val2):
return val1
return val2
@staticmethod
def combine_der(l1, l2, idx):
der1 = l1[idx]
der2 = l2[idx]
if der1 == 'der' and der2 != 'cog':
return der2
return der1
@staticmethod
def combine_dist(l1, l2, idx):
dist1 = l1[idx]
dist2 = l2[idx]
if dist1 == 0:
return dist2
elif dist2 == 0:
return dist1
elif dist1 <= dist2:
return dist1
return dist2
def combine_duplicates(self, l1, l2):
combined = [self.combine_first(l1, l2, 0)]
combined.append(self.combine_first(l1, l2, 1))
combined.append(self.combine_pos(l1, l2, 2))
combined.append(self.combine_ipa(l1, l2, 3))
combined.append(self.combine_by_greater_length(l1, l2, 4))
combined.append(self.combine_by_greater_length(l1, l2, 5))
combined.append(self.combine_by_greater_length(l1, l2, 6))
combined.append(self.combine_by_greater_length(l1, l2, 7))
combined.append(self.combine_by_greater_length(l1, l2, 8))
combined.append(self.combine_der(l1, l2, 9))
combined.append(self.combine_dist(l1, l2, 10))
combined.append(self.combine_dist(l1, l2, 11))
return combined
def lang_duplicates(self, l1, l2):
lang1 = l1[4]
lang2 = l2[4]
return lang1 == lang2
def word_duplicates(self, l1, l2):
word1 = l1[6]
word2 = l2[6]
if word1 == word2 or not len(word1) or not len(word2):
return True
return False
def are_duplicates(self, l1, l2):
return self.lang_duplicates(l1, l2) and self.word_duplicates(l1, l2)
def check_list_duplicates(self, full_list):
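        # Repeatedly merge pairs of duplicate rows (same root language and a
        # compatible root word) until a full pass leaves the list length unchanged.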
while True:
temp_list = []
merged_idxs = []
for i, l1 in enumerate(full_list):
for j, l2 in enumerate(full_list):
if i >= j or i in merged_idxs or j in merged_idxs:
continue
if self.are_duplicates(l1, l2):
temp_list.append(self.combine_duplicates(l1, l2))
merged_idxs.append(i)
merged_idxs.append(j)
for k, l in enumerate(full_list):
if k not in merged_idxs:
temp_list.append(l)
if len(temp_list) == len(full_list):
break
full_list = temp_list
return full_list
@staticmethod
def list_to_string(entry_list):
pos = entry_list[2]
entry_list[2] = '/'.join(sorted(pos))
entry_list = entry_list[:-1]
entry_list[-1] = str(entry_list[-1])
entry_list.append('wik')
return ','.join(entry_list)
def to_full_string(self):
full_list = self.to_full_list()
final_list = self.check_list_duplicates(full_list)
return '\n'.join([self.list_to_string(x) for x in sorted(final_list, key=lambda x: (x[-1], x[-2]))])
if __name__ == "__main__":
word = 'test'
raw_text = []
with open('inputs/test.txt', 'r', encoding='utf-8') as f:
raw_text = [x for x in f.read().splitlines() if len(x)]
test = WiktionaryEntry(word, raw_text)
# print(test.check_list_duplicates(test.to_full_list()))
print(test.to_full_string())
# line = 'From {{bor|id|jv|ꦲꦭꦱ꧀|t=forest|tr={{l|jv|alas}}}}, from {{der|id|poz-pro|*halas|t=forest, wilderness, woods, jungle}}, from {{der|id|map-pro|*Salas|t=forest, wilderness, woods}}. Cognate to {{cog|ban|ᬳᬮᬲ᭄|t=forest|tr=alas}}.'
# line = 'From {{inh|af|nl|({{l|nl|de}}) {{l|nl|hare}}}}.'
# line = '{{calque|fr|ja|日本||Japan|tr={{l|ja|にほん}}, Nihon}}.'
# line = 'Borrowing from {{bor|ja|vi|東京|sort=とんきん|tr={{l|vi|Đông Kinh}}|t=[[Eastern]] [[capital]]}}, a historical name for [[Hanoi]]. {{rfv-etym|ja}}'
# entries = test.get_all_braces(line)
# for e in entries:
# print(e)
# TODO: {{etyl|la|en}} {{m|la|geologia}}
# TODO: when one entry contains etymologyical relations not contained in those words' entries... separate into chain?
# TODO: check borrowings with dist > 1. Should this be possible?
# TODO: check compounds with space vs. with +, compare with main entry word))
# (TODO: handle non-standard codes)
# (TODO: set distance to word)
# (TODO: include romanization/transcription if available (tr/ts))
# (TODO: check that right-to-left languages are saved properly)
# (TODO: fix "sort=")
# (TODO: check that there are no duplicates)
# (TODO: Fix that pronunciations aren't processed until after the etymologies have already been saved)
# (TODO: include pronunciation for recipient words, if available)
# (TODO: handle compounds > 2)
# TODO: (remove [[ ]], (.*), and add + to internal compounds)
|
import os
import pickle
import requests
from flask import Flask
from datetime import datetime
app = Flask(__name__)
def send_notification(message):
"""Send push notification."""
pushover_token = os.getenv('PUSHOVER_TOKEN')
pushover_url = 'https://api.pushover.net/1/messages.json'
params = {
'token': pushover_token,
'user': 'uga9w2s6wJsnGUwTjpmJnyMQnV6E5q',
'priority': -1,
'message': message,
'title': message
}
requests.post(pushover_url, params=params)
def get_social_time():
rescuetime_token = os.getenv('RESCUETIME_TOKEN')
url = 'https://www.rescuetime.com/anapi/data'
params = {'format': 'json',
'resolution_time': 'minute',
'restrict_kind': 'overview',
'key': rescuetime_token}
time = 0
r = requests.get(url, params=params)
j = r.json()
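    # Each row of the 'overview' report is assumed to be of the form
    # [rank, seconds_spent, number_of_people, category]; row[1] / 60 gives minutes.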
social_media = [row for row in j['rows'] if 'Social Networking' in row]
if social_media:
time = social_media[0][1] / 60
return int(time)
@app.route('/')
def index():
fp = open('data.pkl', 'rb')
details = pickle.load(fp)
fp.close()
last_distracted = details['last_distracted']
last_mins = details['minutes']
today = datetime.today().strftime('%D')
sent = (last_distracted == today)
used_time = get_social_time()
if not sent:
if used_time > 20:
send_notification('Focus: {}'.format(used_time))
last_mins = used_time
last_distracted = today
else:
if (used_time - last_mins) > 15:
send_notification('Focus: {}'.format(used_time))
last_mins = used_time
details = {'minutes': last_mins,
'last_distracted': last_distracted}
fp = open('data.pkl', 'wb')
pickle.dump(details, fp)
fp.close()
return str(details)
@app.route('/reset')
def reset():
details = {'minutes': 30, 'last_distracted': '10/02/17'}
pickle.dump(details, open('data.pkl', 'wb'))
return 'Done.'
if __name__ == '__main__':
app.run()
|
"""
Simple example custom service, used to drive shell commands on a node.
"""
from typing import Tuple
from core.nodes.base import CoreNode
from core.services.coreservices import CoreService, ServiceMode
class ExampleService(CoreService):
"""
Example Custom CORE Service
:cvar name: name used as a unique ID for this service and is required, no spaces
:cvar group: allows you to group services within the GUI under a common name
:cvar executables: executables this service depends on to function, if executable is
not on the path, service will not be loaded
:cvar dependencies: services that this service depends on for startup, tuple of
service names
:cvar dirs: directories that this service will create within a node
:cvar configs: files that this service will generate, without a full path this file
goes in the node's directory e.g. /tmp/pycore.12345/n1.conf/myfile
:cvar startup: commands used to start this service, any non-zero exit code will
cause a failure
:cvar validate: commands used to validate that a service was started, any non-zero
exit code will cause a failure
:cvar validation_mode: validation mode, used to determine startup success.
NON_BLOCKING - runs startup commands, and validates success with validation commands
BLOCKING - runs startup commands, and validates success with the startup commands themselves
TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone
:cvar validation_timer: time in seconds for a service to wait for validation, before
determining success in TIMER/NON_BLOCKING modes.
:cvar validation_period: period in seconds to wait before retrying validation,
only used in NON_BLOCKING mode
:cvar shutdown: shutdown commands to stop this service
"""
name: str = "ExampleService"
group: str = "Utility"
executables: Tuple[str, ...] = ()
dependencies: Tuple[str, ...] = ()
dirs: Tuple[str, ...] = ()
configs: Tuple[str, ...] = ("myservice1.sh", "myservice2.sh")
startup: Tuple[str, ...] = tuple(f"sh {x}" for x in configs)
validate: Tuple[str, ...] = ()
validation_mode: ServiceMode = ServiceMode.NON_BLOCKING
validation_timer: int = 5
validation_period: float = 0.5
shutdown: Tuple[str, ...] = ()
@classmethod
def on_load(cls) -> None:
"""
Provides a way to run some arbitrary logic when the service is loaded, possibly
to help facilitate dynamic settings for the environment.
:return: nothing
"""
pass
@classmethod
def get_configs(cls, node: CoreNode) -> Tuple[str, ...]:
"""
Provides a way to dynamically generate the config files from the node a service
will run. Defaults to the class definition and can be left out entirely if not
needed.
        :param node: core node that the service is being run on
:return: tuple of config files to create
"""
return cls.configs
@classmethod
def generate_config(cls, node: CoreNode, filename: str) -> str:
"""
Returns a string representation for a file, given the node the service is
starting on the config filename that this information will be used for. This
must be defined, if "configs" are defined.
        :param node: core node that the service is being run on
:param filename: configuration file to generate
:return: configuration file content
"""
cfg = "#!/bin/sh\n"
if filename == cls.configs[0]:
cfg += "# auto-generated by MyService (sample.py)\n"
for iface in node.get_ifaces():
cfg += f'echo "Node {node.name} has interface {iface.name}"\n'
elif filename == cls.configs[1]:
cfg += "echo hello"
return cfg
@classmethod
def get_startup(cls, node: CoreNode) -> Tuple[str, ...]:
"""
Provides a way to dynamically generate the startup commands from the node a
service will run. Defaults to the class definition and can be left out entirely
if not needed.
        :param node: core node that the service is being run on
:return: tuple of startup commands to run
"""
return cls.startup
@classmethod
def get_validate(cls, node: CoreNode) -> Tuple[str, ...]:
"""
Provides a way to dynamically generate the validate commands from the node a
service will run. Defaults to the class definition and can be left out entirely
if not needed.
        :param node: core node that the service is being run on
:return: tuple of commands to validate service startup with
"""
return cls.validate
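# Editor's sketch (not part of the original file): for many services the class
# variables documented above are all that is needed. The hypothetical
# MinimalService below only ships a single generated script.
class MinimalService(CoreService):
    name: str = "MinimalService"
    group: str = "Utility"
    configs: Tuple[str, ...] = ("hello.sh",)
    startup: Tuple[str, ...] = tuple(f"sh {x}" for x in configs)

    @classmethod
    def generate_config(cls, node: CoreNode, filename: str) -> str:
        # generate_config must be defined because "configs" is non-empty
        return f'#!/bin/sh\necho "hello from {node.name}"\n'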
|
import asyncio
from contextlib import suppress
from sanic import Blueprint, response
from sanic.exceptions import abort
from sanic_openapi import doc
from .. import helpers, settings, utils
from ..models import Template
blueprint = Blueprint("Templates", url_prefix="/templates")
@blueprint.get("/")
@doc.summary("List all templates")
# TODO: https://github.com/jacebrowning/memegen/issues/580
# @doc.consumes(
# doc.String(name="filter", description="Part of the name or example to match"),
# location="query",
# )
@doc.produces(
# Can't use doc.List(Template) because the jsonify method is slightly different
doc.List(
{
"id": str,
"name": str,
"styles": doc.List(str),
"blank": str,
"example": str,
"source": str,
"_self": str,
}
),
description="Successfully returned a list of all templates",
content_type="application/json",
)
async def index(request):
query = request.args.get("filter", "").lower()
data = await asyncio.to_thread(helpers.get_valid_templates, request, query)
return response.json(data)
@blueprint.get("/<id>")
@doc.summary("View a specific template")
@doc.produces(
{
"id": str,
"name": str,
"styles": doc.List(str),
"blank": str,
"example": str,
"source": str,
"_self": str,
},
description="Successfully returned a specific templates",
content_type="application/json",
)
@doc.response(404, str, description="Template not found")
async def detail(request, id):
template = Template.objects.get_or_none(id)
if template:
return response.json(template.jsonify(request.app))
abort(404)
@blueprint.post("/<id>")
@doc.tag("Memes")
@doc.operation("Memes.create_from_template")
@doc.exclude(settings.DEPLOYED)
@doc.summary(settings.PREFIX + "Create a meme from a template")
@doc.consumes(
doc.JsonBody({"text_lines": [str], "extension": str, "redirect": bool}),
content_type="application/json",
location="body",
)
@doc.response(
201, {"url": str}, description="Successfully created a meme from a template"
)
async def build(request, id):
if request.form:
payload = dict(request.form)
with suppress(KeyError):
payload["image_url"] = payload.pop("image_url")[0]
with suppress(KeyError):
payload["extension"] = payload.pop("extension")[0]
with suppress(KeyError):
payload["redirect"] = payload.pop("redirect")[0]
else:
payload = request.json or {}
with suppress(KeyError):
payload["text_lines"] = payload.pop("text_lines[]")
template = Template.objects.get_or_create(id)
url = template.build_custom_url(
request,
payload.get("text_lines") or [],
extension=payload.get("extension"),
)
url, _updated = await utils.meta.tokenize(request, url)
if payload.get("redirect", False):
return response.redirect(url)
if template.valid:
status = 201
else:
status = 404
template.delete()
return response.json({"url": url}, status=status)
@blueprint.post("/custom")
@doc.tag("Memes")
@doc.exclude(settings.DEPLOYED)
@doc.summary(settings.PREFIX + "Create a meme from any image")
@doc.consumes(
doc.JsonBody(
{"image_url": str, "text_lines": [str], "extension": str, "redirect": bool}
),
content_type="application/json",
location="body",
)
@doc.response(
201, {"url": str}, description="Successfully created a meme from a custom image"
)
async def custom(request):
if request.form:
payload = dict(request.form)
with suppress(KeyError):
payload["image_url"] = payload.pop("image_url")[0]
with suppress(KeyError):
payload["extension"] = payload.pop("extension")[0]
with suppress(KeyError):
payload["redirect"] = payload.pop("redirect")[0]
else:
payload = request.json or {}
with suppress(KeyError):
payload["text_lines"] = payload.pop("text_lines[]")
url = Template("_custom").build_custom_url(
request,
payload.get("text_lines") or [],
background=payload.get("image_url", ""),
extension=payload.get("extension", ""),
)
url, _updated = await utils.meta.tokenize(request, url)
if payload.get("redirect", False):
return response.redirect(url)
return response.json({"url": url}, status=201)
|
import sqlite3
from modules.create_connect import create_or_connect as db_link
from modules.utils import dict_factory
from modules import vaccine_lot
from contextlib import closing
from datetime import datetime
class Person:
def __init__(self, person_id, first_name, last_name, address, phone, email, city, birth_date):
self.__id = person_id
self.__first_name = first_name
self.__last_name = last_name
self.__address = address
self.__phone = phone
self.__email = email
self.__city = city
self.__birth_date = birth_date
def set_id(self, person_id):
self.__id = person_id
def get_id(self):
return self.__id
def set_first_name(self, first_name):
self.__first_name = first_name
def get_first_name(self):
return self.__first_name
def set_last_name(self, last_name):
self.__last_name = last_name
def get_last_name(self):
return self.__last_name
def set_address(self, address):
self.__address = address
def get_address(self):
return self.__address
def set_phone(self, phone):
self.__phone = phone
def get_phone(self):
return self.__phone
def set_email(self, email):
self.__email = email
def get_email(self):
return self.__email
def set_city(self, city):
self.__city = city
def get_city(self):
return self.__city
def set_birth_date(self, birth_date):
self.__birth_date = birth_date
def get_birth_date(self):
return self.__birth_date
class Affiliate(Person):
def __init__(self,
person_id,
first_name,
last_name,
address,
phone,
email,
city,
birth_date,
affiliation_date):
self.__affiliation_date = affiliation_date
self.__vaccinated = False
self.__disaffiliation_date = None
super().__init__(person_id, first_name, last_name, address, phone, email, city, birth_date)
def set_affiliation_date(self, affiliation_date):
self.__affiliation_date = affiliation_date
def get_affiliation_date(self):
return self.__affiliation_date
def set_vaccinated(self, vaccinated):
self.__vaccinated = vaccinated
def get_vaccinated(self):
return self.__vaccinated
def set_disaffiliation_date(self, disaffiliation_date):
self.__disaffiliation_date = disaffiliation_date
def get_disaffiliation_date(self):
return self.__disaffiliation_date
"""
OVERVIEW: This module contains the means to create (add), search (find), manage
          affiliation information (affiliate/disaffiliate), and manage users'
          vaccination status (vaccinate).
"""
class AffiliateManager:
"""
Description:
        Adds an affiliate to the [affiliate] table.
        Returns True if the row was inserted successfully, or False if
        something went wrong (e.g. a duplicate ID).
Arguments:
* affiliate_id: Affiliate's ID number.
        * first_name: Affiliate's first and possibly middle name.
* last_name: Affiliate's last name.
* address: Affiliate's address.
* phone: Affiliate's phone number.
* email: Affiliate's email address.
* city: Affiliate's home city.
* birth_date: Affiliate's birth date.
* affiliation_date: Affiliate's affiliation date.
* vaccinated=False: Affiliate's vaccination status, initialized as
False (as required).
* disaffiliation_date: Affiliate's disaffiliation date, initialized as
            None (as required).
"""
def add(self, affiliate: Affiliate):
try:
with db_link() as con:
with closing(con.cursor()) as cur:
cur.execute("INSERT INTO affiliate VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (
affiliate.get_id(),
affiliate.get_first_name(),
affiliate.get_last_name(),
affiliate.get_address(),
affiliate.get_phone(),
affiliate.get_email(),
affiliate.get_city(),
affiliate.get_birth_date(),
affiliate.get_affiliation_date(),
affiliate.get_vaccinated(),
affiliate.get_disaffiliation_date()
))
return True
except sqlite3.IntegrityError:
return False
"""
Description:
        Looks up an affiliate by ID and returns the matching record,
        or None if no affiliate is associated with the given ID.
Arguments:
* affiliate_id: Affiliate's ID number.
"""
def find(self, affiliate_id):
try:
with db_link() as con:
con.row_factory = dict_factory
with closing(con.cursor()) as cur:
cur.execute("SELECT * from affiliate WHERE affiliate_id = (?)", (affiliate_id, ))
items = cur.fetchone()
return items
except sqlite3.IntegrityError:
return None
"""
Description:
        Affiliates a user, identified by ID, on the given affiliation date.
Arguments:
* affiliate_id: Affiliate's ID number.
        * date: The affiliation date, formatted as a timestamp.
"""
def affiliate_(self, affiliate_id, date):
try:
with db_link() as con:
with closing(con.cursor()) as cur:
cur.execute("UPDATE affiliate SET affiliation_date = (?), disaffiliation_date = NULL WHERE affiliate_id = (?)",(date, affiliate_id,))
return True
except sqlite3.IntegrityError:
return False
"""
Description:
        Disaffiliates a user, identified by ID, on the given disaffiliation date.
Arguments:
* affiliate_id: Affiliate's ID number.
        * date: The disaffiliation date, formatted as a timestamp.
"""
def disaffiliate(self, affiliate_id, date):
try:
with db_link() as con:
with closing(con.cursor()) as cur:
cur.execute("UPDATE affiliate SET disaffiliation_date = (?), affiliation_date = NULL WHERE affiliate_id = (?)",(date, affiliate_id,))
return True
except sqlite3.IntegrityError:
return False
"""
Description:
        Attempts to change an affiliate's vaccination status, handling every
        way the update can fail.
Arguments:
* affiliate_id: Affiliate's ID number.
Return:
        0: User was vaccinated successfully.
        1: The vaccine lot could not be used, so the user was not vaccinated.
        2: There is no vaccination plan related to the user with the given id.
        3: User was already vaccinated.
        4: A database error occurred and the user could not be vaccinated.
"""
def vaccinate(self, affiliate_id):
try:
vaccinated = self.find(affiliate_id)
if vaccinated:
vaccinated = vaccinated['vaccinated']
now = datetime.now()
if not vaccinated:
items = self.get_vaccination_schedule(affiliate_id)
if items:
check_vaccine_lot = vaccine_lot.VaccineLotManager()
if check_vaccine_lot.use_vaccine(items['vaccine_lot_id']):
self.update_status(affiliate_id, True)
return 0
else:
return 1
else:
return 2
else:
return 3
except sqlite3.IntegrityError:
return 4
"""
Description:
        Retrieves the affiliate's vaccination schedule row (with column names,
        via dict_factory) and returns it as a dictionary.
Arguments:
* affiliate_id: Affiliate's ID number.
"""
def get_vaccination_schedule(self, affiliate_id):
self.affiliate_id = affiliate_id
try:
with db_link() as con:
con.row_factory = dict_factory
with closing(con.cursor()) as cur:
cur.execute("SELECT * from VaccinationSchedule WHERE affiliate_id = (?)",(affiliate_id,))
items = cur.fetchone()
return items
except sqlite3.IntegrityError:
return None
"""
Description:
        Updates the affiliate's vaccination status; it is only called after
        the other checks in vaccinate() have passed.
Arguments:
* affiliate_id: Affiliate's ID number.
* status: vaccination state, it gets a 1 when vaccinating affiliates.
"""
def update_status(self, affiliate_id, status):
self.affiliate_id = affiliate_id
self.vaccinated = status
try:
with db_link() as con:
with closing(con.cursor()) as cur:
cur.execute("UPDATE affiliate SET vaccinated = (?) WHERE affiliate_id = (?)",(self.vaccinated,self.affiliate_id,))
return True
except sqlite3.IntegrityError:
return False
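# Editor's sketch (not part of the original module): typical use of the manager,
# assuming the affiliate table exists and a matching VaccinationSchedule row is
# present. All identifiers and values below are illustrative only.
#
#   manager = AffiliateManager()
#   person = Affiliate(123, 'Ada', 'Lovelace', 'Somewhere 1', '555-0100',
#                      'ada@example.com', 'London', '1815-12-10', '2021-01-01')
#   manager.add(person)        # True on success, False on an integrity error
#   manager.find(123)          # dict row (via dict_factory) or None
#   manager.vaccinate(123)     # status code 0..4 as documented above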
|
# -*- coding: utf-8 -*-
""" Exploring Box animation.
Inquisitive pair of cubes of LEDs.
"""
import random
from ..engine import Animation
from ..sprites import Cube
class SpiralPath:
def __init__(self, margin, offset=0.0):
steps_x = 8 - margin[0]
steps_y = 8 - margin[1]
self._max_z = 8 - margin[2]
self._xy = []
self._xy += zip([0] * steps_y, range(0, steps_y))
self._xy += zip(range(0, steps_x), [steps_y] * steps_x)
self._xy += zip([steps_x] * steps_y, range(steps_y - 1, -1, -1))
self._xy += zip(range(steps_x - 1, -1, -1), [0] * steps_x)
self._t = int(len(self._xy) * offset)
def next(self):
once_around = len(self._xy)
pz = self._t // once_around
r = self._t % once_around
px, py = self._xy[r]
self._t += 1
self._t %= (self._max_z + 1) * once_around
return (px, py, pz)
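# Editor's note (not in the original): for margin=(2, 2, 2) the path walks the
# perimeter of the square spanning x, y = 0..6, raising z by one after each
# full lap and wrapping once z exceeds 8 - margin[2]; the first positions are
# (0, 0, 0), (0, 1, 0), ..., (0, 5, 0), (0, 6, 0), (1, 6, 0), ...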
class ExploringBox(Animation):
ANIMATION = __name__
ARGS = {
}
def post_init(self):
size1 = random.choice([2, 3, 4, 5])
self._cube1 = Cube(size=size1)
self._spiral_path1 = SpiralPath(
margin=(size1, size1, size1))
size2 = random.choice([2, 3, 4, 5])
self._cube2 = Cube(size=size2)
self._spiral_path2 = SpiralPath(
margin=(size2, size2, size2),
offset=0.5)
def render(self, frame):
self._cube1.pos = self._spiral_path1.next()
self._cube1.render(frame)
self._cube2.pos = self._spiral_path2.next()
self._cube2.render(frame)
|
'''
Base class for DataBear sensors
'''
from databear.errors import SensorConfigError, MeasureError
import datetime
import time
class Sensor:
interface_version = '1.2'
hardware_settings = {}
measurements = [] #List of measurement names
units = {} #List of units associated with measurement names
measurement_description = {}
min_interval = 1 #Minimum interval that sensor can be polled
uses_portlock = False # Set to true in all sensor classes that require a portlock (modbus sensors)
def __init__(self,name,sn,address):
'''
Create a new sensor
Inputs
- name (string): sensor name
- sn (string): serial number
- address (int): default 0
'''
try:
self.name = name
self.sn = sn
self.address = address
except KeyError as ke:
raise SensorConfigError('YAML missing required sensor setting')
#Define characteristics of this sensor
self.configid = None
self.min_interval = 0 #Minimum interval that sensor can be polled
#Initialize data structure
self.data = {}
for measure_name in self.measurements:
self.data[measure_name] = []
self.connected = False
def __str__(self):
'''
Standardized way to print sensor:
<Sensor Name> - <measure name>:(dt,val), ...
'''
output = self.name + '\n' #Initialize
#Get current values
currentdata = self.getcurrentdata()
#Create output string
for m,v in currentdata.items():
if v:
dtstr = v[0].strftime('%Y-%m-%d %H:%M:%S:%f')
output = output + '{}: {}, {}\n'.format(m,dtstr,v[1])
else:
output = output + '{}: No Data\n'.format(m)
return output
def connect(self,port):
pass
def measure(self):
pass
def getcurrentdata(self):
'''
Return most recent data from sensor
Output:
{'name':(dt,val),'name2'...}
Return None if no data for particular measurement
'''
currentdata = {}
for key,val in self.data.items():
try:
currentdata[key]=val[-1]
except IndexError:
#Assign none if there is nothing in list
currentdata[key]=None
return currentdata
def getdata(self,name,startdt,enddt):
'''
Return a list of values such that
startdt <= timestamps < enddt
- Inputs: datetime objects
'''
output = []
try:
data = self.data[name]
for val in data:
if (val[0]>=startdt) and (val[0]<enddt):
output.append(val)
return output
except KeyError as ke:
print(name + " missing from " + data)
raise MeasureError(name, [], "name missing from dictionary")
def cleardata(self,name,startdt,enddt):
'''
Clear data values for a particular measurement
Loop through values and remove. Note: This is probably
inefficient if the data structure is large.
'''
savedata = []
data = self.data[name]
for val in data:
if (val[0]<startdt) or (val[0]>=enddt):
savedata.append(val)
self.data[name] = savedata
class BusSensor(Sensor):
'''
A base class for a sensor that can be part of
a bus network architecture.
'''
def __init__(self,name,sn,address):
'''
Override base class to add port lock
'''
super().__init__(name,sn,address)
self.portlock = None
def connect(self,port,portlock):
'''
Set up portlock and connection
'''
self.portlock = portlock
def startMeasure(self):
'''
Begin a concurrent measurement
Return the wait time between start and read
'''
return 0
def readMeasure(self,starttime):
'''
Read measurement from sensor
'''
pass
def measure(self):
'''
Coordinate start and read measure with
port locks on the bus
'''
dt = datetime.datetime.now()
try:
#The start measurement sequence
self.portlock.acquire()
s = self.startMeasure()
self.portlock.release()
#Wait s then read
time.sleep(s)
self.portlock.acquire()
self.readMeasure(dt)
self.portlock.release()
except:
#Unlock the port if any exception
self.portlock.release()
#Raise again so that the exception is logged
raise
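# Editor's sketch (not part of the original module): a minimal concrete sensor
# built on the base class above. The measurement name and the generated value
# are illustrative; a real sensor would talk to hardware in measure().
class DummySensor(Sensor):
    measurements = ['counter']
    units = {'counter': 'count'}
    measurement_description = {'counter': 'number of times measure() was called'}

    def measure(self):
        # store (timestamp, value) pairs the way getcurrentdata()/getdata() expect
        dt = datetime.datetime.now()
        self.data['counter'].append((dt, len(self.data['counter']) + 1))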
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Overrides the standard Polyencoder Agent to only return the attention weights.
"""
import torch
import torch.nn
import torch.nn.functional as F
from typing import Optional, Tuple, Dict, Union
from parlai.agents.transformer.modules import (
MultiHeadAttention,
TransformerEncoder,
TransformerEncoderLayer,
)
from parlai.agents.transformer.polyencoder import (
PolyencoderAgent as BasePolyencoderAgent,
PolyEncoderModule,
PolyBasicAttention,
)
from parlai.core.dict import DictionaryAgent
from parlai.core.loader import register_agent
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.torch_agent import Batch
from parlai.utils.torch import PipelineHelper
@register_agent('return_code_weights_agent')
class PolyencoderReturnCodeWeightsAgent(BasePolyencoderAgent):
"""
A polyencoder agent where the model returns attention weights, rather than encoded
context.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
BasePolyencoderAgent.add_cmdline_args(parser, partial_opt)
group = parser.add_argument_group('Return Weights Poly Group')
group.add_argument(
'--top-k',
type=int,
default=100,
help='How many tokens to output when outputting relevant tokens',
)
return parser
def build_model(self, states=None):
"""
Return built model.
"""
return PolyencoderReturnWeightsModule(self.opt, self.dict, self.NULL_IDX)
def get_ctxt_rep(
self, batch: Batch, get_weights: bool = False
) -> Tuple[torch.Tensor, torch.BoolTensor, torch.Tensor]:
"""
Encode context representation.
Override to extract weights appropriately.
"""
ctxt_rep, ctxt_rep_mask, weights, _ = self.model(
**self._model_context_input(batch)
)
return ctxt_rep, ctxt_rep_mask, weights
def score_candidates(self, batch, cand_vecs, cand_encs=None):
"""
Score candidates.
Override to extract weights appropriately, if needed.
"""
original_dim = cand_vecs.dim()
if original_dim == 2:
cand_vecs = cand_vecs.unsqueeze(1)
ctxt_rep, ret_weights, ctxt_rep_mask, cand_rep = self.model(
**self._model_context_input(batch), cand_tokens=cand_vecs
)
if original_dim == 2:
num_cands = cand_rep.size(0) # will be bsz if using batch cands
cand_rep = (
cand_rep.expand(num_cands, batch.text_vec.size(0), -1)
.transpose(0, 1)
.contiguous()
)
ctxt_code_weights, ctxt_rep_mask = self.model(
ctxt_rep=ctxt_rep, ctxt_rep_mask=ctxt_rep_mask, cand_rep=cand_rep
)
character_weights = torch.bmm(ctxt_code_weights, ret_weights)
return character_weights, ctxt_rep_mask
def _v2t(self, vec):
"""
Convert token indices to string of tokens.
"""
new_vec = []
if hasattr(vec, 'cpu'):
vec = vec.cpu()
for i in vec:
if i == self.END_IDX:
break
elif i != self.START_IDX:
new_vec.append(i)
return self.dict.vec2txt(new_vec)
class TransformerReturnWeightsEncoderLayer(TransformerEncoderLayer):
"""
Overridden TransformerEncoderLayer that returns the self-attn weights.
"""
def forward(
self, tensor: torch.Tensor, mask: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Forward pass.
Override to return weights.
"""
residual = tensor
if self.variant == 'prelayernorm':
tensor = self.norm1(tensor)
####################
# Begin Difference #
####################
attended_tensor, _, raw_weights, *_ = self.attention(tensor, mask=mask)
bsz, seq_len, _ = tensor.size()
weights = (
raw_weights.view(bsz, self.opt['n_heads'], seq_len, seq_len).max(1).values
)
####################
# \End Difference #
####################
tensor = residual + self.dropout(attended_tensor)
if self.variant == 'aiayn' or self.variant == 'xlm' or self.variant == 'bart':
tensor = self.norm1(tensor)
residual = tensor
if self.variant == 'prelayernorm':
tensor = self.norm2(tensor)
tensor = residual + self.dropout(self.ffn(tensor))
if self.variant == 'aiayn' or self.variant == 'xlm' or self.variant == 'bart':
tensor = self.norm2(tensor)
tensor *= mask.unsqueeze(-1).type_as(tensor)
return tensor, weights
class TransformerReturnWeightsEncoder(TransformerEncoder):
"""
Override TransformerEncoder to return the self-attn weights.
"""
def forward( # type: ignore
self,
input: torch.LongTensor,
positions: Optional[torch.LongTensor] = None,
segments: Optional[torch.LongTensor] = None,
**kwargs,
) -> Union[
Tuple[torch.Tensor, Optional[torch.Tensor]],
Tuple[torch.Tensor, torch.BoolTensor, Optional[torch.Tensor]],
]:
"""
Forward pass.
Propagate kwargs
"""
# embed input
tensor, mask = self.forward_embedding(input, positions, segments)
if self.variant == 'xlm' or self.variant == 'bart':
tensor = self.norm_embeddings(tensor)
# --dropout on the embeddings
tensor = self.dropout(tensor)
tensor *= mask.unsqueeze(-1).type_as(tensor)
# apply transformer layers
tensor = self.forward_layers(tensor, mask, **kwargs)
###################
# BEGIN DIFFERENCE#
###################
tensor, weights = tensor
###################
# \End DIFFERENCE#
###################
if self.variant == 'prelayernorm':
tensor = self.norm_embeddings(tensor)
# reduce output
tensor, out_mask = self.reduce_output(tensor, mask)
if out_mask is not None:
return tensor, out_mask, weights
else:
return tensor, weights
def forward_layers(
self, tensor: torch.Tensor, mask: torch.BoolTensor, **kwargs
) -> Union[Tuple[torch.Tensor, Optional[torch.Tensor]], torch.Tensor]:
"""
Override to return attention weights.
"""
weights = None
if getattr(self.layers, 'is_model_parallel', False):
# factored out for readability. It is equivalent to the other
# condition
tensor, weights = self._apply_model_parallel(tensor, mask, **kwargs)
else:
for i in range(self.n_layers):
tensor, weights = self.layers[i](tensor, mask, **kwargs)
return tensor, weights
def _apply_model_parallel(
self, tensor, mask, **kwargs
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Override to return attention weights.
"""
chunks = PipelineHelper.split((tensor, mask))
work_items = PipelineHelper.schedule_work_items(self.layers, chunks)
for chunk_idx, layer_nos, next_device in work_items:
s_weights = None
try:
s_tensor, s_mask = chunks[chunk_idx]
except ValueError:
s_tensor, s_mask, s_weights = chunks[chunk_idx]
for layer_no in layer_nos:
s_tensor, s_weights = self.layers[layer_no](s_tensor, s_mask, **kwargs)
chunks[chunk_idx] = PipelineHelper.chunk_to(
(s_tensor, s_mask, s_weights), next_device
)
joined = PipelineHelper.join(chunks)
tensor_out, out_mask, weights = joined
return tensor_out, weights
class PolyencoderReturnWeightsModule(PolyEncoderModule):
"""
Constructs attentions and saves their weights!
"""
def __init__(self, opt: Opt, dict_: DictionaryAgent, null_idx: int):
super().__init__(opt, dict_, null_idx)
self.opt = opt
assert self.type == 'codes'
if isinstance(self.code_attention, PolyBasicAttention):
self.code_attention.get_weights = True
if self.attention_type != 'multihead':
self.attention.get_weights = True
def get_encoder(self, opt, dict_, null_idx, reduction_type, for_context: bool):
"""
Override to not build the cand encoder.
"""
if not for_context:
wrapped_class = TransformerEncoder
else:
wrapped_class = TransformerReturnWeightsEncoder.with_components(
layer=TransformerReturnWeightsEncoderLayer
)
embeddings = self._get_embeddings(
dict_=dict_, null_idx=null_idx, embedding_size=opt['embedding_size']
)
return wrapped_class(
opt=opt,
embedding=embeddings,
vocabulary_size=len(dict_),
padding_idx=null_idx,
reduction_type=reduction_type,
)
def attend(
self,
attention_layer: torch.nn.Module,
queries: torch.Tensor,
keys: Optional[torch.Tensor],
values: torch.Tensor,
mask: torch.BoolTensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Return attended tensor and weights.
"""
if keys is None:
keys = values
if isinstance(attention_layer, PolyBasicAttention):
attended, weights = attention_layer(
queries, keys, mask_ys=mask, values=values
)
elif isinstance(attention_layer, MultiHeadAttention):
attended, _, weights = attention_layer(queries, keys, values, mask)
else:
raise Exception('Unrecognized type of attention')
return attended, weights
def encode(
self, cand_tokens: Optional[torch.Tensor], **ctxt_inputs: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.BoolTensor, Optional[torch.Tensor]]:
"""
Override Polyencoder.encode to *only* return the coded/self-attn attention
weights.
"""
assert len(ctxt_inputs) > 0
assert 'ctxt_tokens' in ctxt_inputs
assert len(ctxt_inputs['ctxt_tokens'].shape) == 2
assert self.type == 'codes'
cand_embed = None
if cand_tokens is not None:
if len(cand_tokens.shape) != 3:
cand_tokens = cand_tokens.unsqueeze(1)
bsz = cand_tokens.size(0)
num_cands = cand_tokens.size(1)
cand_embed = self.encoder_cand(cand_tokens.view(bsz * num_cands, -1))
cand_embed = cand_embed.view(bsz, num_cands, -1)
bsz = self._get_context_batch_size(**ctxt_inputs)
# get context_representation. Now that depends on the cases.
ctxt_out, ctxt_mask, ctxt_self_attn_weights = self.encoder_ctxt(
**self._context_encoder_input(ctxt_inputs)
)
ctxt_self_attn_weights = F.softmax(ctxt_self_attn_weights, dim=-1)
ctxt_rep, ctxt_code_weights = self.attend(
self.code_attention,
queries=self.codes.repeat(bsz, 1, 1),
keys=ctxt_out,
values=ctxt_out,
mask=ctxt_mask,
)
return ctxt_rep, ctxt_code_weights, ctxt_mask, cand_embed
def score(
self,
ctxt_rep: torch.Tensor,
ctxt_rep_mask: torch.Tensor,
cand_embed: torch.Tensor,
):
"""
Override score to return the attention weights **RATHER THAN THE SCORES**
"""
ones_mask = ctxt_rep.new_ones(
ctxt_rep.size(0), self.n_codes
).byte() # type: ignore
ctxt_final_rep, ctxt_code_weights = self.attend(
self.attention, cand_embed, ctxt_rep, ctxt_rep, ones_mask # type: ignore
)
return ctxt_code_weights, ctxt_rep_mask
|
"""
Build the training dataset used to train the network.
Takes roughly forty minutes and produces about 3 GB of training data.
"""
import os
import sys
sys.path.append(os.path.split(sys.path[0])[0])
import shutil
from time import time
import numpy as np
from tqdm import tqdm
import SimpleITK as sitk
import scipy.ndimage as ndimage
#Path to store processed data
training_set_path = '/home/ubuntu/Research/dataset/Pancreas-CT_processed_down_scale0.5_expand20'
#Path of original data
train_ct_path = '/home/ubuntu/NIH-Pancreas-CT/data/'
train_seg_path = '/home/ubuntu/NIH-Pancreas-CT/TCIA_pancreas_labels-02-05-2017'
#Maximum value
upper = 240
lower = -100
#Downsampling scale for x and y
down_scale = 0.5
slice_thickness = 1
expand_slice = 20
if os.path.exists(training_set_path):
shutil.rmtree(training_set_path)
new_ct_path = os.path.join(training_set_path, 'ct')
new_seg_dir = os.path.join(training_set_path, 'seg')
os.mkdir(training_set_path)
os.mkdir(new_ct_path)
os.mkdir(new_seg_dir)
start_slices = [43, 151, 167]
end_slices = [227, 368, 405]
FULL_SIZE=True
if not FULL_SIZE:
for i in range(3):
start_slices[i] = start_slices[i] - expand_slice
end_slices[i] = end_slices[i] + expand_slice
# mean_z = []
# mean_y = []
# mean_x = []
start = time()
for file in tqdm(os.listdir(train_ct_path)):
    # Read the CT volume and the gold-standard segmentation into memory
print(os.path.join(train_ct_path, file))
ct = sitk.ReadImage(os.path.join(train_ct_path, file), sitk.sitkInt16)
ct_array = sitk.GetArrayFromImage(ct)
# print(ct.GetSpacing())
# print(ct_array.shape)
# print(ct.GetDirection())
# print(ct.GetOrigin())
seg = sitk.ReadImage(os.path.join(train_seg_path, file.replace('PANCREAS_', 'label')), sitk.sitkUInt8)
seg_array = sitk.GetArrayFromImage(seg)
# print(seg.GetSpacing())
# print(seg.GetDirection())
# print(seg.GetOrigin())
    # # Merge the liver and liver-tumour labels in the gold standard into one
# seg_array[seg_array > 0] = 1
if ct.GetSpacing()[-1] != slice_thickness:
ct_array = ndimage.zoom(ct_array, (ct.GetSpacing()[-1] / slice_thickness, 1, 1), order=3)
# print(ct_array.shape)
seg_array = ndimage.zoom(seg_array, (ct.GetSpacing()[-1] / slice_thickness, 1, 1), order=0)
# print(seg_array.shape)
if not FULL_SIZE:
for i in range(3):
start_slices[i] = max(0, start_slices[i])
end_slices[i] = min(seg_array.shape[i] - 1, end_slices[i])
ct_array = ct_array[start_slices[0]:end_slices[0] + 1, start_slices[1]:end_slices[1] + 1, start_slices[2]:end_slices[2] + 1]
#The dataset mismatch between label and data
ct_array = np.flip(ct_array, 1)
seg_array = seg_array[start_slices[0]:end_slices[0] + 1, start_slices[1]:end_slices[1] + 1, start_slices[2]:end_slices[2] + 1]
    # Downsample the CT data in the axial plane and resample so that the z-axis spacing of every volume is 1 mm
if down_scale != 1:
ct_array = ndimage.zoom(ct_array, (down_scale, down_scale, down_scale), order=3)
# print(ct_array.shape)
seg_array = ndimage.zoom(seg_array, (down_scale, down_scale, down_scale), order=0)
# print(seg_array.shape)
    # Clip grey values that fall outside the threshold window
ct_array[ct_array > upper] = upper
ct_array[ct_array < lower] = lower
# if ct_array.shape[0] < min_z:
# min_z = ct_array.shape[0]
# elif ct_array.shape[0] > max_z:
# max_z = ct_array.shape[0]
    # Find the first and last slice of the liver region and expand outwards by expand_slice on each side
# z = np.any(seg_array, axis=(1, 2))
# x = np.any(seg_array, axis=(0,1))
# y = np.any(seg_array, axis=(0, 2))
# mean_z.append(np.where(z)[0][[-1]] - np.where(z)[0][[0]])
# mean_x.append(np.where(x)[0][[-1]] - np.where(x)[0][[0]])
# mean_y.append(np.where(y)[0][[-1]] - np.where(y)[0][[0]])
# mean_z.append(np.where(z)[0][[-1]])
# mean_x.append(np.where(x)[0][[-1]])
# mean_y.append(np.where(y)[0][[-1]])
# mean_z.append(np.where(z)[0][[0]])
# mean_x.append(np.where(x)[0][[0]])
# mean_y.append(np.where(y)[0][[0]])
# print(np.where(z)[0][[0]] - np.where(z)[0][[-1]])
# print(np.where(x)[0][[0]] - np.where(x)[0][[-1]])
# print(np.where(y)[0][[0]] - np.where(y)[0][[-1]])
# start_slice, end_slice = np.where(z)[0][[0, -1]]
    # Expand by expand_slice in both directions
# start_slice = max(0, start_slice - expand_slice)
# end_slice = min(seg_array.shape[0] - 1, end_slice + expand_slice)
    # # # If the number of remaining slices is smaller than size, simply drop this case; such cases are rare, so this is not a concern
# # if end_slice - start_slice + 1 < para.size:
# # print('!!!!!!!!!!!!!!!!')
# # print(file, 'have too little slice', ct_array.shape[0])
# # print('!!!!!!!!!!!!!!!!')
# # continue
print(ct_array.shape)
print(seg_array.shape)
    # Finally, save the data as nii files
new_ct = sitk.GetImageFromArray(ct_array)
new_ct.SetDirection(ct.GetDirection())
new_ct.SetOrigin(ct.GetOrigin())
new_ct.SetSpacing((ct.GetSpacing()[0] * int(1 / down_scale), ct.GetSpacing()[1] * int(1 / down_scale), slice_thickness / down_scale))
new_seg = sitk.GetImageFromArray(seg_array)
new_seg.SetDirection(ct.GetDirection())
new_seg.SetOrigin(ct.GetOrigin())
new_seg.SetSpacing((ct.GetSpacing()[0] * int(1 / down_scale), ct.GetSpacing()[1] * int(1 / down_scale), slice_thickness / down_scale))
sitk.WriteImage(new_ct, os.path.join(new_ct_path, file))
sitk.WriteImage(new_seg, os.path.join(new_seg_dir, file.replace('PANCREAS_', 'label')))
# print(min_z, max_z)
# print(np.max(mean_z), np.min(mean_z))
# print(np.max(mean_y), np.min(mean_y))
# print(np.max(mean_x), np.min(mean_x)) |
# coding=utf-8
# import libraries
import torch
from torch import nn
from torch.autograd import Variable
from torchvision import datasets
# load model
# to save the model:
# torch.save(model.state_dict(), './cnn.pth')
# define the convolutional neural network model
class Cnn(nn.Module):
def __init__(self, in_dim, n_class): # 28x28x1
super(Cnn, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_dim, 6, 3, stride=1, padding=1), # 28 x28
nn.ReLU(True),
nn.MaxPool2d(2, 2), # 14 x 14
nn.Conv2d(6, 16, 5, stride=1, padding=0), # 10 * 10*16
nn.ReLU(True), nn.MaxPool2d(2, 2)) # 5x5x16
self.fc = nn.Sequential(
nn.Linear(400, 120), # 400 = 5 * 5 * 16
nn.Linear(120, 84),
nn.Linear(84, n_class))
def forward(self, x):
out = self.conv(x)
out = out.view(out.size(0), 400) # 400 = 5 * 5 * 16,
out = self.fc(out)
return out
# print the model
print(Cnn)
model = Cnn(1, 10)  # images are 28x28, 10 classes
# cnn = torch.load('./cnn.pth')['state_dict']
model.load_state_dict(torch.load('./cnn.pth'))
# inference
print(model)
test_data = datasets.MNIST(root='./data', train=False, download=True)
test_x = Variable(torch.unsqueeze(test_data.test_data, dim=1), volatile=True).type(torch.FloatTensor)[
:20] / 255.0 # shape from (2000, 28, 28) to (2000, 1, 28, 28), value in range(0,1)
test_y = test_data.test_labels[:20]
print(test_x.size())
test_output = model(test_x[:10])
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'predict result')
print(test_y[:10].numpy(), 'real result')
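# Editor's note (not in the original): a quick accuracy check over the same ten
# samples could be added like this:
#   accuracy = (pred_y == test_y[:10].numpy()).mean()
#   print('accuracy on the first 10 test images:', accuracy)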
|
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
from keras.models import load_model
import pickle
import cleanup
model = load_model('embedding_LSTM.h5')
tokenizer = pickle.load(open('tokenizer.pickle', 'rb'))
index_to_cat = {0: 'math.MP', 1: 'math.CO', 2: 'math.AG', 3: 'math.PR', 4: 'math.AP', 5: 'math.DG', 6: 'math.IT', 7: 'math.NT', 8: 'math.DS', 9: 'math.OC', 10: 'math.FA', 11: 'math.RT', 12: 'math.NA', 13: 'math.GT', 14: 'math.QA', 15: 'math.CA', 16: 'math.GR', 17: 'math.ST', 18: 'math.RA', 19: 'math.CV', 20: 'math.AT', 21: 'math.OA', 22: 'math.AC', 23: 'math.LO', 24: 'math.MG', 25: 'math.SG', 26: 'math.SP', 27: 'math.CT', 28: 'math.KT', 29: 'math.GN', 30: 'math.GM', 31: 'math.HO'}
cat_to_index = {'math.MP': 0, 'math.CO': 1, 'math.AG': 2, 'math.PR': 3, 'math.AP': 4, 'math.DG': 5, 'math.IT': 6, 'math.NT': 7, 'math.DS': 8, 'math.OC': 9, 'math.FA': 10, 'math.RT': 11, 'math.NA': 12, 'math.GT': 13, 'math.QA': 14, 'math.CA': 15, 'math.GR': 16, 'math.ST': 17, 'math.RA': 18, 'math.CV': 19, 'math.AT': 20, 'math.OA': 21, 'math.AC': 22, 'math.LO': 23, 'math.MG': 24, 'math.SG': 25, 'math.SP': 26, 'math.CT': 27, 'math.KT': 28, 'math.GN': 29, 'math.GM': 30, 'math.HO': 31}
def abstract_category_predict(text, model=model, tokenizer=tokenizer):
clean = cleanup.cleanup(stem_on=True)
X = [clean.transform(text)]
X_tt = tokenizer.texts_to_sequences(X)
maxlen = 200
X = pad_sequences(X_tt, maxlen=maxlen)
y_pred = model.predict(X)
y_pred2 = (y_pred > 0.5).astype(int)
y_pred2 = y_pred2[0]
Ans = []
for i in range(len(index_to_cat)):
if y_pred2[i] == 1:
Ans += [index_to_cat[i]]
return Ans
####example
#print(predict("Using schubert variety to construct degenration of MV cycles and it results in represnetation of simisimple lie algebra "))
|
import optparse
import os
import sys
import tempfile
import src.model.network as network
from src.model.mp2vec_s import MP2Vec
__author__ = 'sheep'
def HIN2vec(graph_fname, output_datafold, options):
'''\
%prog [options] <graph_fname> <node_vec_fname> <path_vec_fname>
graph_fname: the graph file
It can be a file contained edges per line (e.g., res/karate_club_edges.txt)
or a pickled graph file.
node_vec_fname: the output file for nodes' vectors
path_vec_fname: the output file for meta-paths' vectors
'''
node_vec_fname = output_datafold + "node.txt"
path_vec_fname = output_datafold + "metapath.txt"
options.allow_circle = False
options.correct_neg = False
print('Load a HIN...')
g = load_a_HIN(graph_fname)
print('Generate random walks...')
_, tmp_walk_fname = tempfile.mkstemp()
# print(tmp_walk_fname)
with open(tmp_walk_fname, 'w') as f:
for walk in g.random_walks(options.num_walks, options.walk_length):
f.write('%s\n' % ' '.join(map(str, walk)))
_, tmp_node_vec_fname = tempfile.mkstemp()
_, tmp_path_vec_fname = tempfile.mkstemp()
model = MP2Vec(size=options.dim,
window=options.window_size,
neg=options.neg_num,
num_processes=options.num_workers,
# iterations=i,
alpha=options.alpha,
same_w=True,
normed=False,
is_no_circle_path=False,
)
neighbors = None
if options.correct_neg:
for id_ in g.graph:
g._get_k_hop_neighborhood(id_, options.window_size)
neighbors = g.k_hop_neighbors[options.window_size]
model.train(g,
tmp_walk_fname,
g.class_nodes,
k_hop_neighbors=neighbors,
)
model.dump_to_file(tmp_node_vec_fname, type_='node')
model.dump_to_file(tmp_path_vec_fname, type_='path')
print('Dump vectors...')
output_node2vec(g, tmp_node_vec_fname, node_vec_fname)
output_path2vec(g, tmp_path_vec_fname, path_vec_fname)
return 0
def output_node2vec(g, tmp_node_vec_fname, node_vec_fname):
with open(tmp_node_vec_fname) as f:
with open(node_vec_fname, 'w') as fo:
id2node = dict([(v, k) for k, v in g.node2id.items()])
first = True
for line in f:
if first:
first = False
fo.write(line)
continue
id_, vectors = line.strip().split(' ', 1)
line = '%s %s\n' % (id2node[int(id_)], vectors)
fo.write(line)
#FIXME: to support more than 10 different meta-paths
def output_path2vec(g, tmp_path_vec_fname, path_vec_fname):
with open(tmp_path_vec_fname) as f:
with open(path_vec_fname, 'w') as fo:
id2edge_class = dict([(v, k) for k, v
in g.edge_class2id.items()])
print(id2edge_class)
first = True
for line in f:
if first:
first = False
fo.write(line)
continue
ids, vectors = line.strip().split(' ', 1)
ids = map(int, ids.split(','))
edge = ','.join([id2edge_class[id_] for id_ in ids])
line = '%s %s\n' % (edge, vectors)
fo.write(line)
def load_a_HIN(fname):
g = network.HIN()
relation_dict = fname.relation_dict
for relation in relation_dict:
src_class = relation[0]
dst_class = relation[2]
edge_class = relation
for src in relation_dict[relation]:
for dst in relation_dict[relation][src]:
g.add_edge(src_class+src, src_class, dst_class+dst, dst_class, edge_class)
#g.print_statistics()
return g |
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import get_user_model
from django import forms
|
from ltypes import i64
def fib(n: i64) -> i64:
if n < 2:
return n
else:
return fib(n - 1) + fib(n - 2)
def main0():
ans: i64
ans = fib(15)
assert ans == 610
def main():
# test of issue-529
ans: i64
ans = fib(10)
assert ans == 55
main0()
main()
|
'''
@authors: Adrian Oeftiger
@date: 17/04/2015
'''
import numpy as np
from scipy.constants import c, epsilon_0, pi
from scipy.interpolate import splrep, splev
from functools import wraps
from PyHEADTAIL.general.element import Element
from PyHEADTAIL.general import pmath as pm
from PyHEADTAIL.particles.slicing import clean_slices
class LongSpaceCharge(Element):
'''Contains longitudinal space charge (SC) via Chao's expression:
dp' = - e^2 * g * lambda'(z) / (2 * pi * eps_0 * gamma^2 * p_0)
cf. the original HEADTAIL version.
'''
'''Geometry factor for long bunched bunches.
Involved approximations:
- transversely round beam
- finite wall resistivity (perfectly conducting boundary)
- geometry factor averaged along z
(considering equivalent linear longitudinal electric field)
use directSC = 0.67 for further assumptions:
- ellipsoidally bunched beam of uniform density
- bunch length > 3/2 pipe radius
- transversely averaged contribution
use directSC = 0.5 for further assumptions:
- continuous beam
- low frequency disturbance (displacement currents neglected)
- emittance dominated beam
- transversely averaged contribution
use directSC = 1.0 for further assumptions:
- same as directSC = 0.5 only transversely maximum contribution
directly on z-axis
cf. Martin Reiser's discussion in
'Theory and Design of Charged Particle Beams'.
'''
directSC = 0.67
def __init__(self, slicer, pipe_radius, length, n_slice_sigma=3,
*args, **kwargs):
'''Arguments:
        - pipe_radius is the radius of the vacuum pipe in metres.
- length is an s interval (in metres) along which the SC force
is integrated. Usually you want to set this to circumference
in conjunction with the LongitudinalOneTurnMap RFSystems.
- n_slice_sigma indicates the number of slices taken as a
sigma for the Gaussian kernel that smoothens the line charge
density derivative (see SliceSet.lambda_prime_bins for more
info).
'''
self.slicer = slicer
self.pipe_radius = pipe_radius
self.length = length
self.n_slice_sigma = n_slice_sigma
self._gfactor = self._gfactor0
def track(self, beam):
'''Add the longitudinal space charge contribution to the beam's
dp kick.
'''
slices = beam.get_slices(self.slicer)
lambda_prime = slices.lambda_prime_bins(sigma=self.n_slice_sigma)
lambda_prime_z = lambda_prime / slices.slice_widths[0]
slice_kicks = (self._prefactor(slices) * self._gfactor(beam) *
lambda_prime_z) * (self.length / (beam.beta * c))
kicks = slices.convert_to_particles(slice_kicks)
beam.dp -= kicks
@staticmethod
def _prefactor(beam):
return (beam.charge /
(4.*np.pi*epsilon_0 * beam.gamma**2 * beam.p0))
def _gfactor0(self, beam):
'''Geometry factor for Gaussian beam on-axis
in symmetric vacuum pipe.
'''
# transverse beam size:
# (sigx+sigz)/2 * sqrt(2) <<< formula is for uniform distribution,
# corresponding Gaussian sigmae are sqrt(2) larger
r_beam = (beam.sigma_x() + beam.sigma_y()) / np.sqrt(8.)
return self.directSC + 2. * pm.log(self.pipe_radius / r_beam)
def make_force(self, beam):
'''Return the electric force field due to space charge
of the given SliceSet instance as a function of z
in units of Coul*Volt/metre.
'''
sliceset = beam.get_slices(self.slicer)
gfac_spline = splrep(
sliceset.z_centers,
pm.ones(sliceset.n_slices) * self._gfactor(beam),
s=0)
def force(z):
gfac = splev(z, gfac_spline, ext=1)
return (self._prefactor(beam) * gfac *
-sliceset.lambda_prime_z(z) * beam.p0)
return force
def make_potential(self, beam):
'''Return the electric potential energy due to space charge
of the given SliceSet instance as a function of z
in units of Coul*Volt.
'''
sliceset = beam.get_slices(self.slicer)
gfac_spline = splrep(
sliceset.z_centers,
pm.ones(sliceset.n_slices) * self._gfactor(beam),
s=0)
def potential(z):
gfac = splev(z, gfac_spline, ext=1)
return (self._prefactor(beam) * gfac *
sliceset.lambda_z(z) * beam.p0)
return potential
class LongSpaceChargeRectPipe(LongSpaceCharge):
'''Longitudinal space charge with a gfactor computed for
a rectangular beam pipe.
See K.Y. Ng, Part. Accel. 16, 63 (1984),
L. Wang and Y. Li, Phys. Rev. ST Accel Beams 18, 024201 (2015)
'''
def __init__(self, slicer, pipe_width, pipe_height, length,
n_slice_sigma=3, *args, **kwargs):
'''Assumes a rectangular beam pipe.
Arguments:
        - pipe_width is the width of the vacuum pipe in metres.
        - pipe_height is the height of the vacuum pipe in metres.
- length is an s interval (in metres) along which the SC force
is integrated. Usually you want to set this to circumference
in conjunction with the LongitudinalOneTurnMap RFSystems.
- n_slice_sigma indicates the number of slices taken as a
sigma for the Gaussian kernel that smoothens the line charge
density derivative (see SliceSet.lambda_prime_bins for more
info).
'''
pipe_radius = 2 * pipe_height / np.pi * np.tanh(
np.pi * pipe_width / (2 * pipe_height))
super().__init__(slicer, abs(pipe_radius), length, n_slice_sigma,
*args, **kwargs)
class TransverseGaussianSpaceCharge(Element):
'''Contains transverse space charge for a Gaussian configuration.
Applies the Bassetti-Erskine electric field expression slice-wise
for each particle centred around the slice centre.
'''
'''Threshold for relative transverse beam size difference
below which the beam is assumed to be round:
abs(1 - sig_y / sig_x) < ratio_threshold ==> round beam
'''
ratio_threshold = 1e-3
'''Threshold for absolute transverse beam size difference
below which the beam is assumed to be round:
abs(sig_y - sig_x) < absolute_threshold ==> round beam
'''
absolute_threshold = 1e-10
def __init__(self, slicer, length, sig_check=True, other_efieldn=None):
'''Arguments:
- slicer determines the slicing parameters for the slices over
which the Bassetti-Erskine electric field expression is applied,
given a slicer with n_slices == 1, you can apply a
longitudinally averaged kick over the whole beam.
- length is an s interval along which the space charge force
is integrated.
- sig_check exchanges x and y quantities for sigma_x < sigma_y
and applies the round beam formula for sigma_x == sigma_y .
sig_check defaults to True and should not usually be False.
- other_efieldn can be used to use a different implementation of
the charge-normalised electric field expression (there are four
different implementations to choose from in this class:
_efieldn_mit, _efield_mitmod, _efieldn_koelbig,
_efieldn_pyecloud; in order of computational time consumption)
'''
self.slicer = slicer
self.length = length
if other_efieldn is None:
self._efieldn = self._efieldn_mit
else:
self._efieldn = other_efieldn
if sig_check:
self._efieldn = self.add_sigma_check(self._efieldn)
def track(self, beam):
'''Add the transverse space charge contribution to the beam's
transverse kicks.
'''
slices = beam.get_slices(
self.slicer, statistics=["mean_x", "mean_y", "sigma_x", "sigma_y"])
prefactor = (beam.charge * self.length /
(beam.p0 * beam.betagamma * beam.gamma * c))
# Nlambda_i is the line density [Coul/m] for the current slice
for s_i, (Nlambda_i, mean_x, mean_y, sig_x, sig_y) in enumerate(zip(
slices.lambda_bins(smoothen=False)/slices.slice_widths,
slices.mean_x, slices.mean_y,
slices.sigma_x, slices.sigma_y)):
p_id = slices.particle_indices_of_slice(s_i)
if len(p_id) == 0:
continue
en_x, en_y = self.get_efieldn(
pm.take(beam.x, p_id), pm.take(beam.y, p_id),
mean_x, mean_y, sig_x, sig_y)
kicks_x = (en_x * Nlambda_i) * prefactor
kicks_y = (en_y * Nlambda_i) * prefactor
kicked_xp = pm.take(beam.xp, p_id) + kicks_x
kicked_yp = pm.take(beam.yp, p_id) + kicks_y
pm.put(beam.xp, p_id, kicked_xp)
pm.put(beam.yp, p_id, kicked_yp)
def get_efieldn(self, xr, yr, mean_x, mean_y, sig_x, sig_y):
'''The charge-normalised electric field components of a
two-dimensional Gaussian charge distribution according to
M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
Return (E_x / Q, E_y / Q).
'''
x = xr - mean_x
y = yr - mean_y
# absolute values for convergence reasons of erfc
en_x, en_y = self._efieldn(pm.abs(x), pm.abs(y), sig_x, sig_y)
en_x = pm.abs(en_x) * pm.sign(x)
en_y = pm.abs(en_y) * pm.sign(y)
return en_x, en_y
@staticmethod
def _sig_sqrt(sig_x, sig_y):
return pm.sqrt(2 * (sig_x**2 - sig_y**2))
@staticmethod
def _efieldn_mit(x, y, sig_x, sig_y):
'''The charge-normalised electric field components of a
two-dimensional Gaussian charge distribution according to
M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
Return (E_x / Q, E_y / Q).
Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0.
For convergence reasons of the erfc, use only x > 0 and y > 0.
Uses FADDEEVA C++ implementation from MIT (via SciPy >= 0.13.0).
'''
# timing was ~0.522 ms for:
# x = np.arange(-1e-5, 1e-5, 1e-8)
# y = np.empty(len(x))
# sig_x = 1.2e-6
# sig_y = 1e-6
sig_sqrt = TransverseGaussianSpaceCharge._sig_sqrt(sig_x, sig_y)
w1re, w1im = pm.wofz(x / sig_sqrt, y / sig_sqrt)
ex = pm.exp(-x*x / (2 * sig_x*sig_x) +
-y*y / (2 * sig_y*sig_y))
w2re, w2im = pm.wofz(x * sig_y/(sig_x*sig_sqrt),
y * sig_x/(sig_y*sig_sqrt))
denom = 2. * epsilon_0 * np.sqrt(pi) * sig_sqrt
return (w1im - ex * w2im) / denom, (w1re - ex * w2re) / denom
@staticmethod
def _efieldn_mitmod(x, y, sig_x, sig_y):
'''The charge-normalised electric field components of a
two-dimensional Gaussian charge distribution according to
M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
Return (E_x / Q, E_y / Q).
Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0.
For convergence reasons of the erfc, use only x > 0 and y > 0.
Uses erfc C++ implementation from MIT (via SciPy >= 0.13.0)
and calculates wofz (FADDEEVA function) explicitely.
'''
# timing was ~1.01ms for same situation as _efieldn_mit
sig_sqrt = TransverseGaussianSpaceCharge._sig_sqrt(sig_x, sig_y)
w1 = pm._errfadd((x + 1j * y) / sig_sqrt)
ex = pm.exp(-x*x / (2 * sig_x*sig_x) +
-y*y / (2 * sig_y*sig_y))
w2 = pm._errfadd(x * sig_y/(sig_x*sig_sqrt) +
y * sig_x/(sig_y*sig_sqrt) * 1j)
val = (w1 - ex * w2) / (2 * epsilon_0 * np.sqrt(pi) * sig_sqrt)
return val.imag, val.real
@staticmethod
def _efieldn_koelbig(x, y, sig_x, sig_y):
'''The charge-normalised electric field components of a
two-dimensional Gaussian charge distribution according to
M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
Return (E_x / Q, E_y / Q).
Assumes sig_x > sig_y and mean_x == 0 as well as mean_y == 0.
For convergence reasons of the erfc, use only x > 0 and y > 0.
Uses CERN library from K. Koelbig.
'''
# timing was ~3.35ms for same situation as _efieldn_mit
if not pm._errf:
raise ImportError('errfff cannot be imported for using ' +
'TransverseSpaceCharge._efield_koelbig .' +
'Did you call make (or f2py general/errfff.f)?')
sig_sqrt = TransverseGaussianSpaceCharge._sig_sqrt(sig_x, sig_y)
w1re, w1im = pm._errf(x/sig_sqrt, y/sig_sqrt)
ex = pm.exp(-x*x / (2 * sig_x*sig_x) +
-y*y / (2 * sig_y*sig_y))
w2re, w2im = pm._errf(x * sig_y/(sig_x*sig_sqrt),
y * sig_x/(sig_y*sig_sqrt))
pref = 1. / (2 * epsilon_0 * np.sqrt(pi) * sig_sqrt)
return pref * (w1im - ex * w2im), pref * (w1re - ex * w2re)
@staticmethod
def wfun(z):
'''FADDEEVA function as implemented in PyECLOUD, vectorised.'''
x=z.real
y=z.imag
if not pm._errf:
raise ImportError('errfff cannot be imported for using ' +
'TransverseSpaceCharge._efield_pyecloud .' +
'Did you f2py errfff.f?')
wx,wy=pm._errf(x,y) # in PyECLOUD only pm._errf_f (not vectorised)
return wx+1j*wy
@staticmethod
def _efieldn_pyecloud(xin, yin, sigmax, sigmay):
'''The charge-normalised electric field components of a
two-dimensional Gaussian charge distribution according to
M. Bassetti and G. A. Erskine in CERN-ISR-TH/80-06.
Return (E_x / Q, E_y / Q).
Effective copy of PyECLOUD.BassErsk.BassErsk implementation.
'''
# timing was ~3.52ms for same situation as _efieldn_mit
wfun = TransverseGaussianSpaceCharge.wfun
x=abs(xin);
y=abs(yin);
eps0=8.854187817620e-12;
if sigmax>sigmay:
S=np.sqrt(2*(sigmax*sigmax-sigmay*sigmay));
factBE=1/(2*eps0*np.sqrt(pi)*S);
etaBE=sigmay/sigmax*x+1j*sigmax/sigmay*y;
zetaBE=x+1j*y;
val=factBE*(wfun(zetaBE/S)-
np.exp( -x*x/(2*sigmax*sigmax)-y*y/(2*sigmay*sigmay))*
wfun(etaBE/S) );
Ex=abs(val.imag)*np.sign(xin);
Ey=abs(val.real)*np.sign(yin);
else:
S=np.sqrt(2*(sigmay*sigmay-sigmax*sigmax));
factBE=1/(2*eps0*np.sqrt(pi)*S);
etaBE=sigmax/sigmay*y+1j*sigmay/sigmax*x;
yetaBE=y+1j*x;
val=factBE*(wfun(yetaBE/S)-
np.exp( -y*y/(2*sigmay*sigmay)-x*x/(2*sigmax*sigmax))*
wfun(etaBE/S) );
Ey=abs(val.imag)*np.sign(yin);
Ex=abs(val.real)*np.sign(xin);
return Ex, Ey
@staticmethod
def _efieldn_round(x, y, sig_r):
'''Return (E_x / Q, E_y / Q) for a round distribution
with sigma_x == sigma_y == sig_r .
'''
r2 = x*x + y*y
amplitude = (1 - pm.exp(-r2/(2*sig_r*sig_r))) / (2*pi*epsilon_0 * r2)
return x * amplitude, y * amplitude
@staticmethod
def add_sigma_check(efieldn):
'''Wrapper for a normalised electric field function.
Adds the following actions before calculating the field:
- exchange x and y quantities if sigma_x < sigma_y
- apply round beam field formula when sigma_x close to sigma_y
'''
efieldn_round = TransverseGaussianSpaceCharge._efieldn_round
@wraps(efieldn)
def efieldn_checked(x, y, sig_x, sig_y, *args, **kwargs):
tol_kwargs = dict(
rtol=TransverseGaussianSpaceCharge.ratio_threshold,
atol=TransverseGaussianSpaceCharge.absolute_threshold
)
if pm.allclose(sig_y, sig_x, **tol_kwargs):
if pm.almost_zero(sig_y, **tol_kwargs):
en_x = en_y = pm.zeros(x.shape, dtype=x.dtype)
else:
en_x, en_y = efieldn_round(x, y, sig_x, *args, **kwargs)
elif pm.all(sig_x < sig_y):
en_y, en_x = efieldn(y, x, sig_y, sig_x, *args, **kwargs)
else:
en_x, en_y = efieldn(x, y, sig_x, sig_y, *args, **kwargs)
return en_x, en_y
return efieldn_checked
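# Editor's sketch (not part of the original module): both elements are applied
# through their track() methods like any other PyHEADTAIL element; the slicer,
# beam, one-turn map and circumference below are assumed to come from the
# surrounding simulation setup.
#
#   long_sc = LongSpaceCharge(slicer, pipe_radius=2e-2, length=circumference)
#   trans_sc = TransverseGaussianSpaceCharge(slicer, length=circumference)
#   for element in one_turn_map + [long_sc, trans_sc]:
#       element.track(beam)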
|
from torch import nn
import torch
class GRU4REC(nn.Module):
def __init__(self, input_size, hidden_size, output_size, num_layers=1, final_act='tanh',
dropout_hidden=.5, dropout_input=0, batch_size=50, embedding_dim=-1, use_cuda=False):
super(GRU4REC, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = output_size
self.num_layers = num_layers
self.dropout_hidden = dropout_hidden
self.dropout_input = dropout_input
self.embedding_dim = embedding_dim
self.batch_size = batch_size
self.use_cuda = use_cuda
self.device = torch.device('cuda' if use_cuda else 'cpu')
self.onehot_buffer = self.init_emb()
self.h2o = nn.Linear(hidden_size, output_size)
self.create_final_activation(final_act)
if self.embedding_dim != -1:
self.look_up = nn.Embedding(input_size, self.embedding_dim)
self.gru = nn.GRU(self.embedding_dim, self.hidden_size, self.num_layers, dropout=self.dropout_hidden)
else:
self.gru = nn.GRU(self.input_size, self.hidden_size, self.num_layers, dropout=self.dropout_hidden)
self = self.to(self.device)
def create_final_activation(self, final_act):
if final_act == 'tanh':
self.final_activation = nn.Tanh()
elif final_act == 'relu':
self.final_activation = nn.ReLU()
elif final_act == 'softmax':
self.final_activation = nn.Softmax()
elif final_act == 'softmax_logit':
self.final_activation = nn.LogSoftmax()
elif final_act.startswith('elu-'):
self.final_activation = nn.ELU(alpha=float(final_act.split('-')[1]))
elif final_act.startswith('leaky-'):
self.final_activation = nn.LeakyReLU(negative_slope=float(final_act.split('-')[1]))
def forward(self, input, hidden):
'''
Args:
input (B,): a batch of item indices from a session-parallel mini-batch.
target (B,): torch.LongTensor of next item indices from a session-parallel mini-batch.
Returns:
logit (B,C): Variable that stores the logits for the next items in the session-parallel mini-batch
hidden: GRU hidden state
'''
if self.embedding_dim == -1:
embedded = self.onehot_encode(input)
if self.training and self.dropout_input > 0: embedded = self.embedding_dropout(embedded)
embedded = embedded.unsqueeze(0)
else:
embedded = input.unsqueeze(0)
embedded = self.look_up(embedded)
output, hidden = self.gru(embedded, hidden) #(num_layer, B, H)
output = output.view(-1, output.size(-1)) #(B,H)
logit = self.final_activation(self.h2o(output))
return logit, hidden
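    # Hedged usage sketch (added; the sizes and names below are illustrative
    # assumptions, not part of the original source):
    #
    #     model = GRU4REC(input_size=n_items, hidden_size=100, output_size=n_items,
    #                     batch_size=B, use_cuda=False)
    #     hidden = model.init_hidden()                 # (num_layers, B, H)
    #     item_idx = torch.zeros(B, dtype=torch.long)  # one current item per session
    #     logit, hidden = model(item_idx, hidden)      # logit: (B, n_items)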
def init_emb(self):
'''
Initialize the one_hot embedding buffer, which will be used for producing the one-hot embeddings efficiently
'''
onehot_buffer = torch.FloatTensor(self.batch_size, self.output_size)
onehot_buffer = onehot_buffer.to(self.device)
return onehot_buffer
def onehot_encode(self, input):
"""
Returns a one-hot vector corresponding to the input
Args:
input (B,): torch.LongTensor of item indices
buffer (B,output_size): buffer that stores the one-hot vector
Returns:
one_hot (B,C): torch.FloatTensor of one-hot vectors
"""
self.onehot_buffer.zero_()
index = input.view(-1, 1)
one_hot = self.onehot_buffer.scatter_(1, index, 1)
return one_hot
def embedding_dropout(self, input):
p_drop = torch.Tensor(input.size(0), 1).fill_(1 - self.dropout_input)
mask = torch.bernoulli(p_drop).expand_as(input) / (1 - self.dropout_input)
mask = mask.to(self.device)
input = input * mask
return input
def init_hidden(self):
'''
Initialize the hidden state of the GRU
'''
        try:
            h0 = torch.zeros(self.num_layers, self.batch_size, self.hidden_size).to(self.device)
        except RuntimeError:
            # fall back to CPU if the configured device (e.g. CUDA) is unavailable
            self.device = torch.device('cpu')
            h0 = torch.zeros(self.num_layers, self.batch_size, self.hidden_size).to(self.device)
return h0 |
import enrich_db
import couchDB
import auth
db = couchDB.Cloudant(auth.user, auth.password) |
from aiger import atom
from aiger_analysis import is_satisfiable
x, y = atom('x'), atom('y')
expr_sat = x | y
expr_unsat = expr_sat & ~ expr_sat
def test_satisfiable():
assert is_satisfiable(expr_sat)
def test_satisfiable_2():
assert is_satisfiable(atom(True))
def test_unsatisfiable():
assert not is_satisfiable(expr_unsat)
def test_unsatisfiable_2():
assert not is_satisfiable(atom(False))
def test_unsatisfiable_aig():
assert not is_satisfiable(expr_unsat.aig)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of OTC Tool released under MIT license.
# Copyright (C) 2016 T-systems Kurt Garloff, Zsolt Nagy
from otcclient.core.OtcConfig import OtcConfig
from otcclient.utils import utils_http, utils_templates
from otcclient.core.otcpluginbase import otcpluginbase
from otcclient.core.pluginmanager import getplugin
import base64
from time import sleep
import sys
import json
import os
class ecs(otcpluginbase):
ar = {}
@staticmethod
def otcOutputHandler():
return getplugin(OtcConfig.OUTPUT_FORMAT)
def otctype(self):
return "func"
@staticmethod
def describe_instances():
url = "https://" + OtcConfig.DEFAULT_HOST + "/v2/" + OtcConfig.PROJECT_ID + "/servers"
if not OtcConfig.INSTANCE_NAME is None:
ecs.convertINSTANCENameToId()
if OtcConfig.INSTANCE_ID is None:
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey = "servers", listkey={"id", "name"})
else:
ret = utils_http.get(url + '/' + OtcConfig.INSTANCE_ID )
maindata = json.loads(ret)
if "itemNotFound" in maindata:
raise RuntimeError("Not found!")
ecs.otcOutputHandler().print_output(ret,mainkey="server")
return ret
@staticmethod
def describe_vpcs():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/vpcs"
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey = "vpcs", listkey={"id", "name", "status", "cidr"})
return ret
@staticmethod
def describe_addresses():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/publicips"
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey="publicips", listkey={"id", "status", "public_ip_address", "private_ip_address", "type", "create_time", "bandwidth_size"})
return ret
@staticmethod
def describe_bandwiths():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/bandwidths"
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey="bandwidths", listkey={"id", "name", "publicip_info", "size"})
return ret
@staticmethod
def describe_private_addresses():
if not (OtcConfig.VPCNAME is None):
ecs.convertVPCNameToId()
if not OtcConfig.SUBNETNAME is None:
ecs.convertSUBNETNameToId()
        if OtcConfig.VPCID is None:
            print("VPC definition not correct! Check VPCs:")
            print("otc ecs describe-vpcs")
            os._exit(1)
        if OtcConfig.SUBNETID is None:
            print("Subnet definition not correct! Check subnets:")
            print("otc ecs describe-subnets")
            os._exit(1)
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/subnets/" + OtcConfig.SUBNETID + "/privateips"
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey="privateips", listkey={"id", "status", "ip_address", "device_owner", "subnet_id"})
return ret
@staticmethod
def describe_security_groups():
if (not (OtcConfig.SECUGROUPNAME is None)) or (not (OtcConfig.SECUGROUP is None)):
if (not (OtcConfig.SECUGROUPNAME is None)):
ecs.convertSECUGROUPNameToId()
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2.0/security-group-rules?security_group_id=" + OtcConfig.SECUGROUP
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey= "security_group_rules", listkey={"id","direction", "protocol","port_range_min","port_range_max" })
else:
url="https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/security-groups"
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey= "security_groups", listkey={"id", "name", "vpc_id" })
return ret
@staticmethod
def describe_subnets():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/subnets"
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey="subnets", listkey={"id", "name", "cidr", "status", "vpc_id", "gateway_ip", "primary_dns", "availability_zone"})
return ret
@staticmethod
def describe_network_interfaces():
if not OtcConfig.INSTANCE_NAME is None:
ecs.convertINSTANCENameToId()
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/servers/" + OtcConfig.INSTANCE_ID + "/os-interface"
ret = utils_http.get(url)
# print ret
ecs.otcOutputHandler().print_output(ret, mainkey="interfaceAttachments", listkey={"port_state", "fixed_ips", "port_id", "net_id", "mac_addr"})
return ret
@staticmethod
def describe_images():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/cloudimages"
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey="images", listkey={"id", "name", "__os_type", "updated_at", "deleted"})
return ret
@staticmethod
def describe_flavors():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/cloudservers/flavors"
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey="flavors", listkey= {"id", "name", "vcpus", "ram", "disk", "swap"})
return ret
@staticmethod
def describe_key_pairs():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/os-keypairs"
ret = utils_http.get( url )
ecs.otcOutputHandler().print_output(ret, mainkey="keypairs", subkey="keypair", listkey={"name", "fingerprint", "public_key"})
return ret
@staticmethod
def create_key_pair():
REQ_CREATE_KEYPAIR = "{ \"keypair\": { \"name\": \"" + OtcConfig.KEYNAME + "\", " + "\"public_key\": \"" + OtcConfig.PUBLICKEY + "\" } }"
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/os-keypairs"
ret = utils_http.post(url, REQ_CREATE_KEYPAIR)
parsed = json.loads(ret)
if "keypair" not in parsed:
print("Can not create:" +ret)
os._exit( 1 )
ecs.otcOutputHandler().print_output(ret, mainkey="keypair")
return ret
@staticmethod
def allocate_address():
REQ_CREATE_PUBLICIP = "{\"publicip\":{\"type\":\"5_bgp\"},\"bandwidth\":{\"name\":\"apiTest\",\"size\":5,\"share_type\":\"PER\",\"charge_mode\":\"traffic\"}}"
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/publicips"
ret = utils_http.post(url, REQ_CREATE_PUBLICIP)
print( ret )
maindata = json.loads(ret)
if "code" in maindata:
print("Can not create:" +maindata["message"])
os._exit( 1 )
ecs.otcOutputHandler().print_output(ret, mainkey="publicip")
return ret
@staticmethod
def release_address():
if not (OtcConfig.PUBLICIP is None):
ecs.convertPublicIpNameToId()
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/publicips" + \
"/" + OtcConfig.PUBLICIPID
ret = utils_http.delete(url)
print(ret)
return ret
@staticmethod
def release_private_address():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/privateips" + OtcConfig.PRIVATEIPID
ret = utils_http.delete(url)
print(ret)
return ret
@staticmethod
def associate_address():
REQ_ASSOCIATE_PUBLICIP = "{ \"publicip\": { \"port_id\": \"" + OtcConfig.NETWORKINTERFACEID + "\" } }"
#print REQ_ASSOCIATE_PUBLICIP
if not (OtcConfig.PUBLICIP is None):
ecs.convertPublicIpNameToId()
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/publicips" + "/" + OtcConfig.PUBLICIPID
ret = utils_http.put(url, REQ_ASSOCIATE_PUBLICIP)
print(ret)
return ret
@staticmethod
def resize_instance():
if not OtcConfig.INSTANCE_NAME is None:
ecs.convertINSTANCENameToId()
if not OtcConfig.INSTANCE_TYPE_NAME is None:
ecs.convertFlavorNameToId()
if OtcConfig.INSTANCE_ID is None :
            raise RuntimeError("Error. Must specify the Instance Name or ID!")
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/cloudservers/" + OtcConfig.INSTANCE_ID + "/resize"
req = utils_templates.create_request("ecs_resize")
ret = utils_http.post(url, req)
print(ret)
return ret
@staticmethod
def delete_key_pair():
""" generated source for method KEYPAIRDelete """
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/os-keypairs"+ "/" + OtcConfig.KEYNAME
ret = utils_http.delete(url )
return ret
@staticmethod
def getECSJOBList():
""" generated source for method getECSJOBList """
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/jobs/" + OtcConfig.ECSTASKID
ret = utils_http.get(url)
newstatus = str( json.loads(ret)["status"]).upper()
if newstatus != OtcConfig.ECSCREATEJOBSTATUS:
sys.stdout.write( "\n" + newstatus )
OtcConfig.ECSCREATEJOBSTATUS = newstatus
return OtcConfig.ECSCREATEJOBSTATUS
@staticmethod
def getFileContentJSON( aSource, aTarget):
""" generated source for method getFileContentJSON """
        with open(aSource, "rb") as _file:
            # decode to str so the base64 payload can be concatenated into the JSON template
            FILECONTENT = base64.b64encode(_file.read()).decode("utf-8")
        FILE_TEMPLATE = "{ \"path\": \"" + aTarget + "\", \"contents\": \"" + FILECONTENT + "\" }"
        return FILE_TEMPLATE
@staticmethod
def getUserDataContent(aSource):
USER_DATA = ""
        with open(aSource, "rb") as _file:
            # decode to str for use in the templated request bodies
            USER_DATA = base64.b64encode(_file.read()).decode("utf-8")
return USER_DATA
@staticmethod
def getPersonalizationJSON():
""" generated source for method getPersonalizationJSON """
FILEJSONITEM = ""
if not OtcConfig.FILE1 is None:
ar = str(OtcConfig.FILE1).split("=")
FILEJSONITEM = ecs.getFileContentJSON(ar[1], ar[0])
FILECOLLECTIONJSON = FILEJSONITEM
FILEJSONITEM = ""
if not OtcConfig.FILE2 is None:
ar = str(OtcConfig.FILE2).split("=")
if len(FILECOLLECTIONJSON) > 0:
FILEJSONITEM = ","
FILEJSONITEM = FILEJSONITEM + ecs.getFileContentJSON(ar[1], ar[0])
FILECOLLECTIONJSON = FILECOLLECTIONJSON + FILEJSONITEM
FILEJSONITEM = ""
if not OtcConfig.FILE3 is None:
ar = str(OtcConfig.FILE3).split("=")
if len(FILECOLLECTIONJSON) > 0:
FILEJSONITEM = ","
            FILEJSONITEM = FILEJSONITEM + ecs.getFileContentJSON(ar[1], ar[0])
FILECOLLECTIONJSON = FILECOLLECTIONJSON + FILEJSONITEM
FILEJSONITEM = ""
if not OtcConfig.FILE4 is None:
ar = str(OtcConfig.FILE4).split("=")
if len(FILECOLLECTIONJSON) > 0:
FILEJSONITEM = ","
            FILEJSONITEM = FILEJSONITEM + ecs.getFileContentJSON(ar[1], ar[0])
FILECOLLECTIONJSON = FILECOLLECTIONJSON + FILEJSONITEM
FILEJSONITEM = ""
if not OtcConfig.FILE5 is None:
ar = str(OtcConfig.FILE5).split("=")
if len(FILECOLLECTIONJSON) > 0:
FILEJSONITEM = ","
            FILEJSONITEM = FILEJSONITEM + ecs.getFileContentJSON(ar[1], ar[0])
FILECOLLECTIONJSON = FILECOLLECTIONJSON + FILEJSONITEM
PERSONALIZATION = ""
if len(FILECOLLECTIONJSON) > 0:
PERSONALIZATION = "\"personality\": [ " + FILECOLLECTIONJSON + "],"
return PERSONALIZATION
@staticmethod
def ECSAction():
if not OtcConfig.INSTANCE_NAME is None:
ecs.convertINSTANCENameToId()
if OtcConfig.INSTANCE_ID is None :
            raise RuntimeError("Error. Must specify the Instance Name or ID!")
REQ_ECS_ACTION_VM = "{ " + " \"" + OtcConfig.ECSACTION + "\": " + " { " + " \"type\":\"" + OtcConfig.ECSACTIONTYPE + "\", " + " \"servers\": [ { \"id\": \"" + OtcConfig.INSTANCE_ID + "\" }] " + " } " + "}"
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/cloudservers/action"
ret = utils_http.post(url, REQ_ECS_ACTION_VM)
print(ret)
return ret
@staticmethod
def start_instances():
OtcConfig.ECSACTION = "os-start"
ecs.ECSAction()
@staticmethod
def stop_instances():
OtcConfig.ECSACTION = "os-stop"
ecs.ECSAction()
@staticmethod
def delete_instances():
if not OtcConfig.INSTANCE_NAME is None:
ecs.convertINSTANCENameToId()
if OtcConfig.INSTANCE_ID is None :
            raise RuntimeError("Error. Must specify the Instance ID!")
REQ_ECS_DELETE_VM = "{ \"servers\": [ { \"id\": \"" + OtcConfig.INSTANCE_ID + "\" } ]," + " \"delete_publicip\": \"" + OtcConfig.DELETE_PUBLICIP + "\", \"delete_volume\": \"" + OtcConfig.DELETE_VOLUME + "\" }"
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/cloudservers" + "/delete"
ret = utils_http.post(url, REQ_ECS_DELETE_VM)
print(ret)
return ret
@staticmethod
def create_vpc():
REQ_CREATE_VPC = "{ \"vpc\": { \"name\": \"" + OtcConfig.VPCNAME + "\", \"cidr\": \"" + OtcConfig.CIDR + "\" } }"
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/vpcs"
ret = utils_http.post(url, REQ_CREATE_VPC)
print(ret)
return ret
@staticmethod
def delete_vpc():
if not (OtcConfig.VPCNAME is None):
ecs.convertVPCNameToId()
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/vpcs" + OtcConfig.VPCID
ret = utils_http.delete(url)
print(ret)
return ret
@staticmethod
def create_subnet():
if not (OtcConfig.VPCNAME is None):
ecs.convertVPCNameToId()
REQ_CREATE_SUBNET = "{ \"subnet\": { \"name\": \"" + OtcConfig.SUBNETNAME + "\", \"cidr\": \"" + OtcConfig.CIDR + "\", \"gateway_ip\": \"" + OtcConfig.GWIP + "\", \"dhcp_enable\": \"true\", \"primary_dns\": \"" + OtcConfig.PRIMARYDNS + "\", \"secondary_dns\": \"" + OtcConfig.SECDNS + "\", \"availability_zone\":\"" + OtcConfig.AZ + "\", \"vpc_id\":\"" + OtcConfig.VPCID + "\" } }"
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/subnets"
ret = utils_http.post(url, REQ_CREATE_SUBNET)
print(ret)
return ret
@staticmethod
def delete_subnet():
if OtcConfig.SUBNETNAME:
ecs.convertSUBNETNameToId()
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/subnets" + OtcConfig.SUBNETID
ret = utils_http.delete(url)
return ret
@staticmethod
def create_network_interface():
if not (OtcConfig.VPCNAME is None):
ecs.convertVPCNameToId()
if not OtcConfig.SUBNETNAME is None:
ecs.convertSUBNETNameToId()
if not OtcConfig.SECUGROUPNAME is None:
ecs.convertSECUGROUPNameToId()
if not OtcConfig.INSTANCE_NAME is None:
ecs.convertINSTANCENameToId()
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/cloudservers/" + OtcConfig.INSTANCE_ID + "/nics"
req = utils_templates.create_request("add_nics")
ret = utils_http.post(url, req)
return ret
@staticmethod
def create_security_group():
if not (OtcConfig.VPCNAME is None):
ecs.convertVPCNameToId()
REQ_CREATE_SECGROUP = "{ \"security_group\": { \"name\":\"" + OtcConfig.SECUGROUPNAME + "\", \"vpc_id\" : \"" + OtcConfig.VPCID + "\" } }"
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/security-groups"
ret = utils_http.post(url, REQ_CREATE_SECGROUP)
return ret
@staticmethod
def delete_security_group():
if not (OtcConfig.VPCNAME is None):
ecs.convertVPCNameToId()
if not (OtcConfig.SECUGROUPNAME is None):
ecs.convertSECUGROUPNameToId()
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2.0/" + "/security-groups" + "/"+ OtcConfig.SECUGROUP
ret = utils_http.delete(url)
return ret
@staticmethod
def authorize_security_group_ingress():
OtcConfig.DIRECTION = "ingress"
ecs._secgrouprulecreate()
@staticmethod
def authorize_security_group_egress():
OtcConfig.DIRECTION = "egress"
ecs._secgrouprulecreate()
@staticmethod
def _secgrouprulecreate():
if not (OtcConfig.VPCNAME is None):
ecs.convertVPCNameToId()
if not (OtcConfig.SECUGROUPNAME is None):
ecs.convertSECUGROUPNameToId()
sourceIp = ""
if not OtcConfig.CIDR is None:
sourceIp = "\", \"remote_ip_prefix\":\"" + OtcConfig.CIDR
remoteGroup = ""
if not OtcConfig.SOURCE_GROUP_ID is None:
remoteGroup = "\", \"remote_group_id\":\"" + OtcConfig.SOURCE_GROUP_ID
portrange = ""
if not OtcConfig.PORTMIN is None and not OtcConfig.PORTMAX is None:
portrange = "\", \"port_range_min\":\"" + OtcConfig.PORTMIN + "\", \"port_range_max\":\"" ''+ OtcConfig.PORTMAX
REQ_CREATE_SECGROUPRULE = "{\"security_group_rule\":{ \"direction\":\"" + OtcConfig.DIRECTION + "\",\"ethertype\":\"" + OtcConfig.ETHERTYPE + "\", \"protocol\":\""+ OtcConfig.PROTOCOL+ portrange +remoteGroup + sourceIp+ "\" , \"security_group_id\":\""+ OtcConfig.SECUGROUP + "\" } }"
#REQ_CREATE_SECGROUPRULE = "{\"security_group_rule\":{ \"direction\":\"" + OtcConfig.DIRECTION + "\", \"port_range_min\":\"" + OtcConfig.PORTMIN + "\", \"ethertype\":\"" + OtcConfig.ETHERTYPE + "\", \"port_range_max\":\"" ''+ OtcConfig.PORTMAX+ "\", \"protocol\":\""+ OtcConfig.PROTOCOL+ remoteGroup + sourceIp+ "\" , \"security_group_id\":\""+ OtcConfig.SECUGROUP + "\" } }"
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2.0/security-group-rules"
ret = utils_http.post(url, REQ_CREATE_SECGROUPRULE)
#print REQ_CREATE_SECGROUPRULE
print (ret)
ecs.otcOutputHandler().print_output(ret, mainkey="security_group_rule")
return ret
@staticmethod
def run_instances():
if not OtcConfig.VPCNAME is None:
ecs.convertVPCNameToId()
if not OtcConfig.SUBNETNAME is None:
ecs.convertSUBNETNameToId()
if not OtcConfig.IMAGENAME is None:
ecs.convertIMAGENameToId()
if not OtcConfig.SECUGROUPNAME is None:
ecs.convertSECUGROUPNameToId()
if not OtcConfig.INSTANCE_TYPE_NAME is None:
ecs.convertFlavorNameToId()
        if OtcConfig.IMAGE_ID is None:
            print("Image definition not correct! Check images:")
            print("otc ecs describe-images")
            os._exit(1)
        if OtcConfig.INSTANCE_TYPE is None:
            print("Instance type definition not correct! Check flavors:")
            print("otc ecs describe-flavors")
            os._exit(1)
        if OtcConfig.VPCID is None:
            print("VPC definition not correct! Check VPCs:")
            print("otc ecs describe-vpcs")
            os._exit(1)
        if OtcConfig.SECUGROUP is None:
            print("Security group definition not correct! Check security groups:")
            print("otc ecs describe-security-groups")
            os._exit(1)
        if OtcConfig.SUBNETID is None:
            print("Subnet definition not correct! Check subnets:")
            print("otc ecs describe-subnets")
            os._exit(1)
PUBLICIPJSON = ""
# if OtcConfig.CREATE_ECS_WITH_PUBLIC_IP:
# PUBLICIPJSON = "\"publicip\": { \"eip\": { \"iptype\": \"5_bgp\", \"bandwidth\": { \"size\": 5, \"sharetype\": \"PER\", \"chargemode\": \"traffic\" } } },"
PERSONALIZATION = ecs.getPersonalizationJSON()
if not OtcConfig.USER_DATA_PATH is None:
USER_DATA = ecs.getUserDataContent(OtcConfig.USER_DATA_PATH)
OtcConfig.USER_DATA = USER_DATA
# OtcConfig.PUBLICIPJSON = PUBLICIPJSON
OtcConfig.PERSONALIZATION = PERSONALIZATION
# REQ_CREATE_VM = " { " + " \"server\": { " + " \"availability_zone\": \"" + OtcConfig.AZ + "\", " + " \"name\": \"" + OtcConfig.INSTANCE_NAME + "\", " + " \"imageRef\": \"" + OtcConfig.IMAGE_ID + "\", " + " \"root_volume\": { " + " \"volumetype\": \"SATA\" " + " }, " + " \"flavorRef\": \"" + OtcConfig.INSTANCE_TYPE + "\"," + PERSONALIZATION + " \"vpcid\": \"" + OtcConfig.VPCID + "\", " + " \"security_groups\": [ " + " { " + " \"id\": \"" + OtcConfig.SECUGROUP + "\" " + " } " + " ], " + " \"nics\": [ " + " { " + " \"subnet_id\": \"" + OtcConfig.SUBNETID + "\" " + " } " + " ], " + PUBLICIPJSON + " \"key_name\": \"" + OtcConfig.KEYNAME + "\", " + " \"adminPass\": \"" + OtcConfig.ADMINPASS + "\", " + " \"count\": \"" + OtcConfig.NUMCOUNT + "\", " + " \"},\": { " + " \"__vnc_keymap\": \"de\" " + " } " + " } " + " } " + " "
REQ_CREATE_VM=utils_templates.create_request("create_vm")
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/cloudservers"
#print REQ_CREATE_VM
ret = utils_http.post(url, REQ_CREATE_VM)
#print ret
OtcConfig.ECSTASKID = json.loads(ret)["job_id"]
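        # When WAIT_CREATE is set, poll the creation job until it leaves the
        # RUNNING/INIT states; getECSJOBList() refreshes
        # OtcConfig.ECSCREATEJOBSTATUS as a side effect.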
if OtcConfig.WAIT_CREATE:
ecs.getECSJOBList()
while OtcConfig.ECSCREATEJOBSTATUS in ["RUNNING", "INIT"]:
sleep(10)
ecs.getECSJOBList()
sys.stdout.write('.')
#sys.stdout.flush()
if "SUCCESS" == OtcConfig.ECSCREATEJOBSTATUS:
return OtcConfig.ECSCREATEJOBSTATUS
print("ECS Creation status: " + OtcConfig.ECSCREATEJOBSTATUS)
return ret
@staticmethod
def getIamToken():
if OtcConfig.PROJECT_NAME != None:
project = "\"name\": \"" + OtcConfig.PROJECT_NAME + "\" "
else:
project = "\"id\": \"" + OtcConfig.PROJECT_ID + "\""
REQ_IAM = " {" + " \"auth\": { " + " \"identity\": { " + " \"methods\": [" + " \"password\" " + " ], " + " \"password\": { " + " \"user\": { " + " \"name\": \"" + OtcConfig.USERNAME + "\", " + " \"password\": \"" + OtcConfig.PASSWORD + "\"," + " \"domain\": { " + " \"name\": \"" + OtcConfig.DOMAIN + "\" " + " } " + " } " + " } " + " }, " + " \"scope\": { " + " \"project\": {" + project + " } " + " } " + " } " + " }"
url = "https://" + OtcConfig.DEFAULT_HOST +":443/v3/auth/tokens"
ret = utils_http.post(url, REQ_IAM)
maindata = json.loads(ret)
OtcConfig.PROJECT_ID = maindata['token']['project']['id']
return ret
@staticmethod
def getIamTokenAKSK():
if OtcConfig.PROJECT_NAME != None:
project = "\"name\": \"" + OtcConfig.PROJECT_NAME + "\" "
else:
project = "\"id\": \"" + OtcConfig.PROJECT_ID + "\""
REQ_IAM = " {" + " \"auth\": { " + " \"identity\": { " + " \"methods\": [" + " \"password\" " + " ], " + " \"password\": { " + " \"user\": { " + " \"name\": \"" + OtcConfig.USERNAME + "\", " + " \"password\": \"" + OtcConfig.PASSWORD + "\"," + " \"domain\": { " + " \"name\": \"" + OtcConfig.DOMAIN + "\" " + " } " + " } " + " } " + " }, " + " \"scope\": { " + " \"project\": {" + project + " } " + " } " + " } " + " }"
url = "https://" + OtcConfig.DEFAULT_HOST +":443/v3/auth/tokens"
ret = utils_http.post(url, REQ_IAM)
maindata = json.loads(ret)
OtcConfig.PROJECT_ID = maindata['token']['project']['id']
return ret
@staticmethod
def convertFlavorNameToId():
""" generated source for method convertFlavorNameToId """
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/cloudservers/flavors"
JSON = utils_http.get(url)
parsed = json.loads(JSON)
flavors = parsed["flavors"]
ret = None
for flavor in flavors:
if flavor.get("name") == OtcConfig.INSTANCE_TYPE_NAME:
ret = flavor["id"]
OtcConfig.INSTANCE_TYPE = ret
@staticmethod
def convertPublicIpNameToId():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/publicips"
JSON = utils_http.get(url)
parsed = json.loads(JSON)
publicips = parsed["publicips"]
ret = None
for publicip in publicips:
if publicip.get("public_ip_address") == OtcConfig.PUBLICIP:
ret = publicip["id"]
OtcConfig.PUBLICIPID = ret
@staticmethod
def convertVPCNameToId():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/vpcs"
JSON = utils_http.get(url)
parsed = json.loads(JSON)
vpcs = parsed["vpcs"]
ret = None
for vpc in vpcs:
if vpc.get("name") == OtcConfig.VPCNAME:
ret = vpc["id"]
OtcConfig.VPCID = ret
@staticmethod
def convertVOLUMENameToId():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/cloudvolumes"
JSON = utils_http.get(url)
parsed = json.loads(JSON)
#print JSON
cloudvolumes = parsed["volumes"]
ret = None
for cloudvolume in cloudvolumes:
if cloudvolume.get("name") == OtcConfig.VOLUME_NAME:
ret = cloudvolume["id"]
OtcConfig.VOLUME_ID = ret
@staticmethod
def convertSUBNETNameToId():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/subnets"
ar = []
ar.append(OtcConfig.SUBNETNAME)
if "," in OtcConfig.SUBNETNAME:
ar=str(OtcConfig.SUBNETNAME).split(",")
JSON = utils_http.get(url)
parsed = json.loads(JSON)
subnets = parsed["subnets"]
ret = ""
for item in ar:
for subnet in subnets:
if subnet.get("name") == item and subnet.get("vpc_id") == OtcConfig.VPCID:
if len(ret) > 0:
ret = ret + ","
ret = ret + subnet["id"]
OtcConfig.SUBNETID = ret
@staticmethod
def convertIMAGENameToId():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/cloudimages"
JSON = utils_http.get(url)
parsed = json.loads(JSON)
images = parsed["images"]
ret = None
for image in images:
if image.get("name") == OtcConfig.IMAGENAME:
ret = image["id"]
OtcConfig.IMAGE_ID = ret
@staticmethod
def convertINSTANCENameToId():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/servers"
JSON = utils_http.get(url)
parsed = json.loads(JSON)
servers = parsed["servers"]
ret = None
for server in servers:
if server.get("name") == OtcConfig.INSTANCE_NAME:
ret = server["id"]
OtcConfig.INSTANCE_ID = ret
@staticmethod
def convertSECUGROUPNameToId():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/security-groups"
JSON = utils_http.get(url)
parsed = json.loads(JSON)
security_groups = parsed["security_groups"]
if not (OtcConfig.VPCNAME is None):
ecs.convertVPCNameToId()
for security_group in security_groups:
if security_group.get("name") == OtcConfig.SECUGROUPNAME and ( security_group.get("vpc_id") == OtcConfig.VPCID or OtcConfig.VPCID is None ) :
OtcConfig.SECUGROUP = security_group["id"]
if security_group.get("name") == OtcConfig.SOURCE_GROUP and ( security_group.get("vpc_id") == OtcConfig.VPCID or OtcConfig.VPCID is None ) :
OtcConfig.SOURCE_GROUP_ID = security_group["id"]
return OtcConfig.SECUGROUP
@staticmethod
def describe_volumes():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/cloudvolumes"+ "/detail"
ret = utils_http.get( url )
ecs.otcOutputHandler().print_output(ret, mainkey = "volumes", listkey= {"id", "name", "volume_type", "size", "status", "bootable", "availability_zone", "limit", "attachments", "source_volid", "snapshot_id", "description", "created_at"})
return ret
@staticmethod
def list_volumes():
if not OtcConfig.INSTANCE_NAME is None:
ecs.convertINSTANCENameToId()
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/servers/"+ OtcConfig.INSTANCE_ID + "/os-volume_attachments"
ret = utils_http.get( url )
        print(ret)
# TODO: output fix need
#ecs.otcOutputHandler().print_output(ret, mainkey = "volumes", listkey= {"id", "name", "volume_type", "size", "status", "bootable", "availability_zone", "limit", "attachments", "source_volid", "snapshot_id", "description", "created_at"})
return ret
@staticmethod
def create_volume():
REQ_CREATE_CLOUDVOLUMES = "{ \"volume\": { \"backup_id\": " + OtcConfig.SNAPSHOTID + ", " + "\"count\": " + OtcConfig.NUMCOUNT + ", \"availability_zone\": \"" + OtcConfig.AZ + "\",\"description\": \"" + OtcConfig.VOLUME_NAME + "\", \"size\": " + OtcConfig.VOLUME_SIZE + ", \"name\": \"" + OtcConfig.VOLUME_NAME + "\", \"imageRef\": " + "null" + ", \"volume_type\": \"" + OtcConfig.VOLUME_TYPE + "\" } }"
#print REQ_CREATE_CLOUDVOLUMES
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/cloudvolumes"
ret = utils_http.post(url, REQ_CREATE_CLOUDVOLUMES)
print(ret)
return ret
@staticmethod
def attach_volume():
""" generated source for method AttachVolume """
REQ_ATTACH_CLOUDVOLUMES = "{ \"volumeAttachment\": { \"volumeId\": \"" + OtcConfig.VOLUME_ID + "\", \"device\": \"" + OtcConfig.EVS_DEVICE + "\" } }"
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/cloudservers/" + OtcConfig.INSTANCE_ID + "/attachvolume"
ret = utils_http.post(url, REQ_ATTACH_CLOUDVOLUMES)
print(ret)
return ret
@staticmethod
def detach_volume():
""" generated source for method DetachVolume """
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/cloudservers/" + OtcConfig.INSTANCE_ID + "/detachvolume/" + OtcConfig.VOLUME_ID
ret = utils_http.delete(url)
print(ret)
return ret
@staticmethod
def delete_volume():
if not OtcConfig.VOLUME_NAME is None:
ecs.convertVOLUMENameToId()
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/cloudvolumes" + "/" + OtcConfig.VOLUME_ID
ret = utils_http.delete(url)
print(ret)
return ret
@staticmethod
def describe_quotas():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v1/" + OtcConfig.PROJECT_ID + "/cloudservers/limits"
ret = utils_http.get(url)
ecs.otcOutputHandler().print_output(ret, mainkey="absolute")
return ret
@staticmethod
def describe_snapshots():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/backups/detail"
ret = utils_http.get(url)
        ecs.otcOutputHandler().print_output(ret, mainkey="backups", listkey={"name", "id", "size", "status", "description", "created_at"})
return ret
@staticmethod
def restore_snapshot():
if not OtcConfig.VOLUME_NAME is None:
ecs.convertVOLUMENameToId()
        if OtcConfig.VOLUME_ID is None or OtcConfig.SNAPSHOTID is None:
            print("Volume or snapshot definition not correct! Check backups:")
            print("otc ecs describe-backups")
            os._exit(1)
REQ_RESTORE_BACKUP = "{ \"restore\":{ \"volume_id\":\"" + OtcConfig.VOLUME_ID + "\" } }"
#print REQ_RESTORE_BACKUP
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/cloudbackups" +"/" + OtcConfig.SNAPSHOTID + "/restore"
ret = utils_http.post(url, REQ_RESTORE_BACKUP)
print(ret)
return ret
@staticmethod
def delete_snapshot():
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/cloudbackups"+ "/" + OtcConfig.SNAPSHOTID
ret = utils_http.post( url , "")
print(ret)
return ret
@staticmethod
def create_snapshot():
if not OtcConfig.VOLUME_NAME is None:
ecs.convertVOLUMENameToId()
        # default the backup name/description to the volume name (or id) when none was given
        if OtcConfig.DESCRIPTION is None:
            OtcConfig.DESCRIPTION = OtcConfig.VOLUME_ID
            if not OtcConfig.VOLUME_NAME is None:
                OtcConfig.DESCRIPTION = OtcConfig.VOLUME_NAME
REQ_CREATE_BACKUP = "{ \"backup\":{ \"" + "volume_id\":\"" + OtcConfig.VOLUME_ID + "\", " + "\"name\":\"" + OtcConfig.DESCRIPTION + "\", \"description\":\"" + OtcConfig.DESCRIPTION + "\" } }"
#print REQ_CREATE_BACKUP
url = "https://" + OtcConfig.DEFAULT_HOST+ "/v2/" + OtcConfig.PROJECT_ID + "/cloudbackups"
ret = utils_http.post(url, REQ_CREATE_BACKUP)
print (ret)
return ret
|
# Imports the monkeyrunner modules used by this program
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
import sys, os
import random
class areas:
    GOLD = ((900, 600), (1000, 750))  # ((x1, y1), (x2, y2)) corners of the tap area
# Connects to the current device, returning a MonkeyDevice object
device = MonkeyRunner.waitForConnection()
def get_tap_on_gold():
return (
random.randint(areas.GOLD[0][0], areas.GOLD[1][0]),
random.randint(areas.GOLD[0][1], areas.GOLD[1][1])
)
def iterate():
tap = get_tap_on_gold()
print(" click: %s" % (tap,) )
device.touch(tap[0],tap[1],MonkeyDevice.DOWN_AND_UP)
MonkeyRunner.sleep(random.randint(9,12)/1.0)
def must_exit():
f = open('must_exit', 'r')
request = f.read()
f.close()
print(request)
return request.strip() == "1"
counter = 0
#while True:
for j in range(1000):
if must_exit():
sys.exit(0)
print("click: %d" % (j) )
iterate()
# Takes a screenshot
#result = device.takeSnapshot()
# Output file image
#fileImage = os.path.join(os.path.dirname(os.sys.argv[0]), "shot1.png")
# Writes the screenshot to a file
#result.writeToFile(fileImage,'png')
|
from oadr_core.oadr_payloads.oadr_payloads_general import ELEMENTS, eiResponse
def oadrCanceledOpt(code, description, requestID, optID):
canceled_element = ELEMENTS['oadr'].oadrCanceledOpt(
eiResponse(code, description, requestID),
ELEMENTS['ei'].optID(optID)
)
return canceled_element
def oadrCreatedOpt(code, description, requestID, optID):
created_element = ELEMENTS['oadr'].oadrCreatedOpt(
eiResponse(code, description, requestID),
ELEMENTS['ei'].optID(optID)
)
return created_element
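# Hedged usage sketch (added for illustration; assumes ELEMENTS are lxml
# ElementMaker factories, as suggested by oadr_payloads_general, so the result
# can be serialised with lxml):
#
#     from lxml import etree
#     xml = oadrCanceledOpt("200", "OK", "request-1", "opt-1")
#     print(etree.tostring(xml, pretty_print=True).decode())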
|
# Problem: https://docs.google.com/document/d/1zaGh6rGnx07dI_ooeY8tp_y9h68fAMg6D3CA6a_giIY/edit?usp=sharing
import math
# read two integers, l and s, one per line from standard input
l, s = map(int, open(0))
print(math.ceil(l * 20 / s))
|