max_stars_repo_path (string, 3–269) | max_stars_repo_name (string, 4–119) | max_stars_count (int64, 0–191k) | id (string, 1–7) | content (string, 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5)
---|---|---|---|---|---|---|
bank/src/base_app/serializers.py | yuramorozov01/bank_system | 0 | 12792651 | from base_app.models import BankSettings
from rest_framework import serializers
class BankSettingsDetailsSerializer(serializers.ModelSerializer):
'''Serializer for bank settings.
This serializer provides detailed information about bank settings.
'''
class Meta:
model = BankSettings
fields = ['curr_bank_day']
read_only_fields = ['curr_bank_day']
| 2.421875 | 2 |
tests/test_setups/test_setups_functions.py | openml/openml-python-contrib | 1 | 12792652 | <filename>tests/test_setups/test_setups_functions.py
import ConfigSpace
import openml
import openmlcontrib
import os
import pickle
from openmlcontrib.testing import TestBase
class TestSetupFunctions(TestBase):
def setUp(self):
self.live_server = "https://www.openml.org/api/v1/xml/"
self.test_server = "https://test.openml.org/api/v1/xml/"
openml.config.server = self.test_server
openml.config.apikey = ''
def test_obtain_setups_by_ids(self):
setup_ids = [i for i in range(1, 30)]
setups = openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7)
self.assertEqual(set(setups.keys()), set(setup_ids))
def test_obtain_setups_by_ids_incomplete_raise(self):
with self.assertRaises(ValueError):
setup_ids = [i for i in range(30)]
openmlcontrib.setups.obtain_setups_by_ids(setup_ids, limit=7)
def test_obtain_setups_by_ids_incomplete(self):
setup_ids = [i for i in range(30)]
openmlcontrib.setups.obtain_setups_by_ids(setup_ids, require_all=False, limit=7)
def test_filter_setup_list_nominal(self):
openml.config.server = self.live_server
setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc
poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly'])
sigm_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['sigmoid'])
poly_ids = set(poly_setups.keys())
sigm_ids = set(sigm_setups.keys())
inters = poly_ids.intersection(sigm_ids)
self.assertEqual(len(inters), 0)
self.assertGreater(len(poly_ids) + len(sigm_ids), 20)
self.assertGreater(len(poly_ids), 10)
self.assertGreater(len(sigm_ids), 10)
poly_setups_prime = openmlcontrib.setups.filter_setup_list(poly_setups, 'kernel', allowed_values=['poly'])
self.assertEqual(poly_ids, set(poly_setups_prime.keys()))
def test_filter_setup_list_nominal_numeric(self):
openml.config.server = self.live_server
setupid_setup = openml.setups.list_setups(flow=7707, size=100) # pipeline with libsvm svc
threshold = 3
poly_setups = openmlcontrib.setups.filter_setup_list(setupid_setup, 'kernel', allowed_values=['poly'])
poly_setups_smaller = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', max=threshold)
poly_setups_bigger = openmlcontrib.setups.filter_setup_list(poly_setups, 'degree', min=threshold+1)
smaller_ids = set(poly_setups_smaller.keys())
bigger_ids = set(poly_setups_bigger.keys())
all_ids = set(poly_setups.keys())
inters = smaller_ids.intersection(bigger_ids)
self.assertEqual(len(inters), 0)
self.assertEqual(len(smaller_ids) + len(bigger_ids), len(all_ids))
def test_setup_in_configuration_space(self):
cs = TestBase._get_libsvm_svc_config_space()
for setup_file in os.listdir('../data/setups'):
with open(os.path.join('../data/setups', setup_file), 'rb') as fp:
setup = pickle.load(fp)
with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp:
flow = pickle.load(fp)
self.assertTrue(openmlcontrib.setups.setup_in_config_space(setup, flow, cs))
def test_filter_setup_list_by_config_space(self):
cs = TestBase._get_libsvm_svc_config_space()
setups = {}
for setup_file in os.listdir('../data/setups'):
with open(os.path.join('../data/setups', setup_file), 'rb') as fp:
setup = pickle.load(fp)
setups[setup.setup_id] = setup
# all flow ids are supposed to be the same
with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp:
flow = pickle.load(fp)
self.assertEqual(len(setups), 20)
setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs)
self.assertEqual(len(setups), len(setups_filtered))
def test_filter_setup_list_by_config_space_fails(self):
degree = ConfigSpace.UniformIntegerHyperparameter("degree", -5, -1, default_value=-3)
cs = ConfigSpace.ConfigurationSpace()
cs.add_hyperparameters([degree])
setups = {}
for setup_file in os.listdir('../data/setups'):
with open(os.path.join('../data/setups', setup_file), 'rb') as fp:
setup = pickle.load(fp)
setups[setup.setup_id] = setup
# all flow ids are supposed to be the same
with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp:
flow = pickle.load(fp)
self.assertEqual(len(setups), 20)
setups_filtered = openmlcontrib.setups.filter_setup_list_by_config_space(setups, flow, cs)
self.assertEqual(len(setups_filtered), 0)
def test_setup_in_configuration_space_illegal_value(self):
degree = ConfigSpace.UniformIntegerHyperparameter("degree", -5, -1, default_value=-3)
cs = ConfigSpace.ConfigurationSpace()
cs.add_hyperparameters([degree])
for setup_file in os.listdir('../data/setups'):
with open(os.path.join('../data/setups', setup_file), 'rb') as fp:
setup = pickle.load(fp)
with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp:
flow = pickle.load(fp)
self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs))
def test_setup_in_configuration_space_param_not_present(self):
degree = ConfigSpace.UniformIntegerHyperparameter("test123", -20, 20, default_value=-3)
cs = ConfigSpace.ConfigurationSpace()
cs.add_hyperparameters([degree])
for setup_file in os.listdir('../data/setups'):
with open(os.path.join('../data/setups', setup_file), 'rb') as fp:
setup = pickle.load(fp)
with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp:
flow = pickle.load(fp)
self.assertFalse(openmlcontrib.setups.setup_in_config_space(setup, flow, cs))
def test_setup_to_parameter_dict(self):
expected_active_parameters = TestBase._libsvm_expected_active_hyperparameters()
cs = TestBase._get_libsvm_svc_config_space()
for setup_file in os.listdir('../data/setups'):
with open(os.path.join('../data/setups', setup_file), 'rb') as fp:
setup = pickle.load(fp)
with open('../data/flows/%d.pkl' % setup.flow_id, 'rb') as fp:
flow = pickle.load(fp)
result = openmlcontrib.setups.setup_to_parameter_dict(setup, flow, True, cs)
self.assertEqual(expected_active_parameters[result['classifier__kernel']], set(result.keys()))
| 2.1875 | 2 |
test_data1E.py | jfs60/Group-147-PartIA-Flood-Warning-System | 0 | 12792653 | from floodsystem.geo import build_station_list
from floodsystem.geo import rivers_by_station_number
def test_rivers_by_station_number():
"""Tests to check that the outputs from funtion rivers_by_station_number are as expected"""
stations = build_station_list()
test = rivers_by_station_number(stations, 9)
for station in test:
assert type(station) is tuple
assert type(station[1]) is int
i=0
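# the returned (river, count) tuples should be sorted by station count in descending order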
for i in range(0,len(test)-1):
assert test[i][1] >= test[i+1][1]
| 3.3125 | 3 |
flask_web/flask_app/deep_learning/machine_learning/ml_utils.py | Yakings/system_demo | 7 | 12792654 | <filename>flask_web/flask_app/deep_learning/machine_learning/ml_utils.py
import os
import numpy as np | 1.195313 | 1 |
src/Deque/palindrome_deque.py | shapovalovdev/AlgorythmsAndDataStructures | 0 | 12792655 | from src.Deque.deque_scratch import Deque
def is_palindrome(string_to_check):
string_to_check=string_to_check.strip()
if not string_to_check:
raise Exception("The string is empty")
deq=Deque()
for el in string_to_check:
deq.addTail(el)
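# repeatedly take one character from each end; a palindrome requires every such pair to match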
front=deq.removeFront()
end=deq.removeTail()
while front == end and front is not None and end is not None:
if deq.size() == 1:
return True
front=deq.removeFront()
end=deq.removeTail()
if deq.size() == 0:
return True
else:
return False
| 3.765625 | 4 |
experiments/series_1/experiment_2/experiment_setup.py | TomaszOdrzygozdz/gym-splendor | 1 | 12792656 | <reponame>TomaszOdrzygozdz/gym-splendor
TRAIN_DIR = '/net/archive/groups/plggluna/plgtodrzygozdz/lvl1/train_epochs_new_eval'
VALID_FILE = '/net/archive/groups/plggluna/plgtodrzygozdz/lvl1/valid_new/valid_eval.pickle'
| 1.085938 | 1 |
Geeks-For-Geeks/Practice/Array/Plus-One.py | HetDaftary/Competitive-Coding-Solutions | 0 | 12792657 | <filename>Geeks-For-Geeks/Practice/Array/Plus-One.py<gh_stars>0
#User function Template for python3
class Solution:
def increment(self, arr, N):
# code here
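# add one to the least-significant digit, then propagate any carry toward the front;
# if the most-significant digit overflows, prepend a 1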
i = N - 1
arr[i] += 1
while i > 0 and arr[i] > 9:
arr[i] = 0
arr[i - 1] += 1
i -= 1
if arr[0] > 9:
arr[0] = 0
arr.insert(0, 1)
return arr
#{
# Driver Code Starts
#Initial Template for Python 3
if __name__ == '__main__':
t = int (input ())
for _ in range (t):
N=int(input())
arr=list(map(int,input().split()))
ob = Solution()
ptr = ob.increment(arr,N)
for i in ptr:
print(i,end=" ")
print()
# } Driver Code Ends | 3.578125 | 4 |
problemsets/Codeforces/Python/B705.py | juarezpaulino/coderemite | 0 | 12792658 | <reponame>juarezpaulino/coderemite
"""
*
* Author: <NAME>(coderemite)
* Email: <EMAIL>
*
"""
r=0
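# r tracks the running parity of (x - 1) over the inputs read so far;
# the printed winner (2 or 1) depends only on this parity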
for x in map(int,[*open(0)][1].split()):
r=(r+x-1)%2
print([2,1][r]) | 3.078125 | 3 |
utils/misc.py | bbbbbbzhou/DuDoRNet | 35 | 12792659 | <filename>utils/misc.py
__all__ = ['read_dir', 'get_last_checkpoint', 'compute_metrics', 'get_aapm_minmax',
'convert_coefficient2hu', 'convert_hu2coefficient']
import os
import os.path as path
import scipy.io as sio
import numpy as np
from tqdm import tqdm
from skimage.measure import compare_ssim, compare_psnr
def read_dir(dir_path, predicate=None, name_only=False):
if predicate in {'dir', 'file'}:
predicate = {
'dir': lambda x: path.isdir(path.join(dir_path, x)),
'file':lambda x: path.isfile(path.join(dir_path, x))
}[predicate]
return [f if name_only else path.join(dir_path, f)
for f in os.listdir(dir_path)
if (True if predicate is None else predicate(f))]
def get_last_checkpoint(checkpoint_dir, predicate=None, pattern=None):
if predicate is None:
predicate = lambda x: x.endswith('pth') or x.endswith('pt')
checkpoints = read_dir(checkpoint_dir, predicate)
if len(checkpoints) == 0:
return None
checkpoints = sorted(checkpoints, key=lambda x: path.getmtime(x))
checkpoint = checkpoints[-1]
if pattern is None:
pattern = lambda x: int(path.basename(x).split('_')[-1].split('.')[0])
return checkpoint, pattern(checkpoint)
def compute_metrics(lq_image, hq_image, metrics=None):
psnr = compare_psnr(lq_image, hq_image, hq_image.max())
ssim = compare_ssim(lq_image, hq_image, data_range=hq_image.max())
if metrics is None:
return {'psnr': [psnr], 'ssim': [ssim]}
else:
metrics['psnr'].append(psnr)
metrics['ssim'].append(ssim)
return metrics
def convert_coefficient2hu(image):
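# Hounsfield units: HU = (mu - mu_water) / mu_water * 1000, with 0.0192 used here as
# the linear attenuation coefficient of water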
image = (image - 0.0192) / 0.0192 * 1000
return image
def convert_hu2coefficient(image):
image = image * 0.0192 / 1000 + 0.0192
return image
def get_aapm_minmax(data_dir,
splits=('test', 'train', 'val'), tags=('dense_view', 'sparse_view')):
data_files = []
for s in splits:
split_dir = path.join(data_dir, s)
for d in os.listdir(split_dir):
study_dir = path.join(split_dir, d)
for f in os.listdir(study_dir):
data_file = path.join(study_dir, f)
if f.endswith('.mat'):
data_files.append(data_file)
val_max = -float('inf')
val_min = float('inf')
for f in tqdm(data_files):
data = sio.loadmat(f)
data = np.array([data[t] for t in tags])
if data.max() > val_max:
val_max = data.max()
if data.min() < val_min:
val_min = data.min()
return val_min, val_max | 2.125 | 2 |
python/pyfgag/simu_compiler.py | FAIG2014/fpga-projects | 0 | 12792660 | <reponame>FAIG2014/fpga-projects
import os
import sys
import pyfgag.dependency_builder as deps
import subprocess
import pathlib
class SimuCompiler(object):
def __init__(self, build_dir=os.getcwd()):
self.current_folder = os.getcwd()
self.build_dir = build_dir
pathlib.Path(self.build_dir).mkdir(parents=True, exist_ok=True)
def create_cmds_compile(self, top_level_file_path:str):
raise NotImplementedError("Subclasses should implement this!")
def create_cmds_run(self, top_level_file_path:str):
raise NotImplementedError("Subclasses should implement this!")
def compile(self, top_level_file_path:str):
cmds = self.create_cmds_compile(top_level_file_path=top_level_file_path)
for cmd in cmds:
print("RUNING %s" % cmd)
cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir)
if cmd_ret:
print("ERROR: compiles went wrong")
raise Exception()
def run_simu(self, top_level_file_path:str):
cmds = self.create_cmds_run(top_level_file_path=top_level_file_path)
for cmd in cmds:
print("RUNING %s" % cmd)
cmd_ret = subprocess.call(cmd, shell=True, cwd=self.build_dir)
if cmd_ret:
raise Exception()
@staticmethod
def get_module_name_from_path(file_path:str):
# we are assuming that we are logical, and the file name is the module name.
basename = os.path.basename(file_path)
return os.path.splitext(basename)[0]
class CompilerIverilog(SimuCompiler):
def __init__(self, build_dir=os.getcwd()):
super(CompilerIverilog, self).__init__(build_dir=build_dir)
def create_cmds_compile(self, top_level_file_path:str):
build_lib = deps.FpgaLib(self.current_folder)
cmd = "iverilog -g2012 "
# define
cmd += " -D SIMULATION "
# includes
for include in build_lib.get_full_include_dependencies():
cmd += " -I %s " % include
# files
for sv_file in build_lib.get_full_file_dependencies():
cmd += " %s " % sv_file
cmd += " %s " % top_level_file_path
print(cmd)
return [cmd]
def create_cmds_run(self, top_level_file_path:str):
cmd = "vvp a.out "
return [cmd]
class CompilerModelsim(SimuCompiler):
def __init__(self, build_dir=os.getcwd()):
super(CompilerModelsim, self).__init__(build_dir=build_dir)
def create_cmds_compile(self, top_level_file_path:str):
cmds = []
build_lib = deps.FpgaLib(self.current_folder)
cmds.append("which vlib")
cmds.append("vlib work")
cmds.append("vmap work work")
cmd = "vlog "
# define
cmd += " +define+SIMULATION "
# includes
for include in build_lib.get_full_include_dependencies():
cmd += " +incdir+%s " % include
# files
for sv_file in build_lib.get_full_file_dependencies():
cmd += " %s " % sv_file
cmd += " %s " % top_level_file_path
cmds.append(cmd)
return cmds
def create_cmds_run(self, top_level_file_path:str):
# crazy warning we want to remove
#cmd += " +nowarn3116"
cmd = "vsim +nowarn3116 -t ps -c -do \"log -r /* ; run 20 ms; quit -f \" %s " % self.get_module_name_from_path(top_level_file_path)
return [cmd]
class CompilerVerilator(SimuCompiler):
def __init__(self, build_dir=os.getcwd()):
super(CompilerVerilator, self).__init__(build_dir=build_dir)
@staticmethod
def sv_top_name(top_level_file_path:str):
main_name = CompilerVerilator.get_module_name_from_path(top_level_file_path)
return main_name.split("_main")[0]
#return CompilerVerilator.get_module_name_from_path(top_level_file_path)
def create_cmds_compile(self, top_level_file_path:str):
cmds = []
build_lib = deps.FpgaLib(self.current_folder)
sv_top_name = self.sv_top_name(top_level_file_path)
cmd = "verilator -Wall --cc %s.sv -GTEST=2" % sv_top_name
# define
cmd += " +define+SIMULATION --dump-defines "
# includes
for include in build_lib.get_full_include_dependencies():
cmd += " +incdir+%s " % include
# files
for sv_file in build_lib.get_full_file_dependencies():
cmd += " %s " % sv_file
cmd += " --exe %s " % top_level_file_path
cmds.append(cmd)
cmds.append("make -j -C obj_dir -f V%s.mk V%s" % (sv_top_name, sv_top_name) )
return cmds
def create_cmds_run(self, top_level_file_path:str):
sv_top_name = self.sv_top_name(top_level_file_path)
cmd = "obj_dir/V%s" % sv_top_name
return [cmd]
class CompilerVivado(SimuCompiler):
def __init__(self, build_dir=os.getcwd()):
super(CompilerVivado, self).__init__(build_dir=build_dir)
def create_cmds_compile(self, top_level_file_path:str):
cmds = []
build_lib = deps.FpgaLib(self.current_folder)
sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path)
cmd = "xvlog -sv "
# define
cmd += " -d SIMULATION"
# includes
for include in build_lib.get_full_include_dependencies():
cmd += " -i %s " % include
# files
for sv_file in build_lib.get_full_file_dependencies():
cmd += " %s " % sv_file
cmd += " %s " % top_level_file_path
cmds.append(cmd)
cmds.append("xelab -debug typical %s -s %s_sim" % (sv_top_name, sv_top_name) )
return cmds
def create_cmds_run(self, top_level_file_path:str):
sv_top_name = CompilerVivado.get_module_name_from_path(top_level_file_path)
cmds = []
cmd = """echo "run 20ns
current_time
quit" > run.tcl
"""
cmds.append(cmd)
cmd = "xsim %s_sim -tclbatch run.tcl" % (sv_top_name)
cmds.append(cmd)
return cmds | 2.640625 | 3 |
7.36.0.dev0/ietf/__init__.py | kesara/ietf-datatracker | 0 | 12792661 | # Copyright The IETF Trust 2007-2020, All Rights Reserved
# -*- coding: utf-8 -*-
from . import checks # pyflakes:ignore
# Don't add patch number here:
__version__ = "7.36.0"
# set this to ".p1", ".p2", etc. after patching
__patch__ = ""
__date__ = "$Date: 2021-08-07 03:05:11 +1200 (Sat, 07 Aug 2021) $"
__rev__ = "$Rev: 19289 $"
__id__ = "$Id: __init__.py 19289 2021-08-06 15:05:11Z <EMAIL> $"
| 1.195313 | 1 |
generate_figures.py | max-liu-112/STGAN-WO | 45 | 12792662 |
import os
import pickle
import numpy as np
import PIL.Image
import dnnlib
import dnnlib.tflib as tflib
import config
from training import misc
synthesis_kwargs = dict(minibatch_size=8)
_Gs_cache = dict()
def load_Gs(url):
if url not in _Gs_cache:
with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
_G, _D, Gs = pickle.load(f)
_Gs_cache[url] = Gs
return _Gs_cache[url]
def draw_figure(png, Gs, seeds):
avg_dlantents_b = Gs.get_var('dlatent_avg_b')
avg_dlantents_c = Gs.get_var('dlatent_avg_c')
for seed in seeds:
rnd = np.random.RandomState(seed)
b1 = rnd.randn(Gs.input_shapes[0][1])
b1 = b1[np.newaxis]
b1 = Gs.components.mapping_b.run(b1, None)
b1_v = b1[0, 0, :]
#
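# truncation-style trick: shrink the deviation from the average latent (factor 0.9) before synthesis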
b1[:, :] = (b1_v - avg_dlantents_b) * 0.9 + avg_dlantents_b
# change C
for i in range(20):
c = rnd.randn(Gs.input_shapes[1][1])
c = c[np.newaxis]
c = Gs.components.mapping_c.run(c, None) # [seed, layer, component]
c_v = c[0, 0, :]
c[:, :] = (c_v - avg_dlantents_c) * 0.7 + avg_dlantents_c
current_png = png + '/seedc_%d_%d' % (seed, i) + '.png'
gen = Gs.components.synthesis.run(b1, c, randomize_noise=False, **synthesis_kwargs)[-1]
misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1))
b1_v = b1[0, 0, :]
c = rnd.randn(Gs.input_shapes[1][1])
c = c[np.newaxis]
c = Gs.components.mapping_c.run(c, None) # [seed, layer, component]
c[:, :] = avg_dlantents_c
for j in range(80):
random_b2 = rnd.randn(Gs.input_shapes[0][1])
random_b2 = random_b2[np.newaxis]
random_b2 = Gs.components.mapping_b.run(random_b2, None)
b2_v = (random_b2[0, 0, :] - avg_dlantents_b) * 0.5 + avg_dlantents_b
print(b2_v.shape)
# gram-schmidt process
a1 = np.sum(b1_v * b2_v, dtype=np.float32)
a2 = np.sum(b1_v * b1_v, dtype=np.float32)
print(a1)
print(a2)
b2_v = b2_v - a1 / a2 * b1_v
print(b1_v.shape)
print(b2_v.shape)
print(np.sum(b1_v * b2_v))
for i in range(10):
tmp = np.empty_like(b1)
tmp[:, :] = b1_v + 0.1 * i * b2_v
current_png = png + '/seedb%d_%d_%d' % (seed, j, i) + '.png'
gen = Gs.components.synthesis.run(tmp, c, randomize_noise=False, **synthesis_kwargs)[-1]
misc.save_image_grid(gen, current_png, drange=[-1, 1], grid_size=(1, 1))
#---------------------------------------------------------------------------
# Main program.
def main():
tflib.init_tf()
os.makedirs(config.result_dir, exist_ok=True)
network_pkl = 'network-snapshot-010000.pkl'
G, D, Gs = misc.load_pkl(network_pkl)
draw_figure(config.result_dir, Gs, seeds = [2, 7, 8, 11, 23])
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
| 2.21875 | 2 |
hangover/python/main.py | jimjh/challenges | 0 | 12792663 | import sys
for line in sys.stdin:
if line == "0.00\n": break;
c = float(line.strip())
(curr_n, curr_len) = (0, 0.0)
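# the overhang after n cards is 1/2 + 1/3 + ... + 1/(n+1); count cards until the sum reaches c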
while c > curr_len:
curr_n += 1
curr_len += 1.0/(1 + curr_n)
print curr_n, "card(s)"
| 3.4375 | 3 |
Ball Handle/imu.py | maxbot5/Ball-Handle-Software | 0 | 12792664 | # -*- coding: utf-8 -*-
import time
import numpy as np
from classes import Debug, KalmanFilter
import smbus
bus = smbus.SMBus(2) # bus = smbus.SMBus(0) for Revision 1
address = 0x68 # via i2cdetect
power_mgmt_1 = 0x6b
ACCEL_CONFIG = 0x1C # Reg 28
ACCEL_CONFIG2 = 0x1D # Reg 29
class Imu(Debug, KalmanFilter):
def __init__(self, sim_mode=False):
self.debug = Debug('imu')
self.sim_mode = sim_mode
self.kf = self.filter_config()
self.raw = self.read_raw()
self.offset = self.offset_calc()
#self.port = port
self.imu_config()
def filter_config(self):
# parameters for the Kalman filter
dt = 1.0 / 50.0
# state transition model, A
F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]])
H = np.array([0, 0, 1]).reshape(1, 3) # transpose # observation model C
q = 0.05
Q = np.array([[q, q, 0], [q, q, 0], [0, 0, 0]]) # process noise
R = np.array([0.8]).reshape(1, 1) # observation noise
return KalmanFilter(F=F, H=H, Q=Q, R=R)
def imu_config(self):
# activate the module so it can be addressed
bus.write_byte_data(address, power_mgmt_1, 0) # full power mode
# bus.write_byte_data(address, power_mgmt_2, 0b00001111) #disable=1, disables accel_z, gyro_x to gyro_z
# set Accelerometer Full Scale Select (here to +-2g)
bus.write_byte_data(address, ACCEL_CONFIG, 0b00100000)
# set the low-pass filter
bus.write_byte_data(address, ACCEL_CONFIG2,
0b00000100) # corresponds to value 4, i.e. 19.8 ms ~50 Hz
#print("IMU config ready..")
def read_word(self, reg):
h = bus.read_byte_data(address, reg)
l = bus.read_byte_data(address, reg + 1)
# h = bus.read_byte_data(self.address, reg)
# l = bus.read_byte_data(self.address, reg + 1)
value = (h << 8) + l
return value
def read_word_2c(self, reg):
val = self.read_word(reg)
if (val >= 0x8000):
return -((65535 - val) + 1)
else:
return val
def read_raw(self):
if self.sim_mode == True:
return 100, 200, 20
else:
beschleunigung_xout = self.read_word_2c(0x3b)
beschleunigung_yout = self.read_word_2c(0x3d)
gyroskop_zout = self.read_word_2c(0x47)
beschleunigung_xout_skaliert = beschleunigung_xout / 16384.0 # value from sensor documentation
beschleunigung_yout_skaliert = beschleunigung_yout / 16384.0
gyroskop_zout_skaliert = gyroskop_zout / 131
return beschleunigung_xout_skaliert, beschleunigung_yout_skaliert, gyroskop_zout_skaliert
def offset_calc(self):
init_data = []
print("offset calc start...")
for count in range(0, 200):
init_data.append(self.read_raw())
offset = np.array(init_data)
print("finished calc..")
#print("offset:",offset)
return np.median(offset, axis=0)
def kalman_filter(self, z):
# this is my C matrix for the output, so the middle state should be the velocity
np.dot(self.kf.H, self.kf.predict())
self.kf.update(z)
#print("kalmanfilter: ", self.kf.x[0], self.kf.x[1], self.kf.x[2])
return self.kf.x[1]
def process(self):
return self.kalman_filter(self.read_raw() - self.offset)
'''
def test_imu(save=False, draw=False):
print("stat testing...")
imu = Imu(sim_mode=False)
t_ref = int(round(time.time() * 1000))
if imu.sim_mode:
for i in range(0, 1000):
try:
imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run()
imu.debug.excecute(t_ref)
time.sleep(0.1)
except KeyboardInterrupt:
break
else:
while KeyboardInterrupt is not True:
try:
imu.debug.V_X, imu.debug.V_Y, imu.debug.w_Z = imu.run()
imu.debug.excecute(t_ref)
except KeyboardInterrupt:
break
if save:
imu.debug.save()
if draw:
imu.debug.draw()
return
# if __name__== "__main":
test_imu(save=True)
'''
| 2.5 | 2 |
doobi/doobi_pack/apps.py | bryanopew/doobi | 0 | 12792665 | <filename>doobi/doobi_pack/apps.py
from django.apps import AppConfig
class DoobiPackConfig(AppConfig):
name = 'doobi.doobi_pack'
| 1.28125 | 1 |
hivemind/mpi/worker.py | enj/hivemind | 1 | 12792666 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Represents a Worker node."""
from ..util import tags, MASTER
class Worker(object):
"""A Worker node runs Tasks that are provided by the Master node."""
def __init__(self, mpi, master=MASTER):
"""Construct a Worker with the global MPI object.
:param mpi: the global MPI object
:type mpi: MPI
:param master: the id of the Master node, defaults to MASTER (0)
:type master: int, optional
"""
self.mpi = mpi
self.comm = mpi.COMM_WORLD
self.status = mpi.Status()
self.tag = tags.WORK
self.master = master
if __debug__:
name = mpi.Get_processor_name()
rank = self.comm.Get_rank()
from logging import getLogger
self.log = getLogger("{} {} {}".format(__name__, name, rank))
def send(self, message):
"""Send the given message to the Master node.
:param message: the message to send
:type message: object
"""
self.comm.send(message, dest=self.master, tag=self.tag)
def receive(self):
"""Receive and act upon a message/Task from the Master node."""
task = self.comm.recv(source=self.master, tag=self.mpi.ANY_TAG, status=self.status)
self.tag = self.status.Get_tag()
if self.tag == tags.WORK:
self.task = task
def run(self):
"""Run the current Task unless exiting. Sends the PID and UID to the Master after completion."""
if self.tag == tags.EXIT:
return
if __debug__:
self.log.debug("Start Task {}".format(self.task._uid))
if self.task.skip is False: # This should always be False
self.task.run()
if __debug__:
self.log.debug("Finished Task {}".format(self.task))
self.send((self.task._pid, self.task._uid))
def loop(self):
"""Loop between receive and run until told to exit."""
while self.tag != tags.EXIT:
self.receive()
self.run()
| 2.921875 | 3 |
src/chitty/web/cli.py | zgoda/chitty-server | 0 | 12792667 | from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace
from dotenv import find_dotenv, load_dotenv
from werkzeug import run_simple
def parse_args() -> Namespace:
parser_kw = {'formatter_class': ArgumentDefaultsHelpFormatter}
parser = ArgumentParser(description='Chitty auxiliary web service')
subparsers = parser.add_subparsers(help='Available commands')
run_parser = subparsers.add_parser('run', help='Launch the server', **parser_kw)
run_parser.add_argument(
'-H', '--host', default='127.0.0.1', help='IP address to bind to'
)
run_parser.add_argument(
'-p', '--port', type=int, default=5001, help='port number to bind to'
)
return parser.parse_args()
def main():
load_dotenv(find_dotenv())
from .app import make_app
application = make_app()
opts = parse_args()
run_simple(opts.host, opts.port, application, use_reloader=True, use_debugger=False)
| 2.515625 | 3 |
scripts/get_basin_data.py | jsadler2/preprocess_nwm_pgdl | 0 | 12792668 | <gh_stars>0
# coding: utf-8
import pandas as pd
import json
import ulmo
import hydrofunctions as hf
from hydrofunctions.exceptions import HydroNoDataError
from utils import get_sites_in_basin
def get_json_site_param(huc, param, file_name=None):
# get all the sites in the huc
sites_in_huc, data = get_sites_in_basin(huc)
# get all the sites in the huc that have param
sites_with_param = ulmo.usgs.nwis.get_sites(sites=sites_in_huc,
parameter_code=param)
# get geojson just for sites with param
sites_with_param_data = []
for site in data['features']:
site_id = site['properties']['identifier']
# is returned as USGS-[site_num]
id_num = site_id.split('-')[1]
# check if id is in the list of sites with param
if id_num in sites_with_param.keys():
sites_with_param_data.append(site)
data['features'] = sites_with_param_data
if file_name:
with open(file_name, 'w') as fl:
json.dump(data, fl)
return data
def get_data_from_sites(sites, service, parameter_code, start_date, end_date):
data_sites = []
sites_with_param = []
for site in sites:
try:
site_data = hf.NWIS(site, service, start_date, end_date,
parameterCd=parameter_code)
site_data_df = site_data.get_data().df()
data_sites.append(site_data_df)
sites_with_param.append(site)
print('got data for {}'.format(site))
except HydroNoDataError:
print("no data for {}".format(site))
data_from_sites_combined = pd.concat(data_sites, axis=1)
return data_from_sites_combined
def get_data_for_huc(huc, param, start_date, end_date, service='dv'):
huc_site_list, data = get_sites_in_basin(huc)
site_data = get_data_from_sites(huc_site_list, service, param, start_date,
end_date)
return site_data
# get all sites for a HUC 12
# huc = "020402060105"
#
# parameter_code = "00060"
# start_date = "2018-01-01"
# end_date = "2019-01-10"
# service = 'dv'
| 2.765625 | 3 |
backend/youngun/youngun/apps/usermanager/migrations/0008_auto_20200710_1945.py | aakashbajaj/Youngun-Campaign-Tracking | 0 | 12792669 | <reponame>aakashbajaj/Youngun-Campaign-Tracking<filename>backend/youngun/youngun/apps/usermanager/migrations/0008_auto_20200710_1945.py
# Generated by Django 3.0.7 on 2020-07-10 19:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('usermanager', '0007_staffprofile_added_by'),
]
operations = [
migrations.AddField(
model_name='clientprofile',
name='brand',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='client_profile', to='usermanager.Brand', verbose_name='Brand'),
),
migrations.AddField(
model_name='staffprofile',
name='brand',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='staff_profile', to='usermanager.Brand', verbose_name='Brand'),
),
]
| 1.40625 | 1 |
py_tdlib/constructors/user_profile_photo.py | Mr-TelegramBot/python-tdlib | 24 | 12792670 | <gh_stars>10-100
from ..factory import Type
class userProfilePhoto(Type):
id = None # type: "int64"
added_date = None # type: "int32"
sizes = None # type: "vector<photoSize>"
| 1.703125 | 2 |
Day 24/Core Team/python.py | ChetasShree/MarchCode | 9 | 12792671 | <gh_stars>1-10
arr = [ 5,3,5,2,41,4,3,1,4,4 ]
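# print each distinct value once: later duplicates are overwritten with -1 and skipped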
for i in range(10):
if (arr[i]!= -1):
for j in range (i+1, 10):
if (arr[i] == arr[j]):
arr[j] = -1
print(arr[i], end = ' ') | 3.359375 | 3 |
rssfly/extractor/pixiv_fanbox.py | lidavidm/rssfly | 1 | 12792672 | <reponame>lidavidm/rssfly
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import structlog
from rssfly.extractor.common import Chapter, Comic, Context, Extractor
logger = structlog.get_logger(__name__)
_DEFAULT_HEADERS = {
"user-agent": "User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:92.0) Gecko/20100101 Firefox/92.0",
"accept": "application/json",
}
class FanboxExtractor(Extractor):
@property
def name(self):
return "pixiv_fanbox"
@property
def publisher(self):
return "Fanbox"
def extract(self, context: Context, comic_id: str) -> Comic:
url = f"https://api.fanbox.cc/post.listCreator?creatorId={comic_id}&limit=10"
logger.info("Fetching from Fanbox API", url=url)
headers = _DEFAULT_HEADERS.copy()
headers["referer"] = f"https://{comic_id}.fanbox.cc"
headers["origin"] = headers["referer"]
raw_bytes = context.get_bytes(url, headers=headers)
response = json.loads(raw_bytes)
chapters = {}
for chapter in response["body"]["items"]:
chapter_id = "{:012}".format(int(chapter["id"]))
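# zero-pad the id so the lexicographic sort further down matches numeric order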
chapter_title = chapter["title"]
chapter_url = f"https://{comic_id}.fanbox.cc/posts/{chapter['id']}"
# Deduplicate by URL
chapters[chapter_url] = Chapter(
chapter_id=chapter_id,
name=chapter_title,
url=chapter_url,
)
chapter_list = list(
sorted(chapters.values(), key=lambda chapter: chapter.chapter_id)
)
return Comic(
publisher=self.publisher,
comic_id=comic_id,
name=response["body"]["items"][0]["user"]["name"] + "’s Fanbox",
url=f"https://{comic_id}.fanbox.cc",
chapters=chapter_list,
)
| 2.203125 | 2 |
datawinners/project/tests/form_model_generator.py | ICT4H/dcs-web | 1 | 12792673 | <filename>datawinners/project/tests/form_model_generator.py<gh_stars>1-10
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from mock import Mock
from mangrove.form_model.field import TextField, SelectField, DateField, GeoCodeField, UniqueIdField
from mangrove.form_model.form_model import FormModel
class FormModelGenerator(object):
def __init__(self, database_manager):
self.database_manager = database_manager
self.init_form_model_fields()
def form_model(self, form_code="cli002"):
return FormModel(self.database_manager, name="AIDS", label="Aids form_model", form_code=form_code,
fields=[self.eid_field, self.rp_field, self.symptoms_field, self.blood_type_field],
)
def subject_form_model_without_rp(self):
return FormModel(self.database_manager, name="AIDS", label="Aids form_model", form_code="cli002",
fields=[self.eid_field, self.symptoms_field, self.blood_type_field], )
def summary_form_model_without_rp(self):
return FormModel(self.database_manager, name="AIDS", label="Aids form_model", form_code="cli002",
fields=[self.eid_field, self.symptoms_field, self.blood_type_field])
def summary_form_model_with_rp(self):
return FormModel(self.database_manager, name="AIDS", label="Aids form_model", form_code="cli002",
fields=[self.rp_field, self.eid_field, self.symptoms_field, self.blood_type_field])
def form_model_with_gps_question(self):
return FormModel(self.database_manager, name="AIDS", label="Aids form_model", form_code="cli002",
fields=[self.eid_field, self.gps_field], )
def init_form_model_fields(self):
self.eid_field = UniqueIdField(unique_id_type='clinic',label="What is associated entity?", code="EID", name="What is associatéd entity?")
self.rp_field = DateField(label="Report date", code="RD", name="What is réporting date?",
date_format="dd.mm.yyyy", instruction="Answer must be a date in the following format: day.month.year. Example: 25.12.2011")
self.symptoms_field = SelectField(label="Zhat are symptoms?", code="SY", name="Zhat are symptoms?",
options=[("Rapid weight loss", "a"), ("Dry cough", "2b"), ("Pneumonia", "c"),
("Memory loss", "d"), ("Neurological disorders ", "e")], single_select_flag=False)
self.blood_type_field = SelectField(label="What is your blood group?", code="BG",
name="What is your blood group?",
options=[("O+", "a"), ("O-", "b"), ("AB", "c"), ("B+", "d")], single_select_flag=True)
self.gps_field = GeoCodeField(name="field1_Loc", code="gps", label="Where do you stay?")
| 2.484375 | 2 |
yahoo_panoptes/plugins/enrichment/generic/snmp/ciena/waveserver/plugin_enrichment_cienaws_light_metrics.py | mdrbh/panoptes | 86 | 12792674 | <filename>yahoo_panoptes/plugins/enrichment/generic/snmp/ciena/waveserver/plugin_enrichment_cienaws_light_metrics.py<gh_stars>10-100
"""
This module implements a Panoptes Plugin that can poll Ciena Waveserver devices for transceiver light level Metrics
"""
from cached_property import threaded_cached_property
from yahoo_panoptes.enrichment.schema.generic.snmp import PanoptesGenericSNMPMetricsEnrichmentGroup
from yahoo_panoptes.framework.enrichment import PanoptesEnrichmentSet
from yahoo.contrib.panoptes.plugins.enrichment.generic.snmp.plugin_enrichment_generic_snmp \
import PanoptesEnrichmentGenericSNMPPlugin
MIB_CIENA_CHASSIS = '.1.3.6.1.4.1.1271'
cwsPortIdName = MIB_CIENA_CHASSIS + '.3.4.7.4.1.2'
cwsPtpPtpPropertiesXcvrType = MIB_CIENA_CHASSIS + '.3.4.8.6.1.2'
cwsXcvrRxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.11.1.2'
cwsXcvrTxPowerActual = MIB_CIENA_CHASSIS + '.3.4.8.13.1.2'
class CienaWSLightMetricsEnrichment(PanoptesGenericSNMPMetricsEnrichmentGroup):
pass
class CienaPluginWSLightMetricsEnrichment(PanoptesEnrichmentGenericSNMPPlugin):
def __init__(self):
self._plugin_context = None
self._logger = None
self._ciena_model = None
self._snmp_connection = None
self._max_repetitions = None
self._polling_execute_frequency = None
super(CienaPluginWSLightMetricsEnrichment, self).__init__()
@property
def metrics_enrichment_class(self):
return CienaWSLightMetricsEnrichment
@threaded_cached_property
def _xcvr_interfaces_id(self):
varbinds_int_type = self._snmp_connection.bulk_walk(cwsPtpPtpPropertiesXcvrType)
varbinds_interface = self._snmp_connection.bulk_walk(cwsPortIdName)
interface_id = {}
xcvr_index = []
for varbind in varbinds_int_type:
if varbind.value == '4':
xcvr_index.append(varbind.index)
for varbind_int in varbinds_interface:
if varbind_int.index in xcvr_index:
interface_id[varbind_int.index] = varbind_int.value
return interface_id
@threaded_cached_property
def _xcvr_rx_power_levels(self):
rx_power_actual = {}
for ind, name in self._xcvr_interfaces_id.items():
rx_oid = cwsXcvrRxPowerActual + '.' + ind.strip('.0')
rx = self._snmp_connection.bulk_walk(rx_oid)
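# the raw SNMP reading appears to be in tenths of a dBm; divide by 10 to get dBm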
for i in rx:
rx_dbm = float(i.value) / 10
rx_power_actual[name] = rx_dbm
return rx_power_actual
@threaded_cached_property
def _xcvr_tx_power_levels(self):
tx_power_actual = {}
for ind, name in self._xcvr_interfaces_id.items():
tx_oid = cwsXcvrTxPowerActual + '.' + ind.strip('.0')
tx = self._snmp_connection.bulk_walk(tx_oid)
for i in tx:
tx_dbm = float(i.value) / 10
tx_power_actual[name] = tx_dbm
return tx_power_actual
def _build_metrics_oids_map(self):
self._oids_map = {
"xcvr_interfaces": {
"method": "static",
"values": self._xcvr_interfaces_id
},
"rx_light_level": {
"method": "bulk_walk",
"oid": cwsXcvrRxPowerActual,
"values": self._xcvr_rx_power_levels
},
"tx_light_level": {
"method": "bulk_walk",
"oid": cwsXcvrTxPowerActual,
"values": self._xcvr_tx_power_levels
}
}
def _build_metrics_groups_conf(self):
self._metrics_groups = [
{
"group_name": "light_levels",
"dimensions": {},
"metrics": {
"xcvr_interfaces": {
"metric_type": "gauge",
"value": "xcvr_interfaces.$index"
},
"rx_light_level": {
"metric_type": "gauge",
"value": "rx_light_level.$index"
},
"tx_light_level": {
"metric_type": "gauge",
"value": "tx_light_level.$index"
}
}
}
]
def get_enrichment(self):
self._ciena_model = self._plugin_context.data.resource_metadata.get('model', 'unknown')
self._build_metrics_oids_map()
self._build_metrics_groups_conf()
enrichment_set = {
"oids": self.oids_map,
"metrics_groups": self.metrics_groups
}
try:
self.enrichment_group.add_enrichment_set(PanoptesEnrichmentSet(self.device_fqdn, enrichment_set))
except Exception as e:
self._logger.error('Error while adding enrichment set {} to enrichment group for the device {}: {}'.
format(enrichment_set, self.device_fqdn, repr(e)))
self.enrichment_group_set.add_enrichment_group(self.enrichment_group)
self._logger.debug('Metrics enrichment for device {}: {}'.format(self.device_fqdn, self.enrichment_group_set))
return self.enrichment_group_set
| 1.953125 | 2 |
statssite/home/www-data/stats/frontpage/__init__.py | tigran-a/ACDCStats | 0 | 12792675 | <reponame>tigran-a/ACDCStats
#!/usr/bin/env python3
"""
(c) Copyright 2015 <NAME>, SnT, University of Luxembourg
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import Flask, Blueprint, request
import flask
import urllib
fpbp = Blueprint('frontpage', __name__, template_folder="templ")
def get_param_str():
res = urllib.parse.urlencode(request.args)
if res != "":
return "?"+res
else:
return res
@fpbp.route('/')
@fpbp.route('/overview')
def overview():
return flask.render_template('overview.html', parm = get_param_str())
@fpbp.route('/graph')
@fpbp.route('/graph/')
@fpbp.route('/graph/<exp>')
def g1(exp=None):
return flask.render_template('graphs.html', exp=exp, parm = get_param_str())
@fpbp.route('/attack')
@fpbp.route('/attack/')
@fpbp.route('/attack/<exp>')
def attack(exp=None):
return flask.render_template('attack.html', exp=exp, parm = get_param_str())
@fpbp.route('/bot')
@fpbp.route('/bot/')
@fpbp.route('/bot/<exp>')
def bot(exp=None):
return flask.render_template('bot.html', exp=exp, parm = get_param_str())
@fpbp.route('/botnet')
@fpbp.route('/botnet/')
@fpbp.route('/botnet/<exp>')
def botnet(exp=None):
return flask.render_template('botnet.html', exp=exp, parm=get_param_str())
@fpbp.route('/c2_server')
@fpbp.route('/c2_server/')
@fpbp.route('/c2_server/<exp>')
def c2_server(exp=None):
return flask.render_template('c2_server.html', exp = exp, parm = get_param_str())
@fpbp.route('/malware')
@fpbp.route('/malware/')
@fpbp.route('/malware/<exp>')
def malware(exp=None):
return flask.render_template('malware.html', exp=exp, parm=get_param_str())
@fpbp.route('/malicious_uri')
@fpbp.route('/malicious_uri/')
@fpbp.route('/malicious_uri/<exp>')
def malicious_uri(exp=None):
return flask.render_template('malicious_uri.html', exp=exp, parm=get_param_str())
@fpbp.route('/spam_campaign')
@fpbp.route('/spam_campaign/')
@fpbp.route('/spam_campaign/<exp>')
def spam_campaign(exp=None):
return flask.render_template('spam_campaign.html', exp=exp, parm = get_param_str())
@fpbp.route('/fast_flux')
@fpbp.route('/fast_flux/')
@fpbp.route('/fast_flux/<exp>')
def fast_flux(exp=None):
return flask.render_template('fast_flux.html', exp=exp, parm = get_param_str())
@fpbp.route('/vulnerable_uri')
@fpbp.route('/vulnerable_uri/')
@fpbp.route('/vulnerable_uri/<exp>')
def vulnerable_uri(exp=None):
return flask.render_template('vulnerable_uri.html', exp=exp, parm = get_param_str())
@fpbp.route('/tops')
@fpbp.route('/tops/')
@fpbp.route('/tops/<exp>')
def tops(exp=None):
return flask.render_template('tops.html', exp=exp, parm = get_param_str())
@fpbp.route('/ddos')
def ddos():
return flask.render_template('ddos.html')
| 2.03125 | 2 |
authors/apps/articles/tests/test_highlight_text.py | andela/ah-django-unchained | 0 | 12792676 | import json
from django.urls import reverse
from rest_framework.views import status
from rest_framework.test import APITestCase, APIClient
class CommentsTestCase(APITestCase):
def setUp(self):
self.client = APIClient()
self.signup_url = reverse('authentication:auth-register')
self.create_article_url = reverse('articles:articles-listcreate')
self.user_two_details = {
"user": {
"username": "andela",
"email": "<EMAIL>",
"password": "<PASSWORD>"
}}
self.create_article_data = {
"title": "Programming Languages",
"body": "There are variety of programming languagr",
"description": "Programming",
"tagList": ["Programming", "language", "python"]
}
self.highlighted_text = {
"comment": {
"body": "Good work here!!",
"start_highlight_position": 2,
"end_highlight_position": 15
}}
self.selection_start_index_larger_than_end_index = {
"comment": {
"body": "Good work here!!",
"start_highlight_position": 28,
"end_highlight_position": 15
}}
self.invalid_index_datatype = {
"comment": {
"body": "Good work here!!",
"start_highlight_position": "one",
"end_highlight_position": 15
}}
self.missing_field = {
"comment": {
"body": "Good work here!!",
"end_highlight_position": 15
}}
self.update_comment = {
"comment": {
"body": "Nice Idea"
}}
def register_user(self, user_details):
"""Sign up a new user to get a token"""
register = self.client.post(self.signup_url,
user_details,
format='json')
token = register.data["token"]
return token
def create_article(self, token):
"""Create an article."""
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
return slug
def test_comment_highlighted_text(self):
"""Test comment highlighted text."""
token = self.register_user(self.user_two_details)
slug = self.create_article(token)
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.highlighted_text,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
self.assertIn('selected_text', response.data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_rejects_start_index_larger_than_end_index(self):
"""Test rejects start index larger than end index."""
token = self.register_user(self.user_two_details)
slug = self.create_article(token)
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.selection_start_index_larger_than_end_index,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
self.assertEqual(response.data['error'],
'The start_index_position should not '
'be greater or equal end_index_position')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_rejects_invalid_types_for_highlight_index(self):
"""Test rejects index data type that are not integers."""
token = self.register_user(self.user_two_details)
slug = self.create_article(token)
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.invalid_index_datatype,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
self.assertEqual(response.data['error'],
'Start of highlight and end of highlight'
' indices should be both integers')
self.assertEqual(response.status_code,
status.HTTP_422_UNPROCESSABLE_ENTITY)
def test_rejects_missing_required_field(self):
"""Test for missing field."""
token = self.register_user(self.user_two_details)
slug = self.create_article(token)
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.missing_field,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
self.assertEqual(response.data['error'],
'start_highlight_position is required')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_get_all_comments(self):
"""Test get all comments."""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
# highlight a text and comment it
self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.highlighted_text,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
# get all comments
response = self.client.get(
reverse('articles:high_light', kwargs={'slug': slug}),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertIn('selected_text', response_data[0])
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_single_comments(self):
"""Test get single comments."""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
# highlight a text and comment it
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.highlighted_text,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
# get single comment
article_id = response.data['id']
response = self.client.get(
'/api/articles/{}/highlight/{}'.format(slug, article_id),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertIn('selected_text', response_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_single_comments(self):
"""Test delete single comments."""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
# highlight a text and comment it
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.highlighted_text,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
# delete single comment
article_id = response.data['id']
response = self.client.delete(
'/api/articles/{}/highlight/{}'.format(slug, article_id),
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertEqual(response.data['message'],
'Comment on highlighted text deleted successfully')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_single_comments(self):
"""Test update single comment."""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
# highlight a text and comment on it
response = self.client.post(
reverse('articles:high_light', kwargs={'slug': slug}),
self.highlighted_text,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
article_id = response.data['id']
# update the comment
response = self.client.put(
'/api/articles/{}/highlight/{}'.format(slug, article_id),
self.update_comment,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertIn('selected_text', response_data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_update_unexisting_comments(self):
"""Test update unexisting comment."""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
# update the comment
response = self.client.put(
'/api/articles/{}/highlight/{}'.format(slug, 2),
self.update_comment,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertEqual(response.data['error'], 'The comment does not exist')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_get_delete_unexisting_comments(self):
"""Delete unexisting comment"""
token = self.register_user(self.user_two_details)
# create an article
response = self.client.post(
self.create_article_url,
self.create_article_data,
format='json',
HTTP_AUTHORIZATION='token {}'.format(token))
slug = response.data['slug']
# update the comment
response = self.client.delete(
'/api/articles/{}/highlight/{}'.format(slug, 2),
self.update_comment,
HTTP_AUTHORIZATION='token {}'.format(token),
format='json')
response_data = json.loads(json.dumps(response.data))
self.assertEqual(response.data["error"], "The comment does not exist")
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
| 2.453125 | 2 |
examples/38-benzene.py | hebrewsnabla/pyAutoMR | 5 | 12792677 | from pyscf import lib, scf
#from pyphf import guess, suscf
from automr import autocas, guess
lib.num_threads(8)
xyz = '''C -2.94294278 0.39039038 0.00000000
C -1.54778278 0.39039038 0.00000000
C -0.85024478 1.59814138 0.00000000
C -1.54789878 2.80665038 -0.00119900
C -2.94272378 2.80657238 -0.00167800
C -3.64032478 1.59836638 -0.00068200
H -3.49270178 -0.56192662 0.00045000
H -0.99827478 -0.56212262 0.00131500
H 0.24943522 1.59822138 0.00063400
H -0.99769878 3.75879338 -0.00125800
H -3.49284578 3.75885338 -0.00263100
H -4.73992878 1.59854938 -0.00086200
'''
bas = 'def2-svp'
mf = guess.mix(xyz, bas, conv='tight')
mf2 = autocas.cas(mf)
| 1.8125 | 2 |
silk/migrations/0008_fix_request_prof_file_null.py | pikhovkin/django-silk | 2 | 12792678 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2018-01-11 09:51
from __future__ import unicode_literals
from django.db import migrations, models
import silk.storage
class Migration(migrations.Migration):
dependencies = [
('silk', '0007_add_support_oracle'),
]
operations = [
migrations.AlterField(
model_name='request',
name='prof_file',
field=models.FileField(default='', max_length=300, null=True, storage=silk.storage.ProfilerResultStorage(), upload_to=b''),
),
]
| 1.625 | 2 |
src/frobs_rl/templates/CustomRobotEnv.py | jmfajardod/gym_gazebo_sb3 | 67 | 12792679 | #!/bin/python3
from gym import spaces
from gym.envs.registration import register
from frobs_rl.envs import robot_BasicEnv
import rospy
#- Uncomment the library modules as neeeed
# from frobs_rl.common import ros_gazebo
# from frobs_rl.common import ros_controllers
# from frobs_rl.common import ros_node
# from frobs_rl.common import ros_launch
# from frobs_rl.common import ros_params
# from frobs_rl.common import ros_urdf
# from frobs_rl.common import ros_spawn
"""
Although it is best to register only the task environment, one can also register the
robot environment.
"""
# register(
# id='CustomRobotEnv-v0',
# entry_point='frobs_rl.templates.CustomRobotEnv:CustomRobotEnv',
# max_episode_steps=10000,
# )
class CustomRobotEnv(robot_BasicEnv.RobotBasicEnv):
"""
Custom Robot Env, use this for all task envs using the custom robot.
"""
def __init__(self):
"""
Describe the robot used in the env.
"""
rospy.loginfo("Starting Custom Robot Env")
"""
If launching Gazebo with the env then set the corresponding environment variables.
"""
launch_gazebo=False
gazebo_init_paused=True
gazebo_use_gui=True
gazebo_recording=False
gazebo_freq=100
gazebo_max_freq=None
gazebo_timestep=None
"""
If launching Gazebo with a custom world then set the corresponding environment variables.
"""
world_path=None
world_pkg=None
world_filename=None
"""
If spawning the robot using the given spawner then set the corresponding environment variables.
"""
spawn_robot=False
model_name_in_gazebo="robot"
namespace="/robot"
pkg_name=None
urdf_file=None
urdf_folder="/urdf"
controller_file=None
controller_list=None
urdf_xacro_args=None
rob_state_publisher_max_freq= None
model_pos_x=0.0; model_pos_y=0.0; model_pos_z=0.0
model_ori_x=0.0; model_ori_y=0.0; model_ori_z=0.0; model_ori_w=0.0
"""
Set if the controllers in "controller_list" will be reset at the beginning of each episode, default is False.
"""
reset_controllers=False
"""
Set the reset mode of gazebo at the beginning of each episode: 1 is "reset_world", 2 is "reset_simulation". Default is 1.
"""
reset_mode=1
"""
Set the step mode of Gazebo. 1 is "using ROS services", 2 is "using step function of Gazebo". Default is 1.
If using the step mode 2 then set the number of steps of Gazebo to take in each episode. Default is 1.
"""
step_mode=1
num_gazebo_steps=1
"""
Init the parent class with the corresponding variables.
"""
super(CustomRobotEnv, self).__init__( launch_gazebo=launch_gazebo, gazebo_init_paused=gazebo_init_paused,
gazebo_use_gui=gazebo_use_gui, gazebo_recording=gazebo_recording, gazebo_freq=gazebo_freq, world_path=world_path,
world_pkg=world_pkg, world_filename=world_filename, gazebo_max_freq=gazebo_max_freq, gazebo_timestep=gazebo_timestep,
spawn_robot=spawn_robot, model_name_in_gazebo=model_name_in_gazebo, namespace=namespace, pkg_name=pkg_name,
urdf_file=urdf_file, urdf_folder=urdf_folder, controller_file=controller_file, controller_list=controller_list,
urdf_xacro_args=urdf_xacro_args, rob_state_publisher_max_freq= rob_state_publisher_max_freq,
model_pos_x=model_pos_x, model_pos_y=model_pos_y, model_pos_z=model_pos_z,
model_ori_x=model_ori_x, model_ori_y=model_ori_y, model_ori_z=model_ori_z, model_ori_w=model_ori_w,
reset_controllers=reset_controllers, reset_mode=reset_mode, step_mode=step_mode, num_gazebo_steps=num_gazebo_steps)
"""
Define publisher or subscribers as needed.
"""
# self.pub1 = rospy.Publisher('/robot/controller_manager/command', JointState, queue_size=1)
# self.sub1 = rospy.Subscriber('/robot/joint_states', JointState, self.callback1)
"""
If using the __check_subs_and_pubs_connection method, then un-comment the lines below.
"""
# ros_gazebo.gazebo_unpause_physics()
# self._check_subs_and_pubs_connection()
# ros_gazebo.gazebo_pause_physics()
"""
Finished __init__ method
"""
rospy.loginfo("Finished Init of Custom Robot env")
#------------------------------------------#
# Custom methods for the CustomRobotEnv #
def _check_subs_and_pubs_connection(self):
"""
Function to check if the Gazebo and ROS connections are ready
"""
return True
#-------------------------------------------------------#
# Custom available methods for the CustomRobotEnv #
# Although it is best to implement these methods in #
# the Task Env, one can use them here if needed. #
def _send_action(self, action):
"""
Function to send an action to the robot
"""
raise NotImplementedError()
def _get_observation(self):
"""
Function to get the observation from the environment.
"""
raise NotImplementedError()
def _get_reward(self):
"""
Function to get the reward from the environment.
"""
raise NotImplementedError()
def _check_if_done(self):
"""
Function to check if the episode is done.
If the episode has a success condition then set done as:
self.info['is_success'] = 1.0
"""
raise NotImplementedError()
def _set_episode_init_params(self):
"""
Function to set some parameters, like the position of the robot, at the beginning of each episode.
"""
raise NotImplementedError() | 2.421875 | 2 |
ib_async/order.py | ondergetekende/ib_async | 17 | 12792680 | <filename>ib_async/order.py
import enum
import typing # noqa
from ib_async.errors import UnsupportedFeature
from ib_async.event import Event
from ib_async import execution # noqa
from ib_async.instrument import Instrument # noqa
from ib_async.protocol import ProtocolInterface, Serializable, ProtocolVersion, IncomingMessage, OutgoingMessage
class OrderOrigin(enum.Enum):
Customer = 0
Firm = 1
Unknown = 2
class AuctionStrategy(enum.Enum):
Unset = 0
Match = 1
Improvement = 2
Transparent = 3
class Action(str, enum.Enum):
Buy = 'BUY'
Sell = 'SELL'
SShort = 'SSHORT'
SLONG = 'SLONG'
class VolatilityType(enum.Enum):
Daily = 1
Annual = 2
class OrderType(str, enum.Enum):
Undefined = ""
BoxTop = 'BOX TOP'
Limit = 'LMT'
LimitIfTouched = 'LIT'
LimitOnClose = 'LOC'
Market = 'MKT'
MarketIfTouched = 'MIT'
MarketOnClose = 'MOC'
MarketToLimit = 'MTL'
MarketWithProtection = 'MKT PRT'
PassiveRelative = 'PASSV REL'
PeggedToMidpoint = 'PEG MID'
PeggedToMarket = 'PEG MKT'
PeggedToStock = 'PEG STK'
PeggedToBenchmark = 'PEG BENCH'
Relative = 'REL'
RelativeLimitCombo = 'REL + LMT'
RelativeMarketCombo = 'REL + MKT'
Stop = "STP"
StopLimit = "STP LMT"
StopWithProtection = "STP PRT"
TrailingStop = "TRAIL"
TrailingStopLimit = "TRAIL LIMIT"
Volatility = 'VOL'
class TimeInForce(str, enum.Enum):
Day = "DAY" # Valid for the day only.
# Good until canceled. The order will continue to work within the system and in the marketplace until it executes
    # or is canceled. GTC orders will automatically be cancelled under the following conditions: If a corporate
# action on a security results in a stock split (forward or reverse), exchange for shares, or distribution of
# shares. If you do not log into your IB account for 90 days.
# At the end of the calendar quarter following the current quarter. For example, an order placed during the third
# quarter of 2011 will be canceled at the end of the first quarter of 2012. If the last day is a non-trading day,
# the cancellation will occur at the close of the final trading day of that quarter. For example, if the last day
# of the quarter is Sunday, the orders will be cancelled on the preceding Friday.
# Orders that are modified will be assigned a new “Auto Expire” date consistent with the end of the calendar
# quarter following the current quarter.
# Orders submitted to IB that remain in force for more than one day will not be reduced for dividends. To allow
# adjustment to your order price on ex-dividend date, consider using a Good-Til-Date/Time (GTD) or
# Good-after-Time/Date (GAT) order type, or a combination of the two.
GoodTillCancel = "GTC"
# Immediate or Cancel. Any portion that is not filled as soon as it becomes available in the market is canceled.
ImmediateOrCancel = "IOC"
# Good until Date. It will remain working within the system and in the marketplace until it executes or until the
# close of the market on the date specified
GoodTillDate = "GTD"
Opening = "OPG" # Use OPG to send a market-on-open (MOO) or limit-on-open (LOO) self
# If the entire Fill-or-Kill order does not execute as soon as it becomes available, the entire order is canceled.
FillOrKill = "FOK"
DayTillCancel = "DTC" # Day until Canceled
class Order(Serializable):
def __init__(self, parent: ProtocolInterface) -> None:
self._parent = parent
self.instrument = None # type: Instrument
# Filled by status messages
self.status = None # type: str
self.filled = None # type: float
self.remaining = None # type: float
self.average_fill_price = None # type: float
self.perm_id = None # type: int
self.parent_id = None # type: int
self.last_fill_price = None # type: float
self.client_id = None # type: int
self.why_held = None # type: str
self.market_cap_price = None # type: float
self.order_ref = None # type: str
self.combo_legs_description = None # type: str
self.inital_margin = None # type: str
self.maintenance_margin = None # type: str
self.equity_with_loan = None # type: str
self.commission = None # type: float
self.min_commission = None # type: float
self.max_commission = None # type: float
self.commission_currency = None # type: str
self.warning_text = None # type: str
self.order_id = 0
self.client_id = 0
self.perm_id = 0
# main order fields
self.action = None # type: Action
self.total_quantity = 0.0
self.order_type = OrderType.Market
# The LIMIT price. Used for limit, stop-limit and relative orders. In all other cases specify zero. For
# relative orders with no limit price, also specify zero.
self.limit_price = None # type: float
# Generic field to contain the stop price for STP LMT orders, trailing amount, etc.
self.aux_price = None # type: float
# extended order fields
self.time_in_force = TimeInForce.GoodTillCancel
# for GTC orders.
self.active_start_time = None # type: datetime.datetime
self.active_stop_time = None # type: datetime.datetime
self.oca_group = ""
self.oca_type = 0
self.order_reference = ""
self.transmit = True
self.parent_id = 0
self.block_order = False
self.sweep_to_fill = False
self.display_size = 0
self.trigger_method = 0
self.outside_regular_trading_hours = False
self.hidden = False
self.good_after_time = None # type: datetime.datetime
self.good_till_date = None # type: datetime.datetime
self.rule80a = ""
self.all_or_none = False
self.min_quantity = None # type: int
self.percent_offset = None # type: float
self.override_percentage_constraints = False
self.trail_stop_price = None # type: float
self.trailing_percent = None # type: float
# financial advisors only
self.fa_group = ""
self.fa_profile = ""
self.fa_method = ""
self.fa_percentage = ""
# institutional (ie non-cleared) only
self.open_close = "O"
self.origin = OrderOrigin.Customer
self.short_sale_slot = 0
self.designated_location = ""
self.exempt_code = -1
# SMART routing only
self.discretionary_amount = 0.0
self.etrade_only = True
self.firm_quote_only = True
self.nbbo_price_cap = None # type: float
self.opt_out_smart_routing = False
# BOX exchange orders only
self.auction_strategy = AuctionStrategy.Unset
self.starting_price = None # type: float
self.stock_ref_price = None # type: float
self.delta = None # type: float
# pegged to stock and VOL orders only
self.stock_range_lower = None # type: float
self.stock_range_upper = None # type: float
self.randomize_price = False
self.randomize_size = False
# VOLATILITY ORDERS ONLY
self.volatility = None # type: float
self.volatility_type = None # type: VolatilityType
self.delta_neutral_order_type = ""
self.delta_neutral_aux_price = None # type: float
self.delta_neutral_contract_id = 0
self.delta_neutral_settling_firm = ""
self.delta_neutral_clearing_account = ""
self.delta_neutral_clearing_intent = ""
self.delta_neutral_open_close = ""
self.delta_neutral_short_sale = False
self.delta_neutral_short_sale_slot = 0
self.delta_neutral_designated_location = ""
self.continuous_update = False
self.reference_price_type = None # type: int # 1=Average, 2 = BidOrAsk
# COMBO ORDERS ONLY
self.basis_points = None # type: float # EFP orders only
self.basis_points_type = None # type: int # EFP orders only
# SCALE ORDERS ONLY
self.scale_init_level_size = None # type: int
self.scale_subs_level_size = None # type: int
self.scale_price_increment = None # type: float
self.scale_price_adjust_value = None # type: float
self.scale_price_adjust_interval = None # type: int
self.scale_profit_offset = None # type: float
self.scale_auto_reset = False
self.scale_init_position = None # type: int
self.scale_init_fill_quantity = None # type: int
self.scale_random_percent = False
self.scale_table = ""
#
# HEDGE ORDERS
self.hedge_type = "" # 'D' - delta, 'B' - beta, 'F' - FX, 'P' - pair
self.hedge_param = "" # 'beta=X' value for beta hedge, 'ratio=Y' for pair hedge
# Clearing info
self.account = "" # IB account
self.settling_firm = ""
self.clearing_account = "" # True beneficiary of the order
self.clearing_intent = "" # "" (Default), "IB", "Away", "PTA" (PostTrade)
# ALGO ORDERS ONLY
self.algo_strategy = ""
self.algo_parameters = {} # type: typing.Dict[str, str]
self.smart_combo_routing_params = {} # type: typing.Dict[str, str]
self.algo_id = ""
# What-if
self.what_if = False
# Not Held
self.not_held = False
self.solicited = False
self.model_code = ""
self.order_miscellaneous_options = {} # type: typing.Dict[str, str]
self.reference_contract_id = 0
self.pegged_change_amount = 0.0
self.is_pegged_change_amount_decrease = False
self.reference_change_amount = 0.0
self.reference_exchange_id = ""
self.adjusted_order_type = OrderType.Undefined
self.trigger_price = 1.7976931348623157e+308 # type: float
self.limit_price_offset = 1.7976931348623157e+308 # type: float
self.adjusted_stop_price = 1.7976931348623157e+308 # type: float
self.adjusted_stop_limit_price = 1.7976931348623157e+308 # type: float
self.adjusted_trailing_amount = 1.7976931348623157e+308 # type: float
self.adjustable_trailing_unit = 0
        self.conditions = []  # type: typing.List[None]  # not supported yet
self.conditions_cancel_order = False
self.conditions_ignore_regular_trading_hours = False
self.ext_operator = ""
self.soft_dollar_tier_name = ""
self.soft_dollar_tier_value = ""
self.soft_dollar_tier_display_name = ""
# native cash quantity
self.cash_quantity = 1.7976931348623157e+308 # type: float
self.mifid2_decision_maker = ""
self.mifid2_decision_algo = ""
self.mifid2_execution_trader = ""
self.mifid2_execution_algo = ""
updated = Event() # type: Event[None]
on_execution = Event() # type: Event[execution.Execution]
def serialize(self, message: OutgoingMessage):
message.add(self.order_id)
message.add(self.instrument)
if self.instrument.security_ids:
security_id_type, security_id = next(iter(self.instrument.security_ids.items()))
else:
security_id_type = security_id = None
message.add(security_id_type)
message.add(security_id)
message.add(self.action)
message.add(self.total_quantity)
message.add(self.order_type)
message.add(self.limit_price)
message.add(self.aux_price)
message.add(self.time_in_force)
message.add(self.oca_group)
message.add(self.account)
message.add(self.open_close)
message.add(self.origin)
message.add(self.order_reference)
message.add(self.transmit)
message.add(self.parent_id)
message.add(self.block_order)
message.add(self.sweep_to_fill)
message.add(self.display_size)
message.add(self.trigger_method)
message.add(self.outside_regular_trading_hours)
message.add(self.hidden)
assert self.instrument.security_type != 'BAG' # not supported
message.add("") # deprecated sharesAllocation field
message.add(self.discretionary_amount)
message.add(self.good_after_time)
message.add(self.good_till_date)
message.add(self.fa_group)
message.add(self.fa_method)
message.add(self.fa_percentage)
message.add(self.fa_profile)
message.add(self.model_code, min_version=ProtocolVersion.MODELS_SUPPORT)
# institutional short saleslot data (srv v18 and above)
        message.add(self.short_sale_slot)  # 0 for retail, 1 or 2 for institutions
        message.add(self.designated_location)  # populate only when shortSaleSlot = 2.
message.add(self.exempt_code)
message.add(self.oca_type)
message.add(self.rule80a)
message.add(self.settling_firm)
message.add(self.all_or_none)
message.add(self.min_quantity)
message.add(self.percent_offset)
message.add(self.etrade_only)
message.add(self.firm_quote_only)
message.add(self.nbbo_price_cap)
message.add(self.auction_strategy) # AUCTION_MATCH, AUCTION_IMPROVEMENT, AUCTION_TRANSPARENT
message.add(self.starting_price)
message.add(self.stock_ref_price)
message.add(self.delta)
# Volatility orders had specific watermark price attribs in server version 26
# double lower = (protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeLower;
# double upper = (protocol_version == 26 && isVolOrder) ? DBL_MAX : selfstockRangeUpper;
message.add(self.stock_range_lower)
message.add(self.stock_range_upper)
message.add(self.override_percentage_constraints)
# Volatility orders (srv v26 and above)
message.add(self.volatility)
message.add(self.volatility_type)
message.add(self.delta_neutral_order_type)
message.add(self.delta_neutral_aux_price)
if self.delta_neutral_order_type:
# pragma: no cover (I don't have actual examples of these)
message.add(self.delta_neutral_contract_id)
message.add(self.delta_neutral_settling_firm)
message.add(self.delta_neutral_clearing_account)
message.add(self.delta_neutral_clearing_intent)
message.add(self.delta_neutral_open_close)
message.add(self.delta_neutral_short_sale)
message.add(self.delta_neutral_short_sale_slot)
message.add(self.delta_neutral_designated_location)
message.add(self.continuous_update)
message.add(self.reference_price_type)
message.add(self.trail_stop_price)
message.add(self.trailing_percent)
# SCALE orders
message.add(self.scale_init_level_size)
message.add(self.scale_subs_level_size)
message.add(self.scale_price_increment)
if self.scale_price_increment and self.scale_price_increment > 0.0:
# pragma: no cover (I don't have actual examples of these)
message.add(self.scale_price_adjust_value)
message.add(self.scale_price_adjust_interval)
message.add(self.scale_profit_offset)
message.add(self.scale_auto_reset)
message.add(self.scale_init_position)
message.add(self.scale_init_fill_quantity)
message.add(self.scale_random_percent)
message.add(self.scale_table)
message.add(self.active_start_time)
message.add(self.active_stop_time)
# HEDGE orders
message.add(self.hedge_type)
if self.hedge_type: # pragma: no cover (I don't have actual examples of these)
message.add(self.hedge_param)
message.add(self.opt_out_smart_routing)
message.add(self.clearing_account)
message.add(self.clearing_intent)
message.add(self.not_held)
message.add(bool(self.instrument.underlying_component))
if self.instrument.underlying_component: # pragma: no cover (I don't have actual examples of these)
message.add(self.instrument.underlying_component)
message.add(self.algo_strategy)
if self.algo_strategy: # pragma: no cover (I don't have actual examples of these)
message.add(self.algo_parameters)
message.add(self.algo_id)
message.add(self.what_if)
message.add("".join("%s=%s;" % (k, v) for (k, v) in self.order_miscellaneous_options.items()))
message.add(self.solicited)
message.add(self.randomize_size)
message.add(self.randomize_price)
if self.order_type == "PEG BENCH": # pragma: no cover (I don't have actual examples of these)
message.add(self.reference_contract_id,
self.is_pegged_change_amount_decrease,
self.pegged_change_amount,
self.reference_change_amount,
self.reference_exchange_id,
min_version=ProtocolVersion.PEGGED_TO_BENCHMARK)
if self.conditions: # pragma: no cover (Not implemented)
raise UnsupportedFeature("Order conditions")
message.add(0, # no conditions
self.adjusted_order_type,
self.trigger_price,
self.limit_price_offset,
self.adjusted_stop_price,
self.adjusted_stop_limit_price,
self.adjusted_trailing_amount,
self.adjustable_trailing_unit,
min_version=ProtocolVersion.PEGGED_TO_BENCHMARK)
message.add(self.ext_operator,
min_version=ProtocolVersion.EXT_OPERATOR)
message.add(self.soft_dollar_tier_name,
self.soft_dollar_tier_value,
min_version=ProtocolVersion.SOFT_DOLLAR_TIER)
message.add(self.cash_quantity,
min_version=ProtocolVersion.CASH_QTY)
message.add(self.mifid2_decision_maker,
self.mifid2_decision_algo,
min_version=ProtocolVersion.DECISION_MAKER)
message.add(self.mifid2_execution_trader,
self.mifid2_execution_algo,
min_version=ProtocolVersion.MIFID_EXECUTION)
def deserialize(self, message: IncomingMessage):
assert False, "Implemented in message handlers"
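# Illustrative sketch (added for clarity, not part of the original module): how
# a caller might populate an Order before submitting it through whatever
# ProtocolInterface implementation it already holds. The quantity, limit price
# and time-in-force below are arbitrary example values, and the function name
# is hypothetical -- ib_async's actual order-placement entry point lives
# elsewhere in the package.
def _example_build_limit_order(parent: ProtocolInterface, instrument: Instrument) -> Order:
    order = Order(parent)
    order.instrument = instrument
    order.action = Action.Buy
    order.order_type = OrderType.Limit
    order.total_quantity = 100.0
    order.limit_price = 42.50
    order.time_in_force = TimeInForce.Day
    return order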
| 2.453125 | 2 |
Ago-Dic-2019/Ricardo_Romero_Medina/Practica1/Practica_3-5.py | Arbupa/DAS_Sistemas | 41 | 12792681 | <filename>Ago-Dic-2019/Ricardo_Romero_Medina/Practica1/Practica_3-5.py
invitados=['<NAME>','<NAME>','<NAME>','<NAME>','<NAME>']
message1="El invitado " + invitados[1].title() + " no asistira a la cena"
print(message1)
invitados[1]="<NAME>"
for i in range(len(invitados)):
message="Hola como estas " + invitados[i].title() + " te invito a una cena esta noche en el restaurante Meson Principal."
print(message) | 3.53125 | 4 |
models.py | hxf1228/dtcnn_elm_lnd | 8 | 12792682 | <reponame>hxf1228/dtcnn_elm_lnd
"""Deep Transfer Convolutional Neural Network (DTCNN)
Created on: 2019/12/31 22:01
@File: models.py
@Author:<NAME> (<EMAIL> & <EMAIL>)
@Copy Right: Copyright © 2019-2020 HUST. All Rights Reserved.
@Requirement: Python-3.7.4, TensorFlow-1.4, Kears-2.2.4
"""
import warnings
import keras.backend as K
from keras.layers import Input, Lambda, GlobalAvgPool2D, Dense
from keras.models import Model
from keras.applications.resnet50 import ResNet50
from keras.applications.xception import Xception
from keras.applications.nasnet import NASNetMobile
from keras.applications.mobilenet_v2 import MobileNetV2
from libs.keras_efficientnets.efficientnet import EfficientNetB5
warnings.filterwarnings("ignore")
# Resnet50
# <NAME>, <NAME>, <NAME>, <NAME>, Deep Residual Learning for Image Recognition,
# in: 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), IEEE, Las Vegas, NV, USA, 2016: pp. 770–778.
# https://doi.org/10.1109/CVPR.2016.90.
def resnet50_model(input_shape=(None, None), num_classes=2):
input_gray = Input(shape=input_shape)
input_fakeRgb = Lambda(
lambda x: K.repeat_elements(
K.expand_dims(
x, 3), 3, 3))(input_gray)
base_model = ResNet50(include_top=False, input_tensor=input_fakeRgb)
output = GlobalAvgPool2D()(base_model.output)
predict = Dense(num_classes, activation='softmax')(output)
model = Model(inputs=base_model.input, outputs=predict)
return model
# Xception
# F. Chollet, Xception: Deep learning with depthwise separable convolutions,
# in: Proceedings of the IEEE Conference on Computer Vision and Pattern
# Recognition, 2017: pp. 1251–1258.
def xception_model(input_shape=(None, None), num_classes=2):
input_gray = Input(shape=input_shape)
input_fakeRgb = Lambda(
lambda x: K.repeat_elements(
K.expand_dims(
x, 3), 3, 3))(input_gray)
base_model = Xception(include_top=False, input_tensor=input_fakeRgb)
output = GlobalAvgPool2D()(base_model.output)
predict = Dense(num_classes, activation='softmax')(output)
model = Model(inputs=base_model.input, outputs=predict)
return model
# NASNetMobile
# [1]<NAME>, <NAME>, <NAME>, <NAME>, Learning transferable architectures for scalable image recognition,
# in: Proceedings of the IEEE Conference on Computer Vision and Pattern
# Recognition, 2018: pp. 8697–8710.
def nasnetmobile_model(input_shape=(None, None), num_classes=2):
input_gray = Input(shape=input_shape)
input_fakeRgb = Lambda(
lambda x: K.repeat_elements(
K.expand_dims(
x, 3), 3, 3))(input_gray)
base_model = NASNetMobile(include_top=False, input_tensor=input_fakeRgb)
output = GlobalAvgPool2D()(base_model.output)
predict = Dense(num_classes, activation='softmax')(output)
model = Model(inputs=base_model.input, outputs=predict)
return model
# MobileNetV2
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, Mobilenetv2: Inverted residuals and linear bottlenecks,
# in: Proceedings of the IEEE Conference on Computer Vision and Pattern
# Recognition, 2018: pp. 4510–4520.
def mobilenetv2_model(input_shape=(None, None), num_classes=2):
input_gray = Input(shape=input_shape)
input_fakeRgb = Lambda(
lambda x: K.repeat_elements(
K.expand_dims(
x, 3), 3, 3))(input_gray)
base_model = MobileNetV2(include_top=False, input_tensor=input_fakeRgb)
output = GlobalAvgPool2D()(base_model.output)
predict = Dense(num_classes, activation='softmax')(output)
model = Model(inputs=base_model.input, outputs=predict)
return model
# EfficientNet-B5
# <NAME>, <NAME>, EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,
# in: International Conference on Machine Learning, 2019.
def efficientnetb5_model(input_shape=(None, None), num_classes=2):
input_gray = Input(shape=input_shape)
input_fakeRgb = Lambda(
lambda x: K.repeat_elements(
K.expand_dims(
x, 3), 3, 3))(input_gray)
base_model = EfficientNetB5(include_top=False, input_tensor=input_fakeRgb)
output = GlobalAvgPool2D()(base_model.output)
predict = Dense(num_classes, activation='softmax')(output)
model = Model(inputs=base_model.input, outputs=predict)
return model
#Test DTCNN functions
#if __name__ == '__main__':
#model = resnet50_model((64, 64), 2)
#model = xception_model((64, 64), 2)
#model = nasnetmobile_model((64, 64), 2)
#model = mobilenetv2_model((64, 64), 2)
#model = efficientnetb5_model((64, 64), 2)
#model.summary() | 2.046875 | 2 |
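# Illustrative training sketch (added for clarity, not part of the original
# file): one way a builder above could be compiled and fitted. The optimizer,
# loss, batch size and the x_train/y_train arrays (grayscale 64x64 patches with
# one-hot labels) are assumptions for the example, not the authors' published
# training configuration.
#model = mobilenetv2_model((64, 64), 2)
#model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
#model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)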
autofit/example/analysis.py | rhayes777/AutoFit | 0 | 12792683 | from os import path
import os
import matplotlib.pyplot as plt
import numpy as np
import autofit as af
"""
The `analysis.py` module contains the dataset and log likelihood function which given a model instance (set up by
the non-linear search) fits the dataset and returns the log likelihood of that model.
"""
class Analysis(af.Analysis):
def __init__(self, data: np.ndarray, noise_map:np.ndarray):
"""
In this example the `Analysis` object only contains the data and noise-map. It can be easily extended,
for more complex data-sets and model fitting problems.
Parameters
----------
data
A 1D numpy array containing the data (e.g. a noisy 1D Gaussian) fitted in the workspace examples.
noise_map
A 1D numpy array containing the noise values of the data, used for computing the goodness of fit
metric.
"""
super().__init__()
self.data = data
self.noise_map = noise_map
def log_likelihood_function(self, instance: af.ModelInstance) -> float:
"""
Determine the log likelihood of a fit of multiple profiles to the dataset.
Parameters
----------
instance : af.Collection
The model instances of the profiles.
Returns
-------
The log likelihood value indicating how well this model fit the dataset.
"""
xvalues = np.arange(self.data.shape[0])
try:
model_data_1d = sum(
profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance
)
except TypeError:
model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
residual_map = self.data - model_data_1d
chi_squared_map = (residual_map / self.noise_map) ** 2.0
log_likelihood = -0.5 * sum(chi_squared_map)
return log_likelihood
def visualize(self, paths: af.DirectoryPaths, instance: af.ModelInstance, during_analysis : bool):
"""
During a model-fit, the `visualize` method is called throughout the non-linear search and is used to output
        images indicating the quality of the fit so far.
        The `instance` passed into the visualize method is the maximum log likelihood solution obtained by the model-fit
so far and it can be used to provide on-the-fly images showing how the model-fit is going.
For your model-fitting problem this function will be overwritten with plotting functions specific to your
problem.
Parameters
----------
paths
The PyAutoFit paths object which manages all paths, e.g. where the non-linear search outputs are stored,
visualization, and the pickled objects used by the aggregator output by this function.
instance
An instance of the model that is being fitted to the data by this analysis (whose parameters have been set
via a non-linear search).
during_analysis
If True the visualization is being performed midway through the non-linear search before it is finished,
which may change which images are output.
"""
xvalues = np.arange(self.data.shape[0])
try:
model_data_1d = sum(
profile.model_data_1d_via_xvalues_from(xvalues=xvalues) for profile in instance
)
except TypeError:
model_data_1d = instance.model_data_1d_via_xvalues_from(xvalues=xvalues)
plt.errorbar(
x=xvalues,
y=self.data,
yerr=self.noise_map,
color="k",
ecolor="k",
elinewidth=1,
capsize=2,
)
plt.plot(range(self.data.shape[0]), model_data_1d, color="r")
plt.title("Dynesty model fit to 1D Gaussian + Exponential dataset.")
plt.xlabel("x values of profile")
plt.ylabel("Profile normalization")
os.makedirs(paths.image_path, exist_ok=True)
plt.savefig(path.join(paths.image_path, "model_fit.png"))
plt.clf()
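# Illustrative usage sketch (added for clarity, not part of the original
# example): wiring this Analysis into a PyAutoFit model fit. The choice of a
# Dynesty search and the search name are assumptions made for the sketch;
# `model` is expected to be an af.Model or af.Collection of profiles built by
# the workspace examples.
def example_fit(model, data, noise_map):
    analysis = Analysis(data=data, noise_map=noise_map)
    search = af.DynestyStatic(name="example_fit")
    return search.fit(model=model, analysis=analysis)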
| 3.15625 | 3 |
app/controllers/__init__.py | jattoabdul/vanhack-cms | 15 | 12792684 | <reponame>jattoabdul/vanhack-cms
'''The controllers package'''
from .base_controller import BaseController
| 1.085938 | 1 |
rnaseq_pipeline/utils.py | ppavlidis/rnaseq-pipeline | 0 | 12792685 | import logging
import uuid
import luigi
from luigi.task import flatten_output
from luigi.parameter import ParameterVisibility
logger = logging.getLogger('luigi-interface')
class IlluminaFastqHeader:
@classmethod
def parse(cls, s):
pieces = s.split(':')
if len(pieces) == 5:
device, flowcell_lane, tile, x, y = pieces
return cls(device, flowcell_lane=flowcell_lane, tile=tile, x=x, y=y)
elif len(pieces) == 7:
return cls(*pieces)
else:
raise TypeError('Unsupported Illumina FASTQ header format {}.'.format(s))
def __init__(self, device, run=None, flowcell=None, flowcell_lane=None, tile=None, x=None, y=None):
self.device = device
self.run = run
self.flowcell = flowcell
self.flowcell_lane = flowcell_lane
self.tile = tile
self.x = x
self.y = y
@property
def batch_factor(self):
if self.flowcell is None:
return self.device, self.flowcell_lane
return self.device, self.flowcell, self.flowcell_lane
def parse_illumina_fastq_header(s):
return IlluminaFastqHeader(*s.split(':'))
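# Illustrative example (added for clarity): parsing a 7-field Illumina header
# and grouping reads by its batch factor. The header string below is made up.
#
# >>> header = IlluminaFastqHeader.parse('A00123:25:HGTKNDSXX:1:1101:2772:1000')
# >>> header.batch_factor
# ('A00123', 'HGTKNDSXX', '1')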
def max_retry(count):
"""
    Set the maximum number of times a task can be retried before being disabled
as per Luigi retry policy.
"""
def wrapper(cls):
cls.retry_count = count
return cls
return wrapper
no_retry = max_retry(0)
class TaskWithPriorityMixin:
"""Mixin that adds a --priority flag to a given task."""
priority = luigi.IntParameter(default=0, positional=False, significant=False)
class RerunnableTaskMixin:
"""
Mixin for a task that can be rerun regardless of its completion status.
"""
rerun = luigi.BoolParameter(default=False, positional=False, significant=False)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
self._has_rerun = False
def run(self):
try:
return super().run()
finally:
self._has_rerun = True
def complete(self):
return (not self.rerun or self._has_rerun) and super().complete()
class CheckAfterCompleteMixin:
"""Ensures that a task is completed after a successful run()."""
def run(self):
ret = super().run()
if not self.complete():
raise RuntimeError('{} is not completed after successful run().'.format(repr(self)))
return ret
def remove_task_output(task):
logger.info('Cleaning up %s...', repr(task))
for out in flatten_output(task):
if hasattr(out, 'remove') and out.exists():
try:
out.remove()
logger.info('Removed %s.', repr(out))
except:
logger.exception('Failed to remove %s.', repr(out))
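# Illustrative sketch (added for clarity, not part of the original module):
# combining the helpers above on a Luigi task. The task, its target path and
# the retry count are made-up examples.
@max_retry(3)
class _ExampleTask(RerunnableTaskMixin, CheckAfterCompleteMixin, luigi.Task):
    def output(self):
        return luigi.LocalTarget('/tmp/example-task.done')

    def run(self):
        with self.output().open('w') as f:
            f.write('done\n')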
| 2.234375 | 2 |
python/target_selection/cartons/mwm_yso.py | sdss/target_selection | 3 | 12792686 | <reponame>sdss/target_selection
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Date: 2020-06-10
# @Filename: mwm_yso.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import peewee
from sdssdb.peewee.sdss5db.catalogdb import (MIPSGAL, AllWise, Catalog,
CatalogToTIC_v8, Gaia_DR2,
Sagitta, TIC_v8, TwoMassPSC,
YSO_Clustering, Zari18pms)
from target_selection.cartons import BaseCarton
from target_selection.exceptions import TargetSelectionError
# See catalog.py for the name of peewee model names corresponding
# to postgres table names:
# https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py
class MWM_YSO_Disk_APOGEE_Carton(BaseCarton):
"""YSOs - Disk APOGEE (IR excess).
Shorthand name: mwm_yso_disk_apogee
old class name: MWM_YSO_S1_Carton
old shorthand name: mwm_yso_s1
Simplified Description of selection criteria:
selection of YSOs based on IR excess,
with WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5,
closer than parallax>0.3, and brighter than H<13
(should have ~21.5K sources)
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: Gaia, 2mass, allwise
Additional cross-matching needed:
Note: Using the Gaia xmatch somehow misses half the sources.
Selection was done on the allwise catalog that
had 2mass photometry,
    and then the resulting selection was crossmatched against
Gaia with 1" search radius.
Return columns: Gaia id, 2mass id, allwise id, G, BP, RP,
J, H, K, W1, W2, W3, W4,parallax
cadence options for these targets
(list all options,
even though no single target will receive more than one):
Pseudo SQL (optional):
Implementation: h_m<13 and w1mpro-w2mpro>0.25 and
w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3
"""
name = 'mwm_yso_disk_apogee'
category = 'science'
instrument = 'APOGEE'
cadence = 'bright_3x1'
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
def build_query(self, version_id, query_region=None):
query = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
AllWise.designation.alias('allwise_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m,
Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.switch(TIC_v8)
.join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(TIC_v8)
.join(AllWise, on=(TIC_v8.allwise == AllWise.designation))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
TwoMassPSC.h_m < 13,
(AllWise.w1mpro - AllWise.w2mpro) > 0.25,
(AllWise.w2mpro - AllWise.w3mpro) > 0.50,
(AllWise.w3mpro - AllWise.w4mpro) > 1.50,
Gaia_DR2.parallax > 0.3))
        # Gaia_DR2 peewee model class corresponds to
# table catalogdb.gaia_dr2_source.
#
# All values of TIC_v8.plx (for non-null entries) are not the same as
# values of Gaia_DR2.parallax.
# Hence, in the above query, we cannot use TIC_v8.plx instead
# of Gaia_DR2.parallax.
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
class MWM_YSO_Disk_BOSS_Carton(BaseCarton):
"""YSOs - Disk BOSS (IR excess).
Shorthand name: mwm_yso_disk_boss
old class name: MWM_YSO_S1_Carton
old shorthand name: mwm_yso_s1
Simplified Description of selection criteria:
selection of YSOs based on IR excess,
with WISE colors W1-W2>0.25, W2-W3>0.5, W3-W4>1.5,
closer than parallax>0.3, and brighter than H<13
(should have ~21.5K sources)
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: Gaia, 2mass, allwise
Additional cross-matching needed:
Note: Using the Gaia xmatch somehow misses half the sources.
Selection was done on the allwise catalog that
had 2mass photometry,
    and then the resulting selection was crossmatched against
Gaia with 1" search radius.
Return columns: Gaia id, 2mass id, allwise id, G, BP, RP,
J, H, K, W1, W2, W3, W4,parallax
cadence options for these targets
(list all options,
even though no single target will receive more than one):
boss_bright_3x1 if RP<14.76 |
boss_bright_4x1 if RP<15.075 |
boss_bright_5x1 if RP<15.29 |
boss_bright_6x1 if RP<15.5
Pseudo SQL (optional):
Implementation: phot_rp_mean_mag<15.5 and w1mpro-w2mpro>0.25 and
w2mpro-w3mpro>0.5 and w3mpro-w4mpro>1.5 and parallax>0.3
Comments: Split from mwm_yso_s1 to request BOSS observations,
same color selection but assigning cadence and faint limit for carton based
on RP instead of H
"""
name = 'mwm_yso_disk_boss'
category = 'science'
instrument = None # instrument is set in post_process()
cadence = None # cadence is set in post_process()
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
def build_query(self, version_id, query_region=None):
query = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
AllWise.designation.alias('allwise_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m,
Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.switch(TIC_v8)
.join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(TIC_v8)
.join(AllWise, on=(TIC_v8.allwise == AllWise.designation))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
Gaia_DR2.phot_rp_mean_mag < 15.5,
(AllWise.w1mpro - AllWise.w2mpro) > 0.25,
(AllWise.w2mpro - AllWise.w3mpro) > 0.50,
(AllWise.w3mpro - AllWise.w4mpro) > 1.50,
Gaia_DR2.parallax > 0.3))
        # Gaia_DR2 peewee model class corresponds to
# table catalogdb.gaia_dr2_source.
#
# All values of TIC_v8.plx (for non-null entries) are not the same as
# values of Gaia_DR2.parallax.
# Hence, in the above query, we cannot use TIC_v8.plx instead
# of Gaia_DR2.parallax.
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
def post_process(self, model):
"""
cadence options for these targets:
boss_bright_3x1 if RP<14.76 |
boss_bright_4x1 if RP<15.075 |
boss_bright_5x1 if RP<15.29 |
boss_bright_6x1 if RP<15.5
"""
cursor = self.database.execute_sql(
"select catalogid, gaia_dr2_rp from " +
" sandbox.temp_mwm_yso_disk_boss ;")
output = cursor.fetchall()
for i in range(len(output)):
current_catalogid = output[i][0]
current_rp = output[i][1]
if(current_rp < 14.76):
current_instrument = 'BOSS'
current_cadence = 'bright_3x1'
elif(current_rp < 15.075):
current_instrument = 'BOSS'
current_cadence = 'bright_4x1'
elif(current_rp < 15.29):
current_instrument = 'BOSS'
current_cadence = 'bright_5x1'
elif(current_rp < 15.5):
current_instrument = 'BOSS'
current_cadence = 'bright_6x1'
else:
# All cases should be covered above so we should not get here.
current_instrument = None
current_cadence = None
raise TargetSelectionError('error in mwm_yso_disk_boss ' +
'post_process(): ' +
'instrument = None, cadence= None')
if current_instrument is not None:
self.database.execute_sql(
" update sandbox.temp_mwm_yso_disk_boss " +
" set instrument = '" + current_instrument + "'"
" where catalogid = " + str(current_catalogid) + ";")
if current_cadence is not None:
self.database.execute_sql(
" update sandbox.temp_mwm_yso_disk_boss " +
" set cadence = '" + current_cadence + "'"
" where catalogid = " + str(current_catalogid) + ";")
class MWM_YSO_Embedded_APOGEE_Carton(BaseCarton):
"""YSOs - Embedded APOGEE (optically invisible).
Shorthand name: mwm_yso_embedded_apogee
old class name: MWM_YSO_S2_Carton
old shorthand name: mwm_yso_s2
Simplified Description of selection criteria:
selection of YSOs, brighter than H<13, fainter than G>15 or
without gaia detection,
colors J-H>0,5, W1-W2>0.5, W2-W3>1, W3-W4>1.5, and
relates (W3-W4)>(W1-W2)*0.5+1.1
(should have ~11.6K sources)
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: 2mass+allwise, gaia
(allow sources that lack gaia xmatch)
Additional cross-matching needed:
Note: Using the Gaia xmatch somehow misses half the sources.
Selection was done on the allwise catalog
that had 2mass photometry,
and then the resulting selection was crossmatched
    against Gaia with 1" search radius.
Return columns: Gaia id, 2mass id, allwise id, G, BP, RP,
J, H, K, W1, W2, W3, W4
cadence options for these targets
(list all options,
even though no single target will receive more than one):
Pseudo SQL (optional):
Implementation: h_m<13 and
(phot_g_mean_mag>18.5 or phot_g_mean_mag is null)
and j_m-h_m>1
and h_m-ks_m>0.5
and w1mpro-w2mpro>0.5
and w2mpro-w3mpro>1
and w3mpro-w4mpro>1.5
and w3mpro-w4mpro>(w1mpro-w2mpro)*0.8+1.1
"""
name = 'mwm_yso_embedded_apogee'
category = 'science'
instrument = 'APOGEE'
cadence = 'bright_3x1'
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
def build_query(self, version_id, query_region=None):
query = (AllWise
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
AllWise.designation.alias('allwise_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m,
Gaia_DR2.parallax)
.join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation))
.join(TwoMassPSC,
on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(TIC_v8)
.join(Gaia_DR2, peewee.JOIN.LEFT_OUTER,
on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.switch(TIC_v8)
.join(CatalogToTIC_v8,
on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
TwoMassPSC.h_m < 13,
(Gaia_DR2.phot_g_mean_mag > 18.5) |
(Gaia_DR2.phot_g_mean_mag >> None),
(AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.0,
(AllWise.h_m_2mass - AllWise.k_m_2mass) > 0.5,
(AllWise.w1mpro - AllWise.w2mpro) > 0.50,
(AllWise.w2mpro - AllWise.w3mpro) > 1.00,
(AllWise.w3mpro - AllWise.w4mpro) > 1.50,
(AllWise.w3mpro - AllWise.w4mpro) >
(AllWise.w1mpro - AllWise.w2mpro) * 0.8 + 1.1))
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
class MWM_YSO_Nebula_APOGEE_Carton(BaseCarton):
"""YSOs - Nebula APOGEE(optically invisible, WISE saturated).
Shorthand name: mwm_yso_nebula_apogee
old class name: MWM_YSO_S2_5_Carton
old shorthand name: mwm_yso_s2_5
Simplified Description of selection criteria:
selection of YSOs, brighter than H<15,
saturated (blank) W4 with W2-W3>4,
or saturated W3 and W2, with J-H>1.1.
Some contaminants from scanning are
filtered on the plane of the sky:
all the targets should be within 5 deg of the plane+
few sources that can be located
further south of the plane if l>180
(should have ~1.2K sources)
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: 2mass, allwise
Additional cross-matching needed:
Return columns: 2mass id, allwise id, J, H, K, W1, W2, W3, W4
cadence options for these targets
(list all options,
even though no single target will receive more than one):
Pseudo SQL (optional):
Implementation: h_m<13 and
(w2mpro-w3mpro>4 and w4mpro is null) or
(w3mpro is null and w4mpro is null and j_m-h_m>1.1)
and (b>-5 or l>180) and b<-5
"""
name = 'mwm_yso_nebula_apogee'
category = 'science'
instrument = 'APOGEE'
cadence = 'bright_3x1'
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
# Above implementation has below clause
# and (b>-5 or l>180) and b<-5
# Replace (b>-5 or l>180) and b<-5 as below based on the text.
# In words:
# all the targets should be within 5 deg of the plane+
# few sources that can be
# located further south of the plane if l>180
# Hence:
# ((b>-5) and (b<5)) or ((b<-5) and (l > 180))
# l, b in Gaia_DR2 are gallong and gallat in TIC_v8.
# We are using the values from Gaia since
# TIC propagates the coordinates back to epoch 2000.0
# (b>-5 or l>180) and b<-5
# S2_5 query below has the same part before where() as S2 query.
def build_query(self, version_id, query_region=None):
query = (AllWise
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
AllWise.designation.alias('allwise_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m,
Gaia_DR2.parallax)
.join(TIC_v8, on=(TIC_v8.allwise == AllWise.designation))
.join(TwoMassPSC,
on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(TIC_v8)
.join(Gaia_DR2, peewee.JOIN.LEFT_OUTER,
on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.switch(TIC_v8)
.join(CatalogToTIC_v8,
on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
TwoMassPSC.h_m < 13,
(((AllWise.w2mpro - AllWise.w3mpro) > 4) &
(AllWise.w4mpro >> None)) |
((AllWise.w3mpro >> None) &
(AllWise.w4mpro >> None) &
((AllWise.j_m_2mass - AllWise.h_m_2mass) > 1.1)),
((Gaia_DR2.b > -5) & (Gaia_DR2.b < 5)) |
((Gaia_DR2.b < -5) & (Gaia_DR2.l > 180)) |
((Gaia_DR2.b >> None) & (Gaia_DR2.l >> None))))
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
class MWM_YSO_Variable_APOGEE_Carton(BaseCarton):
"""YSOs - Variable APOGEE (pre-main sequence optical variables).
Shorthand name: mwm_yso_variable_apogee
old class name: MWM_YSO_S3_Carton
old shorthand name: mwm_yso_s3
Simplified Description of selection criteria:
selection of YSOs brighter than H<13, closer than parallax>0.3.
Filter on the position of the HR diagram to
select cool pre-main sequence stars,
    with BP-RP>1.3, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G,
requiring variability in g,bp,rp>0.02
(with var_x defined as
sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error),
have relations in variability of
var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95,
and log10(var_bp)*5+11<M_BP, in which M_x is the absolute mag
(should have ~52.7K sources)
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: 2mass, gaia
Additional cross-matching needed:
Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K, parallax
cadence options for these targets
(list all options,
even though no single target will receive more than one):
Pseudo SQL (optional):
Implementation:
phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3 and
bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and
bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and
sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>
sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and
sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>
sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and
sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error<
power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and
sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error<
power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and
log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11<
phot_bp_mean_mag-5*(log10(1000/parallax)-1) and
bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02
and
sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and
sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02
"""
name = 'mwm_yso_variable_apogee'
category = 'science'
instrument = 'APOGEE'
cadence = 'bright_3x1'
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
def build_query(self, version_id, query_region=None):
query = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m,
Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(TIC_v8)
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
Gaia_DR2.phot_g_mean_mag < 18.5,
TwoMassPSC.h_m < 13,
Gaia_DR2.parallax > 0.3,
Gaia_DR2.bp_rp * 2.5 + 2.5 >
Gaia_DR2.phot_g_mean_mag -
5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1),
Gaia_DR2.bp_rp * 2.5 - 1 <
Gaia_DR2.phot_g_mean_mag -
5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1),
peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /
Gaia_DR2.phot_bp_mean_flux_over_error >
peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /
Gaia_DR2.phot_g_mean_flux_over_error,
peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) /
Gaia_DR2.phot_rp_mean_flux_over_error >
peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /
Gaia_DR2.phot_g_mean_flux_over_error * 0.75,
peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /
Gaia_DR2.phot_bp_mean_flux_over_error <
peewee.fn.power(
peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /
Gaia_DR2.phot_g_mean_flux_over_error, 0.75),
peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) /
Gaia_DR2.phot_rp_mean_flux_over_error <
peewee.fn.power(
peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /
Gaia_DR2.phot_g_mean_flux_over_error, 0.95),
peewee.fn.log(
peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /
Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11 <
Gaia_DR2.phot_bp_mean_mag -
5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1),
Gaia_DR2.bp_rp > 1.3,
peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /
Gaia_DR2.phot_g_mean_flux_over_error > 0.02,
peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /
Gaia_DR2.phot_bp_mean_flux_over_error > 0.02,
peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) /
Gaia_DR2.phot_rp_mean_flux_over_error > 0.02))
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
class MWM_YSO_Variable_BOSS_Carton(BaseCarton):
"""YSOs - Variable BOSS (pre-main sequence optical variables).
Shorthand name: mwm_yso_variable_boss
old class name: MWM_YSO_S3_Carton
old shorthand name: mwm_yso_s3
Simplified Description of selection criteria:
selection of YSOs brighter than H<13, closer than parallax>0.3.
Filter on the position of the HR diagram to
select cool pre-main sequence stars,
    with BP-RP>1.3, (BP-RP)*2.5+2.5>M_G, (BP-RP)*2.5-1<M_G,
requiring variability in g,bp,rp>0.02
(with var_x defined as
sqrt(phot_x_n_obs)/phot_x_mean_flux_over_error),
have relations in variability of
var_g<var_bp<var_g^0.75, 0.75*var_g<var_rp<var_g^0.95,
and log10(var_bp)*5+11<M_BP, in which M_x is the absolute mag
(should have ~52.7K sources)
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: 2mass, gaia
Additional cross-matching needed:
Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K, parallax
cadence options for these targets
(list all options,
even though no single target will receive more than one):
boss_bright_3x1 if RP<14.76 |
boss_bright_4x1 if RP<15.075 |
boss_bright_5x1 if RP<15.29 |
boss_bright_6x1 if RP<15.5
Pseudo SQL (optional):
Implementation:
phot_rp_mean_mag<15.5 and phot_g_mean_mag < 18.5 and h_m <13 and parallax >0.3 and
bp_rp*2.5+2.5 > phot_g_mean_mag-5*(log10(1000/parallax)-1) and
bp_rp*2.5-1 < phot_g_mean_mag-5*(log10(1000/parallax)-1) and
sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>
sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error and
sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>
sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error*0.75 and
sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error<
power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.75) and
sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error<
power(sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error,0.95) and
log10(sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error)*5+11<
phot_bp_mean_mag-5*(log10(1000/parallax)-1) and
bp_rp>1.3 and sqrt(phot_g_n_obs)/phot_g_mean_flux_over_error>0.02
and
sqrt(phot_bp_n_obs)/phot_bp_mean_flux_over_error>0.02 and
sqrt(phot_rp_n_obs)/phot_rp_mean_flux_over_error>0.02
Comments: Split from mwm_yso_s3 to request BOSS observations,
RP magnitude check added to the previous selection
"""
name = 'mwm_yso_variable_boss'
category = 'science'
instrument = None # instrument is set in post_process()
cadence = None # cadence is set in post_process()
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
def build_query(self, version_id, query_region=None):
query = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m, Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(TIC_v8)
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
Gaia_DR2.phot_rp_mean_mag < 15.5,
Gaia_DR2.phot_g_mean_mag < 18.5,
TwoMassPSC.h_m < 13,
Gaia_DR2.parallax > 0.3,
Gaia_DR2.bp_rp * 2.5 + 2.5 >
Gaia_DR2.phot_g_mean_mag -
5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1),
Gaia_DR2.bp_rp * 2.5 - 1 <
Gaia_DR2.phot_g_mean_mag -
5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1),
peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /
Gaia_DR2.phot_bp_mean_flux_over_error >
peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /
Gaia_DR2.phot_g_mean_flux_over_error,
peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) /
Gaia_DR2.phot_rp_mean_flux_over_error >
peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /
Gaia_DR2.phot_g_mean_flux_over_error * 0.75,
peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /
Gaia_DR2.phot_bp_mean_flux_over_error <
peewee.fn.power(
peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /
Gaia_DR2.phot_g_mean_flux_over_error, 0.75),
peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) /
Gaia_DR2.phot_rp_mean_flux_over_error <
peewee.fn.power(
peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /
Gaia_DR2.phot_g_mean_flux_over_error, 0.95),
peewee.fn.log(
peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /
Gaia_DR2.phot_bp_mean_flux_over_error) * 5 + 11 <
Gaia_DR2.phot_bp_mean_mag -
5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1),
Gaia_DR2.bp_rp > 1.3,
peewee.fn.sqrt(Gaia_DR2.phot_g_n_obs) /
Gaia_DR2.phot_g_mean_flux_over_error > 0.02,
peewee.fn.sqrt(Gaia_DR2.phot_bp_n_obs) /
Gaia_DR2.phot_bp_mean_flux_over_error > 0.02,
peewee.fn.sqrt(Gaia_DR2.phot_rp_n_obs) /
Gaia_DR2.phot_rp_mean_flux_over_error > 0.02))
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
def post_process(self, model):
"""
cadence options for these targets:
boss_bright_3x1 if RP<14.76 |
boss_bright_4x1 if RP<15.075 |
boss_bright_5x1 if RP<15.29 |
boss_bright_6x1 if RP<15.5
"""
cursor = self.database.execute_sql(
"select catalogid, gaia_dr2_rp from " +
" sandbox.temp_mwm_yso_variable_boss ;")
output = cursor.fetchall()
for i in range(len(output)):
current_catalogid = output[i][0]
current_rp = output[i][1]
if(current_rp < 14.76):
current_instrument = 'BOSS'
current_cadence = 'bright_3x1'
elif(current_rp < 15.075):
current_instrument = 'BOSS'
current_cadence = 'bright_4x1'
elif(current_rp < 15.29):
current_instrument = 'BOSS'
current_cadence = 'bright_5x1'
elif(current_rp < 15.5):
current_instrument = 'BOSS'
current_cadence = 'bright_6x1'
else:
# All cases should be covered above so we should not get here.
current_instrument = None
current_cadence = None
raise TargetSelectionError('error in mwm_yso_variable_boss ' +
'post_process(): ' +
'instrument = None, cadence= None')
if current_instrument is not None:
self.database.execute_sql(
" update sandbox.temp_mwm_yso_variable_boss " +
" set instrument = '" + current_instrument + "'"
" where catalogid = " + str(current_catalogid) + ";")
if current_cadence is not None:
self.database.execute_sql(
" update sandbox.temp_mwm_yso_variable_boss " +
" set cadence = '" + current_cadence + "'"
" where catalogid = " + str(current_catalogid) + ";")
class MWM_YSO_OB_APOGEE_Carton(BaseCarton):
"""YSOs - OB APOGEE Upper (pre-)Main Sequence.
Shorthand name: mwm_yso_ob_apogee
old class name: MWM_YSO_OB_Carton
old shorthand name: mwm_yso_ob
Simplified Description of selection criteria:
Selecting the OB stars at the tip of the main sequence,
brighter than H<13, G<18 mag, closer than parallax>0.3,
color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2
(should have ~8.7K sources)
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: 2mass, gaia
Additional cross-matching needed:
Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K, parallax
cadence options for these targets
(list all options,
even though no single target will receive more than one):
Pseudo SQL (optional):
Implementation: h_m<13 and bp_rp between -0.2 and 1.1 and
phot_g_mean_mag<18 and
phot_g_mean_mag-5*(log10(1000/parallax)-1) <
1.6*bp_rp-2.2 and parallax>0.3
"""
name = 'mwm_yso_ob_apogee'
category = 'science'
instrument = 'APOGEE'
cadence = 'bright_3x1'
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
def build_query(self, version_id, query_region=None):
query = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m, Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(TIC_v8)
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
TwoMassPSC.h_m < 13,
(Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1),
Gaia_DR2.phot_g_mean_mag < 18,
Gaia_DR2.phot_g_mean_mag -
5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1) <
1.6 * Gaia_DR2.bp_rp - 2.2,
Gaia_DR2.parallax > 0.3))
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
class MWM_YSO_OB_BOSS_Carton(BaseCarton):
"""YSOs - OB BOSS Upper (pre-)Main Sequence.
Shorthand name: mwm_yso_ob_boss
old class name: MWM_YSO_OB_Carton
old shorthand name: mwm_yso_ob
Simplified Description of selection criteria:
Selecting the OB stars at the tip of the main sequence,
brighter than rp<15.5, G<18 mag, closer than parallax>0.3,
color -0.2<BP-RP<1.1, and M_G<(BP-RP)*1.6-2.2
(should have ~8.7K sources)
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: 2mass, gaia
Additional cross-matching needed:
Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K, parallax
cadence options for these targets
(list all options,
even though no single target will receive more than one):
boss_bright_3x1 if RP<14.76 |
boss_bright_4x1 if RP<15.075 |
boss_bright_5x1 if RP<15.29 |
boss_bright_6x1 if RP<15.5
Pseudo SQL (optional):
Implementation: rp<15.5 and bp_rp between -0.2 and 1.1 and
phot_g_mean_mag<18 and
phot_g_mean_mag-5*(log10(1000/parallax)-1) <
1.6*bp_rp-2.2 and parallax>0.3
Comments: Split from mwm_yso_ob to request BOSS observations,
assigning cadence and faint limit for carton based on RP instead of H
"""
name = 'mwm_yso_ob_boss'
category = 'science'
instrument = None # instrument is set in post_process()
cadence = None # cadence is set in post_process()
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
def build_query(self, version_id, query_region=None):
query = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m, Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(TIC_v8)
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
Gaia_DR2.phot_rp_mean_mag < 15.5,
(Gaia_DR2.bp_rp > -0.2) & (Gaia_DR2.bp_rp < 1.1),
Gaia_DR2.phot_g_mean_mag < 18,
Gaia_DR2.phot_g_mean_mag -
5 * (peewee.fn.log(1000 / Gaia_DR2.parallax) - 1) <
1.6 * Gaia_DR2.bp_rp - 2.2,
Gaia_DR2.parallax > 0.3))
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
def post_process(self, model):
"""
cadence options for these targets:
boss_bright_3x1 if RP<14.76 |
boss_bright_4x1 if RP<15.075 |
boss_bright_5x1 if RP<15.29 |
boss_bright_6x1 if RP<15.5
"""
cursor = self.database.execute_sql(
"select catalogid, gaia_dr2_rp from " +
" sandbox.temp_mwm_yso_ob_boss ;")
output = cursor.fetchall()
for i in range(len(output)):
current_catalogid = output[i][0]
current_rp = output[i][1]
if(current_rp < 14.76):
current_instrument = 'BOSS'
current_cadence = 'bright_3x1'
elif(current_rp < 15.075):
current_instrument = 'BOSS'
current_cadence = 'bright_4x1'
elif(current_rp < 15.29):
current_instrument = 'BOSS'
current_cadence = 'bright_5x1'
elif(current_rp < 15.5):
current_instrument = 'BOSS'
current_cadence = 'bright_6x1'
else:
# All cases should be covered above so we should not get here.
current_instrument = None
current_cadence = None
raise TargetSelectionError('error in mwm_yso_ob_boss ' +
'post_process(): ' +
'instrument = None, cadence= None')
if current_instrument is not None:
self.database.execute_sql(
" update sandbox.temp_mwm_yso_ob_boss " +
" set instrument = '" + current_instrument + "'"
" where catalogid = " + str(current_catalogid) + ";")
if current_cadence is not None:
self.database.execute_sql(
" update sandbox.temp_mwm_yso_ob_boss " +
" set cadence = '" + current_cadence + "'"
" where catalogid = " + str(current_catalogid) + ";")
class MWM_YSO_CMZ_APOGEE_Carton(BaseCarton):
"""YSOs - Central Molecular Zone APOGEE.
Shorthand name: mwm_yso_cmz_apogee
old class name: MWM_YSO_CMZ_Carton
old shorthand name: mwm_yso_cmz
Simplified Description of selection criteria:
selection of sources in the central molecular zone
based on spitzer fluxes from mipsgal.
brighter than H<13, have color 8.0-24>2.5, and
have parallax<0.2 or lack a Gaia xmatch.
(should have ~3.2K sources)
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: mipsgal
Additional cross-matching needed: the table has xmatch included
Return columns:
mipsgal id, 2mass id, j, h, k, 3.6, 4.8, 8.0, 24 mag
cadence options for these targets
(list all options,
even though no single target will receive more than one):
'apogee_bright_3x1'
Pseudo SQL (optional):
Implementation: Hmag<13 and _8_0_-_24_>2.5 and
(parallax<0.2 or parallax is null)
For CMZ, the raw sql query would be:
select ct.catalogid from mipsgal m
join twomass_psc t on twomass_name = designation
join tic_v8 tic on tic.twomass_psc = t.designation
left outer join gaia_dr2_source g on g.source_id = tic.gaia_int
join catalog_to_tic_v8 ct on ct.target_id = tic.id
where m.hmag < 13 and
(m.mag_8_0 - m.mag_24) > 2.5 and
(g.parallax < 0.2 or g.parallax is null)
and ct.version_id = 13 and ct.best is true;
Note you only need one left outer join between TIC and Gaia
(all MIPSGAL targets have a counterpart in 2MASS,
and all 2MASS have an entry in TIC,
but not all the TIC entries have a Gaia counterpart).
    Comments: Formerly mwm_yso_cmz; the on-sky position check described
    below has been removed from the selection.
l is glon (galactic longitude)
b is glat (galactic latitude)
    The statements below are equivalent formulations of the removed condition.
(l> 358 or l< 2) and
b between -1 and 1
(m.glon > 358 or m.glon < 2) and
(m.glat > -1 and m.glat < 1) and
Sources are within 2 degrees in l and
1 degree in b from the galactic center,
(MIPSGAL.glon > 358) | (MIPSGAL.glon < 2),
(MIPSGAL.glat > -1) & (MIPSGAL.glat < 1),
"""
name = 'mwm_yso_cmz_apogee'
category = 'science'
instrument = 'APOGEE'
cadence = 'bright_3x1'
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
# mipsgal is a subset of 2MASS
# mipsgal can be joined to twomass_psc via
# mipsgal.twomass_name = TwoMassPSC.designation.
# Then join via TIC and catalog_to_tic.
#
# mipsgal is a subset of 2MASS
# 2MASS is a subset of TIC_v8
# Gaia_DR2 is a subset of TIC_v8
#
# 2MASS is not a subset of Gaia_DR2
# Gaia_DR2 is not a subset of 2MASS
#
# table catalogdb.mipsgal
# Foreign-key constraints:
# "twomass_name_fk" FOREIGN KEY (twomass_name)
# REFERENCES twomass_psc(designation)
#
    # Because Catalog.catalogid == CatalogToTIC_v8.catalogid (see below),
    # we do not need a join between Catalog and CatalogToTIC_v8.
    # We can drop the join with Catalog in all the cartons:
    # catalogid is unique (even across different version_id values),
    # so the join with Catalog adds nothing extra and it is a costly join.
def build_query(self, version_id, query_region=None):
query = (MIPSGAL.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m, MIPSGAL.mag_3_6, MIPSGAL.mag_4_5,
MIPSGAL.mag_5_8, MIPSGAL.mag_8_0, MIPSGAL.mag_24,
MIPSGAL.hmag, Gaia_DR2.parallax,
MIPSGAL.glon, MIPSGAL.glat)
.join(TwoMassPSC, on=(MIPSGAL.twomass_name == TwoMassPSC.designation))
.join(TIC_v8, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.join(Gaia_DR2, peewee.JOIN.LEFT_OUTER,
on=(Gaia_DR2.source_id == TIC_v8.gaia_int))
.switch(TIC_v8)
.join(CatalogToTIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
MIPSGAL.hmag < 13,
(MIPSGAL.mag_8_0 - MIPSGAL.mag_24) > 2.5,
(Gaia_DR2.parallax < 0.2) |
(Gaia_DR2.parallax >> None)))
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
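# Illustrative note, not used by the cartons: in the query above, peewee's
# ``>> None`` renders as ``IS NULL`` and ``|`` combines expressions with OR,
# giving "parallax < 0.2 OR parallax IS NULL". The standalone example below
# (a hypothetical one-column model on an in-memory SQLite database) only shows
# the SQL such an expression generates.
def _parallax_or_null_sql_example():
    import peewee as pw
    db = pw.SqliteDatabase(':memory:')
    class Star(pw.Model):
        parallax = pw.FloatField(null=True)
        class Meta:
            database = db
    query = Star.select().where((Star.parallax < 0.2) | (Star.parallax >> None))
    return query.sql()  # returns (sql_string, parameter_list)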
class MWM_YSO_Cluster_APOGEE_Carton(BaseCarton):
"""YSOs - Cluster APOGEE Catalog
Shorthand name: mwm_yso_cluster_apogee
old class name: MWM_YSO_Cluster_Carton
old shorthand name: mwm_yso_cluster
Simplified Description of selection criteria:
Selecting the clustered sources from
the catalog of clustered structures,
with age<7.5 dex and brighter than H<13 mag.
(should have ~45.5K sources)
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: Kounkel+20 clustered catalog
Additional cross-matching needed:
Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K, parallax
cadence options for these targets
(list all options,
even though no single target will receive more than one):
Pseudo SQL (optional):
Implementation: age<7.5 and h<13
"""
name = 'mwm_yso_cluster_apogee'
category = 'science'
instrument = 'APOGEE'
cadence = 'bright_3x1'
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
# yso_clustering is a subset of gaia and
# can be joined to gaia_dr2_source via source_id.
#
# table catalogdb.yso_clustering
# Foreign-key constraints:
# "yso_clustering_source_id_fkey" FOREIGN KEY (source_id)
# REFERENCES gaia_dr2_source(source_id)
def build_query(self, version_id, query_region=None):
query = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
YSO_Clustering.twomass,
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
YSO_Clustering.j, YSO_Clustering.h,
YSO_Clustering.k, Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.join(YSO_Clustering,
on=(Gaia_DR2.source_id == YSO_Clustering.source_id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
YSO_Clustering.h < 13,
YSO_Clustering.age < 7.5))
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
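# Illustrative note, not used by the carton: the "age < 7.5" cut above is in
# dex, i.e. log10(age / yr) < 7.5, which corresponds to roughly 3.2e7 yr
# (about 32 Myr).
def _max_cluster_age_yr(age_dex=7.5):
    """Return the cluster age cut expressed in years."""
    return 10 ** age_dex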
class MWM_YSO_Cluster_BOSS_Carton(BaseCarton):
"""YSOs - Cluster BOSS Catalog
Shorthand name: mwm_yso_cluster_boss
old class name: MWM_YSO_Cluster_Carton
old shorthand name: mwm_yso_cluster
Simplified Description of selection criteria:
Selecting the clustered sources from
the catalog of clustered structures,
with age<7.5 dex and brighter than rp<15.5 mag.
Wiki page:
https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: Kounkel+20 clustered catalog
Additional cross-matching needed:
Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K, parallax
cadence options for these targets
(list all options,
even though no single target will receive more than one):
cadence options for these targets:
boss_bright_3x1 if RP<14.76 |
boss_bright_4x1 if RP<15.075 |
boss_bright_5x1 if RP<15.29 |
boss_bright_6x1 if RP<15.5
Pseudo SQL (optional):
Implementation: age<7.5 and rp<15.5
Comments: Split from Cluster to request BOSS observations,
assigning cadence and faint limit for carton based on RP instead of H
"""
name = 'mwm_yso_cluster_boss'
category = 'science'
instrument = None # instrument is set in post_process()
cadence = None # cadence is set in post_process()
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
# yso_clustering is a subset of gaia and
# can be joined to gaia_dr2_source via source_id.
#
# table catalogdb.yso_clustering
# Foreign-key constraints:
# "yso_clustering_source_id_fkey" FOREIGN KEY (source_id)
# REFERENCES gaia_dr2_source(source_id)
def build_query(self, version_id, query_region=None):
query = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
YSO_Clustering.twomass,
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
YSO_Clustering.j, YSO_Clustering.h,
YSO_Clustering.k, Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.join(YSO_Clustering,
on=(Gaia_DR2.source_id == YSO_Clustering.source_id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
Gaia_DR2.phot_rp_mean_mag < 15.5,
YSO_Clustering.age < 7.5))
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
def post_process(self, model):
"""
cadence options for these targets:
boss_bright_3x1 if RP<14.76 |
boss_bright_4x1 if RP<15.075 |
boss_bright_5x1 if RP<15.29 |
boss_bright_6x1 if RP<15.5
"""
cursor = self.database.execute_sql(
"select catalogid, gaia_dr2_rp from " +
" sandbox.temp_mwm_yso_cluster_boss ;")
output = cursor.fetchall()
for i in range(len(output)):
current_catalogid = output[i][0]
current_rp = output[i][1]
if(current_rp < 14.76):
current_instrument = 'BOSS'
current_cadence = 'bright_3x1'
elif(current_rp < 15.075):
current_instrument = 'BOSS'
current_cadence = 'bright_4x1'
elif(current_rp < 15.29):
current_instrument = 'BOSS'
current_cadence = 'bright_5x1'
elif(current_rp < 15.5):
current_instrument = 'BOSS'
current_cadence = 'bright_6x1'
else:
# All cases should be covered above so we should not get here.
current_instrument = None
current_cadence = None
raise TargetSelectionError('error in mwm_yso_cluster_boss ' +
'post_process(): ' +
'instrument = None, cadence= None')
if current_instrument is not None:
self.database.execute_sql(
" update sandbox.temp_mwm_yso_cluster_boss " +
" set instrument = '" + current_instrument + "'"
" where catalogid = " + str(current_catalogid) + ";")
if current_cadence is not None:
self.database.execute_sql(
" update sandbox.temp_mwm_yso_cluster_boss " +
" set cadence = '" + current_cadence + "'"
" where catalogid = " + str(current_catalogid) + ";")
class MWM_YSO_PMS_APOGEE_Carton(BaseCarton):
"""
YSOs - Pre-main sequence, APOGEE
Shorthand name: mwm_yso_pms_apogee
Comments: New
Simplified Description of selection criteria:
Selecting the clustered sources from the catalog of vetted
pre-main sequence stars
Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms
Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K, parallax
cadence options for these targets
(list all options, even though no single target will receive more than one):
apogee_bright_3x1 (for 7 < H < 13)
Implementation: (in sagitta | in zari18pms) & h<13
    lead contact: <NAME>
"""
# peewee Model name ---> postgres table name
# Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source'
# Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms'
# Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums'
# Sagitta(CatalogdbModel)--->'catalogdb.sagitta'
# TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc'
name = 'mwm_yso_pms_apogee'
category = 'science'
instrument = 'APOGEE'
cadence = 'bright_3x1'
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
def build_query(self, version_id, query_region=None):
# join with Sagitta
query1 = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m, Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.switch(TIC_v8)
.join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(Gaia_DR2)
.join(Sagitta,
on=(Gaia_DR2.source_id == Sagitta.source_id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
TwoMassPSC.h_m < 13))
# join with Zari18pms
query2 = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m, Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.switch(TIC_v8)
.join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(Gaia_DR2)
.join(Zari18pms,
on=(Gaia_DR2.source_id == Zari18pms.source))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
TwoMassPSC.h_m < 13))
# | is for peewee SQL union
query = query1 | query2
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
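# Illustrative note, not used by the carton: ``query1 | query2`` above builds a
# SQL UNION (a peewee compound select), so a source that appears in both Sagitta
# and Zari18pms is returned only once. The same de-duplication idea with plain
# Python sets:
def _union_of_catalogs_sketch(sagitta_ids, zari18pms_ids):
    """Return the de-duplicated union of two iterables of Gaia source_ids."""
    return set(sagitta_ids) | set(zari18pms_ids)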
class MWM_YSO_PMS_BOSS_Carton(BaseCarton):
"""
YSOs - Pre-main sequence, BOSS
Shorthand name: mwm_yso_pms_boss
Comments: New, Split from PMS
Simplified Description of selection criteria:
Selecting the clustered sources from the catalog of vetted
pre-main sequence stars
Wiki page: https://wiki.sdss.org/display/MWM/YSO+selection+function
Additional source catalogs needed: catalogdb.sagitta, catalogdb.zari18pms
Return columns: Gaia id, 2mass id, G, BP, RP, J, H, K, parallax
cadence options for these targets:
boss_bright_3x1 if RP<14.76 |
boss_bright_4x1 if RP<15.075 |
boss_bright_5x1 if RP<15.29 |
boss_bright_6x1 if RP<15.5
Implementation: (in sagitta | in zari18pms) & rp<15.5
    lead contact: <NAME>
"""
# peewee Model name ---> postgres table name
# Gaia_DR2(CatalogdbModel)--->'gaia_dr2_source'
# Zari18pms(CatalogdbModel)--->'catalogdb.zari18pms'
# Zari18ums(CatalogdbModel)--->'catalogdb.zari18ums'
# Sagitta(CatalogdbModel)--->'catalogdb.sagitta'
# TwoMassPSC(CatalogdbModel)--->'catalogdb.twomass_psc'
name = 'mwm_yso_pms_boss'
category = 'science'
instrument = None # instrument is set in post_process()
cadence = None # cadence is set in post_process()
program = 'mwm_yso'
mapper = 'MWM'
priority = 2700
def build_query(self, version_id, query_region=None):
# join with Sagitta
query1 = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m, Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.switch(TIC_v8)
.join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(Gaia_DR2)
.join(Sagitta,
on=(Gaia_DR2.source_id == Sagitta.source_id))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
Gaia_DR2.phot_rp_mean_mag < 15.5))
# join with Zari18pms
query2 = (CatalogToTIC_v8
.select(CatalogToTIC_v8.catalogid, Gaia_DR2.source_id,
Gaia_DR2.ra.alias('gaia_dr2_ra'),
Gaia_DR2.dec.alias('gaia_dr2_dec'),
TwoMassPSC.pts_key,
TwoMassPSC.designation.alias('twomass_psc_designation'),
Gaia_DR2.phot_g_mean_mag, Gaia_DR2.phot_bp_mean_mag,
Gaia_DR2.phot_rp_mean_mag.alias('gaia_dr2_rp'),
TwoMassPSC.j_m, TwoMassPSC.h_m,
TwoMassPSC.k_m, Gaia_DR2.parallax)
.join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
.join(Gaia_DR2, on=(TIC_v8.gaia_int == Gaia_DR2.source_id))
.switch(TIC_v8)
.join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
.switch(Gaia_DR2)
.join(Zari18pms,
on=(Gaia_DR2.source_id == Zari18pms.source))
.where(CatalogToTIC_v8.version_id == version_id,
CatalogToTIC_v8.best >> True,
Gaia_DR2.phot_rp_mean_mag < 15.5))
# | is for peewee SQL union
query = query1 | query2
if query_region:
query = (query
.join_from(CatalogToTIC_v8, Catalog)
.where(peewee.fn.q3c_radial_query(Catalog.ra,
Catalog.dec,
query_region[0],
query_region[1],
query_region[2])))
return query
def post_process(self, model):
"""
cadence options for these targets:
boss_bright_3x1 if RP<14.76 |
boss_bright_4x1 if RP<15.075 |
boss_bright_5x1 if RP<15.29 |
boss_bright_6x1 if RP<15.5
"""
cursor = self.database.execute_sql(
"select catalogid, gaia_dr2_rp from " +
" sandbox.temp_mwm_yso_pms_boss ;")
output = cursor.fetchall()
for i in range(len(output)):
current_catalogid = output[i][0]
current_rp = output[i][1]
if(current_rp < 14.76):
current_instrument = 'BOSS'
current_cadence = 'bright_3x1'
elif(current_rp < 15.075):
current_instrument = 'BOSS'
current_cadence = 'bright_4x1'
elif(current_rp < 15.29):
current_instrument = 'BOSS'
current_cadence = 'bright_5x1'
elif(current_rp < 15.5):
current_instrument = 'BOSS'
current_cadence = 'bright_6x1'
else:
# All cases should be covered above so we should not get here.
current_instrument = None
current_cadence = None
raise TargetSelectionError('error in mwm_yso_pms_boss ' +
'post_process(): ' +
'instrument = None, cadence= None')
if current_instrument is not None:
self.database.execute_sql(
" update sandbox.temp_mwm_yso_pms_boss " +
" set instrument = '" + current_instrument + "'"
" where catalogid = " + str(current_catalogid) + ";")
if current_cadence is not None:
self.database.execute_sql(
" update sandbox.temp_mwm_yso_pms_boss " +
" set cadence = '" + current_cadence + "'"
" where catalogid = " + str(current_catalogid) + ";")
| 1.90625 | 2 |
docker_compose_deploy/loonflow_shutongflow/setup_shutongflow.py | youjiajia/loonflow | 2 | 12792687 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
@author: children1987
"""
from utils import get_config_info, replace_in_file
def main():
cfg_file = '/opt/shutongFlow/apps/apps/settings.py'
    # Update the database configuration
replace_in_file(cfg_file, "'USER': 'shutongflow'", "'USER': 'root'")
config_info = get_config_info()
password = config_info['mysql']['root_password']
replace_in_file(
cfg_file, "'PASSWORD': '<PASSWORD>'", "'PASSWORD': '{}'".format(password)
)
    # Make the front-end dev server reachable from outside in dev mode
f = '/opt/shutongFlow/fronted/config/index.js'
replace_in_file(f, "host: 'localhost'", "host: '0.0.0.0'")
f = '/opt/shutongFlow/fronted/src/main.js'
ip = config_info['ip']
replace_in_file(
f,
"axios.defaults.baseURL = 'http://127.0.0.1:6062/'",
"axios.defaults.baseURL = 'http://{}:6062/'".format(ip)
)
if __name__ == '__main__':
main()
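# Illustrative sketch: ``replace_in_file`` comes from this project's ``utils``
# module, which is not shown here. A minimal implementation consistent with how
# it is called above might look like the function below; this is an assumption
# for illustration, not the project's actual helper.
def _replace_in_file_sketch(path, old, new):
    with open(path, encoding='utf-8') as fh:
        text = fh.read()
    with open(path, 'w', encoding='utf-8') as fh:
        fh.write(text.replace(old, new))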
| 2.015625 | 2 |
docker/scripts/split_primers.py | OncoRNALab/CIRCprimerXL | 0 | 12792688 | <gh_stars>0
#!/usr/bin/python3
import argparse
parser = argparse.ArgumentParser(description='give arguments to main primer_xc script')
parser.add_argument('-i', nargs=1, required=True, help='input primer file')
args = parser.parse_args()
input_primers = args.i[0]
primer_in = open(input_primers)
primers = {}
circ_info_keys = ("SEQUENCE_ID", "SEQUENCE_TEMPLATE", "SEQUENCE_TARGET")
# read all info into dictionary
for line in primer_in:
    key, value = line.split("=", 1)  # split on the first '=' only, in case a value contains '='
value = value.rstrip()
primers[key] = value
template = primers["SEQUENCE_TEMPLATE"]
circRNA = primers["SEQUENCE_ID"]
circ_ID, chrom, start, end = circRNA.split("_")
nr_p_out = primers["PRIMER_LEFT_NUM_RETURNED"]
primer_in.close()
# read general info into file
general_info = open("general_primer_design_info_" + circ_ID + ".txt", "a")
for info in primers:
if "_NUM_" in info or "_EXPLAIN" in info or any(x in info for x in circ_info_keys):
general_info.write(info + '=' + str(primers[info]) +'\n')
general_info.close()
# make file for bowtie
primer_file = open("primer_spec_input_" + circ_ID + ".txt", "a")
# make general file with list primers
all_primers = open("all_primers_" + circ_ID + ".txt", 'w')
all_amplicon = open("amplicon_folding_input_" + circ_ID + ".txt", 'w')
all_primers_dict = {}
for primer_index in range(int(nr_p_out)):
FWD = primers[("PRIMER_LEFT_" + str(primer_index) + "_SEQUENCE")]
FWD_qual = len(FWD) * "I"
REV = primers[("PRIMER_RIGHT_" + str(primer_index) + "_SEQUENCE")]
REV_qual = len(REV) * "I"
PRIMER_LEFT_TM = primers[("PRIMER_LEFT_" + str(primer_index) + "_TM")]
PRIMER_RIGHT_TM = primers[("PRIMER_RIGHT_" + str(primer_index) + "_TM")]
PRIMER_LEFT_GC_PERCENT = primers[("PRIMER_LEFT_" + str(primer_index) + "_GC_PERCENT")]
PRIMER_RIGHT_GC_PERCENT = primers[("PRIMER_RIGHT_" + str(primer_index) + "_GC_PERCENT")]
# bowtie input file
# write FWD + REV
primer_file.write(circ_ID + "_primer_" + str(primer_index) + "_FWD_REV" + "\t")
primer_file.write(FWD + "\t" + FWD_qual + "\t" + REV + "\t" + REV_qual + "\n")
# write REV + FWD
primer_file.write(circ_ID + "_primer_" + str(primer_index) + "_REV_FWD" + "\t")
primer_file.write(REV + "\t" + REV_qual + "\t" + FWD + "\t" + FWD_qual + "\n")
# write FWD + FWD
primer_file.write(circ_ID + "_primer_" + str(primer_index) + "_FWD_FWD" + "\t")
primer_file.write(FWD + "\t" + FWD_qual + "\t" + FWD + "\t" + FWD_qual + "\n")
# write REV + REV
primer_file.write(circ_ID + "_primer_" + str(primer_index) + "_REV_REV" + "\t")
primer_file.write(REV + "\t" + REV_qual + "\t" + REV + "\t" + REV_qual + "\n")
# get amplicon and make file for NUPACK
FWD_pos, FWD_len = primers['PRIMER_LEFT_'+ str(primer_index)].split(",")
REV_pos, REV_len = primers['PRIMER_RIGHT_'+ str(primer_index)].split(",")
amplicon = template[int(FWD_pos):int(REV_pos) + 1]
all_amplicon.write("> amplicon_" + circ_ID + "_primer" + str(primer_index) + "_" + amplicon + "\n")
# general primer file (for filtering), first put in dict, will be sorted (see below)
all_primers_dict[circ_ID + "\t" + chrom + "\t" + start + "\t" + end + '\t' + str(primer_index) + '\t' + FWD + '\t' + REV + '\t' +
FWD_pos + '\t' + FWD_len + '\t' + REV_pos +'\t' + REV_len + '\t' + PRIMER_LEFT_TM + '\t' + PRIMER_RIGHT_TM + '\t' +
PRIMER_LEFT_GC_PERCENT + '\t' + PRIMER_RIGHT_GC_PERCENT + '\t' + amplicon + '\n'] = len(amplicon)
# sort primers according to amp size (smallest is best) and then print to all_amplicon
all_primers_sorted = {k: v for k, v in sorted(all_primers_dict.items(), key=lambda item: item[1])}
for primer in all_primers_sorted:
all_primers.write(primer)
primer_file.close()
all_primers.close()
all_amplicon.close()
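# Illustrative note: the script expects primer3-style "KEY=VALUE" records, with
# SEQUENCE_ID encoding "<circID>_<chrom>_<start>_<end>". The helper below (the
# values are made up) shows how such lines end up in the ``primers`` dict; it is
# not called by the script.
def _example_primer_records():
    lines = [
        "SEQUENCE_ID=circ0001_chr1_1000_2000",
        "PRIMER_LEFT_NUM_RETURNED=1",
        "PRIMER_LEFT_0_SEQUENCE=ACGTACGTACGT",
    ]
    return dict(line.split("=", 1) for line in lines)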
| 3.078125 | 3 |
src/pce/adaptive_pce.py | QianWanghhu/IES-FF | 0 | 12792689 |
import numpy as np
import pandas as pd
from veneer.pest_runtime import *
from veneer.manage import start,kill_all_now
import pyapprox as pya
from functools import partial
from pyapprox.adaptive_sparse_grid import max_level_admissibility_function
from pyapprox.adaptive_polynomial_chaos import variance_pce_refinement_indicator
from pyapprox.univariate_quadrature import clenshaw_curtis_rule_growth
from pyapprox.variable_transformations import AffineRandomVariableTransformation
from funcs.read_data import variables_prep, file_settings
from funcs.modeling_funcs import vs_settings, \
modeling_settings, paralell_vs, obtain_initials, change_param_values
# Create the copy of models and veneer list
project_name = 'MW_BASE_RC10.rsproj'
veneer_name = 'vcmd45\\FlowMatters.Source.VeneerCmd.exe'
first_port=15000; num_copies = 8
_, things_to_record, _, _, _ = modeling_settings()
processes, ports = paralell_vs(first_port, num_copies, project_name, veneer_name)
vs_list = vs_settings(ports, things_to_record)
# obtain the initial values of parameters
initial_values = obtain_initials(vs_list[0])
def run_source_lsq(vars, vs_list=vs_list):
"""
    Run the Source model for each parameter sample and return the RMSE against observations.
The function is called by AdaptiveLejaPCE.
"""
from funcs.modeling_funcs import modeling_settings, generate_observation_ensemble
import spotpy as sp
print('Read Parameters')
parameters = pd.read_csv('../data/Parameters-PCE.csv', index_col='Index')
# Define objective functions
# Use annual or monthly loads
def timeseries_sum(df, temp_scale = 'annual'):
"""
        Sum a timeseries at the requested temporal scale.
        temp_scale: str, 'annual' (default) or 'monthly'
"""
assert temp_scale in ['monthly', 'annual'], 'The temporal scale given is not supported.'
if temp_scale == 'monthly':
sum_126001A = df.resample('M').sum()
else:
month_126001A = df.resample('M').sum()
sum_126001A = pd.DataFrame(index = np.arange(df.index[0].year, df.index[-1].year),
columns=df.columns)
for i in range(sum_126001A.shape[0]):
sum_126001A.iloc[i, :] = month_126001A.iloc[i*12: (i+1)*12, :].sum()
return sum_126001A
# End timeseries_sum()
# import observation if the output.txt requires the use of obs.
date_range = pd.to_datetime(['2009/07/01', '2018/06/30'])
observed_din = pd.read_csv(f'{file_settings()[1]}126001A.csv', index_col='Date')
observed_din.index = pd.to_datetime(observed_din.index)
observed_din = observed_din.loc[date_range[0]:date_range[1], :].filter(items=[observed_din.columns[0]]).apply(lambda x: 1000 * x)
# loop over the vars and try to use parallel
parameter_df = pd.DataFrame(index=np.arange(vars.shape[1]), columns=parameters.Name_short)
for i in range(vars.shape[1]):
parameter_df.iloc[i] = vars[:, i]
# set the time period of the results
retrieve_time = [pd.Timestamp('2009-07-01'), pd.Timestamp('2018-06-30')]
# define the modeling period and the recording variables
_, _, criteria, start_date, end_date = modeling_settings()
din = generate_observation_ensemble(vs_list,
criteria, start_date, end_date, parameter_df, retrieve_time)
# obtain the sum at a given temporal scale
# din_pbias = sp.objectivefunctions.pbias(observed_din[observed_din.columns[0]], din[column_names[0]])
din_126001A = timeseries_sum(din, temp_scale = 'annual')
obs_din = timeseries_sum(observed_din, temp_scale = 'annual')
din_126001A = pd.DataFrame(din_126001A,dtype='float').values
obs_din = pd.DataFrame(obs_din,dtype='float').values
# breakpoint()
resid = din_126001A - obs_din
rmse = (np.mean(resid ** 2, axis=0)) ** 0.5
if rmse[0] == 0: rmse[0] = 1e-8
rmse = rmse.reshape(rmse.shape[0], 1)
print(f'Finish {rmse.shape[0]} run')
return rmse
# END run_source_lsq()
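# Illustrative sketch, not part of the workflow: AdaptiveLejaPCE passes
# run_source_lsq a (num_vars, num_samples) array and expects a
# (num_samples, 1) array back. A cheap stand-in with the same interface (handy
# for exercising the PCE loop without launching Source) could look like this.
def _quadratic_test_objective(vars):
    import numpy as _np
    vals = _np.sum(vars ** 2, axis=0) + 1.0
    return vals.reshape(vars.shape[1], 1)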
# read parameter distributions
datapath = file_settings()[1]
para_info = pd.read_csv(datapath + 'Parameters-PCE.csv')
# define the variables for PCE
param_file = file_settings()[-1]
ind_vars, variable = variables_prep(param_file, product_uniform='uniform', dummy=False)
var_trans = AffineRandomVariableTransformation(variable, enforce_bounds=True)
# Create PyApprox model
n_candidate_samples = 10000
candidate_samples = -np.cos(np.pi*pya.sobol_sequence(var_trans.num_vars(),
n_candidate_samples))
pce = pya.AdaptiveLejaPCE(var_trans.num_vars(), candidate_samples=candidate_samples)
# Define criteria
max_level = 6
err_tol = 1e-8
max_num_samples = 100
max_level_1d = [max_level]*(pce.num_vars)
admissibility_function = partial(
max_level_admissibility_function, max_level, max_level_1d,
max_num_samples, err_tol)
refinement_indicator = variance_pce_refinement_indicator
pce.set_function(run_source_lsq, var_trans)
pce.set_refinement_functions(
refinement_indicator,
admissibility_function,
clenshaw_curtis_rule_growth
)
# Generate emulator
pce.build()
# store PCE
import pickle
pickle.dump(pce, open(f'{file_settings()[0]}\pce-rmse.pkl', "wb"))
# set the parameter values to initial values
for vs in vs_list:
vs = change_param_values(vs, initial_values, fromList=True)
kill_all_now(processes) | 2.046875 | 2 |
app/models/base.py | xiaojieluo/flask_restapi_template | 0 | 12792690 | from datetime import datetime
from flask_sqlalchemy import SQLAlchemy as _SQLAlchemy, BaseQuery
from sqlalchemy import inspect, Column, Integer, SmallInteger, orm
from contextlib import contextmanager
from app.libs.error_code import NotFound
class SQLAlchemy(_SQLAlchemy):
@contextmanager
def auto_commit(self):
try:
yield
self.session.commit()
except Exception as e:
            self.session.rollback()  # roll back this session (same object as db.session)
raise e
class Query(BaseQuery):
def filter_query(self, **kwargs):
if 'status' not in kwargs.keys():
kwargs['status'] = 1
return super(Query, self).filter_by(**kwargs)
db = SQLAlchemy(query_class = Query)
class Base(db.Model):
__abstract__ = True
def set_attrs(self, attrs_dict):
for key, value in attrs_dict.items():
if hasattr(self, key) and key != 'id':
setattr(self, key, value)
def keys(self):
return self.fields
class MixinJSONSerializer:
@orm.reconstructor
def init_on_load(self):
self._fields = []
# self._include = []
self._exclude = []
self._set_fields()
self.__prune_fields()
def _set_fields(self):
pass
def __prune_fields(self):
columns = inspect(self.__class__).columns
if not self._fields:
all_columns = set(columns.keys())
self._fields = list(all_columns - set(self._exclude))
def hide(self, *args):
for key in args:
self._fields.remove(key)
return self
def keys(self):
return self._fields
def __getitem__(self, key):
return getattr(self, key)
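# Illustrative usage note: because MixinJSONSerializer defines keys() and
# __getitem__, an instance can be passed straight to dict() (and from there to
# jsonify). The toy class below only demonstrates that keys()/__getitem__
# protocol; it is not part of the application models.
class _DictProtocolDemo:
    def __init__(self):
        self.name, self.status = 'example', 1
    def keys(self):
        return ['name', 'status']
    def __getitem__(self, key):
        return getattr(self, key)
# dict(_DictProtocolDemo()) == {'name': 'example', 'status': 1}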
| 2.3125 | 2 |
PhaseBot/bot.py | Pythogon/PhaseBot | 6 | 12792691 | import os
import discord
import Cogs #type: ignore
import glo #type: ignore
from discord.ext import commands
class PhaseBot(commands.Bot):
""" The bot """
async def on_ready(self):
print("Discodo!") # Great, it's working
await bot.change_presence(activity = discord.Activity(name = f"my startup...", type = discord.ActivityType.watching)) # Simplistic help
ud = glo.JSONREAD("userdata.json")
del ud["default"]
for k in ud:
k = int(k)
u = bot.get_user(k)
if u is None:
name = "Member left"
else:
name = u.name
glo.SETNAME(k, name)
await bot.change_presence(activity = discord.Activity(name = f"le noir | v{glo.VERSION}", type = discord.ActivityType.watching)) # Simplistic help
async def on_message(self, message):
if message.channel.id == 796374619900084255:
os.system("git pull")
os.system("pm2 restart Phase")
if message.author.bot: return # We don't like bots
return await bot.process_commands(message)
bot = PhaseBot(command_prefix = glo.PREFIX, intents = discord.Intents.all()) # Create the bot instance
bot.remove_command('help') # Removing default help (I don't like it)
bot.add_cog(Cogs.Admin(bot)) # Many cog
bot.add_cog(Cogs.Bank(bot))
bot.add_cog(Cogs.Counting(bot))
bot.add_cog(Cogs.General(bot))
bot.add_cog(Cogs.Listeners(bot))
bot.add_cog(Cogs.Starboard(bot))
bot.add_cog(Cogs.Tasks(bot))
bot.run(glo.GLOBAL_READ("token"))
| 2.359375 | 2 |
usl_score/models/__init__.py | vitouphy/usl_dialogue_metric | 5 | 12792692 | name="models"
from .VUPScorer import *
from .NUPScorer import *
from .MLMScorer import *
from .distinct import *
from .composite import *
| 1.070313 | 1 |
rastro_python/setup.py | duncaneddy/rastro | 1 | 12792693 | <reponame>duncaneddy/rastro
#!/usr/bin/env python
from setuptools import setup
from setuptools_rust import RustExtension
from os import path
if __name__ == "__main__":
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name="rastro",
version="0.0.0", # Do NOT edit. Will be updated for release by CI pipeline
classifiers=[
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Rust",
],
packages=["rastro"],
rust_extensions=[
RustExtension("rastro_python.constants"),
RustExtension("rastro_python.orbits")
],
include_package_data=True,
zip_safe=False,
long_description=long_description,
long_description_content_type='text/markdown',
) | 1.351563 | 1 |
Its-Fair-Game-Day-JC.py | maleich/Its-Fair-Game-Day-JC | 0 | 12792694 | import random
import time
print("this game is blackjack. If you are wondering which focus day this is connected to, it isn't connected to any of them. ")
print()
print("I started making it, and I forgot it had to be related to a focus day, but it was too late to switch, so here it is ")
print()
print("how to play: your goal is to get your card total closest to 21, and to beat the dealer. If you get over 21, you lose. stand to give the turn to the dealer, and hit to draw a new card")
#defines lists and values
cardlist=[]
dealerlist=[]
cardlistStr=[]
dealerlistStr=[]
c1=0
c2=0
c3=0
c4=0
a=0
b=0
da=0
db=0
winx=0
losex=0
#defines the list for where I will take card names
carddeck=['A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K','A','2','3','4','5','6','7','8','9','10','J','Q','K']
#Assigns values to the card names
cardvalue={
'A': 11,
'2': 2,
'3': 3,
'4': 4,
'5': 5,
'6': 6,
'7': 7,
'8': 8,
'9': 9,
'10': 10,
'J': 10,
'Q': 10,
'K': 10
}
#this function crashes python
def crash():
try:
crash()
except:
crash()
# blackjack function
def blackjack():
#define lose, tie, win functions that happen when you lose, win or tie
def lose():
global cardlist
global dealerlist
global cardlistStr
global dealerlistStr
global losex
losex=losex+1
print('you have won '+str(winx) + "times and lost "+str(losex)+" times")
print()
print('you lost :(')
print("The dealer's cards are ")
print(dealerlistStr)
print()
print("your cards are ")
print(cardlistStr)
cardlist=[]
dealerlist=[]
cardlistStr=[]
dealerlistStr=[]
again=input('try again? ')
if again==('yes'):
blackjack()
if again==('no'):
crash()
else:
again=input('yes or no')
if again==('yes'):
blackjack()
if again==('no'):
crash()
def win():
global cardlist
global dealerlist
global cardlistStr
global dealerlistStr
global winx
winx=winx+1
print('you have won '+str(winx) + " times and lost "+str(losex)+" times")
print()
print('you won :)')
print("The dealer's cards are ")
print(dealerlistStr)
print()
print("your cards are ")
print(cardlistStr)
cardlist=[]
dealerlist=[]
cardlistStr=[]
dealerlistStr=[]
again2=input('play again? ')
if again2==('yes'):
blackjack()
if again2==('no'):
crash()
        if again2 != ('yes') and again2 != ('no'):
again2=input('yes or no')
if again2==('yes'):
blackjack()
if again2==('no'):
crash()
def tie():
global cardlist
global dealerlist
global cardlistStr
global dealerlistStr
print("The dealer's cards are ")
print(dealerlistStr)
print()
print("your cards are ")
print(cardlistStr)
cardlist=[]
dealerlist=[]
cardlistStr=[]
dealerlistStr=[]
again2=input('you tied, play again? ')
if again2==('yes'):
blackjack()
if again2==('no'):
print('ok')
crash()
        if again2 != ('yes') and again2 != ('no'):
again2=input('yes or no')
if again2==('yes'):
blackjack()
if again2==('no'):
print('ok')
crash()
#globals the lists
global cardlist
global dealerlist
global cardlistStr
global dealerlistStr
#defines lists and some random ints
cardlist=[]
dealerlist=[]
cardlistStr=[]
dealerlistStr=[]
c1=(random.randint(0,51))
c2=(random.randint(0,51))
c3=(random.randint(0,51))
c4=(random.randint(0,51))
#this prints what your cards are at the start of the game
print('Your cards are '+str(carddeck[c1])+' and '+str(carddeck[c2]))
print("The dealer's open card is "+str(carddeck[c3]))
#after the dealer finishes their turn, this code checks who wins, loses, or ties
def standcheck():
if sum(dealerlist)<=(21):
if sum(dealerlist)>sum(cardlist):
lose()
if sum(cardlist)>sum(dealerlist):
win()
if sum(dealerlist)==(21):
if sum(dealerlist)==sum(cardlist):
tie()
else:
lose()
if sum(dealerlist)>(21):
for x in range(len(dealerlist)):
if dealerlist[x]==(11):
dealerlist[x]=(1)
if sum(dealerlist)>(21):
win()
#This determines what move the dealer does when it is their turn
def stand():
if sum(dealerlist)>(17):
standcheck()
if sum(dealerlist)==sum(cardlist):
standcheck()
if sum(dealerlist)>sum(cardlist):
lose()
else:
dc1=(random.randint(0,51))
dealerlist.append(cardvalue[carddeck[dc1]])
dealerlistStr.append(carddeck[dc1])
while sum(dealerlist)<=(16):
dc2=(random.randint(0,51))
dealerlist.append(cardvalue[carddeck[dc2]])
dealerlistStr.append(carddeck[dc2])
standcheck()
if sum(dealerlist)>(17):
standcheck()
#Adds all the beginning variables to their resepctive lists
cardlist.append(cardvalue[carddeck[c1]])
cardlist.append(cardvalue[carddeck[c2]])
dealerlist.append(cardvalue[carddeck[c3]])
dealerlist.append(cardvalue[carddeck[c4]])
cardlistStr.append(carddeck[c1])
cardlistStr.append(carddeck[c2])
dealerlistStr.append(carddeck[c3])
dealerlistStr.append(carddeck[c4])
    # ask the player whether to hit or stand
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
    if sum(cardlist)==(21):
win()
if choice1==('hit'):
c5=random.randint(0,51)
cardlist.append(cardvalue[carddeck[c5]])
cardlistStr.append(carddeck[c5])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', and '+ str(carddeck[c5]))
if sum(cardlist)>(21):
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
if sum(cardlist)<(21):
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c6=random.randint(0,51)
cardlist.append(cardvalue[carddeck[c6]])
cardlistStr.append(carddeck[c6])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', and '+(carddeck[c6]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
else:
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c7=(random.randint(0,51))
cardlist.append(cardvalue[carddeck[c7]])
cardlistStr.append(carddeck[c7])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', and '+(carddeck[c7]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
else:
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c8=(random.randint(0,51))
cardlist.append(cardvalue[carddeck[c8]])
cardlistStr.append(carddeck[c8])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', and '+(carddeck[c8]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
else:
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c9=(random.randint(0,51))
cardlist.append(cardvalue[carddeck[c9]])
cardlistStr.append(carddeck[c9])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', and '+(carddeck[c9]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
else:
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c10=(random.randint(0,51))
cardlist.append(cardvalue[carddeck[c10]])
cardlistStr.append(carddeck[c10])
                                                print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
else:
choice1=input('Hit or stand? ')
while choice1!=('hit') and choice1!=('stand'):
choice1=input('Pick either hit or stand ')
if choice1==('stand'):
stand()
if choice1==('hit'):
c11=(random.randint(0,51))
cardlist.append(cardvalue[carddeck[c11]])
cardlistStr.append(carddeck[c11])
print('Your cards are '+str(carddeck[c1])+', '+str(carddeck[c2])+', '+ str(carddeck[c5])+', '+(carddeck[c6])+', '+(carddeck[c7])+', '+(carddeck[c8])+', '+(carddeck[c9])+', '+(carddeck[c10])+" and "+(carddeck[c11]))
if sum(cardlist)>21:
for x in range(len(cardlist)):
if cardlist[x]==(11):
cardlist[x]=(1)
if sum(cardlist)>(21):
lose()
if sum(cardlist)==21:
print('BLACKJACK')
win()
if choice1==('stand'):
stand()
# start the game
blackjack()
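# Illustrative design note, not called by the game: the hit/stand logic above
# repeats the same block once per possible extra card, which is why the nesting
# gets so deep. A simplified loop-based sketch of the player's turn (ace
# demotion from 11 to 1 is omitted here) could look like this, using the same
# carddeck/cardvalue structures defined above.
def _player_turn_sketch(cardlist, cardlistStr):
    while True:
        total = sum(cardlist)
        if total > 21:
            return 'bust'
        if total == 21:
            return 'blackjack'
        if input('Hit or stand? ') != 'hit':
            return 'stand'
        card = random.randint(0, 51)
        cardlist.append(cardvalue[carddeck[card]])
        cardlistStr.append(carddeck[card])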
| 3.6875 | 4 |
tests/cli/check_mode_test.py | twanh/note-system | 1 | 12792695 | <reponame>twanh/note-system
import os
from typing import List
from unittest.mock import Mock
from unittest.mock import patch
import pytest
from py.path import local as Path
from notesystem.modes.base_mode import ModeOptions
from notesystem.modes.check_mode.check_mode import CheckMode
from notesystem.modes.check_mode.check_mode import CheckModeArgs
from notesystem.modes.check_mode.errors.markdown_errors import MathError
from notesystem.modes.check_mode.errors.markdown_errors import TodoError
from notesystem.notesystem import main
def test_required_arguments():
"""Does the check mode fail without in path"""
with pytest.raises(SystemExit):
main(['check'])
@patch('notesystem.modes.check_mode.check_mode.CheckMode.start')
def test_check_mode_called_with_only_in_path(mock_check_mode_start: Mock):
"""Tests that the correct arguments are passed
to check mode with only a input path
"""
main(['check', 'tests/test_documents'])
expected_args: CheckModeArgs = {
'in_path': 'tests/test_documents',
'fix': False,
'disabled_errors': [],
'simple_errors': False,
}
expected_options: ModeOptions = {
'visual': True,
'args': expected_args, # type: ignore
}
mock_check_mode_start.assert_called_with(expected_options)
@patch('notesystem.modes.check_mode.check_mode.CheckMode.start')
def test_check_mode_called_with_in_path_and_fix(mock_check_mode_start: Mock):
"""Tests that the correct arguments are passed to check mode with
a input path and fixing enabled
"""
main(['check', 'tests/test_documents', '-f'])
expected_args: CheckModeArgs = {
'in_path': 'tests/test_documents',
'fix': True,
'disabled_errors': [],
'simple_errors': False,
}
expected_options: ModeOptions = {
'visual': True,
'args': expected_args, # type: ignore
}
mock_check_mode_start.assert_called_with(expected_options)
print(mock_check_mode_start)
main(['check', 'tests/test_documents', '--fix'])
mock_check_mode_start.assert_called_with(expected_options)
@patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir')
def test_check_mode_checks_dir_when_given_dir(mock: Mock):
"""Test that when given a directory path, _check_dir is called"""
main(['check', 'tests/test_documents'])
mock.assert_called_once_with('tests/test_documents')
@patch('notesystem.modes.check_mode.check_mode.CheckMode._check_dir')
@patch('notesystem.modes.check_mode.check_mode.CheckMode._check_file')
def test_check_mode_checks_file_when_given_file(
_check_file: Mock,
_check_dir: Mock,
):
"""Test that when given a filepath only _check_file is called"""
# Some parts need access to the terminal,
    # but they don't have access, so they raise ValueError
# Which can be ignored
try:
main(['check', 'tests/test_documents/ast_error_test_1.md'])
except ValueError:
pass
# _check_file should be called with the filepath
_check_file.assert_called_with('tests/test_documents/ast_error_test_1.md')
# Check dir should not be called
_check_dir.assert_not_called()
# Test that fix is called
@patch('notesystem.modes.check_mode.check_mode.CheckMode._fix_doc_errors')
def test_fix_is_called_when_fix_arg_is_passed(_fix_doc_errors: Mock):
"""Test that _fix_doc_errors is called when fixing is enabled"""
try:
main(['check', 'tests/test_documents/ast_error_test_1.md', '-f'])
except ValueError:
pass
_fix_doc_errors.assert_called()
# Test errors
def test_check_mode_raises_with_non_existing_dir_or_file():
"""Test that when a invalid path is given SystemExit is raised"""
with pytest.raises(SystemExit):
main(['check', 'no dir'])
def test_check_mode_check_dir_raises_with_file_and_not_existing_dir():
"""Test that _check_dir raises when the input is not a dir"""
check_mode = CheckMode()
# Totally invalid dir
with pytest.raises(NotADirectoryError):
check_mode._check_dir('not a dir')
# With filepath
with pytest.raises(NotADirectoryError):
check_mode._check_dir('tests/test_documents/ast_error_test_1.md')
def test_check_mode_check_dir_returns():
"""Test that check_mode dirs returns as much doc errors as are
present in the folder
TODO: Make test independent of test/test_documents file amount
"""
check_mode = CheckMode()
# Set the _disabled_errros manually, because it is set in start()
# which is not run in this test
check_mode._disabled_errors = []
errors = check_mode._check_dir('tests/test_documents')
assert len(errors) == 3
def test_check_mode_check_file_returns():
"""Test that _check_file checks the file and returns
errors and the correct filepath
"""
check_mode = CheckMode()
check_mode._disabled_errors = []
errors = check_mode._check_file('tests/test_documents/contains_errors.md')
assert errors['errors'] is not None
assert errors['file_path'] == 'tests/test_documents/contains_errors.md'
@pytest.mark.parametrize(
'wrong,good', [
(
"""\
[ ] Invalid todo
[x] Ivalid todo
""", """\
- [ ] Invalid todo
- [x] Ivalid todo
""",
),
(
"""\
- [ ] Should be good
- [x] Deff is good
""", """\
- [ ] Should be good
- [x] Deff is good
""",
),
],
)
def test_check_mode_fix_file(tmpdir, wrong, good):
file = tmpdir.join('test.md')
file.write(wrong)
check_mode = CheckMode()
check_mode._disabled_errors = []
errors = check_mode._check_file(file.strpath)
check_mode._fix_doc_errors(errors)
c1 = file.read()
assert c1 == good
# Test disabling errors
# Test passing the flag
# Passing the flag should result in the _disabled_errors being set
@patch('notesystem.modes.check_mode.check_mode.CheckMode.start')
def test_check_mode_disable_errors_with_one_flag(mock_check_mode_start: Mock):
main([
'check',
'tests/test_documents/contains_errors.md',
'--disable-todo',
])
expected_args: CheckModeArgs = {
'in_path': 'tests/test_documents/contains_errors.md',
'fix': False,
'disabled_errors': [TodoError.get_error_name()],
'simple_errors': False,
}
expected_options: ModeOptions = {
'visual': True,
'args': expected_args, # type: ignore
}
mock_check_mode_start.assert_called_once_with(expected_options)
@patch('notesystem.modes.check_mode.check_mode.CheckMode.start')
def test_check_mode_disable_errors_with_multiple_flags(
mock_check_mode_start: Mock,
):
main([
'check', 'tests/test_documents/contains_errors.md',
'--disable-todo', '--disable-math-error',
])
expected_args: CheckModeArgs = {
'in_path': 'tests/test_documents/contains_errors.md',
'fix': False,
'disabled_errors': [
MathError.get_error_name(),
TodoError.get_error_name(),
],
'simple_errors': False,
}
expected_options: ModeOptions = {
'visual': True,
'args': expected_args, # type: ignore
}
mock_check_mode_start.assert_called_once_with(expected_options)
# Test the actual disabling of the error
# When an error is in _disabled_errors is should not be found
# in a document that contains the disabled error
@pytest.mark.parametrize(
'file_contents,disabled_errors,valid', [
(
"""\
[ ] Invalid todo
[x] Ivalid todo
""",
TodoError.get_error_name(),
True,
),
(
"""
There is $$invalid$$ math in this line
There only is correct $math$ in this line
There is one $correct$ and one $$wrong$$ math block
""",
[MathError.get_error_name()],
True,
), (
"""\
[ ] Invalid todo
There is $$invalid$$ math in this line
""",
[MathError.get_error_name(), TodoError.get_error_name()],
True,
),
(
"""\
[ ] Invalid todo
There is $$invalid$$ math in this line
""",
[TodoError.get_error_name()], # Only disable todo errors
False,
),
],
)
def test_check_mode_disbled_errors_are_not_returned(
tmpdir: Path,
file_contents: str,
disabled_errors: List[str],
valid: bool,
):
file = tmpdir.join('test.md')
file.write(file_contents)
check_mode = CheckMode()
check_mode._disabled_errors = disabled_errors
doc_errors = check_mode._check_file(file.strpath)
for error in doc_errors['errors']:
assert error['error_type'].get_error_name() not in disabled_errors
if valid:
assert len(doc_errors['errors']) == 0
else:
assert len(doc_errors['errors']) > 0
@patch('notesystem.modes.check_mode.check_mode.CheckMode._run')
def test_simple_errors_is_passed_through_correctly(mock: Mock):
# Default check -- should be disabled (false)
main(('check', 'in_path'))
assert mock.call_args.args[0]['simple_errors'] == False
# Enabled check
main(('check', 'in_path', '--simple-errors'))
assert mock.call_args.args[0]['simple_errors'] == True
@patch('notesystem.modes.check_mode.check_mode.print_simple_doc_error')
def test_print_simple_doc_error_is_called(mock: Mock):
main(['check', 'tests/test_documents', '--simple-errors'])
assert mock.call_count == len(os.listdir('tests/test_documents'))
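# Illustrative note: the assertions above imply that CheckMode._check_file
# returns a mapping with an 'errors' list and the checked 'file_path', and that
# each error row carries an 'error_type'. A minimal fake with that shape (useful
# when stubbing _check_file in new tests) could look like this; the values are
# made up.
def _fake_doc_errors(path='tests/test_documents/contains_errors.md'):
    return {'file_path': path, 'errors': [{'error_type': TodoError}]}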
| 2.28125 | 2 |
inst/tdda/referencetest/referencetest.py | noamross/tdda | 4 | 12792696 | <filename>inst/tdda/referencetest/referencetest.py
# -*- coding: utf-8 -*-
"""
referencetest.py: reference testing for test-driven data analysis.
Source repository: http://github.com/tdda/tdda
License: MIT
Copyright (c) Stochastic Solutions Limited 2016
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import tempfile
from tdda.referencetest.checkpandas import PandasComparison
from tdda.referencetest.checkfiles import FilesComparison
# DEFAULT_FAIL_DIR is the default location for writing failing output
# if assertStringCorrect or assertFileCorrect fail with 'preprocessing'
# in place. This can be overridden using the set_defaults() class method.
DEFAULT_FAIL_DIR = os.environ.get('TDDA_FAIL_DIR', tempfile.gettempdir())
class ReferenceTest(object):
"""
Class for comparing results against saved "known to be correct" reference
results.
This is typically useful when software produces either a (text or csv)
file or a string as output.
The main features are:
- If the comparison between a string and a file fails,
the actual string is written to a file and a diff
command is suggested for seeing the differences between
the actual output and the expected output.
- There is support for ignoring lines within the strings/files
that contain particular patterns or regular expressions.
This is typically useful for filtering out things like
version numbers and timestamps that vary in the output
from run to run, but which do not indicate a problem.
- There is support for re-writing the reference output
with the actual output. This, obviously, should be used
only after careful checking that the new output is correct,
either because the previous output was in fact wrong,
or because the intended behaviour has changed.
The functionality provided by this class is available through python's
standard unittest framework, via the referencetestcase module. This
provides the ReferenceTestCase class, which is a subclass of, and drop-in
replacement for unittest.TestCase. It extends that class with all of
the methods from ReferenceTest.
The functionality is also available through the pytest framework, via
the referencepytest module. This module provides all of the methods from
ReferenceTest, as functions that can be called directly as part of a
pytest suite.
"""
# Verbose flag
verbose = True
# Temporary directory
tmp_dir = DEFAULT_FAIL_DIR
# Dictionary describing which kinds of reference files should be
# regenerated when the tests are run. This should be set using the
# set_regeneration() class-method. Can be initialized via the -w option.
regenerate = {}
# Dictionary describing default location for reference data, for
# each kind. Can be initialized by set_default_data_location().
default_data_locations = {}
@classmethod
def set_defaults(cls, **kwargs):
"""
Set default parameters, at the class level. These defaults will
apply to all instances of ReferenceTest subsequently created.
The following parameters can be set:
verbose Sets the boolean verbose flag globally, to control
reporting of errors while running tests. Reference
tests tend to take longer to run than traditional
unit tests, so it is often useful to be able to see
information from failing tests as they happen, rather
than waiting for the full report at the end. Verbose
is set to True by default.
print_fn Sets the print function globally, to specify the
function to use to display information while running
tests. The function should have the same signature
as python's __future__ print function. If not
specified, a default print function is used which
writes unbuffered to sys.stdout.
tmp_dir Sets the tmp_dir property globally, to specify the
directory where temporary files are written.
Temporary files are created whenever a text file
check fails and a 'preprocess' function has been
specified. It's useful to be able to see the contents
of the files after preprocessing has taken place,
so preprocessed versions of the files are written
to this directory, and their pathnames are included
in the failure messages. If not explicitly set by
set_defaults(), the environment variable TDDA_FAIL_DIR
is used, or, if that is not defined, it defaults to
/tmp, c:\temp or whatever tempfile.gettempdir()
returns, as appropriate.
"""
for k in kwargs:
if k == 'verbose':
cls.verbose = kwargs[k]
elif k == 'print_fn':
cls.print_fn = kwargs[k]
elif k == 'tmp_dir':
cls.tmp_dir = kwargs[k]
else:
                raise Exception('set_defaults: Unrecognized option %s' % k)
@classmethod
def set_regeneration(cls, kind=None, regenerate=True):
"""
Set the regeneration flag for a particular kind of reference file,
globally, for all instances of the ReferenceTest class.
If the regenerate flag is set to True, then the framework will
regenerate reference data of that kind, rather than comparing.
All of the regenerate flags are set to False by default.
"""
cls.regenerate[kind] = regenerate
@classmethod
def set_default_data_location(self, location, kind=None):
"""
Declare the default filesystem location for reference files of a
particular kind. This sets the location globally, and will affect
all instances of the ReferenceTest class subsequently created.
The instance method set_data_location() can be used to set
the per-kind data locations for an individual instance of the class.
If calls to assertFileCorrect() (etc) are made for kinds of reference
data that hasn't had its location defined explicitly, then the
default location is used. This is the location declared for kind=None,
which *must* be specified.
If you haven't even defined the None default, and you make calls to
assertFileCorrect() (etc) using relative pathnames for the reference
data files, then it can't check correctness, so it will raise an
exception.
"""
self.default_data_locations[kind] = location
def __init__(self, assert_fn):
"""
Initializer for a ReferenceTest instance.
assert_fn Function to be used to make assertions for
unit-tests. It should take two parameters:
- a value (which should evaluate as true for
the test to pass)
- a string (to report details of how a test
failed, if the value does not evaluate as
true).
"""
self.assert_fn = assert_fn
self.reference_data_locations = dict(self.default_data_locations)
self.pandas = PandasComparison(print_fn=self.print_fn,
verbose=self.verbose)
self.files = FilesComparison(print_fn=self.print_fn,
verbose=self.verbose,
tmp_dir=self.tmp_dir)
def set_data_location(self, location, kind=None):
"""
Declare the filesystem location for reference files of a particular
kind. Typically you would subclass ReferenceTestCase and pass in these
locations though its __init__ method when constructing an instance
of ReferenceTestCase as a superclass.
If calls to assertFileCorrect() (etc) are made for kinds of reference
data that hasn't had its location defined explicitly, then the
default location is used. This is the location declared for kind=None,
which *must* be specified.
This method overrides any global defaults set from calls to the
set_default_data_location class-method.
If you haven't even defined the None default, and you make calls to
assertFileCorrect() (etc) using relative pathnames for the reference
data files, then it can't check correctness, so it will raise an
exception.
"""
self.reference_data_locations[kind] = location
def assertDatasetsEqual(self, df, ref_df,
actual_path=None, expected_path=None,
check_data=None, check_types=None,
check_order=None, condition=None, sortby=None,
precision=None):
"""
Check that an in-memory Pandas dataframe matches an in-memory
reference one.
df Actual dataframe.
ref_df Expected dataframe.
actual_path Optional parameter, giving path for file where
actual dataframe originated, used for error
messages.
expected_path Optional parameter, giving path for file where
expected dataframe originated, used for error
messages.
check_data Option to specify fields to compare values.
        check_types     Option to specify fields to compare types.
check_order Option to specify fields to compare field order.
check_extra_cols Option to specify fields in the actual dataset
to use to check that there are no unexpected
extra columns.
sortby Option to specify fields to sort by before comparing.
condition Filter to be applied to datasets before comparing.
It can be None, or can be a function that takes
a dataframe as its single parameter and returns
a vector of booleans (to specify which rows should
be compared).
precision Number of decimal places to compare float values.
The check_* comparison flags can be of any of the following:
- None (to apply that kind of comparison to all fields)
- False (to skip that kind of comparison completely)
- a list of field names
- a function taking a dataframe as its single parameter, and
returning a list of field names to use.
Raises NotImplementedError if Pandas is not available.
"""
r = self.pandas.check_dataframe(df, ref_df,
actual_path=actual_path,
expected_path=expected_path,
check_data=check_data,
check_types=check_types,
check_order=check_order,
condition=condition,
sortby=sortby,
precision=precision)
(failures, msgs) = r
self.check_failures(failures, msgs)
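    # Illustrative sketch (assumed usage): compare two in-memory dataframes,
    # sorting on an assumed 'id' column so that row order does not matter.
    #
    #     self.assertDatasetsEqual(actual_df, expected_df,
    #                              sortby=['id'], precision=6)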
def assertDatasetCorrect(self, df, ref_csv, actual_path=None,
kind='csv', csv_read_fn=None,
check_data=None, check_types=None,
check_order=None, condition=None, sortby=None,
precision=None, **kwargs):
"""
Check that an in-memory Pandas dataset matches a reference one from
a saved reference csv file.
df Actual dataframe.
ref_csv Name of reference csv file. The location of the
reference file is determined by the configuration
via set_data_location().
actual_path Optional parameter, giving path for file where
actual dataframe originated, used for error
messages.
kind Reference kind, used to locate the reference csv
file.
check_data Option to specify fields to compare values.
        check_types     Option to specify fields to compare types.
check_order Option to specify fields to compare field order.
check_extra_cols Option to specify fields in the actual dataset
to use to check that there are no unexpected
extra columns.
sortby Option to specify fields to sort by before comparing.
condition Filter to be applied to datasets before comparing.
It can be None, or can be a function that takes
a dataframe as its single parameter and returns
a vector of booleans (to specify which rows should
be compared).
precision Number of decimal places to compare float values.
        csv_read_fn     Function to use to read a csv file to obtain
a pandas dataframe. If None, then a default csv
loader is used.
The check_* comparison flags can be of any of the following:
- None (to apply that kind of comparison to all fields)
- False (to skip that kind of comparison completely)
- a list of field names
- a function taking a dataframe as its single parameter, and
returning a list of field names to use.
The default csv loader function is a wrapper around pandas
pd.read_csv(), with default options as follows:
index_col is None
infer_datetime_format is True
            quotechar is '"'
quoting is csv.QUOTE_MINIMAL
escapechar is \
na_values are the empty string, NaN, and NULL
keep_default_na is False
Raises NotImplementedError if Pandas is not available.
"""
expected_path = self.resolve_reference_path(ref_csv, kind=kind)
if self.should_regenerate(kind):
self.write_reference_file(actual_path, expected_path)
else:
ref_df = self.pandas.load_csv(expected_path, loader=csv_read_fn)
self.assertDatasetsEqual(df, ref_df,
actual_path=actual_path,
expected_path=expected_path,
check_data=check_data,
check_types=check_types,
check_order=check_order,
condition=condition,
sortby=sortby,
precision=precision)
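    # Illustrative sketch (assumed usage): 'results.csv' is an example name,
    # resolved against the 'csv' data location; type checking is skipped here.
    #
    #     self.assertDatasetCorrect(df, 'results.csv', check_types=False)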
def assertCSVFileCorrect(self, actual_path, ref_csv,
kind='csv', csv_read_fn=None,
check_data=None, check_types=None,
check_order=None, condition=None, sortby=None,
precision=None, **kwargs):
"""
Check that a csv file matches a reference one.
actual_path Actual csv file.
ref_csv Name of reference csv file. The location of the
reference file is determined by the configuration
via set_data_location().
kind Reference kind, used to locate the reference csv
file.
csv_read_fn A function to use to read a csv file to obtain
a pandas dataframe. If None, then a default csv
loader is used, which takes the same parameters
as the standard pandas pd.read_csv() function.
check_data Option to specify fields to compare values.
        check_types     Option to specify fields to compare types.
check_order Option to specify fields to compare field order.
check_extra_cols Option to specify fields in the actual dataset
to use to check that there are no unexpected
extra columns.
sortby Option to specify fields to sort by before comparing.
condition Filter to be applied to datasets before comparing.
It can be None, or can be a function that takes
a dataframe as its single parameter and returns
a vector of booleans (to specify which rows should
be compared).
precision Number of decimal places to compare float values.
**kwargs Any additional named parameters are passed straight
through to the csv_read_fn function.
The check_* comparison flags can be of any of the following:
- None (to apply that kind of comparison to all fields)
- False (to skip that kind of comparison completely)
- a list of field names
- a function taking a dataframe as its single parameter, and
returning a list of field names to use.
The default csv loader function is a wrapper around pandas
pd.read_csv(), with default options as follows:
index_col is None
infer_datetime_format is True
            quotechar is '"'
quoting is csv.QUOTE_MINIMAL
escapechar is \
na_values are the empty string, NaN, and NULL
keep_default_na is False
Raises NotImplementedError if Pandas is not available.
"""
expected_path = self.resolve_reference_path(ref_csv, kind=kind)
if self.should_regenerate(kind):
self.write_reference_file(actual_path, expected_path)
else:
            r = self.pandas.check_csv_file(actual_path, expected_path,
                                           check_data=check_data,
                                           check_types=check_types,
check_order=check_order,
condition=condition,
sortby=sortby,
precision=precision)
(failures, msgs) = r
self.check_failures(failures, msgs)
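    # Illustrative sketch (assumed usage): both file names are example values;
    # floats are compared to 4 decimal places.
    #
    #     self.assertCSVFileCorrect('output/actual.csv', 'expected.csv',
    #                               precision=4)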
def assertCSVFilesCorrect(self, actual_paths, ref_csvs,
kind='csv', csv_read_fn=None,
check_data=None, check_types=None,
check_order=None, condition=None, sortby=None,
precision=None, **kwargs):
"""
        Check that a collection of csv files matches a collection of reference ones.
actual_paths List of Actual csv files.
ref_csvs List of names of matching reference csv file. The
location of the reference files is determined by
the configuration via set_data_location().
kind Reference kind, used to locate the reference csv
files.
csv_read_fn A function to use to read a csv file to obtain
a pandas dataframe. If None, then a default csv
loader is used, which takes the same parameters
as the standard pandas pd.read_csv() function.
check_data Option to specify fields to compare values.
        check_types     Option to specify fields to compare types.
check_order Option to specify fields to compare field order.
check_extra_cols Option to specify fields in the actual dataset
to use to check that there are no unexpected
extra columns.
sortby Option to specify fields to sort by before comparing.
condition Filter to be applied to datasets before comparing.
It can be None, or can be a function that takes
a dataframe as its single parameter and returns
a vector of booleans (to specify which rows should
be compared).
precision Number of decimal places to compare float values.
**kwargs Any additional named parameters are passed straight
through to the csv_read_fn function.
The check_* comparison flags can be of any of the following:
- None (to apply that kind of comparison to all fields)
- False (to skip that kind of comparison completely)
- a list of field names
- a function taking a dataframe as its single parameter, and
returning a list of field names to use.
The default csv loader function is a wrapper around pandas
pd.read_csv(), with default options as follows:
index_col is None
infer_datetime_format is True
            quotechar is '"'
quoting is csv.QUOTE_MINIMAL
escapechar is \
na_values are the empty string, NaN, and NULL
keep_default_na is False
Raises NotImplementedError if Pandas is not available.
"""
expected_paths = self.resolve_reference_paths(ref_csvs, kind=kind)
if self.should_regenerate(kind):
self.write_reference_files(actual_paths, expected_paths)
else:
            r = self.pandas.check_csv_files(actual_paths, expected_paths,
                                            check_data=check_data,
                                            check_types=check_types,
check_order=check_order,
condition=condition,
sortby=sortby,
precision=precision)
(failures, msgs) = r
self.check_failures(failures, msgs)
def assertStringCorrect(self, string, ref_csv, kind=None,
lstrip=False, rstrip=False,
ignore_substrings=None,
ignore_patterns=None, preprocess=None,
max_permutation_cases=0):
"""
Check that an in-memory string matches the contents from a reference
text file.
string is the actual string.
        ref_csv               is the name of the reference text file. The
location of the reference file is determined by
the configuration via set_data_location().
kind is the reference kind, used to locate the
                              reference file.
lstrip if set to true, both strings are left stripped
before the comparison is carried out.
                              Note: the stripping is done on a per-line basis.
rstrip if set to true, both strings are right stripped
before the comparison is carried out.
                              Note: the stripping is done on a per-line basis.
ignore_substrings is an optional list of substrings; lines
containing any of these substrings will be
ignored in the comparison.
ignore_patterns is an optional list of regular expressions;
lines will be considered to be the same if
they only differ in substrings that match one
of these regular expressions. The expressions
must not contain parenthesised groups, and
should only include explicit anchors if they
                              need to refer to the whole line.
preprocess is an optional function that takes a list of
strings and preprocesses it in some way; this
function will be applied to both the actual
and expected.
max_permutation_cases is an optional number specifying the maximum
number of permutations allowed; if the actual
and expected lists differ only in that their
lines are permutations of each other, and
the number of such permutations does not
exceed this limit, then the two are considered
to be identical.
"""
expected_path = self.resolve_reference_path(ref_csv, kind=kind)
if self.should_regenerate(kind):
self.write_reference_result(string, expected_path)
else:
ilc = ignore_substrings
ip = ignore_patterns
mpc = max_permutation_cases
r = self.files.check_string_against_file(string, expected_path,
actual_path=None,
lstrip=lstrip,
rstrip=rstrip,
ignore_substrings=ilc,
ignore_patterns=ip,
preprocess=preprocess,
max_permutation_cases=mpc)
(failures, msgs) = r
self.check_failures(failures, msgs)
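    # Illustrative sketch (assumed usage): compare generated report text with
    # a saved reference, treating ISO-style dates as equal; the variable name,
    # file name and regular expression are example values.
    #
    #     self.assertStringCorrect(report_text, 'report.txt',
    #                              ignore_patterns=[r'\d{4}-\d{2}-\d{2}'])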
def assertFileCorrect(self, actual_path, ref_path, kind=None,
lstrip=False, rstrip=False,
ignore_substrings=None,
ignore_patterns=None, preprocess=None,
max_permutation_cases=0):
"""
Check that a file matches the contents from a reference text file.
actual_path is a path for a text file.
ref_path is the name of the reference file. The
location of the reference file is determined by
the configuration via set_data_location().
kind is the reference kind, used to locate the
reference file.
lstrip if set to true, both strings are left stripped
before the comparison is carried out.
                              Note: the stripping is done on a per-line basis.
rstrip if set to true, both strings are right stripped
before the comparison is carried out.
                              Note: the stripping is done on a per-line basis.
ignore_substrings is an optional list of substrings; lines
containing any of these substrings will be
ignored in the comparison.
ignore_patterns is an optional list of regular expressions;
lines will be considered to be the same if
they only differ in substrings that match one
of these regular expressions. The expressions
must not contain parenthesised groups, and
should only include explicit anchors if they
                              need to refer to the whole line.
preprocess is an optional function that takes a list of
strings and preprocesses it in some way; this
function will be applied to both the actual
and expected.
max_permutation_cases is an optional number specifying the maximum
number of permutations allowed; if the actual
and expected lists differ only in that their
lines are permutations of each other, and
the number of such permutations does not
exceed this limit, then the two are considered
to be identical.
This should be used for unstructured data such as logfiles, etc.
For csv files, use assertCSVFileCorrect instead.
"""
expected_path = self.resolve_reference_path(ref_path, kind=kind)
if self.should_regenerate(kind):
self.write_reference_file(actual_path, expected_path)
else:
mpc = max_permutation_cases
r = self.files.check_file(actual_path, expected_path,
lstrip=lstrip, rstrip=rstrip,
ignore_substrings=ignore_substrings,
ignore_patterns=ignore_patterns,
preprocess=preprocess,
max_permutation_cases=mpc)
(failures, msgs) = r
self.check_failures(failures, msgs)
def assertFilesCorrect(self, actual_paths, ref_paths, kind=None,
lstrip=False, rstrip=False,
ignore_substrings=None,
ignore_patterns=None, preprocess=None,
max_permutation_cases=0):
"""
        Check that a collection of files matches the contents of a
        matching collection of reference text files.
actual_paths is a list of paths for text files.
ref_paths is a list of names of the matching reference
files. The location of the reference files
is determined by the configuration via
set_data_location().
kind is the reference kind, used to locate the
reference files.
lstrip if set to true, both strings are left stripped
before the comparison is carried out.
                              Note: the stripping is done on a per-line basis.
rstrip if set to true, both strings are right stripped
before the comparison is carried out.
                              Note: the stripping is done on a per-line basis.
ignore_substrings is an optional list of substrings; lines
containing any of these substrings will be
ignored in the comparison.
ignore_patterns is an optional list of regular expressions;
lines will be considered to be the same if
they only differ in substrings that match one
of these regular expressions. The expressions
must not contain parenthesised groups, and
should only include explicit anchors if they
                              need to refer to the whole line.
preprocess is an optional function that takes a list of
strings and preprocesses it in some way; this
function will be applied to both the actual
and expected.
max_permutation_cases is an optional number specifying the maximum
number of permutations allowed; if the actual
and expected lists differ only in that their
lines are permutations of each other, and
the number of such permutations does not
exceed this limit, then the two are considered
to be identical.
This should be used for unstructured data such as logfiles, etc.
        For csv files, use assertCSVFilesCorrect instead.
"""
expected_paths = self.resolve_reference_paths(ref_paths, kind=kind)
if self.should_regenerate(kind):
self.write_reference_files(actual_paths, expected_paths)
else:
mpc = max_permutation_cases
r = self.files.check_files(actual_paths, expected_paths,
lstrip=lstrip, rstrip=rstrip,
ignore_substrings=ignore_substrings,
ignore_patterns=ignore_patterns,
preprocess=preprocess,
max_permutation_cases=mpc)
(failures, msgs) = r
self.check_failures(failures, msgs)
def resolve_reference_path(self, path, kind=None):
"""
Internal method for deciding where a reference data file should
be looked for, if it has been specified using a relative path.
"""
if self.reference_data_locations and not os.path.isabs(path):
if kind not in self.reference_data_locations:
kind = None
if kind in self.reference_data_locations:
path = os.path.join(self.reference_data_locations[kind], path)
else:
raise Exception('No reference data location for "%s"' % kind)
return path
def resolve_reference_paths(self, paths, kind=None):
"""
Internal method for resolving a list of reference data files,
all of the same kind.
"""
return [self.resolve_reference_path(p, kind=kind) for p in paths]
def should_regenerate(self, kind):
"""
Internal method to determine if a particular kind of file
should be regenerated.
"""
if kind not in self.regenerate:
kind = None
return kind in self.regenerate and self.regenerate[kind]
def write_reference_file(self, actual_path, reference_path):
"""
Internal method for regenerating reference data.
"""
with open(actual_path) as fin:
actual = fin.read()
self.write_reference_result(actual, reference_path)
def write_reference_files(self, actual_paths, reference_paths):
"""
Internal method for regenerating reference data for a list of
files.
"""
for (actual_path, expected_path) in zip(actual_paths, reference_paths):
            self.write_reference_file(actual_path, expected_path)
def write_reference_result(self, result, reference_path):
"""
Internal method for regenerating reference data from in-memory
results.
"""
with open(reference_path, 'w') as fout:
fout.write(result)
if self.verbose and self.print_fn:
self.print_fn('Written %s' % reference_path)
def check_failures(self, failures, msgs):
"""
        Internal method to check for failures and report them.
"""
self.assert_fn(failures == 0, '\n'.join(msgs))
@staticmethod
def default_print_fn(*args, **kwargs):
"""
Sometimes the framework needs to print messages. By default, it
will use this print function, but you can override it by passing
in a print_fn parameter to __init__.
"""
print(*args, **kwargs)
outfile = kwargs.get('file', sys.stdout)
outfile.flush()
# Default print function
print_fn = default_print_fn
# Magic so that an instance of this class can masquerade as a module,
# so that all of its methods can be made available as top-level functions,
# to work well with frameworks like pytest.
ReferenceTest.__all__ = dir(ReferenceTest)
| 2.546875 | 3 |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GL/NV/texgen_reflection.py | JE-Chen/je_old_repo | 0 | 12792697 | <gh_stars>0
'''OpenGL extension NV.texgen_reflection
This module customises the behaviour of the
OpenGL.raw.GL.NV.texgen_reflection to provide a more
Python-friendly API
Overview (from the spec)
This extension provides two new texture coordinate generation modes
that are useful texture-based lighting and environment mapping.
The reflection map mode generates texture coordinates (s,t,r)
matching the vertex's eye-space reflection vector. The reflection
map mode is useful for environment mapping without the singularity
inherent in sphere mapping. The normal map mode generates texture
coordinates (s,t,r) matching the vertex's transformed eye-space
normal. The normal map mode is useful for sophisticated cube map
texturing-based diffuse lighting models.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/texgen_reflection.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.NV.texgen_reflection import *
from OpenGL.raw.GL.NV.texgen_reflection import _EXTENSION_NAME
def glInitTexgenReflectionNV():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION | 1.351563 | 1 |
mlpractice/datasets/__init__.py | SYAN83/machine-learning-practice | 0 | 12792698 |
import pandas
import os
PATH_TO_DATASETS = './mlpractice/datasets/'
class DataSet(object):
def __init__(self, dir_name, extensions=['.csv'], path_to_datasets=PATH_TO_DATASETS):
data_dir = os.path.join(path_to_datasets, dir_name)
for file_name in os.listdir(data_dir):
name, ext = os.path.splitext(file_name)
if ext in extensions:
data = pandas.read_csv(filepath_or_buffer=os.path.join(data_dir, file_name))
setattr(self, name, data)
def load_iris():
return DataSet(dir_name='iris/')
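# Illustrative sketch (assumed usage): each csv found in the bundle directory
# becomes a DataFrame attribute named after the file, so if the iris bundle
# contains an iris.csv this would work:
#
#     data = load_iris()
#     print(data.iris.head())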
def load_movieLens():
return DataSet(dir_name='ml-latest-small/') | 2.96875 | 3 |
meggie/actions/raw_resample/dialogs/resamplingDialogUi.py | Teekuningas/meggie | 4 | 12792699 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'resamplingDialogUi.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_resamplingDialog(object):
def setupUi(self, resamplingDialog):
resamplingDialog.setObjectName("resamplingDialog")
resamplingDialog.resize(406, 540)
self.gridLayout = QtWidgets.QGridLayout(resamplingDialog)
self.gridLayout.setObjectName("gridLayout")
self.scrollArea = QtWidgets.QScrollArea(resamplingDialog)
self.scrollArea.setMinimumSize(QtCore.QSize(0, 0))
self.scrollArea.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.scrollArea.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.scrollArea.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.scrollArea.setWidgetResizable(True)
self.scrollArea.setObjectName("scrollArea")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 386, 489))
self.scrollAreaWidgetContents.setMinimumSize(QtCore.QSize(0, 0))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.gridLayout_2 = QtWidgets.QGridLayout(self.scrollAreaWidgetContents)
self.gridLayout_2.setObjectName("gridLayout_2")
self.groupBoxResample = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.groupBoxResample.setObjectName("groupBoxResample")
self.formLayout = QtWidgets.QFormLayout(self.groupBoxResample)
self.formLayout.setObjectName("formLayout")
self.labelCurrentRateHeading = QtWidgets.QLabel(self.groupBoxResample)
self.labelCurrentRateHeading.setObjectName("labelCurrentRateHeading")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.labelCurrentRateHeading)
self.labelCurrentRateValue = QtWidgets.QLabel(self.groupBoxResample)
self.labelCurrentRateValue.setText("")
self.labelCurrentRateValue.setObjectName("labelCurrentRateValue")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.labelCurrentRateValue)
self.labelNewRateHeading = QtWidgets.QLabel(self.groupBoxResample)
self.labelNewRateHeading.setObjectName("labelNewRateHeading")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.labelNewRateHeading)
self.doubleSpinBoxNewRate = QtWidgets.QDoubleSpinBox(self.groupBoxResample)
self.doubleSpinBoxNewRate.setMaximum(10000.0)
self.doubleSpinBoxNewRate.setProperty("value", 100.0)
self.doubleSpinBoxNewRate.setObjectName("doubleSpinBoxNewRate")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.doubleSpinBoxNewRate)
self.gridLayout_2.addWidget(self.groupBoxResample, 0, 0, 1, 1)
self.groupBoxBatching = QtWidgets.QGroupBox(self.scrollAreaWidgetContents)
self.groupBoxBatching.setObjectName("groupBoxBatching")
self.gridLayoutBatching = QtWidgets.QGridLayout(self.groupBoxBatching)
self.gridLayoutBatching.setObjectName("gridLayoutBatching")
self.batchingWidgetPlaceholder = QtWidgets.QWidget(self.groupBoxBatching)
self.batchingWidgetPlaceholder.setMinimumSize(QtCore.QSize(300, 300))
self.batchingWidgetPlaceholder.setObjectName("batchingWidgetPlaceholder")
self.gridLayoutBatching.addWidget(self.batchingWidgetPlaceholder, 0, 0, 1, 1)
self.gridLayout_2.addWidget(self.groupBoxBatching, 1, 0, 1, 1)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem, 2, 0, 1, 1)
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.gridLayout.addWidget(self.scrollArea, 0, 0, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem1)
self.pushButtonCancel = QtWidgets.QPushButton(resamplingDialog)
self.pushButtonCancel.setObjectName("pushButtonCancel")
self.horizontalLayout.addWidget(self.pushButtonCancel)
self.pushButtonBatch = QtWidgets.QPushButton(resamplingDialog)
self.pushButtonBatch.setObjectName("pushButtonBatch")
self.horizontalLayout.addWidget(self.pushButtonBatch)
self.pushButtonApply = QtWidgets.QPushButton(resamplingDialog)
self.pushButtonApply.setObjectName("pushButtonApply")
self.horizontalLayout.addWidget(self.pushButtonApply)
self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 1)
self.retranslateUi(resamplingDialog)
self.pushButtonCancel.clicked.connect(resamplingDialog.reject)
self.pushButtonApply.clicked.connect(resamplingDialog.accept)
self.pushButtonBatch.clicked.connect(resamplingDialog.acceptBatch)
QtCore.QMetaObject.connectSlotsByName(resamplingDialog)
def retranslateUi(self, resamplingDialog):
_translate = QtCore.QCoreApplication.translate
resamplingDialog.setWindowTitle(_translate("resamplingDialog", "Meggie - Resampling"))
self.groupBoxResample.setTitle(_translate("resamplingDialog", "Resampling options:"))
self.labelCurrentRateHeading.setText(_translate("resamplingDialog", "Current rate:"))
self.labelNewRateHeading.setText(_translate("resamplingDialog", "Resample to:"))
self.groupBoxBatching.setTitle(_translate("resamplingDialog", "Batching"))
self.pushButtonCancel.setText(_translate("resamplingDialog", "Cancel"))
self.pushButtonBatch.setText(_translate("resamplingDialog", "Batch"))
self.pushButtonApply.setText(_translate("resamplingDialog", "Apply"))
| 1.875 | 2 |
tests/st/ops/gpu/test_relu_op.py | GuoSuiming/mindspore | 3,200 | 12792700 | <reponame>GuoSuiming/mindspore<filename>tests/st/ops/gpu/test_relu_op.py
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
class NetRelu(nn.Cell):
def __init__(self):
super(NetRelu, self).__init__()
self.relu = P.ReLU()
def construct(self, x):
return self.relu(x)
class NetReluDynamic(nn.Cell):
def __init__(self):
super(NetReluDynamic, self).__init__()
self.conv = inner.GpuConvertToDynamicShape()
self.relu = P.ReLU()
def construct(self, x):
x_conv = self.conv(x)
return self.relu(x_conv)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_float32():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.float32))
expect = np.array([[[[0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.float32)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int8():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.int8))
expect = np.array([[[[0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.int8)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int32():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.int32))
expect = np.array([[[[0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.int32)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int64():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.int64))
expect = np.array([[[[0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.int64)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
print(output.asnumpy(), expect)
assert (output.asnumpy() == expect).all()
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu = NetRelu()
output = relu(x)
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_relu_int64_dynamic_shape():
x = Tensor(np.array([[[[-1, 1, 10],
[1, -1, 1],
[10, 1, -1]]]]).astype(np.int64))
expect = np.array([[[[0, 1, 10,],
[1, 0, 1,],
[10, 1, 0.]]]]).astype(np.int64)
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
relu_dynamic = NetReluDynamic()
output = relu_dynamic(x)
assert (output.asnumpy() == expect).all()
| 2.171875 | 2 |
tests/project/app.py | Zadigo/Zah | 0 | 12792701 | from zah.router.app import Router
# from zah.store import Store
from zah.urls import render, render_page
from zah.core.servers import BaseServer, DevelopmentServer
from zah.shortcuts import get_default_server
app = BaseServer()
# app = get_default_server()
# app.use_component(Router)
# app.use_component(Store)
# def view1(request, **kwargs):
# return render(request, 'home.html')
# @app.as_route('/test2', 'test2')
# def view2(request, **kwargs):
# return render(request, 'home.html')
# app.add_route('/test', view1, 'test1')
# app.add_route('/test3', render_page('home.html'))
| 1.960938 | 2 |
decipher/parse_decipher.py | MRCIEU/mendelvar_standalone | 3 | 12792702 | <filename>decipher/parse_decipher.py
#!/usr/bin/env python
import csv, re, argparse
ap = argparse.ArgumentParser()
ap.add_argument('--delimit',required=True,type=str,help='Delimiter used in the file')
ap.add_argument('--input',required=True,type=str,help='Input file')
args = ap.parse_args()
hpo_re = r"HP\:\d+"
if args.delimit == "csv":
delimit = ","
else:
delimit = "\t"
with open(args.input) as csvfile:
csv_reader = csv.reader(csvfile, delimiter=delimit, quotechar='"')
header = next(csv_reader)
sub_header = ["hgnc_gene_name", "omim_gene_id", "disease_name", "omim_disease_id", "hpo", "organ_specificity_list", "hgnc_id"]
print('\t'.join(sub_header))
for row in csv_reader:
if (row[4] == "possible"):
pass
else:
all_matches_hpo = list()
all_matches_hpo.extend(re.findall(hpo_re, row[7]))
#Unique HPO terms
all_matches_hpo = [a.replace("HP:", "") for a in all_matches_hpo]
all_matches_hpo = set(all_matches_hpo)
row[7] = ";".join(all_matches_hpo)
sub_row = [row[0], row[1], row[2], row[3], row[7], row[8], row[12]]
print('\t'.join(sub_row))
| 3.421875 | 3 |
util/util.py | pkufergus/pingeci | 2 | 12792703 | <reponame>pkufergus/pingeci
# -*- coding: utf-8 -*-
import os
import logging
import logging.handlers
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
log = None
def get_log(name="main"):
global log
if log:
return log
log = logging.getLogger('log')
log.setLevel(logging.DEBUG)
# logG.setLevel(logging.INFO)
fmt = "%(filename)s:%(module)s:%(funcName)s:%(lineno)d:%(levelname)s:%(asctime)s>>%(message)s"
formater = logging.Formatter(fmt)
if not os.path.exists("./log/"):
os.mkdir("./log/")
handler = logging.handlers.TimedRotatingFileHandler("./log/{}.log".format(name), "midnight", 1, 7)
handler.setFormatter(formater)
log.addHandler(handler)
return log
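# Illustrative sketch (assumed usage; the import path mirrors this file's
# location): callers share the module-level logger created below, and records
# go to ./log/main.log with midnight rotation.
#
#     from util.util import log
#     log.info("service started")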
log = get_log() | 2.234375 | 2 |
Utilities/Scripts/SlicerWizard/__version__.py | forfullstack/slicersources-src | 0 | 12792704 | __version_info__ = (
4,
11,
0,
"dev0"
)
__version__ = ".".join(map(str, __version_info__))
| 1.3125 | 1 |
tools/visualization/alpha_bind.py | happywu/mmaction2-CycleContrast | 0 | 12792705 | import argparse
import os.path as osp
import mmcv
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('img_dir', help='img config directory')
parser.add_argument('gt_dir', help='gt config directory')
parser.add_argument('out_dir', help='output config directory')
args = parser.parse_args()
return args
# def main():
# args = parse_args()
# img_suffix = '_leftImg8bit.png'
# seg_map_suffix = '_gtFine_color.png'
# mmcv.mkdir_or_exist(args.out_dir)
# for img_file in mmcv.scandir(args.img_dir, suffix=img_suffix):
# seg_file = img_file.replace(img_suffix, seg_map_suffix)
# img = mmcv.imread(osp.join(args.img_dir, img_file))
# seg = mmcv.imread(osp.join(args.gt_dir, seg_file))
# binded = img * 0.5 + seg * 0.5
# mmcv.imwrite(binded, osp.join(args.out_dir, img_file))
def main():
args = parse_args()
img_suffix = '.jpg'
seg_map_suffix = '.png'
mmcv.mkdir_or_exist(args.out_dir)
for img_file in mmcv.scandir(
args.img_dir, suffix=img_suffix, recursive=True):
seg_file = img_file.replace(img_suffix, seg_map_suffix)
if not osp.exists(osp.join(args.gt_dir, seg_file)):
continue
img = mmcv.imread(osp.join(args.img_dir, img_file))
seg = mmcv.imread(osp.join(args.gt_dir, seg_file))
binded = img * 0.5 + seg * 0.5
mmcv.imwrite(binded, osp.join(args.out_dir, img_file))
if __name__ == '__main__':
main()
| 2.703125 | 3 |
codewars/8 kyu/is-the-string-uppercase.py | sirken/coding-practice | 0 | 12792706 | <reponame>sirken/coding-practice
from Test import Test, Test as test
'''
Is the string uppercase?
Task
Create a method is_uppercase() to see whether the string is ALL CAPS. For example:
is_uppercase("c") == False
is_uppercase("C") == True
is_uppercase("hello I AM DONALD") == False
is_uppercase("HELLO I AM DONALD") == True
is_uppercase("ACSKLDFJSgSKLDFJSKLDFJ") == False
is_uppercase("ACSKLDFJSGSKLDFJSKLDFJ") == True
In this Kata, a string is said to be in ALL CAPS whenever it does not contain any lowercase letter so any string containing no letters at all is trivially considered to be in ALL CAPS.
'''
def is_uppercase(inp):
return inp.isupper()
def gen_test_case(inp, res):
test.assert_equals(is_uppercase(inp), res, inp)
test.describe("Basic Tests")
gen_test_case("c", False)
gen_test_case("C", True)
gen_test_case("hello I AM DONALD", False)
gen_test_case("HELLO I AM DONALD", True) | 4.5 | 4 |
plumeria/util/__init__.py | sk89q/plumeria | 18 | 12792707 | <gh_stars>10-100
MIME_TYPES = {
'.png': 'image/png',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg',
'.gif': 'image/gif',
'.txt': 'text/plain',
}
def to_mimetype(ext):
if ext.lower() in MIME_TYPES:
return MIME_TYPES[ext.lower()]
else:
return "application/octet-stream"
| 2.390625 | 2 |
netanalysis/tls/domain_ip_validator.py | Jigsaw-Code/net-analysis | 88 | 12792708 | <filename>netanalysis/tls/domain_ip_validator.py
# Copyright 2018 Jigsaw Operations LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import asyncio
import certifi
import ipaddress
import logging
import pprint
import ssl
import sys
_SSL_CONTEXT = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=certifi.where())
_SSL_CONTEXT.check_hostname = False
class DomainIpValidator:
async def get_cert(self, domain: str, ip: str, timeout=2.0):
ip = str(ip)
transport, _proto = await asyncio.wait_for(asyncio.get_event_loop().create_connection(
asyncio.Protocol,
host=ip,
port=443,
ssl=_SSL_CONTEXT,
server_hostname=domain), timeout)
transport.close()
return transport.get_extra_info("peercert")
async def validate_ip(self, domain: str, ip: str, timeout=2.0):
"""
Returns successfully if the IP is valid for the domain.
Raises exception if the validation fails.
"""
cert = await self.get_cert(domain, ip, timeout)
if logging.getLogger().isEnabledFor(logging.DEBUG):
logging.debug("Certificate:\n{}".format(pprint.pformat(cert)))
ssl.match_hostname(cert, domain)
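# Illustrative sketch (assumed usage): the domain and address are example
# values; validate_ip() returns quietly on success and raises (for instance
# ssl.CertificateError) when the certificate does not match the domain.
#
#     validator = DomainIpValidator()
#     asyncio.get_event_loop().run_until_complete(
#         validator.validate_ip("example.com", "192.0.2.10"))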
def main(args):
logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)
validator = DomainIpValidator()
all_good = True
for ip_address in args.ip_address:
try:
asyncio.get_event_loop().run_until_complete(
validator.validate_ip(args.domain, str(ip_address), timeout=args.timeout))
result_str = "VALID"
except (ssl.CertificateError, ConnectionRefusedError, OSError, asyncio.TimeoutError) as e:
all_good = False
result_str = "UNKNOWN (%s)" % repr(e)
print("IP {} is {}".format(ip_address, result_str))
return 0 if all_good else 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"Checks if the given IP addresses are valid for the domain")
parser.add_argument("domain", type=str,
help="The domain to validate the IPs for")
parser.add_argument("ip_address", type=ipaddress.ip_address,
nargs="+", help="The IP address to query")
parser.add_argument("--debug", action="store_true")
parser.add_argument("--timeout", type=float, default=2.0,
help="Timeout in seconds for getting the certificate")
sys.exit(main(parser.parse_args()))
| 2.34375 | 2 |
koku/reporting_common/migrations/0022_auto_20200505_1707.py | cgoodfred/koku | 2 | 12792709 | <filename>koku/reporting_common/migrations/0022_auto_20200505_1707.py
# Generated by Django 2.2.12 on 2020-05-05 17:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("reporting_common", "0021_delete_reportcolumnmap")]
operations = [
migrations.RunSQL(
"""
DROP TABLE IF EXISTS "worker_cache_table";
CREATE TABLE "worker_cache_table" (
"cache_key" varchar(255) NOT NULL PRIMARY KEY,
"value" text NOT NULL,
"expires" timestamp with time zone NOT NULL
);
DROP INDEX IF EXISTS "worker_cache_table_expires";
CREATE INDEX "worker_cache_table_expires" ON "worker_cache_table" ("expires");
"""
)
]
| 1.445313 | 1 |
cliente_chat.py | albertopeces2000/Chat | 0 | 12792710 | <gh_stars>0
import socket
import sys
cliente_chat = socket.socket()
cliente_chat.connect( ('192.168.1.45',8080) )  # Connects to servidor_chat (the chat server)
repetir = True
while repetir:
try:
##################################################
msg = input('>>: ')
if msg == 'salir':
mensaje = str.encode(msg)
# We must write bytes, not a string
cliente_chat.send(mensaje)
sys.exit(1)
else:
mensaje = str.encode(msg)
# We must write bytes, not a string
cliente_chat.send(mensaje)
            # Here you send a first message to the chat server and wait for a reply.
            # We need to convert the string to bytes so it can be sent to the server.
##################################################
        mensaje_server = cliente_chat.recv(1024)  # Here you get the message that was written on the server.
exit = str.encode("salir")
        if mensaje_server == exit:
#cliente_chat.close()
print('Usted acaba de salir del chat. Pulse el enter para salir.')
sys.exit(1)
#repetir = False
else:
print(mensaje_server)
repetir = True
except KeyboardInterrupt:
cliente_chat.close()
print('El chat se está cerrando...')
repetir = False
except ConnectionAbortedError:
cliente_chat.close()
print('El chat se está cerrando...')
repetir = False
| 3.109375 | 3 |
tests/test_mujoco_rl.py | HumanCompatibleAI/seals | 23 | 12792711 | <filename>tests/test_mujoco_rl.py
"""Test RL on MuJoCo adapted environments."""
from typing import Tuple
import gym
import pytest
import stable_baselines3
from stable_baselines3.common import evaluation
import seals # noqa: F401 Import required for env registration
def _eval_env(
env_name: str,
total_timesteps: int,
) -> Tuple[float, float]: # pragma: no cover
"""Train PPO2 for `total_timesteps` on `env_name` and evaluate returns."""
env = gym.make(env_name)
model = stable_baselines3.PPO("MlpPolicy", env)
model.learn(total_timesteps=total_timesteps)
res = evaluation.evaluate_policy(model, env)
assert isinstance(res[0], float)
return res
# SOMEDAY(adam): tests are flaky and consistently fail in some environments
# Unclear if they even should pass in some cases.
# See discussion in GH#6 and GH#40.
@pytest.mark.expensive
@pytest.mark.parametrize(
"env_base",
["HalfCheetah", "Ant", "Hopper", "Humanoid", "Swimmer", "Walker2d"],
)
def test_fixed_env_model_as_good_as_gym_env_model(env_base: str): # pragma: no cover
"""Compare original and modified MuJoCo v3 envs."""
train_timesteps = 200000
gym_reward, _ = _eval_env(f"{env_base}-v3", total_timesteps=train_timesteps)
fixed_reward, _ = _eval_env(
f"seals/{env_base}-v0",
total_timesteps=train_timesteps,
)
epsilon = 0.1
sign = 1 if gym_reward > 0 else -1
assert (1 - sign * epsilon) * gym_reward <= fixed_reward
| 2.375 | 2 |
mundo 3/083.py | thiagofreitascarneiro/Curso-de-Python---Curso-em-Video | 1 | 12792712 | <reponame>thiagofreitascarneiro/Curso-de-Python---Curso-em-Video<gh_stars>1-10
valor = str(input('Digite uma expressão: '))
quant_Abrir = valor.count('(')
quant_Fechar = valor.count(')')
print(quant_Fechar)
print(quant_Abrir)
if quant_Abrir == quant_Fechar:
print('Essa expressão está correta!')
else:
print('essa expressão esta Equivocada!')
| 3.71875 | 4 |
experiment/ConvNet.py | callous-youth/IAPTT-GM | 2 | 12792713 |
import torch.nn as nn
import logging
logger = logging.getLogger(__name__)
def conv3x3(in_channels, out_channels,activation=nn.ReLU(inplace=True)):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, 3, padding=1), nn.MaxPool2d(2),
nn.BatchNorm2d(out_channels, momentum=1., affine=True,
track_running_stats=False # When this is true is called the "transductive setting"
), activation
)
class FullyConnectedLayer(nn.Module):
def __init__(self, num_layer=2):
super(FullyConnectedLayer, self).__init__()
'''
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(in_shape, out_features))
self.hidden_size = self.hidden_size
'''
self.fc_net = nn.Sequential(nn.Linear(1,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64))
for j in range(num_layer-1):
self.fc_net = nn.Sequential(self.fc_net, nn.Linear(64,64),nn.ReLU(inplace=True),nn.LayerNorm(normalized_shape=64)
)
def forward(self, inputs, params=None):
#features = inputs.view((inputs.size(0), -1))
logits = self.fc_net(inputs)
return logits
def trainable_parameters(self):
"""
Returns an iterator over the trainable parameters of the model.
"""
for param in self.parameters():
if param.requires_grad:
yield param
class TaskFullyConnectedLayer(nn.Module):
def __init__(self,num_layer=1, task_conv=0):
super(TaskFullyConnectedLayer, self).__init__()
'''
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(in_shape, out_features))
self.hidden_size = self.hidden_size
'''
        if num_layer > 1:
            self.classifier = nn.Linear(64, 1)
        else:
            self.classifier = nn.Sequential(nn.Linear(64, 1))
def forward(self, inputs, params=None):
#features = inputs.view((inputs.size(0), -1))
logits = self.classifier(inputs)
return logits
def trainable_parameters(self):
"""
Returns an iterator over the trainable parameters of the model.
"""
for param in self.parameters():
if param.requires_grad:
yield param
class TaskLinearLayer(nn.Module):
def __init__(self, in_shape, out_features,hidden_size=32,task_conv=0,dfc=True):
super(TaskLinearLayer, self).__init__()
self.in_shape = in_shape
self.out_features = out_features
if task_conv ==0 and not dfc:
self.classifier = nn.Sequential(
nn.Flatten(),
nn.Linear(in_shape, out_features))
elif dfc:
self.classifier = nn.Sequential(conv3x3(hidden_size, hidden_size,activation=nn.Softplus()), nn.Flatten(), nn.Linear(128, out_features))
else:
self.classifier = conv3x3(hidden_size, hidden_size)
for j in range(task_conv-1):
self.classifier = nn.Sequential(self.classifier, conv3x3(hidden_size, hidden_size,activation=nn.Softplus()))
self.classifier = nn.Sequential(self.classifier,
nn.Flatten(), nn.Linear(in_shape, out_features))
def forward(self, inputs, params=None):
#features = inputs.view((inputs.size(0), -1))
logits = self.classifier(inputs)
return logits
def trainable_parameters(self):
"""
Returns an iterator over the trainable parameters of the model.
"""
for param in self.parameters():
if param.requires_grad:
yield param
class ConvolutionalNeuralNetwork(nn.Module):
def __init__(self, in_channels, out_features, hidden_size=32,device=None,task_conv=0):
super(ConvolutionalNeuralNetwork, self).__init__()
self.in_channels = in_channels
self.out_features = out_features
self.hidden_size = hidden_size
assert task_conv >= 0, "Wrong call for task nets!"
self.features = conv3x3(in_channels, hidden_size)
for i in range(3-task_conv):
self.features = nn.Sequential(self.features, conv3x3(hidden_size, hidden_size))
def forward(self, inputs, params=None):
features = self.features(inputs)
return features
def trainable_parameters(self):
"""
Returns an iterator over the trainable parameters of the model.
"""
for param in self.parameters():
if param.requires_grad:
yield param
| 2.703125 | 3 |
audioengine/model/finetuning/wav2vec2/helper/wav2vec2_trainer.py | NiklasHoltmeyer/stt-audioengine | 0 | 12792714 | # Source: https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb#scrollTo=lbQf5GuZyQ4_
import collections
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
import transformers
from audioengine.metrics.wer import Jiwer
from datasets import load_metric
from torch import nn
from torch.cuda.amp import autocast
from tqdm import tqdm
from transformers import (
Trainer,
Wav2Vec2Processor,
)
from transformers.trainer_pt_utils import LengthGroupedSampler, DistributedLengthGroupedSampler
@dataclass
class DataCollatorCTCWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor (:class:`~transformers.Wav2Vec2Processor`)
The processor used for proccessing the data.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence if provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
max_length_labels (:obj:`int`, `optional`):
Maximum length of the ``labels`` returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
"""
processor: Wav2Vec2Processor
padding: Union[bool, str] = True
max_length: Optional[int] = None
max_length_labels: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
pad_to_multiple_of_labels: Optional[int] = None
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
# split inputs and labels since they have to be of different lenghts and need
# different padding methods
input_features = [{"input_values": feature["input_values"]} for feature in features]
label_features = [{"input_ids": feature["labels"]} for feature in features]
batch = self.processor.pad(
input_features,
padding=self.padding,
max_length=self.max_length,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt",
)
with self.processor.as_target_processor():
labels_batch = self.processor.pad(
label_features,
padding=self.padding,
max_length=self.max_length_labels,
pad_to_multiple_of=self.pad_to_multiple_of_labels,
return_tensors="pt",
)
# replace padding with -100 to ignore loss correctly
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
batch["labels"] = labels
return batch
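# Illustrative sketch (assumed usage): `processor` is a Wav2Vec2Processor
# assumed to be created elsewhere; the collator is then handed to the Trainer.
#
#     data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)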
class CTCTrainer(Trainer):
def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
"""
Perform a training step on a batch of inputs.
Subclass and override to inject custom behavior.
Args:
model (:obj:`nn.Module`):
The model to train.
inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
The inputs and targets of the model.
The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
argument :obj:`labels`. Check your model's documentation for all accepted arguments.
Return:
:obj:`torch.Tensor`: The tensor with training loss on this batch.
"""
model.train()
inputs = self._prepare_inputs(inputs)
if self.use_amp:
with autocast():
loss = self.compute_loss(model, inputs)
else:
loss = self.compute_loss(model, inputs)
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
loss = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
loss = loss.sum() / (inputs["labels"] >= 0).sum()
else:
raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss).backward()
# elif self.use_apex:
# with amp.scale_loss(loss, self.optimizer) as scaled_loss:
# scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss)
else:
loss.backward()
return loss.detach()
# add less aggressive smoothing to progress bar for better estimate
class CustomProgressBarCallback(transformers.trainer_callback.ProgressCallback):
def on_train_begin(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar = tqdm(total=state.max_steps, smoothing=0.1)
self.current_step = 0
# solution from https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586/6
class GroupedLengthsTrainer(CTCTrainer):
# length_field_name should possibly be part of TrainingArguments instead
def __init__(self, train_seq_lengths: List[int], *args, **kwargs):
super().__init__(*args, **kwargs)
self.train_seq_lengths = train_seq_lengths
def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:
if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(
self.train_dataset, collections.abc.Sized
):
return None
# Build the sampler.
if self.args.group_by_length:
# lengths = self.train_dataset[self.length_field_name] if self.length_field_name is not None else None
model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
if self.args.world_size <= 1:
return LengthGroupedSampler(
self.train_dataset, self.args.train_batch_size, lengths=self.train_seq_lengths,
model_input_name=model_input_name
)
else:
return DistributedLengthGroupedSampler(
self.train_dataset,
self.args.train_batch_size,
num_replicas=self.args.world_size,
rank=self.args.process_index,
lengths=self.train_seq_lengths,
model_input_name=model_input_name,
)
else:
return super()._get_train_sampler()
wer_metric = load_metric("wer")
def compute_metrics(processor):
def __call__(pred):
pred_logits = pred.predictions
pred_ids = np.argmax(pred_logits, axis=-1)
pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
pred_str = processor.batch_decode(pred_ids)
# we do not want to group tokens when computing the metrics
label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
wer = wer_metric.compute(predictions=pred_str, references=label_str)
return {"wer": wer}
return __call__
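# Illustrative sketch (assumed usage): bind the processor and pass the
# resulting callable to the trainer; model, training_args, the datasets and
# data_collator are assumed to be defined elsewhere.
#
#     trainer = CTCTrainer(model=model, args=training_args,
#                          data_collator=data_collator,
#                          compute_metrics=compute_metrics(processor),
#                          train_dataset=train_dataset, eval_dataset=eval_dataset,
#                          tokenizer=processor.feature_extractor)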
| 2.734375 | 3 |
apps/app/urls.py | MiloshBogdanovic/Auri-Soft | 0 | 12792715 | <reponame>MiloshBogdanovic/Auri-Soft
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from django.urls import path, re_path
from apps.app import views
urlpatterns = [
# The home page
path('', views.index, name='home'),
# Matches any html file
path('bonus-faccata/', views.bonus, name='bonus'),
path('bonus-faccata/<int:fff>', views.data_iniziali_view, name='data-iniziali'),
path('bonus-faccata/legal<int:form>/<int:fff>', views.legal, name='legal'),
path('bonus-faccata/individual<int:form>/<int:fff>', views.individual, name='individual'),
path('bonus-faccata/catastal<int:form>', views.catastal, name='catastal'),
path('search', views.search, name='search_results'),
# Api views for editing tables
path('edit-table-data', views.save_table_data, name='edit-table-data'),
path('condominium', views.condo_list, name="condominium"),
path('catastal', views.catastal_list, name="catastal"),
path('admin-legal', views.admin_legal_list, name="admin-legal"),
path('admin-individual', views.admin_individual_list, name="admin-individual"),
path('edit-form/<str:table>/<int:id>', views.edit_form, name="edit-form"),
path('generate-contract/<int:id>', views.generate_contract, name="generate-contract"),
re_path(r'^.*\.*', views.pages, name='pages')
]
| 1.90625 | 2 |
Curso_Guanabara/aula57.py | lucianojunnior17/Python | 1 | 12792716 | <filename>Curso_Guanabara/aula57.py<gh_stars>1-10
sexo = str(input('Informe seu sexo : ')).strip().upper()[0]
while sexo not in 'MmFf':
sexo = str(input('Dados invalidos Por favor digite novamente : ')).strip().upper()[0]
print('Sexo {} registrado com sucesso'.format(sexo))
| 3.65625 | 4 |
Source/Chapter5/Linear.py | irmoralesb/MLForDevsBook | 0 | 12792717 | from Chapter5.TransferFunction import TransferFunction
import numpy as np
class Linear(TransferFunction):
def getTransferFunction(x):
return x
def getTransferFunctionDerivative(x):
return np.ones(len(x))
| 3.0625 | 3 |
pillow_heif/__init__.py | bigcat88/pillow_heif | 20 | 12792718 | <gh_stars>10-100
from .constants import * # pylint: disable=unused-wildcard-import
from .reader import HeifFile, UndecodedHeifFile, check, read, open # pylint: disable=redefined-builtin,unused-import
from .writer import write # pylint: disable=unused-import
from .error import HeifError # pylint: disable=unused-import
from .as_opener import register_heif_opener, check_heif_magic # pylint: disable=unused-import
from . import _libheif # pylint: disable=import-self
__version__ = "0.1.4"
def libheif_version():
return _libheif.ffi.string(_libheif.lib.heif_get_version()).decode()
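# Illustrative sketch (assumed usage): after registering the opener, Pillow
# can open HEIF images directly; the file name is a placeholder.
#
#     from PIL import Image
#     register_heif_opener()
#     image = Image.open("example.heic")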
| 1.757813 | 2 |
notebooks/GTO/Transits/GJ436_GRISMR/PyNRC_2D_Spec_Gen.py | kammerje/pynrc | 1 | 12792719 | import numpy, sys, math, batman
import matplotlib.pyplot as plt
from scipy import interpolate
file = numpy.load('GJ436b_Trans_SED.npz')
SEDarray = file['SEDarray']
print(SEDarray.shape)
plt.imshow(SEDarray)
plt.show()
stellarwave, stellarspec = numpy.loadtxt('ODFNEW_GJ436.spec', unpack=True, skiprows=800)
stellarwave /= 10000. # to um
relevant = numpy.where((stellarwave>1.5) & (stellarwave<5.5))
stellarwave = stellarwave[relevant]
stellarspec = stellarspec[relevant]
StellarInterp = interpolate.interp1d(stellarwave, stellarspec, kind='cubic')
planetwave, planetspec = numpy.loadtxt('../Transmission_Spec/GJ436b_trans_PyNRC_GRISMR.txt', unpack=True)
PlanetInterp = interpolate.interp1d(planetwave, planetspec, kind='cubic')
time = numpy.linspace(0.0,0.1,5000)
f = open('../BATMAN_Generation/Used/BatmanParams_PyNRC_GRISMR.txt', 'r')
params = batman.TransitParams
params.t0 = float(f.readline().split('=')[1]) # hardcoded readlines b/c the file I'm using has a fixed format
params.per = float(f.readline().split('=')[1])
params.inc = float(f.readline().split('=')[1])
params.rp = float(f.readline().split('=')[1])
params.a = float(f.readline().split('=')[1])
params.w = float(f.readline().split('=')[1])
params.ecc = float(f.readline().split('=')[1])
params.fp = float(f.readline().split('=')[1])
params.t_secondary = float(f.readline().split('=')[1])
limbdark = f.readline().split('=')[1] # ugh
u1 = float(limbdark.split(',')[0][2:])
u2 = float(limbdark.split(',')[1][1:-2])
params.u = [u1, u2]
params.limb_dark = "quadratic"
transitmodel = batman.TransitModel(params, time) # creates a transit model object using the time array; we can change the depth now by changing what's in params
SEDarray = numpy.zeros(time.shape[0]) # initialize so that we can vstack onto this
wave = numpy.linspace(1.75,5.25,3500)
for waveval in wave:
params.rp = math.sqrt(PlanetInterp(waveval)) # sqrt b/c trans. spec is in depth, but batman wants rp/rs
fluxtransit = transitmodel.light_curve(params)
actualflux = fluxtransit * StellarInterp(waveval)
SEDarray = numpy.vstack((SEDarray, actualflux))
SEDarray = numpy.delete(SEDarray, 0, 0) # trim that initial row with all zeroes
numpy.savez('GJ436b_Trans_SED', SEDarray=SEDarray, time=time, wave=wave)
plt.imshow(SEDarray)
plt.show() | 1.9375 | 2 |
wagtailmenus/migrations/0010_auto_20160201_1558.py | pierremanceaux/wagtailmenus | 329 | 12792720 | <reponame>pierremanceaux/wagtailmenus
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('wagtailmenus', '0009_auto_20160201_0859'),
]
operations = [
migrations.RenameField(
model_name='mainmenuitem',
old_name='add_subnav',
new_name='allow_subnav',
),
]
| 1.570313 | 2 |
envrun/utils.py | JanLikar/envrun | 1 | 12792721 | <reponame>JanLikar/envrun
import sys
def eprint(*args, **kwargs):
"""Print to stderr."""
print(*args, file=sys.stderr, **kwargs)
def bail(error: str):
"""Print message to stderr and exit with an error code."""
eprint(error)
sys.exit(1)
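

# Hedged usage sketch (the CLI wording below is illustrative, not taken from
# this project): bail() is meant for unrecoverable argument/setup errors, e.g.
#
#     if not args:
#         bail("usage: envrun <command> [args...]")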
| 2.265625 | 2 |
test/integration/data_consistency_test.py | dineshsonachalam/dynofunc | 6 | 12792722 | <gh_stars>1-10
import json
import pytest
from functools import partial
from boto3 import client
from test.integration.fixtures import db
from dynofunc import (
create,
find,
add,
update,
delete,
query
)
def test_numbers_are_not_changed(db):
"""Asserts that numbers inserted into dynamo are not converted
to another type - **namely Decimals**. See https://github.com/boto/boto3/issues/369
This test is to assert that dynofunc correctly handles this by converting
any Decimals that dynamo returns back to a default json parsable property.
"""
db(create(
table_name='data_const',
hash_key='id'))
db(add(table_name='data_const', item={
'id': 'aaaaaa',
'prefrences': {
'volume': 9
}
}))
user = db(find(table_name='data_const', key={
'id': 'aaaaaa'
}))
volume = user.item().get('prefrences').get('volume')
assert volume == 9
assert isinstance(volume, int)
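
    # Hedged illustration (not executed in this test): reading the same item
    # with plain boto3 would surface the stored number as a Decimal, which is
    # exactly the behaviour dynofunc normalizes away.  Reusing the table name
    # here is an assumption for illustration.
    #
    #     import boto3
    #     raw = boto3.resource('dynamodb').Table('data_const').get_item(
    #         Key={'id': 'aaaaaa'})['Item']
    #     raw['prefrences']['volume']   # -> Decimal('9') instead of int 9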
| 2.53125 | 3 |
src/zapv2/users.py | tnir/zap-api-python | 146 | 12792723 | # Zed Attack Proxy (ZAP) and its related class files.
#
# ZAP is an HTTP/HTTPS proxy for assessing web application security.
#
# Copyright 2017 the ZAP development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file was automatically generated.
"""
import six
class users(object):
def __init__(self, zap):
self.zap = zap
def users_list(self, contextid=None):
"""
Gets a list of users that belong to the context with the given ID, or all users if none provided.
"""
params = {}
if contextid is not None:
params['contextId'] = contextid
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/usersList/', params)))
def get_user_by_id(self, contextid, userid):
"""
Gets the data of the user with the given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getUserById/', {'contextId': contextid, 'userId': userid})))
def get_authentication_credentials_config_params(self, contextid):
"""
Gets the configuration parameters for the credentials of the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentialsConfigParams/', {'contextId': contextid})))
def get_authentication_credentials(self, contextid, userid):
"""
Gets the authentication credentials of the user with given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationCredentials/', {'contextId': contextid, 'userId': userid})))
def get_authentication_state(self, contextid, userid):
"""
Gets the authentication state information for the user identified by the Context and User Ids.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationState/', {'contextId': contextid, 'userId': userid})))
def get_authentication_session(self, contextid, userid):
"""
Gets the authentication session information for the user identified by the Context and User Ids, e.g. cookies and realm credentials.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/view/getAuthenticationSession/', {'contextId': contextid, 'userId': userid})))
def new_user(self, contextid, name, apikey=''):
"""
Creates a new user with the given name for the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/newUser/', {'contextId': contextid, 'name': name, 'apikey': apikey})))
def remove_user(self, contextid, userid, apikey=''):
"""
Removes the user with the given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/removeUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))
def set_user_enabled(self, contextid, userid, enabled, apikey=''):
"""
Sets whether or not the user, with the given ID that belongs to the context with the given ID, should be enabled.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserEnabled/', {'contextId': contextid, 'userId': userid, 'enabled': enabled, 'apikey': apikey})))
def set_user_name(self, contextid, userid, name, apikey=''):
"""
Renames the user with the given ID that belongs to the context with the given ID.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setUserName/', {'contextId': contextid, 'userId': userid, 'name': name, 'apikey': apikey})))
def set_authentication_credentials(self, contextid, userid, authcredentialsconfigparams=None, apikey=''):
"""
Sets the authentication credentials for the user with the given ID that belongs to the context with the given ID.
"""
params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
if authcredentialsconfigparams is not None:
params['authCredentialsConfigParams'] = authcredentialsconfigparams
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationCredentials/', params)))
def authenticate_as_user(self, contextid, userid, apikey=''):
"""
Tries to authenticate as the identified user, returning the authentication request and whether it appears to have succeeded.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/authenticateAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))
def poll_as_user(self, contextid, userid, apikey=''):
"""
Tries to poll as the identified user, returning the authentication request and whether it appears to have succeeded. This will only work if the polling verification strategy has been configured.
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/pollAsUser/', {'contextId': contextid, 'userId': userid, 'apikey': apikey})))
def set_authentication_state(self, contextid, userid, lastpollresult=None, lastpolltimeinms=None, requestssincelastpoll=None, apikey=''):
"""
Sets fields in the authentication state for the user identified by the Context and User Ids.
"""
params = {'contextId': contextid, 'userId': userid, 'apikey': apikey}
if lastpollresult is not None:
params['lastPollResult'] = lastpollresult
if lastpolltimeinms is not None:
params['lastPollTimeInMs'] = lastpolltimeinms
if requestssincelastpoll is not None:
params['requestsSinceLastPoll'] = requestssincelastpoll
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setAuthenticationState/', params)))
def set_cookie(self, contextid, userid, domain, name, value, path=None, secure=None, apikey=''):
"""
Sets the specified cookie for the user identified by the Context and User Ids.
"""
params = {'contextId': contextid, 'userId': userid, 'domain': domain, 'name': name, 'value': value, 'apikey': apikey}
if path is not None:
params['path'] = path
if secure is not None:
params['secure'] = secure
return six.next(six.itervalues(self.zap._request(self.zap.base + 'users/action/setCookie/', params)))
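

# Hedged usage sketch (the API key, proxy address and context/user ids below
# are placeholder assumptions): this class is normally reached through a
# ZAPv2 client instance rather than instantiated directly, e.g.
#
#     from zapv2 import ZAPv2
#
#     zap = ZAPv2(apikey='changeme',
#                 proxies={'http': 'http://127.0.0.1:8080',
#                          'https': 'http://127.0.0.1:8080'})
#     user_id = zap.users.new_user(contextid=1, name='test-user')
#     zap.users.set_user_enabled(contextid=1, userid=user_id, enabled='true')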
| 2.234375 | 2 |
webgrid/tests/test_testing.py | sourcery-ai-bot/webgrid | 9 | 12792724 | <reponame>sourcery-ai-bot/webgrid<filename>webgrid/tests/test_testing.py
import datetime as dt
from io import BytesIO
from unittest import mock
import pytest
import xlsxwriter
import xlwt
from webgrid import testing
from webgrid_ta.grids import RadioGrid, TemporalGrid
from webgrid_ta.model.entities import Person, db
def setup_module():
import flask
assert not flask.request
class TestAssertListEqual:
"""Verify the `assert_list_equal` method performs as expected"""
def test_simple_equivalents(self):
testing.assert_list_equal([], [])
testing.assert_list_equal([1, 2, 3], [1, 2, 3])
testing.assert_list_equal((1, 2, 3), [1, 2, 3])
testing.assert_list_equal('123', '123')
def test_different_lengths(self):
with pytest.raises(AssertionError):
testing.assert_list_equal([], [1])
with pytest.raises(AssertionError):
testing.assert_list_equal([1], [])
def test_different_elements(self):
with pytest.raises(AssertionError):
testing.assert_list_equal([1, 2, 3], [1, 2, 4])
def test_order_is_significant(self):
with pytest.raises(AssertionError):
testing.assert_list_equal([1, 2, 3], [2, 3, 1])
def test_generators(self):
testing.assert_list_equal((x for x in range(3)), (x for x in range(3)))
testing.assert_list_equal((x for x in range(3)), [0, 1, 2])
testing.assert_list_equal([0, 1, 2], (x for x in range(3)))
class TestAssertRenderedXlsMatches:
def setup(self):
self.workbook = xlwt.Workbook()
self.sheet = self.workbook.add_sheet('sheet1')
self.stream = BytesIO()
self.headers_written = False
def set_headers(self, headers):
for index, header in enumerate(headers):
self.sheet.write(0, index, header)
self.headers_written = True
def set_values(self, values):
row_offset = 0
if self.headers_written:
row_offset = 1
for row_index, row in enumerate(values, start=row_offset):
for col_index, value in enumerate(row):
self.sheet.write(row_index, col_index, value)
def assert_matches(self, xls_headers, xls_rows):
self.workbook.save(self.stream)
testing.assert_rendered_xls_matches(self.stream.getvalue(), xls_headers, xls_rows)
def test_empty_xls(self):
with pytest.raises(AssertionError):
testing.assert_rendered_xls_matches(b'', None, None)
with pytest.raises(AssertionError):
testing.assert_rendered_xls_matches(None, None, None)
with pytest.raises(AssertionError):
testing.assert_rendered_xls_matches(None, [], [])
def test_blank_workbook(self):
self.assert_matches([], [])
def test_single_header(self):
self.set_headers(['Foo'])
self.assert_matches(['Foo'], [])
def test_multiple_headers(self):
self.set_headers(['Foo', 'Bar'])
self.assert_matches(['Foo', 'Bar'], [])
def test_single_row(self):
self.set_values([[1, 2, 3]])
self.assert_matches([], [[1, 2, 3]])
def test_multiple_rows(self):
self.set_values([
[1, 2, 3],
[2, 3, 4]
])
self.assert_matches([], [
[1, 2, 3],
[2, 3, 4]
])
def test_headers_and_rows(self):
self.set_headers(['Foo', 'Bar'])
self.set_values([
[1, 2],
[2, 3],
[3, 4]
])
self.assert_matches(
['Foo', 'Bar'],
[
[1, 2],
[2, 3],
[3, 4]
]
)
def test_value_types(self):
self.set_values([
[1, 1.23, 'hello', None, True, False]
])
self.assert_matches([], [
[1, 1.23, 'hello', '', True, False]
])
def test_none_is_mangled(self):
self.set_values([
[None, 1, 1.23, 'hello', None]
])
# the left `None` becomes an empty string
# the right `None` gets dropped
self.assert_matches([], [
['', 1, 1.23, 'hello']
])
class TestAssertRenderedXlsxMatches:
def setup(self):
self.stream = BytesIO()
self.workbook = xlsxwriter.Workbook(self.stream, options={'in_memory': True})
self.sheet = self.workbook.add_worksheet('sheet1')
self.headers_written = None
def test_openpyxl_requirement(self):
with mock.patch('webgrid.testing.openpyxl', None):
with pytest.raises(Exception, match=r'openpyxl is required.*'):
self.assert_matches([], [])
def set_headers(self, headers):
assert self.headers_written is None
self.set_values(headers)
self.headers_written = len(headers)
def set_values(self, values):
row_offset = 0
if self.headers_written:
row_offset = self.headers_written
for row_index, row in enumerate(values, start=row_offset):
for col_index, value in enumerate(row):
self.sheet.write(row_index, col_index, value)
def assert_matches(self, xlsx_headers, xlsx_rows):
self.workbook.close()
testing.assert_rendered_xlsx_matches(self.workbook, xlsx_headers, xlsx_rows)
def test_empty_xlsx(self):
with pytest.raises(AssertionError):
testing.assert_rendered_xlsx_matches(b'', None, None)
with pytest.raises(AssertionError):
testing.assert_rendered_xlsx_matches(None, None, None)
with pytest.raises(AssertionError):
testing.assert_rendered_xlsx_matches(None, [], [])
def test_blank_workbook(self):
self.assert_matches([], [])
def test_single_header(self):
self.set_headers([['Foo']])
self.assert_matches([['Foo']], [])
def test_multiple_headers(self):
self.set_headers([['Foo', 'Bar']])
self.assert_matches([['Foo', 'Bar']], [])
def test_single_row(self):
self.set_values([[1, 2, 3]])
self.assert_matches([], [[1, 2, 3]])
def test_multiple_rows(self):
self.set_values([
[1, 2, 3],
[2, 3, 4]
])
self.assert_matches([], [
[1, 2, 3],
[2, 3, 4]
])
def test_headers_and_rows(self):
self.set_headers([
['Foo', 'Bar'],
['Snoopy', 'Dog'],
])
self.set_values([
[1, 2],
[2, 3],
[3, 4]
])
self.assert_matches(
[
['Foo', 'Bar'],
['Snoopy', 'Dog'],
],
[
[1, 2],
[2, 3],
[3, 4]
]
)
def test_value_types(self):
self.set_values([
[1, 1.23, 'hello', None, True, False]
])
self.assert_matches([], [
[1, 1.23, 'hello', None, True, False]
])
def test_none_is_mangled(self):
self.set_values([
[None, 1, 1.23, 'hello', None]
])
# the right `None` gets dropped
self.assert_matches([], [
[None, 1, 1.23, 'hello']
])
class TestGridBase(testing.GridBase):
grid_cls = TemporalGrid
sort_tests = (
('createdts', 'persons.createdts'),
('due_date', 'persons.due_date'),
('start_time', 'persons.start_time'),
)
@classmethod
def setup_class(cls):
if db.engine.dialect.name != 'sqlite':
pytest.skip('sqlite-only test')
@property
def filters(self):
return (
('createdts', 'eq', dt.datetime(2018, 1, 1, 5, 30),
"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'"),
('due_date', 'eq', dt.date(2018, 1, 1), "WHERE persons.due_date = '2018-01-01'"),
('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'),
"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)"),
)
def setup_method(self, _):
Person.delete_cascaded()
Person.testing_create(
createdts=dt.datetime(2018, 1, 1, 5, 30),
due_date=dt.date(2019, 5, 31),
start_time=dt.time(1, 30),
)
def test_expected_rows(self):
self.expect_table_header((('Created', 'Due Date', 'Start Time'), ))
self.expect_table_contents((('01/01/2018 05:30 AM', '05/31/2019', '01:30 AM'), ))
class TestGridBasePG(testing.GridBase):
grid_cls = TemporalGrid
sort_tests = (
('createdts', 'persons.createdts'),
('due_date', 'persons.due_date'),
('start_time', 'persons.start_time'),
)
@classmethod
def setup_class(cls):
if db.engine.dialect.name != 'postgresql':
pytest.skip('postgres-only test')
@property
def filters(self):
return (
('createdts', 'eq', dt.datetime(2018, 1, 1, 5, 30),
"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'"),
('due_date', 'eq', dt.date(2018, 1, 1), "WHERE persons.due_date = '2018-01-01'"),
('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'),
"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME WITHOUT TIME ZONE)"),
)
class TestGridBaseMSSQLDates(testing.MSSQLGridBase):
grid_cls = TemporalGrid
sort_tests = (
('createdts', 'persons.createdts'),
('due_date', 'persons.due_date'),
('start_time', 'persons.start_time'),
)
@classmethod
def setup_class(cls):
if db.engine.dialect.name != 'mssql':
pytest.skip('sql server-only test')
@property
def filters(self):
return (
('createdts', 'eq', dt.datetime(2018, 1, 1, 5, 30),
"WHERE persons.createdts BETWEEN '2018-01-01 05:30:00.000000'"),
('due_date', 'eq', '2018-01-01', "WHERE persons.due_date = '2018-01-01'"),
('start_time', 'eq', dt.time(1, 30).strftime('%H:%M'),
"WHERE persons.start_time BETWEEN CAST('01:30:00.000000' AS TIME)"),
)
class TestGridBaseMSSQLStrings(testing.MSSQLGridBase):
grid_cls = RadioGrid
@property
def filters(self):
return (
('make', 'eq', 'foo', "WHERE sabwp_radios.make = 'foo'"),
('model', 'eq', 'foo', "WHERE sabwp_radios.model = 'foo'"),
('year', 'eq', '1945', "WHERE sabwp_radios.year = 1945"),
)
@classmethod
def setup_class(cls):
if db.engine.dialect.name != 'mssql':
pytest.skip('sql server-only test')
| 2.375 | 2 |
setup.py | jianlins/quicksect | 0 | 12792725 | from Cython.Build import cythonize
from setuptools.extension import Extension
from setuptools import setup, find_packages
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
include_dirs = [dir_path + "/src", dir_path]
macros = [("CYTHON_TRACE", "1")]
extensions = [Extension("quicksect", ["src/quicksect.pyx"],
define_macros=macros,
include_dirs=include_dirs)]
setup(version='0.2.2',
name='quicksect',
description="fast, simple interval intersection",
ext_modules = cythonize(extensions, language_level=3),
long_description=open('README.rst').read(),
author="<NAME>,<NAME>",
author_email="<EMAIL>, <EMAIL>",
packages=find_packages(),
setup_requires=['cython'],
install_requires=['cython'],
test_suite='nose.collector',
license = 'The MIT License',
tests_require='nose',
package_data={'': ['*.pyx', '*.pxd']},
include_dirs=["."],
)
| 1.578125 | 2 |
server/python_server/notes_app_server/server.py | jarmoj/notesapp-react-redux-boilerplate | 1 | 12792726 | """A simple server with a REST API for the Notes App frontend."""
import tornado.escape
import tornado.ioloop
import tornado.web
import tornado.escape
from tornado_cors import CorsMixin
import logging
import json
import os
import signal
import sys
PORT = 3456
DB_PATH = "db.json"
TEST_DB_PATH = "test/test_db.json"
db = {
'version': {
'version': '0.0.1',
'api_version': '0.1',
'is_test_db': True
},
'notes': [
{
'title': 'some note title',
'text': 'some note text'
},
{
'title': 'other note title',
'text': 'other note text'
}
]
}
def tokenize(s):
"""Split string into tokens."""
return [p.lower() for p in s.split(" ") if p]
class NoteAlreadyExists(Exception):
"""Raised if trying to add a new note with title that is already taken."""
def __init__(self, title):
"""Show exception with the note title."""
super(NoteAlreadyExists, self).__init__(title)
class NoSuchNoteExists(Exception):
"""Raised if trying to delete a note that doesn't exist."""
def __init__(self, title):
"""Show exception with the note title."""
super(NoSuchNoteExists, self).__init__(title)
def add_note(note):
"""Add note to notes."""
if find_note(note["title"]):
raise NoteAlreadyExists(note["title"])
db['notes'].append(note)
def delete_note(title):
"""Delete note from notes."""
found = find_note(title)
if not found:
raise NoSuchNoteExists(title)
del db['notes'][found[0]]
def update_note(title, note):
"""Update an existing note with a given title, possibly retitling it."""
found = find_note(title)
if not found:
raise NoSuchNoteExists(title)
note["timestamp"]["created"] = found[1]["timestamp"]["created"]
db['notes'][found[0]] = note
def find_note(title):
"""Return (index, note) of note that has title or False if no such note."""
for i, note in enumerate(db['notes']):
if note["title"] == title:
return i, note
return False
def search_notes(query):
"""Search notes by query."""
def match_token(note, tokens):
"""Test if note contains any of the tokens.
A very simple implementation still. Return False if any of the tokens
is missing, True if any match.
"""
tokens_found = []
for token in tokens:
s = note["title"] + " " + note["text"]
if token not in s.lower():
return False
tokens_found.append(token)
return len(tokens_found) == len(tokens)
notes = []
query_tokens = tokenize(query)
for note in db['notes']:
if match_token(note, query_tokens):
notes.append(note)
return notes
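

# Hedged illustration (uses the seed data defined above): a query must match
# every token, so 'other note' matches only the second seed note, and a token
# that appears nowhere returns an empty list.
#
#     search_notes('other note')   # -> [{'title': 'other note title', ...}]
#     search_notes('missing')      # -> []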
class CorsBaseHandler(CorsMixin, tornado.web.RequestHandler):
"""Set up CORS and allow separate origin for the client."""
CORS_ORIGIN = 'http://localhost:8080'
CORS_METHODS = 'GET, PUT, DELETE'
CORS_HEADERS = (
'Access-Control-Allow-Headers, '
'Origin, '
'Accept, '
'X-Requested-With, '
'Content-Type, '
'Access-Control-Request-Method, '
'Access-Control-Request-Headers'
)
class VersionRootHandler(CorsBaseHandler):
"""Handle /version ."""
def get(self):
"""Handle get and return verision and api_version."""
response = {
'version': '0.0.1',
'api_version': '0.1',
'is_test_db': True
}
self.write(response)
class NotesRootHandler(CorsBaseHandler):
"""Handle /notes ."""
def get(self):
"""Handle get and return all notes from database."""
response = {
'notes': db['notes']
}
self.write(response)
def put(self, *args, **kwargs):
"""Handle put and create / update give note."""
note = json.loads(self.request.body.decode('utf-8'))
title_update = note["title"]
if isinstance(title_update, dict):
find_title = title_update["old"]
new_title = title_update["new"]
else:
find_title = title_update
new_title = title_update
_note = {
'title': new_title,
'text': note["text"],
'timestamp': note["timestamp"]
}
found = find_note(find_title)
if not found:
add_note(_note)
self.clear()
self.set_status(200)
self.finish("Note '{}' added.".format(find_title))
else:
update_note(find_title, _note)
self.clear()
self.set_status(204)
self.finish("Note '{}' updated.".format(new_title))
class NoteHandler(CorsBaseHandler):
"""Handle /note/(.*) .
/note/:title
GET
DELETE
"""
def get(self, title):
"""Handle get and return note with given title from database."""
found = find_note(title)
if not found:
self.clear()
self.set_status(404)
self.finish("Note '{}'' not found!".format(title))
return
response = found[1]
self.write(response)
def delete(self, title):
"""Handle delete and delete note with given title from database."""
try:
delete_note(title)
except NoSuchNoteExists:
self.clear()
self.set_status(404)
self.finish("Note '{}' does not even exist.".format(title))
class NotesTitlesHandler(CorsBaseHandler):
"""Handle /notes/titles ."""
def get(self):
"""Handle get and return all note titles from database."""
response = {
'note_titles': [note["title"] for note in db['notes']]
}
self.write(response)
class NotesSearchHandler(CorsBaseHandler):
"""Handle /search?q=(.*) ."""
def get(self):
"""Handle get and return all notes matching search query."""
response = {
'notes': []
}
if self.get_argument('q') == "":
response = {
'notes': db['notes']
}
else:
response = {
'notes': search_notes(self.get_argument('q'))
}
self.write(response)
class TestBeginHandler(CorsBaseHandler):
"""Handle /test/begin ."""
def get(self):
"""Setup test to have expected state."""
read_db()
class TestEndHandler(CorsBaseHandler):
"""Handle /test/begin ."""
def get(self):
"""Setup test to have end with expected state afterwards."""
read_db()
def is_using_test_db():
"""Check if started with use test db flag."""
return "--use-test-db" in sys.argv
routes = [
(r"/version", VersionRootHandler),
(r"/notes", NotesRootHandler),
(r"/notes/titles", NotesTitlesHandler),
(r"/note/(.*)", NoteHandler),
(r"/search", NotesSearchHandler),
]
test_routes = [
(r"/test/begin", TestBeginHandler),
(r"/test/end", TestEndHandler)
]
if is_using_test_db():
routes.extend(test_routes)
application = tornado.web.Application(routes)
def read_db():
"""'Read in' database for use."""
global db
db_path = DB_PATH
if is_using_test_db():
db_path = TEST_DB_PATH
logging.info("server path:", os.path.abspath(__file__))
logging.info("server: db_path:", db_path)
with open(db_path) as f:
db = json.load(f)
is_closing = False
def signal_handler(signum, frame):
"""Signal handler for closing tornado."""
global is_closing
logging.info('exiting...')
is_closing = True
def try_exit():
"""Try closing tornado."""
global is_closing
if is_closing:
# clean up here
tornado.ioloop.IOLoop.instance().stop()
logging.info('exit success')
def start():
"""Start tornado."""
logging.info("Starting server...")
read_db()
signal.signal(signal.SIGINT, signal_handler)
application.listen(PORT)
tornado.ioloop.PeriodicCallback(try_exit, 500).start()
tornado.ioloop.IOLoop.instance().start()
logging.info("Server stopped.")
if __name__ == "__main__":
start()
| 2.765625 | 3 |
test/inserted_test.py | screamingskulls/sofi | 402 | 12792727 | from sofi.ui import Inserted
def test_basic():
assert(str(Inserted()) == "<ins></ins>")
def test_text():
assert(str(Inserted("text")) == "<ins>text</ins>")
def test_custom_class_ident_style_and_attrs():
assert(str(Inserted("text", cl='abclass', ident='123', style="font-size:0.9em;", attrs={"data-test": 'abc'}))
== "<ins id=\"123\" class=\"abclass\" style=\"font-size:0.9em;\" data-test=\"abc\">text</ins>")
| 2.375 | 2 |
Unsupervised-Texture-Segmentation-Using-Gabor-Filter-master/_utils.py | yimingq/DC-Project | 10 | 12792728 |
import math
import sklearn.cluster as clstr
import cv2
import numpy as np
from PIL import Image, ImageOps, ImageDraw
import os, glob
import matplotlib.pyplot as pyplt
import scipy.cluster.vq as vq
import argparse
import glob
# We can specify these if need be.
brodatz = "D:\\ImageProcessing\\project\\OriginalBrodatz\\"
concatOut = "D:\\ImageProcessing\\project\\concat.png"
# This is the function that checks boundaries when performing spatial convolution.
def getRanges_for_window_with_adjust(row, col, height, width, W):
mRange = []
nRange = []
mRange.append(0)
mRange.append(W-1)
nRange.append(0)
nRange.append(W-1)
initm = int(round(row - math.floor(W / 2)))
initn = int(round(col - math.floor(W / 2)))
if (initm < 0):
mRange[1] += initm
initm = 0
if (initn < 0):
nRange[1] += initn
initn = 0
if(initm + mRange[1] > (height - 1)):
diff = ((initm + mRange[1]) - (height - 1))
mRange[1] -= diff
if(initn + nRange[1] > (width-1)):
diff = ((initn + nRange[1]) - (width - 1))
nRange[1] -= diff
windowHeight = mRange[1] - mRange[0]
windowWidth = nRange[1] - nRange[0]
return int(round(windowHeight)), int(round(windowWidth)), int(round(initm)), int(round(initn))
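

# Hedged demo of the boundary-clamped window above (not part of the original
# pipeline): slices a neighbourhood around a pixel near the image edge without
# running past the borders.  The synthetic 10x10 image is an assumption.
def _clamped_window_example(row=0, col=2, W=5):
    img = np.arange(100).reshape(10, 10)
    h, w, m0, n0 = getRanges_for_window_with_adjust(row, col, img.shape[0], img.shape[1], W)
    # the returned ranges are inclusive, hence the +1 when slicing
    return img[m0:m0 + h + 1, n0:n0 + w + 1]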
# Used to normalize data before clustering occurs.
# Whiten sets the variance to be 1 (unit variance),
# spatial weighting also takes place here.
# The mean can be subtracted if specified by the implementation.
def normalizeData(featureVectors, setMeanToZero, spatialWeight=1):
means = []
for col in range(0, len(featureVectors[0])):
colMean = 0
for row in range(0, len(featureVectors)):
colMean += featureVectors[row][col]
colMean /= len(featureVectors)
means.append(colMean)
for col in range(2, len(featureVectors[0])):
for row in range(0, len(featureVectors)):
featureVectors[row][col] -= means[col]
copy = vq.whiten(featureVectors)
if (setMeanToZero):
for row in range(0, len(featureVectors)):
for col in range(0, len(featureVectors[0])):
copy[row][col] -= means[col]
for row in range(0, len(featureVectors)):
copy[row][0] *= spatialWeight
copy[row][1] *= spatialWeight
return copy
# Create the feature vectors and add in row and column data
def constructFeatureVectors(featureImages, img):
featureVectors = []
height, width = img.shape
for row in range(height):
for col in range(width):
featureVector = []
featureVector.append(row)
featureVector.append(col)
for featureImage in featureImages:
featureVector.append(featureImage[row][col])
featureVectors.append(featureVector)
return featureVectors
# An extra function if we are looking to save our feature vectors for later
def printFeatureVectors(outDir, featureVectors):
f = open(outDir, 'w')
for vector in featureVectors:
for item in vector:
f.write(str(item) + " ")
f.write("\n")
f.close()
# If we want to read in some feature vectors instead of creating them.
def readInFeatureVectorsFromFile(dir):
list = [line.rstrip('\n') for line in open(dir)]
list = [i.split() for i in list]
newList = []
for row in list:
newRow = []
for item in row:
floatitem = float(item)
newRow.append(floatitem)
newList.append(newRow)
return newList
# Print the intermediate results before clustering occurs
def printFeatureImages(featureImages, naming, printlocation):
i =0
for image in featureImages:
# Normalize to intensity values
imageToPrint = cv2.normalize(image, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
cv2.imwrite(printlocation + "\\" + naming + str(i) + ".png", imageToPrint)
i+=1
# Print the final result, the user can also choose to make the output grey
def printClassifiedImage(labels, k, img, outdir, greyOutput):
if(greyOutput):
labels = labels.reshape(img.shape)
for row in range(0, len(labels)):
for col in range(0, len(labels[0])):
outputIntensity = (255/k)*labels[row][col]
labels[row][col] = outputIntensity
cv2.imwrite(outdir, labels.reshape(img.shape))
else:
pyplt.imsave(outdir, labels.reshape(img.shape))
# Call the k means algorithm for classification
def clusterFeatureVectors(featureVectors, k):
kmeans = clstr.KMeans(n_clusters=k)
kmeans.fit(featureVectors)
labels = kmeans.labels_
return labels
# To clean up old filter and feature images if the user chose to print them.
def deleteExistingSubResults(outputPath):
for filename in os.listdir(outputPath):
if (filename.startswith("filter") or filename.startswith("feature")):
os.remove(filename)
# Checks user input (i.e. cannot have a negative mask size value)
def check_positive_int(n):
int_n = int(n)
if int_n < 0:
raise argparse.ArgumentTypeError("%s is negative" % n)
return int_n
# Checks user input (i.e. cannot have a negative weighting value)
def check_positive_float(n):
float_n = float(n)
if float_n < 0:
raise argparse.ArgumentTypeError("%s is negative " % n)
return float_n
#--------------------------------------------------------------------------
# All of the functions below were left here to demonstrate how I went about
# cropping the input images. I left them here, in the case that Brodatz
# textures were downloaded and cropped as new input images.
#--------------------------------------------------------------------------
def cropTexture(x_offset, Y_offset, width, height, inDir, outDir):
box = (x_offset, Y_offset, width, height)
image = Image.open(inDir)
crop = image.crop(box)
crop.save(outDir, "PNG")
def deleteCroppedImages():
for filename in glob.glob(brodatz + "*crop*"):
os.remove(filename)
def concatentationOfBrodatzTexturesIntoRows(pathsToImages, outdir, axisType):
images = []
for thisImage in pathsToImages:
images.append(cv2.imread(thisImage, cv2.CV_LOAD_IMAGE_GRAYSCALE))
cv2.imwrite(outdir, np.concatenate(images, axis=axisType))
outimg = cv2.imread(outdir, cv2.CV_LOAD_IMAGE_GRAYSCALE)
return outimg
def createGrid(listOfBrodatzInts, outName, howManyPerRow):
listOfRowOutputs = []
for i in range(len(listOfBrodatzInts)):
brodatzCropInput = brodatz + "D" + str(listOfBrodatzInts[i]) + ".png"
brodatzCropOutput = brodatz + "cropD" + str(listOfBrodatzInts[i]) + ".png"
# 128x128 crops, in order to generate a 512x512 image
cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput)
listOfRowOutputs.append(brodatzCropOutput)
subOuts = [listOfRowOutputs[x:x + howManyPerRow] for x in xrange(0,len(listOfRowOutputs), howManyPerRow)]
dests = []
for i in range(len(subOuts)):
dest = brodatz + "cropRow" + str(i) + ".png"
dests.append(dest)
concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + "cropRow" + str(i) + ".png", 1)
concatentationOfBrodatzTexturesIntoRows(dests, brodatz + outName, 0)
# Destroy all sub crops (we can make this optional if we want!)
deleteCroppedImages()
def createGridWithCircle(listOfBrodatzInts, circleInt, outName):
listOfRowOutputs = []
for i in range(len(listOfBrodatzInts)):
brodatzCropInput = brodatz + "D" + str(listOfBrodatzInts[i]) + ".png"
brodatzCropOutput = brodatz + "cropD" + str(listOfBrodatzInts[i]) + ".png"
# 128x128 crops, in order to generate a 256x256 image
cropTexture(256, 256, 384, 384, brodatzCropInput, brodatzCropOutput)
listOfRowOutputs.append(brodatzCropOutput)
subOuts = [listOfRowOutputs[x:x + 2] for x in xrange(0, len(listOfRowOutputs), 2)]
dests = []
for i in range(len(subOuts)):
dest = brodatz + "cropRow" + str(i) + ".png"
dests.append(dest)
concatentationOfBrodatzTexturesIntoRows(subOuts[i], brodatz + "cropRow" + str(i) + ".png", 1)
concatentationOfBrodatzTexturesIntoRows(dests, brodatz + "Nat5crop.png", 0)
size = (128, 128)
mask = Image.new('L', size, color=255)
draw = ImageDraw.Draw(mask)
draw.ellipse((0, 0) + size, fill=0)
im = Image.open(brodatz + "D" + str(circleInt) + ".png")
output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))
output.paste(0, mask=mask)
output.save(brodatz + 'circlecrop.png', transparency=0)
img = Image.open(brodatz + 'circlecrop.png').convert("RGBA")
img_w, img_h = img.size
background = Image.open(brodatz + "Nat5crop.png")
bg_w, bg_h = background.size
offset = ((bg_w - img_w) / 2, (bg_h - img_h) / 2)
background.paste(output, offset, img)
background.save(brodatz + outName, format="png")
deleteCroppedImages()
def createTexturePair(pair, outName):
pathsToTemp = [brodatz + "D" + str(pair[0]) + ".png", brodatz + "D" + str(pair[1]) + ".png"]
cropTexture(256, 256, 384, 384, pathsToTemp[0], brodatz + "outcrop1.png")
cropTexture(256, 256, 384, 384, pathsToTemp[1], brodatz + "outcrop2.png")
cropsToConcat = [brodatz + "outcrop1.png", brodatz + "outcrop2.png"]
concatentationOfBrodatzTexturesIntoRows(cropsToConcat, outName, 1)
deleteCroppedImages()
#--------------------------------------------------------------------------
# Create test images
#--------------------------------------------------------------------------
# Note that I did not write this to have an exhaustive approach in mind,
# where I pair all of the textures to every other texture. If I did so,
# I would have made it a little more efficient, instead I just decided to
# use the images that were in the papers already.
# # We can use any of the 112 images from the Brodatz album here
# nat16 = [29,12,17,55,32,5,84,68,77,24,9,4,3,33,51,54]
# howManyPerRow = 4
# outName = "Nat16.png"
# createGrid(nat16, outName, howManyPerRow)
#
# grid4 = [3,68,17,77]
# howManyPerRow = 2
# outName = "grid4.png"
# createGrid(grid4, outName, howManyPerRow)
# #the last int is the circle in the middle of the image!
# nat5 = [77,55,84,17]
# circleInt = 24
# outName = 'Nat5.png'
# createGridWithCircle(nat5, circleInt, outName)
#
# texturePairs = [[17,77],[3,68],[3,17],[55,68]]
# count = 0
# for pair in texturePairs:
# outName = brodatz + "pair" + str(count) + ".png"
# createTexturePair(pair, outName)
# count += 1
| 2.578125 | 3 |
psc_kmed.py | suwangcompling/picking-apart-story-salads | 5 | 12792729 | # Copyright 2018 @<NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
sys.path.insert(0, os.getcwd())
import time
import random
import shutil
import dill
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, MultiRNNCell, DropoutWrapper
from helpers import Indexer, batch, checkpoint_model
from itertools import chain, product
from collections import defaultdict
from kmedoids import kMedoids
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics import accuracy_score
from pairwise_classifier import *
class MixtureReader:
def __init__(self, data_dir, data_type, context):
assert data_type in ['nyt', 'wiki']
self.data_dir = data_dir
self.data_type = data_type
self.context = context # int: 0 or context-length.
def get_mixture(self, filename):
if self.data_type == 'nyt':
return self.__get_nyt_mixture(filename)
else: # == wiki
return self.__get_wiki_mixture(filename)
def __get_nyt_mixture(self, filename):
da, db, doc_mix = dill.load(open(self.data_dir+filename, 'rb'))
doc_lbs = []
for sentcode in doc_mix:
if sentcode in da:
doc_lbs.append(0)
else:
doc_lbs.append(1)
if self.context:
CTX_LEN = self.context
doc_mix_flat = list(chain.from_iterable(doc_mix))
doc_mix_len = len(doc_mix_flat)
ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)])
return doc_mix, doc_lbs, ctx
return doc_mix, doc_lbs
def __get_wiki_mixture(self, filename):
doc_mix, doc_lbs = dill.load(open(self.data_dir+filename, 'rb'))
if self.context:
CTX_LEN = self.context
doc_mix_flat = list(chain.from_iterable(doc_mix))
doc_mix_len = len(doc_mix_flat)
ctx = np.array([doc_mix_flat[:CTX_LEN]]) if doc_mix_len>=CTX_LEN else np.array([doc_mix_flat+[0]*(CTX_LEN-doc_mix_len)])
return doc_mix, doc_lbs, ctx
return doc_mix, doc_lbs
class PscKMedoids:
def __init__(self, psc_clf, data_type):
self.psc_clf = psc_clf
self.mix_reader = MixtureReader(self.psc_clf.config['data_dir'],
data_type='nyt' if 'nyt' in self.psc_clf.config['data_dir'] else 'wiki',
context=self.psc_clf.config['context_length'] if self.psc_clf.config['context'] else 0)
self.out_file_path = psc_clf.config['out_file_path']
def __to_sentence(self, indices):
words = []
for index in indices:
word = self.psc_clf.indexer.get_object(index)
if word is None:
words.append('UNK')
else:
words.append(word)
return ' '.join(words)
def __to_labels(self, C, doc_len): # C: {cls:[datum_id, ...], ...}
lbs = [0]*doc_len
for idx in C[1]:
lbs[idx] = 1
return lbs
def __flip_clust(self, clust):
return np.array([0 if i==1 else 1 for i in clust])
def __clust_accuracy(self, true, pred):
return max(accuracy_score(true, pred),
accuracy_score(true, self.__flip_clust(pred)))
def __dist(self, x1, x2):
x1, x1_len = batch([x1])
x2, x2_len = batch([x2])
fd = {self.psc_clf.input_x1:x1, self.psc_clf.input_x1_length:x1_len,
self.psc_clf.input_x2:x2, self.psc_clf.input_x2_length:x2_len,
self.psc_clf.keep_prob:1.0}
if self.psc_clf.config['context']:
fd[self.psc_clf.input_ctx] = self.ctx
conf = self.psc_clf.sess.run(self.psc_clf.scores, feed_dict=fd)
return 1-conf[0]
def evaluate_single(self, doc_mix, doc_lbs, ctx=None, method='average', return_pred=True):
if ctx is not None:
self.ctx = ctx
doc_mix_sq, _ = batch(doc_mix)
doc_mix_sq = doc_mix_sq.T
_, doc_mix_clust = kMedoids(squareform(pdist(doc_mix_sq,metric=self.__dist)), 2)
doc_prd = self.__to_labels(doc_mix_clust, len(doc_mix))
acc = self.__clust_accuracy(doc_lbs, doc_prd)
if return_pred:
return acc, doc_prd
return acc
def evaluate_rand(self, k=100, verbose=True):
accs = []
filenames = np.random.choice(self.psc_clf.FILENAMES, size=k, replace=False)
if self.out_file_path is not None: # clear out file for new writing.
out_file = open(self.out_file_path, 'w')
for filename in filenames:
if self.mix_reader.context:
doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename)
                result = self.evaluate_single(doc_mix, doc_lbs, ctx, return_pred=self.out_file_path is not None)
else:
                doc_mix, doc_lbs = self.mix_reader.get_mixture(filename)
                result = self.evaluate_single(doc_mix, doc_lbs, return_pred=self.out_file_path is not None)
            if self.out_file_path is None:
acc = result
else:
acc, prd = result
out_file.write('FILE ID: ' + str(filename) + '\n')
for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix):
out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\n')
out_file.write('\n\n')
accs.append(acc)
if verbose:
print('File {}: acc = {}'.format(filename, acc))
        if self.out_file_path is not None:
            out_file.close()
avg_acc = np.mean(accs)
print('\nAverage accuracy = {}'.format(avg_acc))
return avg_acc
def evaluate_given(self, filenames, verbose=True):
accs = []
if self.out_file_path is not None: # clear out file for new writing.
out_file = open(self.out_file_path, 'w')
for filename in filenames:
if self.mix_reader.context:
doc_mix, doc_lbs, ctx = self.mix_reader.get_mixture(filename)
                result = self.evaluate_single(doc_mix, doc_lbs, ctx, return_pred=self.out_file_path is not None)
else:
doc_mix, doc_lbs = self.mix_reader.get_mixture(filename)
                result = self.evaluate_single(doc_mix, doc_lbs, return_pred=self.out_file_path is not None)
if self.out_file_path is None:
acc = result
else:
acc, prd = result
out_file.write('FILE ID: ' + str(filename) + '\n')
for prd_lb, true_lb, indices in zip(prd, doc_lbs, doc_mix):
out_file.write('TRUE = '+str(true_lb)+' | '+'PRED = '+str(prd_lb)+' | '+self.__to_sentence(indices)+'\n')
out_file.write('\n\n')
accs.append(acc)
if verbose:
print('File {}: acc = {}'.format(filename, acc))
        if self.out_file_path is not None:
            out_file.close()
avg_acc = np.mean(accs)
print('\nAverage accuracy = {}'.format(avg_acc))
return avg_acc
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int)
parser.add_argument('--vocab_size', type=int)
parser.add_argument('--emb_size', type=int)
parser.add_argument('--n_layer', type=int)
parser.add_argument('--hid_size', type=int)
parser.add_argument('--keep_prob', type=float)
parser.add_argument('--learning_rate', type=float)
parser.add_argument('--n_epoch', type=int)
parser.add_argument('--train_size', type=int)
parser.add_argument('--verbose', type=int)
parser.add_argument('--save_freq', type=int)
parser.add_argument('--data_dir', type=str)
parser.add_argument('--info_path', type=str)
parser.add_argument('--init_with_glove', type=bool)
parser.add_argument('--save_dir', type=str)
parser.add_argument('--save_name', type=str)
parser.add_argument('--restore_dir', type=str)
parser.add_argument('--restore_name', type=str)
parser.add_argument('--load_from_saved', type=bool)
parser.add_argument('--track_dir', type=str)
parser.add_argument('--new_track', type=bool)
parser.add_argument('--session_id', type=str)
parser.add_argument('--mutual_attention', type=bool)
parser.add_argument('--context', type=bool)
parser.add_argument('--context_length', type=int)
parser.add_argument('--out_file_path', type=str)
args = parser.parse_args()
config = {'batch_size': args.batch_size, 'vocab_size': args.vocab_size, 'emb_size': args.emb_size,
'n_layer': args.n_layer, 'hid_size': args.hid_size,
'keep_prob': args.keep_prob, 'learning_rate': args.learning_rate,
'n_epoch': args.n_epoch, 'train_size': args.train_size, 'verbose': args.verbose,
'save_freq': args.save_freq,
'data_dir': args.data_dir, 'info_path': args.info_path,
'init_with_glove': args.init_with_glove,
'save_dir': args.save_dir, 'save_name': args.save_name,
'restore_dir': args.restore_dir, 'restore_name': args.restore_name,
'load_from_saved': args.load_from_saved,
'track_dir': args.track_dir, 'new_track': args.new_track, 'session_id': args.session_id,
'mutual_attention': args.mutual_attention,
'context': args.context, 'context_length': args.context_length,
'out_file_path': args.out_file_path}
psc_clf = PairwiseSentenceClassifier(config)
kmed = PscKMedoids(psc_clf, data_type='nyt')
print('\n')
sample_files = os.listdir('nyt_sample/')
kmed.evaluate_given(sample_files)
| 1.789063 | 2 |
assets/data_asset/mnist_index_files.py | natashadsilva/sample.edge-mnist-notebook | 0 | 12792730 | <reponame>natashadsilva/sample.edge-mnist-notebook
import numpy as np
from PIL import Image, ImageOps
import io
# Training and test data set files, all labeled
FN_TEST_LABELS = 'data/mnist/t10k-labels-idx1-ubyte'
FN_TEST_IMAGES = 'data/mnist/t10k-images-idx3-ubyte'
FN_TRAIN_LABELS = 'data/mnist/train-labels-idx1-ubyte'
FN_TRAIN_IMAGES = 'data/mnist/train-images-idx3-ubyte'
# This function will read the entire file in and return a single multi-dimensional array,
# of the appropriate data type, rank, and dimensionality for the given file.
# Note that this may consume a lot of memory, and may take a while to read in the whole
# file before it returns.
def read_idx_file(filename, start=0, count=None):
# Helper map of type enum values to dtype strings for numpy
dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'}
dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8}
with open(filename, 'rb') as f:
# Ok, let's parse one of these files
# first read a uint32be as the magic number, yielding data type (size of each value, and format), and number of dimensions
dummy, = np.fromfile(f, dtype='>u2', count=1)
dte,dims = np.fromfile(f, dtype='>u1', count=2)
#print(dummy, dte, dtypes[dte], dtypesizes[dte], dims)
# Then comes a uint32be number per dimension, yielding the size of the n-dimensional array in that dimension
# Only after all those dimension sizes, comes the data, all big-endian.
# The arrays are in C-style, where the last dimension index changes most frequently.
dsizes = np.fromfile(f, dtype='>u4', count=dims)
unit_size = int(np.prod(dsizes[1:])) if len(dsizes) > 1 else 1
seek_delta = int(start * unit_size * dtypesizes[dte])
read_units = dsizes[0] if count is None else min(dsizes[0], count)
nshape = dsizes
nshape[0] = read_units
#print(dsizes)
#print(np.prod(dsizes), unit_size, seek_delta, read_units)
f.seek(seek_delta, 1)
# So now, we can loop over the outer dimensions, setting the indexes appropriately,
# and read the inner dimension as a vector all in one go
return np.reshape(np.fromfile(f, dtype=dtypes[dte], count=int(unit_size * read_units)), newshape=nshape, order='C')
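

# Hedged usage sketch (assumes the MNIST idx files referenced above exist on
# disk): whole-file reads return a single array whose leading dimension is the
# number of units requested.
#
#     train_labels = read_idx_file(FN_TRAIN_LABELS)               # shape (60000,)
#     some_images  = read_idx_file(FN_TRAIN_IMAGES, count=1000)   # shape (1000, 28, 28)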
# This version, on the other hand, is a generator, reading one unit from the file at a time.
# A "unit" in this context is all the data except the top-most dimension.
# So, a rank-1 file would just yield individual scalars on each call.
# A rank-3 file would yield rank-2 arrays on each call, consisting of the 2 lowest dimension.
# To make things concrete, the digits labels file is rank-1, with 60k units, each one a single uint8. Each yield would generate the next uint8.
# The digits images files, however, are rank-3, with 60k units, each one a 28x28 array of uint8. Each yield would generate the next 28x28 array of uint8.
# The optional start index (defaults to 0), will skip the first "start" count of units, and generate the one after that.
# Subsequent generations would continue from that point.
def read_idx_units(filename, start=0, count=None):
# Helper map of type enum values to dtype strings for numpy
dtypes = {8:'>u1', 9:'>i1', 0xb:'>i2',0xc:'>i4',0xd:'>f4',0xe:'>f8'}
dtypesizes = {8:1, 9:1, 0xb:2,0xc:4,0xd:4,0xe:8}
with open(filename, 'rb') as f:
# Ok, let's parse one of these files
# first read a uint32be as the magic number, yielding data type (size of each value, and format), and number of dimensions
dummy, = np.fromfile(f, dtype='>u2', count=1)
dte,dims = np.fromfile(f, dtype='>u1', count=2)
#print(dummy, dte, dtypes[dte], dtypesizes[dte], dims)
# Then comes a uint32be number per dimension, yielding the size of the n-dimensional array in that dimension
# Only after all those dimension sizes, comes the data, all big-endian.
# The arrays are in C-style, where the last dimension index changes most frequently.
dsizes = np.fromfile(f, dtype='>u4', count=dims)
unit_size = int(np.prod(dsizes[1:])) if len(dsizes) > 1 else 1
seek_delta = int(start * unit_size * dtypesizes[dte])
read_units = dsizes[0] if count is None else min(dsizes[0], count)
#print(dsizes)
#print(np.prod(dsizes), unit_size, seek_delta, read_units)
f.seek(seek_delta, 1)
for i in range(read_units):
# Read the next unit
if len(dsizes) > 1:
yield np.reshape(np.fromfile(f, dtype=dtypes[dte], count=unit_size), newshape=dsizes[1:], order='C')
else:
# Special case for the scalar situation, where re-shaping doesn't make sense.
yield np.fromfile(f, dtype=dtypes[dte], count=1)[0]
# From an MNIST image, put the data into a BytesIO filehandle and return that filehandle for later use,
# making it act just as a PNG file handle would.
def to_filehandle(image):
of = io.BytesIO()
ImageOps.invert(Image.fromarray(image)).convert("RGB").save(of, "PNG")
of.seek(0)
return of
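

# Hedged usage sketch (assumes the MNIST idx files referenced above exist on
# disk): streams the first few labelled test digits and converts each image
# into a PNG-style file handle via to_filehandle.
def _preview_test_digits(n=3):
    labels = read_idx_units(FN_TEST_LABELS, count=n)
    images = read_idx_units(FN_TEST_IMAGES, count=n)
    return [(int(label), to_filehandle(image)) for label, image in zip(labels, images)]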
| 2.703125 | 3 |
info/reviews/db_utils.py | DennisKasper/info | 6 | 12792731 | <gh_stars>1-10
from aiopg.sa import SAConnection as SAConn
from aiopg.sa.result import RowProxy
from info.reviews.tables import review
async def select_review_by_id(conn: SAConn, pk: int) -> RowProxy:
cursor = await conn.execute(
review.select().where(review.c.id == pk)
)
item = await cursor.fetchone()
return item
async def create_review(conn: SAConn, text: str) ->RowProxy:
cursor = await conn.execute(
review.insert().values(
{
'text': text,
}
)
)
item = await cursor.fetchone()
return item.id
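

# Hedged usage sketch (assumes an aiopg.sa Engine created elsewhere, e.g. via
# aiopg.sa.create_engine(); the review text is a placeholder):
#
#     async with engine.acquire() as conn:
#         review_id = await create_review(conn, 'Great service')
#         row = await select_review_by_id(conn, review_id)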
| 2.484375 | 2 |
botcommands/utils.py | pastorhudson/mtb-pykeybasebot | 0 | 12792732 | <reponame>pastorhudson/mtb-pykeybasebot
from crud import s
from models import Team, User
def get_team_user(team_name, username):
team = s.query(Team).filter_by(name=team_name).first()
user = s.query(User).filter_by(username=username).first()
return team, user
def get_team(team_name):
team = s.query(Team).filter_by(name=team_name).first()
return team
| 2.390625 | 2 |
website.py | 85599/power-napp | 2 | 12792733 | <reponame>85599/power-napp<filename>website.py
from flask import Flask, render_template, request, redirect, session
app = Flask(__name__)
@app.route('/',methods=['GET','POST'])
def website():
if request.method=="GET":
return render_template('index.html')
else:
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True) | 2.328125 | 2 |
tests/test_script_pipeline.py | Forks-yugander-krishan-singh/jenkins-job-builder-pipeline | 0 | 12792734 | <filename>tests/test_script_pipeline.py
from base import assert_case
def test_script_pipeline():
assert_case('script_pipeline')
| 1.734375 | 2 |
data camp/Dictionarty_part_2.py | hamzashabbir11/dataStructures | 0 | 12792735 | <reponame>hamzashabbir11/dataStructures<filename>data camp/Dictionarty_part_2.py
europe = { 'spain': { 'capital':'madrid', 'population':46.77 },
'france': { 'capital':'paris', 'population':66.03 },
'germany': { 'capital':'berlin', 'population':80.62 },
'norway': { 'capital':'oslo', 'population':5.084 } }
print(europe['france']['population'])
data={'cpaital':'Islamabad', 'population ': 220}
europe['Pakistan']=data
print(europe)
| 2.859375 | 3 |
insights/components/openstack.py | lhuett/insights-core | 121 | 12792736 | """
IsOpenStackCompute
==================
The ``IsOpenStackCompute`` component uses the ``PsAuxcww`` parser to determine
whether the host is an OpenStack Compute node. It checks if the 'nova-compute'
process exists and, if not, raises ``SkipComponent`` so that the dependent
component will not fire. Can be added as a dependency of a parser so that the
parser only fires if the ``IsOpenStackCompute`` dependency is met.
"""
from insights.core.plugins import component
from insights.parsers.ps import PsAuxcww
from insights.core.dr import SkipComponent
@component(PsAuxcww)
class IsOpenStackCompute(object):
"""The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine
OpenStack Compute node. It checks if ``nova-compute`` process exist, if not
raises ``SkipComponent``.
Raises:
SkipComponent: When ``nova-compute`` process does not exist.
"""
def __init__(self, ps):
if 'nova-compute' not in ps.running:
raise SkipComponent('Not OpenStack Compute node')
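

# Hedged usage sketch (the spec and parser names below are illustrative, not
# from this module): a parser can list this component as an extra dependency
# so it only fires on OpenStack Compute nodes, e.g.
#
#     from insights.core import Parser
#     from insights.core.plugins import parser
#     from insights.specs import Specs
#
#     @parser(Specs.nova_conf, IsOpenStackCompute)
#     class NovaConf(Parser):
#         def parse_content(self, content):
#             ...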
| 2.46875 | 2 |
APITest/TestSourceAPI.py | josephramsay/LDSAPI | 0 | 12792737 | <reponame>josephramsay/LDSAPI
'''
Created on 17/12/2013
@author: jramsay
'''
#https://koordinates.com/services/api/v1/sources/1/
import unittest
import json
import os
from APIInterface.LDSAPI import SourceAPI
from .TestFileReader import FileReader
from .TestSuper import APITestCase
sources = (
("Alices' Mapinfo Server", "mapinfo", "No Proxy - No Auth", [], 3, "alice", "alicespassword", "https://alice.example.com/Mapinfo/rest/services", "@hourly"),
("Bob's ArcGIS 10 Server", "arcgis", "Proxy (CNTLM) - No Auth", [], 3, "bob", "bobspassword", "https://bob.example.com/ArcGIS/rest/services", "@daily"),
("Carol's PostGIS 8 Server", "postgis", "Proxy (Corp Web) - No Auth", [], 3, "carol", "carols password", "https://carol.example.com/PostGis/rest/services", "@weekly"),
("Dan's SpatiaLite 4 Server", "spatialite", "No Proxy - Auth (aio)", [], 3, "dan", "danspassword", "https://dan.example.com/SpatiaLite/rest/services", "@yearly"),
("Erin's Grass 12 server", "grass", "Proxy (CNTLM) - Auth (aio)", [], 3, "erin", "erinspassword", "https://erin.example.com/Grass/rest/services", "@quarterly"),
("Frank's FileGDB 2 Server", "filegdb", "Proxy (Corp Web) - Auth (aio)", [], 3, "frank", "frankspassword", "https://frank.example.com/FileGDB/rest/services", "@occasionally"),
("WORKING PG SERVER", "postgis", "Proxy (Corp Web) - Auth (aio)", [], 3, "pguser", "pgpass", "https://linz.govt.nz/PostGIS/rest/services", "@daily")
)
class SourcesTester(APITestCase):
def setUp(self):
print('S')
self.api = SourceAPI(FileReader.creds,self.cdir+self.cfile)
self.api.setParams()
def tearDown(self):
self.api = None
def test_21_GetNoProxyAuth(self):
self.api.connect()
self.api.dispRes(self.api.res)
def test_30_BasicJSONWrite(self):
self.api.connect()
be = json.dumps(self.api.res)
pp = json.dumps(self.api.res, sort_keys=True, indent=4, separators=(',', ': '))
print(be,pp)
if __name__ == '__main__':
unittest.main() | 1.4375 | 1 |
02_oop/01_classes_and_objects/03_initializing_with_optional_params.py | doanthanhnhan/learningPY | 1 | 12792738 | <filename>02_oop/01_classes_and_objects/03_initializing_with_optional_params.py
class Employee:
# defining the properties and assigning None to them
def __init__(self, ID=None, salary=0, department=None):
self.ID = ID
self.salary = salary
self.department = department
# creating an object of the Employee class with default parameters
Steve = Employee()
Mark = Employee("3789", 2500, "Human Resources")
# Printing properties of Steve and Mark
print("Steve")
print("ID :", Steve.ID)
print("Salary :", Steve.salary)
print("Department :", Steve.department)
print("Mark")
print("ID :", Mark.ID)
print("Salary :", Mark.salary)
print("Department :", Mark.department) | 4.15625 | 4 |
income_expense_tracker/dashboard/urls.py | gokul-sarath07/Nymblelabs-Expence-Tracker | 0 | 12792739 | <gh_stars>0
from django.urls import path
from . import views
"""Url pattern for dashboard view."""
urlpatterns = [
path('', views.index, name="home"),
]
| 1.460938 | 1 |
config/custom_components/motioneye/camera.py | azogue/hassio_config | 18 | 12792740 | <gh_stars>10-100
"""
Support for MotionEye Cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.motioneye/
"""
import asyncio
import logging
from urllib.parse import urlparse
import re
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.const import CONF_NAME
from homeassistant.components.camera import (
PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, Camera)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers import config_validation as cv
from homeassistant.util.dt import utcnow
# TODO http://192.168.1.30:7999/3/config/set?emulate_motion=on/off
# TODO implement ffmpeg_output_movies control: curl http://192.168.1.30:7999/3/config/set?ffmpeg_output_movies=off
_LOGGER = logging.getLogger(__name__)
CONF_CONTROL_PORT = 'control_port'
CONF_CONTROL_CAM_ID = 'camera_id'
CONF_SNAPSHOT_URL = 'snapshot_url'
CONF_WITH_MOTION_CONTROL = 'with_motion_control'
DEFAULT_NAME = 'MotionEye Camera'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_SNAPSHOT_URL): cv.url,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_WITH_MOTION_CONTROL, default=False): cv.boolean,
vol.Optional(CONF_CONTROL_PORT, default=7999): cv.positive_int,
vol.Optional(CONF_CONTROL_CAM_ID, default=1): cv.positive_int
})
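
# Hedged example configuration (the snapshot URL and name are placeholder
# assumptions; keys mirror the PLATFORM_SCHEMA above):
#
#   camera:
#     - platform: motioneye
#       name: Garden
#       snapshot_url: http://192.168.1.30:8765/picture/1/current/
#       with_motion_control: true
#       control_port: 7999
#       camera_id: 1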
RG_STATUS = re.compile(' Detection status (\w+)\s?\n')
RG_CONTROL = re.compile(' Detection (\w+)\s?\n')
# pylint: disable=unused-argument
async def async_setup_platform(hass, config,
async_add_entities, discovery_info=None):
"""Set up a generic IP Camera."""
async_add_entities([MotionEyeCamera(hass, config)])
class MotionEyeCamera(Camera):
"""A very simple implementation of a MotionEye camera,
using the snapshot url."""
def __init__(self, hass, device_info):
"""Initialize a generic camera."""
super().__init__()
self.hass = hass
self._name = device_info.get(CONF_NAME)
self.content_type = DEFAULT_CONTENT_TYPE
self._snapshot_url = device_info[CONF_SNAPSHOT_URL]
self._control_url = None
self._with_motion_detection = device_info[CONF_WITH_MOTION_CONTROL]
if self._with_motion_detection:
# ParseResult(scheme, netloc, url, params, query, fragment)
url_p = urlparse(self._snapshot_url)
control_port = device_info[CONF_CONTROL_PORT]
cam_id = device_info[CONF_CONTROL_CAM_ID]
self._control_url = (
f"{url_p.scheme}://{url_p.netloc.split(':')[0]}"
f":{control_port}/{cam_id}/detection/"
)
self._online = True
self._last_image = None
self._last_status = None
self._motion_detection_active = False
self.is_streaming = False
# self._motion_detected = False
async def async_added_to_hass(self):
"""Handle all entity which are about to be added."""
# TODO add some periodic status pull as well! (scan_interval = 120)
await self.async_get_camera_motion_status(command='status')
@property
def is_recording(self):
"""Return true if the device is recording."""
# return self._motion_detected
return self._motion_detection_active
@property
def brand(self):
"""Return the camera brand."""
return "MotionEye"
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return self._motion_detection_active
@property
def model(self):
"""Return the camera model."""
return "MotionEye Snapshot Camera"
async def async_enable_motion_detection(self):
"""Enable motion detection in the camera."""
self.is_streaming = True
await self.async_get_camera_motion_status(command='start')
self.async_schedule_update_ha_state()
async def async_disable_motion_detection(self):
"""Disable motion detection in camera."""
self.is_streaming = False
await self.async_get_camera_motion_status(command='pause')
self.async_schedule_update_ha_state()
def camera_image(self):
"""Return bytes of camera image."""
return asyncio.run_coroutine_threadsafe(
self.async_camera_image(), self.hass.loop).result()
async def async_camera_image(self):
"""Return a still image response from the camera."""
try:
websession = async_get_clientsession(self.hass)
with async_timeout.timeout(10, loop=self.hass.loop):
response = await websession.get(self._snapshot_url)
self._last_image = await response.read()
if not self._online:
_LOGGER.warning("%s: Recovered camera image", self.entity_id)
self._online = True
if (self._control_url is None or
(self._last_status is not None
and (utcnow() - self._last_status).total_seconds() < 60)):
return self._last_image
await self.async_get_camera_motion_status(command='status')
except asyncio.TimeoutError:
if self._online:
_LOGGER.warning("%s: Timeout getting camera image", self.entity_id)
self._online = False
except aiohttp.ClientError as err:
if self._online:
_LOGGER.error(
"%s: ClientError getting new camera image: %s", self.name, err
)
self._online = False
return self._last_image
async def async_get_camera_motion_status(self, command='status'):
"""Asks for the motion detection status of the camera."""
if self._control_url is None:
self._motion_detection_active = False
return
url = self._control_url + command
reg_expr = RG_STATUS if command == 'status' else RG_CONTROL
try:
websession = async_get_clientsession(self.hass)
with async_timeout.timeout(10, loop=self.hass.loop):
response = await websession.get(url)
raw = await response.read()
if not raw:
_LOGGER.error(f"No control response in {url}")
status_found = reg_expr.findall(raw.decode())
if not status_found:
_LOGGER.error(f"Bad control response from {url}: "
f"{raw}, no pattern found")
self._motion_detection_active = False
elif status_found[0] in ['ACTIVE', 'resumed']:
self._motion_detection_active = True
else:
self._motion_detection_active = False
self._last_status = utcnow()
except asyncio.TimeoutError:
_LOGGER.warning(f"Timeout in motion detection control at {url}")
# return
except aiohttp.ClientError as err:
_LOGGER.error(f"Error in motion detection control at {url}: "
f"{str(err)}")
@property
def name(self):
"""Return the name of this device."""
return self._name
| 2.015625 | 2 |
Tesi/other/benchmarkChart.py | LucaCamerani/EcoFin-library | 9 | 12792741 | <reponame>LucaCamerani/EcoFin-library
"""
benchmarkChart.py
Created by <NAME> at 06/02/2021, University of Milano-Bicocca.
(<EMAIL>)
All rights reserved.
This file is part of the EcoFin-Library (https://github.com/LucaCamerani/EcoFin-Library),
and is released under the "BSD Open Source License".
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tqdm import tqdm
# -------------------------[Set-up]-------------------------
ticker_list = [line.rstrip('\n') for line in open(r'../INDEXs/DJIA.txt')]
maturity_min = 15
base_path = r'../Export/BackTest_C'
start_date = 0
driver = 'SpotPrice'
# ----------------------------------------------------------
data = {driver: {}}
for tick in tqdm(ticker_list, desc='Importing data'):
try:
# Import data and clean-up
source = pd.read_excel(r'{}/{}/backTest_[{}].xlsx'.format(base_path, tick, maturity_min), engine='openpyxl')
source = source.loc[source['Date'] >= start_date, ~source.columns.str.contains('^Unnamed')]
source.set_index(pd.to_datetime(source['Date'], format='%Y%m%d'), drop=True, inplace=True)
data[driver][tick] = source[driver]
except:
pass
# Merge data
data[driver] = pd.concat(data[driver], axis=1)
# Compute ln-returns table
data['lnReturns'] = np.log(data[driver].shift(-1) / data[driver])
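# One-period ln-return per ticker: x_t = ln(S_{t+1} / S_t). The shift(-1) makes the
# last row NaN; np.cumsum in the plots below rebuilds the cumulative return path.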
# Plot results
fig, axs = plt.subplots(2, figsize=(15, 8), sharex=True)
fig.suptitle('Benchmark', fontsize=16)
# Plot benchmark
axs[0].set_title('Underlying returns')
for tick in data[driver].keys():
axs[0].plot(np.cumsum(data['lnReturns'][tick]), label=tick)
axs[0].set(ylabel=r'ln-returns ($X_t$)')
axs[0].legend(ncol=4)
# Plot strategy return vs. benchmark (portfolio)
axs[1].set_title('Portfolio return')
axs[1].plot(np.cumsum(data['lnReturns'].sum(axis=1)), label='Benchmark')
axs[1].set(xlabel=r'Time ($t$)', ylabel=r'ln-returns ($X_t$)')
axs[1].legend()
# Compute performance metrics
SR_b = data['lnReturns'].sum(axis=1).sum() / data['lnReturns'].sum(axis=1).std()
print('Sharpe-Ratio:\n • Benchmark: {}'.format(SR_b))
plt.show()
| 2 | 2 |
alipay/aop/api/domain/SceneConfigQueryDTO.py | antopen/alipay-sdk-python-all | 0 | 12792742 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SceneConfigQueryDTO(object):
def __init__(self):
self._business_scene = None
self._group_id = None
self._isv_name = None
self._pid = None
self._school_id = None
self._school_name = None
self._school_std_code = None
self._sign_app_id = None
self._status = None
@property
def business_scene(self):
return self._business_scene
@business_scene.setter
def business_scene(self, value):
if isinstance(value, list):
self._business_scene = list()
for i in value:
self._business_scene.append(i)
@property
def group_id(self):
return self._group_id
@group_id.setter
def group_id(self, value):
self._group_id = value
@property
def isv_name(self):
return self._isv_name
@isv_name.setter
def isv_name(self, value):
self._isv_name = value
@property
def pid(self):
return self._pid
@pid.setter
def pid(self, value):
self._pid = value
@property
def school_id(self):
return self._school_id
@school_id.setter
def school_id(self, value):
self._school_id = value
@property
def school_name(self):
return self._school_name
@school_name.setter
def school_name(self, value):
self._school_name = value
@property
def school_std_code(self):
return self._school_std_code
@school_std_code.setter
def school_std_code(self, value):
self._school_std_code = value
@property
def sign_app_id(self):
return self._sign_app_id
@sign_app_id.setter
def sign_app_id(self, value):
self._sign_app_id = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def to_alipay_dict(self):
params = dict()
if self.business_scene:
if isinstance(self.business_scene, list):
for i in range(0, len(self.business_scene)):
element = self.business_scene[i]
if hasattr(element, 'to_alipay_dict'):
self.business_scene[i] = element.to_alipay_dict()
if hasattr(self.business_scene, 'to_alipay_dict'):
params['business_scene'] = self.business_scene.to_alipay_dict()
else:
params['business_scene'] = self.business_scene
if self.group_id:
if hasattr(self.group_id, 'to_alipay_dict'):
params['group_id'] = self.group_id.to_alipay_dict()
else:
params['group_id'] = self.group_id
if self.isv_name:
if hasattr(self.isv_name, 'to_alipay_dict'):
params['isv_name'] = self.isv_name.to_alipay_dict()
else:
params['isv_name'] = self.isv_name
if self.pid:
if hasattr(self.pid, 'to_alipay_dict'):
params['pid'] = self.pid.to_alipay_dict()
else:
params['pid'] = self.pid
if self.school_id:
if hasattr(self.school_id, 'to_alipay_dict'):
params['school_id'] = self.school_id.to_alipay_dict()
else:
params['school_id'] = self.school_id
if self.school_name:
if hasattr(self.school_name, 'to_alipay_dict'):
params['school_name'] = self.school_name.to_alipay_dict()
else:
params['school_name'] = self.school_name
if self.school_std_code:
if hasattr(self.school_std_code, 'to_alipay_dict'):
params['school_std_code'] = self.school_std_code.to_alipay_dict()
else:
params['school_std_code'] = self.school_std_code
if self.sign_app_id:
if hasattr(self.sign_app_id, 'to_alipay_dict'):
params['sign_app_id'] = self.sign_app_id.to_alipay_dict()
else:
params['sign_app_id'] = self.sign_app_id
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SceneConfigQueryDTO()
if 'business_scene' in d:
o.business_scene = d['business_scene']
if 'group_id' in d:
o.group_id = d['group_id']
if 'isv_name' in d:
o.isv_name = d['isv_name']
if 'pid' in d:
o.pid = d['pid']
if 'school_id' in d:
o.school_id = d['school_id']
if 'school_name' in d:
o.school_name = d['school_name']
if 'school_std_code' in d:
o.school_std_code = d['school_std_code']
if 'sign_app_id' in d:
o.sign_app_id = d['sign_app_id']
if 'status' in d:
o.status = d['status']
return o
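# Illustrative round trip (field values are placeholders):
#   dto = SceneConfigQueryDTO.from_alipay_dict({'pid': '2088000000000000', 'status': 'VALID'})
#   params = dto.to_alipay_dict()  # -> {'pid': '2088000000000000', 'status': 'VALID'}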
| 1.945313 | 2 |
src/wafec/fi/hypothesis/controllers/_route.py | wafec/wafec-fi-hypothesis | 0 | 12792743 | from . import app, api
from . import TestController, TestParameterController
from threading import Lock
TEST_PARAMETER_LOCK = Lock()
api.add_resource(TestController, '/api/tests')
api.add_resource(TestParameterController, '/api/parameters',
                 resource_class_kwargs={'lock_obj': TEST_PARAMETER_LOCK})
| 2.03125 | 2 |
ape_console_extras.py | unparalleled-js/ape-demo-project | 0 | 12792744 | <gh_stars>0
import json
import click
from ape.logging import logger
def ape_init_extras(accounts, project, config, networks):
ecosystem = networks.provider.network.ecosystem.name
network = networks.provider.network.name
extras = {}
try:
if ecosystem in config.deployments:
ecosystem_deployments = config.deployments[ecosystem]
if network in ecosystem_deployments:
                network_deployments = ecosystem_deployments[network]
deployments = [
d for d in network_deployments if d["contract_type"] == "TestContract"
]
latest_address = deployments[-1]["address"]
extras["test_contract_address"] = latest_address
# Mimic fixtures
owner = accounts.test_accounts[0]
logger.info(f"Deploying {project.FundMe} in ape_console_extras.py")
contract = project.FundMe.deploy(sender=owner)
extras = {
"owner": owner,
"sender": accounts.test_accounts[1],
"fund_me": contract,
"contract": contract,
**extras,
}
# Add remaining accounts
index = 2
for acct in accounts.test_accounts[2:]:
extras[f"acct{index}"] = acct
index += 1
except Exception as err:
logger.error(err)
pass
extras["list_extras"] = lambda: click.echo(
json.dumps({k: str(v) for k, v in extras.items() if k != "list_extras"}, indent=2)
)
return extras
| 2.046875 | 2 |
calc_HI_rms.py | sjforeman/RadioFisher | 3 | 12792745 | #!/usr/bin/python
"""
Calculate signal power as a function of frequency.
"""
import numpy as np
import pylab as P
import scipy.integrate
import baofisher
import experiments
from experiments import cosmo
from units import *
import copy
nu21 = 1420. # Line frequency at z=0
# Pre-calculate cosmological quantities
k, pk = np.genfromtxt("cache_pk.dat")[:-1].T
H, r, D, f = baofisher.background_evolution_splines(cosmo)
def W_tophat(k, r):
return 3. * ( np.sin(k * r) - k * r * np.cos(k * r) ) / ((k * r)**3.)
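# W_tophat is the Fourier transform of a spherical top-hat window of radius r:
#   W(kr) = 3 [sin(kr) - kr cos(kr)] / (kr)^3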
def calculate_rms(z, expt):
"""
Calculate RMS of HI signal at a given redshift.
"""
theta_b = 3e8 * (1. + z) / (1e6 * expt['nu_line']) / expt['Ddish'] # Beam FWHM
rnu = C * (1.+z)**2. / H(z)
Tb = baofisher.Tb(z, cosmo)
bHI = 1. #baofisher.bias_HI(z, cosmo)
# Calculate pixel volume at given redshift
Vpix = (r(z) * theta_b)**2. * rnu * expt['dnu'] / nu21
Rpix = Vpix**(1./3.)
# Integrate P(k) to get correlation fn. averaged in a ball, xi(Rpix)
y = k**2. * pk * W_tophat(k, Rpix)
xi = scipy.integrate.simps(y, k) / (2. * np.pi**2.)
# Return rms HI fluctuation
return Tb * D(z) * bHI * np.sqrt(xi) # in mK
# Choose experiment
e = experiments
expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2,
e.SKA1SURbase1, e.SKA1SURbase2, e.SKA1SURfull1, e.SKA1SURfull2 ]
names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2',
'SKA1SURbase1', 'SKA1SURbase2', 'SKA1SURfull1', 'SKA1SURfull2' ]
# Calculate sigma_HI for a range of redshift
#z = np.linspace(1e-2, 3., 100)
#Tb = baofisher.Tb(z, cosmo)
#sigma_HI = np.array([calculate_rms(zz, expt) for zz in z])
# Output noise per voxel (single-dish)
for j in range(len(expts)):
expt = expts[j]
zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.)
dnu = expt['dnu']
sigma_T = baofisher.noise_rms_per_voxel(zc, expt)
expt2 = copy.copy(expt)
expt2['dnu'] = 60. # 60 MHz
sigma_60 = baofisher.noise_rms_per_voxel(zc, expt2)
# Output data
print ""
print "-"*40
print names[j]
print "-"*40
print " zc / dz / sigma_T [uK] / sigma_T [uK]"
print " -- / -- / (%2.2f MHz) / (60 MHz)" % dnu
print "-"*40
for i in range(zc.size):
#sigma_HI = calculate_rms(zc[i], expt)
print "%2.2f %4.4f %4.4f %4.4f" % (zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i])
expts = [ e.SKA1MIDbase1, e.SKA1MIDbase2, e.SKA1MIDfull1, e.SKA1MIDfull2 ]
names = [ 'SKA1MIDbase1', 'SKA1MIDbase2', 'SKA1MIDfull1', 'SKA1MIDfull2' ]
# Output noise per voxel (interferom.)
for j in range(len(expts)):
expt = expts[j]
zs, zc = baofisher.zbins_const_dnu(expt, cosmo, dnu=60.)
dnu = expt['dnu']
expt['Sarea'] = 100.*(D2RAD)**2.
sigma_T = baofisher.noise_rms_per_voxel_interferom(zc, expt)
expt['dnu'] = 60. # 60 MHz
sigma_60 = baofisher.noise_rms_per_voxel_interferom(zc, expt)
#n_x = load_interferom_file(expt['n(x)'])
#x = u / nu # x = u / (freq [MHz])
#n_u = n_x(x) / nu**2. # n(x) = n(u) * nu^2
# Output data
print ""
print "-"*40
print names[j], "(INTERFEROMETER)"
print "-"*40
print " zc / dz / sqrt[n(u)] * sigma_T [uK] / sqrt[n(u)] * sigma_T [uK] / lambda [m] / Tsys [K]"
print " -- / -- / (%2.2f MHz) / (60 MHz)" % dnu
print "-"*40
for i in range(zc.size):
# Calculate quantities from Eq. 9.38 of Rohlfs & Wilson (5th Ed.)
l = 3e8 * (1. + zc[i]) / 1420.e6
Ddish = expt['Ddish']
Tsky = 60e3 * (300.*(1.+zc[i])/expt['nu_line'])**2.55 # Foreground sky signal (mK)
Tsys = expt['Tinst'] + Tsky
#sigma_HI = calculate_rms(zc[i], expt)
print "%2.2f %4.4f %8.8f %8.8f %4.4f %4.4f" % \
(zc[i], zs[i+1]-zs[i], 1e3*sigma_T[i], 1e3*sigma_60[i],
l, Tsys/1e3)
exit()
# Plot results
P.subplot(111)
P.plot(z, Tb*1e3, lw=1.4, label="$T_b(z)$")
P.plot(z, sigma_HI*1e3, lw=1.4, label="$\sigma_\mathrm{HI}(z)$")
P.plot(z, sigma_T*1e3, lw=1.4, label="$\sigma_T(z)$")
P.plot([0.5, 1., 1.5, 2.], [155.8, 210.9, 245.6, 260.8], 'bo') # mean Tb, from SKA RFC
P.plot([0.5, 1., 1.5, 2.], [40.1, 28.0, 20.9, 16.4], 'go') # rms Tb, from SKA RFC
P.xlabel("z")
P.ylabel("uK")
P.legend(loc='upper left')
P.show()
| 2.609375 | 3 |
analysis-master/tra_analysis/equation/parser/__init__.py | titanscout2022/red-alliance-analysis | 2 | 12792746 | # Titan Robotics Team 2022: Expression submodule
# Written by <NAME>
# Notes:
# this should be imported as a python module using 'from tra_analysis.Equation import parser'
# setup:
__version__ = "0.0.4-alpha"
__changelog__ = """changelog:
0.0.4-alpha:
- moved individual parsers to their own files
0.0.3-alpha:
- readded old regex based parser as RegexInplaceParser
0.0.2-alpha:
- wrote BNF using pyparsing and uses a BNF metasyntax
- renamed this submodule parser
0.0.1-alpha:
- took items from equation.ipynb and ported here
"""
__author__ = (
"<NAME> <<EMAIL>>",
)
__all__ = {
"BNF",
"RegexInplaceParser",
"HybridExpressionParser"
}
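# Illustrative import of the exported parsers (no constructor arguments shown,
# since each parser defines its own signature in its submodule):
#   from tra_analysis.Equation import parser
#   parser.BNF, parser.RegexInplaceParser, parser.HybridExpressionParser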
from .BNF import BNF as BNF
from .RegexInplaceParser import RegexInplaceParser as RegexInplaceParser
from .Hybrid import HybridExpressionParser
from .Hybrid_Utils import equation_base, Core | 1.890625 | 2 |
tirelire-auth/tests/unit/test_handlers.py | AgRenaud/tirelire | 0 | 12792747 | <reponame>AgRenaud/tirelire
from unittest import TestCase
from typing import List
from app import bootstrap
from app.domain import commands, model
from app.service_layer import handlers
from app.service_layer.unit_of_work import UnitOfWork
from app.service_layer.auth_service import AuthService
class FakeRepository:
def __init__(self, users: List[model.User]):
self._users = set(users)
self.seen = set()
def add(self, user: model.User):
self._users.add(user)
def get(self, id: str):
return next((u for u in self._users if u.id == id), None)
def get_by_email(self, email: str):
return next((u for u in self._users if u.email == email), None)
def list(self):
return self._users
class FakeAuthService:
def verify_password(self, password: str, user: model.User) -> bool:
return hash(password) == hash(user.password)
def encrypt_password(self, password: str) -> str:
return hash(password)
def generate_token(self, password: str, user: model.User) -> dict:
return password
def verify_token(self, token: str) -> bool:
return token
class FakeUnitOfWork(UnitOfWork):
def __init__(self):
        self.users = FakeRepository([])  # fake stand-in for the user repository
self.auth_service = FakeAuthService()
self.committed = False
def _commit(self):
self.committed = True
def rollback(self):
pass
def bootstrap_test_app():
return bootstrap.bootstrap(
start_orm=False,
uow=FakeUnitOfWork(),
)
class TestHandlers(TestCase):
def test_create_user_must_create_user(self):
uow = bootstrap_test_app()
command = commands.CreateUser(
"id1234",
"secure_password",
"john",
"doe",
"<EMAIL>"
)
handlers.create_user(command, uow, lambda *args: None)
self.assertIsNotNone(uow.users.get('id1234'))
def test_add_app_auth_to_user_must_return(self):
uow = bootstrap_test_app()
command = commands.CreateUser(
"id1234",
"secure_password",
"john",
"doe",
"<EMAIL>"
)
handlers.create_user(command, uow, lambda *args: None)
app_auth_1 = model.AppAuthorization(model.App.TIRELIRE_APP)
command = commands.AddAuthorizationToUser("id1234", app_auth_1)
handlers.add_app_auth_to_user(command, uow)
app_auth_2 = model.AppAuthorization(model.App.TIRELIRE_WEB)
command = commands.AddAuthorizationToUser("id1234", app_auth_2)
handlers.add_app_auth_to_user(command, uow)
user = uow.users.get('id1234')
self.assertSetEqual(user._applications_auth, {app_auth_1, app_auth_2})
def test_get_token_must_return_token(self):
uow = bootstrap_test_app()
command = commands.CreateUser(
"id1234",
"secure_password",
"john",
"doe",
"<EMAIL>"
)
handlers.create_user(command, uow, lambda *args: None)
cmd = commands.Authenticate("<EMAIL>", "secure_password")
token = handlers.get_token(cmd, uow)
# TODO: Fake token generation
def verify_token_must_return(self):
pass
| 2.578125 | 3 |
venv/Lib/site-packages/txclib/wizard.py | star10919/drf | 2 | 12792748 | import os
from slugify import slugify
from txclib import messages
from txclib import utils
from txclib.api import Api
from txclib.project import Project
from txclib.log import logger
from six.moves import input
COLOR = "CYAN"
try:
import readline
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind("tab: complete")
except ImportError:
pass
def validate_source_file(path):
return os.path.isfile(os.path.abspath(path))
def validate_expression(expression):
return '<lang>' in expression
def validate_int(choice, length):
try:
choice = int(choice)
except ValueError:
return False
return 0 < choice <= length
def choice_prompt(l, key):
"""
l: A list of tuples (key, display_value) with the valid choices
key: one of 'formats', 'organizations', 'projects'
returns the key of the selected choice
"""
a = "\n".join([" {}. {}".format(i+1, f[1])
for i, f in enumerate(l)])
a = a + "\n"
print(a)
choice = ''
first_time = True
r = '1' if len(l) == 1 else '1-{}'.format(len(l))
while not validate_int(choice, len(l)):
if not first_time:
print(messages.TEXTS[key]["error"])
choice = input(utils.color_text(
messages.TEXTS[key]['message'].format(r=r), COLOR))
first_time = False
return l[int(choice) - 1][0]
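# Illustrative call (the choices shown are placeholders):
#   choice_prompt([('po', 'Gettext PO'), ('json', 'JSON')], 'formats')
# returns the key ('po' or 'json') of whichever entry the user selects.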
def input_prompt(key, validation_method):
user_input = ''
first_time = True
while not validation_method(user_input):
if not first_time:
print(messages.TEXTS[key]['error'])
user_input = input(
utils.color_text(messages.TEXTS[key]['message'], COLOR))
first_time = False
return user_input
class Wizard(object):
def __init__(self, path_to_tx):
p = Project(path_to_tx)
self.host = p.config.get('main', 'host')
username, token_or_password = p.getset_host_credentials(
self.host, only_token=True)
self.api = Api(username=username, password=<PASSWORD>,
host=self.host, path_to_tx=p.txrc_file)
def get_organizations(self):
try:
organizations = self.api.get('organizations')
except Exception as e:
logger.error(e)
raise
# return org list sorted by name
return sorted(
[(o['slug'], o['name']) for o in organizations],
key=lambda x: x[1]
)
def get_projects_for_org(self, organization):
try:
projects = self.api.get('projects', organization=organization)
except Exception as e:
logger.error(e)
raise
# return project list sorted by name
return sorted(
[p for p in projects if not p['archived']],
key=lambda x: x['name']
)
def get_formats(self, filename):
_, extension = os.path.splitext(filename)
try:
formats = self.api.get('formats')
except Exception as e:
logger.error(e)
raise
def display_format(v):
return '{} - {}'.format(v['description'], v['file-extensions'])
formats = [(k, display_format(v)) for k, v in formats.items()
if extension in v['file-extensions']]
if not formats:
raise Exception(messages.TEXTS['formats']['empty'])
return sorted(formats, key=lambda x: x[0])
def run(self):
"""
Runs the interactive wizard for `tx set` command and populates the
parser's options with the user input. Options `local` and `execute`
are by default True when interactive wizard is run.
Returns: the options dictionary.
"""
TEXTS = messages.TEXTS
print(TEXTS['source_file']['description'])
source_file = input_prompt('source_file', validate_source_file)
print(
TEXTS['expression']['description'].format(source_file=source_file)
)
expression = input_prompt('expression', validate_expression)
formats = self.get_formats(os.path.basename(source_file))
print(TEXTS['formats']['description'])
i18n_type = choice_prompt(formats, 'formats')
organizations = self.get_organizations()
print(TEXTS['organization']['description'])
org_slug = choice_prompt(organizations, 'organization')
projects = []
first_time = True
create_project = ("tx:new_project",
"Create new project (show instructions)...")
project = None
while not project:
if not first_time:
retry_message = "Hit Enter to try selecting a project again: "
input(utils.color_text(retry_message, COLOR))
projects = self.get_projects_for_org(org_slug)
p_choices = [(p['slug'], p['name']) for p in projects]
p_choices.append(create_project)
if projects:
print(TEXTS['projects']['description'])
else:
print("We found no projects in this organization!")
first_time = False
project_slug = choice_prompt(p_choices, 'projects')
if project_slug == 'tx:new_project':
print(messages.create_project_instructions.format(
host=self.host, org=org_slug
))
else:
project = [p for p in projects
if p['slug'] == project_slug][0]
source_language = project['source_language']['code']
resource_slug = slugify(os.path.basename(source_file))
resource = '{}.{}'.format(project_slug, resource_slug)
options = {
'source_file': source_file,
'expression': expression,
'i18n_type': i18n_type,
'source_language': source_language,
'resource': resource,
}
return options
| 2.3125 | 2 |
Monopoly/Tiles.py | KGB33/Monopoly | 2 | 12792749 | <reponame>KGB33/Monopoly<gh_stars>1-10
from abc import ABC, abstractmethod
from UserEntity import Player, Bank, FreeParking
from InputValidation import get_yes_or_no_input
from random import randint
from Exceptions import TilesClassNotFoundError
class Location(ABC):
"""
Abstract Parent Class for all locations on the board
Attributes:
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
:price: (int)
purchase cost of the tile
:owner: (UserEntity Object)
Current Owner of the tile
:is_mortgaged: (Boolean)
mortgage state of the tile
"""
@abstractmethod
def __init__(self, location, name, price):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (String)
Name of the Tile
:param price: (int)
purchase cost of the tile
"""
self.location = location
self.name = name
self.price = price
self.owner = Bank
self.is_mortgaged = False
super().__init__()
def landed_on(self, player):
"""
Calls the proper function depending on
who landed on the property and who owns the property
:param player: (Player Object)
The player that landed on the tile
"""
if self.owner == Bank:
self.owned_by_bank(player)
elif self.owner != player:
self.owned_by_player(player)
def owned_by_bank(self, player):
"""
Gives the player the option to purchase the tile,
if the tile is purchased, transfers money,
updates owner, and sets is_mortgaged to False
:param player: (Player Object)
Player that landed on the tile
"""
buy_or_pass = self.ask_buy_or_pass()
if buy_or_pass: # buy
player.money = player.money - self.price
self.owner = player
player.owned_properites.update({self.location: self})
self.is_mortgaged = False
self.price = self.price * 2
def owned_by_player(self, player):
"""
Charges player rent, transfers rent between owner and player
:param player: (Player Object)
Player that landed on tile
"""
self.owner.exchange_money(player, self.rent[self.number_of_houses])
def ask_buy_or_pass(self):
"""
Asks the player if they would like to purchase the
property, displays the Name and price
:return: (Boolean)
True if the player would like to buy
False if the player would not like to buy
"""
buy_or_pass = get_yes_or_no_input(
"Would you like to buy " + self.name +
" for $" + str(self.price) + "? y/n")
return buy_or_pass
def mortgage(self):
"""
Sets is_mortgaged to True,
Gives owner mortgage value (1/2 price),
Sets price to 1/2 price,
Sets owner to Bank,
"""
self.is_mortgaged = True
self.price = self.price / 2
Bank.exchange_money(self.owner, self.price)
self.owner = Bank
def unmortgage(self, player):
"""
Sets is_mortgaged to False,
Sets price to full price
Sets owner to player
Charges Player unmortgage price
:param player: (Player Object)
Player that is unmortgageing the tile
"""
self.is_mortgaged = False
self.price = self.price * 2
self.owner = player
self.owner.exchange_money(self.owner, self.price * -.75)
def format_owner(self):
"""
Formats current owner information for __str__()
:return: (String)
Easy to read owner information
"""
if isinstance(self.owner, Bank):
owned_by = "Owner: {0}, Current Rent {1}" \
.format(self.owner, self.format_current_rent())
else:
owned_by = "Owner: {0}, Price: {1}, Morgaged: {2}" \
.format(self.owner, self.price, self.is_mortgaged)
return owned_by
def __str__(self):
"""
:return: (String)
Easy to read tile description
"""
output = "{0} {1}" \
"\n\t{2}".format(self.location, self.name, self.format_owner())
return output
class Property(Location):
"""
Defines all the Properties on the board
Does not include railroads or utilities
Attributes:
---- From Location Class ----
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
:price: (int)
purchase cost of the tile
:owner: (UserEntity Object)
Current Owner of the tile
:is_mortgaged: (Boolean)
mortgage state of the tile
---- New in Property Class ----
:color: (String)
Color of the property
:rent: (1x6 array-like)
Rent tiers for the property
:number_of_houses: (int)
Number of houses on the property, 0 - 5
Zero is No houses
Five is a hotel
:cost_per_house: (int)
Price of one house
"""
def __init__(self, location, name, property_data):
"""
:param location: (Int)
position on the board, int from 0 to 39
:param property_data: (1x9 array-like)
list with various data formatted as follows
["Color", Price, rent, rent_1_house, ..., rent_hotel]
"""
self.color = property_data[0]
self.rent = property_data[2:]
self.number_of_houses = 0
self.cost_per_house = self.set_cost_per_house(location)
super().__init__(location, name, int(property_data[1]))
@staticmethod
def set_cost_per_house(location):
"""
Determines the price for one house based on the location
:param location: (int)
location on the board
:return: (int)
cost for one house
"""
if location > 30:
return 200
elif location > 20:
return 150
elif location > 10:
return 100
else:
return 50
def __str__(self):
"""
:return: (String)
Easy to read tile description
"""
rent_tiers = ''
for tier in self.rent:
rent_tiers += str(tier) + ', '
owned_by = self.format_owner()
output = "{0} {1} {2}" \
"\n\t{3}" \
"\n\tCost Per House: {4}, Number Of Houses: {5}" \
"\n\tRent Tiers {6}"\
.format(self.location, self.name, self.color, owned_by,
self.cost_per_house, self.number_of_houses, rent_tiers)
return output
def format_current_rent(self):
"""
Formats Current rent for __str__
:return: (String)
Current Rent
"""
return str(self.rent[self.number_of_houses])
class Utility(Location):
"""
Defines all utilities
i.e. Electric Company and Water Works
Attributes:
---- From Location Class ----
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
:price: (int)
purchase cost of the tile
:owner: (UserEntity Object)
Current Owner of the tile
:is_mortgaged: (Boolean)
mortgage state of the tile
"""
def __init__(self, location, name, price=150):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (String)
Name of the Tile
:param price: (Optional, int, default=150)
purchase cost of the tile
"""
super().__init__(location, name, price)
def owned_by_player(self, player):
"""
Charges player rent, transfers rent between owner and player
:param player: (Player Object)
Player that landed on tile
"""
num_utils_owned = 0
multiplier = {1: 4, 2: 10}
roll = randint(1, 6)
for key in self.owner.owned_properites:
if isinstance(self.owner.owned_properites[key], Utility):
num_utils_owned += 1
self.owner.exchange_money(player, roll * multiplier[num_utils_owned])
class Railroad(Location):
"""
Defines all 4 railroads
Attributes:
---- From Location Class ----
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
:price: (int)
purchase cost of the tile
:owner: (UserEntity Object)
Current Owner of the tile
:is_mortgaged: (Boolean)
mortgage state of the tile
"""
def __init__(self, location, name, price=200):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (String)
Name of the Tile
:param price: (Optional, int, default=200)
purchase cost of the tile
"""
super().__init__(location, name, price)
def owned_by_player(self, player):
"""
Charges player rent, transfers rent between owner and player
:param player: (Player Object)
Player that landed on tile
"""
num_railroads_owned = 0
cost = {1: 50, 2: 100, 3: 150, 4: 200}
for key in self.owner.owned_properites:
if isinstance(self.owner.owned_properites[key], Railroad):
num_railroads_owned += 1
self.owner.exchange_money(player, cost[num_railroads_owned])
class Effect(ABC):
"""
Parent class for all squares where an effect is
applied. Including Chance, Community Chest, Income tax, etc.
Attributes:
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
"""
@abstractmethod
def __init__(self, location, name):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (String)
Name of the Tile
"""
self.location = location
self.name = name
@abstractmethod
def landed_on(self, player):
pass
class Card(Effect):
"""
Parent Class for Chance and Community Chest Cards
Attributes:
---- From Effect Class ----
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
---- New In Card Class ----
:active_player: (Player Object)
Player that the card will be affecting
"""
def __init__(self, location, name):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (String)
Name of the Tile
"""
self.active_player = None
super().__init__(location, name)
def landed_on(self, player):
"""
Sets Active player to player, then calls draw_card()
:param player: (Player Object)
Player that landed on card tile
:return:
calls draw_card
"""
self.active_player = player
return self.draw_card()
def draw_card(self):
pass
# -------------Card effects --------------
def advance_to_tile(self, tile_num):
"""
Moves player to specified tile and calls that tile's landed_on method
:param tile_num: (int)
Tile the active player will be moved to
"""
# Checks if player will pass go
if self.active_player.position >= tile_num:
self.active_player.money += 200
self.active_player.position = tile_num
print("You've been moved to :", Board.spaces[tile_num],
"\n\tTile Number:", tile_num)
return Board.spaces[self.active_player.position].landed_on(self.active_player)
def advance_to_next(self, class_type):
"""
Advances active player to the next tile of specified class type
:param class_type: (Object)
class of tile to advance to
examples: Railroad, Utility, Card
"""
location_to_check = self.active_player.position + 1
passed_go = False
while not isinstance(
Board.spaces[location_to_check], class_type):
location_to_check += 1
if location_to_check > 39:
location_to_check = location_to_check % 40
passed_go = True
self.active_player.position = location_to_check
if passed_go:
self.active_player.money += 200
print("You've advanced to the next ", str(class_type),
"\n\tTile Number: ", self.active_player.position)
return Board.spaces[self.active_player.position].landed_on(self.active_player)
def gain_money(self, amount):
"""
Give player money
:param amount: (int)
Amount of money to give active player
"""
print("You've gained $", amount)
self.active_player.money += amount
def lose_money(self, amount):
"""
Takes player's money
:param amount: (int)
amount of money to take from active player
"""
print("You've lost $", amount)
Board.spaces[20].exchange_money(self.active_player, amount)
def get_out_of_jail_free(self):
"""
Gives player a get out of jail free card
"""
print("You got a get out of jail free card",
"\n\t you now have ", self.active_player.get_out_of_jail_cards)
self.active_player.get_out_of_jail_cards += 1
def go_back(self, num_tiles):
"""
        Moves player back a specified number of spaces and calls that tile's landed_on method.
:param num_tiles: (int)
Number of tiles to be moved back
"""
self.active_player.position -= num_tiles
print("You've been sent back ", num_tiles, "tiles.",
"\nYou're now on tile number: ", self.active_player.position)
        return Board.spaces[self.active_player.position].landed_on(self.active_player)
def go_to_jail(self):
"""
Sends the player to jail, player does not pass go and does not collect $200
"""
print("Oh No! you've been sent to jail!!")
self.active_player.position = 'jail'
def house_repairs(self):
"""
Charges player house repairs
"""
owed_money = 0
for key in Board.spaces:
try:
if Board.spaces[key].owner == self.active_player:
hold = Board.spaces[key].number_of_houses
owed_money += 25 * hold
except AttributeError:
# Corner Tiles have no attribute owner, skipped
pass
print("House repairs are expensive!")
if owed_money == 0:
print("Lucky for you, you have no houses")
else:
print("You paid: $", owed_money)
Board.spaces[20].exchange_money(self.active_player, owed_money)
def pay_all_other_players(self, amount):
"""
Active player pays all other players specified amount
:param amount: (int)
amount to pay other players
"""
# TODO: implement pay all other players
print("Lucky for {} I don't know how to make you pay everyone else... yet".format(self.active_player))
def get_money_from_all_other_players(self, amount):
"""
Active player gets money from all other players
:param amount: (int)
amount gotten from other players
"""
amount = amount * -1
self.pay_all_other_players(amount)
def __str__(self):
"""
:return: (String)
Easy to read tile description
"""
output = "{0} {1}".format(self.location, self.name)
return output
class Chance(Card):
"""
All Chance Cards
Attributes:
---- From Card Class ----
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
:active_player: (Player Object)
Player that the card will be affecting
"""
def __init__(self, location, name):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (String)
Name of the Tile
"""
super().__init__(location, name)
def draw_card(self):
"""
Chooses a random random card and calls the appropriate method
"""
key = randint(0, 16)
if key == 0:
return self.advance_to_tile(0)
elif key == 1:
return self.advance_to_tile(24)
elif key == 2:
return self.advance_to_tile(11)
elif key == 3:
return self.advance_to_next(Utility)
elif key == 4:
return self.advance_to_next(Railroad)
elif key == 5:
return self.advance_to_next(Railroad)
elif key == 6:
return self.gain_money(50)
elif key == 7:
self.get_out_of_jail_free()
elif key == 8:
return self.go_back(3)
elif key == 9:
return self.go_to_jail()
elif key == 10:
return self.house_repairs()
elif key == 11:
return self.lose_money(15)
elif key == 12:
return self.advance_to_tile(5)
elif key == 13:
return self.advance_to_tile(39)
elif key == 14:
return self.pay_all_other_players(50)
elif key == 15:
return self.gain_money(150)
elif key == 16:
return self.gain_money(100)
else:
return print("Bad Chance Card Draw")
class CommunityChest(Card):
"""
All Community Chest Cards
Attributes:
---- From Card Class ----
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
:active_player: (Player Object)
Player that the card will be affecting
"""
def __init__(self, location, name):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (String)
Name of the Tile
"""
super().__init__(location, name)
def draw_card(self):
"""
Chooses a random random card and calls the appropriate method
"""
key = randint(0, 16)
if key == 0:
return self.advance_to_tile(0)
elif key == 1:
return self.gain_money(200)
elif key == 2:
return self.lose_money(50)
elif key == 3:
return self.gain_money(50)
elif key == 4:
return self.get_out_of_jail_free()
elif key == 5:
return self.go_to_jail()
elif key == 6:
return self.get_money_from_all_other_players(50)
elif key == 7:
return self.gain_money(100)
elif key == 8:
return self.gain_money(20)
elif key == 9:
return self.get_money_from_all_other_players(10)
elif key == 10:
return self.gain_money(100)
elif key == 11:
return self.lose_money(50)
elif key == 12:
return self.lose_money(150)
elif key == 13:
return self.gain_money(25)
elif key == 14:
return self.house_repairs()
elif key == 15:
return self.gain_money(10)
elif key == 16:
return self.gain_money(100)
else:
print("bad CC draw")
class Board(object):
"""
The Monopoly Board
Attributes:
:spaces: (Dict)
A dictionary where the key is the location of a tile and the content is the property
"""
spaces = {}
@classmethod
def default_board(cls):
"""
Builds a default board for testing
"""
cls.spaces = {}
streets = {x: Property(x, "Name", ["Color", 150, 5, 10, 20, 40, 80, 160])
for x in range(0, 40)}
railroads = {x: Railroad(x, "Name") for x in [5, 15, 25, 35]}
utilities = {x: Utility(x, "Name") for x in [12, 28]}
chances = {x: Chance(x, "Chance Card") for x in [7, 22, 36]}
community_chest = {x: CommunityChest(x, "Community Chest Card")
for x in [2, 17, 33]}
free_parking = {20: FreeParking()}
cls.spaces.update(streets)
cls.spaces.update(railroads)
cls.spaces.update(utilities)
cls.spaces.update(chances)
cls.spaces.update(community_chest)
cls.spaces.update(free_parking)
@classmethod
def read_in_board(cls):
"""
read in a board from file. Each line in the file should be formatted as follows:
Square# ClassType class data
"""
loop_value = True
while loop_value:
try:
if get_yes_or_no_input('Would You Like To Use The Standard Board?'):
file_name = 'StandardBoard'
else:
file_name = input("Please enter the file Name: ")
with open('boards/' + file_name) as file:
for line in file:
if not line.startswith('#'):
data = line.split()
new_tile = TileFactory.create_tile(data)
cls.spaces.update(new_tile)
loop_value = False
except FileNotFoundError:
if file_name == "Q":
quit()
print("File Not found, please try again.\n\tOr Enter Q to quit\n")
@classmethod
def __str__(cls):
"""
:return: (String)
Formatted __str__ method for all objects in spaces
"""
output = ''
for key in cls.spaces:
output = output + "\n" + cls.spaces[key].__str__()
return output
# construct the default board for testing
Board.default_board()
class TileFactory:
"""
Creates all possible different tiles, used with read_in_board in Board
"""
@staticmethod
def create_tile(data):
"""
Creates a tile based on the data provided
:param data:
Data read in from a file
:return:
A tile to be added to the board
"""
while True:
try:
if data is not None:
position = int(data[0])
class_type = data[1]
name = data[2]
try:
data = [int(x) for x in data[3:]]
except ValueError:
last_part_data = [int(x) for x in data[4:]]
data = [data[3], ] + last_part_data
if class_type == "Property":
return {position: Property(position, name, data)}
elif class_type == "Utility":
return {position: Utility(position, name)}
elif class_type == "Railroad":
return {position: Railroad(position, name)}
elif class_type == "Chance":
return {position: Chance(position, name)}
elif class_type == "CommunityChest":
return {position: CommunityChest(position, name)}
elif class_type == "SetTax":
return {position: SetTax(position, name, data[0])}
elif class_type == "PercentTax":
return {position: PercentTax(position, name, data[0])}
elif class_type == "FreeParking":
return {position: FreeParking(position)}
elif class_type == "Go":
return {0: Go()}
elif class_type == 'JustVisiting':
return {10: JustVisiting()}
elif class_type == 'GoToJail':
return {30: GoToJail()}
elif class_type == 'jail':
return {'jail': Jail()}
else:
raise TilesClassNotFoundError
except TilesClassNotFoundError:
print("\n\nError!!\n\tClass Type: ", data[1], " Not Found!")
break
except IndexError:
data = None
class SetTax(Effect):
"""
    Charges the player a fixed tax amount that does not depend on the player's wealth
Attributes:
---- From Effect Class ----
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
---- New in SetTax Class ----
:amount:
Amount to tax the player
"""
def __init__(self, location, name, amount):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (String)
Name of the Tile
:param amount: (int)
amount to tax the player
"""
self.amount = int(amount)
super().__init__(location, name)
def landed_on(self, player):
"""
Takes amount from player and adds it to Free Parking
:param player: (Player Object)
Player that landed on tile
"""
Board.spaces[20].exchange_money(player, self.amount)
def __str__(self):
"""
:return: (String)
Easy to read tile description
"""
output = "{0} {1}" \
"\n\tTax Amount: ${2}"\
.format(self.location, self.name, self.amount)
return output
class PercentTax(Effect):
"""
    Charges the player a percentage of their total wealth as tax
Attributes:
---- From Effect Class ----
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
---- New in PercentTax Class ----
:percent:
percent to tax the player
"""
def __init__(self, location, name, percent):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (String)
Name of the Tile
:param percent: (float or String)
percent to tax the player
"""
self.percent = float(percent)
super().__init__(location, name)
def landed_on(self, player):
"""
Charges player percent of their total wealth and gives it to free parking
:param player: (Player Object)
Player that landed on Percent Tax
"""
Board.spaces[20].exchange_money(player, player.money * self.percent)
def __str__(self):
"""
:return: (String)
Easy to read tile description
"""
output = "{0} {1}" \
"\n\tTax percent: {2}%"\
.format(self.location, self.name, self.percent)
class CornerTile(ABC):
"""
Parent Class For Each of the corner tiles
Excluding Free Parking.
Attributes:
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
"""
@abstractmethod
def __init__(self, location, name):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (String)
Name of the Tile
"""
self.location = location
self.name = name
@abstractmethod
def landed_on(self, player):
pass
def __str__(self):
"""
:return: (String)
Description of the tile
"""
output = "{0} {1}".format(self.location, self.name)
return output
class Go(CornerTile):
"""
Models GO Tile
Attributes:
---- From CornerTile Class ----
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
"""
def __init__(self, location=0, name='GO'):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (Optional, String, default=GO)
Name of the Tile
"""
super().__init__(location, name)
def landed_on(self, player):
print("Landed on Go!")
class JustVisiting(CornerTile):
"""
Models Just Visiting (jail) tile
Attributes:
---- From CornerTile Class ----
:location: (int)
position, (0 - 39), on the monopoly board
:name: (String)
Name of the location
"""
def __init__(self, location=10, name="JustVisiting"):
"""
:param location: (int)
Location, (0 - 39) on the monopoly board
:param name: (Optional, String, default=JustVisiting)
Name of the Tile
"""
super().__init__(location, name)
def landed_on(self, player):
# TODO: find a way to print out what players are in jail
print("Just Visiting Jail")
class GoToJail(CornerTile):
"""
Class that sends people to jail
"""
def __init__(self, location=30, name='Go To Jail'):
super().__init__(location, name)
def landed_on(self, player):
player.position = 'jail'
print("Go To Jail!!!")
class Jail(CornerTile):
def __init__(self, location='jail', name='jail'):
super().__init__(location, name)
def landed_on(self, player):
pass
def __str__(self):
return "This is the jail"
| 3.6875 | 4 |
cli.py | GIScience/hot-tm-critical-numbers | 1 | 12792750 | <gh_stars>1-10
import click
import json
from critical_numbers import app
from critical_numbers.logic import api_requests, converter
@click.group()
def cli():
pass
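# Illustrative shell usage (assumes the module is invoked directly):
#   python cli.py serve     # run the web app on 127.0.0.1:5000
#   python cli.py getall    # write all HOT-TM projects to hot-tm-projects.geojson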
@cli.command('serve')
def serve():
'''serves webapp to 127.0.0.1:5000'''
app.run()
@cli.command('getall')
def getall():
'''gets all projects from the HOT Tasking Manager as GeoJSON'''
hot_tm_projects = api_requests.get_stats()
hot_tm_projects = converter.convert_to_geojson(hot_tm_projects)
with open('hot-tm-projects.geojson', 'w') as f:
json.dump(hot_tm_projects, f)
    click.echo('GeoJSON of all hot-tm projects successfully written to "hot-tm-projects.geojson"')
if __name__ == '__main__':
cli()
| 2.4375 | 2 |