ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | 1a399839fe05ff11e04acf910808c59f29451c1f | """add project settings
Revision ID: 1bb8cb3abf60
Revises: 393a0cce62c7
Create Date: 2018-08-22 15:20:47.132129
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '1bb8cb3abf60'
down_revision = '393a0cce62c7'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('mm_project_settings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(), nullable=False),
sa.Column('project', sa.String(), nullable=False),
sa.Column('settings', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('mm_project_settings_user',
sa.Column('mm_project_settings_id', sa.Integer(), nullable=True),
sa.Column('mb_user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['mb_user_id'], ['mb_user.mb_user_id'], ),
sa.ForeignKeyConstraint(['mm_project_settings_id'], ['mm_project_settings.id'], )
)
def downgrade():
op.drop_table('mm_project_settings_user')
op.drop_table('mm_project_settings')
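# Illustrative note: with Alembic configured for this project, this revision is
# typically applied with "alembic upgrade head" and reverted with
# "alembic downgrade 393a0cce62c7" (or "alembic downgrade -1").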
|
py | 1a399a4ad3524e7e45e318b709cc5412e966777a | # coding: utf-8
"""
Finnhub API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import finnhub
from finnhub.models.covid19 import Covid19 # noqa: E501
from finnhub.rest import ApiException
class TestCovid19(unittest.TestCase):
"""Covid19 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test Covid19
include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = finnhub.models.covid19.Covid19() # noqa: E501
if include_optional :
return Covid19(
state = '0',
case = 1.337,
death = 1.337,
updated = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f')
)
else :
return Covid19(
)
def testCovid19(self):
"""Test Covid19"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
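# These stubs are also collected by pytest; running the file directly
# (python <this file>) executes them via unittest.main() as above.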
|
py | 1a399bb582a06f13c39dca44fe86296fea52690d | from __future__ import absolute_import, division, print_function
import os,sys
from iotbx import pdb
from iotbx import reflection_file_reader
from iotbx import file_reader
from mmtbx.refinement.real_space import individual_sites
import mmtbx
import libtbx.phil.command_line
from six.moves import range
master_phil = libtbx.phil.parse("""
flip_base {
pdb_file = None
.type = path
.help = '''input PDB file'''
reflection_file = None
.type = path
.help = '''Reflection file'''
out_pdb_file = None
.type = str
.help = '''Output PDB file'''
chain = None
.type = str
.help = '''Chain of the residue that is to be flipped'''
alt_loc = None
.type = str
.help = '''Alternate location of the residue that is to be flipped'''
res_num = None
.type = int
.help = '''Residue number of the residue that is to be flipped'''
n_refine_cycles = 3
.type = int
.help = '''Number of real-space refinement cycles'''
help = False
.type = bool
.help = '''Show help message'''
}
""", process_includes=True)
def usage(msg='', log=sys.stderr):
s = '''
******************************************************************************
Usage :
phenix.python flipbase.py xxxx.mtz yyyy.pdb chain=A res_num=1
Will flip base of chain A residue 1 of yyyy.pdb and do a real-space
refinement using xxxx.mtz.
Required :
pdb_file input PDB file
reflection_file Reflection file
chain Chain of the residue that is to be flipped
res_num Residue number of the residue that is to be flipped
Options :
out_pdb_file Output PDB file (defaults to <pdb_file>_baseflip.pdb)
alt_loc Alternate location of the residue that is to be flipped
n_refine_cycles Number of real-space refinement cycles
help Show help message
******************************************************************************
'''
if msg != '' :
s = '*'*79 + '\n\n!!!!! %s !!!!!\n' % msg + s
print(s);sys.exit()
base_rotation_axes = {
"A" : ["C1'", "N9"],
"G" : ["C1'", "N9"],
"C" : ["C1'", "N1"],
"T" : ["C1'", "N1"],
"U" : ["C1'", "N1"],
}
base_rotatable_atoms = {
"A" : ["N1", "C2", "H2", "N3", "C4", "C5", "C6", "N6", "H61", "H62", "N7",
"C8", "H8"],
"G" : ["N1", "H1", "C2", "N2", "H21", "H22", "N3", "C4", "C5", "C6", "O6",
"N7", "C8", "H8"],
"C" : ["C2", "O2", "N3", "C4", "N4", "H41", "H42", "C5", "H5", "C6", "H6"],
"T" : ["C2", "O2", "N3", "H3", "C4", "O4", "C5", "C7", "H71", "H72", "H73",
"C6", "H6"],
"U" : ["C2", "O2", "N3", "H3", "C4", "O4", "C5", "H5", "C6", "H6"],
}
def flip_base(atom_group, angle=180):
import scitbx.matrix
axis_point_1 = axis_point_2 = None
rotateable_atoms = []
base_name = atom_group.resname.strip()
if ("r" in base_name):
base_name = base_name.replace("r")
elif (base_name.startswith("D") and len(base_name) == 2):
base_name = base_name[1]
assert base_name in base_rotation_axes, base_name
for atom in atom_group.atoms():
atom_name = atom.name.strip()
if (atom_name == base_rotation_axes[base_name][0]):
axis_point_1 = atom.xyz
elif (atom_name == base_rotation_axes[base_name][1]):
axis_point_2 = atom.xyz
elif (atom_name in base_rotatable_atoms[base_name]):
rotateable_atoms.append(atom)
if (None in [axis_point_1, axis_point_2]):
raise RuntimeError("Missing atom(s) for rotateable axis.")
elif (len(rotateable_atoms) == 0):
raise RuntimeError("Missing nucleotide base.")
for atom in rotateable_atoms :
atom.xyz = scitbx.matrix.rotate_point_around_axis(
axis_point_1=axis_point_1,
axis_point_2=axis_point_2,
point=atom.xyz,
angle=angle,
deg=True)
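# Illustrative sketch only (not called by this script): scitbx.matrix.
# rotate_point_around_axis is assumed to perform a standard axis-angle
# rotation; a pure-numpy equivalent using Rodrigues' formula would look
# roughly like this.
def _rotate_point_around_axis_sketch(axis_point_1, axis_point_2, point, angle_deg):
    import numpy as np
    a1 = np.asarray(axis_point_1, dtype=float)
    a2 = np.asarray(axis_point_2, dtype=float)
    p = np.asarray(point, dtype=float)
    k = (a2 - a1) / np.linalg.norm(a2 - a1)   # unit vector along the rotation axis
    v = p - a1                                # point relative to a point on the axis
    theta = np.deg2rad(angle_deg)
    # Rodrigues' rotation formula
    v_rot = (v * np.cos(theta)
             + np.cross(k, v) * np.sin(theta)
             + k * np.dot(k, v) * (1.0 - np.cos(theta)))
    return tuple(a1 + v_rot)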
def get_target_map(reflection_file_name, log=sys.stderr):
miller_arrays = reflection_file_reader.any_reflection_file(file_name =
reflection_file_name).as_miller_arrays()
ma = miller_arrays[0]
fft_map = ma.fft_map(resolution_factor=0.25)
fft_map.apply_sigma_scaling()
print("\nUsing sigma scaled map.\n", file=log)
target_map = fft_map.real_map_unpadded()
return target_map
def flip_and_refine(pdb_hierarchy,
xray_structure,
target_map,
geometry_restraints_manager,
chain,
res_num,
alt_loc = None,
n_refine_cycles = 3,
log = sys.stdout):
sites_cart = xray_structure.sites_cart()
ero = False
for ch in pdb_hierarchy.chains():
if ch.id.strip() != chain : continue
for rg in ch.residue_groups():
if rg.resseq_as_int() != res_num : continue
if rg.have_conformers() and not alt_loc :
s = 'Specified residue has alternate conformations. Please specify '
raise RuntimeError(s + 'alt_loc on the command line')
for residue in rg.atom_groups():
if alt_loc and alt_loc != residue.altloc.strip():
continue
flip_base(residue, angle=180)
sites_cart.set_selected(residue.atoms().extract_i_seq(),
residue.atoms().extract_xyz())
xray_structure = xray_structure.replace_sites_cart(sites_cart)
sele = residue.atoms().extract_i_seq()
print('real-space refinement BEGIN'.center(79,'*'), file=log)
for i in range(n_refine_cycles):
print('real-space refinement cycle %i...' % (i + 1), file=log)
ero = individual_sites.easy(
map_data = target_map,
xray_structure = xray_structure,
pdb_hierarchy = pdb_hierarchy,
geometry_restraints_manager = geometry_restraints_manager,
selection = sele)
print('real-space refinement FINISHED'.center(79,'*'), file=log)
if not ero : raise RuntimeError('Specified residue not found')
return ero.pdb_hierarchy
def run(args):
# phil parsing----------------------------------------------------------
interpreter = libtbx.phil.command_line.argument_interpreter(master_phil=master_phil)
sources = []
for arg in args:
if os.path.isfile(arg): #Handles loose filenames
input_file = file_reader.any_file(arg)
if (input_file.file_type == "pdb"):
sources.append(interpreter.process(arg="pdb_file=\"%s\"" % arg))
if (input_file.file_type == "hkl"):
sources.append(interpreter.process(arg="reflection_file=\"%s\"" % arg))
elif (input_file.file_type == "phil"):
sources.append(input_file.file_object)
else: #Handles arguments with xxx=yyy formatting
arg_phil = interpreter.process(arg=arg)
sources.append(arg_phil)
work_phil = master_phil.fetch(sources=sources)
work_params = work_phil.extract()
params = work_params.flip_base
if work_params.flip_base.pdb_file == None :
usage('PDB file not provided!')
if work_params.flip_base.reflection_file == None :
usage('Reflection file not provided!')
if work_params.flip_base.chain == None :
usage('chain not provided!')
if work_params.flip_base.res_num == None :
usage('res_num not provided!')
if work_params.flip_base.out_pdb_file == None :
fn = work_params.flip_base.pdb_file.replace('.pdb','_baseflip.pdb')
work_params.flip_base.out_pdb_file = fn
#usage('out_pdb_file file not provided!')
params = work_params.flip_base
if params.help:
usage()
sys.exit()
# end phil parsing ------------------------------------------------------
pdb_file_name = params.pdb_file
reflection_file_name = params.reflection_file
log = sys.stdout
print('\ngetting target_map...\n', file=log)
target_map = get_target_map(reflection_file_name, log)
ppf = mmtbx.utils.process_pdb_file_srv(log=False).process_pdb_files(
[pdb_file_name])[0]
grm = mmtbx.restraints.manager(
geometry = ppf.geometry_restraints_manager(show_energies = False),
normalization = True)
pdb_hierarchy = ppf.all_chain_proxies.pdb_hierarchy
pdb_hierarchy.atoms().reset_i_seq()
xray_structure = ppf.xray_structure(show_summary = False)
flip_hierarchy = flip_and_refine(pdb_hierarchy,
xray_structure,
target_map = target_map,
geometry_restraints_manager = grm,
chain = params.chain,
res_num = params.res_num,
alt_loc = params.alt_loc,
n_refine_cycles = params.n_refine_cycles,
log= log)
flip_hierarchy.write_pdb_file(params.out_pdb_file)
print('\nOutput written to %s' % params.out_pdb_file, file=log)
if __name__ == "__main__":
run(sys.argv[1:])
|
py | 1a399c0a6392579acedb4af6326f0a0a5c1543d8 | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Constants for the Message Exit Handler """
SUCCESS_STATUS = "SUCCEEDED"
FAILURE_STATUS = "FAILED"
|
py | 1a399d3e1149b9027a3fcba005ff5fc114ef28c7 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import sys
from google.api import http_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.protobuf import any_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import duration_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import timestamp_pb2
from google.rpc import status_pb2
from google.api_core.protobuf_helpers import get_messages
from google.cloud.tasks_v2beta2.proto import cloudtasks_pb2
from google.cloud.tasks_v2beta2.proto import queue_pb2
from google.cloud.tasks_v2beta2.proto import target_pb2
from google.cloud.tasks_v2beta2.proto import task_pb2
_shared_modules = [
http_pb2,
iam_policy_pb2,
policy_pb2,
any_pb2,
descriptor_pb2,
duration_pb2,
empty_pb2,
field_mask_pb2,
timestamp_pb2,
status_pb2,
]
_local_modules = [
cloudtasks_pb2,
queue_pb2,
target_pb2,
task_pb2,
]
names = []
for module in _shared_modules:
for name, message in get_messages(module).items():
setattr(sys.modules[__name__], name, message)
names.append(name)
for module in _local_modules:
for name, message in get_messages(module).items():
message.__module__ = 'google.cloud.tasks_v2beta2.types'
setattr(sys.modules[__name__], name, message)
names.append(name)
__all__ = tuple(sorted(names))
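# Illustrative downstream usage (exact message names depend on the generated protos):
#   from google.cloud.tasks_v2beta2 import types
#   queue = types.Queue(name="projects/my-project/locations/us-central1/queues/my-queue")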
|
py | 1a399e649ef92790264114056994e9f26e440dbe | import os
import sys
sys.path.insert(0, os.getcwd())
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.nn import Linear
import torch.optim as optim
import networkx as nx
from torch_geometric.data import InMemoryDataset, Data
from torch_geometric.nn import GCNConv
import argparse
import mlflow
from fedgraphconv.prep_mhealth import prep_mhealth
from fedgraphconv.prep_wisdm import prep_wisdm
from fedgraphconv.data_utils import HARData, HARDataCentral
from fedgraphconv.models import GCN_mhealth, GCN_mhealth_Attn, GCN_wisdm, GCN_wisdm_Attn
from fedgraphconv.fed_utils import average_weights
import time
import tqdm
import random
import copy
import datetime as dttm
since = dttm.datetime.now()
since_str = dttm.datetime.strftime(since, '%d-%m-%y %H:%M:%S')
parser = argparse.ArgumentParser()
parser.add_argument('--data',
default= 'wisdm',
help = 'Dataset to use')
parser.add_argument('--num_sample',
default= 32,
type= int,
help = 'Number of samples in each window')
parser.add_argument('--dist_thresh',
default= 0.3,
type = float,
help = 'Minimum euclidean distance to draw an edge')
parser.add_argument('--train_prop',
default= 0.7,
type = float,
help = 'Proportion of data to include in training.')
parser.add_argument('--local_epochs',
default= 10,
type = int,
help = 'Number of local epochs to run')
parser.add_argument('--batch_size',
default= 4,
type = int,
help = 'Batch size in each iteration')
parser.add_argument('--lr',
default= 0.01,
type = float,
help = 'Learning rate')
parser.add_argument('--num_rounds',
default= 10,
type = int,
help = 'Number of federated rounds')
parser.add_argument('--fl_sample',
default= 0.4,
type = float,
help = 'Proportion of agents that participate in each federation round')
parser.add_argument('--attention',
default=False,
help = 'Use graph attention instead of convolution.')
def train(data, criterion):
model.train()
optimizer.zero_grad()
out = model(data.x, data.edge_index)
y = data.y.squeeze().t() - 1
loss = criterion(out[data.train_mask], y[data.train_mask] )
loss.backward()
optimizer.step()
return loss
def evaluate(data):
global_model.eval()
y = data.y.squeeze().t() - 1
with torch.no_grad():
out = global_model(data.x, data.edge_index)
accuracy = torch.mean((torch.argmax(out[~data.train_mask] , 1) == y[~data.train_mask]).float())
return accuracy
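# For reference only: average_weights (imported from fedgraphconv.fed_utils) is
# assumed to perform FedAvg-style uniform parameter averaging, roughly
# equivalent to this sketch over a list of model state_dicts.
def _average_weights_sketch(state_dicts):
    avg = copy.deepcopy(state_dicts[0])
    for key in avg:
        for other in state_dicts[1:]:
            avg[key] = avg[key] + other[key]
        avg[key] = avg[key] / len(state_dicts)
    return avg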
if __name__ == '__main__':
args = parser.parse_args()
if args.attention:
mlflow.set_experiment('gnn_federated_attention')
else:
print("setting attention to false")
mlflow.set_experiment('gnn_federated_1')
DATADIR = 'data/processed'
if args.data == 'mhealth':
prep_mhealth(args.num_sample, args.dist_thresh, args.train_prop)
num_class = 12
input_dim = 23
DATADIR = 'data/processed/mhealth'
if args.attention:
global_model = GCN_mhealth_Attn(input_dim, num_class)
else:
global_model = GCN_mhealth(input_dim, num_class)
elif args.data == 'wisdm':
prep_wisdm(args.num_sample, args.dist_thresh, args.train_prop)
num_class = 6
input_dim = 9
DATADIR = 'data/processed/wisdm'
if args.attention:
global_model = GCN_wisdm_Attn(input_dim, num_class)
else:
global_model = GCN_wisdm(input_dim, num_class)
mlflow.set_tag('dataset', args.data)
FL_AGENTS = os.listdir(DATADIR)
NUM_ROUNDS = args.num_rounds
FL_SAMPLE = args.fl_sample
EPOCHS = args.local_epochs
mlflow.log_params({
'num_sample': args.num_sample,
'dist_thresh': args.dist_thresh,
'train_prop' : args.train_prop,
'local_epochs' : EPOCHS,
'lr': args.lr,
'num_rounds': NUM_ROUNDS,
'fl_sample': FL_SAMPLE
})
excel = []
for each_round in tqdm.tqdm(range(NUM_ROUNDS)):
agents_to_train = random.sample(FL_AGENTS, k= int(FL_SAMPLE * len(FL_AGENTS)))
model_list = []
metrics = {}
_n = 0
_a = 0
for each_agent in agents_to_train:
# read the data.
dataset = HARData(os.path.join(DATADIR, str(each_agent)))[0]
loss = nn.CrossEntropyLoss()
model = copy.deepcopy(global_model)
optimizer = optim.Adam(model.parameters(), args.lr)
for epoch in range(EPOCHS):
loss_ = train(dataset, loss)
model_list.append(model.state_dict())
# average weight at end of round.
avg_weights = average_weights(model_list)
global_model.load_state_dict(avg_weights)
# get accuracy at end of round.
dataset = HARDataCentral(DATADIR)
i = 1
for each_data in dataset:
accuracy = evaluate(each_data) # evaluate the global model on each data.
metrics['accuracy-agent_{0}'.format(i)]= accuracy.item()
_n += each_data.x[~each_data.train_mask].size()[0]
_a += each_data.x[~each_data.train_mask].size()[0] * accuracy.item()
i+=1
metrics['accuracy'] = _a / _n
mlflow.log_metrics(metrics, step = each_round)
now = dttm.datetime.now()
excel.append((each_round, since_str, _a / _n, now.strftime('%y-%m-%d %H:%M:%S'), (now-since).total_seconds()))
df = pd.DataFrame(excel)
df.columns =['epoch', 'time_start', 'accuracy', 'log_time', 'time_elapsed']
df.to_csv('logs_{0}_gnn_federated.csv'.format(args.data), index= None)
|
py | 1a399eb4d370b334a7e6af8e3ae61256feadc926 | import sys
sys.path.insert(0, '/home/paul/.conda/envs/tensorflow/lib/python3.6/site-packages')
import keras.preprocessing.image
import deepometry.image.iterator_balanced, deepometry.image.iterator
class ImageDataGenerator(keras.preprocessing.image.ImageDataGenerator):
def __init__(self,
height_shift_range=0.0,
horizontal_flip=False,
preprocessing_function=None,
rotation_range=0.0,
vertical_flip=False,
width_shift_range=0.0):
super(ImageDataGenerator, self).__init__(
height_shift_range=height_shift_range,
horizontal_flip=horizontal_flip,
preprocessing_function=preprocessing_function,
rotation_range=rotation_range,
vertical_flip=vertical_flip,
width_shift_range=width_shift_range
)
def flow(self, x,
y=None,
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix="",
save_format="tif",
balance=True,
mixup_alpha=0.0):
if balance:
return deepometry.image.iterator_balanced.NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
mixup_alpha=mixup_alpha
)
else:
return deepometry.image.iterator.NumpyArrayIterator(
x, y, self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
mixup_alpha=mixup_alpha
)
def flow_from_directory(self, directory,
target_size=(48, 48),
color_mode="rgb",
classes=None,
class_mode="categorical",
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix="",
save_format="tif",
follow_links=False):
raise NotImplementedError()
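# Illustrative usage (hypothetical arrays; assumes the deepometry iterators
# follow the keras NumpyArrayIterator protocol):
#   import numpy
#   generator = ImageDataGenerator(horizontal_flip=True, rotation_range=90.0)
#   x = numpy.random.random((100, 48, 48, 3)).astype("float32")
#   y = numpy.random.randint(0, 5, 100)
#   batch_x, batch_y = next(generator.flow(x, y, batch_size=32, balance=True))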
|
py | 1a399ee80ad7f4be42bf7dc35ee5b5f2be171b3a | from django.contrib import admin
from django.conf.urls.i18n import i18n_patterns
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
from myproject.apps.categories1 import views as categories1_views
urlpatterns = i18n_patterns(
path("admin/", admin.site.urls),
path("accounts/", include("django.contrib.auth.urls")),
path(
"idea-categories1/",
categories1_views.IdeaCategoryList.as_view(),
name="idea_categories1",
),
path("ideas1/", include(("myproject.apps.ideas1.urls", "ideas1"), namespace="ideas1")),
)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static("/media/", document_root=settings.MEDIA_ROOT)
|
py | 1a399fa994dd9182aa62282db60660952db54ed6 | # Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import os
import multiprocessing as mp
from simple_systolic_array import P, N, make_sdfg
from dace.config import Config
import dace.dtypes
import numpy as np
def run_test(do_async):
Config.set("compiler", "intel_fpga", "launch_async", value=do_async)
name = "async_test"
sdfg = make_sdfg(name)
sdfg.specialize({"P": P.get(), "N": N.get()})
# We don't care about the result, as long as it compiles and runs
sdfg(A=A)
if __name__ == "__main__":
N.set(128)
P.set(4)
Config.set("compiler", "fpga_vendor", value="intel_fpga")
Config.set("compiler", "intel_fpga", "mode", value="emulator")
A = np.empty((N.get()), dtype=np.int32)
for v in [False, True]:
# Has to be a separate process, as the Intel FPGA runtime cannot be
# initialized twice in the same executable
p = mp.Process(target=run_test, args=(v,))
p.start()
p.join()
|
py | 1a39a084c67d5bb82ed261606b0827469a216ceb | # Import statements
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
# Read the data.
data = np.asarray(pd.read_csv('data.csv', header=None))
# Assign the features to the variable X, and the labels to the variable y.
X = data[:,0:2]
y = data[:,2]
# TODO: Create the model and assign it to the variable model.
# Find the right parameters for this model to achieve 100% accuracy on the dataset.
model = SVC(kernel='rbf', gamma=27)
# TODO: Fit the model.
model.fit(X,y)
# TODO: Make predictions. Store them in the variable y_pred.
y_pred = model.predict(X)
# TODO: Calculate the accuracy and assign it to the variable acc.
acc = accuracy_score(y, y_pred) |
py | 1a39a0ce271f850c28c3cfdc3d1a4cb3444be209 | from com.bridgelabz.utility.queue import Queue
class Banking:
def run(self):
queue=Queue()
money=0
char ='y'
while(char!='n'):
choice=input("select e for enqueue and d for dequeue")
if(choice=='e'):
name=input("enter name")
queue.enqueue(name)
select=input("enter w for withdraw and d for deposit")
if(select=='w'):
amount=int(input("withdraw amount"))
if(money<amount):
print("insufficient funds")
else:
print("funds added")
money=money+amount
else:
amount = int(input("deposit amount"))
amount=amount+money
else:
queue.dequeue()
char=input("do you want to continue")
queue.display()
return
Banking().run()
|
py | 1a39a181afe275439fd9c485c176ce0f3de8b9ec | # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Modifications copyright (c) 2021 DocYard Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import cv2
import numpy as np
__all__ = ["SASTProcessTrain"]
class SASTProcessTrain(object):
def __init__(
self,
image_shape=[512, 512],
min_crop_size=24,
min_crop_side_ratio=0.3,
min_text_size=10,
max_text_size=512,
**kwargs
):
self.input_size = image_shape[1]
self.min_crop_size = min_crop_size
self.min_crop_side_ratio = min_crop_side_ratio
self.min_text_size = min_text_size
self.max_text_size = max_text_size
def quad_area(self, poly):
"""
compute the signed area of a quadrilateral (shoelace formula); a positive
value is treated as wrong-direction vertex ordering by check_and_validate_polys
:param poly:
:return:
"""
edge = [
(poly[1][0] - poly[0][0]) * (poly[1][1] + poly[0][1]),
(poly[2][0] - poly[1][0]) * (poly[2][1] + poly[1][1]),
(poly[3][0] - poly[2][0]) * (poly[3][1] + poly[2][1]),
(poly[0][0] - poly[3][0]) * (poly[0][1] + poly[3][1]),
]
return np.sum(edge) / 2.0
def gen_quad_from_poly(self, poly):
"""
Generate min area quad from poly.
"""
point_num = poly.shape[0]
min_area_quad = np.zeros((4, 2), dtype=np.float32)
if True:
rect = cv2.minAreaRect(
poly.astype(np.int32)
) # (center (x,y), (width, height), angle of rotation)
rect[0]
box = np.array(cv2.boxPoints(rect))
first_point_idx = 0
min_dist = 1e4
for i in range(4):
dist = (
np.linalg.norm(box[(i + 0) % 4] - poly[0])
+ np.linalg.norm(
box[(i + 1) % 4] - poly[point_num // 2 - 1]
)
+ np.linalg.norm(box[(i + 2) % 4] - poly[point_num // 2])
+ np.linalg.norm(box[(i + 3) % 4] - poly[-1])
)
if dist < min_dist:
min_dist = dist
first_point_idx = i
for i in range(4):
min_area_quad[i] = box[(first_point_idx + i) % 4]
return min_area_quad
def check_and_validate_polys(self, polys, tags, xxx_todo_changeme):
"""
check so that the text poly is in the same direction,
and also filter some invalid polygons
:param polys:
:param tags:
:return:
"""
(h, w) = xxx_todo_changeme
if polys.shape[0] == 0:
return polys, np.array([]), np.array([])
polys[:, :, 0] = np.clip(polys[:, :, 0], 0, w - 1)
polys[:, :, 1] = np.clip(polys[:, :, 1], 0, h - 1)
validated_polys = []
validated_tags = []
hv_tags = []
for poly, tag in zip(polys, tags):
quad = self.gen_quad_from_poly(poly)
p_area = self.quad_area(quad)
if abs(p_area) < 1:
print("invalid poly")
continue
if p_area > 0:
if not tag:
print("poly in wrong direction")
tag = True # reversed cases should be ignore
poly = poly[
(0, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1), :
]
quad = quad[(0, 3, 2, 1), :]
len_w = np.linalg.norm(quad[0] - quad[1]) + np.linalg.norm(
quad[3] - quad[2]
)
len_h = np.linalg.norm(quad[0] - quad[3]) + np.linalg.norm(
quad[1] - quad[2]
)
hv_tag = 1
if len_w * 2.0 < len_h:
hv_tag = 0
validated_polys.append(poly)
validated_tags.append(tag)
hv_tags.append(hv_tag)
return (
np.array(validated_polys),
np.array(validated_tags),
np.array(hv_tags),
)
def crop_area(
self, im, polys, tags, hv_tags, crop_background=False, max_tries=25
):
"""
make random crop from the input image
:param im:
:param polys:
:param tags:
:param crop_background:
:param max_tries: 50 -> 25
:return:
"""
h, w, _ = im.shape
pad_h = h // 10
pad_w = w // 10
h_array = np.zeros((h + pad_h * 2), dtype=np.int32)
w_array = np.zeros((w + pad_w * 2), dtype=np.int32)
for poly in polys:
poly = np.round(poly, decimals=0).astype(np.int32)
minx = np.min(poly[:, 0])
maxx = np.max(poly[:, 0])
w_array[minx + pad_w : maxx + pad_w] = 1
miny = np.min(poly[:, 1])
maxy = np.max(poly[:, 1])
h_array[miny + pad_h : maxy + pad_h] = 1
# ensure the cropped area not across a text
h_axis = np.where(h_array == 0)[0]
w_axis = np.where(w_array == 0)[0]
if len(h_axis) == 0 or len(w_axis) == 0:
return im, polys, tags, hv_tags
for i in range(max_tries):
xx = np.random.choice(w_axis, size=2)
xmin = np.min(xx) - pad_w
xmax = np.max(xx) - pad_w
xmin = np.clip(xmin, 0, w - 1)
xmax = np.clip(xmax, 0, w - 1)
yy = np.random.choice(h_axis, size=2)
ymin = np.min(yy) - pad_h
ymax = np.max(yy) - pad_h
ymin = np.clip(ymin, 0, h - 1)
ymax = np.clip(ymax, 0, h - 1)
# if xmax - xmin < ARGS.min_crop_side_ratio * w or \
# ymax - ymin < ARGS.min_crop_side_ratio * h:
if (
xmax - xmin < self.min_crop_size
or ymax - ymin < self.min_crop_size
):
# area too small
continue
if polys.shape[0] != 0:
poly_axis_in_area = (
(polys[:, :, 0] >= xmin)
& (polys[:, :, 0] <= xmax)
& (polys[:, :, 1] >= ymin)
& (polys[:, :, 1] <= ymax)
)
selected_polys = np.where(
np.sum(poly_axis_in_area, axis=1) == 4
)[0]
else:
selected_polys = []
if len(selected_polys) == 0:
# no text in this area
if crop_background:
return (
im[ymin : ymax + 1, xmin : xmax + 1, :],
polys[selected_polys],
tags[selected_polys],
hv_tags[selected_polys],
)
else:
continue
im = im[ymin : ymax + 1, xmin : xmax + 1, :]
polys = polys[selected_polys]
tags = tags[selected_polys]
hv_tags = hv_tags[selected_polys]
polys[:, :, 0] -= xmin
polys[:, :, 1] -= ymin
return im, polys, tags, hv_tags
return im, polys, tags, hv_tags
def generate_direction_map(self, poly_quads, direction_map):
""""""
width_list = []
height_list = []
for quad in poly_quads:
quad_w = (
np.linalg.norm(quad[0] - quad[1])
+ np.linalg.norm(quad[2] - quad[3])
) / 2.0
quad_h = (
np.linalg.norm(quad[0] - quad[3])
+ np.linalg.norm(quad[2] - quad[1])
) / 2.0
width_list.append(quad_w)
height_list.append(quad_h)
norm_width = max(sum(width_list) / (len(width_list) + 1e-6), 1.0)
average_height = max(sum(height_list) / (len(height_list) + 1e-6), 1.0)
for quad in poly_quads:
direct_vector_full = (
(quad[1] + quad[2]) - (quad[0] + quad[3])
) / 2.0
direct_vector = (
direct_vector_full
/ (np.linalg.norm(direct_vector_full) + 1e-6)
* norm_width
)
direction_label = tuple(
map(
float,
[
direct_vector[0],
direct_vector[1],
1.0 / (average_height + 1e-6),
],
)
)
cv2.fillPoly(
direction_map,
quad.round().astype(np.int32)[np.newaxis, :, :],
direction_label,
)
return direction_map
def calculate_average_height(self, poly_quads):
""""""
height_list = []
for quad in poly_quads:
quad_h = (
np.linalg.norm(quad[0] - quad[3])
+ np.linalg.norm(quad[2] - quad[1])
) / 2.0
height_list.append(quad_h)
average_height = max(sum(height_list) / len(height_list), 1.0)
return average_height
def generate_tcl_label(
self,
hw,
polys,
tags,
ds_ratio,
tcl_ratio=0.3,
shrink_ratio_of_width=0.15,
):
"""
Generate polygon.
"""
h, w = hw
h, w = int(h * ds_ratio), int(w * ds_ratio)
polys = polys * ds_ratio
score_map = np.zeros(
(
h,
w,
),
dtype=np.float32,
)
tbo_map = np.zeros((h, w, 5), dtype=np.float32)
training_mask = np.ones(
(
h,
w,
),
dtype=np.float32,
)
_ = np.ones((h, w, 3)) * np.array([0, 0, 1]).reshape([1, 1, 3]).astype(
np.float32
)
for _, poly_tag in enumerate(zip(polys, tags)):
poly = poly_tag[0]
tag = poly_tag[1]
# generate min_area_quad
min_area_quad, center_point = self.gen_min_area_quad_from_poly(
poly
)
min_area_quad_h = 0.5 * (
np.linalg.norm(min_area_quad[0] - min_area_quad[3])
+ np.linalg.norm(min_area_quad[1] - min_area_quad[2])
)
min_area_quad_w = 0.5 * (
np.linalg.norm(min_area_quad[0] - min_area_quad[1])
+ np.linalg.norm(min_area_quad[2] - min_area_quad[3])
)
if (
min(min_area_quad_h, min_area_quad_w)
< self.min_text_size * ds_ratio
or min(min_area_quad_h, min_area_quad_w)
> self.max_text_size * ds_ratio
):
continue
if tag:
# continue
cv2.fillPoly(
training_mask,
poly.astype(np.int32)[np.newaxis, :, :],
0.15,
)
else:
tcl_poly = self.poly2tcl(poly, tcl_ratio)
tcl_quads = self.poly2quads(tcl_poly)
poly_quads = self.poly2quads(poly)
# stcl map
stcl_quads, quad_index = self.shrink_poly_along_width(
tcl_quads,
shrink_ratio_of_width=shrink_ratio_of_width,
expand_height_ratio=1.0 / tcl_ratio,
)
# generate tcl map
cv2.fillPoly(
score_map, np.round(stcl_quads).astype(np.int32), 1.0
)
# generate tbo map
for idx, quad in enumerate(stcl_quads):
quad_mask = np.zeros((h, w), dtype=np.float32)
quad_mask = cv2.fillPoly(
quad_mask,
np.round(quad[np.newaxis, :, :]).astype(np.int32),
1.0,
)
tbo_map = self.gen_quad_tbo(
poly_quads[quad_index[idx]], quad_mask, tbo_map
)
return score_map, tbo_map, training_mask
def generate_tvo_and_tco(
self, hw, polys, tags, tcl_ratio=0.3, ds_ratio=0.25
):
"""
Generate tcl map, tvo map and tbo map.
"""
h, w = hw
h, w = int(h * ds_ratio), int(w * ds_ratio)
polys = polys * ds_ratio
poly_mask = np.zeros((h, w), dtype=np.float32)
tvo_map = np.ones((9, h, w), dtype=np.float32)
tvo_map[0:-1:2] = np.tile(np.arange(0, w), (h, 1))
tvo_map[1:-1:2] = np.tile(np.arange(0, w), (h, 1)).T
poly_tv_xy_map = np.zeros((8, h, w), dtype=np.float32)
# tco map
tco_map = np.ones((3, h, w), dtype=np.float32)
tco_map[0] = np.tile(np.arange(0, w), (h, 1))
tco_map[1] = np.tile(np.arange(0, w), (h, 1)).T
poly_tc_xy_map = np.zeros((2, h, w), dtype=np.float32)
poly_short_edge_map = np.ones((h, w), dtype=np.float32)
for poly, poly_tag in zip(polys, tags):
if poly_tag:
continue
# adjust point order for vertical poly
poly = self.adjust_point(poly)
# generate min_area_quad
min_area_quad, center_point = self.gen_min_area_quad_from_poly(
poly
)
min_area_quad_h = 0.5 * (
np.linalg.norm(min_area_quad[0] - min_area_quad[3])
+ np.linalg.norm(min_area_quad[1] - min_area_quad[2])
)
min_area_quad_w = 0.5 * (
np.linalg.norm(min_area_quad[0] - min_area_quad[1])
+ np.linalg.norm(min_area_quad[2] - min_area_quad[3])
)
# generate tcl map and text, 128 * 128
tcl_poly = self.poly2tcl(poly, tcl_ratio)
# generate poly_tv_xy_map
for idx in range(4):
cv2.fillPoly(
poly_tv_xy_map[2 * idx],
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(min(max(min_area_quad[idx, 0], 0), w)),
)
cv2.fillPoly(
poly_tv_xy_map[2 * idx + 1],
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(min(max(min_area_quad[idx, 1], 0), h)),
)
# generate poly_tc_xy_map
for idx in range(2):
cv2.fillPoly(
poly_tc_xy_map[idx],
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(center_point[idx]),
)
# generate poly_short_edge_map
cv2.fillPoly(
poly_short_edge_map,
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
float(max(min(min_area_quad_h, min_area_quad_w), 1.0)),
)
# generate poly_mask and training_mask
cv2.fillPoly(
poly_mask,
np.round(tcl_poly[np.newaxis, :, :]).astype(np.int32),
1,
)
tvo_map *= poly_mask
tvo_map[:8] -= poly_tv_xy_map
tvo_map[-1] /= poly_short_edge_map
tvo_map = tvo_map.transpose((1, 2, 0))
tco_map *= poly_mask
tco_map[:2] -= poly_tc_xy_map
tco_map[-1] /= poly_short_edge_map
tco_map = tco_map.transpose((1, 2, 0))
return tvo_map, tco_map
def adjust_point(self, poly):
"""
adjust point order.
"""
point_num = poly.shape[0]
if point_num == 4:
len_1 = np.linalg.norm(poly[0] - poly[1])
len_2 = np.linalg.norm(poly[1] - poly[2])
len_3 = np.linalg.norm(poly[2] - poly[3])
len_4 = np.linalg.norm(poly[3] - poly[0])
if (len_1 + len_3) * 1.5 < (len_2 + len_4):
poly = poly[[1, 2, 3, 0], :]
elif point_num > 4:
vector_1 = poly[0] - poly[1]
vector_2 = poly[1] - poly[2]
cos_theta = np.dot(vector_1, vector_2) / (
np.linalg.norm(vector_1) * np.linalg.norm(vector_2) + 1e-6
)
theta = np.arccos(np.round(cos_theta, decimals=4))
if abs(theta) > (70 / 180 * math.pi):
index = list(range(1, point_num)) + [0]
poly = poly[np.array(index), :]
return poly
def gen_min_area_quad_from_poly(self, poly):
"""
Generate min area quad from poly.
"""
point_num = poly.shape[0]
min_area_quad = np.zeros((4, 2), dtype=np.float32)
if point_num == 4:
min_area_quad = poly
center_point = np.sum(poly, axis=0) / 4
else:
rect = cv2.minAreaRect(
poly.astype(np.int32)
) # (center (x,y), (width, height), angle of rotation)
center_point = rect[0]
box = np.array(cv2.boxPoints(rect))
first_point_idx = 0
min_dist = 1e4
for i in range(4):
dist = (
np.linalg.norm(box[(i + 0) % 4] - poly[0])
+ np.linalg.norm(
box[(i + 1) % 4] - poly[point_num // 2 - 1]
)
+ np.linalg.norm(box[(i + 2) % 4] - poly[point_num // 2])
+ np.linalg.norm(box[(i + 3) % 4] - poly[-1])
)
if dist < min_dist:
min_dist = dist
first_point_idx = i
for i in range(4):
min_area_quad[i] = box[(first_point_idx + i) % 4]
return min_area_quad, center_point
def shrink_quad_along_width(
self, quad, begin_width_ratio=0.0, end_width_ratio=1.0
):
"""
Generate shrink_quad_along_width.
"""
ratio_pair = np.array(
[[begin_width_ratio], [end_width_ratio]], dtype=np.float32
)
p0_1 = quad[0] + (quad[1] - quad[0]) * ratio_pair
p3_2 = quad[3] + (quad[2] - quad[3]) * ratio_pair
return np.array([p0_1[0], p0_1[1], p3_2[1], p3_2[0]])
def shrink_poly_along_width(
self, quads, shrink_ratio_of_width, expand_height_ratio=1.0
):
"""
shrink poly with given length.
"""
upper_edge_list = []
def get_cut_info(edge_len_list, cut_len):
for idx, edge_len in enumerate(edge_len_list):
cut_len -= edge_len
if cut_len <= 0.000001:
ratio = (cut_len + edge_len_list[idx]) / edge_len_list[idx]
return idx, ratio
for quad in quads:
upper_edge_len = np.linalg.norm(quad[0] - quad[1])
upper_edge_list.append(upper_edge_len)
# length of left edge and right edge.
left_length = (
np.linalg.norm(quads[0][0] - quads[0][3]) * expand_height_ratio
)
right_length = (
np.linalg.norm(quads[-1][1] - quads[-1][2]) * expand_height_ratio
)
shrink_length = (
min(left_length, right_length, sum(upper_edge_list))
* shrink_ratio_of_width
)
# shrinking length
upper_len_left = shrink_length
upper_len_right = sum(upper_edge_list) - shrink_length
left_idx, left_ratio = get_cut_info(upper_edge_list, upper_len_left)
left_quad = self.shrink_quad_along_width(
quads[left_idx], begin_width_ratio=left_ratio, end_width_ratio=1
)
right_idx, right_ratio = get_cut_info(upper_edge_list, upper_len_right)
right_quad = self.shrink_quad_along_width(
quads[right_idx], begin_width_ratio=0, end_width_ratio=right_ratio
)
out_quad_list = []
if left_idx == right_idx:
out_quad_list.append(
[left_quad[0], right_quad[1], right_quad[2], left_quad[3]]
)
else:
out_quad_list.append(left_quad)
for idx in range(left_idx + 1, right_idx):
out_quad_list.append(quads[idx])
out_quad_list.append(right_quad)
return np.array(out_quad_list), list(range(left_idx, right_idx + 1))
def vector_angle(self, A, B):
"""
Calculate the angle between vector AB and x-axis positive direction.
"""
AB = np.array([B[1] - A[1], B[0] - A[0]])
return np.arctan2(*AB)
def theta_line_cross_point(self, theta, point):
"""
Calculate the line through given point and angle in ax + by + c =0 form.
"""
x, y = point
cos = np.cos(theta)
sin = np.sin(theta)
return [sin, -cos, cos * y - sin * x]
def line_cross_two_point(self, A, B):
"""
Calculate the line through given point A and B in ax + by + c =0 form.
"""
angle = self.vector_angle(A, B)
return self.theta_line_cross_point(angle, A)
def average_angle(self, poly):
"""
Calculate the average angle between left and right edge in given poly.
"""
p0, p1, p2, p3 = poly
angle30 = self.vector_angle(p3, p0)
angle21 = self.vector_angle(p2, p1)
return (angle30 + angle21) / 2
def line_cross_point(self, line1, line2):
"""
line1 and line2 in 0=ax+by+c form, compute the cross point of line1 and line2
"""
a1, b1, c1 = line1
a2, b2, c2 = line2
d = a1 * b2 - a2 * b1
if d == 0:
# print("line1", line1)
# print("line2", line2)
print("Cross point does not exist")
return np.array([0, 0], dtype=np.float32)
else:
x = (b1 * c2 - b2 * c1) / d
y = (a2 * c1 - a1 * c2) / d
return np.array([x, y], dtype=np.float32)
def quad2tcl(self, poly, ratio):
"""
Generate center line by poly clock-wise point. (4, 2)
"""
ratio_pair = np.array(
[[0.5 - ratio / 2], [0.5 + ratio / 2]], dtype=np.float32
)
p0_3 = poly[0] + (poly[3] - poly[0]) * ratio_pair
p1_2 = poly[1] + (poly[2] - poly[1]) * ratio_pair
return np.array([p0_3[0], p1_2[0], p1_2[1], p0_3[1]])
def poly2tcl(self, poly, ratio):
"""
Generate center line by poly clock-wise point.
"""
ratio_pair = np.array(
[[0.5 - ratio / 2], [0.5 + ratio / 2]], dtype=np.float32
)
tcl_poly = np.zeros_like(poly)
point_num = poly.shape[0]
for idx in range(point_num // 2):
point_pair = (
poly[idx]
+ (poly[point_num - 1 - idx] - poly[idx]) * ratio_pair
)
tcl_poly[idx] = point_pair[0]
tcl_poly[point_num - 1 - idx] = point_pair[1]
return tcl_poly
def gen_quad_tbo(self, quad, tcl_mask, tbo_map):
"""
Generate tbo_map for give quad.
"""
# upper and lower line function: ax + by + c = 0;
up_line = self.line_cross_two_point(quad[0], quad[1])
lower_line = self.line_cross_two_point(quad[3], quad[2])
quad_h = 0.5 * (
np.linalg.norm(quad[0] - quad[3])
+ np.linalg.norm(quad[1] - quad[2])
)
quad_w = 0.5 * (
np.linalg.norm(quad[0] - quad[1])
+ np.linalg.norm(quad[2] - quad[3])
)
# average angle of left and right line.
angle = self.average_angle(quad)
xy_in_poly = np.argwhere(tcl_mask == 1)
for y, x in xy_in_poly:
point = (x, y)
line = self.theta_line_cross_point(angle, point)
cross_point_upper = self.line_cross_point(up_line, line)
cross_point_lower = self.line_cross_point(lower_line, line)
# FIX, offset reverse
upper_offset_x, upper_offset_y = cross_point_upper - point
lower_offset_x, lower_offset_y = cross_point_lower - point
tbo_map[y, x, 0] = upper_offset_y
tbo_map[y, x, 1] = upper_offset_x
tbo_map[y, x, 2] = lower_offset_y
tbo_map[y, x, 3] = lower_offset_x
tbo_map[y, x, 4] = 1.0 / max(min(quad_h, quad_w), 1.0) * 2
return tbo_map
def poly2quads(self, poly):
"""
Split poly into quads.
"""
quad_list = []
point_num = poly.shape[0]
# point pair
point_pair_list = []
for idx in range(point_num // 2):
point_pair = [poly[idx], poly[point_num - 1 - idx]]
point_pair_list.append(point_pair)
quad_num = point_num // 2 - 1
for idx in range(quad_num):
# reshape and adjust to clock-wise
quad_list.append(
(np.array(point_pair_list)[[idx, idx + 1]]).reshape(4, 2)[
[0, 2, 3, 1]
]
)
return np.array(quad_list)
def __call__(self, data):
im = data["image"]
text_polys = data["polys"]
text_tags = data["ignore_tags"]
if im is None:
return None
if text_polys.shape[0] == 0:
return None
h, w, _ = im.shape
text_polys, text_tags, hv_tags = self.check_and_validate_polys(
text_polys, text_tags, (h, w)
)
if text_polys.shape[0] == 0:
return None
# set aspect ratio and keep area fix
asp_scales = np.arange(1.0, 1.55, 0.1)
asp_scale = np.random.choice(asp_scales)
if np.random.rand() < 0.5:
asp_scale = 1.0 / asp_scale
asp_scale = math.sqrt(asp_scale)
asp_wx = asp_scale
asp_hy = 1.0 / asp_scale
im = cv2.resize(im, dsize=None, fx=asp_wx, fy=asp_hy)
text_polys[:, :, 0] *= asp_wx
text_polys[:, :, 1] *= asp_hy
h, w, _ = im.shape
if max(h, w) > 2048:
rd_scale = 2048.0 / max(h, w)
im = cv2.resize(im, dsize=None, fx=rd_scale, fy=rd_scale)
text_polys *= rd_scale
h, w, _ = im.shape
if min(h, w) < 16:
return None
# no background
im, text_polys, text_tags, hv_tags = self.crop_area(
im, text_polys, text_tags, hv_tags, crop_background=False
)
if text_polys.shape[0] == 0:
return None
# continue for all ignore case
if np.sum((text_tags * 1.0)) >= text_tags.size:
return None
new_h, new_w, _ = im.shape
if (new_h is None) or (new_w is None):
return None
# resize image
std_ratio = float(self.input_size) / max(new_w, new_h)
rand_scales = np.array(
[0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0, 1.0, 1.0, 1.0, 1.0]
)
rz_scale = std_ratio * np.random.choice(rand_scales)
im = cv2.resize(im, dsize=None, fx=rz_scale, fy=rz_scale)
text_polys[:, :, 0] *= rz_scale
text_polys[:, :, 1] *= rz_scale
# add gaussian blur
if np.random.rand() < 0.1 * 0.5:
ks = np.random.permutation(5)[0] + 1
ks = int(ks / 2) * 2 + 1
im = cv2.GaussianBlur(im, ksize=(ks, ks), sigmaX=0, sigmaY=0)
# add brighter
if np.random.rand() < 0.1 * 0.5:
im = im * (1.0 + np.random.rand() * 0.5)
im = np.clip(im, 0.0, 255.0)
# add darker
if np.random.rand() < 0.1 * 0.5:
im = im * (1.0 - np.random.rand() * 0.5)
im = np.clip(im, 0.0, 255.0)
# Padding the im to [input_size, input_size]
new_h, new_w, _ = im.shape
if min(new_w, new_h) < self.input_size * 0.5:
return None
im_padded = np.ones(
(self.input_size, self.input_size, 3), dtype=np.float32
)
im_padded[:, :, 2] = 0.485 * 255
im_padded[:, :, 1] = 0.456 * 255
im_padded[:, :, 0] = 0.406 * 255
# Random the start position
del_h = self.input_size - new_h
del_w = self.input_size - new_w
sh, sw = 0, 0
if del_h > 1:
sh = int(np.random.rand() * del_h)
if del_w > 1:
sw = int(np.random.rand() * del_w)
# Padding
im_padded[sh : sh + new_h, sw : sw + new_w, :] = im.copy()
text_polys[:, :, 0] += sw
text_polys[:, :, 1] += sh
score_map, border_map, training_mask = self.generate_tcl_label(
(self.input_size, self.input_size), text_polys, text_tags, 0.25
)
# SAST head
tvo_map, tco_map = self.generate_tvo_and_tco(
(self.input_size, self.input_size),
text_polys,
text_tags,
tcl_ratio=0.3,
ds_ratio=0.25,
)
# print("test--------tvo_map shape:", tvo_map.shape)
im_padded[:, :, 2] -= 0.485 * 255
im_padded[:, :, 1] -= 0.456 * 255
im_padded[:, :, 0] -= 0.406 * 255
im_padded[:, :, 2] /= 255.0 * 0.229
im_padded[:, :, 1] /= 255.0 * 0.224
im_padded[:, :, 0] /= 255.0 * 0.225
im_padded = im_padded.transpose((2, 0, 1))
data["image"] = im_padded[::-1, :, :]
data["score_map"] = score_map[np.newaxis, :, :]
data["border_map"] = border_map.transpose((2, 0, 1))
data["training_mask"] = training_mask[np.newaxis, :, :]
data["tvo_map"] = tvo_map.transpose((2, 0, 1))
data["tco_map"] = tco_map.transpose((2, 0, 1))
return data
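# Note on the produced targets (assuming the default image_shape=[512, 512] and
# the ds_ratio of 0.25 used above): "image" is (3, 512, 512); "score_map" and
# "training_mask" are (1, 128, 128); "border_map" is (5, 128, 128);
# "tvo_map" is (9, 128, 128); and "tco_map" is (3, 128, 128).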
|
py | 1a39a181e668d2e5a087164bbabb676ee83deaeb | import errno
from collections import defaultdict
from select import select
import socket
from circus import util
from circus import logger
from zmq.eventloop import ioloop
class BaseStatsCollector(ioloop.PeriodicCallback):
def __init__(self, streamer, name, callback_time=1., io_loop=None):
ioloop.PeriodicCallback.__init__(self, self._callback,
callback_time * 1000, io_loop)
self.streamer = streamer
self.name = name
def _callback(self):
logger.debug('Publishing stats about {0}'.format(self.name))
for stats in self.collect_stats():
if stats is None:
continue
self.streamer.publisher.publish(self.name, stats)
def collect_stats(self):
# should be implemented in subclasses
raise NotImplementedError() # PRAGMA: NOCOVER
class WatcherStatsCollector(BaseStatsCollector):
def _aggregate(self, aggregate):
res = {'pid': list(aggregate.keys())}
stats = list(aggregate.values())
# aggregating CPU does not mean anything
# but the average can be a good indicator
cpu = [stat['cpu'] for stat in stats]
if 'N/A' in cpu:
res['cpu'] = 'N/A'
else:
try:
res['cpu'] = sum(cpu) / len(cpu)
except ZeroDivisionError:
res['cpu'] = 0.
# aggregating memory does make sense
mem = [stat['mem'] for stat in stats]
if 'N/A' in mem:
res['mem'] = 'N/A'
else:
res['mem'] = sum(mem)
# finding out the older process
ages = [stat['age'] for stat in stats if stat['age'] != 'N/A']
if len(ages) == 0:
res['age'] = 'N/A'
else:
res['age'] = max(ages)
return res
def collect_stats(self):
aggregate = {}
# sending by pids
for pid in self.streamer.get_pids(self.name):
name = None
if self.name == 'circus':
if pid in self.streamer.circus_pids:
name = self.streamer.circus_pids[pid]
try:
info = util.get_info(pid)
aggregate[pid] = info
info['subtopic'] = pid
info['name'] = name
yield info
except util.NoSuchProcess:
# the process is gone !
pass
except Exception as e:
logger.exception('Failed to get info for %d. %s' % (pid,
str(e)))
# now sending the aggregation
yield self._aggregate(aggregate)
# RESOLUTION is a value in seconds that will be used
# to determine the poller timeout of the sockets stats collector
#
# The PeriodicCallback calls the poller every LOOP_RES ms, and block
# for RESOLUTION seconds unless a read ready event occurs in the
# socket.
#
# This timer is used to limit the number of polls done on the
# socket, so the circusd-stats process don't eat all your CPU
# when you have a high-loaded socket.
#
_RESOLUTION = .1
_LOOP_RES = 10
class SocketStatsCollector(BaseStatsCollector):
def __init__(self, streamer, name, callback_time=1., io_loop=None):
super(SocketStatsCollector, self).__init__(streamer, name,
callback_time, io_loop)
self._rstats = defaultdict(int)
self.sockets = [sock for sock, address, fd in self.streamer.sockets]
self._p = ioloop.PeriodicCallback(self._select, _LOOP_RES,
io_loop=io_loop)
def start(self):
self._p.start()
super(SocketStatsCollector, self).start()
def stop(self):
self._p.stop()
BaseStatsCollector.stop(self)
def _select(self):
try:
rlist, wlist, xlist = select(self.sockets, [], [], .01)
except socket.error as err:
if err.errno == errno.EBADF:
return
raise
if len(rlist) == 0:
return
for sock in rlist:
try:
fileno = sock.fileno()
except socket.error as err:
if err.errno == errno.EBADF:
continue
else:
raise
self._rstats[fileno] += 1
def _aggregate(self, aggregate):
raise NotImplementedError()
def collect_stats(self):
# sending hits by sockets
sockets = self.streamer.sockets
if len(sockets) == 0:
yield None
else:
fds = []
for sock, address, fd in sockets:
try:
fileno = sock.fileno()
except socket.error as err:
if err.errno == errno.EBADF:
continue
else:
raise
fds.append((address, fileno, fd))
total = {'addresses': [], 'reads': 0}
# we might lose a few hits here but it's ok
for address, monitored_fd, fd in fds:
info = {}
info['fd'] = info['subtopic'] = fd
info['reads'] = self._rstats[monitored_fd]
total['reads'] += info['reads']
total['addresses'].append(address)
info['address'] = address
self._rstats[monitored_fd] = 0
yield info
yield total
|
py | 1a39a19e3ba311a8c226f58bc81d2c1f880cb486 | from user.definitions import Definition
Index = {
'__init__': Definition.__init__, # Do not remove this function, Index must not be void
}
|
py | 1a39a2facd4fbbb66b1da7f8b2f11b40af9010f7 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# Copyright 2018 Michael Still and Aptira
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This module is utility methods that privsep depends on. Privsep isn't allowed
# to depend on anything outside the privsep directory, so these need to be
# here. That said, other parts of nova can call into these utilities if
# needed.
import errno
import mmap
import os
from oslo_log import log as logging
from oslo_utils import excutils
LOG = logging.getLogger(__name__)
def supports_direct_io(dirpath):
if not hasattr(os, 'O_DIRECT'):
LOG.debug("This python runtime does not support direct I/O")
return False
testfile = os.path.join(dirpath, ".directio.test")
hasDirectIO = True
fd = None
try:
fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
# Check is the write allowed with 4096 byte alignment
align_size = 4096
m = mmap.mmap(-1, align_size)
m.write(b"x" * align_size)
os.write(fd, m)
LOG.debug("Path '%(path)s' supports direct I/O",
{'path': dirpath})
except OSError as e:
if e.errno == errno.EINVAL:
LOG.debug("Path '%(path)s' does not support direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': e})
hasDirectIO = False
else:
with excutils.save_and_reraise_exception():
LOG.error("Error on '%(path)s' while checking "
"direct I/O: '%(ex)s'",
{'path': dirpath, 'ex': e})
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("Error on '%(path)s' while checking direct I/O: "
"'%(ex)s'", {'path': dirpath, 'ex': e})
finally:
# ensure unlink(filepath) will actually remove the file by deleting
# the remaining link to it in close(fd)
if fd is not None:
os.close(fd)
try:
os.unlink(testfile)
except Exception:
pass
return hasDirectIO
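# Illustrative usage (caller and option names are hypothetical): callers
# typically gate O_DIRECT-based disk cache modes on this check, e.g.
#   if supports_direct_io(instances_path):
#       ...prefer cache mode "none"...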
|
py | 1a39a3097d3880d1297dbc10879f5805da984a81 | import errno
import json
import logging
import os
import re
import subprocess
import sys
from distutils.util import strtobool
from urllib.parse import parse_qs
sys.path.insert(0, "lib")
import requests # noqa: E402
def get_database_config(development_mode=False):
if any(
[x.startswith("MXRUNTIME_Database") for x in list(os.environ.keys())]
):
return {}
url = get_database_uri_from_vcap()
if url is None:
url = os.environ["DATABASE_URL"]
patterns = [
r"(?P<type>[a-zA-Z0-9]+)://(?P<user>[^:]+):(?P<password>[^@]+)@(?P<host>[^/]+)/(?P<dbname>[^?]*)(?P<extra>\?.*)?", # noqa: E501
r"jdbc:(?P<type>[a-zA-Z0-9]+)://(?P<host>[^;]+);database=(?P<dbname>[^;]*);user=(?P<user>[^;]+);password=(?P<password>.*)$", # noqa: E501
]
supported_databases = {
"postgres": "PostgreSQL",
"postgresql": "PostgreSQL",
"mysql": "MySQL",
"db2": "Db2",
"sqlserver": "SQLSERVER",
}
for pattern in patterns:
match = re.search(pattern, url)
if match is not None:
break
else:
raise Exception(
"Could not parse database credentials from database uri %s" % url
)
database_type_input = match.group("type")
if database_type_input not in supported_databases:
raise Exception("Unknown database type: %s", database_type_input)
database_type = supported_databases[database_type_input]
config = {
"DatabaseType": database_type,
"DatabaseUserName": match.group("user"),
"DatabasePassword": match.group("password"),
"DatabaseHost": match.group("host"),
"DatabaseName": match.group("dbname"),
}
if "extra" in match.groupdict() and match.group("extra"):
extra = match.group("extra").lstrip("?")
jdbc_params = parse_qs(extra)
if "sslmode" in jdbc_params:
sslmode = jdbc_params["sslmode"]
if sslmode and sslmode[0] == "require":
config.update({"DatabaseUseSsl": True})
if development_mode:
config.update(
{
"ConnectionPoolingMaxIdle": 1,
"ConnectionPoolingMaxActive": 20,
"ConnectionPoolingNumTestsPerEvictionRun": 50,
"ConnectionPoolingSoftMinEvictableIdleTimeMillis": 1000,
"ConnectionPoolingTimeBetweenEvictionRunsMillis": 1000,
}
)
elif database_type_input == "mysql":
config.update(
{
"ConnectionPoolingNumTestsPerEvictionRun": 50,
"ConnectionPoolingSoftMinEvictableIdleTimeMillis": 10000,
"ConnectionPoolingTimeBetweenEvictionRunsMillis": 10000,
}
)
return config
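# Illustrative example: a DATABASE_URL such as
#   postgres://user:secret@db.example.com:5432/mydb?sslmode=require
# yields DatabaseType "PostgreSQL", DatabaseUserName "user",
# DatabaseHost "db.example.com:5432", DatabaseName "mydb" and
# DatabaseUseSsl True.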
def get_vcap_services_data():
if os.environ.get("VCAP_SERVICES"):
return json.loads(os.environ.get("VCAP_SERVICES"))
else:
return {}
def get_vcap_data():
if os.environ.get("VCAP_APPLICATION"):
return json.loads(os.environ.get("VCAP_APPLICATION"))
else:
return {
"application_uris": ["example.com"],
"application_name": "My App",
}
def get_database_uri_from_vcap():
vcap_services = get_vcap_services_data()
for service_type_name in (
"p-mysql",
"p.mysql",
"elephantsql",
"cleardb",
"PostgreSQL",
"dashDB",
"mariadb",
"postgresql",
"rds",
"postgresql_shared",
):
if vcap_services and service_type_name in vcap_services:
return vcap_services[service_type_name][0]["credentials"]["uri"]
if "azure-sqldb" in vcap_services:
return vcap_services["azure-sqldb"][0]["credentials"]["jdbcUrl"]
for key in vcap_services:
try:
uri = vcap_services[key][0]["credentials"]["uri"]
if key.startswith("rds"):
return uri
if key.startswith("dashDB"):
return uri
if uri.startswith("postgres"):
return uri
if uri.startswith("mysql"):
return uri
except (TypeError, KeyError):
pass
return None
def appdynamics_used():
for k, v in os.environ.items():
if k.startswith("APPDYNAMICS_"):
return True
return False
def get_new_relic_license_key():
vcap_services = get_vcap_services_data()
if vcap_services and "newrelic" in vcap_services:
return vcap_services["newrelic"][0]["credentials"]["licenseKey"]
return None
def is_appmetrics_enabled():
return os.getenv("APPMETRICS_TARGET") is not None
def get_tags():
return json.loads(os.getenv("TAGS", os.getenv("DD_TAGS", "[]")))
def get_hostname():
dd_hostname = os.environ.get("DD_HOSTNAME")
if dd_hostname is None:
domain = get_vcap_data()["application_uris"][0].split("/")[0]
dd_hostname = domain + "-" + os.getenv("CF_INSTANCE_INDEX", "")
return dd_hostname
def get_blobstore_url(filename):
main_url = os.environ.get("BLOBSTORE", "https://cdn.mendix.com")
if main_url[-1] == "/":
main_url = main_url[0:-1]
return main_url + filename
def download_and_unpack(url, destination, cache_dir="/tmp/downloads"):
file_name = url.split("/")[-1]
mkdir_p(cache_dir)
mkdir_p(destination)
cached_location = os.path.join(cache_dir, file_name)
logging.debug(
"Looking for {cached_location}".format(cached_location=cached_location)
)
if not os.path.isfile(cached_location):
download(url, cached_location)
logging.debug(
"downloaded to {cached_location}".format(
cached_location=cached_location
)
)
else:
logging.debug(
"found in cache: {cached_location}".format(
cached_location=cached_location
)
)
logging.debug(
"extracting: {cached_location} to {dest}".format(
cached_location=cached_location, dest=destination
)
)
if file_name.endswith(".tar.gz") or file_name.endswith(".tgz"):
unpack_cmd = ["tar", "xf", cached_location, "-C", destination]
if file_name.startswith(("mono-", "jdk-", "jre-")):
unpack_cmd.extend(("--strip", "1"))
subprocess.check_call(unpack_cmd)
else:
raise Exception(
"do not know how to unpack {cached_location}".format(
cached_location=cached_location
)
)
logging.debug(
"source {file_name} retrieved & unpacked in {destination}".format(
file_name=file_name, destination=destination
)
)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def get_buildpack_loglevel():
if os.getenv("BUILDPACK_XTRACE", "false") == "true":
return logging.DEBUG
else:
return logging.INFO
def download(url, destination):
logging.debug(
"downloading {url} to {destination}".format(
url=url, destination=destination
)
)
with open(destination, "wb") as file_handle:
response = requests.get(url, stream=True)
if not response.ok:
response.raise_for_status()
for block in response.iter_content(4096):
if not block:
break
file_handle.write(block)
def get_existing_directory_or_raise(dirs, error):
for directory in dirs:
if os.path.isdir(directory):
return directory
raise NotFoundException(error)
class NotFoundException(Exception):
pass
def get_java_version(mx_version):
versions = {"7": "7u80", "8u51": "8u51", "8": "8"}
if mx_version >= 6.6:
default = "8"
elif mx_version >= 5.18:
default = "8u51"
else:
default = "7"
main_java_version = os.getenv("JAVA_VERSION", default)
if main_java_version not in list(versions.keys()):
raise Exception(
"Invalid Java version specified: %s" % main_java_version
)
return versions[main_java_version]
def get_mpr_file_from_dir(directory):
mprs = [x for x in os.listdir(directory) if x.endswith(".mpr")]
if len(mprs) == 1:
return os.path.join(directory, mprs[0])
elif len(mprs) > 1:
raise Exception("More than one .mpr file found, can not continue")
else:
return None
def ensure_mxbuild_in_directory(directory, mx_version, cache_dir):
if os.path.isdir(os.path.join(directory, "modeler")):
return
mkdir_p(directory)
url = os.environ.get("FORCED_MXBUILD_URL")
if url:
# don't ever cache with a FORCED_MXBUILD_URL
download_and_unpack(url, directory, cache_dir="/tmp/downloads")
else:
try:
_checkout_from_git_rootfs(directory, mx_version)
except NotFoundException as e:
logging.debug(str(e))
download_and_unpack(
get_blobstore_url(
"/runtime/mxbuild-%s.tar.gz" % str(mx_version)
),
directory,
cache_dir=cache_dir,
)
def _checkout_from_git_rootfs(directory, mx_version):
mendix_runtimes_path = "/usr/local/share/mendix-runtimes.git"
if not os.path.isdir(mendix_runtimes_path):
raise NotFoundException()
env = dict(os.environ)
env["GIT_WORK_TREE"] = directory
# checkout the runtime version
try:
subprocess.check_call(
("git", "checkout", str(mx_version), "-f"),
cwd=mendix_runtimes_path,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
return
except Exception:
try:
subprocess.check_call(
(
"git",
"fetch",
"origin",
"refs/tags/{0}:refs/tags/{0}".format(str(mx_version)),
),
cwd=mendix_runtimes_path,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
subprocess.check_call(
("git", "checkout", str(mx_version), "-f"),
cwd=mendix_runtimes_path,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
logging.debug("found mx version after updating runtimes.git")
return
except Exception:
logging.debug("tried updating git repo, also failed")
raise NotFoundException(
"Could not download mxbuild "
+ str(mx_version)
+ " from updated git repo"
)
def _get_env_with_monolib(mono_dir):
env = dict(os.environ)
env["LD_LIBRARY_PATH"] = mono_dir + "/lib"
env["MONO_STRICT_MS_COMPLIANT"] = "yes"
if not os.path.isfile(os.path.join(mono_dir, "lib", "libgdiplus.so")):
raise Exception("libgdiplus.so not found in dir %s" % mono_dir)
return env
def _detect_mono_version(mx_version):
logging.debug(
"Detecting Mono Runtime using mendix version: " + str(mx_version)
)
if mx_version < 7:
target = "mono-3.10.0"
else:
target = "mono-4.6.2.16"
logging.info("Selecting Mono Runtime: " + target)
return target
def _get_mono_path(directory, mono_version):
return get_existing_directory_or_raise(
[
os.path.join(directory, mono_version),
"/opt/" + mono_version,
"/tmp/" + mono_version,
],
"Mono not found",
)
def lazy_remove_file(filename):
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def ensure_and_get_mono(mx_version, cache_dir):
logging.debug(
"ensuring mono for mendix {mx_version}".format(
mx_version=str(mx_version)
)
)
mono_version = _detect_mono_version(mx_version)
fallback_location = "/tmp/opt"
try:
mono_location = _get_mono_path("/tmp/opt", mono_version)
except NotFoundException:
logging.debug("Mono not found in default locations")
download_and_unpack(
get_blobstore_url("/mx-buildpack/" + mono_version + "-mx.tar.gz"),
os.path.join(fallback_location, mono_version),
cache_dir,
)
mono_location = _get_mono_path(fallback_location, mono_version)
logging.debug("Using {mono_location}".format(mono_location=mono_location))
return mono_location
def ensure_and_get_jvm(
mx_version, cache_dir, dot_local_location, package="jdk"
):
logging.debug("Begin download and install java %s" % package)
java_version = get_java_version(mx_version)
rootfs_java_path = "/usr/lib/jvm/jdk-%s-oracle-x64" % java_version
if not os.path.isdir(rootfs_java_path):
logging.debug("rootfs without java sdk detected")
download_and_unpack(
get_blobstore_url(
"/mx-buildpack/%s-%s-linux-x64.tar.gz"
% (package, java_version)
),
os.path.join(
dot_local_location,
"usr/lib/jvm/%s-%s-oracle-x64" % (package, java_version),
),
cache_dir,
)
else:
logging.debug("rootfs with java sdk detected")
logging.debug("end download and install java %s" % package)
return get_existing_directory_or_raise(
[
"/usr/lib/jvm/jdk-%s-oracle-x64" % java_version,
os.path.join(
dot_local_location,
"usr/lib/jvm/%s-%s-oracle-x64" % (package, java_version),
),
],
"Java not found",
)
def i_am_primary_instance():
return os.getenv("CF_INSTANCE_INDEX", "0") == "0"
def bypass_loggregator_logging():
env_var = os.getenv("BYPASS_LOGGREGATOR", "False")
# Throws a useful message if you put in a nonsensical value.
# Necessary since we store these in cloud portal as strings.
try:
bypass_loggregator = strtobool(env_var)
except ValueError as e:
logging.warning(
"Bypass loggregator has a nonsensical value: %s. "
"Falling back to old loggregator-based metric reporting.",
env_var,
)
return False
if bypass_loggregator:
if os.getenv("TRENDS_STORAGE_URL"):
return True
else:
logging.warning(
"BYPASS_LOGGREGATOR is set to true, but no metrics URL is "
"set. Falling back to old loggregator-based metric reporting."
)
return False
return False
def get_metrics_url():
return os.getenv("TRENDS_STORAGE_URL")
|
py | 1a39a3d205801c9446635163b21edc2b323583fb | # def printme(str):
# print(str)
#
# printme('hello')
#pass by reference vs value
# def changeme(mylist):
# mylist.append([1,2,3,4,5])
# print('Values inside the functions:', mylist)
# return
# mylist = [10,20,34,3]
# changeme(mylist)
# print("values outside the functions:", mylist)
|
py | 1a39a42400249e0e997490b3dc2049054d44e8ca | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataloader utils functions."""
# Import libraries
from absl.testing import parameterized
import tensorflow as tf
from official.vision.beta.dataloaders import utils
class UtilsTest(tf.test.TestCase, parameterized.TestCase):
def test_process_empty_source_id(self):
source_id = tf.constant([], dtype=tf.int64)
source_id = tf.strings.as_string(source_id)
self.assertEqual(-1, utils.process_source_id(source_id=source_id))
@parameterized.parameters(
([128, 256], [128, 256]),
([128, 32, 16], [128, 32, 16]),
)
def test_process_source_id(self, source_id, expected_result):
source_id = tf.constant(source_id, dtype=tf.int64)
source_id = tf.strings.as_string(source_id)
self.assertSequenceAlmostEqual(expected_result,
utils.process_source_id(source_id=source_id))
@parameterized.parameters(
([[10, 20, 30, 40]], [[100]], [[0]], 10, None),
([[0.1, 0.2, 0.5, 0.6]], [[0.5]], [[1]], 2, [[1.0, 2.0]]),
)
def test_pad_groundtruths_to_fixed_size(self, boxes, area, classes, size,
attributes):
groundtruths = {}
groundtruths['boxes'] = tf.constant(boxes)
groundtruths['is_crowds'] = tf.constant([[0]])
groundtruths['areas'] = tf.constant(area)
groundtruths['classes'] = tf.constant(classes)
if attributes:
groundtruths['attributes'] = {'depth': tf.constant(attributes)}
actual_result = utils.pad_groundtruths_to_fixed_size(
groundtruths=groundtruths, size=size)
# Check that the first dimension is padded to the expected size.
for key in actual_result:
if key == 'attributes':
for _, v in actual_result[key].items():
pad_shape = v.shape[0]
self.assertEqual(size, pad_shape)
else:
pad_shape = actual_result[key].shape[0]
self.assertEqual(size, pad_shape)
if __name__ == '__main__':
tf.test.main()
|
py | 1a39a44407ab98a357a5e70b09db1f5741d98f80 | # Code source from Jiayuan Gu: https://github.com/Jiayuan-Gu/torkit3d
import torch
import torch.nn as nn
from ..common.mlp import mlp1d_bn_relu, mlp_bn_relu, mlp_relu, mlp1d_relu
__all__ = ["PointNet"]
class PointNet(nn.Module):
"""PointNet for classification.
Notes:
1. The original implementation includes dropout for global MLPs.
2. The original implementation decays the BN momentum.
"""
def __init__(
self,
in_channels=3,
local_channels=(64, 64, 64, 128, 1024),
global_channels=(512, 256),
):
super().__init__()
self.in_channels = in_channels
self.out_channels = (local_channels + global_channels)[-1]
self.mlp_local = mlp1d_bn_relu(in_channels, local_channels)
self.mlp_global = mlp_bn_relu(local_channels[-1], global_channels)
self.reset_parameters()
def forward(self, points, points_feature=None, points_mask=None) -> dict:
# points: [B, 3, N]; points_feature: [B, C, N], points_mask: [B, N]
if points_feature is not None:
input_feature = torch.cat([points, points_feature], dim=1)
else:
input_feature = points
local_feature = self.mlp_local(input_feature)
if points_mask is not None:
local_feature = torch.where(
points_mask.unsqueeze(1), local_feature, torch.zeros_like(local_feature)
)
global_feature, max_indices = torch.max(local_feature, 2)
output_feature = self.mlp_global(global_feature)
return {"feature": output_feature, "max_indices": max_indices}
def reset_parameters(self):
for name, module in self.named_modules():
if isinstance(module, (nn.Linear, nn.Conv1d, nn.Conv2d)):
if module.bias is not None:
nn.init.zeros_(module.bias)
if isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
module.momentum = 0.01
|
py | 1a39a6e198d525c1f1e60a961f4c35eae8efda2b | # coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.16.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CreateIssueOption(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'assignee': 'str',
'assignees': 'list[str]',
'body': 'str',
'closed': 'bool',
'due_date': 'datetime',
'labels': 'list[int]',
'milestone': 'int',
'ref': 'str',
'title': 'str'
}
attribute_map = {
'assignee': 'assignee',
'assignees': 'assignees',
'body': 'body',
'closed': 'closed',
'due_date': 'due_date',
'labels': 'labels',
'milestone': 'milestone',
'ref': 'ref',
'title': 'title'
}
def __init__(self, assignee=None, assignees=None, body=None, closed=None, due_date=None, labels=None, milestone=None, ref=None, title=None): # noqa: E501
"""CreateIssueOption - a model defined in Swagger""" # noqa: E501
self._assignee = None
self._assignees = None
self._body = None
self._closed = None
self._due_date = None
self._labels = None
self._milestone = None
self._ref = None
self._title = None
self.discriminator = None
if assignee is not None:
self.assignee = assignee
if assignees is not None:
self.assignees = assignees
if body is not None:
self.body = body
if closed is not None:
self.closed = closed
if due_date is not None:
self.due_date = due_date
if labels is not None:
self.labels = labels
if milestone is not None:
self.milestone = milestone
if ref is not None:
self.ref = ref
self.title = title
@property
def assignee(self):
"""Gets the assignee of this CreateIssueOption. # noqa: E501
deprecated # noqa: E501
:return: The assignee of this CreateIssueOption. # noqa: E501
:rtype: str
"""
return self._assignee
@assignee.setter
def assignee(self, assignee):
"""Sets the assignee of this CreateIssueOption.
deprecated # noqa: E501
:param assignee: The assignee of this CreateIssueOption. # noqa: E501
:type: str
"""
self._assignee = assignee
@property
def assignees(self):
"""Gets the assignees of this CreateIssueOption. # noqa: E501
:return: The assignees of this CreateIssueOption. # noqa: E501
:rtype: list[str]
"""
return self._assignees
@assignees.setter
def assignees(self, assignees):
"""Sets the assignees of this CreateIssueOption.
:param assignees: The assignees of this CreateIssueOption. # noqa: E501
:type: list[str]
"""
self._assignees = assignees
@property
def body(self):
"""Gets the body of this CreateIssueOption. # noqa: E501
:return: The body of this CreateIssueOption. # noqa: E501
:rtype: str
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this CreateIssueOption.
:param body: The body of this CreateIssueOption. # noqa: E501
:type: str
"""
self._body = body
@property
def closed(self):
"""Gets the closed of this CreateIssueOption. # noqa: E501
:return: The closed of this CreateIssueOption. # noqa: E501
:rtype: bool
"""
return self._closed
@closed.setter
def closed(self, closed):
"""Sets the closed of this CreateIssueOption.
:param closed: The closed of this CreateIssueOption. # noqa: E501
:type: bool
"""
self._closed = closed
@property
def due_date(self):
"""Gets the due_date of this CreateIssueOption. # noqa: E501
:return: The due_date of this CreateIssueOption. # noqa: E501
:rtype: datetime
"""
return self._due_date
@due_date.setter
def due_date(self, due_date):
"""Sets the due_date of this CreateIssueOption.
:param due_date: The due_date of this CreateIssueOption. # noqa: E501
:type: datetime
"""
self._due_date = due_date
@property
def labels(self):
"""Gets the labels of this CreateIssueOption. # noqa: E501
list of label ids # noqa: E501
:return: The labels of this CreateIssueOption. # noqa: E501
:rtype: list[int]
"""
return self._labels
@labels.setter
def labels(self, labels):
"""Sets the labels of this CreateIssueOption.
list of label ids # noqa: E501
:param labels: The labels of this CreateIssueOption. # noqa: E501
:type: list[int]
"""
self._labels = labels
@property
def milestone(self):
"""Gets the milestone of this CreateIssueOption. # noqa: E501
milestone id # noqa: E501
:return: The milestone of this CreateIssueOption. # noqa: E501
:rtype: int
"""
return self._milestone
@milestone.setter
def milestone(self, milestone):
"""Sets the milestone of this CreateIssueOption.
milestone id # noqa: E501
:param milestone: The milestone of this CreateIssueOption. # noqa: E501
:type: int
"""
self._milestone = milestone
@property
def ref(self):
"""Gets the ref of this CreateIssueOption. # noqa: E501
:return: The ref of this CreateIssueOption. # noqa: E501
:rtype: str
"""
return self._ref
@ref.setter
def ref(self, ref):
"""Sets the ref of this CreateIssueOption.
:param ref: The ref of this CreateIssueOption. # noqa: E501
:type: str
"""
self._ref = ref
@property
def title(self):
"""Gets the title of this CreateIssueOption. # noqa: E501
:return: The title of this CreateIssueOption. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this CreateIssueOption.
:param title: The title of this CreateIssueOption. # noqa: E501
:type: str
"""
if title is None:
raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501
self._title = title
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CreateIssueOption, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateIssueOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a39a77446d8f5a41b36b89bc4d185715036a802 | import os
import pymongo
from crawl_terms import make_vocab_to_def_json
# using dotenv to fetch MongoDB Atlas URL environment variable
MONGO_URL = os.getenv("MONGO_URL")
print("Connecting MongoDB Atlas to: " + MONGO_URL)
# accessing MongoDB Atlas with pymongo MongoClient
client = pymongo.MongoClient(MONGO_URL)
# connect to the mongodb atlas database and collection
vocab_db = client.get_database("vocab")
vocab_terms_collection = vocab_db.vocab_terms
# testing database connection
db = client.test
print(db)
# testing collection connection
db = client.get_database("vocab")
vocab_terms_mongodb = db.vocab_terms
all_documents_no = vocab_terms_mongodb.count_documents({})
print(all_documents_no)
# insert a test document at the collection
new_term = {"name": "namaewa?", "url": "localhost"}
vocab_terms_collection.insert_one(new_term)
# insert investopedia data to the mongodb atlas
test_json_document = make_vocab_to_def_json(
"https://www.investopedia.com/terms/b/buyersmarket.asp"
)
vocab_terms_collection.insert_one(test_json_document)
# insert multiple test documents at the collection
new_terms = [
{"name": "namaewa?", "url": "localhost"},
{"name": "taki-kun!", "url": "localhost"},
]
vocab_terms_collection.insert_many(new_terms)
# find individual documents from mongodb atlas
one_doc = vocab_terms_collection.find_one({"vocabulary": "buyersmarket"})
print(one_doc)
# find all documents from mongodb atlas
all_docs = list(vocab_terms_collection.find())
print(all_docs, type(all_docs))
# update a single document
one_update = {"vocabulary": "sellersmarket"}
vocab_terms_collection.update_one({"vocabulary": "buyersmarket"}, {"$set": one_update})
# delete a single document
vocab_terms_collection.delete_one({"vocabulary": "sellersmarket"})
|
py | 1a39a8baca706a2ee89296d2890dd39502302969 | import logging
from logging import handlers
class logger(object):
level_relations = {
'debug':logging.DEBUG,
'info':logging.INFO,
'warning':logging.WARNING,
'error':logging.ERROR,
'crit':logging.CRITICAL
}#日志级别关系映射
#def __init__(self,filename,level='info',when='D',backCount=3,fmt='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'):
def __init__(self,filename,level='info',when='D',backCount=3,fmt='%(asctime)s - %(levelname)s: %(message)s'):
self.logger = logging.getLogger(filename)
format_str = logging.Formatter(fmt)#设置日志格式
self.logger.setLevel(self.level_relations.get(level))#设置日志级别
sh = logging.StreamHandler()#往屏幕上输出
sh.setFormatter(format_str) #设置屏幕上显示的格式
th = handlers.TimedRotatingFileHandler(filename=filename,when=when,backupCount=backCount,encoding='utf-8')#往文件里写入#指定间隔时间自动生成文件的处理器
#实例化TimedRotatingFileHandler
#interval是时间间隔,backupCount是备份文件的个数,如果超过这个个数,就会自动删除,when是间隔的时间单位,单位有以下几种:
# S 秒
# M 分
# H 小时、
# D 天、
# W 每星期(interval==0时代表星期一)
# midnight 每天凌晨
th.setFormatter(format_str)#设置文件里写入的格式
self.logger.addHandler(sh) #把对象加到logger里
self.logger.addHandler(th)
|
py | 1a39a91cbfe32af918da918821a0b538cadf6fcb | from pybullet_utils import pd_controller_stable
from pybullet_envs.deep_mimic.env import humanoid_pose_interpolator
import math
chest = 1
neck = 2
rightHip = 3
rightKnee = 4
rightAnkle = 5
rightShoulder = 6
rightElbow = 7
leftHip = 9
leftKnee = 10
leftAnkle = 11
leftShoulder = 12
leftElbow = 13
jointFrictionForce = 0
class HumanoidStablePD(object):
def __init__(self, pybullet_client, mocap_data, timeStep, useFixedBase=True):
self._pybullet_client = pybullet_client
self._mocap_data = mocap_data
print("LOADING humanoid!")
self._sim_model = self._pybullet_client.loadURDF(
"humanoid/humanoid.urdf", [0, 0.889540259, 0],
globalScaling=0.25,
useFixedBase=useFixedBase,
flags=self._pybullet_client.URDF_MAINTAIN_LINK_ORDER)
#self._pybullet_client.setCollisionFilterGroupMask(self._sim_model,-1,collisionFilterGroup=0,collisionFilterMask=0)
#for j in range (self._pybullet_client.getNumJoints(self._sim_model)):
# self._pybullet_client.setCollisionFilterGroupMask(self._sim_model,j,collisionFilterGroup=0,collisionFilterMask=0)
self._end_effectors = [5, 8, 11, 14] #ankle and wrist, both left and right
self._kin_model = self._pybullet_client.loadURDF(
"humanoid/humanoid.urdf", [0, 0.85, 0],
globalScaling=0.25,
useFixedBase=True,
flags=self._pybullet_client.URDF_MAINTAIN_LINK_ORDER)
self._pybullet_client.changeDynamics(self._sim_model, -1, lateralFriction=0.9)
for j in range(self._pybullet_client.getNumJoints(self._sim_model)):
self._pybullet_client.changeDynamics(self._sim_model, j, lateralFriction=0.9)
self._pybullet_client.changeDynamics(self._sim_model, -1, linearDamping=0, angularDamping=0)
self._pybullet_client.changeDynamics(self._kin_model, -1, linearDamping=0, angularDamping=0)
#todo: add feature to disable simulation for a particular object. Until then, disable all collisions
self._pybullet_client.setCollisionFilterGroupMask(self._kin_model,
-1,
collisionFilterGroup=0,
collisionFilterMask=0)
self._pybullet_client.changeDynamics(
self._kin_model,
-1,
activationState=self._pybullet_client.ACTIVATION_STATE_SLEEP +
self._pybullet_client.ACTIVATION_STATE_ENABLE_SLEEPING +
self._pybullet_client.ACTIVATION_STATE_DISABLE_WAKEUP)
alpha = 0.4
self._pybullet_client.changeVisualShape(self._kin_model, -1, rgbaColor=[1, 1, 1, alpha])
for j in range(self._pybullet_client.getNumJoints(self._kin_model)):
self._pybullet_client.setCollisionFilterGroupMask(self._kin_model,
j,
collisionFilterGroup=0,
collisionFilterMask=0)
self._pybullet_client.changeDynamics(
self._kin_model,
j,
activationState=self._pybullet_client.ACTIVATION_STATE_SLEEP +
self._pybullet_client.ACTIVATION_STATE_ENABLE_SLEEPING +
self._pybullet_client.ACTIVATION_STATE_DISABLE_WAKEUP)
self._pybullet_client.changeVisualShape(self._kin_model, j, rgbaColor=[1, 1, 1, alpha])
self._poseInterpolator = humanoid_pose_interpolator.HumanoidPoseInterpolator()
for i in range(self._mocap_data.NumFrames() - 1):
frameData = self._mocap_data._motion_data['Frames'][i]
self._poseInterpolator.PostProcessMotionData(frameData)
self._stablePD = pd_controller_stable.PDControllerStableMultiDof(self._pybullet_client)
self._timeStep = timeStep
self._kpOrg = [
0, 0, 0, 0, 0, 0, 0, 1000, 1000, 1000, 1000, 100, 100, 100, 100, 500, 500, 500, 500, 500,
400, 400, 400, 400, 400, 400, 400, 400, 300, 500, 500, 500, 500, 500, 400, 400, 400, 400,
400, 400, 400, 400, 300
]
self._kdOrg = [
0, 0, 0, 0, 0, 0, 0, 100, 100, 100, 100, 10, 10, 10, 10, 50, 50, 50, 50, 50, 40, 40, 40,
40, 40, 40, 40, 40, 30, 50, 50, 50, 50, 50, 40, 40, 40, 40, 40, 40, 40, 40, 30
]
self._jointIndicesAll = [
chest, neck, rightHip, rightKnee, rightAnkle, rightShoulder, rightElbow, leftHip, leftKnee,
leftAnkle, leftShoulder, leftElbow
]
for j in self._jointIndicesAll:
#self._pybullet_client.setJointMotorControlMultiDof(self._sim_model, j, self._pybullet_client.POSITION_CONTROL, force=[1,1,1])
self._pybullet_client.setJointMotorControl2(self._sim_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=0,
positionGain=0,
targetVelocity=0,
force=jointFrictionForce)
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=[0, 0, 0, 1],
targetVelocity=[0, 0, 0],
positionGain=0,
velocityGain=1,
force=[jointFrictionForce, jointFrictionForce, jointFrictionForce])
self._pybullet_client.setJointMotorControl2(self._kin_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=0,
positionGain=0,
targetVelocity=0,
force=0)
self._pybullet_client.setJointMotorControlMultiDof(
self._kin_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=[0, 0, 0, 1],
targetVelocity=[0, 0, 0],
positionGain=0,
velocityGain=1,
force=[jointFrictionForce, jointFrictionForce, 0])
self._jointDofCounts = [4, 4, 4, 1, 4, 4, 1, 4, 1, 4, 4, 1]
#only those body parts/links are allowed to touch the ground, otherwise the episode terminates
self._allowed_body_parts = [5, 11]
#[x,y,z] base position and [x,y,z,w] base orientation!
self._totalDofs = 7
for dof in self._jointDofCounts:
self._totalDofs += dof
self.setSimTime(0)
self.resetPose()
def resetPose(self):
#print("resetPose with self._frame=", self._frame, " and self._frameFraction=",self._frameFraction)
pose = self.computePose(self._frameFraction)
self.initializePose(self._poseInterpolator, self._sim_model, initBase=True)
self.initializePose(self._poseInterpolator, self._kin_model, initBase=False)
def initializePose(self, pose, phys_model, initBase, initializeVelocity=True):
if initializeVelocity:
if initBase:
self._pybullet_client.resetBasePositionAndOrientation(phys_model, pose._basePos,
pose._baseOrn)
self._pybullet_client.resetBaseVelocity(phys_model, pose._baseLinVel, pose._baseAngVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, chest, pose._chestRot,
pose._chestVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, neck, pose._neckRot, pose._neckVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightHip, pose._rightHipRot,
pose._rightHipVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightKnee, pose._rightKneeRot,
pose._rightKneeVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightAnkle, pose._rightAnkleRot,
pose._rightAnkleVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightShoulder,
pose._rightShoulderRot, pose._rightShoulderVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightElbow, pose._rightElbowRot,
pose._rightElbowVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftHip, pose._leftHipRot,
pose._leftHipVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftKnee, pose._leftKneeRot,
pose._leftKneeVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftAnkle, pose._leftAnkleRot,
pose._leftAnkleVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftShoulder,
pose._leftShoulderRot, pose._leftShoulderVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftElbow, pose._leftElbowRot,
pose._leftElbowVel)
else:
if initBase:
self._pybullet_client.resetBasePositionAndOrientation(phys_model, pose._basePos,
pose._baseOrn)
self._pybullet_client.resetJointStateMultiDof(phys_model, chest, pose._chestRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, neck, pose._neckRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightHip, pose._rightHipRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightKnee, pose._rightKneeRot, [0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightAnkle, pose._rightAnkleRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightShoulder,
pose._rightShoulderRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightElbow, pose._rightElbowRot,
[0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftHip, pose._leftHipRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftKnee, pose._leftKneeRot, [0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftAnkle, pose._leftAnkleRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftShoulder,
pose._leftShoulderRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftElbow, pose._leftElbowRot, [0])
def calcCycleCount(self, simTime, cycleTime):
phases = simTime / cycleTime
count = math.floor(phases)
loop = True
#count = (loop) ? count : cMathUtil::Clamp(count, 0, 1);
return count
def getCycleTime(self):
keyFrameDuration = self._mocap_data.KeyFrameDuraction()
cycleTime = keyFrameDuration * (self._mocap_data.NumFrames() - 1)
return cycleTime
def setSimTime(self, t):
self._simTime = t
#print("SetTimeTime time =",t)
keyFrameDuration = self._mocap_data.KeyFrameDuraction()
cycleTime = self.getCycleTime()
#print("self._motion_data.NumFrames()=",self._mocap_data.NumFrames())
self._cycleCount = self.calcCycleCount(t, cycleTime)
#print("cycles=",cycles)
frameTime = t - self._cycleCount * cycleTime
if (frameTime < 0):
frameTime += cycleTime
#print("keyFrameDuration=",keyFrameDuration)
#print("frameTime=",frameTime)
self._frame = int(frameTime / keyFrameDuration)
#print("self._frame=",self._frame)
self._frameNext = self._frame + 1
if (self._frameNext >= self._mocap_data.NumFrames()):
self._frameNext = self._frame
self._frameFraction = (frameTime - self._frame * keyFrameDuration) / (keyFrameDuration)
def computeCycleOffset(self):
firstFrame = 0
lastFrame = self._mocap_data.NumFrames() - 1
frameData = self._mocap_data._motion_data['Frames'][0]
frameDataNext = self._mocap_data._motion_data['Frames'][lastFrame]
basePosStart = [frameData[1], frameData[2], frameData[3]]
basePosEnd = [frameDataNext[1], frameDataNext[2], frameDataNext[3]]
self._cycleOffset = [
basePosEnd[0] - basePosStart[0], basePosEnd[1] - basePosStart[1],
basePosEnd[2] - basePosStart[2]
]
return self._cycleOffset
def computePose(self, frameFraction):
frameData = self._mocap_data._motion_data['Frames'][self._frame]
frameDataNext = self._mocap_data._motion_data['Frames'][self._frameNext]
self._poseInterpolator.Slerp(frameFraction, frameData, frameDataNext, self._pybullet_client)
#print("self._poseInterpolator.Slerp(", frameFraction,")=", pose)
self.computeCycleOffset()
oldPos = self._poseInterpolator._basePos
self._poseInterpolator._basePos = [
oldPos[0] + self._cycleCount * self._cycleOffset[0],
oldPos[1] + self._cycleCount * self._cycleOffset[1],
oldPos[2] + self._cycleCount * self._cycleOffset[2]
]
pose = self._poseInterpolator.GetPose()
return pose
def convertActionToPose(self, action):
pose = self._poseInterpolator.ConvertFromAction(self._pybullet_client, action)
return pose
def computePDForces(self, desiredPositions, desiredVelocities, maxForces):
if desiredVelocities == None:
desiredVelocities = [0] * self._totalDofs
taus = self._stablePD.computePD(bodyUniqueId=self._sim_model,
jointIndices=self._jointIndicesAll,
desiredPositions=desiredPositions,
desiredVelocities=desiredVelocities,
kps=self._kpOrg,
kds=self._kdOrg,
maxForces=maxForces,
timeStep=self._timeStep)
return taus
def applyPDForces(self, taus):
dofIndex = 7
scaling = 1
for index in range(len(self._jointIndicesAll)):
jointIndex = self._jointIndicesAll[index]
if self._jointDofCounts[index] == 4:
force = [
scaling * taus[dofIndex + 0], scaling * taus[dofIndex + 1],
scaling * taus[dofIndex + 2]
]
#print("force[", jointIndex,"]=",force)
self._pybullet_client.setJointMotorControlMultiDof(self._sim_model,
jointIndex,
self._pybullet_client.TORQUE_CONTROL,
force=force)
if self._jointDofCounts[index] == 1:
force = [scaling * taus[dofIndex]]
#print("force[", jointIndex,"]=",force)
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
jointIndex,
controlMode=self._pybullet_client.TORQUE_CONTROL,
force=force)
dofIndex += self._jointDofCounts[index]
def setJointMotors(self, desiredPositions, maxForces):
controlMode = self._pybullet_client.POSITION_CONTROL
startIndex = 7
chest = 1
neck = 2
rightHip = 3
rightKnee = 4
rightAnkle = 5
rightShoulder = 6
rightElbow = 7
leftHip = 9
leftKnee = 10
leftAnkle = 11
leftShoulder = 12
leftElbow = 13
kp = 0.2
forceScale = 1
#self._jointDofCounts=[4,4,4,1,4,4,1,4,1,4,4,1]
maxForce = [
forceScale * maxForces[startIndex], forceScale * maxForces[startIndex + 1],
forceScale * maxForces[startIndex + 2], forceScale * maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
chest,
controlMode,
targetPosition=self._poseInterpolator._chestRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
neck,
controlMode,
targetPosition=self._poseInterpolator._neckRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightHip,
controlMode,
targetPosition=self._poseInterpolator._rightHipRot,
positionGain=kp,
force=maxForce)
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightKnee,
controlMode,
targetPosition=self._poseInterpolator._rightKneeRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightAnkle,
controlMode,
targetPosition=self._poseInterpolator._rightAnkleRot,
positionGain=kp,
force=maxForce)
maxForce = [
forceScale * maxForces[startIndex], forceScale * maxForces[startIndex + 1],
forceScale * maxForces[startIndex + 2], forceScale * maxForces[startIndex + 3]
]
startIndex += 4
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightElbow,
controlMode,
targetPosition=self._poseInterpolator._rightElbowRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftHip,
controlMode,
targetPosition=self._poseInterpolator._leftHipRot,
positionGain=kp,
force=maxForce)
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftKnee,
controlMode,
targetPosition=self._poseInterpolator._leftKneeRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftAnkle,
controlMode,
targetPosition=self._poseInterpolator._leftAnkleRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftShoulder,
controlMode,
targetPosition=self._poseInterpolator._leftShoulderRot,
positionGain=kp,
force=maxForce)
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftElbow,
controlMode,
targetPosition=self._poseInterpolator._leftElbowRot,
positionGain=kp,
force=maxForce)
#print("startIndex=",startIndex)
def getPhase(self):
keyFrameDuration = self._mocap_data.KeyFrameDuraction()
cycleTime = keyFrameDuration * (self._mocap_data.NumFrames() - 1)
phase = self._simTime / cycleTime
phase = math.fmod(phase, 1.0)
if (phase < 0):
phase += 1
return phase
def buildHeadingTrans(self, rootOrn):
#align root transform 'forward' with world-space x axis
eul = self._pybullet_client.getEulerFromQuaternion(rootOrn)
refDir = [1, 0, 0]
rotVec = self._pybullet_client.rotateVector(rootOrn, refDir)
heading = math.atan2(-rotVec[2], rotVec[0])
heading2 = eul[1]
#print("heading=",heading)
headingOrn = self._pybullet_client.getQuaternionFromAxisAngle([0, 1, 0], -heading)
return headingOrn
def buildOriginTrans(self):
rootPos, rootOrn = self._pybullet_client.getBasePositionAndOrientation(self._sim_model)
#print("rootPos=",rootPos, " rootOrn=",rootOrn)
invRootPos = [-rootPos[0], 0, -rootPos[2]]
#invOrigTransPos, invOrigTransOrn = self._pybullet_client.invertTransform(rootPos,rootOrn)
headingOrn = self.buildHeadingTrans(rootOrn)
#print("headingOrn=",headingOrn)
headingMat = self._pybullet_client.getMatrixFromQuaternion(headingOrn)
#print("headingMat=",headingMat)
#dummy, rootOrnWithoutHeading = self._pybullet_client.multiplyTransforms([0,0,0],headingOrn, [0,0,0], rootOrn)
#dummy, invOrigTransOrn = self._pybullet_client.multiplyTransforms([0,0,0],rootOrnWithoutHeading, invOrigTransPos, invOrigTransOrn)
invOrigTransPos, invOrigTransOrn = self._pybullet_client.multiplyTransforms([0, 0, 0],
headingOrn,
invRootPos,
[0, 0, 0, 1])
#print("invOrigTransPos=",invOrigTransPos)
#print("invOrigTransOrn=",invOrigTransOrn)
invOrigTransMat = self._pybullet_client.getMatrixFromQuaternion(invOrigTransOrn)
#print("invOrigTransMat =",invOrigTransMat )
return invOrigTransPos, invOrigTransOrn
def getState(self):
stateVector = []
phase = self.getPhase()
#print("phase=",phase)
stateVector.append(phase)
rootTransPos, rootTransOrn = self.buildOriginTrans()
basePos, baseOrn = self._pybullet_client.getBasePositionAndOrientation(self._sim_model)
rootPosRel, dummy = self._pybullet_client.multiplyTransforms(rootTransPos, rootTransOrn,
basePos, [0, 0, 0, 1])
#print("!!!rootPosRel =",rootPosRel )
#print("rootTransPos=",rootTransPos)
#print("basePos=",basePos)
localPos, localOrn = self._pybullet_client.multiplyTransforms(rootTransPos, rootTransOrn,
basePos, baseOrn)
localPos = [
localPos[0] - rootPosRel[0], localPos[1] - rootPosRel[1], localPos[2] - rootPosRel[2]
]
#print("localPos=",localPos)
stateVector.append(rootPosRel[1])
#self.pb2dmJoints=[0,1,2,9,10,11,3,4,5,12,13,14,6,7,8]
self.pb2dmJoints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
for pbJoint in range(self._pybullet_client.getNumJoints(self._sim_model)):
j = self.pb2dmJoints[pbJoint]
#print("joint order:",j)
ls = self._pybullet_client.getLinkState(self._sim_model, j, computeForwardKinematics=True)
linkPos = ls[0]
linkOrn = ls[1]
linkPosLocal, linkOrnLocal = self._pybullet_client.multiplyTransforms(
rootTransPos, rootTransOrn, linkPos, linkOrn)
if (linkOrnLocal[3] < 0):
linkOrnLocal = [-linkOrnLocal[0], -linkOrnLocal[1], -linkOrnLocal[2], -linkOrnLocal[3]]
linkPosLocal = [
linkPosLocal[0] - rootPosRel[0], linkPosLocal[1] - rootPosRel[1],
linkPosLocal[2] - rootPosRel[2]
]
for l in linkPosLocal:
stateVector.append(l)
#re-order the quaternion, DeepMimic uses w,x,y,z
if (linkOrnLocal[3] < 0):
linkOrnLocal[0] *= -1
linkOrnLocal[1] *= -1
linkOrnLocal[2] *= -1
linkOrnLocal[3] *= -1
stateVector.append(linkOrnLocal[3])
stateVector.append(linkOrnLocal[0])
stateVector.append(linkOrnLocal[1])
stateVector.append(linkOrnLocal[2])
for pbJoint in range(self._pybullet_client.getNumJoints(self._sim_model)):
j = self.pb2dmJoints[pbJoint]
ls = self._pybullet_client.getLinkState(self._sim_model, j, computeLinkVelocity=True)
linkLinVel = ls[6]
linkAngVel = ls[7]
linkLinVelLocal, unused = self._pybullet_client.multiplyTransforms([0, 0, 0], rootTransOrn,
linkLinVel, [0, 0, 0, 1])
#linkLinVelLocal=[linkLinVelLocal[0]-rootPosRel[0],linkLinVelLocal[1]-rootPosRel[1],linkLinVelLocal[2]-rootPosRel[2]]
linkAngVelLocal, unused = self._pybullet_client.multiplyTransforms([0, 0, 0], rootTransOrn,
linkAngVel, [0, 0, 0, 1])
for l in linkLinVelLocal:
stateVector.append(l)
for l in linkAngVelLocal:
stateVector.append(l)
#print("stateVector len=",len(stateVector))
#for st in range (len(stateVector)):
# print("state[",st,"]=",stateVector[st])
return stateVector
def terminates(self):
#check if any non-allowed body part hits the ground
terminates = False
pts = self._pybullet_client.getContactPoints()
for p in pts:
part = -1
#ignore self-collision
if (p[1] == p[2]):
continue
if (p[1] == self._sim_model):
part = p[3]
if (p[2] == self._sim_model):
part = p[4]
if (part >= 0 and part not in self._allowed_body_parts):
#print("terminating part:", part)
terminates = True
return terminates
def quatMul(self, q1, q2):
return [
q1[3] * q2[0] + q1[0] * q2[3] + q1[1] * q2[2] - q1[2] * q2[1],
q1[3] * q2[1] + q1[1] * q2[3] + q1[2] * q2[0] - q1[0] * q2[2],
q1[3] * q2[2] + q1[2] * q2[3] + q1[0] * q2[1] - q1[1] * q2[0],
q1[3] * q2[3] - q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2]
]
def calcRootAngVelErr(self, vel0, vel1):
diff = [vel0[0] - vel1[0], vel0[1] - vel1[1], vel0[2] - vel1[2]]
return diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2]
def calcRootRotDiff(self, orn0, orn1):
orn0Conj = [-orn0[0], -orn0[1], -orn0[2], orn0[3]]
q_diff = self.quatMul(orn1, orn0Conj)
axis, angle = self._pybullet_client.getAxisAngleFromQuaternion(q_diff)
return angle * angle
def getReward(self, pose):
#from DeepMimic double cSceneImitate::CalcRewardImitate
#todo: compensate for ground height in some parts, once we move to non-flat terrain
pose_w = 0.5
vel_w = 0.05
end_eff_w = 0.15
root_w = 0.2
com_w = 0 #0.1
total_w = pose_w + vel_w + end_eff_w + root_w + com_w
pose_w /= total_w
vel_w /= total_w
end_eff_w /= total_w
root_w /= total_w
com_w /= total_w
pose_scale = 2
vel_scale = 0.1
end_eff_scale = 40
root_scale = 5
com_scale = 10
err_scale = 1
reward = 0
pose_err = 0
vel_err = 0
end_eff_err = 0
root_err = 0
com_err = 0
heading_err = 0
#create a mimic reward, comparing the dynamics humanoid with a kinematic one
#pose = self.InitializePoseFromMotionData()
#print("self._kin_model=",self._kin_model)
#print("kinematicHumanoid #joints=",self._pybullet_client.getNumJoints(self._kin_model))
#self.ApplyPose(pose, True, True, self._kin_model, self._pybullet_client)
#const Eigen::VectorXd& pose0 = sim_char.GetPose();
#const Eigen::VectorXd& vel0 = sim_char.GetVel();
#const Eigen::VectorXd& pose1 = kin_char.GetPose();
#const Eigen::VectorXd& vel1 = kin_char.GetVel();
#tMatrix origin_trans = sim_char.BuildOriginTrans();
#tMatrix kin_origin_trans = kin_char.BuildOriginTrans();
#
#tVector com0_world = sim_char.CalcCOM();
#tVector com_vel0_world = sim_char.CalcCOMVel();
#tVector com1_world;
#tVector com_vel1_world;
#cRBDUtil::CalcCoM(joint_mat, body_defs, pose1, vel1, com1_world, com_vel1_world);
#
root_id = 0
#tVector root_pos0 = cKinTree::GetRootPos(joint_mat, pose0);
#tVector root_pos1 = cKinTree::GetRootPos(joint_mat, pose1);
#tQuaternion root_rot0 = cKinTree::GetRootRot(joint_mat, pose0);
#tQuaternion root_rot1 = cKinTree::GetRootRot(joint_mat, pose1);
#tVector root_vel0 = cKinTree::GetRootVel(joint_mat, vel0);
#tVector root_vel1 = cKinTree::GetRootVel(joint_mat, vel1);
#tVector root_ang_vel0 = cKinTree::GetRootAngVel(joint_mat, vel0);
#tVector root_ang_vel1 = cKinTree::GetRootAngVel(joint_mat, vel1);
mJointWeights = [
0.20833, 0.10416, 0.0625, 0.10416, 0.0625, 0.041666666666666671, 0.0625, 0.0416, 0.00,
0.10416, 0.0625, 0.0416, 0.0625, 0.0416, 0.0000
]
num_end_effs = 0
num_joints = 15
root_rot_w = mJointWeights[root_id]
rootPosSim, rootOrnSim = self._pybullet_client.getBasePositionAndOrientation(self._sim_model)
rootPosKin, rootOrnKin = self._pybullet_client.getBasePositionAndOrientation(self._kin_model)
linVelSim, angVelSim = self._pybullet_client.getBaseVelocity(self._sim_model)
linVelKin, angVelKin = self._pybullet_client.getBaseVelocity(self._kin_model)
root_rot_err = self.calcRootRotDiff(rootOrnSim, rootOrnKin)
pose_err += root_rot_w * root_rot_err
root_vel_diff = [
linVelSim[0] - linVelKin[0], linVelSim[1] - linVelKin[1], linVelSim[2] - linVelKin[2]
]
root_vel_err = root_vel_diff[0] * root_vel_diff[0] + root_vel_diff[1] * root_vel_diff[
1] + root_vel_diff[2] * root_vel_diff[2]
root_ang_vel_err = self.calcRootAngVelErr(angVelSim, angVelKin)
vel_err += root_rot_w * root_ang_vel_err
for j in range(num_joints):
curr_pose_err = 0
curr_vel_err = 0
w = mJointWeights[j]
simJointInfo = self._pybullet_client.getJointStateMultiDof(self._sim_model, j)
#print("simJointInfo.pos=",simJointInfo[0])
#print("simJointInfo.vel=",simJointInfo[1])
kinJointInfo = self._pybullet_client.getJointStateMultiDof(self._kin_model, j)
#print("kinJointInfo.pos=",kinJointInfo[0])
#print("kinJointInfo.vel=",kinJointInfo[1])
if (len(simJointInfo[0]) == 1):
angle = simJointInfo[0][0] - kinJointInfo[0][0]
curr_pose_err = angle * angle
velDiff = simJointInfo[1][0] - kinJointInfo[1][0]
curr_vel_err = velDiff * velDiff
if (len(simJointInfo[0]) == 4):
#print("quaternion diff")
diffQuat = self._pybullet_client.getDifferenceQuaternion(simJointInfo[0], kinJointInfo[0])
axis, angle = self._pybullet_client.getAxisAngleFromQuaternion(diffQuat)
curr_pose_err = angle * angle
diffVel = [
simJointInfo[1][0] - kinJointInfo[1][0], simJointInfo[1][1] - kinJointInfo[1][1],
simJointInfo[1][2] - kinJointInfo[1][2]
]
curr_vel_err = diffVel[0] * diffVel[0] + diffVel[1] * diffVel[1] + diffVel[2] * diffVel[2]
pose_err += w * curr_pose_err
vel_err += w * curr_vel_err
is_end_eff = j in self._end_effectors
if is_end_eff:
linkStateSim = self._pybullet_client.getLinkState(self._sim_model, j)
linkStateKin = self._pybullet_client.getLinkState(self._kin_model, j)
linkPosSim = linkStateSim[0]
linkPosKin = linkStateKin[0]
linkPosDiff = [
linkPosSim[0] - linkPosKin[0], linkPosSim[1] - linkPosKin[1],
linkPosSim[2] - linkPosKin[2]
]
curr_end_err = linkPosDiff[0] * linkPosDiff[0] + linkPosDiff[1] * linkPosDiff[
1] + linkPosDiff[2] * linkPosDiff[2]
end_eff_err += curr_end_err
num_end_effs += 1
if (num_end_effs > 0):
end_eff_err /= num_end_effs
#double root_ground_h0 = mGround->SampleHeight(sim_char.GetRootPos())
#double root_ground_h1 = kin_char.GetOriginPos()[1]
#root_pos0[1] -= root_ground_h0
#root_pos1[1] -= root_ground_h1
root_pos_diff = [
rootPosSim[0] - rootPosKin[0], rootPosSim[1] - rootPosKin[1], rootPosSim[2] - rootPosKin[2]
]
root_pos_err = root_pos_diff[0] * root_pos_diff[0] + root_pos_diff[1] * root_pos_diff[
1] + root_pos_diff[2] * root_pos_diff[2]
#
#root_rot_err = cMathUtil::QuatDiffTheta(root_rot0, root_rot1)
#root_rot_err *= root_rot_err
#root_vel_err = (root_vel1 - root_vel0).squaredNorm()
#root_ang_vel_err = (root_ang_vel1 - root_ang_vel0).squaredNorm()
root_err = root_pos_err + 0.1 * root_rot_err + 0.01 * root_vel_err + 0.001 * root_ang_vel_err
#com_err = 0.1 * (com_vel1_world - com_vel0_world).squaredNorm()
#print("pose_err=",pose_err)
#print("vel_err=",vel_err)
pose_reward = math.exp(-err_scale * pose_scale * pose_err)
vel_reward = math.exp(-err_scale * vel_scale * vel_err)
end_eff_reward = math.exp(-err_scale * end_eff_scale * end_eff_err)
root_reward = math.exp(-err_scale * root_scale * root_err)
com_reward = math.exp(-err_scale * com_scale * com_err)
reward = pose_w * pose_reward + vel_w * vel_reward + end_eff_w * end_eff_reward + root_w * root_reward + com_w * com_reward
# pose_reward,vel_reward,end_eff_reward, root_reward, com_reward);
#print("reward=",reward)
#print("pose_reward=",pose_reward)
#print("vel_reward=",vel_reward)
#print("end_eff_reward=",end_eff_reward)
#print("root_reward=",root_reward)
#print("com_reward=",com_reward)
return reward
|
py | 1a39a96b5af785f92a709c9fb8fd4e8a8a66b233 | import sys
from notebook import Notebook
class Menu:
'''display a menu and respond to choices when run.'''
def __init__(self):
self._notebook = Notebook()
self._choices = {"1" : self._show_notes,
"2": self._search_notes,
"3": self._add_note,
"4": self._modify_note,
"5": self._quit}
def run(self):
'''Display the menu and respond to choices'''
while True:
self._display_menu()
choice = input("Enter an option: ")
action = self._choices.get(choice)
if action:
action()
else:
print("{0} is not a valid choice".format(choice))
def _display_menu(self):
print("""Notebook Menu
1. Show all notes
2. Search notes
3. Add note
4. Modify note
5. Quit
""")
def _show_notes(self, notes=None):
if not notes:
notes = self._notebook.notes
for note in notes:
print("{0}: {1}\n{2}".format(note.id, note.tags, note.memo))
def _search_notes(self):
filter = input("Search for: ")
notes = self._notebook.search(filter)
self._show_notes(notes)
def _add_note(self):
memo = input("Enter a memo: ")
self._notebook.new_note(memo)
print("Your note has been added.")
def _modify_note(self):
id = input("Enter a note id: ")
memo = input("Enter a memo: ")
tags = ("Enter tags: ")
if memo:
self._notebook.modify_memo(id, memo)
if tags:
self._notebook.modify_tags(id, tags)
def _quit(self):
print("Thank you for using your notebook today.")
sys.exit(0)
if __name__ == "__main__":
Menu().run()
|
py | 1a39ab6aee0a8336b8bb2b3aac246b579061d36a | # coding=utf-8
import json
from cStringIO import StringIO
import cv2
import numpy as np
import requests
class InferenceService(object):
"""
Die Klasse ist für das Senden von Bildern zum Inferece Server zuständig.
"""
def __init__(self, interference_server_address):
"""
Konstruktor zum Erzeugen eines neuen InferenceService.
:param interference_server_address: URL des Inference Servers
"""
self.interference_server_address = interference_server_address
def run_interference_on_images(self, images):
"""
Sendet die übergebenen Bilder an den Inference Server und gibt die erkannten ImageNet-Knoten mit Score zurück.
:param images: zu erkennenden Bilder als numpy-Matrix im BGR-Format.
:return: vom Inference Server gelieferten Predictions mit ImageNet-Knoten und Score
"""
converted_images = []
for image in images:
cv2_rgb_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
converted_images.append(cv2_rgb_img)
stacked_images = np.stack(images)
memory_file = StringIO() # erzeugt eine in-memory datei, die für np.save verwendet wird
np.save(memory_file, stacked_images, allow_pickle=False)
memory_file.seek(0)
res = requests.post(self.interference_server_address, data=memory_file)
if res.ok:
predictions = json.loads(res.content)
return predictions
|
py | 1a39ac0b9c7766faaef3630aadde2730214bc0d4 | """
:copyright: 2010-2015 by Ronny Pfannschmidt
:license: MIT
"""
import os
import warnings
from .config import Configuration
from .utils import function_has_arg, string_types
from .version import format_version, meta
from .discover import iter_matching_entrypoints
PRETEND_KEY = "SETUPTOOLS_SCM_PRETEND_VERSION"
TEMPLATES = {
".py": """\
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = {version!r}
""",
".txt": "{version}",
}
def version_from_scm(root):
warnings.warn(
"version_from_scm is deprecated please use get_version",
category=DeprecationWarning,
)
config = Configuration()
config.root = root
# TODO: Is it API?
return _version_from_entrypoint(config, "setuptools_scm.parse_scm")
def _call_entrypoint_fn(config, fn):
if function_has_arg(fn, "config"):
return fn(config.absolute_root, config=config)
else:
warnings.warn(
"parse functions are required to provide a named argument"
" 'config' in the future.",
category=PendingDeprecationWarning,
stacklevel=2,
)
return fn(config.absolute_root)
def _version_from_entrypoint(config, entrypoint):
for ep in iter_matching_entrypoints(config.absolute_root, entrypoint):
version = _call_entrypoint_fn(config, ep.load())
if version:
return version
def dump_version(root, version, write_to, template=None):
assert isinstance(version, string_types)
if not write_to:
return
target = os.path.normpath(os.path.join(root, write_to))
ext = os.path.splitext(target)[1]
template = template or TEMPLATES.get(ext)
if template is None:
raise ValueError(
"bad file format: '{}' (of {}) \nonly *.txt and *.py are supported".format(
os.path.splitext(target)[1], target
)
)
with open(target, "w") as fp:
fp.write(template.format(version=version))
def _do_parse(config):
pretended = os.environ.get(PRETEND_KEY)
if pretended:
# we use meta here since the pretended version
# must adhere to the pep to begin with
return meta(tag=pretended, preformatted=True, config=config)
if config.parse:
parse_result = _call_entrypoint_fn(config, config.parse)
if isinstance(parse_result, string_types):
raise TypeError(
"version parse result was a string\nplease return a parsed version"
)
version = parse_result or _version_from_entrypoint(
config, "setuptools_scm.parse_scm_fallback"
)
else:
# include fallbacks after dropping them from the main entrypoint
version = _version_from_entrypoint(
config, "setuptools_scm.parse_scm"
) or _version_from_entrypoint(
config, "setuptools_scm.parse_scm_fallback"
)
if version:
return version
raise LookupError(
"setuptools-scm was unable to detect version for %r.\n\n"
"Make sure you're either building from a fully intact git repository "
"or PyPI tarballs. Most other sources (such as GitHub's tarballs, a "
"git checkout without the .git folder) don't contain the necessary "
"metadata and will not work.\n\n"
"For example, if you're using pip, instead of "
"https://github.com/user/proj/archive/master.zip "
"use git+https://github.com/user/proj.git#egg=proj" % config.absolute_root
)
def get_version(
root=".",
version_scheme="guess-next-dev",
local_scheme="node-and-date",
write_to=None,
write_to_template=None,
relative_to=None,
tag_regex=None,
parse=None,
):
"""
If supplied, relative_to should be a file from which root may
be resolved. Typically called by a script or module that is not
in the root of the repository to direct setuptools_scm to the
root of the repository by supplying ``__file__``.
"""
config = Configuration()
config.root = root
config.version_scheme = version_scheme
config.local_scheme = local_scheme
config.write_to = write_to
config.write_to_template = write_to_template
config.relative_to = relative_to
config.tag_regex = tag_regex
config.parse = parse
parsed_version = _do_parse(config)
if parsed_version:
version_string = format_version(
parsed_version, version_scheme=version_scheme, local_scheme=local_scheme
)
dump_version(
root=root,
version=version_string,
write_to=write_to,
template=write_to_template,
)
return version_string
|
py | 1a39ace3c3753a60dc054075751f49cac813244e |
import pandas as pd
import altair as alt
import numpy as np
df1 = pd.read_csv('../../processed_data/D4_single_0.3.csv')
df1['Dataset'] = 'D4'
df2 = pd.read_csv('../../processed_data/AmpC_single_0.3.csv')
df2['Dataset'] = 'AmpC'
df = pd.concat([df1, df2])
df['gain'] = df['N hits wanted']/0.003 / df['N hits explored']
df['Days'] = df['N hits explored'] / 60 / 60 /24
cols = list(df.columns)
print(cols)
cols[2]='Training set size'
cols[-1] = 'Computation days (single cpu)'
df.columns = cols
##Plot single iteration, enrichment:
line = alt.Chart(df).mark_line(color='black',size=2,opacity=0.5).encode(
x=alt.X('Training set size:Q', scale=alt.Scale(type='log')),
y=alt.Y('gain',aggregate='mean',title='Enrichment'),
color=alt.Color('N hits wanted:N')
)
pts = alt.Chart(df).mark_point(filled=False,size=40).encode(
x=alt.X('Training set size:Q'),
y=alt.Y('gain',aggregate='mean',title='Enrichment'),
color=alt.Color('N hits wanted:N'),
tooltip=alt.Tooltip('N hits wanted', aggregate='mean', title='Enrichment')
)
error_bars = alt.Chart(df).mark_errorbar(extent='ci').encode(
x=alt.X('Training set size:Q',),
y=alt.Y('gain', title='Enrichment'),
color=alt.Color('N hits wanted:N')
)
ch = (line+pts+error_bars).properties(width=300, height=250).facet('Dataset:N')
ch.resolve_scale(y='independent').save('../../figures/single_it_enrichment.html')
#ch.save('../../figures/single_it_enrichment.html')
##Plot single iteration, computation days:
line = alt.Chart(df).mark_line(color='black',size=2,opacity=0.5).encode(
x=alt.X('Training set size:Q', scale=alt.Scale(type='log')),
y=alt.Y('Computation days (single cpu)',aggregate='mean',),
color=alt.Color('N hits wanted:N')
)
pts = alt.Chart(df).mark_point(filled=False,size=40).encode(
x=alt.X('Training set size:Q'),
y=alt.Y('Computation days (single cpu)',aggregate='mean',),
color=alt.Color('N hits wanted:N'),
# tooltip=alt.Tooltip('Computation days (single cpu)')
)
error_bars = alt.Chart(df).mark_errorbar(extent='ci').encode(
x=alt.X('Training set size:Q',),
y=alt.Y('Computation days (single cpu)'),
color=alt.Color('N hits wanted:N')
)
ch = line+pts+error_bars
ch = ch.properties(width=300, height=250).facet('Dataset:N',)
ch.resolve_scale(y='independent').save('../../figures/single_it_computationdays.html')
#ch.save('../../figures/single_it_computationdays.html')
#####
##Active learning approach:
#####
df1 = pd.read_csv('../../processed_data/ampc_reconstruction_0.3_1_.csv')
df1['Algorithm'] = 'AmpC:LogReg (ours)'
df2 = pd.read_csv('../../processed_data/D4_reconstruction_0.3_1_.csv')
df2['Algorithm'] = 'D4:LogReg (ours)'
df = pd.concat([df1, df2])
prev_results = [['AmpC:RF (Graff)', 400_000, 71.4, 2.1], ['AmpC:NN (Graff)', 400_000, 74.7, 1.4],
['AmpC:MPN (Graff)',400_000, 87.9, 2.3],
['AmpC:RF (Graff)', 200_000, 45.5, 1.8],
['AmpC:NN (Graff)', 200_000, 52.8, 0.5],
['AmpC:MPN (Graff)', 200_000, 67.1, 2.1],
['AmpC:RF (Graff)', 100_000, 24.0, 2.2],
['AmpC:NN (Graff)', 100_000 , 33.3,0.3],
['AmpC:MPN (Graff)', 100_000, 52.0, 0.5]]
coley = pd.DataFrame(columns=['Algorithm', 'Training size', 'N ligands explored', '% top-k found'])
count = 0
for res in prev_results:
desired_std_dev = res[3]
samples = np.array([-1,0,1]).astype(float)
samples *= (desired_std_dev/np.std(samples))
for s in samples:
coley.loc[count]= [res[0], res[1], res[1]*6, (s+res[2])/100]
count+=1
concat = pd.concat([df, coley])
concat['% top-k found']*=100
concat.columns = ['nan', 'Algorithm', 'Training set size', 'N ligands explored', '% top-k found']
concat['Training set size'] = concat['Training set size'].apply(lambda num: f"{num:,d}",)
concat['Computation days (single CPU)'] = concat['N ligands explored'] / 60 / 60 /24
error_bars = alt.Chart(concat).mark_errorbar(extent='ci').encode(
x=alt.X('N ligands explored:Q',title='Number of ligands sampled'),
y=alt.Y('% top-k found:Q', title='% top 50,000 found'),
color=alt.Color('Algorithm')
)
points = alt.Chart(concat).mark_point(filled=False, size=40, color='black').encode(
x=alt.X('N ligands explored:Q'),
y=alt.Y('% top-k found:Q',aggregate='mean',title='% top 50,000 found'),
color=alt.Color('Algorithm'),
tooltip=alt.Tooltip('% top-k found:Q',aggregate='mean',title='% top 50,000 found')
)
line = alt.Chart(concat).mark_line(color='black',size=2,opacity=0.5).encode(
x=alt.X('N ligands explored:Q'),
y=alt.Y('% top-k found:Q',aggregate='mean',title='% top 50,000 found'),
color=alt.Color('Algorithm')
)
ch = (error_bars+points+line).properties(height=300,width=150).facet(
column=alt.Column('Training set size:N',sort=alt.Sort([0.004, 0.002, 0.001])),
).resolve_scale(x='independent')
ch.save('../../figures/active_learning_percentage.html')
error_bars = alt.Chart(concat).mark_errorbar(extent='ci').encode(
x=alt.X('Computation days (single CPU):Q',),
y=alt.Y('% top-k found:Q', title='% top 50,000 found'),
color=alt.Color('Algorithm')
)
points = alt.Chart(concat).mark_point(filled=False, size=40, color='black').encode(
x=alt.X('Computation days (single CPU):Q'),
y=alt.Y('% top-k found:Q',aggregate='mean',title='% top 50,000 found'),
color=alt.Color('Algorithm'),
tooltip=alt.Tooltip('Computation days (single CPU):Q')
)
line = alt.Chart(concat).mark_line(color='black',size=2,opacity=0.5).encode(
x=alt.X('Computation days (single CPU):Q'),
y=alt.Y('% top-k found:Q',aggregate='mean',title='% top 50,000 found'),
color=alt.Color('Algorithm')
)
ch = (error_bars+points+line).properties(height=300,width=150).facet(
column=alt.Column('Training set size:N',sort=alt.Sort([0.004, 0.002, 0.001])),
).resolve_scale(x='independent')
ch.save('../../figures/active_learning_computationdays.html')
|
py | 1a39adcec69ca10c3fdf8bb7ab28840c8e90682e | # Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Test the `cdm` module."""
import numpy as np
import pytest
from metpy.io.cdm import Dataset
def test_group():
"""Test `Group`/`Dataset` behavior."""
ds = Dataset()
ds.createDimension('x', 5)
ds.createVariable('data', 'f4', ('x',), 5)
ds.conventions = 'CF-1.5'
assert 'x' in ds.dimensions
assert 'data' in ds.variables
assert 'conventions' in ds.ncattrs()
assert str(ds) == ('root\n\nDimensions:\n'
"<class 'metpy.io.cdm.Dimension'>: name = x, size = 5\n\n"
"Variables:\n<class 'metpy.io.cdm.Variable'>: float32 data(x)\n\t"
'shape = 5\n\nAttributes:\n\tconventions: CF-1.5')
def test_dim():
"""Test `Dimension` behavior."""
ds = Dataset()
dim = ds.createDimension('x', 5)
assert dim.size == 5
assert dim.group() is ds
assert str(dim) == "<class 'metpy.io.cdm.Dimension'>: name = x, size = 5"
def test_var():
"""Test `Variable` behavior."""
ds = Dataset()
ds.createDimension('x', 2)
var = ds.createVariable('data', 'f4', ('x',), 5)
assert 'data' in ds.variables
assert var.shape == (2,)
assert var.size == 2
assert var.ndim == 1
assert var.dtype == np.float32
assert var[0] == 5
var.units = 'meters'
assert 'units' in var.ncattrs()
assert var.units == 'meters'
assert var.group() is ds
assert str(var) == ("<class 'metpy.io.cdm.Variable'>: float32 data(x)"
'\n\tunits: meters\n\tshape = 2')
def test_multidim_var():
"""Test multi-dim `Variable`."""
ds = Dataset()
ds.createDimension('x', 2)
ds.createDimension('y', 3)
var = ds.createVariable('data', 'i8', ('x', 'y'))
assert var.shape == (2, 3)
assert var.size == 6
assert var.ndim == 2
assert var.dtype == np.int64
assert str(var) == ("<class 'metpy.io.cdm.Variable'>: int64 data(x, y)"
'\n\tshape = (2, 3)')
def test_remove_attr():
"""Test removing an attribute."""
ds = Dataset()
ds.maker = 'me'
assert 'maker' in ds.ncattrs()
del ds.maker
assert not hasattr(ds, 'maker')
assert 'maker' not in ds.ncattrs()
def test_add_group():
"""Test adding a group."""
ds = Dataset()
grp = ds.createGroup('myGroup')
assert grp.name == 'myGroup'
assert 'myGroup' in ds.groups
assert str(ds) == 'root\nGroups:\nmyGroup'
def test_variable_size_check():
"""Test `Variable` checking size of passed array."""
ds = Dataset()
xdim = ds.createDimension('x', 2)
ydim = ds.createDimension('y', 3)
# Create array with dims flipped
arr = np.empty((ydim.size, xdim.size), dtype='f4')
with pytest.raises(ValueError):
ds.createVariable('data', 'f4', ('x', 'y'), wrap_array=arr)
|
py | 1a39ae7ac170fc621c12a1fa2d6c6f516c963823 | #!/usr/bin/env python
'''
Copyright (c) 2016 anti-XSS developers
'''
import sys
import os
from lib.var.reporttext import ReportText
def gnrReport(xssScripts):
ReportText().addText('')
ReportText().addText('Summary')
ReportText().addText('There are ' + str(len(xssScripts)) + ' XSS vulnerabilities found.\n\n')
if len(xssScripts) < 1:
return None
ReportText().addText('')
ReportText().addText('Found vulnerabilities')
for xssScript in xssScripts:
head = xssScript.split('\t')[0]
tail = xssScript.split('\t')[1]
ReportText().addText('Payload: ' + head)
ReportText().addText('From: ' + tail)
ReportText().addText('')
|
py | 1a39b032ed184b748c82433665dd4a1ddd6e8b10 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class EventSubscription(pulumi.CustomResource):
arn: pulumi.Output[str]
customer_aws_id: pulumi.Output[str]
enabled: pulumi.Output[bool]
"""
A boolean flag to enable/disable the subscription. Defaults to true.
"""
event_categories: pulumi.Output[list]
"""
A list of event categories for a SourceType that you want to subscribe to. See https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html or run `aws redshift describe-event-categories`.
"""
name: pulumi.Output[str]
"""
The name of the Redshift event subscription.
"""
severity: pulumi.Output[str]
"""
The event severity to be published by the notification subscription. Valid options are `INFO` or `ERROR`.
"""
sns_topic_arn: pulumi.Output[str]
"""
The ARN of the SNS topic to send events to.
"""
source_ids: pulumi.Output[list]
"""
A list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. If specified, a source_type must also be specified.
"""
source_type: pulumi.Output[str]
"""
The type of source that will be generating the events. Valid options are `cluster`, `cluster-parameter-group`, `cluster-security-group`, or `cluster-snapshot`. If not set, all sources will be subscribed to.
"""
status: pulumi.Output[str]
tags: pulumi.Output[dict]
"""
A mapping of tags to assign to the resource.
"""
def __init__(__self__, resource_name, opts=None, enabled=None, event_categories=None, name=None, severity=None, sns_topic_arn=None, source_ids=None, source_type=None, tags=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Redshift event subscription resource.
## Attributes
The following additional attributes are provided:
* `arn` - Amazon Resource Name (ARN) of the Redshift event notification subscription
* `id` - The name of the Redshift event notification subscription
* `customer_aws_id` - The AWS customer account associated with the Redshift event notification subscription
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] enabled: A boolean flag to enable/disable the subscription. Defaults to true.
:param pulumi.Input[list] event_categories: A list of event categories for a SourceType that you want to subscribe to. See https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html or run `aws redshift describe-event-categories`.
:param pulumi.Input[str] name: The name of the Redshift event subscription.
:param pulumi.Input[str] severity: The event severity to be published by the notification subscription. Valid options are `INFO` or `ERROR`.
:param pulumi.Input[str] sns_topic_arn: The ARN of the SNS topic to send events to.
:param pulumi.Input[list] source_ids: A list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. If specified, a source_type must also be specified.
:param pulumi.Input[str] source_type: The type of source that will be generating the events. Valid options are `cluster`, `cluster-parameter-group`, `cluster-security-group`, or `cluster-snapshot`. If not set, all sources will be subscribed to.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/redshift_event_subscription.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['enabled'] = enabled
__props__['event_categories'] = event_categories
__props__['name'] = name
__props__['severity'] = severity
if sns_topic_arn is None:
raise TypeError("Missing required property 'sns_topic_arn'")
__props__['sns_topic_arn'] = sns_topic_arn
__props__['source_ids'] = source_ids
__props__['source_type'] = source_type
__props__['tags'] = tags
__props__['arn'] = None
__props__['customer_aws_id'] = None
__props__['status'] = None
super(EventSubscription, __self__).__init__(
'aws:redshift/eventSubscription:EventSubscription',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, arn=None, customer_aws_id=None, enabled=None, event_categories=None, name=None, severity=None, sns_topic_arn=None, source_ids=None, source_type=None, status=None, tags=None):
"""
Get an existing EventSubscription resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] enabled: A boolean flag to enable/disable the subscription. Defaults to true.
:param pulumi.Input[list] event_categories: A list of event categories for a SourceType that you want to subscribe to. See https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-event-notifications.html or run `aws redshift describe-event-categories`.
:param pulumi.Input[str] name: The name of the Redshift event subscription.
:param pulumi.Input[str] severity: The event severity to be published by the notification subscription. Valid options are `INFO` or `ERROR`.
:param pulumi.Input[str] sns_topic_arn: The ARN of the SNS topic to send events to.
:param pulumi.Input[list] source_ids: A list of identifiers of the event sources for which events will be returned. If not specified, then all sources are included in the response. If specified, a source_type must also be specified.
:param pulumi.Input[str] source_type: The type of source that will be generating the events. Valid options are `cluster`, `cluster-parameter-group`, `cluster-security-group`, or `cluster-snapshot`. If not set, all sources will be subscribed to.
:param pulumi.Input[dict] tags: A mapping of tags to assign to the resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-aws/blob/master/website/docs/r/redshift_event_subscription.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["arn"] = arn
__props__["customer_aws_id"] = customer_aws_id
__props__["enabled"] = enabled
__props__["event_categories"] = event_categories
__props__["name"] = name
__props__["severity"] = severity
__props__["sns_topic_arn"] = sns_topic_arn
__props__["source_ids"] = source_ids
__props__["source_type"] = source_type
__props__["status"] = status
__props__["tags"] = tags
return EventSubscription(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
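# Illustrative usage sketch (resource names and references below are placeholders, not part of
# this generated module): forward ERROR-level events from a Redshift cluster to an SNS topic.
#
#   import pulumi_aws as aws
#
#   default = aws.redshift.EventSubscription("default",
#       sns_topic_arn=default_topic.arn,
#       source_type="cluster",
#       source_ids=[default_cluster.id],
#       severity="ERROR",
#       tags={"Name": "default"})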
|
py | 1a39b14f07d919291dd88baac1680e5b5919b44e | from mathsmonkey import settings
from random import randint
from math import copysign
def gen_rnd(n_digits, var_digits = 0):
""" generate a positive random number n-digits long with specified variance in the number of digits
"""
assert(n_digits >= 1)
assert(var_digits < n_digits)
dgts_offst = 0
if var_digits > 0 :
dgts_offst = randint(0, var_digits)
min, max = pow(10, n_digits - 1 - dgts_offst), pow(10, n_digits - dgts_offst) - 1
assert(min < max)
return randint(min, max)
sign = lambda x:int(copysign(1, x))
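# Minimal sanity-check sketch (an assumption, not part of the original module; it also assumes
# the mathsmonkey package and its settings module are importable):
if __name__ == '__main__':
    for _ in range(3):
        n = gen_rnd(5, var_digits=2)   # a positive number with 3 to 5 digits
        assert 100 <= n <= 99999
        print(n, sign(-n))             # sign(-n) == -1 for any positive n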
|
py | 1a39b1898970bd6ec38e32fb9fd00ed1d2c13f5f | """Config flow for UpCloud."""
import logging
import requests.exceptions
import upcloud_api
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME
from homeassistant.core import callback
from .const import DEFAULT_SCAN_INTERVAL, DOMAIN
_LOGGER = logging.getLogger(__name__)
class UpCloudConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""UpCloud config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
username: str
password: str
async def async_step_user(self, user_input=None):
"""Handle user initiated flow."""
if user_input is None:
return self._async_show_form(step_id="user")
await self.async_set_unique_id(user_input[CONF_USERNAME])
manager = upcloud_api.CloudManager(
user_input[CONF_USERNAME], user_input[CONF_PASSWORD]
)
errors = {}
try:
await self.hass.async_add_executor_job(manager.authenticate)
except upcloud_api.UpCloudAPIError:
errors["base"] = "invalid_auth"
_LOGGER.debug("invalid_auth", exc_info=True)
except requests.exceptions.RequestException:
errors["base"] = "cannot_connect"
_LOGGER.debug("cannot_connect", exc_info=True)
if errors:
return self._async_show_form(
step_id="user", user_input=user_input, errors=errors
)
return self.async_create_entry(title=user_input[CONF_USERNAME], data=user_input)
async def async_step_import(self, user_input=None):
"""Handle import initiated flow."""
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
return await self.async_step_user(user_input=user_input)
@callback
def _async_show_form(self, step_id, user_input=None, errors=None):
"""Show our form."""
if user_input is None:
user_input = {}
return self.async_show_form(
step_id=step_id,
data_schema=vol.Schema(
{
vol.Required(
CONF_USERNAME, default=user_input.get(CONF_USERNAME, "")
): str,
vol.Required(
CONF_PASSWORD, default=user_input.get(CONF_PASSWORD, "")
): str,
}
),
errors=errors or {},
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get options flow."""
return UpCloudOptionsFlow(config_entry)
class UpCloudOptionsFlow(config_entries.OptionsFlow):
"""UpCloud options flow."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
data_schema = vol.Schema(
{
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(CONF_SCAN_INTERVAL)
or DEFAULT_SCAN_INTERVAL.seconds,
): vol.All(vol.Coerce(int), vol.Range(min=30)),
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
|
py | 1a39b1ba89693e1db533679c6da69cbcf3473f9f | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualRouterResult',
'AwaitableGetVirtualRouterResult',
'get_virtual_router',
]
@pulumi.output_type
class GetVirtualRouterResult:
"""
VirtualRouter Resource.
"""
def __init__(__self__, etag=None, hosted_gateway=None, hosted_subnet=None, id=None, location=None, name=None, peerings=None, provisioning_state=None, tags=None, type=None, virtual_router_asn=None, virtual_router_ips=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if hosted_gateway and not isinstance(hosted_gateway, dict):
raise TypeError("Expected argument 'hosted_gateway' to be a dict")
pulumi.set(__self__, "hosted_gateway", hosted_gateway)
if hosted_subnet and not isinstance(hosted_subnet, dict):
raise TypeError("Expected argument 'hosted_subnet' to be a dict")
pulumi.set(__self__, "hosted_subnet", hosted_subnet)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if peerings and not isinstance(peerings, list):
raise TypeError("Expected argument 'peerings' to be a list")
pulumi.set(__self__, "peerings", peerings)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_router_asn and not isinstance(virtual_router_asn, float):
raise TypeError("Expected argument 'virtual_router_asn' to be a float")
pulumi.set(__self__, "virtual_router_asn", virtual_router_asn)
if virtual_router_ips and not isinstance(virtual_router_ips, list):
raise TypeError("Expected argument 'virtual_router_ips' to be a list")
pulumi.set(__self__, "virtual_router_ips", virtual_router_ips)
@property
@pulumi.getter
def etag(self) -> str:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="hostedGateway")
def hosted_gateway(self) -> Optional['outputs.SubResourceResponse']:
"""
The Gateway on which VirtualRouter is hosted.
"""
return pulumi.get(self, "hosted_gateway")
@property
@pulumi.getter(name="hostedSubnet")
def hosted_subnet(self) -> Optional['outputs.SubResourceResponse']:
"""
The Subnet on which VirtualRouter is hosted.
"""
return pulumi.get(self, "hosted_subnet")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def peerings(self) -> Sequence['outputs.SubResourceResponse']:
"""
List of references to VirtualRouterPeerings
"""
return pulumi.get(self, "peerings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualRouterAsn")
def virtual_router_asn(self) -> Optional[float]:
"""
VirtualRouter ASN.
"""
return pulumi.get(self, "virtual_router_asn")
@property
@pulumi.getter(name="virtualRouterIps")
def virtual_router_ips(self) -> Optional[Sequence[str]]:
"""
VirtualRouter IPs
"""
return pulumi.get(self, "virtual_router_ips")
class AwaitableGetVirtualRouterResult(GetVirtualRouterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualRouterResult(
etag=self.etag,
hosted_gateway=self.hosted_gateway,
hosted_subnet=self.hosted_subnet,
id=self.id,
location=self.location,
name=self.name,
peerings=self.peerings,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
virtual_router_asn=self.virtual_router_asn,
virtual_router_ips=self.virtual_router_ips)
def get_virtual_router(expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_router_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualRouterResult:
"""
VirtualRouter Resource.
:param str expand: Expands referenced resources.
:param str resource_group_name: The name of the resource group.
:param str virtual_router_name: The name of the Virtual Router.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['virtualRouterName'] = virtual_router_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20190801:getVirtualRouter', __args__, opts=opts, typ=GetVirtualRouterResult).value
return AwaitableGetVirtualRouterResult(
etag=__ret__.etag,
hosted_gateway=__ret__.hosted_gateway,
hosted_subnet=__ret__.hosted_subnet,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
peerings=__ret__.peerings,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
virtual_router_asn=__ret__.virtual_router_asn,
virtual_router_ips=__ret__.virtual_router_ips)
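# Illustrative call (resource names are placeholders, not part of the generated SDK):
#
#   result = get_virtual_router(resource_group_name="example-rg",
#                               virtual_router_name="example-vr")
#   pulumi.export("virtualRouterIps", result.virtual_router_ips)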
|
py | 1a39b544a6206765366db108e1f005b90a3bb339 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import os
from typing import Iterable, List, Tuple, Union
import pandas as pd
from pytest_regressions.data_regression import DataRegressionFixture
from pytest_regressions.dataframe_regression import DataFrameRegressionFixture
from pytest_regressions.file_regression import FileRegressionFixture
from parlai.crowdsourcing.utils.tests import AbstractOneTurnCrowdsourcingTest
TASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
def get_hashed_combo_path(
root_dir: str,
subdir: str,
task: str,
combos: Iterable[Union[List[str], Tuple[str, str]]],
) -> str:
"""
Return a unique path for the given combinations of models.
:param root_dir: root save directory
:param subdir: immediate subdirectory of root_dir
:param task: the ParlAI task being considered
:param combos: the combinations of models being compared
"""
# Sort the names in each combo, as well as the overall combos
sorted_combos = []
for combo in combos:
assert len(combo) == 2
sorted_combos.append(tuple(sorted(combo)))
sorted_combos = sorted(sorted_combos)
os.makedirs(os.path.join(root_dir, subdir), exist_ok=True)
path = os.path.join(
root_dir,
subdir,
hashlib.sha1(
'___and___'.join(
[f"{m1}vs{m2}.{task.replace(':', '_')}" for m1, m2 in sorted_combos]
).encode('utf-8')
).hexdigest()[:10],
)
return path
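# Illustrative call (paths and model names are made up for this sketch): two model pairs
# evaluated on blended_skill_talk collapse into one stable, hashed results directory.
#
#   get_hashed_combo_path(
#       root_dir='/tmp/acute',
#       subdir='live_acute',
#       task='blended_skill_talk',
#       combos=[('model1', 'model2'), ('model2', 'human')],
#   )  # -> '/tmp/acute/live_acute/<first 10 chars of a sha1 digest>'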
class AbstractFastAcuteTest(AbstractOneTurnCrowdsourcingTest):
"""
Abstract test class for testing Fast ACUTE code.
"""
TASK_DIRECTORY = TASK_DIRECTORY
MODELS = ['model1', 'model2']
MODEL_STRING = ','.join(MODELS)
TASK_DATA = {
"final_data": [
{"speakerChoice": "human_as_model", "textReason": "Makes more sense"},
{"speakerChoice": "model1", "textReason": "Makes more sense"},
{"speakerChoice": "model2", "textReason": "Makes more sense"},
{"speakerChoice": "model1", "textReason": "Makes more sense"},
{"speakerChoice": "model2", "textReason": "Makes more sense"},
]
}
def _get_common_overrides(self, root_dir: str) -> List[str]:
"""
Return overrides for all subclassed Fast ACUTE test code.
"""
# TODO: clean this up when Hydra has support for recursive defaults
return [
'+mephisto.blueprint.acute_eval_type=engaging',
'mephisto.blueprint.block_on_onboarding_fail=False',
'+mephisto.blueprint.matchups_per_pair=60',
'+mephisto.blueprint.num_self_chats=5',
f'+mephisto.blueprint.onboarding_path={self.TASK_DIRECTORY}/task_config/onboarding.json',
f'+mephisto.blueprint.root_dir={root_dir}',
'+mephisto.blueprint.sufficient_matchups_multiplier=2',
'+mephisto.blueprint.task=blended_skill_talk',
'mephisto.task.task_name=acute_eval_test',
]
def test_agent_state(self, setup_teardown, data_regression: DataRegressionFixture):
outputs = setup_teardown
self._check_agent_state(state=outputs['state'], data_regression=data_regression)
def test_all_convo_pairs_txt(
self, setup_teardown, file_regression: FileRegressionFixture
):
outputs = setup_teardown
self._check_file_contents(
results_folder=outputs['results_folder'],
file_suffix='all_convo_pairs.txt',
file_regression=file_regression,
)
def test_all_html(self, setup_teardown, file_regression: FileRegressionFixture):
outputs = setup_teardown
self._check_file_contents(
results_folder=outputs['results_folder'],
file_suffix='all.html',
file_regression=file_regression,
)
def test_full_csv(
self, setup_teardown, dataframe_regression: DataFrameRegressionFixture
):
outputs = setup_teardown
self._check_dataframe(
results_folder=outputs['results_folder'],
file_suffix='full.csv',
dataframe_regression=dataframe_regression,
)
def test_grid_csv(
self, setup_teardown, dataframe_regression: DataFrameRegressionFixture
):
outputs = setup_teardown
self._check_dataframe(
results_folder=outputs['results_folder'],
file_suffix='grid.csv',
dataframe_regression=dataframe_regression,
)
def test_grid_winners_as_rows_csv(
self, setup_teardown, dataframe_regression: DataFrameRegressionFixture
):
outputs = setup_teardown
self._check_dataframe(
results_folder=outputs['results_folder'],
file_suffix='grid.winners_as_rows.csv',
dataframe_regression=dataframe_regression,
)
def test_ratings_per_worker_csv(
self, setup_teardown, dataframe_regression: DataFrameRegressionFixture
):
outputs = setup_teardown
self._check_dataframe(
results_folder=outputs['results_folder'],
file_suffix='ratings_per_worker.csv',
dataframe_regression=dataframe_regression,
)
def test_reason_html(self, setup_teardown, file_regression: FileRegressionFixture):
outputs = setup_teardown
self._check_file_contents(
results_folder=outputs['results_folder'],
file_suffix='reason.html',
file_regression=file_regression,
)
def test_significance_csv(
self, setup_teardown, dataframe_regression: DataFrameRegressionFixture
):
outputs = setup_teardown
self._check_dataframe(
results_folder=outputs['results_folder'],
file_suffix='significance.csv',
dataframe_regression=dataframe_regression,
)
def _check_dataframe(
self,
results_folder: str,
file_suffix: str,
dataframe_regression: DataFrameRegressionFixture,
):
file_path = self._get_matching_file_path(
results_folder=results_folder, file_suffix=file_suffix
)
df = pd.read_csv(file_path)
dataframe_regression.check(data_frame=df)
def _check_file_contents(
self,
results_folder: str,
file_suffix: str,
file_regression: FileRegressionFixture,
):
file_path = self._get_matching_file_path(
results_folder=results_folder, file_suffix=file_suffix
)
with open(file_path) as f:
contents = f.read()
file_regression.check(contents=contents)
def _get_matching_file_path(self, results_folder: str, file_suffix: str) -> str:
matching_files = [
obj for obj in os.listdir(results_folder) if obj.endswith(file_suffix)
]
assert len(matching_files) == 1
return os.path.join(results_folder, matching_files[0])
|
py | 1a39b554b7c793debeb0eb6b3e87f857b2b82a1a | """
Module containing helper functions for the JASMIN notifications app.
"""
__author__ = "Matt Pryor"
__copyright__ = "Copyright 2015 UK Science and Technology Facilities Council"
from datetime import date
from django.conf import settings
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import NotificationType, UserNotification, EmailNotification
def notification_context(notification):
"""
Takes a notification and returns a template context dictionary for that notification.
This context dictionary will contain:
* ``notification_type`` - the notification type as a string
* ``level`` - the notification level as a string
* ``email`` - the email that the notification is for
* ``user`` - the user the notification is for, or ``None`` if the notification
is to an email that is not associated with a user
* ``target`` - the target object for the notification
* ``link`` - the *fully qualified* link underlying the notification
* ``follow_link`` - the *fully qualified* link to follow the notification
* ``created_at`` - the datetime at which the notification was created
* ``followed_at`` - the datetime at which the notification was followed, or
``None`` if it has not been followed
* Any variables specified as ``extra_context``
"""
if isinstance(notification, UserNotification):
user = notification.user
email = user.email
else:
# For email notifications, try to find a user with the email address to go
# into the context
email = notification.email
user = get_user_model().objects.filter(email = email).first()
# Create the context
link_prefix = '' if notification.link.startswith('http') else settings.BASE_URL
context = {
'notification_type' : notification.notification_type.name,
'level' : notification.notification_type.level.value,
'email' : email,
'user' : user,
'target' : notification.target,
'link' : link_prefix + notification.link,
'follow_link' : settings.BASE_URL + reverse(
'jasmin_notifications:follow', kwargs = { 'uuid' : notification.uuid }
),
'created_at' : notification.created_at,
'followed_at' : notification.followed_at,
}
context.update(notification.extra_context)
return context
def notify(notification_type, target, link, user = None, email = None, cc = None, **extra_context):
"""
Creates a notification with the given ``notification_type``, ``target`` and ``link``.
``notification_type`` can be given as a string.
If ``user`` is given, a :py:class:`~.models.UserNotification` is created (even
if ``email`` is also given), otherwise an :py:class:``~.models.EmailNotification``
is created.
Any additional ``kwargs`` are based as context variables for template rendering,
both for emails and messages (if appropriate).
"""
if not isinstance(notification_type, NotificationType):
notification_type = NotificationType.objects.get(name = notification_type)
if user:
notification = UserNotification(user = user)
elif email:
notification = EmailNotification(email = email, cc = cc)
else:
raise ValueError('One of user or email must be given')
notification.notification_type = notification_type
notification.target = target
notification.link = link
notification.extra_context = extra_context
notification.save()
def notify_if_not_exists(notification_type, target, link, user = None, email = None, **extra_context):
"""
Creates a notification with the given ``notification_type``, ``target`` and
``email``\ /``user`` only if such a notification does not already exist.
See :py:func:`notify` for more details.
"""
if user:
query = UserNotification.objects.filter(user = user)
elif email:
query = EmailNotification.objects.filter(email = email)
else:
raise ValueError('One of user or email must be given')
if not query.filter_type(notification_type).filter_target(target).exists():
notify(notification_type, target, link, user, email, **extra_context)
def notify_pending_deadline(deadline, deltas, notification_type, target,
link, user = None, email = None, **extra_context):
"""
Ensures that a notification of the given type, target and email/user is sent
exactly once for each of the given ``deltas`` before the given ``deadline``.
It is assumed that ``deltas`` are given in descending order, i.e. the longest
delta first.
If ``user`` is present in ``kwargs``, :py:class:`~.models.UserNotification`\ s
are created, otherwise :py:class:``~.models.EmailNotification``\ s are created.
"""
# If the deadline has already passed, there is nothing to do
today = date.today()
if deadline < today:
return
# Work out whether we are using email or user notifications
if user:
query = UserNotification.objects.filter(user = user)
elif email:
query = EmailNotification.objects.filter(email = email)
else:
raise ValueError('One of user or email must be given')
# Find the most recent notification for the type/target/recipient combo
latest = query.filter_type(notification_type) \
.filter_target(target) \
.order_by('-created_at') \
.first()
# Add the deadline and the number of notifications to the context
extra_context.update(deadline = deadline, n = len(deltas))
for i, delta in enumerate(deltas, start = 1):
threshold = deadline - delta
# Deltas should be given longest first, so if we are before the threshold
# for this delta, we are done
if today <= threshold:
return
# Now we know threshold < today <= deadline
# So send a notification unless one has already been sent in the window
if not latest or latest.created_at.date() < threshold:
# Add the number of this notification to the context
extra_context = dict(extra_context, i = i)
notify(notification_type, target, link, user, email, **extra_context)
return
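# Sketch of intended usage (the membership/URL names below are assumptions for illustration,
# not part of this app): remind a user 14, 7 and 1 days before a deadline, sending each
# reminder at most once.
#
#   from datetime import timedelta
#   notify_pending_deadline(
#       deadline=membership.expiry_date,
#       deltas=[timedelta(days=14), timedelta(days=7), timedelta(days=1)],
#       notification_type='membership_expiring',
#       target=membership,
#       link=reverse('membership_renew'),
#       user=membership.user,
#   )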
|
py | 1a39b6b89fedf69c6db24b454e955cb678e8e8c1 | """Views for the django ``User`` application.
"""
# pylint: disable=too-many-ancestors
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from user import models
from user import permissions
from user import serializers
class UserPreferencesDetailView(generics.RetrieveUpdateDestroyAPIView):
"""Retrieve, Update, and Destroy REST API view for ``UserPreferences``model.
"""
serializer_class = serializers.UserPreferencesSerializer
permission_classes = (IsAuthenticated, permissions.IsUser)
def get_queryset(self):
return models.UserPreferences.objects.filter(user=self.request.user)
def get_object(self):
# Overridden to always return the UserPreferences associated with the
# request's user.
return self.get_queryset().get(user=self.request.user)
|
py | 1a39b70c08263cfe17291311287773ea871ef841 | """Doc2Vec sklearn wrapper"""
from pathlib import Path
import multiprocessing
import statistics
import logging
from sklearn.base import BaseEstimator, TransformerMixin
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
import numpy as np
logging.getLogger("gensim").setLevel(logging.WARNING)
class Doc2VecVectorizer(BaseEstimator, TransformerMixin):
def __init__(
self,
vector_size=100,
window_size=5,
n_jobs=1,
min_count=2,
negative=5,
sample=1e-5,
epochs=20,
learning_rate=0.025,
model="dm",
pretrained=None,
):
"""
Args:
vector_size: size of vector to represent text
window_size: words left and right of context words used to create representation
min_count: filter words that appear less than min_count. default: 2
negative: number of negative words to be used for training.
if zero hierarchical softmax is used. default: 5
sample: threshold for downsampling high frequency words. default: 1e-5
learning_rate: learning rate used by SGD. default: 0.025
model: underlying model architecture, one of dm or dbow. default: dm
epochs: number of passes over training data. default: 20
n_jobs: number of cores to use (-1 for all). default: 1
pretrained: path to directory containing saved pretrained doc2vec artifacts
"""
self.vector_size = vector_size
self.window_size = window_size
self.epochs = epochs
self.min_count = min_count
self.negative = negative
self.sample = sample
self.n_jobs = n_jobs
self.learning_rate = learning_rate
self.model = model
self.pretrained = pretrained
def _tokenize(self, x):
return x.lower().split()
def _yield_tagged_documents(self, X):
for i, x in enumerate(X):
yield TaggedDocument(self._tokenize(x), [i])
def fit(self, X, *_):
"""
Args:
X: list of texts (strings)
"""
# If pretrained, just load, no need to fit
if self.pretrained:
self.load(self.pretrained)
return self
if self.n_jobs == -1:
workers = multiprocessing.cpu_count()
else:
workers = self.n_jobs
# TODO: Debug streaming implementation below
# atm it gives different result than non streaming
# tagged_documents = self._yield_tagged_documents(X)
# self.model = Doc2Vec(
# vector_size=self.vector_size, window_size=self.window_size,
# workers=workers, min_count=self.min_count, epochs=self.epochs
# )
# self.model.build_vocab(tagged_documents)
# self.model.train(tagged_documents, total_examples=self.model.corpus_count,
# epochs=self.model.epochs)
tagged_documents = list(self._yield_tagged_documents(X))
self.model = Doc2Vec(
tagged_documents,
vector_size=self.vector_size,
window=self.window_size,
workers=workers,
min_count=self.min_count,
epochs=self.epochs,
negative=self.negative,
sample=self.sample,
alpha=self.learning_rate,
dm=1 if self.model == "dm" else 0,
hs=1 if self.negative == 0 else 0,
)
return self
def transform(self, X):
"""
Args:
X: list of texts (strings)
Returns:
docvectors: matrix of size (nb_docs, vector_size)
"""
return np.array([self.model.infer_vector(self._tokenize(x)) for x in X])
def score(self, X):
"""
Args:
X: list of texts (strings). Needs to be the same used for fit.
Returns:
score: percentage of documents that are most similar with themselves
"""
correct = []
docvecs = self.transform(X)
for doc_id, inferred_vector in enumerate(docvecs):
sims = self.model.docvecs.most_similar(
[inferred_vector], topn=len(self.model.docvecs)
)
rank = [docid for docid, sim in sims].index(doc_id)
correct.append(int(rank == 0))
return statistics.mean(correct)
def _get_model_path(self, model_dir):
return "{}/doc2vec".format(model_dir)
def save(self, model_dir):
Path(model_dir).mkdir(parents=True, exist_ok=True)
model_path = self._get_model_path(model_dir)
self.model.save(model_path)
def load(self, model_dir):
model_path = self._get_model_path(model_dir)
self.model = Doc2Vec.load(model_path)
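# Minimal end-to-end sketch (toy corpus, an assumption, not part of the original module):
# fit the vectorizer on a few documents and embed an unseen text.
if __name__ == "__main__":
    corpus = [
        "machine learning for text",
        "deep learning for images",
        "text classification with document embeddings",
    ]
    vectorizer = Doc2VecVectorizer(vector_size=16, min_count=1, epochs=5)
    vectorizer.fit(corpus)
    vectors = vectorizer.transform(["document embeddings for text classification"])
    print(vectors.shape)  # (1, 16)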
|
py | 1a39b7761cce09f79e886983e9a9482e7c1d5836 | """
Bundle Adjustment using GBP.
"""
import numpy as np
import argparse
from gbp import gbp_ba
import vis
parser = argparse.ArgumentParser()
parser.add_argument("--bal_file", required=True,
help="BAL style file with BA data")
parser.add_argument("--n_iters", type=int, default=200,
help="Number of iterations of GBP")
parser.add_argument("--gauss_noise_std", type=int, default=2,
help="Standard deviation of Gaussian noise of measurement model.")
parser.add_argument("--loss", default=None,
help="Loss function: None (squared error), huber or constant.")
parser.add_argument("--Nstds", type=float, default=3.,
help="If loss is not None, number of stds at which point the "
"loss transitions to linear or constant.")
parser.add_argument("--beta", type=float, default=0.01,
help="Threshold for the change in the mean of adjacent beliefs for "
"relinearisation at a factor.")
parser.add_argument("--num_undamped_iters", type=int, default=6,
help="Number of undamped iterations at a factor node after relinearisation.")
parser.add_argument("--min_linear_iters", type=int, default=8,
help="Minimum number of iterations between consecutive relinearisations of a factor.")
parser.add_argument("--eta_damping", type=float, default=0.4,
help="Max damping of information vector of messages.")
parser.add_argument("--prior_std_weaker_factor", type=float, default=50.,
help="Ratio of std of information matrix at measurement factors / "
"std of information matrix at prior factors.")
parser.add_argument("--float_implementation", action='store_true', default=False,
help="Float implementation, so start with strong priors that are weakened")
parser.add_argument("--final_prior_std_weaker_factor", type=float, default=100.,
help="Ratio of information at measurement factors / information at prior factors "
"after the priors are weakened (for floats implementation).")
parser.add_argument("--num_weakening_steps", type=int, default=5,
help="Number of steps over which the priors are weakened (for floats implementation)")
args = parser.parse_args()
print('Configs: \n', args)
configs = dict({
'gauss_noise_std': args.gauss_noise_std,
'loss': args.loss,
'Nstds': args.Nstds,
'beta': args.beta,
'num_undamped_iters': args.num_undamped_iters,
'min_linear_iters': args.min_linear_iters,
'eta_damping': args.eta_damping,
'prior_std_weaker_factor': args.prior_std_weaker_factor,
})
if args.float_implementation:
configs['final_prior_std_weaker_factor'] = args.final_prior_std_weaker_factor
configs['num_weakening_steps'] = args.num_weakening_steps
weakening_factor = np.log10(args.final_prior_std_weaker_factor) / args.num_weakening_steps
graph = gbp_ba.create_ba_graph(args.bal_file, configs)
print(f'\nData: {args.bal_file}\n')
print(f'Number of keyframes: {len(graph.cam_nodes)}')
print(f'Number of landmarks: {len(graph.lmk_nodes)}')
print(f'Number of measurement factors: {len(graph.factors)}\n')
# Sets prior factors automatically to be much weaker than measurement factors.
graph.generate_priors_var(weaker_factor=args.prior_std_weaker_factor)
graph.update_all_beliefs()
# Set up visualisation
scene = vis.ba_vis.create_scene(graph)
viewer = vis.ba_vis.TrimeshSceneViewer(scene=scene, resolution=scene.camera.resolution)
viewer.show()
for i in range(args.n_iters):
# To copy weakening of strong priors as must be done on IPU with float
if args.float_implementation and (i+1) % 2 == 0 and (i < args.num_weakening_steps * 2):
print('Weakening priors')
graph.weaken_priors(weakening_factor)
# At the start, allow a larger number of iterations before linearising
if i == 3 or i == 8:
for factor in graph.factors:
factor.iters_since_relin = 1
are = graph.are()
energy = graph.energy()
n_factor_relins = 0
for factor in graph.factors:
if factor.iters_since_relin == 0:
n_factor_relins += 1
print(f'Iteration {i} // ARE {are:.4f} // Energy {energy:.4f} // Num factors relinearising {n_factor_relins}')
viewer.update(graph)
graph.synchronous_iteration(robustify=True, local_relin=True)
|
py | 1a39b914a5fadc7e47b77bab5181fd3aa40e6913 | """
Copyright (c) 2017 Max deGroot, Ellis Brown
Released under the MIT license
https://github.com/amdegroot/ssd.pytorch
Updated by: Takuya Mouri
"""
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.nn.functional as F
# handbook
# from torch.autograd import Variable
# handbook
from data import coco as cfg
from ..box_utils import match, log_sum_exp
class MultiBoxLoss(nn.Module):
"""SSD Weighted Loss Function
Compute Targets:
1) Produce Confidence Target Indices by matching ground truth boxes
with (default) 'priorboxes' that have jaccard index > threshold parameter
(default threshold: 0.5).
2) Produce localization target by 'encoding' variance into offsets of ground
truth boxes and their matched 'priorboxes'.
3) Hard negative mining to filter the excessive number of negative examples
that comes with using a large number of default bounding boxes.
(default negative:positive ratio 3:1)
Objective Loss:
L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss
weighted by α which is set to 1 by cross val.
Args:
c: class confidences,
l: predicted boxes,
g: ground truth boxes
N: number of matched default boxes
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
"""
def __init__(self, num_classes, overlap_thresh, prior_for_matching,
bkg_label, neg_mining, neg_pos, neg_overlap, encode_target,
use_gpu=True):
super(MultiBoxLoss, self).__init__()
self.use_gpu = use_gpu
self.num_classes = num_classes
self.threshold = overlap_thresh
self.background_label = bkg_label
self.encode_target = encode_target
self.use_prior_for_matching = prior_for_matching
self.do_neg_mining = neg_mining
self.negpos_ratio = neg_pos
self.neg_overlap = neg_overlap
self.variance = cfg['variance']
def forward(self, predictions, targets):
"""Multibox Loss
Args:
predictions (tuple): A tuple containing loc preds, conf preds,
and prior boxes from SSD net.
conf shape: torch.size(batch_size,num_priors,num_classes)
loc shape: torch.size(batch_size,num_priors,4)
priors shape: torch.size(num_priors,4)
targets (tensor): Ground truth boxes and labels for a batch,
shape: [batch_size,num_objs,5] (last idx is the label).
"""
# Unpack the predictions into localization offsets, class confidences and prior box coordinates
loc_data, conf_data, priors = predictions
num = loc_data.size(0)
priors = priors[:loc_data.size(1), :]
num_priors = (priors.size(0))
num_classes = self.num_classes
# match priors (default boxes) and ground truth boxes
# Tensors for the matched ground-truth offsets and ground-truth labels
loc_t = torch.Tensor(num, num_priors, 4)
conf_t = torch.LongTensor(num, num_priors)
# Loop over the batch and split each sample into ground-truth boxes and labels
for idx in range(num):
truths = targets[idx][:, :-1].data
labels = targets[idx][:, -1].data
defaults = priors.data
# Match ground-truth boxes with the prior (default) boxes
match(self.threshold, truths, defaults, self.variance, labels,
loc_t, conf_t, idx)
if self.use_gpu:
# handbook
#loc_t = loc_t.cuda()
#conf_t = conf_t.cuda()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
loc_t = loc_t.to(device)
conf_t = conf_t.to(device)
# handbook
# wrap targets
# handbook
#loc_t = Variable(loc_t, requires_grad=False)
#conf_t = Variable(conf_t, requires_grad=False)
# handbook
# Mask of positive boxes (class index > 0)
pos = conf_t > 0
# Number of positive boxes
num_pos = pos.sum(dim=1, keepdim=True)
# Localization Loss (Smooth L1)
# Shape: [batch,num_priors,4]
# Indices (pos_idx) of the positive boxes
pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)
# Predicted offsets
loc_p = loc_data[pos_idx].view(-1, 4)
# Ground-truth offsets
loc_t = loc_t[pos_idx].view(-1, 4)
# Localization loss
loss_l = F.smooth_l1_loss(loc_p, loc_t, size_average=False)
# Compute max conf across batch for hard negative mining
batch_conf = conf_data.view(-1, self.num_classes)
loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))
# Hard Negative Mining
# handbook
#loss_c[pos] = 0 # filter out pos boxes for now
#loss_c = loss_c.view(num, -1)
loss_c = loss_c.view(num, -1)
loss_c[pos] = 0 # filter out pos boxes for now
# handbook
_, loss_idx = loss_c.sort(1, descending=True)
_, idx_rank = loss_idx.sort(1)
num_pos = pos.long().sum(1, keepdim=True)
num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1)
neg = idx_rank < num_neg.expand_as(idx_rank)
# Confidence Loss Including Positive and Negative Examples
pos_idx = pos.unsqueeze(2).expand_as(conf_data)
neg_idx = neg.unsqueeze(2).expand_as(conf_data)
# Filter the predicted confidences conf_data with pos_idx + neg_idx
conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1, self.num_classes)
# Filter the ground-truth labels conf_t with pos and neg
targets_weighted = conf_t[(pos+neg).gt(0)]
# Confidence (classification) loss
loss_c = F.cross_entropy(conf_p, targets_weighted, size_average=False)
# Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N
# handbook
#N = num_pos.data.sum()
N = num_pos.data.sum().double()
loss_l = loss_l.double()
loss_c = loss_c.double()
# handbook
loss_l /= N
loss_c /= N
return loss_l, loss_c
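# Hypothetical usage sketch (shapes follow the docstring above; the prediction tensors are
# assumed to come from an SSD forward pass and are not produced here):
#
#   criterion = MultiBoxLoss(num_classes=21, overlap_thresh=0.5, prior_for_matching=True,
#                            bkg_label=0, neg_mining=True, neg_pos=3, neg_overlap=0.5,
#                            encode_target=False, use_gpu=torch.cuda.is_available())
#   loss_l, loss_c = criterion((loc_preds, conf_preds, priors), targets)
#   loss = loss_l + loss_c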
|
py | 1a39ba70695c1dea7b98b582de5e784cc8847ef1 | #/usr/bin/env python
# -*- coding: utf-8 -*-
'''from: http://www.dongwm.com/archives/pythonban-ge-ren-jian-li/'''
import re
import random
def color(messages):
color = '\x1B[%d;%dm' % (1, random.randint(30, 37))
return '%s %s\x1B[0m' % (color, messages)
def len_zh(data):
temp = re.findall('[^a-zA-Z0-9._ ]+', data)
count = 0
for i in temp:
count += len(i)
return(count)
def colorprint(mes, flag=True):
def _deco(func):
def wrapper(*args):
res = func(*args)
print(color(mes + ':\n'))
if flag:
for k1, v1 in res.items():
zh = len_zh(k1)
if not isinstance(v1, dict):
print('{0}: {1}'.format(k1.ljust(20 + zh), v1))
else:
print('{0}:'.format(k1.ljust(20 + zh)))
for k2, v2 in v1.items():
zh = len_zh(k2)  # Python 3 strings are already unicode; no decode needed
print(' {0}: {1}'.format(k2.ljust(16 + zh), v2))
else:
for i in res:
if not isinstance(i[1], dict):
print(i)
else:
for k, v in i[1].items():
zh = len_zh(k)  # Python 3 strings are already unicode; no decode needed
print('{0}[{1}]: {2}'.format(
k.ljust(17 + zh), i[0], v))
print('\n')
return res
return wrapper
return _deco
class Resume(object):
def __str__(self):
return color('董伟明的python简历'.center(400))
@property
@colorprint('个人信息')
def personal_information(self):
return {
'Name': '董伟明',
'Gender': 'Male',
'Born': [1985, 8, 9],
'Education': {
'School Name': '保定科技职业学院',
'Major': '烹饪工艺与营养',
'Degree': 'Three-year college',
'Graduation': 2009
},
'QQ': '6196622X',
'Tel': '13552651XXX',
'Email': '[email protected]',
'Target Positions': re.compile(
"'Python Developer'|DevOps", re.I | re.M).pattern
}
@property
@colorprint('个人特点')
def characteristics(self):
return {
'心里承受能力强': '从非计算机专业-Linux运维-Python开发',
'热衷和喜爱': '正是因为喜欢IT, 我才会放弃大学所学专业',
'自学能力强': '没有大学的计算机基础, 都是自学',
'毅力和耐性': '从不放弃一个解决不了的难题,看过的计算机专业技术多于700页的书籍>30本', # noqa
'is_geek': True
}
@property
@colorprint('个人能力')
def skills(self):
return {
'Language': {
'熟悉': ['Python', 'Ruby', 'Bash'],
'了解': ['Haskell', 'Lisp', 'Erlang']},
'OS': ['Gentoo', 'Debian', 'Centos/Rhel', 'Opensuse'],
'Tool': ['Vim', 'Mercurial', 'Git'],
'Databaseandtools': ['MySQL',
'PostgreSQL', 'MongoDB', 'Redis', 'Memcached', 'SQLAlchemy'],
'WebFramework': {
'熟悉': ['Tornado', 'Django', 'Gae'],
'了解': ['Flask']
},
'OtherFramework': ['Twisted', 'gevent',
'stackless', 'scrapy', 'mechanize'],
'GUI': 'pyqt',
'Network': 'Cisco Certified Security Professional',
'Other': '给Gentoo和Centos提交过bug'
}
@property
@colorprint('工作经验', False)
def work_experience(self):
return enumerate([
{
'Time period': '2011.10-2012.08',
'Company Name': 'XX(北京)科技有限公司',
'Position': '运维开发工程师'
},
{
'Time period': '2009.10-2011.10',
'Company Name': 'XX(北京)科技有限公司',
'Position': '运维工程师'
},
])
@property
@colorprint('项目经验', False)
def project_experience(self):
return enumerate([
{
'Project': 'kvm远程管理系统',
'Description': ('前台(django)接手至其它同事并完成维护,'
'后台独立完成,用来创建,修改,删除kvm,查看状态信息等')
},
{
'Project': 'postfix群发邮件系统',
'Description': ('前台(tornado),为其它部门提供发送邮件的web端, '
'并作为数据收集服务端,前后台独立完成')
},
{
'Project': 'windows个人安全终端系统',
'Description': ('前后台和接收数据的socket服务器独立完成,'
'客户端图形编程使用qt')
},
{
'Project': '地推IDC质量测试系统',
'Description': ('还在代码实现中,前台flask, 数据接收服务准备'
'使用twisted,客户端为windows进程')
}
])
@property
@colorprint('@Where', False)
def findme(self):
return enumerate([
{
'Link': 'http://www.dongwm.com',
'Description': '个人技术博客'},
{
'Link': 'http://www.zhihu.com/people/dongweiming',
'Description': '知乎'},
{
'Link': 'http://youhouer.appspot.com',
'Description': '基于Google App Engine的前端网站'
}
])
def show(self):
prolist = [i for i in dir(self) if not i.startswith('_')
and not i.startswith('personal')]
self.personal_information
for pro in prolist:
getattr(self, pro)
if __name__ == '__main__':
resume = Resume()
resume.show()
print(resume.__module__)
|
py | 1a39baa494d39e90923ca12d815bfe522e7a1888 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
"""
Parse the startup (create-time) arguments of a Docker container/service.
require: python 3
author: Song yanlin
mail: [email protected]
date: 2021-12-26
"""
import os
from sys import argv
import docker
from docker.errors import APIError
def unit_converter(size: int) -> str or int:
"""存储单位转换
byte 转换 GB、MB、KB
:param size: 字节数
:return:
"""
if size <= 0:
return 0
if (size >> 30) > 0:
return f"{size >> 30}GB"
elif (size >> 20) > 0:
return f"{size >> 20}MB"
elif (size >> 10) > 0:
return f"{size >> 10}KB"
else:
return size
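# Quick illustration (values chosen for this sketch only):
#   unit_converter(0)        -> 0
#   unit_converter(2048)     -> '2KB'
#   unit_converter(3 << 20)  -> '3MB'
#   unit_converter(5 << 30)  -> '5GB'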
def get_user_methods_by_class_(cls) ->list:
"""获取指定类的用户自定义方法
:param cls: class
类实例
:return: list
用户定义的方法名列表.
返回的list元素为方法(fundtion),而非str型的方法名,是可以调用的方法对象。
"""
methds = []
for method in dir(cls):
if method.startswith('__'):
continue
if method.endswith('__'):
continue
if callable(getattr(cls, method)):
methds.append(getattr(cls, method))
return methds
def camel2connector(s: str):
"""驼峰字符转连接字符格式。
DriverOpts -> driver-opt
:param s:
:return:
"""
if len(s) <= 1:
return s.lower()
s_list = list(s)
for i in range(len(s_list)):
if i != 0 and ('A' <= s_list[i] <= 'Z'):
s_list[i] = s_list[i].lower()
s_list.insert(i, '-')
ss = "".join(s_list).lower()
if ss.endswith("s"):
ss = ss[:-1]
return ss
def file_mode_converter(num: int):
"""
Convert a decimal mode value to user:group:other mode.
0444 <--> 292
444 --> 100 100 100(2) --> 292(10)
:param num: decimal mode value
:return: user:group:other mode
    format: 0ugo
"""
# Extract the user, group and other permission bits
user = (num & 0b111000000) >> 6
group = (num & 0b111000) >> 3
other = num & 0b111
return f"0{user}{group}{other}"
def list_or_dict_to_ini(o, key: str):
"""list或dict对象转 initialization file 格式
:return:
"""
ini = ""
try:
if type(o) == list:
if o :
for i in o:
ini += f"{camel2connector(key)}={i},"
elif type(o) == dict:
for k in o:
ini += f"{camel2connector(key)}={k}={o[k]},"
# Strip the trailing ","
if ini and ini.endswith(","):
ini = ini[:-1]
except:
pass
return ini
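# Quick illustration (values chosen for this sketch only):
#   list_or_dict_to_ini(["node.role==manager"], "Constraints")
#       -> 'constraint=node.role==manager'
#   list_or_dict_to_ini({"com.example.key": "value"}, "Labels")
#       -> 'label=com.example.key=value'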
class MYDOCKER(object):
def __init__(self, service=None):
super(MYDOCKER, self).__init__()
if not os.path.exists("/var/run/docker.sock"):
self.help_msg()
exit(1)
self.client = docker.DockerClient(base_url='unix://var/run/docker.sock')
self.api_client = docker.APIClient(base_url='unix://var/run/docker.sock')
# service name or service id. type is str
self.service = service
self.inspect:dict = {}
self.docker_service_create = ""
self.options = {"kv": [], "k": []}
self.image = None # str
self.args = []
self.entity_info = {'type': None, 'name': None} # Options: container, service, stack
def get_services(self) -> list:
return self.api_client.services()
def _print_command(self):
"""
Usage: docker service create [OPTIONS] IMAGE [COMMAND] [ARG...]
:return: str
the full command used to create the service
"""
if self.entity_info['type'] == "stack":
print(f"This is a docker stack: {self.entity_info['name']}.")
print("Reverse stack to a compose file reference `https://hub.docker.com/repository/docker/cucker/stack2compose`")
print("docker service create command: ")
if not self.inspect:
return
options_key = ""
# if self.options['k']:
# options_key = "-"
# flag-style options (no value)
for k in self.options['k']:
options_key += f"{k} "
options_key = options_key.strip(" ")
# key-value style options
options_kv = ""
is_pretty = len(self.options['kv']) > 2
if is_pretty:
for dic in self.options['kv']:
_k = list(dic.keys())[0]
if _k.endswith('='):
options_kv += f" {_k}{dic[_k]} \\\n"
else:
options_kv += f" {_k} {dic[_k]} \\\n"
if options_key:
options = f"{options_key} \\\n {options_kv.lstrip(' ')}"
else:
options = f"{options_kv}".lstrip(" ")
else:
for dic in self.options['kv']:
_k = list(dic.keys())[0]
if _k.endswith('='):
options_kv += f"{_k}{dic[_k]} "
else:
options_kv += f"{_k} {dic[_k]} "
options = f"{options_key} {options_kv}".strip()
command = ""
if self.args:
# _args = " ".join(self.args[0])
_args = ""
for i in self.args[0]:
# Handle the case where a sub-command (e.g. after `sh -c "xxx"`) must be wrapped in quotes
if i.__contains__(' "'):
i = f"'{i}'"
elif i.__contains__(" '"):
i = f'"{i}"'
elif i.__contains__(" "):
i = f'"{i}"'
_args += f"{i} "
command = f"docker service creat {options} {self.image} {_args}"
else:
command = f"docker service creat {options} {self.image}"
print(command)
def check_entity_type(self):
"""判断传入的实体类型
:return:
"""
if not self.inspect:
return
if 'Spec' in list(self.inspect.keys()):
is_stack = False
try:
if self.inspect['Spec']['Labels']['com.docker.stack.namespace']:
is_stack = True
except:
pass
if is_stack:
self.entity_info['type'] = "stack"
self.entity_info['name'] = self.inspect['Spec']['Labels']['com.docker.stack.namespace']
else:
self.entity_info['type'] = "service"
def _get_inspect(self):
"""get service inspect
:return:
"""
try:
self.inspect = self.api_client.inspect_service(self.service)
except APIError as e:
print(e)
exit(-1)
def _parse_service_inspect(self):
if not self.entity_info['type']:
self.check_entity_type()
# if self.entity_info['type'] != "service":
# return
if not self.inspect:
return
# image
self.image: str = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Image'].split('@')[0]
if self.image.startswith('sha256:'):
self.image = self.image.split(':')[1]
# # name of service
# self.options['kv'].append(
# {'--name': self.inspect['Spec']['Name']}
# )
# parse options
obj = PARSE_OPTIONS(self.inspect, self.options, self.args)
for m in get_user_methods_by_class_(obj):
try:
m()
except:
pass
def get_network_name_by_id(self, network_id: str):
return self.api_client.inspect_network(network_id)['Name']
@staticmethod
def help_msg():
_MSG = """Usage:
# Command alias
echo "alias get_command_service='docker run --rm -v /var/run/docker.sock:/var/run/docker.sock cucker/get_command_by_service'" >> ~/.bashrc
. ~/.bashrc
## Execute command
## For all services
get_command_service {all}
## For one or more services
get_command_service <SERVICE> [SERVICE...]
"""
print(_MSG)
def start(self):
self._get_inspect()
self._parse_service_inspect()
self._print_command()
class PARSE_OPTIONS(object):
"""从service inspect信息中解析docker service create命令的options
"""
dock = MYDOCKER()
def __init__(self, inspect: dict, options: dict, args: list):
self.inspect = inspect
self.options = options
self.args = args
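        # The shared ``options`` dict collects parsed results in two buckets
        # (illustrative values only; real entries depend on the inspect data):
        #   options['kv'] -> list of one-entry dicts, e.g. {'--publish': '80:80'}
        #   options['k']  -> list of flag-only options, e.g. '-t' or '--init'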
# --name
    # the method name is prefixed with "_" so that it sorts first in dir(type)
def _name(self):
self.options['kv'].append(
{'--name': self.inspect['Spec']['Name']}
)
# --replicas, Number of tasks
def replicas(self):
if "Replicated" in list(self.inspect['Spec']['Mode'].keys()):
if self.inspect['Spec']['Mode']['Replicated']['Replicas'] !=1:
self.options['kv'].append(
{'--replicas': self.inspect['Spec']['Mode']['Replicated']['Replicas']}
)
# --mode, options: replicated, global, replicated-job, or global-job. replicated is the default.
# --max-concurrent
def mode(self):
mode: list = list(self.inspect['Spec']['Mode'].keys())
# global
if "Global" in mode:
self.options['kv'].append(
{'--mode': 'global'}
)
# replicated-job
"""
"Mode": {
"ReplicatedJob": {
"MaxConcurrent": 2,
"TotalCompletions": 10
}
},
"""
if "ReplicatedJob" in mode:
self.options['kv'].append(
{'--mode': 'replicated-job'}
)
# --max-concurrent
if self.inspect['Spec']['Mode']['ReplicatedJob']['MaxConcurrent'] != 1:
self.options['kv'].append({
'--max-concurrent': self.inspect['Spec']['Mode']['ReplicatedJob']['MaxConcurrent']
})
if self.inspect['Spec']['Mode']['ReplicatedJob']['TotalCompletions'] != 1:
self.options['kv'].append(
{'--replicas': self.inspect['Spec']['Mode']['ReplicatedJob']['TotalCompletions']}
)
# global-job
if "GlobalJob" in mode:
self.options['kv'].append({
'--mode': 'global-job'
})
# --publish, -p
def publish(self):
ports:list = self.inspect['Spec']['EndpointSpec']['Ports']
if ports:
for port in ports:
if port['PublishMode'] == "ingress":
if port['Protocol'] == "tcp":
p = f"{port['PublishedPort']}:{port['TargetPort']}"
else:
p = f"{port['PublishedPort']}:{port['TargetPort']}/{port['Protocol']}"
else:
p = f"published={port['PublishedPort']},target={port['TargetPort']},protocol={port['Protocol']},mode=host"
self.options['kv'].append(
{'--publish': p}
)
# --mount
def mount(self):
mounts: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Mounts']
for m in mounts:
try:
readonly = ""
keys_m = list(m.keys())
if "ReadOnly" in keys_m:
if m['ReadOnly']:
readonly = f",readonly=true"
else:
readonly = f",readonly=false"
v = ""
if "VolumeOptions" in keys_m:
if "DriverConfig" in list(m['VolumeOptions'].keys()) and m['VolumeOptions']['DriverConfig']:
v = f"type={m['Type']}{readonly},volume-driver={m['VolumeOptions']['DriverConfig']['Name']},source={m['Source']},destination={m['Target']}"
elif "Labels" in list(m['VolumeOptions'].keys()):
labels: dict = m['VolumeOptions']['Labels']
lab = ""
for _k in labels:
lab += f'volume-label="{_k}={labels[_k]}",'
if lab.endswith(","):
lab = lab[:-1]
v = f"type={m['Type']}{readonly},source={m['Source']},destination={m['Target']},{lab}"
else:
v = f"type={m['Type']}{readonly},source={m['Source']},destination={m['Target']}"
if v:
self.options['kv'].append(
{'--mount': v}
)
except:
pass
# --network
def network(self):
networks: list = self.inspect['Spec']['TaskTemplate']['Networks']
for net in networks:
if len(net.keys()) == 1:
v = PARSE_OPTIONS.dock.get_network_name_by_id(net['Target'])
else:
v = f"name={PARSE_OPTIONS.dock.get_network_name_by_id(net['Target'])}"
for k in net:
if k == "Target":
continue
v += f",{list_or_dict_to_ini(net[k], k)}"
self.options['kv'].append(
{'--network': v}
)
# --env , -e
# --env-file
def environment(self):
env: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Env']
for e in env:
self.options['kv'].append(
{'--env': e}
)
# --workdir, -w
def workdir(self):
dir: str = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Dir']
if dir:
self.options['kv'].append(
{'--workdir': dir}
)
# --constraint
def constraint(self):
constraints = self.inspect['Spec']['TaskTemplate']['Placement']['Constraints']
if not constraints:
return
for c in constraints:
self.options['kv'].append(
{'--constraint': c}
)
# --container-label
def container_label(self):
labels: dict = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Labels']
for k in labels:
self.options['kv'].append(
{'--container-label': f"{k}={labels[k]}"}
)
# --health-cmd
# --health-interval
# --health-retries
# --health-start-period
# --health-timeout
# --no-healthcheck
def health_check(self):
hc: dict = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Healthcheck']
# --health-cmd
try:
if hc['Test'][0] == "CMD-SHELL":
self.options['kv'].append(
{'--health-cmd': f'"{hc["Test"][1]}"'}
)
# --no-healthcheck
elif hc['Test'][0] == "NONE":
self.options['k'].append("--no-healthcheck")
except:
pass
# --health-interval
try:
if hc['Interval']:
self.options['kv'].append(
{'--health-interval': f"{int(hc['Interval'] / 10**9)}s"}
)
except:
pass
# --health-retries
try:
if hc['Retries']:
self.options['kv'].append(
{'--health-retries': hc['Retries']}
)
except:
pass
# --health-start-period
try:
if hc['StartPeriod']:
self.options['kv'].append(
{'--health-start-period': f"{int(hc['StartPeriod'] / 10**9)}s"}
)
except:
pass
# --health-timeout
if hc['Timeout']:
self.options['kv'].append(
{'--health-timeout': f"{int(hc['Timeout'] / 10**9)}s"}
)
# --secret
def secret(self):
secrets: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Secrets']
for sec in secrets:
v = ""
if sec['File']['UID'] == "0" and sec['File']['GID'] == "0":
if sec['File']['Mode'] == 292:
v = f"source={sec['SecretName']},target={sec['File']['Name']}"
else:
v = f"source={sec['SecretName']},target={sec['File']['Name']},mode={file_mode_converter(sec['File']['Mode'])}"
else:
if sec['File']['Mode'] == 292:
v = f"source={sec['SecretName']},target={sec['File']['Name']},uid={sec['File']['UID']}," \
f"gid={sec['File']['GID']}"
else:
v = f"source={sec['SecretName']},target={sec['File']['Name']},uid={sec['File']['UID']}," \
f"gid={sec['File']['GID']},mode={file_mode_converter(sec['File']['Mode'])}"
self.options['kv'].append(
{'--secret': v}
)
# --tty , -t
def tty(self):
tty = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['TTY']
if tty:
self.options['k'].append('-t')
# --cap-add
def cap_add(self):
caps: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['CapabilityAdd']
for cap in caps:
self.options['kv'].append(
{'--cap-add': cap}
)
# --cap-drop
def cap_drop(self):
caps = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['CapabilityDrop']
for cap in caps:
self.options['kv'].append(
{'--cap-drop': cap}
)
# --config
def config(self):
cs: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Configs']
for c in cs:
v = ""
if c['File']['UID'] == "0" and c['File']['GID'] == "0":
if c['File']['Mode'] == 292: # 292 --> mode=0444
if c['ConfigName'] == c['ConfigName']['File']['Name']:
v = c['ConfigName']
else:
v = f"source={c['ConfigName']},target={c['File']['Name']}"
else:
v = f"source={c['ConfigName']},target={c['File']['Name']},mode={file_mode_converter(c['File']['Mode'])}"
print(v)
else:
if c['File']['Mode'] == 292:
v = f"source={c['ConfigName']},target={c['File']['Name']},uid={c['File']['UID']},gid={c['File']['GID']}"
else:
v = f"source={c['ConfigName']},target={c['File']['Name']},uid={c['File']['UID']}," \
f"gid={c['File']['GID']},mode={file_mode_converter(c['File']['Mode'])}"
self.options['kv'].append(
{'--config': v}
)
# --detach , -d
# --dns
# --dns-option
# --dns-search
def dns_config(self):
dnsconfig: dict = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['DNSConfig']
if not dnsconfig:
return
keys = list(dnsconfig.keys())
## --dns
if "Nameservers" in keys:
for ns in dnsconfig['Nameservers']:
self.options['kv'].append(
{'--dns': f'"{ns}"'}
)
## --dns-search
if "Search" in keys:
for s in dnsconfig['Search']:
self.options['kv'].append(
{'--dns-search': s}
)
## --dns-option
if "Options" in keys:
for op in dnsconfig['Options']:
self.options['kv'].append(
{'--dns-option': op}
)
# --endpoint-mode, default is vip (vip or dnsrr)
def endpoint_mode(self):
if self.inspect['Spec']['EndpointSpec']['Mode'] != "vip":
self.options['kv'].append(
{'--endpoint-mode': self.inspect['Spec']['EndpointSpec']['Mode']}
)
# --entrypoint
def entrypoint(self):
containerSpec: dict = self.inspect['Spec']['TaskTemplate']['ContainerSpec']
if "Command" in list(containerSpec.keys()):
ep = " ".join(containerSpec['Command'])
if ep.__contains__(' "'):
v = f"'{ep}'"
elif ep.__contains__(" '"):
v = f'"{ep}"'
elif ep.__contains__(" "):
v = f'"{ep}"'
else:
v = ep
self.options['kv'].append(
{"--entrypoint": v}
)
# --generic-resource
def generic_resource(self):
grs: list = self.inspect['Spec']['TaskTemplate']['Resources']['Reservations']['GenericResources']
for gr in grs:
self.options['kv'].append(
{'--generic-resource': f'"{gr["DiscreteResourceSpec"]["Kind"]}={gr["DiscreteResourceSpec"]["Value"]}"'}
)
    # --group, the group must already exist on the host.
def group(self):
gs: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Groups']
for g in gs:
self.options['kv'].append(
{'--group': g}
)
# --host, Set one or more custom host-to-IP mappings (host:ip)
def host(self):
hosts = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Hosts']
for h in hosts:
h_split = h.split(" ")
self.options['kv'].append(
{'--host': f"{h_split[1]}:{h_split[0]}"}
)
# --hostname
def hostname(self):
if "Hostname" not in list(self.inspect['Spec']['TaskTemplate']['ContainerSpec'].keys()):
return
self.options['kv'].append(
{'--hostname': self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Hostname']}
)
# --init, Use an init inside each service container to forward signals and reap processes
def init(self):
if self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Init']:
self.options['k'].append("--init")
# --isolation
def isolation(self):
if self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Isolation'] != "default":
self.options['kv'].append(
{'--isolation': self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Isolation']}
)
# --label , -l
def label(self):
labels = self.inspect['Spec']['Labels']
if labels:
for k in labels:
self.options['kv'].append(
{'--label': f"{k}={labels[k]}"}
)
# --limit-cpu
# --limit-memory
# --limit-pids, Limit maximum number of processes (default 0 = unlimited)
def resources_limits(self):
rl: dict = self.inspect['Spec']['TaskTemplate']['Resources']['Limits']
## --limit-memory
keys = list(rl.keys())
if "MemoryBytes" in keys:
self.options['kv'].append(
{'--limit-memory': unit_converter(rl['MemoryBytes'])}
)
## --limit-cpu
if "NanoCPUs" in keys:
self.options['kv'].append(
{'--limit-cpu': rl['NanoCPUs'] / 10**9}
)
## --limit-pids
if "Pids" in keys:
self.options['kv'].append(
{'--limit-pids': rl['Pids']}
)
# --log-driver
# --log-opt
def log_driver(self):
logdriver: dict = self.inspect['Spec']['TaskTemplate']['LogDriver']
## --log-driver
if "Name" in list(logdriver.keys()):
self.options['kv'].append(
{'--log-driver': logdriver['Name']}
)
## --log-opt
if "Options" in list(logdriver.keys()):
for k in logdriver['Options']:
self.options['kv'].append(
{'--log-opt': f"{k}={logdriver['Options'][k]}"}
)
# --no-resolve-image
def no_resolve_image(self):
image = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Image']
if not image.__contains__("sha256:"):
self.options['k'].append("--no-resolve-image")
# --placement-pref
def placement_pref(self):
preferences: list = self.inspect['Spec']['TaskTemplate']['Placement']['Preferences']
for p in preferences:
v = ""
for k in p:
                # the key of the first key-value pair in p[k]
pk = list(p[k].keys())[0]
v += f"{camel2connector(k)}={p[k][pk]},"
if v.endswith(","):
v = v[:-1]
if not v:
continue
self.options['kv'].append(
{'--placement-pref': f'"{v}"'}
)
# -quiet, -q
# --read-only
def read_only(self):
if "ReadOnly" in list(self.inspect['Spec']['TaskTemplate']['ContainerSpec'].keys()):
self.options['k'].append("--read-only")
# --replicas-max-per-node, Maximum number of tasks per node (default 0 = unlimited)
def replicas_max_per_node(self):
if self.inspect['Spec']['TaskTemplate']['Placement']['MaxReplicas']:
self.options['kv'].append(
{'--replicas-max-per-node': self.inspect['Spec']['TaskTemplate']['Placement']['MaxReplicas']}
)
# --reserve-cpu
def reserve_cpu(self):
nc = self.inspect['Spec']['TaskTemplate']['Resources']['Reservations']['NanoCPUs']
self.options['kv'].append(
{'--reserve-cpu': nc / 10**9}
)
# --reserve-memory
def reserve_memory(self):
mb = self.inspect['Spec']['TaskTemplate']['Resources']['Reservations']['MemoryBytes']
self.options['kv'].append(
{'--reserve-memory': unit_converter(mb)}
)
# --restart-condition, Restart when condition is met ("none"|"on-failure"|"any") (default "any")
# --restart-delay, Delay between restart attempts (ns|us|ms|s|m|h) (default 5s)
# --restart-max-attempts, Maximum number of restarts before giving up
def restart_policy(self):
rp: dict = self.inspect['Spec']['TaskTemplate']['RestartPolicy']
## --restart-condition
if rp['Condition'] != "any":
self.options['kv'].append(
{'--restart-condition': rp['Condition']}
)
## --restart-delay
if rp['Delay'] != 5000000000:
self.options['kv'].append(
{'--restart-delay': f"{int(rp['Delay'] / 10**9)}s"}
)
## --restart-max-attempts
if rp['MaxAttempts'] != 0:
self.options['kv'].append(
{'--restart-max-attempts': rp['MaxAttempts']}
)
# --rollback-delay, Delay between task rollbacks (ns|us|ms|s|m|h) (default 0s)
# --rollback-failure-action, Action on rollback failure ("pause"|"continue") (default "pause")
# --rollback-max-failure-ratio
# --rollback-monitor, Duration after each task rollback to monitor for failure (ns|us|ms|s|m|h) (default 5s)
# --rollback-order, Rollback order ("start-first"|"stop-first") (default "stop-first")
# --rollback-parallelism, Maximum number of tasks rolled back simultaneously (0 to roll back all at once), The default value is 1
def rollback_config(self):
rc: dict = self.inspect['Spec']['RollbackConfig']
## --rollback-parallelism
if rc['Parallelism'] != 1:
self.options['kv'].append(
{'--rollback-parallelism': rc['Parallelism']}
)
## --rollback-failure-action
if rc['FailureAction'] != "pause":
self.options['kv'].append(
{'--rollback-failure-action': rc['FailureAction']}
)
## --rollback-monitor
if rc['Monitor'] != 5000000000:
self.options['kv'].append(
{'--rollback-monitor': f"{int(rc['Monitor'] / 10**9)}s"}
)
## --rollback-max-failure-ratio
if rc['MaxFailureRatio'] != 0:
self.options['kv'].append(
{'--rollback-max-failure-ratio': rc['MaxFailureRatio']}
)
## --rollback-order
if rc['Order'] != "stop-first":
self.options['kv'].append(
{'--rollback-order': rc['Order']}
)
## --rollback-delay
try:
if rc['Delay']:
self.options['kv'].append(
{'--rollback-delay': f"{int(rc['Delay'] / 10 ** 9)}s"}
)
except:
pass
# --stop-grace-period, Time to wait before force killing a container (ns|us|ms|s|m|h) (default 10s)
def stop_grace_period(self):
sgp = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['StopGracePeriod']
if sgp != 10000000000:
self.options['kv'].append(
{'--stop-grace-period': f"{int(sgp / 10**6)}ms"}
)
# --stop-signal
def stop_signal(self):
self.options['kv'].append(
{'--stop-signal': self.inspect['Spec']['TaskTemplate']['ContainerSpec']['StopSignal']}
)
# --sysctl
def sysctl(self):
sysctls: dict = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Sysctls']
for k in sysctls:
self.options['kv'].append(
{'--sysctl': f"{k}={sysctls[k]}"}
)
# --ulimit
def ulimit(self):
ulimits: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Ulimits']
for u in ulimits:
if u['Hard'] != u['Soft']:
v = f"{u['Soft']}:{u['Hard']}"
else:
v = u['Soft']
self.options['kv'].append(
{'--ulimit': f"{u['Name']}={v}"}
)
# --update-delay
# --update-parallelism, Maximum number of tasks updated simultaneously (0 to update all at once)
# --update-failure-action, Action on update failure ("pause"|"continue"|"rollback") (default "pause")
# --update-monitor
# --update-max-failure-ratio, Failure rate to tolerate during an update (default 0)
# --update-order, Update order ("start-first"|"stop-first") (default "stop-first")
def update_config(self):
uc: dict = self.inspect['Spec']['UpdateConfig']
## --update-parallelism
if uc['Parallelism'] != 1:
self.options['kv'].append(
{'--update-parallelism': uc['Parallelism']}
)
## --update-failure-action
if uc['FailureAction'] != "pause":
self.options['kv'].append(
{'--update-failure-action': uc['FailureAction']}
)
## --update-monitor
if uc['Monitor'] != 5000000000:
self.options['kv'].append(
{'--rollback-monitor': f"{int(uc['Monitor'] / 10 ** 9)}s"}
)
## --update-max-failure-ratio
if uc['MaxFailureRatio'] != 0:
self.options['kv'].append(
{'--update-max-failure-ratio': uc['MaxFailureRatio']}
)
## --update-order
if uc['Order'] != "stop-first":
self.options['kv'].append(
{'--update-order': uc['Order']}
)
## --update-delay
try:
if uc['Delay']:
self.options['kv'].append(
{'--update-delay': f"{int(uc['Delay'] / 10 ** 9)}s"}
)
except:
pass
# --user, -u, Username or UID (format: <name|uid>[:<group|gid>])
def user(self):
u = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['User']
self.options['kv'].append(
{'--user': u}
)
# --with-registry-auth
# Args, docker service create command args
def arguments(self):
li: list = self.inspect['Spec']['TaskTemplate']['ContainerSpec']['Args']
if li:
self.args.append(li)
if __name__ == '__main__':
if len(argv) < 2 or argv[1] == "--help":
MYDOCKER.help_msg()
exit(1)
    # show the docker service create command for every service
elif argv[1] == "{all}":
for serv in MYDOCKER().get_services():
print(f"=== service: {serv['Spec']['Name']} ===")
try:
MYDOCKER(serv['Spec']['Name']).start()
except:
pass
print("\n")
elif len(argv) > 2:
for s in argv[1:]:
print(f"=== service: {s} ===")
try:
MYDOCKER(s).start()
except:
pass
print("\n")
else:
mydocker = MYDOCKER(argv[1])
ret = mydocker.start() |
py | 1a39bb669b588cd65847ffe090346fbce66ec4d2 | # flake8: noqa
from typing import Tuple
import numpy as np
import pytest
from catalyst.metrics.functional._classification import (
f1score,
get_aggregated_metrics,
precision,
recall,
)
EPS = 1e-5
@pytest.mark.parametrize(
"tp,fp,zero_division,true_value",
((5, 3, 1, 0.625), (5, 5, 0, 0.5), (0, 0, 0, 0), (0, 0, 1, 1)),
)
def test_precision(tp: int, fp: int, zero_division: int, true_value: float):
"""
Test precision metric
Args:
tp: true positive statistic
fp: false positive statistic
zero_division: 0 or 1, value to return in case of zero division
true_value: true metric value
"""
precision_value = precision(tp=tp, fp=fp, zero_division=zero_division)
assert (precision_value - true_value) < EPS
@pytest.mark.parametrize(
"tp,fn,zero_division,true_value",
((5, 3, 1, 0.625), (5, 5, 0, 0.5), (0, 0, 0, 0), (0, 0, 1, 1)),
)
def test_recall(tp: int, fn: int, zero_division: int, true_value: float):
"""
Test recall metric
Args:
tp: true positive statistic
fn: false negative statistic
zero_division: 0 or 1, value to return in case of zero division
true_value: true metric value
"""
recall_value = recall(tp=tp, fn=fn, zero_division=zero_division)
assert (recall_value - true_value) < EPS
@pytest.mark.parametrize(
"precision_value,recall_value,true_value",
((0.8, 0.7, 0.746667), (0.5, 0.5, 0.5), (0.6, 0.4, 0.48)),
)
def test_f1score(precision_value: float, recall_value: float, true_value: float):
"""
Test f1 score
Args:
precision_value: precision value
recall_value: recall value
true_value: true metric value
"""
f1 = f1score(precision_value=precision_value, recall_value=recall_value)
assert abs(f1 - true_value) < EPS
@pytest.mark.parametrize(
"tp,fp,fn,support,zero_division,true_answer",
(
(
np.array([0.0, 1.0, 1.0, 0.0]),
np.array([0.0, 0.0, 0.0, 1.0]),
np.array([1.0, 0.0, 0.0, 0.0]),
np.array([1.0, 1.0, 1.0, 0.0]),
1,
(0.666667, 0.666667, 0.666667),
),
(
np.array([1.0, 2.0, 2.0, 0.0]),
np.array([0.0, 0.0, 0.0, 2.0]),
np.array([2.0, 0.0, 0.0, 0.0]),
np.array([3.0, 2.0, 2.0, 0.0]),
0,
(0.714286, 0.714286, 0.714286),
),
(
np.array([1.0, 2.0, 2.0, 0.0, 0.0, 0.0]),
np.array([0.0, 0.0, 0.0, 2.0, 1.0, 1.0]),
np.array([3.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
np.array([4.0, 2.0, 2.0, 0.0, 0.0, 1.0]),
0,
(0.555556, 0.555556, 0.555556),
),
),
)
def test_micro(
tp: np.array,
fp: np.array,
fn: np.array,
support: np.array,
zero_division: int,
true_answer: Tuple[float],
):
"""
Test micro metrics averaging
Args:
tp: true positive statistic
fp: false positive statistic
fn: false negative statistic
support: support statistic
zero_division: 0 or 1
true_answer: true metric value
"""
_, micro, _, _ = get_aggregated_metrics(
tp=tp, fp=fp, fn=fn, support=support, zero_division=zero_division
)
assert micro[-1] is None
for pred, real in zip(micro[:-1], true_answer):
assert abs(pred - real) < EPS
@pytest.mark.parametrize(
"tp,fp,fn,support,zero_division,true_answer",
(
(
np.array([0, 1, 3]),
np.array([1, 2, 3]),
np.array([2, 2, 2]),
np.array([2, 3, 5]),
0,
(0.277778, 0.311111, 0.292929),
),
(
np.array([1.0, 2.0, 1.0, 0.0, 1.0, 1.0]),
np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0]),
np.array([1.0, 1.0, 0.0, 0.0, 0.0, 0.0]),
np.array([2.0, 3.0, 1.0, 0.0, 1.0, 1.0]),
0,
(0.75, 0.694444, 0.688889),
),
(
np.array([0.0, 1.0, 1.0, 0.0]),
np.array([0.0, 0.0, 0.0, 1.0]),
np.array([1.0, 0.0, 0.0, 0.0]),
np.array([1.0, 1.0, 1.0, 0.0]),
1,
(0.75, 0.75, 0.5),
),
),
)
def test_macro_average(
tp: np.array,
fp: np.array,
fn: np.array,
support: np.array,
zero_division: int,
true_answer: Tuple[float],
):
"""
Test macro metrics averaging
Args:
tp: true positive statistic
fp: false positive statistic
fn: false negative statistic
support: support statistic
zero_division: 0 or 1
true_answer: true metric value
"""
_, _, macro, _ = get_aggregated_metrics(
tp=tp, fp=fp, fn=fn, support=support, zero_division=zero_division
)
assert macro[-1] is None
for pred, real in zip(macro[:-1], true_answer):
assert abs(pred - real) < EPS
@pytest.mark.parametrize(
"tp,fp,fn,support,zero_division,true_answer",
(
(
np.array([1.0, 2.0, 2.0, 0.0, 0.0, 0.0]),
np.array([0.0, 0.0, 0.0, 2.0, 1.0, 1.0]),
np.array([3.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
np.array([4.0, 2.0, 2.0, 0.0, 0.0, 1.0]),
0,
(0.888889, 0.555556, 0.622222),
),
(
np.array([1.0, 2.0, 2.0, 0.0]),
np.array([0.0, 0.0, 0.0, 2.0]),
np.array([2.0, 0.0, 0.0, 0.0]),
np.array([3.0, 2.0, 2.0, 0.0]),
0,
(1.0, 0.714286, 0.785714),
),
(
np.array([0.0, 1.0]),
np.array([1.0, 1.0]),
np.array([1.0, 1.0]),
np.array([1.0, 2.0]),
0,
(0.333333, 0.333333, 0.333333),
),
),
)
def test_weighted(
tp: np.array,
fp: np.array,
fn: np.array,
support: np.array,
zero_division: int,
true_answer: Tuple[float],
):
"""
Test weighted metrics averaging
Args:
tp: true positive statistic
fp: false positive statistic
fn: false negative statistic
support: support statistic
zero_division: 0 or 1
true_answer: true metric value
"""
_, _, _, weighted = get_aggregated_metrics(
tp=tp, fp=fp, fn=fn, support=support, zero_division=zero_division
)
assert weighted[-1] is None
for pred, real in zip(weighted[:-1], true_answer):
assert abs(pred - real) < EPS
|
py | 1a39bba0acca803d163de9e5a0d5982225c75fe9 | import numpy as np
N = 3
# the pulses corresponding to 0 and 90 degrees
servo_0_90 = np.array([
[1170, 2040],
[1410, 2274],
[1217, 2055]
])
servo_per_radian = (servo_0_90[:,1] - servo_0_90[:,0]) / np.radians(90)
servo_0 = servo_0_90[:,0]
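# Worked example for joint 0: the pulse value changes by 2040 - 1170 = 870 over
# 90 degrees, so servo_per_radian[0] = 870 / (pi/2) ~= 554 pulses per radian,
# with the 1170 pulse taken as the zero-angle offset.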
# the limits beyond which the servo can't tell the difference, and might cause
# damage
servo_limits = (550, 2300)
servo_angle_limits = (np.array(servo_limits) - servo_0[:,np.newaxis]) / servo_per_radian[:,np.newaxis]
# the length of the links, in meters
lengths = np.array([
0.125,
0.148,
0.149,
0.139
])
# These are the adc readings when a link is held horizontally, and the adjacent
# link allowed to fall under gravity. The heavier side of the joint is the one
# that should be held. By repeating the experiment with the setup upside down,
# we should get two torques that sum to zero, giving us the reading
# corresponding to zero torque
_adc_zero = np.array([
[515, 525],
[455, 600],
[519, 544]
])
adc_0 = _adc_zero.mean(axis=1)
# See Link 3 Data.txt for details.
adc_0[2] = 528.7845903
rad_per_adc = np.radians(0.368583536)
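# Converting a raw ADC reading into a joint-error angle therefore follows
#   error_rad = (adc_reading - adc_0) * rad_per_adc
# e.g. a reading of 560 on joint 0 gives (560 - 520) * rad_per_adc ~= 0.26 rad,
# which is the same conversion applied to the active limits below.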
# the limits which cannot be actively driven beyond
adc_active_lims = np.array([
(425, 590),
(422, 619),
(460, 616)
])
adc_passive_lims = np.array([(360, 680)] * 3)
error_active_lim = (adc_active_lims - adc_0[:,np.newaxis]) * rad_per_adc
# TODO: https://github.com/eric-wieser/4m20-coursework2/issues/12
com = lengths / 2
# TODO: https://github.com/eric-wieser/4m20-coursework2/issues/4
_total_mass = 0.40
masses = _total_mass * np.ones(4) / 4
max_torque = 0.250 * lengths[0] * 9.81
# Make everything read only!
for key, val in list(locals().items()):
if isinstance(val, np.ndarray):
val.setflags(write=False)
if __name__ == '__main__':
print('Config')
print('------')
print('servo_0:', servo_0)
print('servo_per_radian:', servo_per_radian)
print('servo_angle_limits:', servo_angle_limits)
print('adc_0:', adc_0)
print('rad_per_adc:', rad_per_adc)
print('adc_active_lims:', adc_active_lims)
print('error_active_lim:', error_active_lim)
print('lengths:', lengths)
print('masses:', masses)
print('max_torque:', max_torque) |
py | 1a39bbafe65021b1aae43755ccdce98d451927ef | import numpy as np
import pytest
import sys
from tempfile import TemporaryDirectory
import tensorflow as tf
from tensorflow.keras.layers import Dense, InputLayer
from typing import Callable
from alibi_detect.ad import AdversarialAE, ModelDistillation
from alibi_detect.cd import ChiSquareDrift, KSDrift, MMDDrift, TabularDrift
from alibi_detect.cd.preprocess import UAE
from alibi_detect.models.autoencoder import DecoderLSTM, EncoderLSTM
from alibi_detect.od import (IForest, LLR, Mahalanobis, OutlierAEGMM, OutlierVAE, OutlierVAEGMM,
OutlierProphet, SpectralResidual, OutlierSeq2Seq, OutlierAE)
from alibi_detect.utils.saving import save_detector, load_detector # type: ignore
input_dim = 4
latent_dim = 2
n_gmm = 2
threshold = 10.
samples = 6
seq_len = 10
p_val = .05
X_ref = np.random.rand(samples * input_dim).reshape(samples, input_dim)
X_ref_cat = np.tile(np.array([np.arange(samples)] * input_dim).T, (2, 1))
X_ref_mix = X_ref.copy()
X_ref_mix[:, 0] = np.tile(np.array(np.arange(samples // 2)), (1, 2)).T[:, 0]
# define encoder and decoder
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(input_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(latent_dim, activation=None)
]
)
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim,)),
Dense(5, activation=tf.nn.relu),
Dense(input_dim, activation=tf.nn.sigmoid)
]
)
kwargs = {'encoder_net': encoder_net,
'decoder_net': decoder_net}
preprocess_kwargs = {'model': UAE(encoder_net=encoder_net)}
gmm_density_net = tf.keras.Sequential(
[
InputLayer(input_shape=(latent_dim + 2,)),
Dense(10, activation=tf.nn.relu),
Dense(n_gmm, activation=tf.nn.softmax)
]
)
threshold_net = tf.keras.Sequential(
[
InputLayer(input_shape=(seq_len, latent_dim)),
Dense(5, activation=tf.nn.relu)
]
)
# define model
inputs = tf.keras.Input(shape=(input_dim,))
outputs = tf.keras.layers.Dense(2, activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
detector = [
AdversarialAE(threshold=threshold,
model=model,
**kwargs),
ModelDistillation(threshold=threshold,
model=model,
distilled_model=model),
IForest(threshold=threshold),
LLR(threshold=threshold, model=model),
Mahalanobis(threshold=threshold),
OutlierAEGMM(threshold=threshold,
gmm_density_net=gmm_density_net,
n_gmm=n_gmm,
**kwargs),
OutlierVAE(threshold=threshold,
latent_dim=latent_dim,
samples=samples,
**kwargs),
OutlierAE(threshold=threshold,
**kwargs),
OutlierVAEGMM(threshold=threshold,
gmm_density_net=gmm_density_net,
n_gmm=n_gmm,
latent_dim=latent_dim,
samples=samples,
**kwargs),
OutlierProphet(threshold=.7,
growth='logistic'),
SpectralResidual(threshold=threshold,
window_amp=10,
window_local=10),
OutlierSeq2Seq(input_dim,
seq_len,
threshold=threshold,
threshold_net=threshold_net,
latent_dim=latent_dim),
KSDrift(p_val=p_val,
X_ref=X_ref,
preprocess_X_ref=False,
preprocess_kwargs=preprocess_kwargs),
MMDDrift(p_val=p_val,
X_ref=X_ref,
preprocess_X_ref=False,
preprocess_kwargs=preprocess_kwargs,
n_permutations=10,
chunk_size=10),
ChiSquareDrift(p_val=p_val,
X_ref=X_ref_cat,
preprocess_X_ref=True),
TabularDrift(p_val=p_val,
X_ref=X_ref_mix,
categories_per_feature={0: None},
preprocess_X_ref=True)
]
n_tests = len(detector)
@pytest.fixture
def select_detector(request):
return detector[request.param]
@pytest.mark.parametrize('select_detector', list(range(n_tests)), indirect=True)
def test_save_load(select_detector):
det = select_detector
det_name = det.meta['name']
# save and load functionality does not work for OutlierProphet and Python 3.6.
# https://github.com/facebook/prophet/issues/1361
if sys.version_info.minor == 6 and isinstance(det, OutlierProphet):
return
with TemporaryDirectory() as temp_dir:
temp_dir += '/'
save_detector(det, temp_dir)
if isinstance(det, (KSDrift, MMDDrift)):
det_load = load_detector(temp_dir, **{'preprocess_kwargs': preprocess_kwargs})
else:
det_load = load_detector(temp_dir)
det_load_name = det_load.meta['name']
assert det_load_name == det_name
if not type(det_load) in [OutlierProphet, ChiSquareDrift, KSDrift, MMDDrift, TabularDrift]:
assert det_load.threshold == det.threshold == threshold
if type(det_load) in [OutlierVAE, OutlierVAEGMM]:
assert det_load.samples == det.samples == samples
if type(det_load) == AdversarialAE or type(det_load) == ModelDistillation:
for layer in det_load.model.layers:
assert not layer.trainable
if type(det_load) == MMDDrift:
assert det_load.infer_sigma
assert isinstance(det_load.permutation_test, Callable)
if type(det_load) == KSDrift:
assert det_load.n_features == latent_dim
if type(det_load) in [ChiSquareDrift, TabularDrift]:
assert isinstance(det_load.categories_per_feature, dict)
assert isinstance(det_load.X_ref_count, dict)
if type(det_load) == OutlierAEGMM:
assert isinstance(det_load.aegmm.encoder, tf.keras.Sequential)
assert isinstance(det_load.aegmm.decoder, tf.keras.Sequential)
assert isinstance(det_load.aegmm.gmm_density, tf.keras.Sequential)
assert isinstance(det_load.aegmm, tf.keras.Model)
assert det_load.aegmm.n_gmm == n_gmm
elif type(det_load) == OutlierVAEGMM:
assert isinstance(det_load.vaegmm.encoder.encoder_net, tf.keras.Sequential)
assert isinstance(det_load.vaegmm.decoder, tf.keras.Sequential)
assert isinstance(det_load.vaegmm.gmm_density, tf.keras.Sequential)
assert isinstance(det_load.vaegmm, tf.keras.Model)
assert det_load.vaegmm.latent_dim == latent_dim
assert det_load.vaegmm.n_gmm == n_gmm
elif type(det_load) in [AdversarialAE, OutlierAE]:
assert isinstance(det_load.ae.encoder.encoder_net, tf.keras.Sequential)
assert isinstance(det_load.ae.decoder.decoder_net, tf.keras.Sequential)
assert isinstance(det_load.ae, tf.keras.Model)
elif type(det_load) == ModelDistillation:
assert isinstance(det_load.model, tf.keras.Sequential) or isinstance(det_load.model, tf.keras.Model)
assert (isinstance(det_load.distilled_model, tf.keras.Sequential) or
isinstance(det_load.distilled_model, tf.keras.Model))
elif type(det_load) == OutlierVAE:
assert isinstance(det_load.vae.encoder.encoder_net, tf.keras.Sequential)
assert isinstance(det_load.vae.decoder.decoder_net, tf.keras.Sequential)
assert isinstance(det_load.vae, tf.keras.Model)
assert det_load.vae.latent_dim == latent_dim
elif type(det_load) == Mahalanobis:
assert det_load.clip is None
assert det_load.mean == det_load.C == det_load.n == 0
assert det_load.meta['detector_type'] == 'online'
elif type(det_load) == OutlierProphet:
assert det_load.model.interval_width == .7
assert det_load.model.growth == 'logistic'
assert det_load.meta['data_type'] == 'time-series'
elif type(det_load) == SpectralResidual:
assert det_load.window_amp == 10
assert det_load.window_local == 10
elif type(det_load) == OutlierSeq2Seq:
assert isinstance(det_load.seq2seq, tf.keras.Model)
assert isinstance(det_load.seq2seq.threshold_net, tf.keras.Sequential)
assert isinstance(det_load.seq2seq.encoder, EncoderLSTM)
assert isinstance(det_load.seq2seq.decoder, DecoderLSTM)
assert det_load.latent_dim == latent_dim
assert det_load.threshold == threshold
assert det_load.shape == (-1, seq_len, input_dim)
elif type(det_load) in [KSDrift, MMDDrift]:
assert det_load.p_val == p_val
assert (det_load.X_ref == X_ref).all()
assert isinstance(det_load.preprocess_fn, Callable)
assert det_load.preprocess_fn.func.__name__ == 'preprocess_drift'
elif type(det_load) in [ChiSquareDrift, TabularDrift]:
assert det_load.p_val == p_val
x = X_ref_cat.copy() if isinstance(det_load, ChiSquareDrift) else X_ref_mix.copy()
assert (det_load.X_ref == x).all()
elif type(det_load) == LLR:
assert isinstance(det_load.dist_s, tf.keras.Model)
assert isinstance(det_load.dist_b, tf.keras.Model)
assert not det_load.sequential
assert not det_load.has_log_prob
|
py | 1a39bf5788748d2312ca5228592e0fa6be571e34 | # Copyright 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.managers.platform import PlatformBase
class Espressif8266Platform(PlatformBase):
def configure_default_packages(self, variables, targets):
framework = variables.get("pioframework", [])
if "arduino" not in framework:
self.packages['toolchain-xtensa']['version'] = "~1.40802.0"
if "buildfs" in targets:
self.packages['tool-mkspiffs']['optional'] = False
return PlatformBase.configure_default_packages(
self, variables, targets)
def get_boards(self, id_=None):
result = PlatformBase.get_boards(self, id_)
if not result:
return result
if id_:
return self._add_upload_protocols(result)
else:
for key, value in result.items():
result[key] = self._add_upload_protocols(result[key])
return result
def _add_upload_protocols(self, board):
if not board.get("upload.protocols", []):
board.manifest['upload']['protocols'] = ["esptool", "espota"]
if not board.get("upload.protocol", ""):
board.manifest['upload']['protocol'] = "esptool"
return board
|
py | 1a39c0078b693a0c9b31acef520f6e2043c787db | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAX wrapper for Pubsub API requests."""
import functools
from google.cloud.gapic.pubsub.v1.publisher_api import PublisherApi
from google.cloud.gapic.pubsub.v1.subscriber_api import SubscriberApi
from google.gax import CallOptions
from google.gax import INITIAL_PAGE
from google.gax.errors import GaxError
from google.gax.grpc import exc_to_code
from google.pubsub.v1.pubsub_pb2 import PubsubMessage
from google.pubsub.v1.pubsub_pb2 import PushConfig
from grpc import insecure_channel
from grpc import StatusCode
from google.cloud._helpers import _to_bytes
from google.cloud._helpers import _pb_timestamp_to_rfc3339
from google.cloud.exceptions import Conflict
from google.cloud.exceptions import NotFound
from google.cloud.iterator import Iterator
from google.cloud.iterator import Page
from google.cloud.pubsub.topic import Topic
_FAKE_ITEMS_KEY = 'not-a-key'
class _PublisherAPI(object):
"""Helper mapping publisher-related APIs.
:type gax_api: :class:`google.pubsub.v1.publisher_api.PublisherApi`
:param gax_api: API object used to make GAX requests.
:type client: :class:`~google.cloud.pubsub.client.Client`
:param client: The client that owns this API object.
"""
def __init__(self, gax_api, client):
self._gax_api = gax_api
self._client = client
def list_topics(self, project, page_size=0, page_token=None):
"""List topics for the project associated with this API.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/list
:type project: str
:param project: project ID
:type page_size: int
:param page_size: maximum number of topics to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of topics. If not
passed, the API will return the first page of
topics.
:rtype: :class:`~google.cloud.iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.pubsub.topic.Topic`
accessible to the current API.
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
path = 'projects/%s' % (project,)
page_iter = self._gax_api.list_topics(
path, page_size=page_size, options=options)
page_iter = functools.partial(_recast_page_iterator, page_iter)
return Iterator(client=self._client, path=path,
item_to_value=_item_to_topic,
page_iter=page_iter)
def topic_create(self, topic_path):
"""API call: create a topic
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/create
:type topic_path: str
:param topic_path: fully-qualified path of the new topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:rtype: dict
:returns: ``Topic`` resource returned from the API.
:raises: :exc:`google.cloud.exceptions.Conflict` if the topic already
exists
"""
try:
topic_pb = self._gax_api.create_topic(topic_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:
raise Conflict(topic_path)
raise
return {'name': topic_pb.name}
def topic_get(self, topic_path):
"""API call: retrieve a topic
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/get
:type topic_path: str
:param topic_path: fully-qualified path of the topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:rtype: dict
:returns: ``Topic`` resource returned from the API.
:raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
exist
"""
try:
topic_pb = self._gax_api.get_topic(topic_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
return {'name': topic_pb.name}
def topic_delete(self, topic_path):
"""API call: delete a topic
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/create
:type topic_path: str
:param topic_path: fully-qualified path of the new topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
"""
try:
self._gax_api.delete_topic(topic_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
def topic_publish(self, topic_path, messages):
"""API call: publish one or more messages to a topic
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics/publish
:type topic_path: str
:param topic_path: fully-qualified path of the topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:type messages: list of dict
:param messages: messages to be published.
:rtype: list of string
:returns: list of opaque IDs for published messages.
:raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
exist
"""
options = CallOptions(is_bundling=False)
message_pbs = [_message_pb_from_mapping(message)
for message in messages]
try:
result = self._gax_api.publish(topic_path, message_pbs,
options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
return result.message_ids
def topic_list_subscriptions(self, topic_path, page_size=0,
page_token=None):
"""API call: list subscriptions bound to a topic
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics.subscriptions/list
:type topic_path: str
:param topic_path: fully-qualified path of the topic, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:type page_size: int
:param page_size: maximum number of subscriptions to return, If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of subscriptions.
If not passed, the API will return the first page
of subscriptions.
:rtype: list of strings
:returns: fully-qualified names of subscriptions for the supplied
topic.
:raises: :exc:`google.cloud.exceptions.NotFound` if the topic does not
exist
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
try:
page_iter = self._gax_api.list_topic_subscriptions(
topic_path, page_size=page_size, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(topic_path)
raise
subs = page_iter.next()
token = page_iter.page_token or None
return subs, token
class _SubscriberAPI(object):
"""Helper mapping subscriber-related APIs.
:type gax_api: :class:`google.pubsub.v1.publisher_api.SubscriberApi`
:param gax_api: API object used to make GAX requests.
"""
def __init__(self, gax_api):
self._gax_api = gax_api
def list_subscriptions(self, project, page_size=0, page_token=None):
"""List subscriptions for the project associated with this API.
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/list
:type project: str
:param project: project ID
:type page_size: int
:param page_size: maximum number of subscriptions to return, If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of subscriptions.
If not passed, the API will return the first page
of subscriptions.
:rtype: tuple, (list, str)
:returns: list of ``Subscription`` resource dicts, plus a
"next page token" string: if not None, indicates that
more topics can be retrieved with another call (pass that
value as ``page_token``).
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
path = 'projects/%s' % (project,)
page_iter = self._gax_api.list_subscriptions(
path, page_size=page_size, options=options)
subscriptions = [_subscription_pb_to_mapping(sub_pb)
for sub_pb in page_iter.next()]
token = page_iter.page_token or None
return subscriptions, token
def subscription_create(self, subscription_path, topic_path,
ack_deadline=None, push_endpoint=None):
"""API call: create a subscription
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/create
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type topic_path: str
:param topic_path: the fully-qualified path of the topic being
subscribed, in format
``projects/<PROJECT>/topics/<TOPIC_NAME>``.
:type ack_deadline: int
:param ack_deadline:
(Optional) the deadline (in seconds) by which messages pulled from
the back-end must be acknowledged.
:type push_endpoint: str
:param push_endpoint:
(Optional) URL to which messages will be pushed by the back-end.
If not set, the application must pull messages.
:rtype: dict
:returns: ``Subscription`` resource returned from the API.
"""
if push_endpoint is not None:
push_config = PushConfig(push_endpoint=push_endpoint)
else:
push_config = None
if ack_deadline is None:
ack_deadline = 0
try:
sub_pb = self._gax_api.create_subscription(
subscription_path, topic_path,
push_config=push_config, ack_deadline_seconds=ack_deadline)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:
raise Conflict(topic_path)
raise
return _subscription_pb_to_mapping(sub_pb)
def subscription_get(self, subscription_path):
"""API call: retrieve a subscription
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/get
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:rtype: dict
:returns: ``Subscription`` resource returned from the API.
"""
try:
sub_pb = self._gax_api.get_subscription(subscription_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
return _subscription_pb_to_mapping(sub_pb)
def subscription_delete(self, subscription_path):
"""API call: delete a subscription
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/delete
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
"""
try:
self._gax_api.delete_subscription(subscription_path)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def subscription_modify_push_config(self, subscription_path,
push_endpoint):
"""API call: update push config of a subscription
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyPushConfig
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type push_endpoint: str
:param push_endpoint:
(Optional) URL to which messages will be pushed by the back-end.
If not set, the application must pull messages.
"""
push_config = PushConfig(push_endpoint=push_endpoint)
try:
self._gax_api.modify_push_config(subscription_path, push_config)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def subscription_pull(self, subscription_path, return_immediately=False,
max_messages=1):
"""API call: retrieve messages for a subscription
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyPushConfig
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type return_immediately: bool
:param return_immediately: if True, the back-end returns even if no
messages are available; if False, the API
call blocks until one or more messages are
available.
:type max_messages: int
:param max_messages: the maximum number of messages to return.
:rtype: list of dict
:returns: the ``receivedMessages`` element of the response.
"""
try:
response_pb = self._gax_api.pull(
subscription_path, max_messages,
return_immediately=return_immediately)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
return [_received_message_pb_to_mapping(rmpb)
for rmpb in response_pb.received_messages]
def subscription_acknowledge(self, subscription_path, ack_ids):
"""API call: acknowledge retrieved messages
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyPushConfig
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type ack_ids: list of string
:param ack_ids: ack IDs of messages being acknowledged
"""
try:
self._gax_api.acknowledge(subscription_path, ack_ids)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def subscription_modify_ack_deadline(self, subscription_path, ack_ids,
ack_deadline):
"""API call: update ack deadline for retrieved messages
See:
https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions/modifyAckDeadline
:type subscription_path: str
:param subscription_path:
the fully-qualified path of the new subscription, in format
``projects/<PROJECT>/subscriptions/<SUB_NAME>``.
:type ack_ids: list of string
:param ack_ids: ack IDs of messages being acknowledged
:type ack_deadline: int
:param ack_deadline: the deadline (in seconds) by which messages pulled
from the back-end must be acknowledged.
"""
try:
self._gax_api.modify_ack_deadline(
subscription_path, ack_ids, ack_deadline)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(subscription_path)
raise
def _message_pb_from_mapping(message):
"""Helper for :meth:`_PublisherAPI.topic_publish`.
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
return PubsubMessage(data=_to_bytes(message['data']),
attributes=message['attributes'])
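# Illustrative input for the helper above (values are made up):
#   _message_pb_from_mapping({'data': b'hello', 'attributes': {'origin': 'test'}})
# returns PubsubMessage(data=b'hello', attributes={'origin': 'test'}).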
def _subscription_pb_to_mapping(sub_pb):
"""Helper for :meth:`list_subscriptions`, et aliae
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
mapping = {
'name': sub_pb.name,
'topic': sub_pb.topic,
'ackDeadlineSeconds': sub_pb.ack_deadline_seconds,
}
if sub_pb.push_config.push_endpoint != '':
mapping['pushConfig'] = {
'pushEndpoint': sub_pb.push_config.push_endpoint,
}
return mapping
def _message_pb_to_mapping(message_pb):
"""Helper for :meth:`pull`, et aliae
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
return {
'messageId': message_pb.message_id,
'data': message_pb.data,
'attributes': message_pb.attributes,
'publishTime': _pb_timestamp_to_rfc3339(message_pb.publish_time),
}
def _received_message_pb_to_mapping(received_message_pb):
"""Helper for :meth:`pull`, et aliae
Performs "impedance matching" between the protobuf attrs and the keys
expected in the JSON API.
"""
return {
'ackId': received_message_pb.ack_id,
'message': _message_pb_to_mapping(
received_message_pb.message),
}
def make_gax_publisher_api(connection):
"""Create an instance of the GAX Publisher API.
If the ``connection`` is intended for a local emulator, then
an insecure ``channel`` is created pointing at the local
Pub / Sub server.
:type connection: :class:`~google.cloud.pubsub.connection.Connection`
:param connection: The connection that holds configuration details.
:rtype: :class:`~google.cloud.pubsub.v1.publisher_api.PublisherApi`
:returns: A publisher API instance with the proper connection
configuration.
"""
channel = None
if connection.in_emulator:
channel = insecure_channel(connection.host)
return PublisherApi(channel=channel)
def make_gax_subscriber_api(connection):
"""Create an instance of the GAX Subscriber API.
If the ``connection`` is intended for a local emulator, then
an insecure ``channel`` is created pointing at the local
Pub / Sub server.
:type connection: :class:`~google.cloud.pubsub.connection.Connection`
:param connection: The connection that holds configuration details.
:rtype: :class:`~google.cloud.pubsub.v1.subscriber_api.SubscriberApi`
:returns: A subscriber API instance with the proper connection
configuration.
"""
channel = None
if connection.in_emulator:
channel = insecure_channel(connection.host)
return SubscriberApi(channel=channel)
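# Illustrative wiring of the helpers above (assumes ``connection`` is a configured
# ``google.cloud.pubsub.connection.Connection`` and ``client`` a Pub/Sub client):
#
#   publisher_api = _PublisherAPI(make_gax_publisher_api(connection), client)
#   subscriber_api = _SubscriberAPI(make_gax_subscriber_api(connection))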
def _item_to_topic(iterator, resource):
"""Convert a JSON job to the native object.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type resource: :class:`google.pubsub.v1.pubsub_pb2.Topic`
:param resource: A topic returned from the API.
:rtype: :class:`~google.cloud.pubsub.topic.Topic`
:returns: The next topic in the page.
"""
return Topic.from_api_repr(
{'name': resource.name}, iterator.client)
def _recast_page_iterator(page_iter, iterator):
"""Wrap GAX pages generator.
In particular, wrap each page and capture some state from the
GAX iterator.
Yields :class:`~google.cloud.iterator.Page` instances
:type page_iter: :class:`~google.gax.PageIterator`
:param page_iter: The iterator to wrap.
:type iterator: :class:`~google.cloud.iterator.Iterator`
:param iterator: The iterator that owns each page.
"""
for items in page_iter:
fake_response = {_FAKE_ITEMS_KEY: items}
page = Page(
iterator, fake_response, _FAKE_ITEMS_KEY, _item_to_topic)
iterator.next_page_token = page_iter.page_token or None
iterator.num_results += page.num_items
yield page
|
py | 1a39c03c1fbcacc9d2f0a1be6c3bed46c3cd10db | version_number = '0.0.7'
document_revision = 'D'
release_date = '2020-12-9'
if __name__ == '__main__':
print(version_number)
|
py | 1a39c0c37842a2cec3a7a68aa9431079b7fedde6 | '''
Created on March 16, 2020
@authors: [email protected]; [email protected]
'''
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from spectrum_etl.config import default_config
import pprint
import requests
import codecs
import json
import logging.config
from spectrum_etl.data_integration import validation
import sys
pp = pprint.PrettyPrinter(indent=4)
class Integration(object):
'''
This is an experimental class for the integration of genomic data and pathology data from eLabInventory
and REDCap respectively.
'''
def __init__(self):
#self.extract_hne_table()
#self.extract_scrna_table()
self.__validate()
#self.filter()
def clean_json(self, json_str):
'''
        Remove all carriage returns and newlines from the specified json string.
        :param json_str: json string with \r, \n
        :returns: cleaned json string
'''
json_str = json_str.replace('\r', '')
json_str = json_str.replace('\n', '')
return json_str
def extract_scrna_table(self, sample_sets=None):
'''
Extract SCRNA table from elab.
'''
headers = {'Authorization': default_config.get_elab_api_token(), "Host": default_config.get_elab_host_url()}
# get sample count
response = requests.get(default_config.get_elab_api_url()+'sampleSeries?$records=1', headers=headers)
total_records = response.json()['totalRecords']
# get all sample meta meta data
page = 0
sample_sets = []
while len(sample_sets) != total_records:
response = requests.get(default_config.get_elab_api_url() + 'sampleSeries?$page='+str(page), headers=headers)
sample_sets += response.json()['data']
page += 1
assert len(sample_sets) == total_records
# filter by patient subset
patient_subset = []
pt_id_list = []
for ii in range(1,73): # patients 1 to 72
id = 'SPECTRUM-OV-0' + str("{:02d}".format(ii))
pt_id_list.append(id)
for sample in sample_sets:
if sample['name'] in pt_id_list:
patient_subset.append(sample)
logger.info("attempting to get data for "+str(len(patient_subset))+" patients...")
# get all sample meta data for patient subset
#elab_sample_data = []
elab_metadata = []
for patient in patient_subset:
sample_ids = patient["sampleIDs"]
logger.info("getting data for patient "+patient['name'])
for sample_id in sample_ids:
response = requests.get(default_config.get_elab_api_url()+'samples/{sampleid}'.format(sampleid=sample_id), headers=headers)
# filter sample meta data by Tissue samples only
if response.json()["sampleType"]["name"] == "Tissue":
response = requests.get(default_config.get_elab_api_url() + 'samples/{sampleid}/meta'.format(sampleid=sample_id),headers=headers)
all_elab_metadata = response.json()
# pull key value attributes only from elab API pull
data = {}
for meta in all_elab_metadata['data']:
if 'value' not in meta.keys():
data[meta['key']] = meta['files'][0]['name']
else:
data[meta['key']] = meta['value']
elab_metadata.append(data)
# break # just collect 1 since it takes time to collect all
with open("elab_metadata", 'w') as outfile:
jstr = json.dumps(elab_metadata, sort_keys=True,
indent=2, separators=(',', ': '))
outfile.write(jstr)
# pp.pprint(elab_metadata)
# run validation code on API pull
def __validate(self):
with open("elab_metadata", "r") as read_file:
elab_metadata = json.load(read_file)
allPass = True
for row in elab_metadata:
if not validation.is_pt_id_valid(row):
allPass = False
if not validation.is_mrn_valid(row):
allPass = False
if not validation.is_surgery_id_valid(row):
allPass = False
if not validation.is_patient_excluded(row):
allPass = False
if not validation.is_specimen_site_valid(row):
allPass = False
if not validation.is_downstream_submission_valid(row):
allPass = False
if not validation.is_seq_info_valid(row):
allPass = False
if not validation.is_submitted_populations_valid(row):
allPass = False
if not validation.is_scrna_igo_id_valid(row):
allPass = False
if not validation.is_scrna_igo_sub_id_valid(row):
allPass = False
if not validation.is_scrna_rex_id_valid(row):
allPass = False
if not validation.is_qc_checks_valid(row):
allPass = False
if not validation.is_dlp_rex_id_valid(row):
allPass = False
if not validation.is_bccrc_dlp_sample_id_valid(row):
allPass = False
if not validation.is_wgs_tissue_type_valid(row):
allPass = False
if not validation.is_ppbc_acc_num_valid(row):
allPass = False
if not validation.is_ppbc_bank_num_valid(row):
allPass = False
if not validation.is_wgs_igo_id_valid(row):
allPass = False
if not validation.is_wgs_igo_submission_id_valid(row):
allPass = False
if not validation.is_wgs_rex_id_valid(row):
allPass = False
if not validation.is_if_tissue_type_valid(row):
allPass = False
if allPass == False:
sys.exit(1)
# filter out metadata fields with blanks and patients excluded from study
    def filter(self):
        with open("elab_metadata", "r") as read_file:
            elab_metadata = json.load(read_file)
        meta_noBlanks = []
        meta_onStudy = []
        for row in elab_metadata:
            # drop metadata fields that have blank values
            row_noBlanks = {key: value for key, value in row.items() if value != ""}
            meta_noBlanks.append(row_noBlanks)
            # keep only patients that have not been excluded from the study
            if row_noBlanks.get("Excluded") == "No":
                meta_onStudy.append(row_noBlanks)
        return meta_onStudy
# remove all meta data fields without values
# for meta in sample_meta['data']:
# if ('value' in meta.keys()) and (meta['value'] != ""):
# data[meta['key']] = meta['value']
#
# elab_sample_data.append(data)
# filter sample meta data for patients/sites we have scRNA seq data
# for sample_metadata in elab_sample_data:
# if 'QC Checks' in sample_metadata.keys():
# if sample_metadata['Excluded'] == "No":
# filtered_elab_sample_data.append(sample_metadata)
# break # just collect 1 since it takes time to collect all
def extract_hne_table(self):
'''
Extract GYN Pathology table from REDCap.
'''
hne_metadata = []
for ii in range(1,73): # patients 1-72
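            # query the REDCap API for the gyn_pathology form of one patient per request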
data = {
'token': default_config.get_redcap_token(instance_name="production"),
'content': 'record',
'format': 'json',
'type': 'flat',
'records[0]': 'SPECTRUM-OV-0'+str("{:02d}".format(ii)),
'fields[0]': 'patient_id',
'forms[1]': 'gyn_pathology',
'events[0]': 'tissue_collection_arm_1',
'rawOrLabel': 'raw',
'rawOrLabelHeaders': 'raw',
'exportCheckboxLabel': 'false',
'exportSurveyFields': 'false',
'exportDataAccessGroups': 'false',
'returnFormat': 'json'
}
response = requests.post(url=default_config.get_redcap_api_url(), data=data)
path_metadata = response.json()
# filter metadata for samples with values
for meta in path_metadata:
filtered_meta = {}
for k, v in meta.items():
if v != "":
filtered_meta[k] = v
hne_metadata.append(filtered_meta)
pp.pprint("got patient "+str(ii))
# export filtered metadata as a json file
with open("hne_metadata", 'w') as outfile:
jstr = json.dumps(hne_metadata, sort_keys=True,
indent=2, separators=(',', ': '))
outfile.write(jstr)
if __name__ == '__main__':
# create an initial logger. It will only log to console and it will disabled
# when we read the logging configuration from the config file.
# This logger can be useful when we need early logging. E.g. we may want to log
# the location of the JSON file (e.g. if we get it from a CLI argument).
logging.basicConfig(level="INFO")
logger = logging.getLogger()
logger.info("This is the logger configured by `logging.basicConfig()`.")
# Load the configuration.
config_file = "config_logging.json"
with codecs.open(config_file, "r", encoding="utf-8") as fd:
config = json.load(fd)
# Set up proper logging. This one disables the previously configured loggers.
logging.config.dictConfig(config["logging"])
Integration() |
py | 1a39c0fff87528df028d60ff3fff7400428ffcee | """This module provides classes that make up an issue report."""
import logging
import json
import operator
from jinja2 import PackageLoader, Environment
from typing import Dict, List
import hashlib
from mythril.solidity.soliditycontract import SolidityContract
from mythril.analysis.swc_data import SWC_TO_TITLE
from mythril.support.source_support import Source
from mythril.support.start_time import StartTime
from mythril.support.support_utils import get_code_hash
from mythril.support.signatures import SignatureDB
from time import time
log = logging.getLogger(__name__)
class Issue:
"""Representation of an issue and its location."""
def __init__(
self,
contract,
function_name,
address,
swc_id,
title,
bytecode,
gas_used=(None, None),
severity=None,
description_head="",
description_tail="",
transaction_sequence=None,
):
"""
:param contract: The contract
:param function_name: Function name where the issue is detected
:param address: The address of the issue
:param swc_id: Issue's corresponding swc-id
:param title: Title
:param bytecode: bytecode of the issue
:param gas_used: amount of gas used
:param severity: The severity of the issue
:param description_head: The top part of description
:param description_tail: The bottom part of the description
        :param transaction_sequence: The transaction sequence
"""
self.title = title
self.contract = contract
self.function = function_name
self.address = address
self.description_head = description_head
self.description_tail = description_tail
self.description = "%s\n%s" % (description_head, description_tail)
self.severity = severity
self.swc_id = swc_id
self.min_gas_used, self.max_gas_used = gas_used
self.filename = None
self.code = None
self.lineno = None
self.source_mapping = None
self.discovery_time = time() - StartTime().global_start_time
self.bytecode_hash = get_code_hash(bytecode)
self.transaction_sequence = transaction_sequence
@property
def transaction_sequence_users(self):
""" Returns the transaction sequence without pre-generated block data"""
return self.transaction_sequence
@property
def transaction_sequence_jsonv2(self):
""" Returns the transaction sequence as a json string with pre-generated block data"""
return (
self.add_block_data(self.transaction_sequence)
if self.transaction_sequence
else None
)
@staticmethod
def add_block_data(transaction_sequence: Dict):
""" Adds sane block data to a transaction_sequence """
for step in transaction_sequence["steps"]:
step["gasLimit"] = "0x7d000"
step["gasPrice"] = "0x773594000"
step["blockCoinbase"] = "0xcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcbcb"
step["blockDifficulty"] = "0xa7d7343662e26"
step["blockGasLimit"] = "0x7d0000"
step["blockNumber"] = "0x66e393"
step["blockTime"] = "0x5bfa4639"
return transaction_sequence
@property
def as_dict(self):
"""
:return:
"""
issue = {
"title": self.title,
"swc-id": self.swc_id,
"contract": self.contract,
"description": self.description,
"function": self.function,
"severity": self.severity,
"address": self.address,
"tx_sequence": self.transaction_sequence,
"min_gas_used": self.min_gas_used,
"max_gas_used": self.max_gas_used,
"sourceMap": self.source_mapping,
}
if self.filename and self.lineno:
issue["filename"] = self.filename
issue["lineno"] = self.lineno
if self.code:
issue["code"] = self.code
return issue
def _set_internal_compiler_error(self):
"""
Adds the false positive to description and changes severity to low
"""
self.severity = "Low"
self.description_tail += (
" This issue is reported for internal compiler generated code."
)
self.description = "%s\n%s" % (self.description_head, self.description_tail)
self.code = ""
def add_code_info(self, contract):
"""
:param contract:
"""
if self.address and isinstance(contract, SolidityContract):
codeinfo = contract.get_source_info(
self.address, constructor=(self.function == "constructor")
)
self.filename = codeinfo.filename
self.code = codeinfo.code
self.lineno = codeinfo.lineno
if self.lineno is None:
self._set_internal_compiler_error()
self.source_mapping = codeinfo.solc_mapping
else:
self.source_mapping = self.address
def resolve_function_names(self):
""" Resolves function names for each step """
if (
self.transaction_sequence is None
or "steps" not in self.transaction_sequence
):
return
signatures = SignatureDB()
for step in self.transaction_sequence["steps"]:
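            # the first ten characters of the calldata ("0x" plus the 4-byte selector) identify the called function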
_hash = step["input"][:10]
try:
sig = signatures.get(_hash)
if len(sig) > 0:
step["name"] = sig[0]
else:
step["name"] = "unknown"
except ValueError:
step["name"] = "unknown"
class Report:
"""A report containing the content of multiple issues."""
environment = Environment(
loader=PackageLoader("mythril.analysis"), trim_blocks=True
)
def __init__(self, contracts=None, exceptions=None):
"""
:param contracts:
:param exceptions:
"""
self.issues = {}
self.solc_version = ""
self.meta = {}
self.source = Source()
self.source.get_source_from_contracts_list(contracts)
self.exceptions = exceptions or []
def sorted_issues(self):
"""
:return:
"""
issue_list = [issue.as_dict for key, issue in self.issues.items()]
return sorted(issue_list, key=operator.itemgetter("address", "title"))
def append_issue(self, issue):
"""
:param issue:
"""
m = hashlib.md5()
m.update((issue.contract + str(issue.address) + issue.title).encode("utf-8"))
issue.resolve_function_names()
self.issues[m.digest()] = issue
def as_text(self):
"""
:return:
"""
name = self._file_name()
template = Report.environment.get_template("report_as_text.jinja2")
return template.render(filename=name, issues=self.sorted_issues())
def as_json(self):
"""
:return:
"""
result = {"success": True, "error": None, "issues": self.sorted_issues()}
return json.dumps(result, sort_keys=True)
def _get_exception_data(self) -> dict:
if not self.exceptions:
return {}
logs = [] # type: List[Dict]
for exception in self.exceptions:
logs += [{"level": "error", "hidden": "true", "msg": exception}]
return {"logs": logs}
def as_swc_standard_format(self):
"""Format defined for integration and correlation.
:return:
"""
_issues = []
for key, issue in self.issues.items():
idx = self.source.get_source_index(issue.bytecode_hash)
try:
title = SWC_TO_TITLE[issue.swc_id]
except KeyError:
title = "Unspecified Security Issue"
extra = {"discoveryTime": int(issue.discovery_time * 10 ** 9)}
if issue.transaction_sequence_jsonv2:
extra["testCases"] = [issue.transaction_sequence_jsonv2]
_issues.append(
{
"swcID": "SWC-" + issue.swc_id,
"swcTitle": title,
"description": {
"head": issue.description_head,
"tail": issue.description_tail,
},
"severity": issue.severity,
"locations": [{"sourceMap": "%d:1:%d" % (issue.address, idx)}],
"extra": extra,
}
)
meta_data = self._get_exception_data()
result = [
{
"issues": _issues,
"sourceType": self.source.source_type,
"sourceFormat": self.source.source_format,
"sourceList": self.source.source_list,
"meta": meta_data,
}
]
return json.dumps(result, sort_keys=True)
def as_markdown(self):
"""
:return:
"""
filename = self._file_name()
template = Report.environment.get_template("report_as_markdown.jinja2")
return template.render(filename=filename, issues=self.sorted_issues())
def _file_name(self):
"""
:return:
"""
if len(self.issues.values()) > 0:
return list(self.issues.values())[0].filename
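
# Typical usage (illustrative sketch, not part of the original module): detection
# code builds Issue objects, appends them to a Report, and serializes the result.
#
#     report = Report(contracts=[contract])
#     report.append_issue(issue)
#     print(report.as_json())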
|
py | 1a39c16aa416635b75c24244adf83bed92fd20f3 |
import os
from flask_babel import _
from flask_login import current_user, login_required
from flask import render_template, redirect, url_for, flash, request,abort
from app.main.forms import PostForm, ProfileForm, BeginForm, MiddleForm, FinalForm
from app.main import bp
from app.models import User, Post
from ext import mongo
from werkzeug.urls import url_parse
from werkzeug.utils import secure_filename
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
@login_required
def index():
if current_user.get_admin():
return redirect(url_for('admin.index'))
return render_template('Index.html')
@bp.route('/post', methods=['GET', 'POST'])
def post():
if not current_user.get_admin():
return redirect(url_for('index'))
form = PostForm()
if form.validate_on_submit():
        stage = int(form.stage.data)
users = mongo.db.users
user = users.find_one({'name': form.username.data})
user['projects'][stage]['passed'] = True
users.save(user)
return redirect(url_for('profile', username=form.username.data))
return abort(404)
@bp.route('/profile/<username>', methods=['GET', 'POST'])
@login_required
def profile(username):
if not current_user.get_admin():
return redirect(url_for('index'))
form = ProfileForm()
users = mongo.db.users
user = users.find_one({'name': username})
    if not user or user['post_num'] < 1:
        abort(404)
    if 'pass' not in user.keys():
        user['pass'] = False
        mongo.db.users.save(user)
    post = user['pass']
return render_template(
'Profile.html',
forms=user['posts'],
form=form,
post=post,
username=username,
admin=False)
@bp.route('/info/<page>')
@login_required
def info(page):
if page not in ['college', "class", "money", "preresult", "proccess", "accept", "finish", "eval"]:
abort(404)
    return 'This feature is still under development'
@bp.route('/input_0', methods=['GET', 'POST'])
@login_required
def input_0():
if current_user.get_admin():
return redirect(url_for('admin'))
return render_template('waitting.html')
@bp.route('/input_1', methods=['GET', 'POST'])
@login_required
def input_1():
if current_user.get_admin():
return redirect(url_for('admin'))
if current_user.get_post_num() > 0:
return redirect(url_for('input_0'))
form = BeginForm()
if form.validate_on_submit():
file = form.__class__.__name__ + '-'+secure_filename(
form.upload.data.filename)
file_path = current_user.path
if not os.path.exists(file_path):
os.makedirs(file_path)
filedata = os.listdir(file_path)
if file not in filedata:
filedata.append(file)
form.upload.data.save(file_path + '/' + file)
post = {
'project': form.project.data,
'person': form.person.data,
'money': form.money.data,
'post': form.post.data,
'upload': filedata,
}
p = Post(current_user.name, post_1=post)
p.submit()
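        # post_num tracks submission progress; it becomes 1, 3 and 7 after stages 1, 2 and 3 respectively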
current_user.set_post_num(1)
return redirect(url_for('input_0'))
return render_template('BeginForm.html', title='项目申请', form=form)
@bp.route('/input_2', methods=['GET', 'POST'])
@login_required
def input_2():
if current_user.get_admin():
return redirect(url_for('admin'))
if current_user.get_post_num() > 1:
return redirect(url_for('input_0'))
form = MiddleForm()
if form.validate_on_submit():
file = form.__class__.__name__ + '-' + secure_filename(
form.upload.data.filename)
file_path = current_user.path
if not os.path.exists(file_path):
os.makedirs(file_path)
filedata = os.listdir(file_path)
if file not in filedata:
filedata.append(file)
form.upload.data.save(file_path + '/' + file)
post = {
'schedule': form.schedule.data,
'preview': form.preview.data,
'post': form.post.data,
'upload': filedata,
}
p = Post(current_user.name, post_2=post)
p.submit()
current_user.set_post_num(3)
return redirect(url_for('input_0'))
return render_template('MiddleForm.html', title='中期检查', form=form)
@bp.route('/input_3', methods=['GET', 'POST'])
@login_required
def input_3():
if current_user.get_admin():
return redirect(url_for('admin'))
if current_user.get_post_num() > 3:
return redirect(url_for('input_0'))
form = FinalForm()
if form.validate_on_submit():
file = form.__class__.__name__ + '-' + secure_filename(
form.upload.data.filename)
file_path = current_user.path
if not os.path.exists(file_path):
os.makedirs(file_path)
filedata = os.listdir(file_path)
if file not in filedata:
filedata.append(file)
form.upload.data.save(file_path + '/' + file)
post = {
'change': form.change.data,
'achievement': form.achievement.data,
'post': form.post.data,
'upload': filedata,
}
p = Post(current_user.name, post_3=post)
p.submit()
current_user.set_post_num(7)
return redirect(url_for('input_0'))
return render_template('FinalForm.html', title='成果验收', form=form)
|
py | 1a39c23bf5e54e7d8c4f090e9c780cc45c3bca9a | from abc import abstractmethod, ABC
class WidgetBase(ABC):
def __init__(self, win, x, y, width, height):
""" Base for all widgets
:param win: Surface on which to draw
:type win: pygame.Surface
:param x: X-coordinate of top left
:type x: int
:param y: Y-coordinate of top left
:type y: int
:param width: Width of button
:type width: int
:param height: Height of button
:type height: int
"""
self.win = win
self.x = x
self.y = y
self.width = width
self.height = height
self.hidden = False
@abstractmethod
def listen(self, events):
pass
@abstractmethod
def draw(self):
pass
def contains(self, x, y):
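        # True when (x, y) lies strictly inside the widget's bounding box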
return self.x < x < self.x + self.width and self.y < y < self.y + self.height
def hide(self):
self.hidden = True
def show(self):
self.hidden = False
def moveX(self, x):
self.x += x
def moveY(self, y):
self.y += y
def get(self, attr):
"""Default setter for any attributes. Call super if overriding
:param attr: Attribute to get
:return: Value of the attribute
"""
if attr == 'x':
return self.x
if attr == 'y':
return self.y
if attr == 'width':
return self.width
if attr == 'height':
return self.height
def getX(self):
return self.x
def getY(self):
return self.y
def getWidth(self):
return self.width
def getHeight(self):
return self.height
def set(self, attr, value):
"""Default setter for any attributes. Call super if overriding
:param attr: Attribute to set
:param value: Value to set
"""
if attr == 'x':
self.x = value
if attr == 'y':
self.y = value
if attr == 'width':
self.width = value
if attr == 'height':
self.height = value
def setX(self, x):
self.x = x
def setY(self, y):
self.y = y
def setWidth(self, width):
self.width = width
def setHeight(self, height):
self.height = height
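
# Illustrative sketch (not part of the original module): a concrete widget only
# needs to implement listen() and draw(). The pygame call below is an assumption
# based on the constructor docstring, which describes `win` as a drawing Surface.
#
#     class Box(WidgetBase):
#         def listen(self, events):
#             pass
#
#         def draw(self):
#             if not self.hidden:
#                 pygame.draw.rect(self.win, (200, 200, 200),
#                                  (self.x, self.y, self.width, self.height))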
|
py | 1a39c25c52d8f20c7d3067209619c7d40935827f | # flake8: noqa
from __future__ import unicode_literals
from .abc import (
ABCIE,
ABCIViewIE,
)
from .abcnews import (
AbcNewsIE,
AbcNewsVideoIE,
)
from .abcotvs import (
ABCOTVSIE,
ABCOTVSClipsIE,
)
from .academicearth import AcademicEarthCourseIE
from .acast import (
ACastIE,
ACastChannelIE,
)
from .adn import ADNIE
from .adobeconnect import AdobeConnectIE
from .adobetv import (
AdobeTVEmbedIE,
AdobeTVIE,
AdobeTVShowIE,
AdobeTVChannelIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aenetworks import (
AENetworksIE,
AENetworksCollectionIE,
AENetworksShowIE,
HistoryTopicIE,
HistoryPlayerIE,
BiographyIE,
)
from .afreecatv import AfreecaTVIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .amara import AmaraIE
from .amcnetworks import AMCNetworksIE
from .americastestkitchen import (
AmericasTestKitchenIE,
AmericasTestKitchenSeasonIE,
)
from .animeondemand import AnimeOnDemandIE
from .anvato import AnvatoIE
from .aol import AolIE
from .allocine import AllocineIE
from .aliexpress import AliExpressLiveIE
from .apa import APAIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .appletrailers import (
AppleTrailersIE,
AppleTrailersSectionIE,
)
from .applepodcasts import ApplePodcastsIE
from .archiveorg import ArchiveOrgIE
from .arcpublishing import ArcPublishingIE
from .arkena import ArkenaIE
from .ard import (
ARDBetaMediathekIE,
ARDIE,
ARDMediathekIE,
)
from .arte import (
ArteTVIE,
ArteTVEmbedIE,
ArteTVPlaylistIE,
)
from .asiancrush import (
AsianCrushIE,
AsianCrushPlaylistIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .atvat import ATVAtIE
from .audimedia import AudiMediaIE
from .audioboom import AudioBoomIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .awaan import (
AWAANIE,
AWAANVideoIE,
AWAANLiveIE,
AWAANSeasonIE,
)
from .azmedien import AZMedienIE
from .baidu import BaiduVideoIE
from .bandcamp import BandcampIE, BandcampAlbumIE, BandcampWeeklyIE
from .bbc import (
BBCCoUkIE,
BBCCoUkArticleIE,
BBCCoUkIPlayerPlaylistIE,
BBCCoUkPlaylistIE,
BBCIE,
)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .bellmedia import BellMediaIE
from .beatport import BeatportIE
from .bet import BetIE
from .bfi import BFIPlayerIE
from .bfmtv import (
BFMTVIE,
BFMTVLiveIE,
BFMTVArticleIE,
)
from .bibeltv import BibelTVIE
from .bigflix import BigflixIE
from .bild import BildIE
from .bilibili import (
BiliBiliIE,
BiliBiliBangumiIE,
BilibiliAudioIE,
BilibiliAudioAlbumIE,
BiliBiliPlayerIE,
)
from .biobiochiletv import BioBioChileTVIE
from .bitchute import (
BitChuteIE,
BitChuteChannelIE,
)
from .biqle import BIQLEIE
from .bleacherreport import (
BleacherReportIE,
BleacherReportCMSIE,
)
from .blinkx import BlinkxIE
from .bloomberg import BloombergIE
from .bokecc import BokeCCIE
from .bongacams import BongaCamsIE
from .bostonglobe import BostonGlobeIE
from .box import BoxIE
from .bpb import BpbIE
from .br import (
BRIE,
BRMediathekIE,
)
from .bravotv import BravoTVIE
from .breakcom import BreakIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .businessinsider import BusinessInsiderIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .cammodels import CamModelsIE
from .camtube import CamTubeIE
from .camwithher import CamWithHerIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .canvas import (
CanvasIE,
CanvasEenIE,
VrtNUIE,
DagelijkseKostIE,
)
from .carambatv import (
CarambaTVIE,
CarambaTVPageIE,
)
from .cartoonnetwork import CartoonNetworkIE
from .cbc import (
CBCIE,
CBCPlayerIE,
CBCWatchVideoIE,
CBCWatchIE,
CBCOlympicsIE,
)
from .cbs import CBSIE
from .cbslocal import (
CBSLocalIE,
CBSLocalArticleIE,
)
from .cbsinteractive import CBSInteractiveIE
from .cbsnews import (
CBSNewsEmbedIE,
CBSNewsIE,
CBSNewsLiveVideoIE,
)
from .cbssports import CBSSportsIE
from .ccc import (
CCCIE,
CCCPlaylistIE,
)
from .ccma import CCMAIE
from .cctv import CCTVIE
from .cda import CDAIE
from .ceskatelevize import (
CeskaTelevizeIE,
CeskaTelevizePoradyIE,
)
from .channel9 import Channel9IE
from .charlierose import CharlieRoseIE
from .chaturbate import ChaturbateIE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemax import CinemaxIE
from .ciscolive import (
CiscoLiveSessionIE,
CiscoLiveSearchIE,
)
from .cjsw import CJSWIE
from .cliphunter import CliphunterIE
from .clippit import ClippitIE
from .cliprs import ClipRsIE
from .clipsyndicate import ClipsyndicateIE
from .closertotruth import CloserToTruthIE
from .cloudflarestream import CloudflareStreamIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .clyp import ClypIE
from .cmt import CMTIE
from .cnbc import (
CNBCIE,
CNBCVideoIE,
)
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .coub import CoubIE
from .comedycentral import (
ComedyCentralIE,
ComedyCentralTVIE,
)
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .commonprotocols import (
MmsIE,
RtmpIE,
)
from .condenast import CondeNastIE
from .contv import CONtvIE
from .corus import CorusIE
from .cracked import CrackedIE
from .crackle import CrackleIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .ctv import CTVIE
from .ctvnews import CTVNewsIE
from .cultureunplugged import CultureUnpluggedIE
from .curiositystream import (
CuriosityStreamIE,
CuriosityStreamCollectionIE,
)
from .cwtv import CWTVIE
from .dailymail import DailyMailIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import (
DaumIE,
DaumClipIE,
DaumPlaylistIE,
DaumUserIE,
)
from .dbtv import DBTVIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .democracynow import DemocracynowIE
from .dfb import DFBIE
from .dhm import DHMIE
from .digg import DiggIE
from .dotsub import DotsubIE
from .douyutv import (
DouyuShowIE,
DouyuTVIE,
)
from .dplay import (
DPlayIE,
DiscoveryPlusIE,
HGTVDeIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import (
DRTVIE,
DRTVLiveIE,
)
from .dtube import DTubeIE
from .dvtv import DVTVIE
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .discoverygo import (
DiscoveryGoIE,
DiscoveryGoPlaylistIE,
)
from .discoverynetworks import DiscoveryNetworksDeIE
from .discoveryvr import DiscoveryVRIE
from .disney import DisneyIE
from .dispeak import DigitallySpeakingIE
from .dropbox import DropboxIE
from .dw import (
DWIE,
DWArticleIE,
)
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .egghead import (
EggheadCourseIE,
EggheadLessonIE,
)
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentube import (
EllenTubeIE,
EllenTubeVideoIE,
EllenTubePlaylistIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .espn import (
ESPNIE,
ESPNArticleIE,
FiveThirtyEightIE,
)
from .esri import EsriVideoIE
from .europa import EuropaIE
from .expotv import ExpoTVIE
from .expressen import ExpressenIE
from .extremetube import ExtremeTubeIE
from .eyedotv import EyedoTVIE
from .facebook import (
FacebookIE,
FacebookPluginsVideoIE,
)
from .faz import FazIE
from .fc2 import (
FC2IE,
FC2EmbedIE,
)
from .fczenit import FczenitIE
from .filmon import (
FilmOnIE,
FilmOnChannelIE,
)
from .filmweb import FilmwebIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .formula1 import Formula1IE
from .fourtube import (
FourTubeIE,
PornTubeIE,
PornerBrosIE,
FuxIE,
)
from .fox import FOXIE
from .fox9 import (
FOX9IE,
FOX9NewsIE,
)
from .foxgay import FoxgayIE
from .foxnews import (
FoxNewsIE,
FoxNewsArticleIE,
)
from .foxsports import FoxSportsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
FranceTVIE,
FranceTVSiteIE,
FranceTVEmbedIE,
FranceTVInfoIE,
FranceTVInfoSportIE,
FranceTVJeunesseIE,
GenerationWhatIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freshlive import FreshLiveIE
from .frontendmasters import (
FrontendMastersIE,
FrontendMastersLessonIE,
FrontendMastersCourseIE
)
from .fujitv import FujiTVFODPlus7IE
from .funimation import FunimationIE
from .funk import FunkIE
from .fusion import FusionIE
from .gaia import GaiaIE
from .gameinformer import GameInformerIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gaskrank import GaskrankIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import (
GloboIE,
GloboArticleIE,
)
from .go import GoIE
from .godtube import GodTubeIE
from .golem import GolemIE
from .googledrive import GoogleDriveIE
from .googlepodcasts import (
GooglePodcastsIE,
GooglePodcastsFeedIE,
)
from .googlesearch import GoogleSearchIE
from .goshgay import GoshgayIE
from .gputechconf import GPUTechConfIE
from .groupon import GrouponIE
from .hbo import HBOIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .hgtv import HGTVComShowIE
from .hketv import HKETVIE
from .hidive import HiDiveIE
from .historicfilms import HistoricFilmsIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hitrecord import HitRecordIE
from .hornbunny import HornBunnyIE
from .hotnewhiphop import HotNewHipHopIE
from .hotstar import (
HotStarIE,
HotStarPlaylistIE,
)
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .hrti import (
HRTiIE,
HRTiPlaylistIE,
)
from .huajiao import HuajiaoIE
from .huffpost import HuffPostIE
from .hungama import (
HungamaIE,
HungamaSongIE,
)
from .hypem import HypemIE
from .ign import (
IGNIE,
IGNVideoIE,
IGNArticleIE,
)
from .iheart import (
IHeartRadioIE,
IHeartRadioPodcastIE,
)
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import (
ImgurIE,
ImgurAlbumIE,
ImgurGalleryIE,
)
from .ina import InaIE
from .inc import IncIE
from .indavideo import IndavideoEmbedIE
from .infoq import InfoQIE
from .instagram import (
InstagramIE,
InstagramUserIE,
InstagramTagIE,
)
from .internazionale import InternazionaleIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .iqiyi import IqiyiIE
from .ir90tv import Ir90TvIE
from .itv import (
ITVIE,
ITVBTCCIE,
)
from .ivi import (
IviIE,
IviCompilationIE
)
from .ivideon import IvideonIE
from .iwara import IwaraIE
from .izlesene import IzleseneIE
from .jamendo import (
JamendoIE,
JamendoAlbumIE,
)
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .joj import JojIE
from .jwplatform import JWPlatformIE
from .kakao import KakaoIE
from .kaltura import KalturaIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .ketnet import KetnetIE
from .khanacademy import (
KhanAcademyIE,
KhanAcademyUnitIE,
)
from .kickstarter import KickStarterIE
from .kinja import KinjaEmbedIE
from .kinopoisk import KinoPoiskIE
from .konserthusetplay import KonserthusetPlayIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .kusi import KUSIIE
from .kuwo import (
KuwoIE,
KuwoAlbumIE,
KuwoChartIE,
KuwoSingerIE,
KuwoCategoryIE,
KuwoMvIE,
)
from .la7 import LA7IE
from .laola1tv import (
Laola1TvEmbedIE,
Laola1TvIE,
EHFTVIE,
ITTFIE,
)
from .lbry import (
LBRYIE,
LBRYChannelIE,
)
from .lci import LCIIE
from .lcp import (
LcpPlayIE,
LcpIE,
)
from .lecture2go import Lecture2GoIE
from .lecturio import (
LecturioIE,
LecturioCourseIE,
LecturioDeCourseIE,
)
from .leeco import (
LeIE,
LePlaylistIE,
LetvCloudIE,
)
from .lego import LEGOIE
from .lemonde import LemondeIE
from .lenta import LentaIE
from .libraryofcongress import LibraryOfCongressIE
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .limelight import (
LimelightMediaIE,
LimelightChannelIE,
LimelightChannelListIE,
)
from .line import LineTVIE
from .linkedin import (
LinkedInLearningIE,
LinkedInLearningCourseIE,
)
from .linuxacademy import LinuxAcademyIE
from .litv import LiTVIE
from .livejournal import LiveJournalIE
from .liveleak import (
LiveLeakIE,
LiveLeakEmbedIE,
)
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .localnews8 import LocalNews8IE
from .lovehomeporn import LoveHomePornIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .mailru import (
MailRuIE,
MailRuMusicIE,
MailRuMusicSearchIE,
)
from .malltv import MallTVIE
from .mangomolo import (
MangomoloVideoIE,
MangomoloLiveIE,
)
from .manyvids import ManyVidsIE
from .markiza import (
MarkizaIE,
MarkizaPageIE,
)
from .massengeschmacktv import MassengeschmackTVIE
from .matchtv import MatchTVIE
from .mdr import MDRIE
from .medaltv import MedalTVIE
from .mediaset import MediasetIE
from .mediasite import (
MediasiteIE,
MediasiteCatalogIE,
MediasiteNamedCatalogIE,
)
from .medici import MediciIE
from .megaphone import MegaphoneIE
from .meipai import MeipaiIE
from .melonvod import MelonVODIE
from .meta import METAIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .mgtv import MGTVIE
from .miaopai import MiaoPaiIE
from .microsoftvirtualacademy import (
MicrosoftVirtualAcademyIE,
MicrosoftVirtualAcademyCourseIE,
)
from .minds import (
MindsIE,
MindsChannelIE,
MindsGroupIE,
)
from .ministrygrid import MinistryGridIE
from .minoto import MinotoIE
from .miomio import MioMioIE
from .mit import TechTVMITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import (
MixcloudIE,
MixcloudUserIE,
MixcloudPlaylistIE,
)
from .mlb import MLBIE
from .mnet import MnetIE
from .moevideo import MoeVideoIE
from .mofosex import (
MofosexIE,
MofosexEmbedIE,
)
from .mojvideo import MojvideoIE
from .morningstar import MorningstarIE
from .motherless import (
MotherlessIE,
MotherlessGroupIE
)
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movingimage import MovingImageIE
from .msn import MSNIE
from .mtv import (
MTVIE,
MTVVideoIE,
MTVServicesEmbeddedIE,
MTVDEIE,
MTVJapanIE,
)
from .muenchentv import MuenchenTVIE
from .mwave import MwaveIE, MwaveMeetGreetIE
from .mychannels import MyChannelsIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import (
MyviIE,
MyviEmbedIE,
)
from .myvidster import MyVidsterIE
from .nationalgeographic import (
NationalGeographicVideoIE,
NationalGeographicTVIE,
)
from .naver import NaverIE
from .nba import (
NBAWatchEmbedIE,
NBAWatchIE,
NBAWatchCollectionIE,
NBAEmbedIE,
NBAIE,
NBAChannelIE,
)
from .nbc import (
NBCIE,
NBCNewsIE,
NBCOlympicsIE,
NBCOlympicsStreamIE,
NBCSportsIE,
NBCSportsStreamIE,
NBCSportsVPlayerIE,
)
from .ndr import (
NDRIE,
NJoyIE,
NDREmbedBaseIE,
NDREmbedIE,
NJoyEmbedIE,
)
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .neteasemusic import (
NetEaseMusicIE,
NetEaseMusicAlbumIE,
NetEaseMusicSingerIE,
NetEaseMusicListIE,
NetEaseMusicMvIE,
NetEaseMusicProgramIE,
NetEaseMusicDjRadioIE,
)
from .newgrounds import (
NewgroundsIE,
NewgroundsPlaylistIE,
)
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
NextTVIE,
)
from .nexx import (
NexxIE,
NexxEmbedIE,
)
from .nfl import (
NFLIE,
NFLArticleIE,
)
from .nhk import (
NhkVodIE,
NhkVodProgramIE,
)
from .nhl import NHLIE
from .nick import (
NickIE,
NickBrIE,
NickDeIE,
NickNightIE,
NickRuIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninecninemedia import NineCNineMediaIE
from .ninegag import NineGagIE
from .ninenow import NineNowIE
from .nintendo import NintendoIE
from .njpwworld import NJPWWorldIE
from .nobelprize import NobelPrizeIE
from .nonktube import NonkTubeIE
from .noovo import NoovoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import (
NovaEmbedIE,
NovaIE,
)
from .nowness import (
NownessIE,
NownessPlaylistIE,
NownessSeriesIE,
)
from .noz import NozIE
from .npo import (
AndereTijdenIE,
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
SchoolTVIE,
HetKlokhuisIE,
VPROIE,
WNLIE,
)
from .npr import NprIE
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKSkoleIE,
NRKTVIE,
NRKTVDirekteIE,
NRKRadioPodkastIE,
NRKTVEpisodeIE,
NRKTVEpisodesIE,
NRKTVSeasonIE,
NRKTVSeriesIE,
)
from .nrl import NRLTVIE
from .ntvcojp import NTVCoJpCUIE
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
NYTimesCookingIE,
)
from .nuvid import NuvidIE
from .nzz import NZZIE
from .odatv import OdaTVIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .ondemandkorea import OnDemandKoreaIE
from .onet import (
OnetIE,
OnetChannelIE,
OnetMVPIE,
OnetPlIE,
)
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .ora import OraTVIE
from .orf import (
ORFTVthekIE,
ORFFM4IE,
ORFFM4StoryIE,
ORFOE1IE,
ORFOE3IE,
ORFNOEIE,
ORFWIEIE,
ORFBGLIE,
ORFOOEIE,
ORFSTMIE,
ORFKTNIE,
ORFSBGIE,
ORFTIRIE,
ORFVBGIE,
ORFIPTVIE,
)
from .outsidetv import OutsideTVIE
from .packtpub import (
PacktPubIE,
PacktPubCourseIE,
)
from .pandoratv import PandoraTVIE
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .pearvideo import PearVideoIE
from .peertube import PeerTubeIE
from .people import PeopleIE
from .performgroup import PerformGroupIE
from .periscope import (
PeriscopeIE,
PeriscopeUserIE,
)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .picarto import (
PicartoIE,
PicartoVodIE,
)
from .piksel import PikselIE
from .pinkbike import PinkbikeIE
from .pinterest import (
PinterestIE,
PinterestCollectionIE,
)
from .pladform import PladformIE
from .platzi import (
PlatziIE,
PlatziCourseIE,
)
from .playfm import PlayFMIE
from .playplustv import PlayPlusTVIE
from .plays import PlaysTVIE
from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .pluralsight import (
PluralsightIE,
PluralsightCourseIE,
)
from .podomatic import PodomaticIE
from .pokemon import PokemonIE
from .polskieradio import (
PolskieRadioIE,
PolskieRadioCategoryIE,
)
from .popcorntimes import PopcorntimesIE
from .popcorntv import PopcornTVIE
from .porn91 import Porn91IE
from .porncom import PornComIE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubUserIE,
PornHubPagedVideoListIE,
PornHubUserVideosUploadIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .puhutv import (
PuhuTVIE,
PuhuTVSerieIE,
)
from .presstv import PressTVIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
QQMusicPlaylistIE,
)
from .r7 import (
R7IE,
R7ArticleIE,
)
from .radiocanada import (
RadioCanadaIE,
RadioCanadaAudioVideoIE,
)
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import (
RaiPlayIE,
RaiPlayLiveIE,
RaiPlayPlaylistIE,
RaiIE,
)
from .raywenderlich import (
RayWenderlichIE,
RayWenderlichCourseIE,
)
from .rbmaradio import RBMARadioIE
from .rds import RDSIE
from .redbulltv import (
RedBullTVIE,
RedBullEmbedIE,
RedBullTVRrnContentIE,
RedBullIE,
)
from .reddit import (
RedditIE,
RedditRIE,
)
from .redtube import RedTubeIE
from .regiotv import RegioTVIE
from .rentv import (
RENTVIE,
RENTVArticleIE,
)
from .restudy import RestudyIE
from .reuters import ReutersIE
from .reverbnation import ReverbNationIE
from .rice import RICEIE
from .rmcdecouverte import RMCDecouverteIE
from .ro220 import Ro220IE
from .rockstargames import RockstarGamesIE
from .roosterteeth import RoosterTeethIE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rozhlas import RozhlasIE
from .rtbf import RTBFIE
from .rte import RteIE, RteRadioIE
from .rtlnl import RtlNlIE
from .rtl2 import (
RTL2IE,
RTL2YouIE,
RTL2YouSeriesIE,
)
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE, RTVETelevisionIE
from .rtvnh import RTVNHIE
from .rtvs import RTVSIE
from .ruhd import RUHDIE
from .rumble import RumbleEmbedIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
RutubePlaylistIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .ruv import RuvIE
from .safari import (
SafariIE,
SafariApiIE,
SafariCourseIE,
)
from .samplefocus import SampleFocusIE
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .scrippsnetworks import (
ScrippsNetworksWatchIE,
ScrippsNetworksIE,
)
from .scte import (
SCTEIE,
SCTECourseIE,
)
from .seeker import SeekerIE
from .senateisvp import SenateISVPIE
from .sendtonews import SendtoNewsIE
from .servus import ServusIE
from .sevenplus import SevenPlusIE
from .sexu import SexuIE
from .seznamzpravy import (
SeznamZpravyIE,
SeznamZpravyArticleIE,
)
from .shahid import (
ShahidIE,
ShahidShowIE,
)
from .shared import (
SharedIE,
VivoIE,
)
from .showroomlive import ShowRoomLiveIE
from .simplecast import (
SimplecastIE,
SimplecastEpisodeIE,
SimplecastPodcastIE,
)
from .sina import SinaIE
from .sixplay import SixPlayIE
from .skyit import (
SkyItPlayerIE,
SkyItVideoIE,
SkyItVideoLiveIE,
SkyItIE,
SkyItAcademyIE,
SkyItArteIE,
CieloTVItIE,
TV8ItIE,
)
from .skylinewebcams import SkylineWebcamsIE
from .skynewsarabia import (
SkyNewsArabiaIE,
SkyNewsArabiaArticleIE,
)
from .sky import (
SkyNewsIE,
SkySportsIE,
SkySportsNewsIE,
)
from .slideshare import SlideshareIE
from .slideslive import SlidesLiveIE
from .slutload import SlutloadIE
from .snotr import SnotrIE
from .sohu import SohuIE
from .sonyliv import SonyLIVIE
from .soundcloud import (
SoundcloudEmbedIE,
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudTrackStationIE,
SoundcloudPlaylistIE,
SoundcloudSearchIE,
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .spankbang import (
SpankBangIE,
SpankBangPlaylistIE,
)
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
from .spike import (
BellatorIE,
ParamountNetworkIE,
)
from .stitcher import (
StitcherIE,
StitcherShowIE,
)
from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportdeutschland import SportDeutschlandIE
from .spotify import (
SpotifyIE,
SpotifyShowIE,
)
from .spreaker import (
SpreakerIE,
SpreakerPageIE,
SpreakerShowIE,
SpreakerShowPageIE,
)
from .springboardplatform import SpringboardPlatformIE
from .sprout import SproutIE
from .srgssr import (
SRGSSRIE,
SRGSSRPlayIE,
)
from .srmediathek import SRMediathekIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .storyfire import (
StoryFireIE,
StoryFireUserIE,
StoryFireSeriesIE,
)
from .streamable import StreamableIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .stretchinternet import StretchInternetIE
from .stv import STVPlayerIE
from .sunporno import SunPornoIE
from .sverigesradio import (
SverigesRadioEpisodeIE,
SverigesRadioPublicationIE,
)
from .svt import (
SVTIE,
SVTPageIE,
SVTPlayIE,
SVTSeriesIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import (
TagesschauPlayerIE,
TagesschauIE,
)
from .tass import TassIE
from .tbs import TBSIE
from .tdslifeway import TDSLifewayIE
from .teachable import (
TeachableIE,
TeachableCourseIE,
)
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .teamtreehouse import TeamTreeHouseIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .tele5 import Tele5IE
from .tele13 import Tele13IE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .telequebec import (
TeleQuebecIE,
TeleQuebecSquatIE,
TeleQuebecEmissionIE,
TeleQuebecLiveIE,
TeleQuebecVideoIE,
)
from .teletask import TeleTaskIE
from .telewebion import TelewebionIE
from .tennistv import TennisTVIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .tfo import TFOIE
from .theintercept import TheInterceptIE
from .theplatform import (
ThePlatformIE,
ThePlatformFeedIE,
)
from .thescene import TheSceneIE
from .thestar import TheStarIE
from .thesun import TheSunIE
from .theweatherchannel import TheWeatherChannelIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .thisoldhouse import ThisOldHouseIE
from .threeqsdn import ThreeQSDNIE
from .tiktok import (
TikTokIE,
TikTokUserIE,
)
from .tinypic import TinyPicIE
from .tmz import (
TMZIE,
TMZArticleIE,
)
from .tnaflix import (
TNAFlixNetworkEmbedIE,
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .toggle import (
ToggleIE,
MeWatchIE,
)
from .tonline import TOnlineIE
from .toongoggles import ToonGogglesIE
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trovo import (
TrovoIE,
TrovoVodIE,
)
from .trunews import TruNewsIE
from .trutv import TruTVIE
from .tube8 import Tube8IE
from .tubitv import TubiTvIE
from .tumblr import TumblrIE
from .tunein import (
TuneInClipIE,
TuneInStationIE,
TuneInProgramIE,
TuneInTopicIE,
TuneInShortenerIE,
)
from .tunepk import TunePkIE
from .turbo import TurboIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
KatsomoIE,
MTVUutisetArticleIE,
)
from .tv2dk import (
TV2DKIE,
TV2DKBornholmPlayIE,
)
from .tv2hu import TV2HuIE
from .tv4 import TV4IE
from .tv5mondeplus import TV5MondePlusIE
from .tv5unis import (
TV5UnisVideoIE,
TV5UnisIE,
)
from .tva import (
TVAIE,
QubIE,
)
from .tvanouvelles import (
TVANouvellesIE,
TVANouvellesArticleIE,
)
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tver import TVerIE
from .tvigle import TvigleIE
from .tvland import TVLandIE
from .tvn24 import TVN24IE
from .tvnet import TVNetIE
from .tvnoe import TVNoeIE
from .tvnow import (
TVNowIE,
TVNowNewIE,
TVNowSeasonIE,
TVNowAnnualIE,
TVNowShowIE,
)
from .tvp import (
TVPEmbedIE,
TVPIE,
TVPWebsiteIE,
)
from .tvplay import (
TVPlayIE,
ViafreeIE,
TVPlayHomeIE,
)
from .tvplayer import TVPlayerIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentymin import TwentyMinutenIE
from .twentythreevideo import TwentyThreeVideoIE
from .twitcasting import TwitCastingIE
from .twitch import (
TwitchVodIE,
TwitchCollectionIE,
TwitchVideosIE,
TwitchVideosClipsIE,
TwitchVideosCollectionsIE,
TwitchStreamIE,
TwitchClipsIE,
)
from .twitter import (
TwitterCardIE,
TwitterIE,
TwitterAmplifyIE,
TwitterBroadcastIE,
)
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .ufctv import (
UFCTVIE,
UFCArabiaIE,
)
from .uktvplay import UKTVPlayIE
from .digiteka import DigitekaIE
from .dlive import (
DLiveVODIE,
DLiveStreamIE,
)
from .umg import UMGDeIE
from .unistra import UnistraIE
from .unity import UnityIE
from .uol import UOLIE
from .uplynk import (
UplynkIE,
UplynkPreplayIE,
)
from .urort import UrortIE
from .urplay import URPlayIE
from .usanetwork import USANetworkIE
from .usatoday import USATodayIE
from .ustream import UstreamIE, UstreamChannelIE
from .ustudio import (
UstudioIE,
UstudioEmbedIE,
)
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import (
VevoIE,
VevoPlaylistIE,
)
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import (
ViceIE,
ViceArticleIE,
ViceShowIE,
)
from .vidbit import VidbitIE
from .viddler import ViddlerIE
from .videa import VideaIE
from .videodetective import VideoDetectiveIE
from .videofyme import VideofyMeIE
from .videomore import (
VideomoreIE,
VideomoreVideoIE,
VideomoreSeasonIE,
)
from .videopress import VideoPressIE
from .vidio import VidioIE
from .vidlii import VidLiiIE
from .vidme import (
VidmeIE,
VidmeUserIE,
VidmeUserLikesIE,
)
from .vier import VierIE, VierVideosIE
from .viewlift import (
ViewLiftIE,
ViewLiftEmbedIE,
)
from .viidea import ViideaIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoOndemandIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
VHXEmbedIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .viqeo import ViqeoIE
from .viu import (
ViuIE,
ViuPlaylistIE,
ViuOTTIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
VKWallPostIE,
)
from .vlive import (
VLiveIE,
VLivePostIE,
VLiveChannelIE,
)
from .vodlocker import VodlockerIE
from .vodpl import VODPlIE
from .vodplatform import VODPlatformIE
from .voicerepublic import VoiceRepublicIE
from .voot import VootIE
from .voxmedia import (
VoxMediaVolumeIE,
VoxMediaIE,
)
from .vrt import VRTIE
from .vrak import VrakIE
from .vrv import (
VRVIE,
VRVSeriesIE,
)
from .vshare import VShareIE
from .vtm import VTMIE
from .medialaan import MedialaanIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vvvvid import (
VVVVIDIE,
VVVVIDShowIE,
)
from .vyborymos import VyboryMosIE
from .vzaar import VzaarIE
from .wakanim import WakanimIE
from .walla import WallaIE
from .washingtonpost import (
WashingtonPostIE,
WashingtonPostArticleIE,
)
from .wat import WatIE
from .watchbox import WatchBoxIE
from .watchindianporn import WatchIndianPornIE
from .wdr import (
WDRIE,
WDRPageIE,
WDRElefantIE,
WDRMobileIE,
)
from .webcaster import (
WebcasterIE,
WebcasterFeedIE,
)
from .webofstories import (
WebOfStoriesIE,
WebOfStoriesPlaylistIE,
)
from .weibo import (
WeiboIE,
WeiboMobileIE
)
from .weiqitv import WeiqiTVIE
from .wistia import (
WistiaIE,
WistiaPlaylistIE,
)
from .worldstarhiphop import WorldStarHipHopIE
from .wsj import (
WSJIE,
WSJArticleIE,
)
from .wwe import WWEIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xfileshare import XFileShareIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
XHamsterUserIE,
)
from .xiami import (
XiamiSongIE,
XiamiAlbumIE,
XiamiArtistIE,
XiamiCollectionIE
)
from .ximalaya import (
XimalayaIE,
XimalayaAlbumIE
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
YahooGyaOPlayerIE,
YahooGyaOIE,
YahooJapanNewsIE,
)
from .yandexdisk import YandexDiskIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
YandexMusicArtistTracksIE,
YandexMusicArtistAlbumsIE,
)
from .yandexvideo import YandexVideoIE
from .yapfiles import YapFilesIE
from .yesjapan import YesJapanIE
from .yinyuetai import YinYueTaiIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import (
YoukuIE,
YoukuShowIE,
)
from .younow import (
YouNowLiveIE,
YouNowChannelIE,
YouNowMomentIE,
)
from .youporn import YouPornIE
from .yourporn import YourPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubeTabIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
#YoutubeSearchURLIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeYtBeIE,
YoutubeYtUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zattoo import (
BBVTVIE,
EinsUndEinsTVIE,
EWETVIE,
GlattvisionTVIE,
MNetTVIE,
MyVisionTVIE,
NetPlusIE,
OsnatelTVIE,
QuantumTVIE,
QuicklineIE,
QuicklineLiveIE,
SaltTVIE,
SAKTVIE,
VTXTVIE,
WalyTVIE,
ZattooIE,
ZattooLiveIE,
)
from .zdf import ZDFIE, ZDFChannelIE
from .zhihu import ZhihuIE
from .zingmp3 import ZingMp3IE
from .zype import ZypeIE
|
py | 1a39c351d29a1588e3fa5b40b4cbdd32ed42b00a | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/components/weapon/shared_wpn_sfs_imperial_special_forces_blaster.iff"
result.attribute_template_id = 8
result.stfName("space/space_item","wpn_sfs_imperial_special_forces_blaster_n")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 1a39c3564615c7e1393010e5bf734a65f1b9d218 | """
This module uses ROSEGRAPHICS to demonstrate:
-- CONSTRUCTING objects,
-- applying METHODS to them, and
-- accessing their DATA via INSTANCE VARIABLES.
Authors: David Mutchler, Dave Fisher, Vibha Alangar, Mark Hays, Amanda Stouder,
their colleagues and Michael Kuznicki.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
########################################################################
#
# DONE: 2.
# RUN this program. Then answer the following,
# GETTING HELP AS NEED! (Ask questions!!!)
#
# a. For the RoseGraphics coordinate system:
#
# -- Where is the (0, 0) point on the screen?
# It is in the upper left corner.
#
# -- In what direction on the screen
# does the positive X-axis point?
# It points to the right.
#
# -- In what direction on the screen
# does the positive Y-axis point?
# It points down.
#
# b. Write a line of code that constructs a RoseWindow object:
# window = rg.RoseWindow(400,400)
#
#
#
#
#
#
#
#
# e. Use the DOT trick to answer the following:
#
# -- Write the names of two types of graphics objects that
# you can construct OTHER than Circle and Point:
# Arc and Button
#
# -- Write the names of three METHODs that Circle objects have:
# fill_color and attach_to
#
# -- Write the names of three INSTANCE VARIABLEs that Circle
# objects have:
# center and radius
#
# f. What does a RoseWindow RENDER method do?
#    It draws all of the objects that have been attached to the window.
#
# g. When is a RoseWindow close_on_mouse_click method call
# necessary? Why?
# It is necessary when you want to control when the window
# closes instead of it closing on its own.
#
# ASK QUESTIONS ** NOW ** if you do not understand how the
# RoseGraphics graphics system works.
#
# When you are confident that you have written correct answers
# to the above questions (ASK QUESTIONS AS NEEDED!),
# change the above TODO to DONE.
#
########################################################################
import rosegraphics as rg
def main():
"""
Uses ROSEGRAPHICS to demonstrate:
-- CONSTRUCTING objects,
-- applying METHODS to them, and
-- accessing their DATA via INSTANCE VARIABLES
"""
example1()
example2()
example3()
def example1():
""" Displays an empty window. """
window = rg.RoseWindow(500, 300, 'Example 1: An empty window')
window.close_on_mouse_click()
def example2():
""" Displays two Point objects. """
# ------------------------------------------------------------------
# Construct the window in which objects will be drawn.
# ------------------------------------------------------------------
window = rg.RoseWindow()
# ------------------------------------------------------------------
# Construct some rg.Point objects.
# Note: the y-axis goes DOWN from the TOP.
# ------------------------------------------------------------------
point1 = rg.Point(100, 150)
point2 = rg.Point(200, 50)
# ------------------------------------------------------------------
# A RoseGraphics object is not associated with a window,
# and hence are not drawn, until you ATTACH it to a window.
# ------------------------------------------------------------------
point1.attach_to(window)
point2.attach_to(window)
# ------------------------------------------------------------------
# And they still are not DRAWN until you RENDER the window.
# That will draw ALL the objects on the window.
# This two-step approach is important for animation.
# ------------------------------------------------------------------
window.render()
window.close_on_mouse_click()
def example3():
""" Displays a Circle and a Rectangle. """
# ------------------------------------------------------------------
# RoseWindow: optionally takes its width and height.
# ------------------------------------------------------------------
width = 700
height = 400
window = rg.RoseWindow(width, height)
# ------------------------------------------------------------------
# Circle: needs its center and radius.
# Has fill_color instance variable.
# ------------------------------------------------------------------
center_point = rg.Point(300, 100)
radius = 50
circle = rg.Circle(center_point, radius)
circle.fill_color = 'green'
circle.attach_to(window)
# ------------------------------------------------------------------
# Rectangle: needs two opposite corners.
# ------------------------------------------------------------------
point1 = rg.Point(100, 150)
point2 = rg.Point(200, 50)
rectangle = rg.Rectangle(point1, point2)
rectangle.attach_to(window)
# ------------------------------------------------------------------
# render: Draw ALL the objects attached to this window.
# ------------------------------------------------------------------
window.render()
# ------------------------------------------------------------------
# A Rectangle has instance variables corner_1 and corner2.
# ------------------------------------------------------------------
corner1 = rectangle.corner_1
corner2 = rectangle.corner_2
print(corner1, corner2) # You can also PRINT RoseGraphics objects.
print(rectangle) # See the Console for the output.
# ------------------------------------------------------------------
# close_on_mouse_click: Keeps the window open until user clicks.
# ------------------------------------------------------------------
window.close_on_mouse_click()
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
|
py | 1a39c3589e82d8f410c4552b13b85a0adbca28d3 | #! /usr/bin/python
from __future__ import absolute_import
from __future__ import print_function
from . import rowingdata
from sys import argv
def main():
readFile=argv[1]
try:
rowerFile=argv[2]
except IndexError:
rowerFile="defaultrower.txt"
rower=rowingdata.getrower(rowerFile)
csvoutput=readFile+"_o.CSV"
rp=rowingdata.ErgDataParser(readFile)
rp.write_csv(csvoutput)
res=rowingdata.rowingdata(csvoutput,rowtype="On-water",
rower=rower)
res.plotmeters_erg()
    print(res.allstats())
    print("done " + readFile)
|
py | 1a39c3bd1207eab6ed412d10816d31d97c16fce5 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Kimora Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends KMR to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more KMR to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import KimoraTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
Call = enum.Enum("Call", "single multi")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that getbalance/listtransactions return expected values."""
balance = self.node.getbalance(self.label, 0, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["account"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(tx["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in tx, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(KimoraTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount and label for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
|
py | 1a39c3eb5436148dffffd19927f44185b911b023 | import scrapy
from sec.items import SecItem
import re
class SecSpider(scrapy.Spider):
name = 'sec'
allowed_domains = ['sec.gov']
start_urls = [
'https://www.sec.gov/cgi-bin/browse-edgar?CIK=t&owner=exclude&action=getcompany&count=100',
]
def parse(self, response):
for sel in response.xpath('//table[@class="tableFile2"]/tr'):
item = SecItem()
item['filing'] = sel.xpath('td[1]/text()').extract()
item['link'] = sel.xpath('td[2]/a/@href').extract()
item['date'] = sel.xpath('td[4]/text()').extract()
print(item)
yield item
next_page = response.xpath("//input[@type='button']/@onclick")
# print(next_page)
if next_page:
path = re.findall("'((?:.|\n)*?)'", next_page.pop().extract()).pop()
url = 'https://www.sec.gov' + path
yield scrapy.Request(url, self.parse)
|
py | 1a39c5872a28af748bf84587e7b159b0d55638fd | """
Migration script to alter the type of the tool_dependency.version column from TrimmedString(40) to Text.
"""
import logging
from sqlalchemy import (
MetaData,
Table
)
log = logging.getLogger(__name__)
metadata = MetaData()
def upgrade(migrate_engine):
print(__doc__)
metadata.bind = migrate_engine
metadata.reflect()
Table("tool_dependency", metadata, autoload=True)
# Change the tool_dependency table's version column from TrimmedString to Text.
if migrate_engine.name in ['postgres', 'postgresql']:
cmd = "ALTER TABLE tool_dependency ALTER COLUMN version TYPE Text;"
elif migrate_engine.name == 'mysql':
cmd = "ALTER TABLE tool_dependency MODIFY COLUMN version Text;"
else:
# We don't have to do anything for sqlite tables. From the sqlite documentation at http://sqlite.org/datatype3.html:
# 1.0 Storage Classes and Datatypes
# Each value stored in an SQLite database (or manipulated by the database engine) has one of the following storage classes:
# NULL. The value is a NULL value.
# INTEGER. The value is a signed integer, stored in 1, 2, 3, 4, 6, or 8 bytes depending on the magnitude of the value.
# REAL. The value is a floating point value, stored as an 8-byte IEEE floating point number.
# TEXT. The value is a text string, stored using the database encoding (UTF-8, UTF-16BE or UTF-16LE).
# BLOB. The value is a blob of data, stored exactly as it was input.
cmd = None
if cmd:
try:
migrate_engine.execute(cmd)
except Exception:
log.exception("Altering tool_dependency.version column from TrimmedString(40) to Text failed.")
def downgrade(migrate_engine):
# Not necessary to change column type Text to TrimmedString(40).
pass
|
py | 1a39c5d1264eac56a18a0651b28d00073880b7e7 | from collections import OrderedDict
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, InvalidPage
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.utils import timezone
from .models import Event, Signup
# Create your views here.
def upcoming(request, page):
return render(request, 'events/list.html', getContext(page, 'Upcoming'))
def previous(request, page):
return render(request, 'events/list.html', getContext(page, 'Previous'))
# Putting duplicate code from upcoming/previous in one place - Sorc
def getContext(page, event_type):
if 'Upcoming' == event_type:
events_all = Event.objects.filter(when__gte=timezone.now()).order_by('when')
else:
events_all = Event.objects.filter(when__lte=timezone.now()).order_by('-when')
paginator = Paginator(events_all, 12)
try:
events_page = paginator.page(page)
except InvalidPage:
events_page = paginator.page(1)
    events = events_page
# Solution copied from uwcs-zarya
weeks_dict = OrderedDict()
for event in events:
event_week = event.when.isocalendar()[1]
key = '{year}-{week}'.format(year=event.when.year, week=event_week)
if weeks_dict.get(key):
weeks_dict.get(key).append(event)
else:
weeks_dict[key] = [event]
weeks = list()
for _, week in weeks_dict.items():
weeks.append(week)
return {
'weeks': weeks,
'paginator_page': events_page,
'list_type': event_type,
}
def event_view(request, event_id):
template = 'events/view.html'
event = get_object_or_404(Event, id=event_id)
if not event.signup_required():
context = {
'event': event,
'signup_required': False,
}
return render(request, template, context)
user_is_signed_up = False if not request.user.is_authenticated else event.already_signed_up(request.user.member)
context = {
'event': event,
'signups': Signup.objects.filter(event=event).order_by("-created"),
'signup_required': True,
'user_is_signed_up': user_is_signed_up,
'event_is_full': event.is_full(),
'closed': event.closed(),
'opened': event.opened()
}
return render(request, template, context)
@login_required
def signup(request, event_id):
event = get_object_or_404(Event, id=event_id)
if request.method == 'POST':
if event.is_full():
messages.error(request, 'Event is full')
elif not event.signup_required():
messages.error(request, 'Signups are not required for this event')
elif event.already_signed_up(request.user.member):
messages.error(request, 'You have already signed up for this event')
elif event.closed():
messages.error(request, 'Signups for this event are closed')
elif not event.opened():
messages.error(request, 'Signups for this event are not open yet')
else:
new_signup = Signup(
who=request.user.member,
event=Event.objects.get(id=event_id),
comment=request.POST['signup_comment'],
created=timezone.now()
)
new_signup.save()
messages.success(request, 'Signup for event successful')
return HttpResponseRedirect(reverse('events:view', args=[event.id]))
@login_required
def cancel(request, event_id):
event = get_object_or_404(Event, id=event_id)
if request.method == 'POST':
if event.closed():
messages.error(request, 'Signups for this event are closed')
else:
            Signup.objects.filter(event=event, who=request.user.member).delete()
messages.success(request, 'Canceling successful')
return HttpResponseRedirect(reverse('events:view', args=[event.id]))
|
py | 1a39c6c5d294944d0a3d4ae7606006f7e1236f88 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['WebAppApplicationSettings']
class WebAppApplicationSettings(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
String dictionary resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] kind: Kind of resource.
:param pulumi.Input[str] name: Name of the app.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] properties: Settings.
:param pulumi.Input[str] resource_group_name: Name of the resource group to which the resource belongs.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['kind'] = kind
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__['name'] = name
__props__['properties'] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:web/v20160801:WebAppApplicationSettings"), pulumi.Alias(type_="azure-native:web:WebAppApplicationSettings"), pulumi.Alias(type_="azure-nextgen:web:WebAppApplicationSettings"), pulumi.Alias(type_="azure-native:web/latest:WebAppApplicationSettings"), pulumi.Alias(type_="azure-nextgen:web/latest:WebAppApplicationSettings"), pulumi.Alias(type_="azure-native:web/v20150801:WebAppApplicationSettings"), pulumi.Alias(type_="azure-nextgen:web/v20150801:WebAppApplicationSettings"), pulumi.Alias(type_="azure-native:web/v20180201:WebAppApplicationSettings"), pulumi.Alias(type_="azure-nextgen:web/v20180201:WebAppApplicationSettings"), pulumi.Alias(type_="azure-native:web/v20181101:WebAppApplicationSettings"), pulumi.Alias(type_="azure-nextgen:web/v20181101:WebAppApplicationSettings"), pulumi.Alias(type_="azure-native:web/v20190801:WebAppApplicationSettings"), pulumi.Alias(type_="azure-nextgen:web/v20190801:WebAppApplicationSettings"), pulumi.Alias(type_="azure-native:web/v20200601:WebAppApplicationSettings"), pulumi.Alias(type_="azure-nextgen:web/v20200601:WebAppApplicationSettings"), pulumi.Alias(type_="azure-native:web/v20200901:WebAppApplicationSettings"), pulumi.Alias(type_="azure-nextgen:web/v20200901:WebAppApplicationSettings"), pulumi.Alias(type_="azure-native:web/v20201001:WebAppApplicationSettings"), pulumi.Alias(type_="azure-nextgen:web/v20201001:WebAppApplicationSettings")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(WebAppApplicationSettings, __self__).__init__(
'azure-native:web/v20160801:WebAppApplicationSettings',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'WebAppApplicationSettings':
"""
Get an existing WebAppApplicationSettings resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["kind"] = None
__props__["name"] = None
__props__["properties"] = None
__props__["type"] = None
return WebAppApplicationSettings(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Mapping[str, str]]:
"""
Settings.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
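# A minimal usage sketch for this generated resource; the resource name, app name,
# resource group and setting keys below are hypothetical.
#   app_settings = WebAppApplicationSettings("appSettings",
#       name="my-web-app",
#       resource_group_name="my-resource-group",
#       properties={"WEBSITE_RUN_FROM_PACKAGE": "1"})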
|
py | 1a39c6f2113ed40ea62cfde584cc641ea36a150f | """
This file must not depend on any other CuPy modules.
"""
import ctypes
import json
import os
import os.path
import shutil
import sys
import warnings
# '' for uninitialized, None for non-existing
_cuda_path = ''
_nvcc_path = ''
_rocm_path = ''
_hipcc_path = ''
_cub_path = ''
"""
Library Preloading
------------------
Wheel packages are built against specific versions of CUDA libraries
(cuTENSOR/NCCL/cuDNN).
To avoid loading wrong version, these shared libraries are manually
preloaded.
# TODO(kmaehashi): Support NCCL
Example of `_preload_config` is as follows:
{
# installation source
'packaging': 'pip',
# CUDA version string
'cuda': '11.0',
'cudnn': {
# cuDNN version string
'version': '8.0.0',
# names of the shared library
'filenames': ['libcudnn.so.X.Y.Z'] # or `cudnn64_X.dll` for Windows
}
}
The configuration file is intended solely for internal purposes and
not expected to be parsed by end-users.
"""
_preload_config = None
_preload_libs = {
'cudnn': None,
'nccl': None,
'cutensor': None,
}
_preload_logs = []
def _log(msg):
# TODO(kmaehashi): replace with the standard logging
_preload_logs.append(msg)
def get_cuda_path():
# Returns the CUDA installation path or None if not found.
global _cuda_path
if _cuda_path == '':
_cuda_path = _get_cuda_path()
return _cuda_path
def get_nvcc_path():
# Returns the path to the nvcc command or None if not found.
global _nvcc_path
if _nvcc_path == '':
_nvcc_path = _get_nvcc_path()
return _nvcc_path
def get_rocm_path():
# Returns the ROCm installation path or None if not found.
global _rocm_path
if _rocm_path == '':
_rocm_path = _get_rocm_path()
return _rocm_path
def get_hipcc_path():
# Returns the path to the hipcc command or None if not found.
global _hipcc_path
if _hipcc_path == '':
_hipcc_path = _get_hipcc_path()
return _hipcc_path
def get_cub_path():
# Returns the CUB header path or None if not found.
global _cub_path
if _cub_path == '':
_cub_path = _get_cub_path()
return _cub_path
def _get_cuda_path():
# Use environment variable
cuda_path = os.environ.get('CUDA_PATH', '') # Nvidia default on Windows
if os.path.exists(cuda_path):
return cuda_path
# Use nvcc path
nvcc_path = shutil.which('nvcc')
if nvcc_path is not None:
return os.path.dirname(os.path.dirname(nvcc_path))
# Use typical path
if os.path.exists('/usr/local/cuda'):
return '/usr/local/cuda'
return None
def _get_nvcc_path():
# Honor the "NVCC" env var
nvcc_path = os.environ.get('NVCC', None)
if nvcc_path is not None:
return nvcc_path
# Lookup <CUDA>/bin
cuda_path = get_cuda_path()
if cuda_path is None:
return None
return shutil.which('nvcc', path=os.path.join(cuda_path, 'bin'))
def _get_rocm_path():
# Use environment variable
rocm_path = os.environ.get('ROCM_HOME', '')
if os.path.exists(rocm_path):
return rocm_path
# Use hipcc path
hipcc_path = shutil.which('hipcc')
if hipcc_path is not None:
return os.path.dirname(os.path.dirname(hipcc_path))
# Use typical path
if os.path.exists('/opt/rocm'):
return '/opt/rocm'
return None
def _get_hipcc_path():
# TODO(leofang): Introduce an env var HIPCC?
# Lookup <ROCM>/bin
rocm_path = get_rocm_path()
if rocm_path is None:
return None
return shutil.which('hipcc', path=os.path.join(rocm_path, 'bin'))
def _get_cub_path():
# runtime discovery of CUB headers
from cupy_backends.cuda.api import runtime
current_dir = os.path.dirname(os.path.abspath(__file__))
if not runtime.is_hip:
cuda_path = get_cuda_path()
if os.path.isdir(os.path.join(current_dir, '_core/include/cupy/cub')):
_cub_path = '<bundle>'
elif cuda_path is not None and os.path.isdir(
os.path.join(cuda_path, 'include/cub')):
# use built-in CUB for CUDA 11+
_cub_path = '<CUDA>'
else:
_cub_path = None
else:
# the bundled CUB does not work in ROCm
rocm_path = get_rocm_path()
if rocm_path is not None and os.path.isdir(
os.path.join(rocm_path, 'include/hipcub')):
# use hipCUB
_cub_path = '<ROCm>'
else:
_cub_path = None
return _cub_path
def _setup_win32_dll_directory():
# Setup DLL directory to load CUDA Toolkit libs and shared libraries
# added during the build process.
if sys.platform.startswith('win32'):
is_conda = ((os.environ.get('CONDA_PREFIX') is not None)
or (os.environ.get('CONDA_BUILD_STATE') is not None))
# Path to the CUDA Toolkit binaries
cuda_path = get_cuda_path()
if cuda_path is not None:
if is_conda:
cuda_bin_path = cuda_path
else:
cuda_bin_path = os.path.join(cuda_path, 'bin')
else:
cuda_bin_path = None
warnings.warn(
'CUDA path could not be detected.'
' Set CUDA_PATH environment variable if CuPy fails to load.')
_log('CUDA_PATH: {}'.format(cuda_path))
# Path to shared libraries in wheel
wheel_libdir = os.path.join(
get_cupy_install_path(), 'cupy', '.data', 'lib')
if os.path.isdir(wheel_libdir):
_log('Wheel shared libraries: {}'.format(wheel_libdir))
else:
_log('Not wheel distribution ({} not found)'.format(
wheel_libdir))
wheel_libdir = None
if (3, 8) <= sys.version_info:
if cuda_bin_path is not None:
_log('Adding DLL search path: {}'.format(cuda_bin_path))
os.add_dll_directory(cuda_bin_path)
if wheel_libdir is not None:
_log('Adding DLL search path: {}'.format(wheel_libdir))
os.add_dll_directory(wheel_libdir)
else:
# Users are responsible for adding `%CUDA_PATH%/bin` to PATH.
if wheel_libdir is not None:
_log('Adding to PATH: {}'.format(wheel_libdir))
path = os.environ.get('PATH', '')
os.environ['PATH'] = wheel_libdir + os.pathsep + path
def get_cupy_install_path():
# Path to the directory where the package is installed.
return os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
def get_cupy_cuda_lib_path():
"""Returns the directory where CUDA external libraries are installed.
This environment variable only affects wheel installations.
Shared libraries are looked up from
`$CUPY_CUDA_LIB_PATH/$CUDA_VER/$LIB_NAME/$LIB_VER/{lib,lib64,bin}`,
e.g., `~/.cupy/cuda_lib/11.2/cudnn/8.1.1/lib64/libcudnn.so.8.1.1`.
The default $CUPY_CUDA_LIB_PATH is `~/.cupy/cuda_lib`.
"""
cupy_cuda_lib_path = os.environ.get('CUPY_CUDA_LIB_PATH', None)
if cupy_cuda_lib_path is None:
return os.path.expanduser('~/.cupy/cuda_lib')
return os.path.abspath(cupy_cuda_lib_path)
def get_preload_config():
global _preload_config
if _preload_config is None:
config_path = os.path.join(
get_cupy_install_path(), 'cupy', '.data', '_wheel.json')
if not os.path.exists(config_path):
return None
with open(config_path) as f:
_preload_config = json.load(f)
return _preload_config
def _can_attempt_preload(lib: str) -> bool:
"""Returns if the preload can be attempted."""
config = get_preload_config()
if (config is None) or (config['packaging'] == 'conda'):
# We don't do preload if CuPy is installed from Conda-Forge, as we
# cannot guarantee the version pinned in _wheel.json, which is
# encoded in config[lib]['filenames'], is always available on
# Conda-Forge. See here for the configuration files used in
# Conda-Forge distributions.
# https://github.com/conda-forge/cupy-feedstock/blob/master/recipe/preload_config/
_log(f'Cannot preload {lib} as this is not a wheel installation')
return False
if lib not in _preload_libs:
raise AssertionError(f'Unknown preload library: {lib}')
if lib not in config:
_log(f'Preload {lib} not configured in wheel')
return False
if _preload_libs[lib] is not None:
_log(f'Preload already attempted: {lib}')
return False
return True
def _preload_library(lib):
"""Preload dependent shared libraries.
The preload configuration file (cupy/.data/_wheel.json) will be added
during the wheel build process.
"""
_log(f'Preloading triggered for library: {lib}')
if not _can_attempt_preload(lib):
return
_preload_libs[lib] = {}
config = get_preload_config()
cuda_version = config['cuda']
_log('CuPy wheel package built for CUDA {}'.format(cuda_version))
cupy_cuda_lib_path = get_cupy_cuda_lib_path()
_log('CuPy CUDA library directory: {}'.format(cupy_cuda_lib_path))
version = config[lib]['version']
filenames = config[lib]['filenames']
for filename in filenames:
_log(f'Looking for {lib} version {version} ({filename})')
# "lib": cuTENSOR (Linux/Windows) / NCCL (Linux)
# "lib64": cuDNN (Linux)
# "bin": cuDNN (Windows)
libpath_cands = [
os.path.join(
cupy_cuda_lib_path, config['cuda'], lib, version, x,
filename)
for x in ['lib', 'lib64', 'bin']]
for libpath in libpath_cands:
if not os.path.exists(libpath):
_log('Rejected candidate (not found): {}'.format(libpath))
continue
try:
_log(f'Trying to load {libpath}')
# Keep reference to the preloaded module.
_preload_libs[lib][libpath] = ctypes.CDLL(libpath)
_log('Loaded')
break
except Exception as e:
e_type = type(e).__name__ # NOQA
msg = (
f'CuPy failed to preload library ({libpath}): '
f'{e_type} ({e})')
_log(msg)
warnings.warn(msg)
else:
_log('File {} could not be found'.format(filename))
# Lookup library with fully-qualified version (e.g.,
# `libcudnn.so.X.Y.Z`).
_log(f'Trying to load {filename} from default search path')
try:
_preload_libs[lib][filename] = ctypes.CDLL(filename)
_log('Loaded')
except Exception as e:
# Fallback to the standard shared library lookup which only
# uses the major version (e.g., `libcudnn.so.X`).
_log(f'Library {lib} could not be preloaded: {e}')
def _get_preload_logs():
return '\n'.join(_preload_logs)
def _preload_warning(lib, exc):
config = get_preload_config()
if config is not None and lib in config:
msg = '''
{lib} library could not be loaded.
Reason: {exc_type} ({exc})
You can install the library by:
'''
if config['packaging'] == 'pip':
msg += '''
$ python -m cupyx.tools.install_library --library {lib} --cuda {cuda}
'''
elif config['packaging'] == 'conda':
msg += '''
$ conda install -c conda-forge {lib}
'''
else:
raise AssertionError
msg = msg.format(
lib=lib, exc_type=type(exc).__name__, exc=str(exc),
cuda=config['cuda'])
warnings.warn(msg)
def _detect_duplicate_installation():
# importlib.metadata only available in Python 3.8+.
if sys.version_info < (3, 8):
return
import importlib.metadata
# List of all CuPy packages, including out-dated ones.
known = [
'cupy',
'cupy-cuda80',
'cupy-cuda90',
'cupy-cuda91',
'cupy-cuda92',
'cupy-cuda100',
'cupy-cuda101',
'cupy-cuda102',
'cupy-cuda110',
'cupy-cuda111',
'cupy-cuda112',
'cupy-cuda113',
'cupy-cuda114',
'cupy-cuda115',
'cupy-cuda116',
'cupy-rocm-4-0',
'cupy-rocm-4-1',
'cupy-rocm-4-2',
'cupy-rocm-4-3',
]
cupy_installed = [
name for name in known
if list(importlib.metadata.distributions(name=name))]
if 1 < len(cupy_installed):
cupy_packages_list = ', '.join(sorted(cupy_installed))
warnings.warn(f'''
--------------------------------------------------------------------------------
CuPy may not function correctly because multiple CuPy packages are installed
in your environment:
{cupy_packages_list}
Follow these steps to resolve this issue:
1. For all packages listed above, run the following command to remove all
existing CuPy installations:
$ pip uninstall <package_name>
If you previously installed CuPy via conda, also run the following:
$ conda uninstall cupy
2. Install the appropriate CuPy package.
Refer to the Installation Guide for detailed instructions.
https://docs.cupy.dev/en/stable/install.html
--------------------------------------------------------------------------------
''')
def _diagnose_import_error() -> str:
# TODO(kmaehashi): provide better diagnostics.
return '''\
Failed to import CuPy.
If you installed CuPy via wheels (cupy-cudaXXX or cupy-rocm-X-X), make sure that the package matches with the version of CUDA or ROCm installed.
On Linux, you may need to set LD_LIBRARY_PATH environment variable depending on how you installed CUDA/ROCm.
On Windows, try setting CUDA_PATH environment variable.
Check the Installation Guide for details:
https://docs.cupy.dev/en/latest/install.html''' # NOQA
|
py | 1a39c85137db8fcb8238a5f8773661967bcc8560 | import string
BASE_CHARACTERS = string.ascii_letters + string.digits
SAFE_CHARACTERS = frozenset(BASE_CHARACTERS + '-_.')
KEY_LENGTH = (2, 128)
NONCE_LENGTH = (6, 128)
SECRET_LENGTH = (6, 128)
default_app_config = 'django_lti_login.apps.DjangoLTILoginConfig'
|
py | 1a39c870144b319795498b06dab9a3ec599ed73a | # -*- coding: utf-8 -*-
import numpy as np
import os
from VDE.VASPMoleculeFeature import VASP_DataExtract
import pickle
from dlmep.DatasetOffer import print_file
class DatasetMaker(object):
def __init__(self,dir_list):
if not isinstance(dir_list,list):
dir_list = [dir_list]
self.in_dir = dir_list
self.vasp_dirs = []
for i in self.in_dir:
self.vasp_dirs.extend(self.get_vasp_dirs(i))
print("Get total %s vasp dirs" % len(self.vasp_dirs))
if len(self.vasp_dirs) == 0:
raise ValueError("No vasp dirs Available")
self.total_info = {}
self.total_info["instance"] = "DatasetMaker"
self.atom_cases = set([])
for i in self.vasp_dirs:
self.total_info[i] = {}
self.total_info[i]["generated"] = 0
self.b_make_dataset = 0
def make_dataset(self):
t = len(self.vasp_dirs)
for i in range(t):
print_file("Process For generating dataset: %s / %s"%(i, t))
#print("Process for %s" % self.vasp_dirs[i])
self.__make_one_dataset(self.vasp_dirs[i])
self.b_make_dataset = 1
def save_dataset(self,pkl_path):
if self.b_make_dataset == 0:
raise ValueError("make dataset before save dataset!")
if os.path.isdir(pkl_path):
pkl_path += "/atom_dataset.pkl"
if not pkl_path.endswith(".pkl"):
pkl_path += '.pkl'
with open(pkl_path, "wb") as f:
pickle.dump(self.total_info,f)
def give_out_dataset(self):
if self.b_make_dataset == 0:
raise ValueError("make dataset before save dataset!")
return self.total_info
def __make_one_dataset(self,vasp_dir):
test = VASP_DataExtract(vasp_dir=vasp_dir)
test.get_atom_and_position_info()
a = test.get_output_as_atom3Dspace()
        if len(a.atoms_pos_info) <= 4:  # not enough samples, which is quite likely to happen
print_file("No enough samples for %s, which have %s." % (vasp_dir, len(a.atoms_pos_info)))
del self.total_info[vasp_dir]
return
print_file("vasp_dir %s have sample %s" % (vasp_dir, len(a.atoms_pos_info)))
self.total_info[vasp_dir]["generated"] = 1
        # Here x and y are not coordinates: x holds the coordinate data and y the energies
self.total_info[vasp_dir]['x'], self.total_info[vasp_dir]['y'], atom_cases = a.generate_data()
self.atom_cases = self.atom_cases.union(atom_cases)
print("AtomCases",self.atom_cases)
self.total_info[vasp_dir]['atom_cases'] = self.atom_cases
        # The model has to be built around the whole series of datasets, covering all atom types
def get_vasp_dirs(self,dir):
files = os.walk(dir)
vasp_dir = []
for i in files:
if "OUTCAR" in i[2]:
vasp_dir.append(i[0])
return vasp_dir
if __name__ == '__main__':
aim_vasp_path = "C:\\Users\wang\Desktop\运行结果\嵌套运行结果\interm\Pt\AllSurfaceG1"
temp = DatasetMaker(aim_vasp_path)
temp.make_dataset()
print(temp.total_info)
#temp.save_dataset("C:\\Users\wang\Desktop\运行结果")
    # Current problem: OUT.ANI contains twice as many coordinate frames as energy values.
    # Final choice: extract the coordinates directly from OUTCAR, reusing earlier code.
    # TODO The dataset must fix the set of atom types; an atom type that is absent should not
    # contribute to the energy (a bias term exists), so an all-zero feature vector cannot be used for it.
py | 1a39c8c0ea3ed3903fec39da50c5a6d959394b4d | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from opencensus.trace import execution_context
# By default the blacklist urls are not tracing, currently just include the
# health check url. The paths are literal string matched instead of regular
# expressions. Do not include the '/' at the beginning of the path.
DEFAULT_BLACKLIST_PATHS = [
'_ah/health',
]
# Pattern for matching the 'https://', 'http://', 'ftp://' part.
URL_PATTERN = '^(https?|ftp):\\/\\/'
def get_func_name(func):
"""Return a name which includes the module name and function name."""
func_name = getattr(func, '__name__', func.__class__.__name__)
module_name = func.__module__
if module_name is not None:
module_name = func.__module__
return '{}.{}'.format(module_name, func_name)
return func_name
def disable_tracing_url(url, blacklist_paths=None):
"""Disable tracing on the provided blacklist paths, by default not tracing
the health check request.
If the url path starts with the blacklisted path, return True.
    :type blacklist_paths: list
    :param blacklist_paths: Paths that are not traced.
:rtype: bool
:returns: True if not tracing, False if tracing.
"""
if blacklist_paths is None:
blacklist_paths = DEFAULT_BLACKLIST_PATHS
# Remove the 'https?|ftp://' if exists
url = re.sub(URL_PATTERN, '', url)
# Split the url by the first '/' and get the path part
url_path = url.split('/', 1)[1]
for path in blacklist_paths:
if url_path.startswith(path):
return True
return False
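# Illustrative behaviour with the default blacklist (URLs hypothetical):
#   disable_tracing_url('http://example.com/_ah/health')  -> True  (not traced)
#   disable_tracing_url('http://example.com/api/data')    -> False (traced)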
def disable_tracing_hostname(url, blacklist_hostnames=None):
"""Disable tracing for the provided blacklist URLs, by default not tracing
the exporter url.
    If the url exactly matches one of the blacklisted hostnames, return True.
    :type blacklist_hostnames: list
    :param blacklist_hostnames: Hostnames (host:port) that are not traced.
:rtype: bool
:returns: True if not tracing, False if tracing.
"""
if blacklist_hostnames is None:
# Exporter host_name are not traced by default
_tracer = execution_context.get_opencensus_tracer()
try:
blacklist_hostnames = [
'{}:{}'.format(
_tracer.exporter.host_name,
_tracer.exporter.port
)
]
except(AttributeError):
blacklist_hostnames = []
return url in blacklist_hostnames
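# Illustrative behaviour (host names hypothetical): the comparison is an exact
# match against the blacklisted "host:port" strings.
#   disable_tracing_hostname('myexporter:55678', ['myexporter:55678'])      -> True
#   disable_tracing_hostname('api.example.com:443', ['myexporter:55678'])   -> False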
|
py | 1a39c8c8cc02a6b488e22677a9d48356ff5dfc36 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Downloads and converts Flowers data to TFRecords of TF-Example protos.
This module downloads the Flowers data, uncompresses it, reads the files
that make up the Flowers data and creates two TFRecord datasets: one for train
and one for test. Each TFRecord dataset is comprised of a set of TF-Example
protocol buffers, each of which contain a single image and label.
The script should take about a minute to run.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
from shutil import copyfile
import tensorflow as tf
from datasets import dataset_utils
# The URL where the Flowers data can be downloaded.
_DATA_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
# The number of images in the validation set.
_NUM_VALIDATION = 130
# Seed for repeatability.
_RANDOM_SEED = 0
# The number of shards per dataset split.
_NUM_SHARDS = 5
class ImageReader(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Initializes function that decodes RGB PNG data.
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
def read_image_dims(self, sess, image_data):
image = self.decode_png(sess, image_data)
return image.shape[0], image.shape[1]
def decode_png(self, sess, image_data):
image = sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def copy_evaluting_images_file(filename_list, dataset_dir):
wd_snapshot_root = os.path.join(dataset_dir, 'wd_snapshot_photo')
evaluting_dir = os.path.join(wd_snapshot_root, 'evaluting_dir')
if not tf.gfile.Exists(evaluting_dir):
tf.gfile.MakeDirs(evaluting_dir)
for filename in filename_list:
_str_basename = os.path.basename(filename)
_dst_dir_file = os.path.join(evaluting_dir, _str_basename)
copyfile(filename, _dst_dir_file)
def _get_filenames_and_classes(dataset_dir):
"""Returns a list of filenames and inferred class names.
Args:
dataset_dir: A directory containing a set of subdirectories representing
class names. Each subdirectory should contain PNG or JPG encoded images.
Returns:
A list of image file paths, relative to `dataset_dir` and the list of
subdirectories, representing class names.
"""
wd_snapshot_root = os.path.join(dataset_dir, 'wd_snapshot_photo')
directories = []
class_names = []
for filename in os.listdir(wd_snapshot_root):
if filename == "evaluting_dir" or filename == "bad_case_dir":
continue
path = os.path.join(wd_snapshot_root, filename)
if os.path.isdir(path):
directories.append(path)
class_names.append(filename)
photo_filenames = []
for directory in directories:
for filename in os.listdir(directory):
path = os.path.join(directory, filename)
photo_filenames.append(path)
return photo_filenames, sorted(class_names)
def _get_dataset_filename(dataset_dir, split_name, shard_id):
output_filename = 'wdsnapshot_%s_%05d-of-%05d.tfrecord' % (
split_name, shard_id, _NUM_SHARDS)
return os.path.join(dataset_dir, output_filename)
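# For example, with _NUM_SHARDS == 5 the first training shard is written to
# <dataset_dir>/wdsnapshot_train_00000-of-00005.tfrecord.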
def _convert_dataset(split_name, filenames, class_names_to_ids, dataset_dir):
"""Converts the given filenames to a TFRecord dataset.
Args:
split_name: The name of the dataset, either 'train' or 'validation'.
filenames: A list of absolute paths to png or jpg images.
class_names_to_ids: A dictionary from class names (strings) to ids
(integers).
dataset_dir: The directory where the converted datasets are stored.
"""
assert split_name in ['train', 'validation']
num_per_shard = int(math.ceil(len(filenames) / float(_NUM_SHARDS)))
with tf.Graph().as_default():
image_reader = ImageReader()
with tf.Session('') as sess:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
start_ndx = shard_id * num_per_shard
end_ndx = min((shard_id+1) * num_per_shard, len(filenames))
for i in range(start_ndx, end_ndx):
sys.stdout.write('\r>> Converting image %d/%d shard %d' % (
i+1, len(filenames), shard_id))
sys.stdout.flush()
# Read the filename:
image_data = tf.gfile.FastGFile(filenames[i], 'rb').read()
height, width = image_reader.read_image_dims(sess, image_data)
class_name = os.path.basename(os.path.dirname(filenames[i]))
class_id = class_names_to_ids[class_name]
example = dataset_utils.image_to_tfexample(
image_data, b'png', height, width, class_id)
tfrecord_writer.write(example.SerializeToString())
sys.stdout.write('\n')
sys.stdout.flush()
def _clean_up_temporary_files(dataset_dir):
"""Removes temporary files used to create the dataset.
Args:
dataset_dir: The directory where the temporary files are stored.
"""
filename = _DATA_URL.split('/')[-1]
filepath = os.path.join(dataset_dir, filename)
tf.gfile.Remove(filepath)
tmp_dir = os.path.join(dataset_dir, 'flower_photos')
tf.gfile.DeleteRecursively(tmp_dir)
def _dataset_exists(dataset_dir):
for split_name in ['train', 'validation']:
for shard_id in range(_NUM_SHARDS):
output_filename = _get_dataset_filename(
dataset_dir, split_name, shard_id)
if not tf.gfile.Exists(output_filename):
return False
return True
def run(dataset_dir):
"""Runs the conversion operation.
Args:
dataset_dir: The dataset directory where the dataset is stored.
"""
if not tf.gfile.Exists(dataset_dir):
tf.gfile.MakeDirs(dataset_dir)
if _dataset_exists(dataset_dir):
print('Dataset files already exist. Exiting without re-creating them.')
return
#dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
class_names_to_ids = dict(zip(class_names, range(len(class_names))))
# Divide into train and test:
random.seed(_RANDOM_SEED)
random.shuffle(photo_filenames)
_validation_num = int(len(photo_filenames)/5)
training_filenames = photo_filenames[_validation_num:]
validation_filenames = photo_filenames[:_validation_num]
copy_evaluting_images_file(validation_filenames, dataset_dir)
# First, convert the training and validation sets.
_convert_dataset('train', training_filenames, class_names_to_ids,
dataset_dir)
_convert_dataset('validation', validation_filenames, class_names_to_ids,
dataset_dir)
# Finally, write the labels file:
labels_to_class_names = dict(zip(range(len(class_names)), class_names))
dataset_utils.write_label_file(labels_to_class_names, dataset_dir)
#_clean_up_temporary_files(dataset_dir)
print('\nFinished converting the wd_snapshot dataset!')
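# Illustrative invocation (path hypothetical):
#   run('/data/wd_snapshot')
# This expects class-labelled images under /data/wd_snapshot/wd_snapshot_photo/<class_name>/
# and writes the TFRecord shards and the labels file back into /data/wd_snapshot.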
|
py | 1a39c9ac5176628766d30b49f7ec71fb7a6f70fc | from django.db import migrations
from job_board.models.job_type import JobType
from job_board.resources.job_types import JOB_TYPES
class Migration(migrations.Migration):
dependencies = [
('job_board', '0001_initial'),
]
def generate_jobType_data(apps, schema_editor):
for jobType in JOB_TYPES:
JobType(text=jobType).save()
operations = [
migrations.RunPython(generate_jobType_data),
]
|
py | 1a39ca412a1faf9a8cefb1de0db66c33ed9dc27e | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['build_head']
def build_head(config):
# det head
from .det_db_head import DBHead
from .det_east_head import EASTHead
from .det_sast_head import SASTHead
# rec head
from .rec_ctc_head import CTCHead
from .rec_srn_head import SRNHead
# cls head
from .cls_head import ClsHead
support_dict = [
'DBHead', 'EASTHead', 'SASTHead', 'CTCHead', 'ClsHead', 'SRNHead'
]
module_name = config.pop('name')
assert module_name in support_dict, Exception('head only support {}'.format(
support_dict))
module_class = eval(module_name)(**config)
return module_class
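# Illustrative call; the extra keys are assumptions and depend on the chosen head:
#   head = build_head({'name': 'CTCHead', 'in_channels': 96, 'out_channels': 6625})
# 'name' is popped from the config and the remaining keys are forwarded as keyword arguments.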
|
py | 1a39cab971023f5e46f7aed15bf70ac8497d74e9 | from __future__ import division
from itertools import product, groupby
import numpy as np
import logging
from scipy.special._ufuncs import erfc
from statsmodels.stats.multitest import fdrcorrection
logging.basicConfig(level=logging.DEBUG)
def timelag_by_for_loop (timeseries1, timeseries2):
"""Returns for each event in the first time series the time lags for the event in the second time series
that precedes, succeeds. Both time series must be sorted in increasing values."""
preceding_time_lags = []
succeeding_time_lags = []
for time1 in timeseries1:
preceding_time_lags.append(next((time2 - time1 for time2 in reversed(timeseries2) if time2 < time1), []))
succeeding_time_lags.append(next((time2 - time1 for time2 in timeseries2 if time2 > time1), []))
return np.sort(np.hstack(preceding_time_lags + succeeding_time_lags))
def sawtooth(timeseries, dtype = np.float32):
"""Sawtooth function expressing the time lag to the next event in the timeseries."""
epsilon = np.finfo(dtype).eps
gaps = np.diff(timeseries)
x = np.column_stack((timeseries[0:-1], timeseries[1:] - epsilon)).flatten()
y = np.column_stack((gaps, np.zeros_like(gaps))).flatten()
return [x, y]
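# Worked example (hypothetical values): for timeseries [0.0, 1.0, 3.0] the gaps are
# [1.0, 2.0], so interpolating on the returned [x, y] gives the waiting time until the
# next event: ~1.0 just after t=0 decaying to 0 at t=1, then ~2.0 just after t=1
# decaying to 0 at t=3.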
def timelag_by_sawtooth (timeseries1, timeseries2):
"""Returns for each event in the first time series the time lags for the event in the second time series
that precedes, succeeds. Both time series must be sorted in increasing values. Faster than timelag_by_for_loop."""
try:
preceding_time_lags = - np.interp(np.flipud(-timeseries1), *sawtooth(-np.flipud(timeseries2)), left=np.nan, right=np.nan)
except ValueError:
preceding_time_lags = []
try:
succeeding_time_lags = np.interp(timeseries1, *sawtooth(timeseries2), left=np.nan, right=np.nan)
except ValueError:
succeeding_time_lags = []
time_lags = np.sort(np.hstack([preceding_time_lags, succeeding_time_lags]))
valid_time_lags = (np.ma.fix_invalid(time_lags))
return np.ma.compressed(valid_time_lags)
timelag = timelag_by_sawtooth
def timelag_hist (timelags, min_timelag=-0.005, max_timelag=0.005, bin_n=100):
bins = np.linspace(min_timelag, max_timelag, bin_n + 1, endpoint=True)
return np.histogram(timelags, bins=bins)
def swap_intervals (timeseries, indicies):
"""Swap intervals between adjacent intervals indicated by indicies"""
intervals = np.diff(timeseries)
for index in indicies:
intervals[index], intervals[index+1] = intervals[index+1], intervals[index]
return np.hstack([timeseries[0], timeseries[0]+np.cumsum(intervals)])
def randomize_intervals_by_swapping (timeseries, factor):
"""Randomize timeseries by randomly swapping adjacent intervals, total factor times the length of timeseries"""
length = len(timeseries)-1
times = round(factor*length,0)
indicies = np.random.randint(0,length-1,int(times))
return swap_intervals(timeseries,indicies)
def randomize_intervals_by_gaussian (timeseries, factor):
"""Randomize timeseries by assuming indicies make a random walk with (+factor,-factor) of equal probability.
Much faster than randomize_intervals_by_swapping."""
gaps = np.diff(timeseries)
length = len(gaps)
    new_positions = np.arange(length) + np.random.normal(0, factor, length)
index = np.argsort(new_positions)
return timeseries[0] + np.hstack((0,np.cumsum(gaps[index])))
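# Sketch of the idea: each interval keeps its value but its position is jittered by
# N(0, factor) before the intervals are re-accumulated, so the event order is shuffled
# locally while the interval distribution and the start time are preserved.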
randomize_intervals = randomize_intervals_by_gaussian
def surrogate_timeseries (timeseries, n=10, factor=2):
return [randomize_intervals(timeseries,factor=factor) for i in range(n)]
def timelag_standardscore(timeseries1, timeseries2, surrogates):
"""Returns timelags (midpoints of bins) and standard score as well as the counts from the orginal timeseries
and mean and standard deviation for the counts from surrogate timeseries"""
timeseries_hist, bins = timelag_hist(timelag(timeseries1, timeseries2))
timelags = (bins[:-1] + bins[1:])/2 * 1000 # ms
surrogates_hist = np.vstack([timelag_hist(timelag(timeseries1, surrogate))[0] for surrogate in surrogates])
surrogates_mean = surrogates_hist.mean(0)
surrogates_std = np.std(surrogates_hist, 0)
    # NumPy division by zero yields inf/nan rather than raising ZeroDivisionError,
    # so suppress the warning instead of catching an exception that never occurs.
    with np.errstate(divide='ignore', invalid='ignore'):
        std_score = (timeseries_hist - surrogates_mean) / surrogates_std
return timelags, std_score, timeseries_hist, surrogates_mean, surrogates_std
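# Illustrative usage (variable names hypothetical):
#   surrogates = surrogate_timeseries(spikes_b, n=10, factor=2)
#   timelags, z, hist, mu, sigma = timelag_standardscore(spikes_a, spikes_b, surrogates)
# A peak of z at a positive time lag means events in spikes_b follow events in
# spikes_a at that lag more often than expected from the surrogate data.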
def timeseries_to_surrogates(timeseries, n=10, factor=2):
"""Generating surrogate timeseries (this can take a while)"""
timeseries_surrogates = dict([(key, surrogate_timeseries(timeseries[key], n=n, factor=factor)) for key in timeseries])
return timeseries_surrogates
def all_timelag_standardscore (timeseries, timeseries_surrogates):
"""Compute standardscore time histograms"""
all_std_score = []
all_timeseries_hist = []
for pair in product(timeseries, repeat=2):
timelags, std_score, timeseries_hist,surrogates_mean, surrogates_std \
= timelag_standardscore(timeseries[pair[0]], timeseries[pair[1]], timeseries_surrogates[pair[1]])
logging.info ( "Timeseries %d->%d" % pair )
all_std_score.append((pair, std_score))
all_timeseries_hist.append((pair, timeseries_hist))
# if logging.getLogger().getEffectiveLevel()==logging.DEBUG:
# plot_pair_func(timelags, timeseries_hist, surrogates_mean, surrogates_std, std_score,
# "Timeseries %d->%d" % pair)
# plt.show()
return timelags, dict(all_std_score), dict(all_timeseries_hist)
def all_peaks (timelags, std_score_dict, structural_delay_dict=None, minimal_synapse_delay=0):
"""
Return the largest standard score peak for each functional connection, rejecting false positives.
Implemented is the forward direction, that is looking for peaks at post-synaptic time lags
After converting z values into p values by p = erfc(z/sqrt(2), the Benjamini-Hochberg procedure is applied to
control the false discovery rate in the multiple comparisons with a false discover rat fixed at one in all
comparisons. (alpha = 1/number of standard scores)
If provided only timelags larger than the sum of axonal and synaptic delay are considered, but the returned
time lags correspond to the response times to the presynaptic spikes that exclude the axonal delays. This implies
that only structural connected neuron pairs are tested.
:param timelags: array with time lags for standard scores
:param std_score_dict: standard scores indexed by neuron pair
:param structural_delay_dict: (optional) axonal delays indexed by neuron pair
:param minimal_synapse_delay: (optional) time lag must be larger than this synapse delay (and axonal delay)
:return: all_score_max: standard score index py neuron pair
all_timelag_max: time lags indexed by neuron pair
z_thr: threshold for standard score
"""
# TODO Implement reverse directions, that is looking for peaks at pre-synaptic spike time lags
# TODO Implement detection of negative peaks (inhibitory connections)
if structural_delay_dict is None:
pairs = std_score_dict
offset = lambda pair: 0
else: # consider axonal delays
pairs = structural_delay_dict
offset = lambda pair: structural_delay_dict[pair]
# first, collect all z values and determine threshold
z_values = list()
for pair in pairs:
use = timelags > offset(pair)
if pair in std_score_dict:
std_score = std_score_dict[pair]
z_values += list(std_score[use])
z_thr = BH_threshold(z_values)
# second, determine peak z value and check if above threshold
all_score_max, all_timelag_max = [], []
for pair in pairs:
use = timelags > offset(pair) + minimal_synapse_delay
if pair in std_score_dict:
std_score = std_score_dict[pair]
try:
index_max = np.argmax(std_score[use])
timelag_max = timelags[use][index_max]
score_max = std_score[use][index_max]
except ValueError: # ValueError: attempt to get argmax of an empty sequence
score_max = 0
if score_max > z_thr: # looking at positive peaks only
all_score_max.append((pair, score_max))
all_timelag_max.append((pair, timelag_max))
logging.info(("Timeseries %d->%d" % pair) +
(": max z = %f at %f s" % (score_max, timelag_max)))
else:
logging.info(("Timeseries %d->%d" % pair) + ': no peak (above threshold)')
logging.info('FDR correction %d --> %d with z>%f' % (len(std_score_dict), len(all_score_max), z_thr))
return dict(all_score_max), dict(all_timelag_max), z_thr
def BH_threshold(z_values):
"""
Threshold for standard scores by the Benjamini-Hochberg procedure.
:param z_values: standard scores
:return: z_threshold: for absolute value of the standard scores, that is abs(z)>z_threshold
"""
abs_z_values = np.abs(z_values)
p_values = erfc(abs_z_values / np.sqrt(2))
FDR = 1 / p_values.size
rejected, p_values_corrected = fdrcorrection(p_values, alpha=FDR, method='indep', is_sorted=False)
z_thr = min(abs_z_values[rejected == True])
return z_thr
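# Putting the pieces together (illustrative, variable names hypothetical):
#   timeseries_surrogates = timeseries_to_surrogates(timeseries)
#   timelags, std_score_dict, hist_dict = all_timelag_standardscore(timeseries, timeseries_surrogates)
#   score_max, timelag_max, z_thr = all_peaks(timelags, std_score_dict)
# score_max and timelag_max map each accepted pair to its peak standard score and the
# corresponding time lag; z_thr is the Benjamini-Hochberg acceptance threshold.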
|
py | 1a39cb1d6270f508a051be9d5321165cfb63425a | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.215
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from ctm_api_client.configuration import Configuration
class EmOrderFolderParameters(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"annotation_details": "AnnotationDetails",
"ctm_name": "str",
"em_order_folder_parameters": "list[EmOrderFolder]",
}
attribute_map = {
"annotation_details": "annotation_details",
"ctm_name": "ctm_name",
"em_order_folder_parameters": "em_order_folder_parameters",
}
def __init__(
self,
annotation_details=None,
ctm_name=None,
em_order_folder_parameters=None,
_configuration=None,
): # noqa: E501
"""EmOrderFolderParameters - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._annotation_details = None
self._ctm_name = None
self._em_order_folder_parameters = None
self.discriminator = None
if annotation_details is not None:
self.annotation_details = annotation_details
if ctm_name is not None:
self.ctm_name = ctm_name
if em_order_folder_parameters is not None:
self.em_order_folder_parameters = em_order_folder_parameters
@property
def annotation_details(self):
"""Gets the annotation_details of this EmOrderFolderParameters. # noqa: E501
:return: The annotation_details of this EmOrderFolderParameters. # noqa: E501
:rtype: AnnotationDetails
"""
return self._annotation_details
@annotation_details.setter
def annotation_details(self, annotation_details):
"""Sets the annotation_details of this EmOrderFolderParameters.
:param annotation_details: The annotation_details of this EmOrderFolderParameters. # noqa: E501
:type: AnnotationDetails
"""
self._annotation_details = annotation_details
@property
def ctm_name(self):
"""Gets the ctm_name of this EmOrderFolderParameters. # noqa: E501
:return: The ctm_name of this EmOrderFolderParameters. # noqa: E501
:rtype: str
"""
return self._ctm_name
@ctm_name.setter
def ctm_name(self, ctm_name):
"""Sets the ctm_name of this EmOrderFolderParameters.
:param ctm_name: The ctm_name of this EmOrderFolderParameters. # noqa: E501
:type: str
"""
self._ctm_name = ctm_name
@property
def em_order_folder_parameters(self):
"""Gets the em_order_folder_parameters of this EmOrderFolderParameters. # noqa: E501
:return: The em_order_folder_parameters of this EmOrderFolderParameters. # noqa: E501
:rtype: list[EmOrderFolder]
"""
return self._em_order_folder_parameters
@em_order_folder_parameters.setter
def em_order_folder_parameters(self, em_order_folder_parameters):
"""Sets the em_order_folder_parameters of this EmOrderFolderParameters.
:param em_order_folder_parameters: The em_order_folder_parameters of this EmOrderFolderParameters. # noqa: E501
:type: list[EmOrderFolder]
"""
self._em_order_folder_parameters = em_order_folder_parameters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(EmOrderFolderParameters, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EmOrderFolderParameters):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, EmOrderFolderParameters):
return True
return self.to_dict() != other.to_dict()
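# Illustrative usage sketch (not part of the generated client); the Control-M
# server name below is a made-up placeholder, not a real value from the spec.
if __name__ == "__main__":
    _params = EmOrderFolderParameters(ctm_name="ctm-example")
    print(_params.to_dict())  # {'annotation_details': None, 'ctm_name': 'ctm-example', ...}
    print(_params == EmOrderFolderParameters(ctm_name="ctm-example"))  # True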
|
py | 1a39cb66fde2cc5765b028d9875f1c2f36438d33 | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Marker classes for indicating which additional features gates support.
For example: some gates are reversible, some have known matrices, etc.
"""
from typing import (
Any, Dict, Optional, Sequence, Tuple, Iterable, TypeVar, Union,
)
import string
from cirq import abc, value
from cirq.ops import op_tree, raw_types
from cirq.study import ParamResolver
class InterchangeableQubitsGate(metaclass=abc.ABCMeta):
"""Indicates operations should be equal under some qubit permutations."""
def qubit_index_to_equivalence_group_key(self, index: int) -> int:
"""Returns a key that differs between non-interchangeable qubits."""
return 0
class ReversibleEffect(metaclass=abc.ABCMeta):
"""A gate whose effect can be undone in a known way."""
@abc.abstractmethod
def inverse(self) -> 'ReversibleEffect':
"""Returns a gate with an exactly opposite effect."""
TSelf_ExtrapolatableEffect = TypeVar('TSelf_ExtrapolatableEffect',
bound='ExtrapolatableEffect')
class ExtrapolatableEffect(ReversibleEffect,
metaclass=abc.ABCMeta):
"""A gate whose effect can be continuously scaled up/down/negated."""
@abc.abstractmethod
def extrapolate_effect(self: TSelf_ExtrapolatableEffect,
factor: Union[float, value.Symbol]
) -> TSelf_ExtrapolatableEffect:
"""Augments, diminishes, or reverses the effect of the receiving gate.
Args:
factor: The amount to scale the gate's effect by.
Returns:
A gate equivalent to applying the receiving gate 'factor' times.
"""
def __pow__(self: TSelf_ExtrapolatableEffect,
power: Union[float, value.Symbol]
) -> TSelf_ExtrapolatableEffect:
"""Extrapolates the effect of the gate.
Note that there are cases where (G**a)**b != G**(a*b). For example,
start with a 90 degree rotation then cube it then raise it to a
non-integer power such as 3/2. Assuming that rotations are always
normalized into the range (-180, 180], note that:
((rot 90)**3)**1.5 = (rot 270)**1.5 = (rot -90)**1.5 = rot -135
but
            (rot 90)**(3*1.5) = (rot 90)**4.5 = rot 405 = rot 45
Because normalization discards the winding number.
Args:
power: The extrapolation factor.
Returns:
A gate with the extrapolated effect.
"""
return self.extrapolate_effect(power)
def inverse(self: TSelf_ExtrapolatableEffect) -> TSelf_ExtrapolatableEffect:
return self.extrapolate_effect(-1)
class CompositeOperation(metaclass=abc.ABCMeta):
"""An operation with a known decomposition into simpler operations."""
@abc.abstractmethod
def default_decompose(self) -> op_tree.OP_TREE:
"""Yields simpler operations for performing the receiving operation."""
class CompositeGate(metaclass=abc.ABCMeta):
"""A gate with a known decomposition into simpler gates."""
@abc.abstractmethod
def default_decompose(
self, qubits: Sequence[raw_types.QubitId]) -> op_tree.OP_TREE:
"""Yields operations for performing this gate on the given qubits.
Args:
qubits: The qubits the gate should be applied to.
"""
class TextDiagramInfoArgs:
"""
Attributes:
known_qubits: The qubits the gate is being applied to. None means this
information is not known by the caller.
        known_qubit_count: The number of qubits the gate is being applied to.
None means this information is not known by the caller.
use_unicode_characters: If true, the wire symbols are permitted to
include unicode characters (as long as they work well in fixed
width fonts). If false, use only ascii characters. ASCII is
preferred in cases where UTF8 support is done poorly, or where
the fixed-width font being used to show the diagrams does not
properly handle unicode characters.
precision: The number of digits after the decimal to show for numbers in
the text diagram. None means use full precision.
qubit_map: The map from qubits to diagram positions.
"""
UNINFORMED_DEFAULT = None # type: TextDiagramInfoArgs
def __init__(self,
known_qubits: Optional[Tuple[raw_types.QubitId, ...]],
known_qubit_count: Optional[int],
use_unicode_characters: bool,
precision: Optional[int],
qubit_map: Optional[Dict[raw_types.QubitId, int]]) -> None:
self.known_qubits = known_qubits
self.known_qubit_count = known_qubit_count
self.use_unicode_characters = use_unicode_characters
self.precision = precision
self.qubit_map = qubit_map
TextDiagramInfoArgs.UNINFORMED_DEFAULT = TextDiagramInfoArgs(
known_qubits=None,
known_qubit_count=None,
use_unicode_characters=True,
precision=3,
qubit_map=None)
class TextDiagramInfo:
def __init__(self,
wire_symbols: Tuple[str, ...],
exponent: Any = 1,
connected: bool = True) -> None:
"""
Args:
wire_symbols: The symbols that should be shown on the qubits
affected by this operation. Must match the number of qubits that
the operation is applied to.
exponent: An optional convenience value that will be appended onto
an operation's final gate symbol with a caret in front
(unless it's equal to 1). For example, the square root of X gate
has a text diagram exponent of 0.5 and symbol of 'X' so it is
drawn as 'X^0.5'.
connected: Whether or not to draw a line connecting the qubits.
"""
self.wire_symbols = wire_symbols
self.exponent = exponent
self.connected = connected
def _eq_tuple(self):
return (TextDiagramInfo, self.wire_symbols,
self.exponent, self.connected)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return self._eq_tuple() == other._eq_tuple()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._eq_tuple())
def __repr__(self):
return ('cirq.TextDiagramInfo(' +
'wire_symbols={!r}, '.format(self.wire_symbols) +
'exponent={!r}, '.format(self.exponent) +
'connected={!r})'.format(self.connected)
)
class TextDiagrammable(metaclass=abc.ABCMeta):
"""A thing which can be printed in a text diagram."""
@abc.abstractmethod
def text_diagram_info(self, args: TextDiagramInfoArgs) -> TextDiagramInfo:
"""Describes how to draw something in a text diagram.
Args:
args: A TextDiagramInfoArgs instance encapsulating various pieces of
information (e.g. how many qubits are we being applied to) as
well as user options (e.g. whether to avoid unicode characters).
Returns:
A TextDiagramInfo instance describing what to print.
"""
TSelf_PhaseableEffect = TypeVar('TSelf_PhaseableEffect',
bound='PhaseableEffect')
class PhaseableEffect(metaclass=abc.ABCMeta):
"""An effect that can be phased around the Z axis of target qubits."""
@abc.abstractmethod
def phase_by(self: TSelf_PhaseableEffect,
phase_turns: float,
qubit_index: int) -> TSelf_PhaseableEffect:
"""Returns a phased version of the effect.
For example, an X gate phased by 90 degrees would be a Y gate.
Args:
phase_turns: The amount to phase the gate, in fractions of a whole
turn.
qubit_index: The index of the target qubit the phasing applies to.
Returns:
The phased gate or operation.
"""
class BoundedEffect(metaclass=abc.ABCMeta):
"""An effect with known bounds on how easy it is to detect.
Used when deciding whether or not an operation is negligible. For example,
the trace distance between the states before and after a Z**0.00000001
operation is very close to 0, so it would typically be considered
negligible.
"""
@abc.abstractmethod
def trace_distance_bound(self) -> float:
"""A maximum on the trace distance between this effect's input/output.
Generally this method is used when deciding whether to keep gates, so
only the behavior near 0 is important. Approximations that overestimate
the maximum trace distance are permitted. Even ones that exceed 1.
Underestimates are not permitted.
"""
class SingleQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
"""A gate that must be applied to exactly one qubit."""
def validate_args(self, qubits):
if len(qubits) != 1:
raise ValueError(
'Single-qubit gate applied to multiple qubits: {}({})'.
format(self, qubits))
def on_each(self, targets: Iterable[raw_types.QubitId]) -> op_tree.OP_TREE:
"""Returns a list of operations apply this gate to each of the targets.
Args:
targets: The qubits to apply this gate to.
Returns:
Operations applying this gate to the target qubits.
"""
return [self.on(target) for target in targets]
class TwoQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
"""A gate that must be applied to exactly two qubits."""
def validate_args(self, qubits):
if len(qubits) != 2:
raise ValueError(
'Two-qubit gate not applied to two qubits: {}({})'.
format(self, qubits))
class ThreeQubitGate(raw_types.Gate, metaclass=abc.ABCMeta):
"""A gate that must be applied to exactly three qubits."""
def validate_args(self, qubits):
if len(qubits) != 3:
raise ValueError(
'Three-qubit gate not applied to three qubits: {}({})'.
format(self, qubits))
TSelf_ParameterizableEffect = TypeVar('TSelf_ParameterizableEffect',
bound='ParameterizableEffect')
class ParameterizableEffect(metaclass=abc.ABCMeta):
"""An effect that can be parameterized by Symbols."""
@abc.abstractmethod
def is_parameterized(self) -> bool:
"""Whether the effect is parameterized.
Returns True if the gate has any unresolved Symbols and False otherwise.
"""
@abc.abstractmethod
def with_parameters_resolved_by(self: TSelf_ParameterizableEffect,
param_resolver: ParamResolver
) -> TSelf_ParameterizableEffect:
"""Resolve the parameters in the effect.
Returns a gate or operation of the same type, but with all Symbols
replaced with floats according to the given ParamResolver.
"""
class QasmOutputArgs(string.Formatter):
"""
Attributes:
precision: The number of digits after the decimal to show for numbers in
the text diagram.
version: The QASM version to output. QasmConvertibleGate/Operation may
return different text depending on version.
qubit_id_map: A dictionary mapping qubits to qreg QASM identifiers.
meas_key_id_map: A dictionary mapping measurement keys to creg QASM
identifiers.
"""
def __init__(self,
precision: int = 10,
version: str = '2.0',
qubit_id_map: Dict[raw_types.QubitId, str] = None,
meas_key_id_map: Dict[str, str] = None,
) -> None:
self.precision = precision
self.version = version
self.qubit_id_map = {} if qubit_id_map is None else qubit_id_map
self.meas_key_id_map = ({} if meas_key_id_map is None
else meas_key_id_map)
def format_field(self, value: Any, spec: str) -> str:
"""Method of string.Formatter that specifies the output of format()."""
if isinstance(value, float):
value = round(value, self.precision)
if spec == 'half_turns':
value = 'pi*{}'.format(value) if value != 0 else '0'
spec = ''
elif isinstance(value, raw_types.QubitId):
value = self.qubit_id_map[value]
elif isinstance(value, str) and spec == 'meas':
value = self.meas_key_id_map[value]
spec = ''
return super().format_field(value, spec)
def validate_version(self, *supported_versions: str) -> None:
if self.version not in supported_versions:
raise ValueError('QASM version {} output is not supported.'.format(
self.version))
class QasmConvertibleGate(metaclass=abc.ABCMeta):
"""A gate that knows its representation in QASM."""
@abc.abstractmethod
def known_qasm_output(self,
qubits: Tuple[raw_types.QubitId, ...],
args: QasmOutputArgs) -> Optional[str]:
"""Returns lines of QASM output representing the gate on the given
qubits or None if a simple conversion is not possible.
"""
class QasmConvertibleOperation(metaclass=abc.ABCMeta):
"""An operation that knows its representation in QASM."""
@abc.abstractmethod
def known_qasm_output(self, args: QasmOutputArgs) -> Optional[str]:
"""Returns lines of QASM output representing the operation or None if a
simple conversion is not possible."""
|
py | 1a39cd0d03051502e0462dac9d4460991ad1725f | # encoding: utf-8
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'QueryHistory.notify'
db.add_column('beeswax_queryhistory', 'notify', self.gf('django.db.models.fields.BooleanField')(default=False, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'QueryHistory.notify'
db.delete_column('beeswax_queryhistory', 'notify')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'beeswax.metainstall': {
'Meta': {'object_name': 'MetaInstall'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_example': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'beeswax.queryhistory': {
'Meta': {'object_name': 'QueryHistory'},
'design': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beeswax.SavedQuery']", 'null': 'True'}),
'has_results': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_state': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'log_context': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'notify': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'query': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'server_id': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'beeswax.savedquery': {
'Meta': {'object_name': 'SavedQuery'},
'data': ('django.db.models.fields.TextField', [], {'max_length': '65536'}),
'desc': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_auto': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'mtime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['beeswax']
|
py | 1a39cd86922d902d3f867a19cf38e82a495cc360 | #!/usr/bin/env python3
import copy
import os
import pickle
import sys
import time
from src.channel_maps import channel_loc_map
from src.data_preprocess import data_1D_to_2D
from src.movement_onset_detect import *
db_dir = os.getcwd()
ME_db_fname = "prelim_ME_db_128.pickle"
ME_Kin_db_fname = "noneeg_ME_db_128.pickle"
rej_ME_db_fname = "reject_ME_db_128.pickle"
fs = 128
ME_db = {}
ME_kin_db = {}
rej_ME_db = {}
######## Load databases from files in db_dir
t1 = time.time()
with open(db_dir + "/" + rej_ME_db_fname, "rb") as f:
rej_ME_db = pickle.load(f)
with open(db_dir + "/" + ME_db_fname, "rb") as f:
ME_db = pickle.load(f)
with open(db_dir + "/" + ME_Kin_db_fname, "rb") as f:
ME_kin_db = pickle.load(f)
print("Loaded ME database in %f s" % (time.time() - t1))
######## Baseline subtraction and infs/NaNs rejection
t1 = time.time()
ME_db_norm = copy.deepcopy(ME_db)
for i in range(1, 8):
for j in range(0, 900):
try:
signal.detrend(ME_db_norm[i][j], axis=0, overwrite_data=True)
except ValueError as e: # add trials with infs/NaNs to rejected db
rej_ME_db[i][j] = 1
print("Baseline subtraction and infs/NaNs rejection finished in %f s" % (time.time() - t1))
# map event type to event label
# class 1: 0x600 = 1536 (elbow flexion)
# class 2: 0x601 = 1537 (elbow extension)
# class 3: 0x602 = 1538 (supination)
# class 4: 0x603 = 1539 (pronation)
# class 5: 0x604 = 1540 (hand close)
# class 6: 0x605 = 1541 (hand open)
# class 7: 0x606 = 1542 (rest)
######## Movement onset detection
onsetAll = np.zeros((8, 900))
chElbow = np.array([87, 88, 89]) - 65 # adjust for offset as indexed in ME_kin_db
chForeArm = np.array([94]) - 65
chHand = np.arange(65, 80) - 65
plot = False
t1 = time.time()
detectOnset(ME_kin_db, onsetAll, 1, chElbow, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
detectOnset(ME_kin_db, onsetAll, 2, chElbow, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
detectOnset(ME_kin_db, onsetAll, 3, chForeArm, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
detectOnset(ME_kin_db, onsetAll, 4, chForeArm, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
detectOnsetPCA(ME_kin_db, onsetAll, 5, chHand, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
detectOnsetPCA(ME_kin_db, onsetAll, 6, chHand, baselineEnd=16, threshV=1, threshdV=0.01, filt=17, plot=plot)
onsetAll[7, :] = np.mean(onsetAll[1:7, :])
onsetAll = onsetAll.astype(int)
print("Found movement onset in %f s" % (time.time() - t1))
######## Movement onset alignment
t1 = time.time()
ME_db_aligned = alignTrials(ME_db_norm, onsetAll, fs)
print("Created ME_db_aligned in %f s" % (time.time() - t1))
######## Removing artifacts
t1 = time.time()
num_good_trials = np.zeros(8, dtype=int) # array storing the number of good trials per class after trial rejection
ME_db_aligned_no_art = {}
for clas in range(1, 8):
ME_db_aligned_no_art[clas] = None
for clas in range(1, 8):
reject_mask = np.array(rej_ME_db[clas])
ME_db_aligned_no_art[clas] = np.delete(ME_db_aligned[clas], np.nonzero(reject_mask == 1), axis=0)
num_good_trials[clas] = ME_db_aligned_no_art[clas].shape[0]
print("Removing artifacts %f s" % (time.time() - t1))
######## Find minimum number of good trials and balance out all classes
min_num_good_trials = np.min(num_good_trials[1:])
for clas in range(1, 8):
ME_db_aligned_no_art[clas] = ME_db_aligned_no_art[clas][0:min_num_good_trials, :, :]
print(ME_db_aligned_no_art[1].shape)
######## Converting 1D to 2D mesh
CLM = channel_loc_map()
# populate the mesh with the electrodes
mesh = [["" for y in range(0, 9)] for x in range(0, 9)]
for chan in range(0, np.shape(CLM)[0]):
mesh[CLM[chan][0]][CLM[chan][1]] = channel_label_map[chan + 1]
# print the 2D mesh of channels
for x in range(0, 9):
print(mesh[x])
t1 = time.time()
ME_db_final_2D_mesh = data_1D_to_2D(ME_db_aligned_no_art, 9, 9, CLM)
print("Converting 1D to 2D mesh takes %f s" % (time.time() - t1))
t1 = time.time()
with open("mesh_ME_db_128.pickle", "wb") as f:
i_str = pickle.dumps(ME_db_final_2D_mesh)
f_size = sys.getsizeof(i_str) / 1048576
f.write(i_str)
print("Finished writing %.2f MB of data to mesh_ME_db_128.pickle in %f s" % (f_size, time.time() - t1))
|
py | 1a39ce415ee1a36f8606104cf29905208a585082 | """
LUT Processing
==============
Defines the classes and definitions handling *LUT* processing:
- :class:`colour.LUT1D`
- :class:`colour.LUT3x1D`
- :class:`colour.LUT3D`
- :class:`colour.io.LUT_to_LUT`
"""
from __future__ import annotations
import numpy as np
from abc import ABC, abstractmethod
from copy import deepcopy
from operator import (
add,
mul,
pow,
sub,
truediv,
iadd,
imul,
ipow,
isub,
itruediv,
)
from colour.algebra import (
Extrapolator,
LinearInterpolator,
linear_conversion,
table_interpolation_trilinear,
)
from colour.hints import (
Any,
ArrayLike,
Boolean,
FloatingOrArrayLike,
Integer,
IntegerOrArrayLike,
List,
Literal,
NDArray,
Optional,
Sequence,
Type,
Union,
cast,
)
from colour.utilities import (
as_float_array,
as_int,
as_int_array,
as_int_scalar,
attest,
is_numeric,
is_iterable,
is_string,
full,
optional,
required,
runtime_warning,
tsplit,
tstack,
usage_warning,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright (C) 2013-2022 - Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"AbstractLUT",
"LUT1D",
"LUT3x1D",
"LUT3D",
"LUT_to_LUT",
]
class AbstractLUT(ABC):
"""
Defines the base class for *LUT*.
This is an :class:`ABCMeta` abstract class that must be inherited by
sub-classes.
Parameters
----------
table
Underlying *LUT* table.
name
*LUT* name.
dimensions
*LUT* dimensions, typically, 1 for a 1D *LUT*, 2 for a 3x1D *LUT* and 3
for a 3D *LUT*.
domain
*LUT* domain, also used to define the instantiation time default table
domain.
size
*LUT* size, also used to define the instantiation time default table
size.
comments
Comments to add to the *LUT*.
Attributes
----------
- :attr:`~colour.io.luts.lut.AbstractLUT.table`
- :attr:`~colour.io.luts.lut.AbstractLUT.name`
- :attr:`~colour.io.luts.lut.AbstractLUT.dimensions`
- :attr:`~colour.io.luts.lut.AbstractLUT.domain`
- :attr:`~colour.io.luts.lut.AbstractLUT.size`
- :attr:`~colour.io.luts.lut.AbstractLUT.comments`
Methods
-------
- :meth:`~colour.io.luts.lut.AbstractLUT.__init__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__str__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__repr__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__eq__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__ne__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__add__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__iadd__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__sub__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__isub__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__mul__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__imul__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__div__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__idiv__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__pow__`
- :meth:`~colour.io.luts.lut.AbstractLUT.__ipow__`
- :meth:`~colour.io.luts.lut.AbstractLUT.arithmetical_operation`
- :meth:`~colour.io.luts.lut.AbstractLUT.is_domain_explicit`
- :meth:`~colour.io.luts.lut.AbstractLUT.linear_table`
- :meth:`~colour.io.luts.lut.AbstractLUT.copy`
- :meth:`~colour.io.luts.lut.AbstractLUT.invert`
- :meth:`~colour.io.luts.lut.AbstractLUT.apply`
- :meth:`~colour.io.luts.lut.AbstractLUT.as_LUT`
"""
def __init__(
self,
table: Optional[ArrayLike] = None,
name: Optional[str] = None,
dimensions: Optional[Integer] = None,
domain: Optional[ArrayLike] = None,
size: Optional[IntegerOrArrayLike] = None,
comments: Optional[Sequence] = None,
):
self._name: str = f"Unity {size!r}" if table is None else f"{id(self)}"
self.name = optional(name, self._name)
self._dimensions = optional(dimensions, 0)
self._table: NDArray = self.linear_table(
cast(ArrayLike, optional(size, 0)),
cast(ArrayLike, optional(domain, np.array([]))),
)
self.table = cast(
ArrayLike, optional(table, self._table)
) # type: ignore[assignment]
# TODO: Remove pragma when https://github.com/python/mypy/issues/3004
# is resolved.
self._domain: NDArray = np.array([])
self.domain = cast(
ArrayLike, optional(domain, self._domain)
) # type: ignore[assignment]
self._comments: List = []
self.comments = cast(
ArrayLike, optional(comments, self._comments)
) # type: ignore[assignment]
@property
def table(self) -> NDArray:
"""
Getter and setter property for the underlying *LUT* table.
Parameters
----------
value
Value to set the underlying *LUT* table with.
Returns
-------
:class:`numpy.ndarray`
Underlying *LUT* table.
"""
return self._table
@table.setter
def table(self, value: ArrayLike):
"""
Setter for the **self.table** property.
"""
self._table = self._validate_table(value)
@property
def name(self) -> str:
"""
Getter and setter property for the *LUT* name.
Parameters
----------
value
Value to set the *LUT* name with.
Returns
-------
:class:`str`
*LUT* name.
"""
return self._name
@name.setter
def name(self, value: str):
"""
Setter for the **self.name** property.
"""
attest(
is_string(value),
f'"name" property: "{value}" type is not "str"!',
)
self._name = value
@property
def domain(self) -> NDArray:
"""
Getter and setter property for the *LUT* domain.
Parameters
----------
value
Value to set the *LUT* domain with.
Returns
-------
:class:`numpy.ndarray`
*LUT* domain.
"""
return self._domain
@domain.setter
def domain(self, value: ArrayLike):
"""
Setter for the **self.domain** property.
"""
# pylint: disable=E1121
self._domain = self._validate_domain(value)
@property
def dimensions(self) -> Integer:
"""
Getter property for the *LUT* dimensions.
Returns
-------
:class:`numpy.integer`
*LUT* dimensions.
"""
return self._dimensions
@property
def size(self) -> Integer:
"""
Getter property for the *LUT* size.
Returns
-------
:class:`numpy.integer`
*LUT* size.
"""
return self._table.shape[0]
@property
def comments(self) -> List:
"""
Getter and setter property for the *LUT* comments.
Parameters
----------
value
Value to set the *LUT* comments with.
Returns
-------
:class:`list`
*LUT* comments.
"""
return self._comments
@comments.setter
def comments(self, value: Sequence):
"""
Setter for the **self.comments** property.
"""
attest(
is_iterable(value),
f'"comments" property: "{value}" must be a sequence!',
)
self._comments = list(value)
def __str__(self) -> str:
"""
Returns a formatted string representation of the *LUT*.
Returns
-------
:class:`str`
Formatted string representation.
"""
def _indent_array(a: ArrayLike) -> str:
"""
Indents given array string representation.
"""
return str(a).replace(" [", " " * 14 + "[")
comments = [
f"Comment {str(i + 1).zfill(2)} : {comment}"
for i, comment in enumerate(self.comments)
]
return (
"{} - {}\n"
"{}\n\n"
"Dimensions : {}\n"
"Domain : {}\n"
"Size : {!s}{}"
).format(
self.__class__.__name__,
self.name,
"-" * (len(self.__class__.__name__) + 3 + len(self.name)),
self.dimensions,
_indent_array(self.domain),
str(self.table.shape).replace("L", ""),
"\n{}".format("\n".join(comments)) if comments else "",
)
def __repr__(self) -> str:
"""
Returns an evaluable string representation of the *LUT*.
Returns
-------
:class:`str`
Evaluable string representation.
"""
representation = repr(self.table)
representation = representation.replace(
"array", self.__class__.__name__
)
representation = representation.replace(
" [", f"{' ' * (len(self.__class__.__name__) + 2)}["
)
domain = repr(self.domain).replace("array(", "").replace(")", "")
domain = domain.replace(
" [", f"{' ' * (len(self.__class__.__name__) + 9)}["
)
indentation = " " * (len(self.__class__.__name__) + 1)
representation = (
"{0},\n" "{1}name='{2}',\n" "{1}domain={3}{4})"
).format(
representation[:-1],
indentation,
self.name,
domain,
f",\n{indentation}comments={repr(self.comments)}"
if self.comments
else "",
)
return representation
def __eq__(self, other: Any) -> bool:
"""
Returns whether the *LUT* is equal to given other object.
Parameters
----------
other
Object to test whether it is equal to the *LUT*.
Returns
-------
:class:`bool`
Whether given object is equal to the *LUT*.
"""
if isinstance(other, AbstractLUT):
if all(
[
np.array_equal(self.table, other.table),
np.array_equal(self.domain, other.domain),
]
):
return True
return False
def __ne__(self, other: Any) -> bool:
"""
Returns whether the *LUT* is not equal to given other object.
Parameters
----------
other
Object to test whether it is not equal to the *LUT*.
Returns
-------
:class:`bool`
Whether given object is not equal to the *LUT*.
"""
return not (self == other)
def __add__(
self, a: Union[FloatingOrArrayLike, AbstractLUT]
) -> AbstractLUT:
"""
Implements support for addition.
Parameters
----------
a
:math:`a` variable to add.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
Variable added *LUT*.
"""
return self.arithmetical_operation(a, "+")
def __iadd__(
self, a: Union[FloatingOrArrayLike, AbstractLUT]
) -> AbstractLUT:
"""
Implements support for in-place addition.
Parameters
----------
a
:math:`a` variable to add in-place.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
In-place variable added *LUT*.
"""
return self.arithmetical_operation(a, "+", True)
def __sub__(
self, a: Union[FloatingOrArrayLike, AbstractLUT]
) -> AbstractLUT:
"""
Implements support for subtraction.
Parameters
----------
a
:math:`a` variable to subtract.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
Variable subtracted *LUT*.
"""
return self.arithmetical_operation(a, "-")
def __isub__(
self, a: Union[FloatingOrArrayLike, AbstractLUT]
) -> AbstractLUT:
"""
Implements support for in-place subtraction.
Parameters
----------
a
:math:`a` variable to subtract in-place.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
In-place variable subtracted *LUT*.
"""
return self.arithmetical_operation(a, "-", True)
def __mul__(
self, a: Union[FloatingOrArrayLike, AbstractLUT]
) -> AbstractLUT:
"""
Implements support for multiplication.
Parameters
----------
a
:math:`a` variable to multiply by.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
Variable multiplied *LUT*.
"""
return self.arithmetical_operation(a, "*")
def __imul__(
self, a: Union[FloatingOrArrayLike, AbstractLUT]
) -> AbstractLUT:
"""
Implements support for in-place multiplication.
Parameters
----------
a
:math:`a` variable to multiply by in-place.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
In-place variable multiplied *LUT*.
"""
return self.arithmetical_operation(a, "*", True)
def __div__(
self, a: Union[FloatingOrArrayLike, AbstractLUT]
) -> AbstractLUT:
"""
Implements support for division.
Parameters
----------
a
:math:`a` variable to divide by.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
Variable divided *LUT*.
"""
return self.arithmetical_operation(a, "/")
def __idiv__(
self, a: Union[FloatingOrArrayLike, AbstractLUT]
) -> AbstractLUT:
"""
Implements support for in-place division.
Parameters
----------
a
:math:`a` variable to divide by in-place.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
In-place variable divided *LUT*.
"""
return self.arithmetical_operation(a, "/", True)
__itruediv__ = __idiv__
__truediv__ = __div__
def __pow__(
self, a: Union[FloatingOrArrayLike, AbstractLUT]
) -> AbstractLUT:
"""
Implements support for exponentiation.
Parameters
----------
a
:math:`a` variable to exponentiate by.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
Variable exponentiated *LUT*.
"""
return self.arithmetical_operation(a, "**")
def __ipow__(
self, a: Union[FloatingOrArrayLike, AbstractLUT]
) -> AbstractLUT:
"""
Implements support for in-place exponentiation.
Parameters
----------
a
:math:`a` variable to exponentiate by in-place.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
In-place variable exponentiated *LUT*.
"""
return self.arithmetical_operation(a, "**", True)
def arithmetical_operation(
self,
a: Union[FloatingOrArrayLike, AbstractLUT],
operation: Literal["+", "-", "*", "/", "**"],
in_place: Boolean = False,
) -> AbstractLUT:
"""
Performs given arithmetical operation with :math:`a` operand, the
operation can be either performed on a copy or in-place, must be
reimplemented by sub-classes.
Parameters
----------
a
Operand.
operation
Operation to perform.
in_place
Operation happens in place.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
*LUT*.
"""
operator, ioperator = {
"+": (add, iadd),
"-": (sub, isub),
"*": (mul, imul),
"/": (truediv, itruediv),
"**": (pow, ipow),
}[operation]
if in_place:
if isinstance(a, AbstractLUT):
operand = a.table
else:
operand = as_float_array(a)
self.table = operator(self.table, operand)
return self
else:
copy = ioperator(self.copy(), a)
return copy
@abstractmethod
def _validate_table(self, table: ArrayLike) -> NDArray:
"""
Validates given table according to *LUT* dimensions.
Parameters
----------
table
Table to validate.
Returns
-------
:class:`numpy.ndarray`
Validated table as a :class:`ndarray` instance.
"""
pass
@abstractmethod
def _validate_domain(self, domain: ArrayLike) -> NDArray:
"""
Validates given domain according to *LUT* dimensions.
Parameters
----------
domain
Domain to validate.
Returns
-------
:class:`numpy.ndarray`
Validated domain as a :class:`ndarray` instance.
"""
pass
@abstractmethod
def is_domain_explicit(self) -> Boolean:
"""
Returns whether the *LUT* domain is explicit (or implicit).
An implicit domain is defined by its shape only::
[[0 1]
[0 1]
[0 1]]
While an explicit domain defines every single discrete samples::
[[0.0 0.0 0.0]
[0.1 0.1 0.1]
[0.2 0.2 0.2]
[0.3 0.3 0.3]
[0.4 0.4 0.4]
[0.8 0.8 0.8]
[1.0 1.0 1.0]]
Returns
-------
:class:`bool`
Is *LUT* domain explicit.
"""
pass
@staticmethod
@abstractmethod
def linear_table(
size: Optional[IntegerOrArrayLike] = None,
domain: Optional[ArrayLike] = None,
) -> NDArray:
"""
Returns a linear table of given size according to *LUT* dimensions.
Parameters
----------
size
Expected table size, for a 1D *LUT*, the number of output samples
:math:`n` is equal to ``size``, for a 3x1D *LUT* :math:`n` is equal
to ``size * 3`` or ``size[0] + size[1] + size[2]``, for a 3D *LUT*
:math:`n` is equal to ``size**3 * 3`` or
``size[0] * size[1] * size[2] * 3``.
domain
Domain of the table.
Returns
-------
:class:`numpy.ndarray`
Linear table.
"""
pass
def copy(self) -> AbstractLUT:
"""
Returns a copy of the sub-class instance.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
*LUT* copy.
"""
return deepcopy(self)
@abstractmethod
def invert(self, **kwargs: Any) -> AbstractLUT:
"""
Computes and returns an inverse copy of the *LUT*.
Other Parameters
----------------
kwargs
Keywords arguments.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
Inverse *LUT* class instance.
"""
pass
@abstractmethod
def apply(self, RGB: ArrayLike, **kwargs: Any) -> NDArray:
"""
Applies the *LUT* to given *RGB* colourspace array using given method.
Parameters
----------
RGB
*RGB* colourspace array to apply the *LUT* onto.
Other Parameters
----------------
direction
Whether the *LUT* should be applied in the forward or inverse
direction.
extrapolator
Extrapolator class type or object to use as extrapolating function.
extrapolator_kwargs
Arguments to use when instantiating or calling the extrapolating
function.
interpolator
Interpolator class type or object to use as interpolating function.
interpolator_kwargs
Arguments to use when instantiating or calling the interpolating
function.
Returns
-------
:class:`numpy.ndarray`
Interpolated *RGB* colourspace array.
"""
pass
@abstractmethod
def as_LUT(
self,
cls: Type[AbstractLUT],
force_conversion: Boolean = False,
**kwargs: Any,
) -> AbstractLUT:
"""
Converts the *LUT* to given ``cls`` class instance.
Parameters
----------
cls
*LUT* class instance.
force_conversion
Whether to force the conversion as it might be destructive.
Other Parameters
----------------
interpolator
Interpolator class type to use as interpolating function.
interpolator_kwargs
Arguments to use when instantiating the interpolating function.
size
Expected table size in case of an upcast to or a downcast from a
:class:`LUT3D` class instance.
Returns
-------
:class:`colour.io.luts.lut.AbstractLUT`
Converted *LUT* class instance.
Warnings
--------
Some conversions are destructive and raise a :class:`ValueError`
exception by default.
Raises
------
ValueError
If the conversion is destructive.
"""
pass
class LUT1D(AbstractLUT):
"""
Defines the base class for a 1D *LUT*.
Parameters
----------
table
Underlying *LUT* table.
name
*LUT* name.
domain
*LUT* domain, also used to define the instantiation time default table
domain.
size
Size of the instantiation time default table, default to 10.
comments
Comments to add to the *LUT*.
Methods
-------
- :meth:`~colour.LUT1D.__init__`
- :meth:`~colour.LUT1D.is_domain_explicit`
- :meth:`~colour.LUT1D.linear_table`
- :meth:`~colour.LUT1D.invert`
- :meth:`~colour.LUT1D.apply`
- :meth:`~colour.LUT1D.as_LUT`
Examples
--------
Instantiating a unity LUT with a table with 16 elements:
>>> print(LUT1D(size=16))
LUT1D - Unity 16
----------------
<BLANKLINE>
Dimensions : 1
Domain : [ 0. 1.]
Size : (16,)
Instantiating a LUT using a custom table with 16 elements:
>>> print(LUT1D(LUT1D.linear_table(16) ** (1 / 2.2))) # doctest: +ELLIPSIS
LUT1D - ...
--------...
<BLANKLINE>
Dimensions : 1
Domain : [ 0. 1.]
Size : (16,)
Instantiating a LUT using a custom table with 16 elements, custom name,
custom domain and comments:
>>> from colour.algebra import spow
>>> domain = np.array([-0.1, 1.5])
>>> print(LUT1D(
... spow(LUT1D.linear_table(16, domain), 1 / 2.2),
... 'My LUT',
... domain,
... comments=['A first comment.', 'A second comment.']))
LUT1D - My LUT
--------------
<BLANKLINE>
Dimensions : 1
Domain : [-0.1 1.5]
Size : (16,)
Comment 01 : A first comment.
Comment 02 : A second comment.
"""
def __init__(
self,
table: Optional[ArrayLike] = None,
name: Optional[str] = None,
domain: Optional[ArrayLike] = None,
size: Optional[IntegerOrArrayLike] = None,
comments: Optional[Sequence] = None,
):
domain = as_float_array(
cast(ArrayLike, optional(domain, np.array([0, 1])))
)
size = cast(Integer, optional(size, 10))
super().__init__(table, name, 1, domain, size, comments)
def _validate_table(self, table: ArrayLike) -> NDArray:
"""
Validates given table is a 1D array.
Parameters
----------
table
Table to validate.
Returns
-------
:class:`numpy.ndarray`
Validated table as a :class:`ndarray` instance.
"""
table = as_float_array(table)
attest(len(table.shape) == 1, "The table must be a 1D array!")
return table
def _validate_domain(self, domain: ArrayLike) -> NDArray:
"""
Validates given domain.
Parameters
----------
domain
Domain to validate.
Returns
-------
:class:`numpy.ndarray`
Validated domain as a :class:`ndarray` instance.
"""
domain = as_float_array(domain)
attest(len(domain.shape) == 1, "The domain must be a 1D array!")
attest(
domain.shape[0] >= 2,
"The domain column count must be equal or greater than 2!",
)
return domain
def is_domain_explicit(self) -> Boolean:
"""
Returns whether the *LUT* domain is explicit (or implicit).
An implicit domain is defined by its shape only::
[0 1]
While an explicit domain defines every single discrete samples::
[0.0 0.1 0.2 0.4 0.8 1.0]
Returns
-------
:class:`bool`
Is *LUT* domain explicit.
Examples
--------
>>> LUT1D().is_domain_explicit()
False
>>> table = domain = np.linspace(0, 1, 10)
>>> LUT1D(table, domain=domain).is_domain_explicit()
True
"""
return len(self.domain) != 2
@staticmethod
def linear_table(
size: Optional[IntegerOrArrayLike] = None,
domain: Optional[ArrayLike] = None,
) -> NDArray:
"""
        Returns a linear table; the number of output samples :math:`n` is equal
        to ``size``.
Parameters
----------
size
Expected table size, default to 10.
domain
Domain of the table.
Returns
-------
:class:`numpy.ndarray`
Linear table with ``size`` samples.
Examples
--------
>>> LUT1D.linear_table(5, np.array([-0.1, 1.5]))
array([-0.1, 0.3, 0.7, 1.1, 1.5])
>>> LUT1D.linear_table(domain=np.linspace(-0.1, 1.5, 5))
array([-0.1, 0.3, 0.7, 1.1, 1.5])
"""
size = cast(Integer, optional(size, 10))
domain = as_float_array(
cast(ArrayLike, optional(domain, np.array([0, 1])))
)
if len(domain) != 2:
return domain
else:
attest(is_numeric(size), "Linear table size must be a numeric!")
return np.linspace(domain[0], domain[1], as_int_scalar(size))
def invert(self, **kwargs: Any) -> LUT1D:
"""
Computes and returns an inverse copy of the *LUT*.
Other Parameters
----------------
kwargs
Keywords arguments, only given for signature compatibility with
the :meth:`AbstractLUT.invert` method.
Returns
-------
:class:`colour.LUT1D`
Inverse *LUT* class instance.
Examples
--------
>>> LUT = LUT1D(LUT1D.linear_table() ** (1 / 2.2))
>>> print(LUT.table) # doctest: +ELLIPSIS
[ 0. ... 0.3683438... 0.5047603... 0.6069133... \
0.6916988... 0.7655385...
0.8316843... 0.8920493... 0.9478701... 1. ]
>>> print(LUT.invert()) # doctest: +ELLIPSIS
LUT1D - ... - Inverse
--------...----------
<BLANKLINE>
Dimensions : 1
Domain : [ 0. 0.3683438... 0.5047603... 0.6069133... \
0.6916988... 0.7655385...
0.8316843... 0.8920493... 0.9478701... 1. ]
Size : (10,)
>>> print(LUT.invert().table) # doctest: +ELLIPSIS
[ 0. ... 0.1111111... 0.2222222... 0.3333333... \
0.4444444... 0.5555555...
0.6666666... 0.7777777... 0.8888888... 1. ]
"""
if self.is_domain_explicit():
domain = self.domain
else:
domain_min, domain_max = self.domain
domain = np.linspace(domain_min, domain_max, self.size)
LUT_i = LUT1D(
table=domain,
name=f"{self.name} - Inverse",
domain=self.table,
)
return LUT_i
def apply(self, RGB: ArrayLike, **kwargs: Any) -> NDArray:
"""
Applies the *LUT* to given *RGB* colourspace array using given method.
Parameters
----------
RGB
*RGB* colourspace array to apply the *LUT* onto.
Other Parameters
----------------
direction
Whether the *LUT* should be applied in the forward or inverse
direction.
extrapolator
Extrapolator class type or object to use as extrapolating function.
extrapolator_kwargs
Arguments to use when instantiating or calling the extrapolating
function.
interpolator
Interpolator class type to use as interpolating function.
interpolator_kwargs
Arguments to use when instantiating the interpolating function.
Returns
-------
:class:`numpy.ndarray`
Interpolated *RGB* colourspace array.
Examples
--------
>>> LUT = LUT1D(LUT1D.linear_table() ** (1 / 2.2))
>>> RGB = np.array([0.18, 0.18, 0.18])
*LUT* applied to the given *RGB* colourspace in the forward direction:
>>> LUT.apply(RGB) # doctest: +ELLIPSIS
array([ 0.4529220..., 0.4529220..., 0.4529220...])
*LUT* applied to the modified *RGB* colourspace in the inverse
direction:
>>> LUT.apply(LUT.apply(RGB), direction='Inverse')
... # doctest: +ELLIPSIS
array([ 0.18..., 0.18..., 0.18...])
"""
direction = validate_method(
kwargs.get("direction", "Forward"), ["Forward", "Inverse"]
)
interpolator = kwargs.get("interpolator", LinearInterpolator)
interpolator_kwargs = kwargs.get("interpolator_kwargs", {})
extrapolator = kwargs.get("extrapolator", Extrapolator)
extrapolator_kwargs = kwargs.get("extrapolator_kwargs", {})
LUT = self.invert() if direction == "inverse" else self
if LUT.is_domain_explicit():
samples = LUT.domain
else:
domain_min, domain_max = LUT.domain
samples = np.linspace(domain_min, domain_max, LUT.size)
RGB_interpolator = extrapolator(
interpolator(samples, LUT.table, **interpolator_kwargs),
**extrapolator_kwargs,
)
return RGB_interpolator(RGB)
def as_LUT(
self,
cls: Type[AbstractLUT],
force_conversion: Boolean = False,
**kwargs: Any,
) -> AbstractLUT:
"""
Converts the *LUT* to given ``cls`` class instance.
Parameters
----------
cls
*LUT* class instance.
force_conversion
Whether to force the conversion as it might be destructive.
Other Parameters
----------------
interpolator
Interpolator class type to use as interpolating function.
interpolator_kwargs
Arguments to use when instantiating the interpolating function.
size
Expected table size in case of an upcast to a :class:`LUT3D` class
instance.
Returns
-------
:class:`colour.LUT1D` or :class:`colour.LUT3x1D` or \
:class:`colour.LUT3D`
Converted *LUT* class instance.
Warnings
--------
Some conversions are destructive and raise a :class:`ValueError`
exception by default.
Raises
------
ValueError
If the conversion is destructive.
Examples
--------
>>> LUT = LUT1D()
>>> print(LUT.as_LUT(LUT1D))
LUT1D - Unity 10 - Converted 1D to 1D
-------------------------------------
<BLANKLINE>
Dimensions : 1
Domain : [ 0. 1.]
Size : (10,)
>>> print(LUT.as_LUT(LUT3x1D))
LUT3x1D - Unity 10 - Converted 1D to 3x1D
-----------------------------------------
<BLANKLINE>
Dimensions : 2
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (10, 3)
>>> print(LUT.as_LUT(LUT3D, force_conversion=True))
LUT3D - Unity 10 - Converted 1D to 3D
-------------------------------------
<BLANKLINE>
Dimensions : 3
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (33, 33, 33, 3)
"""
return LUT_to_LUT(self, cls, force_conversion, **kwargs)
class LUT3x1D(AbstractLUT):
"""
Defines the base class for a 3x1D *LUT*.
Parameters
----------
table
Underlying *LUT* table.
name
*LUT* name.
domain
*LUT* domain, also used to define the instantiation time default table
domain.
size
Size of the instantiation time default table, default to 10.
comments
Comments to add to the *LUT*.
Methods
-------
- :meth:`~colour.LUT3x1D.__init__`
- :meth:`~colour.LUT3x1D.is_domain_explicit`
- :meth:`~colour.LUT3x1D.linear_table`
- :meth:`~colour.LUT3x1D.invert`
- :meth:`~colour.LUT3x1D.apply`
- :meth:`~colour.LUT3x1D.as_LUT`
Examples
--------
Instantiating a unity LUT with a table with 16x3 elements:
>>> print(LUT3x1D(size=16))
LUT3x1D - Unity 16
------------------
<BLANKLINE>
Dimensions : 2
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (16, 3)
Instantiating a LUT using a custom table with 16x3 elements:
>>> print(LUT3x1D(LUT3x1D.linear_table(16) ** (1 / 2.2)))
... # doctest: +ELLIPSIS
LUT3x1D - ...
----------...
<BLANKLINE>
Dimensions : 2
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (16, 3)
Instantiating a LUT using a custom table with 16x3 elements, custom name,
custom domain and comments:
>>> from colour.algebra import spow
>>> domain = np.array([[-0.1, -0.2, -0.4], [1.5, 3.0, 6.0]])
>>> print(LUT3x1D(
... spow(LUT3x1D.linear_table(16), 1 / 2.2),
... 'My LUT',
... domain,
... comments=['A first comment.', 'A second comment.']))
LUT3x1D - My LUT
----------------
<BLANKLINE>
Dimensions : 2
Domain : [[-0.1 -0.2 -0.4]
[ 1.5 3. 6. ]]
Size : (16, 3)
Comment 01 : A first comment.
Comment 02 : A second comment.
"""
def __init__(
self,
table: Optional[ArrayLike] = None,
name: Optional[str] = None,
domain: Optional[ArrayLike] = None,
size: Optional[IntegerOrArrayLike] = None,
comments: Optional[Sequence] = None,
):
domain = cast(
ArrayLike, optional(domain, np.array([[0, 0, 0], [1, 1, 1]]))
)
size = cast(Integer, optional(size, 10))
super().__init__(table, name, 2, domain, size, comments)
def _validate_table(self, table: ArrayLike) -> NDArray:
"""
Validates given table is a 3x1D array.
Parameters
----------
table
Table to validate.
Returns
-------
:class:`numpy.ndarray`
Validated table as a :class:`ndarray` instance.
"""
table = as_float_array(table)
attest(len(table.shape) == 2, "The table must be a 2D array!")
return table
def _validate_domain(self, domain: ArrayLike) -> NDArray:
"""
Validates given domain.
Parameters
----------
domain
Domain to validate.
Returns
-------
:class:`numpy.ndarray`
Validated domain as a :class:`ndarray` instance.
"""
domain = as_float_array(domain)
attest(len(domain.shape) == 2, "The domain must be a 2D array!")
attest(
domain.shape[0] >= 2,
"The domain row count must be equal or greater than 2!",
)
attest(
domain.shape[1] == 3, "The domain column count must be equal to 3!"
)
return domain
def is_domain_explicit(self) -> Boolean:
"""
Returns whether the *LUT* domain is explicit (or implicit).
An implicit domain is defined by its shape only::
[[0 1]
[0 1]
[0 1]]
While an explicit domain defines every single discrete samples::
[[0.0 0.0 0.0]
[0.1 0.1 0.1]
[0.2 0.2 0.2]
[0.3 0.3 0.3]
[0.4 0.4 0.4]
[0.8 0.8 0.8]
[1.0 1.0 1.0]]
Returns
-------
:class:`bool`
Is *LUT* domain explicit.
Examples
--------
>>> LUT3x1D().is_domain_explicit()
False
>>> samples = np.linspace(0, 1, 10)
>>> table = domain = tstack([samples, samples, samples])
>>> LUT3x1D(table, domain=domain).is_domain_explicit()
True
"""
return self.domain.shape != (2, 3)
@staticmethod
def linear_table(
size: Optional[IntegerOrArrayLike] = None,
domain: Optional[ArrayLike] = None,
) -> NDArray:
"""
        Returns a linear table; the number of output samples :math:`n` is equal
        to ``size * 3`` or ``size[0] + size[1] + size[2]``.
Parameters
----------
size
Expected table size, default to 10.
domain
Domain of the table.
Returns
-------
:class:`numpy.ndarray`
Linear table with ``size * 3`` or ``size[0] + size[1] + size[2]``
samples.
Warnings
--------
If ``size`` is non uniform, the linear table will be padded
accordingly.
Examples
--------
>>> LUT3x1D.linear_table(
... 5, np.array([[-0.1, -0.2, -0.4], [1.5, 3.0, 6.0]]))
array([[-0.1, -0.2, -0.4],
[ 0.3, 0.6, 1.2],
[ 0.7, 1.4, 2.8],
[ 1.1, 2.2, 4.4],
[ 1.5, 3. , 6. ]])
>>> LUT3x1D.linear_table(
... np.array([5, 3, 2]),
... np.array([[-0.1, -0.2, -0.4], [1.5, 3.0, 6.0]]))
array([[-0.1, -0.2, -0.4],
[ 0.3, 1.4, 6. ],
[ 0.7, 3. , nan],
[ 1.1, nan, nan],
[ 1.5, nan, nan]])
>>> domain = np.array([[-0.1, -0.2, -0.4],
... [0.3, 1.4, 6.0],
... [0.7, 3.0, np.nan],
... [1.1, np.nan, np.nan],
... [1.5, np.nan, np.nan]])
>>> LUT3x1D.linear_table(domain=domain)
array([[-0.1, -0.2, -0.4],
[ 0.3, 1.4, 6. ],
[ 0.7, 3. , nan],
[ 1.1, nan, nan],
[ 1.5, nan, nan]])
"""
size = cast(Integer, optional(size, 10))
domain = as_float_array(
cast(ArrayLike, optional(domain, np.array([[0, 0, 0], [1, 1, 1]])))
)
if domain.shape != (2, 3):
return domain
else:
if is_numeric(size):
size_array = np.tile(size, 3)
else:
size_array = as_int_array(size)
R, G, B = tsplit(domain)
samples = [
np.linspace(a[0], a[1], size_array[i])
for i, a in enumerate([R, G, B])
]
if not len(np.unique(size_array)) == 1:
runtime_warning(
"Table is non uniform, axis will be "
'padded with "NaNs" accordingly!'
)
samples = [
np.pad(
axis,
(0, np.max(size_array) - len(axis)),
mode="constant",
constant_values=np.nan,
)
for axis in samples
]
return tstack(samples)
def invert(self, **kwargs: Any) -> LUT3x1D:
"""
Computes and returns an inverse copy of the *LUT*.
Other Parameters
----------------
kwargs
Keywords arguments, only given for signature compatibility with
the :meth:`AbstractLUT.invert` method.
Returns
-------
:class:`colour.LUT3x1D`
Inverse *LUT* class instance.
Examples
--------
>>> LUT = LUT3x1D(LUT3x1D.linear_table() ** (1 / 2.2))
>>> print(LUT.table)
[[ 0. 0. 0. ]
[ 0.36834383 0.36834383 0.36834383]
[ 0.50476034 0.50476034 0.50476034]
[ 0.60691337 0.60691337 0.60691337]
[ 0.69169882 0.69169882 0.69169882]
[ 0.76553851 0.76553851 0.76553851]
[ 0.83168433 0.83168433 0.83168433]
[ 0.89204934 0.89204934 0.89204934]
[ 0.94787016 0.94787016 0.94787016]
[ 1. 1. 1. ]]
>>> print(LUT.invert()) # doctest: +ELLIPSIS
LUT3x1D - ... - Inverse
----------...----------
<BLANKLINE>
Dimensions : 2
Domain : [[ 0. ... 0. ... 0. ...]
[ 0.3683438... 0.3683438... 0.3683438...]
[ 0.5047603... 0.5047603... 0.5047603...]
[ 0.6069133... 0.6069133... 0.6069133...]
[ 0.6916988... 0.6916988... 0.6916988...]
[ 0.7655385... 0.7655385... 0.7655385...]
[ 0.8316843... 0.8316843... 0.8316843...]
[ 0.8920493... 0.8920493... 0.8920493...]
[ 0.9478701... 0.9478701... 0.9478701...]
[ 1. ... 1. ... 1. ...]]
Size : (10, 3)
>>> print(LUT.invert().table) # doctest: +ELLIPSIS
[[ 0. ... 0. ... 0. ...]
[ 0.1111111... 0.1111111... 0.1111111...]
[ 0.2222222... 0.2222222... 0.2222222...]
[ 0.3333333... 0.3333333... 0.3333333...]
[ 0.4444444... 0.4444444... 0.4444444...]
[ 0.5555555... 0.5555555... 0.5555555...]
[ 0.6666666... 0.6666666... 0.6666666...]
[ 0.7777777... 0.7777777... 0.7777777...]
[ 0.8888888... 0.8888888... 0.8888888...]
[ 1. ... 1. ... 1. ...]]
"""
size = self.table.size // 3
if self.is_domain_explicit():
domain = [
axes[: (~np.isnan(axes)).cumsum().argmax() + 1]
for axes in np.transpose(self.domain)
]
else:
domain_min, domain_max = self.domain
domain = [
np.linspace(domain_min[i], domain_max[i], size)
for i in range(3)
]
LUT_i = LUT3x1D(
table=tstack(domain),
name=f"{self.name} - Inverse",
domain=self.table,
)
return LUT_i
def apply(self, RGB: ArrayLike, **kwargs: Any) -> NDArray:
"""
Applies the *LUT* to given *RGB* colourspace array using given method.
Parameters
----------
RGB
*RGB* colourspace array to apply the *LUT* onto.
Other Parameters
----------------
direction
Whether the *LUT* should be applied in the forward or inverse
direction.
extrapolator
Extrapolator class type or object to use as extrapolating function.
extrapolator_kwargs
Arguments to use when instantiating or calling the extrapolating
function.
interpolator
Interpolator class type to use as interpolating function.
interpolator_kwargs
Arguments to use when instantiating the interpolating function.
Returns
-------
:class:`numpy.ndarray`
Interpolated *RGB* colourspace array.
Examples
--------
>>> LUT = LUT3x1D(LUT3x1D.linear_table() ** (1 / 2.2))
>>> RGB = np.array([0.18, 0.18, 0.18])
>>> LUT.apply(RGB) # doctest: +ELLIPSIS
array([ 0.4529220..., 0.4529220..., 0.4529220...])
>>> LUT.apply(LUT.apply(RGB), direction='Inverse')
... # doctest: +ELLIPSIS
array([ 0.18..., 0.18..., 0.18...])
>>> from colour.algebra import spow
>>> domain = np.array([[-0.1, -0.2, -0.4], [1.5, 3.0, 6.0]])
>>> table = spow(LUT3x1D.linear_table(domain=domain), 1 / 2.2)
>>> LUT = LUT3x1D(table, domain=domain)
>>> RGB = np.array([0.18, 0.18, 0.18])
>>> LUT.apply(RGB) # doctest: +ELLIPSIS
array([ 0.4423903..., 0.4503801..., 0.3581625...])
>>> domain = np.array([[-0.1, -0.2, -0.4],
... [0.3, 1.4, 6.0],
... [0.7, 3.0, np.nan],
... [1.1, np.nan, np.nan],
... [1.5, np.nan, np.nan]])
>>> table = spow(LUT3x1D.linear_table(domain=domain), 1 / 2.2)
>>> LUT = LUT3x1D(table, domain=domain)
>>> RGB = np.array([0.18, 0.18, 0.18])
>>> LUT.apply(RGB) # doctest: +ELLIPSIS
array([ 0.2996370..., -0.0901332..., -0.3949770...])
"""
direction = validate_method(
kwargs.get("direction", "Forward"), ["Forward", "Inverse"]
)
interpolator = kwargs.get("interpolator", LinearInterpolator)
interpolator_kwargs = kwargs.get("interpolator_kwargs", {})
extrapolator = kwargs.get("extrapolator", Extrapolator)
extrapolator_kwargs = kwargs.get("extrapolator_kwargs", {})
R, G, B = tsplit(RGB)
LUT = self.invert() if direction == "inverse" else self
size = LUT.table.size // 3
if LUT.is_domain_explicit():
samples = [
axes[: (~np.isnan(axes)).cumsum().argmax() + 1]
for axes in np.transpose(LUT.domain)
]
R_t, G_t, B_t = (
axes[: len(samples[i])]
for i, axes in enumerate(np.transpose(LUT.table))
)
else:
domain_min, domain_max = LUT.domain
samples = [
np.linspace(domain_min[i], domain_max[i], size)
for i in range(3)
]
R_t, G_t, B_t = tsplit(LUT.table)
s_R, s_G, s_B = samples
RGB_i = [
extrapolator(
interpolator(a[0], a[1], **interpolator_kwargs),
**extrapolator_kwargs,
)(a[2])
for a in zip((s_R, s_G, s_B), (R_t, G_t, B_t), (R, G, B))
]
return tstack(RGB_i)
def as_LUT(
self,
cls: Type[AbstractLUT],
force_conversion: Boolean = False,
**kwargs: Any,
) -> AbstractLUT:
"""
Converts the *LUT* to given ``cls`` class instance.
Parameters
----------
cls
*LUT* class instance.
force_conversion
Whether to force the conversion as it might be destructive.
Other Parameters
----------------
interpolator
Interpolator class type to use as interpolating function.
interpolator_kwargs
Arguments to use when instantiating the interpolating function.
size
Expected table size in case of an upcast to a :class:`LUT3D` class
instance.
Returns
-------
:class:`colour.LUT1D` or :class:`colour.LUT3x1D` or \
:class:`colour.LUT3D`
Converted *LUT* class instance.
Warnings
--------
Some conversions are destructive and raise a :class:`ValueError`
exception by default.
Raises
------
ValueError
If the conversion is destructive.
Examples
--------
>>> LUT = LUT3x1D()
>>> print(LUT.as_LUT(LUT1D, force_conversion=True))
LUT1D - Unity 10 - Converted 3x1D to 1D
---------------------------------------
<BLANKLINE>
Dimensions : 1
Domain : [ 0. 1.]
Size : (10,)
>>> print(LUT.as_LUT(LUT3x1D))
LUT3x1D - Unity 10 - Converted 3x1D to 3x1D
-------------------------------------------
<BLANKLINE>
Dimensions : 2
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (10, 3)
>>> print(LUT.as_LUT(LUT3D, force_conversion=True))
LUT3D - Unity 10 - Converted 3x1D to 3D
---------------------------------------
<BLANKLINE>
Dimensions : 3
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (33, 33, 33, 3)
"""
return LUT_to_LUT(self, cls, force_conversion, **kwargs)
class LUT3D(AbstractLUT):
"""
Defines the base class for a 3D *LUT*.
Parameters
----------
table
Underlying *LUT* table.
name
*LUT* name.
domain
*LUT* domain, also used to define the instantiation time default table
domain.
size
Size of the instantiation time default table, default to 33.
comments
Comments to add to the *LUT*.
Methods
-------
- :meth:`~colour.LUT3D.__init__`
- :meth:`~colour.LUT3D.is_domain_explicit`
- :meth:`~colour.LUT3D.linear_table`
- :meth:`~colour.LUT3D.invert`
- :meth:`~colour.LUT3D.apply`
- :meth:`~colour.LUT3D.as_LUT`
Examples
--------
Instantiating a unity LUT with a table with 16x16x16x3 elements:
>>> print(LUT3D(size=16))
LUT3D - Unity 16
----------------
<BLANKLINE>
Dimensions : 3
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (16, 16, 16, 3)
Instantiating a LUT using a custom table with 16x16x16x3 elements:
>>> print(LUT3D(LUT3D.linear_table(16) ** (1 / 2.2))) # doctest: +ELLIPSIS
LUT3D - ...
--------...
<BLANKLINE>
Dimensions : 3
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (16, 16, 16, 3)
Instantiating a LUT using a custom table with 16x16x16x3 elements, custom
name, custom domain and comments:
>>> from colour.algebra import spow
>>> domain = np.array([[-0.1, -0.2, -0.4], [1.5, 3.0, 6.0]])
>>> print(LUT3D(
... spow(LUT3D.linear_table(16), 1 / 2.2),
... 'My LUT',
... domain,
... comments=['A first comment.', 'A second comment.']))
LUT3D - My LUT
--------------
<BLANKLINE>
Dimensions : 3
Domain : [[-0.1 -0.2 -0.4]
[ 1.5 3. 6. ]]
Size : (16, 16, 16, 3)
Comment 01 : A first comment.
Comment 02 : A second comment.
"""
def __init__(
self,
table: Optional[ArrayLike] = None,
name: Optional[str] = None,
domain: Optional[ArrayLike] = None,
size: Optional[IntegerOrArrayLike] = None,
comments: Optional[Sequence] = None,
):
domain = cast(
ArrayLike, optional(domain, np.array([[0, 0, 0], [1, 1, 1]]))
)
size = cast(Integer, optional(size, 33))
super().__init__(table, name, 3, domain, size, comments)
def _validate_table(self, table: ArrayLike) -> NDArray:
"""
Validates given table is a 4D array and that its dimensions are equal.
Parameters
----------
table
Table to validate.
Returns
-------
:class:`numpy.ndarray`
Validated table as a :class:`ndarray` instance.
"""
table = as_float_array(table)
attest(len(table.shape) == 4, "The table must be a 4D array!")
return table
def _validate_domain(self, domain: ArrayLike) -> NDArray:
"""
Validates given domain.
Parameters
----------
domain
Domain to validate.
Returns
-------
:class:`numpy.ndarray`
Validated domain as a :class:`ndarray` instance.
Notes
-----
- A :class:`LUT3D` class instance must use an implicit domain.
"""
domain = as_float_array(domain)
attest(len(domain.shape) == 2, "The domain must be a 2D array!")
attest(
domain.shape[0] >= 2,
"The domain row count must be equal or greater than 2!",
)
attest(
domain.shape[1] == 3, "The domain column count must be equal to 3!"
)
return domain
def is_domain_explicit(self) -> Boolean:
"""
Returns whether the *LUT* domain is explicit (or implicit).
An implicit domain is defined by its shape only::
[[0 0 0]
[1 1 1]]
While an explicit domain defines every single discrete samples::
[[0.0 0.0 0.0]
[0.1 0.1 0.1]
[0.2 0.2 0.2]
[0.3 0.3 0.3]
[0.4 0.4 0.4]
[0.8 0.8 0.8]
[1.0 1.0 1.0]]
Returns
-------
:class:`bool`
Is *LUT* domain explicit.
Examples
--------
>>> LUT3D().is_domain_explicit()
False
>>> domain = np.array([[-0.1, -0.2, -0.4],
... [0.7, 1.4, 6.0],
... [1.5, 3.0, np.nan]])
>>> LUT3D(domain=domain).is_domain_explicit()
True
"""
return self.domain.shape != (2, 3)
@staticmethod
def linear_table(
size: Optional[IntegerOrArrayLike] = None,
domain: Optional[ArrayLike] = None,
) -> NDArray:
"""
Returns a linear table, the number of output samples :math:`n` is equal
to ``size**3 * 3`` or ``size[0] * size[1] * size[2] * 3``.
Parameters
----------
size
Expected table size, default to 33.
domain
Domain of the table.
Returns
-------
:class:`numpy.ndarray`
Linear table with ``size**3 * 3`` or
``size[0] * size[1] * size[2] * 3`` samples.
Examples
--------
>>> LUT3D.linear_table(
... 3, np.array([[-0.1, -0.2, -0.4], [1.5, 3.0, 6.0]]))
array([[[[-0.1, -0.2, -0.4],
[-0.1, -0.2, 2.8],
[-0.1, -0.2, 6. ]],
<BLANKLINE>
[[-0.1, 1.4, -0.4],
[-0.1, 1.4, 2.8],
[-0.1, 1.4, 6. ]],
<BLANKLINE>
[[-0.1, 3. , -0.4],
[-0.1, 3. , 2.8],
[-0.1, 3. , 6. ]]],
<BLANKLINE>
<BLANKLINE>
[[[ 0.7, -0.2, -0.4],
[ 0.7, -0.2, 2.8],
[ 0.7, -0.2, 6. ]],
<BLANKLINE>
[[ 0.7, 1.4, -0.4],
[ 0.7, 1.4, 2.8],
[ 0.7, 1.4, 6. ]],
<BLANKLINE>
[[ 0.7, 3. , -0.4],
[ 0.7, 3. , 2.8],
[ 0.7, 3. , 6. ]]],
<BLANKLINE>
<BLANKLINE>
[[[ 1.5, -0.2, -0.4],
[ 1.5, -0.2, 2.8],
[ 1.5, -0.2, 6. ]],
<BLANKLINE>
[[ 1.5, 1.4, -0.4],
[ 1.5, 1.4, 2.8],
[ 1.5, 1.4, 6. ]],
<BLANKLINE>
[[ 1.5, 3. , -0.4],
[ 1.5, 3. , 2.8],
[ 1.5, 3. , 6. ]]]])
>>> LUT3D.linear_table(
... np.array([3, 3, 2]),
... np.array([[-0.1, -0.2, -0.4], [1.5, 3.0, 6.0]]))
array([[[[-0.1, -0.2, -0.4],
[-0.1, -0.2, 6. ]],
<BLANKLINE>
[[-0.1, 1.4, -0.4],
[-0.1, 1.4, 6. ]],
<BLANKLINE>
[[-0.1, 3. , -0.4],
[-0.1, 3. , 6. ]]],
<BLANKLINE>
<BLANKLINE>
[[[ 0.7, -0.2, -0.4],
[ 0.7, -0.2, 6. ]],
<BLANKLINE>
[[ 0.7, 1.4, -0.4],
[ 0.7, 1.4, 6. ]],
<BLANKLINE>
[[ 0.7, 3. , -0.4],
[ 0.7, 3. , 6. ]]],
<BLANKLINE>
<BLANKLINE>
[[[ 1.5, -0.2, -0.4],
[ 1.5, -0.2, 6. ]],
<BLANKLINE>
[[ 1.5, 1.4, -0.4],
[ 1.5, 1.4, 6. ]],
<BLANKLINE>
[[ 1.5, 3. , -0.4],
[ 1.5, 3. , 6. ]]]])
>>> domain = np.array([[-0.1, -0.2, -0.4],
... [0.7, 1.4, 6.0],
... [1.5, 3.0, np.nan]])
>>> LUT3D.linear_table(domain=domain)
array([[[[-0.1, -0.2, -0.4],
[-0.1, -0.2, 6. ]],
<BLANKLINE>
[[-0.1, 1.4, -0.4],
[-0.1, 1.4, 6. ]],
<BLANKLINE>
[[-0.1, 3. , -0.4],
[-0.1, 3. , 6. ]]],
<BLANKLINE>
<BLANKLINE>
[[[ 0.7, -0.2, -0.4],
[ 0.7, -0.2, 6. ]],
<BLANKLINE>
[[ 0.7, 1.4, -0.4],
[ 0.7, 1.4, 6. ]],
<BLANKLINE>
[[ 0.7, 3. , -0.4],
[ 0.7, 3. , 6. ]]],
<BLANKLINE>
<BLANKLINE>
[[[ 1.5, -0.2, -0.4],
[ 1.5, -0.2, 6. ]],
<BLANKLINE>
[[ 1.5, 1.4, -0.4],
[ 1.5, 1.4, 6. ]],
<BLANKLINE>
[[ 1.5, 3. , -0.4],
[ 1.5, 3. , 6. ]]]])
"""
size = cast(Integer, optional(size, 33))
domain = as_float_array(
cast(ArrayLike, optional(domain, np.array([[0, 0, 0], [1, 1, 1]])))
)
if domain.shape != (2, 3):
samples = list(
np.flip(
[
axes[: (~np.isnan(axes)).cumsum().argmax() + 1]
for axes in np.transpose(domain)
],
-1,
)
)
size_array = as_int_array([len(axes) for axes in samples])
else:
if is_numeric(size):
size_array = np.tile(size, 3)
else:
size_array = as_int_array(size)
R, G, B = tsplit(domain)
size_array = np.flip(size_array, -1)
samples = [
np.linspace(a[0], a[1], size_array[i])
for i, a in enumerate([B, G, R])
]
table = np.flip(
np.transpose(np.meshgrid(*samples, indexing="ij")).reshape(
np.hstack([np.flip(size_array, -1), 3])
),
-1,
)
return table
@required("Scikit-Learn")
def invert(self, **kwargs: Any) -> LUT3D:
"""
Computes and returns an inverse copy of the *LUT*.
Other Parameters
----------------
extrapolate
Whether to extrapolate the *LUT* when computing its inverse.
Extrapolation is performed by reflecting the *LUT* cube along its 6
faces. Note that the domain is extended beyond [0, 1], thus the
*LUT* might not be handled properly in other software.
interpolator
Interpolator class type or object to use as interpolating function.
query_size
Number of points to query in the KDTree, their mean is computed,
resulting in a smoother result.
size
Size of the inverse *LUT*. With the given implementation, it is
good practice to double the size of the inverse *LUT* to provide a
smoother result. If ``size`` is not given,
:math:`2^{\\sqrt{size_{LUT}} + 1} + 1` will be used instead.
Returns
-------
:class:`colour.LUT3D`
Inverse *LUT* class instance.
Examples
--------
>>> LUT = LUT3D()
>>> print(LUT)
LUT3D - Unity 33
----------------
<BLANKLINE>
Dimensions : 3
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (33, 33, 33, 3)
>>> print(LUT.invert())
LUT3D - Unity 33 - Inverse
--------------------------
<BLANKLINE>
Dimensions : 3
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (108, 108, 108, 3)
"""
# TODO: Drop "sklearn" requirement whenever "Scipy" 1.7 can be
# defined as the minimal version.
from sklearn.neighbors import KDTree
interpolator = kwargs.get(
"interpolator", table_interpolation_trilinear
)
extrapolate = kwargs.get("extrapolate", False)
query_size = kwargs.get("query_size", 3)
LUT = self.copy()
source_size = LUT.size
target_size = kwargs.get(
"size", (as_int(2 ** (np.sqrt(source_size) + 1) + 1))
)
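# With the default 33-point LUT this yields as_int(2 ** (sqrt(33) + 1) + 1) == 108,
# matching the (108, 108, 108, 3) inverse size shown in the docstring example above.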
if target_size > 129:
usage_warning("LUT3D inverse computation time could be excessive!")
if extrapolate:
LUT.table = np.pad(
LUT.table,
[(1, 1), (1, 1), (1, 1), (0, 0)],
"reflect",
reflect_type="odd",
)
LUT.domain[0] -= 1 / (source_size - 1)
LUT.domain[1] += 1 / (source_size - 1)
# "LUT_t" is an intermediate LUT with a size equal to that of the
# final inverse LUT which is usually larger than the input LUT.
# The intent is to smooth the inverse LUT's table by increasing the
# resolution of the KDTree.
LUT_t = LUT3D(size=target_size, domain=LUT.domain)
table = LUT_t.table.reshape(-1, 3)
LUT_t.table = LUT.apply(LUT_t.table, interpolator=interpolator)
tree = KDTree(LUT_t.table.reshape(-1, 3))
# "LUT_q" stores the indexes of the KDTree query, i.e. the closest
# entry of "LUT_t" for any searched table sample.
LUT_q = LUT3D(size=target_size, domain=LUT.domain)
query = tree.query(table, query_size)[-1]
if query_size == 1:
LUT_q.table = table[query].reshape(
[target_size, target_size, target_size, 3]
)
else:
LUT_q.table = np.mean(table[query], axis=-2).reshape(
[target_size, target_size, target_size, 3]
)
# "LUT_i" is the final inverse LUT generated by applying "LUT_q" on
# an identity LUT at the target size.
LUT_i = LUT3D(size=target_size, domain=LUT.domain)
LUT_i.table = LUT_q.apply(LUT_i.table, interpolator=interpolator)
LUT_i.name = f"{self.name} - Inverse"
return LUT_i
def apply(self, RGB: ArrayLike, **kwargs: Any) -> NDArray:
"""
Applies the *LUT* to given *RGB* colourspace array using given method.
Parameters
----------
RGB
*RGB* colourspace array to apply the *LUT* onto.
Other Parameters
----------------
direction
Whether the *LUT* should be applied in the forward or inverse
direction.
extrapolate
Whether to extrapolate the *LUT* when computing its inverse.
Extrapolation is performed by reflecting the *LUT* cube along its 6
faces.
interpolator
Interpolator object to use as interpolating function.
interpolator_kwargs
Arguments to use when calling the interpolating function.
query_size
Number of points to query in the KDTree, their mean is computed,
resulting in a smoother result.
size
Size of the inverse *LUT*. With the given implementation, it is
good practice to double the size of the inverse *LUT* to provide a
smoother result. If ``size`` is not given,
:math:`2^{\\sqrt{size_{LUT}} + 1} + 1` will be used instead.
Returns
-------
:class:`numpy.ndarray`
Interpolated *RGB* colourspace array.
Examples
--------
>>> LUT = LUT3D(LUT3D.linear_table() ** (1 / 2.2))
>>> RGB = np.array([0.18, 0.18, 0.18])
>>> LUT.apply(RGB) # doctest: +ELLIPSIS
array([ 0.4583277..., 0.4583277..., 0.4583277...])
>>> LUT.apply(LUT.apply(RGB), direction='Inverse')
... # doctest: +ELLIPSIS
array([ 0.1781995..., 0.1809414..., 0.1809513...])
>>> from colour.algebra import spow
>>> domain = np.array([[-0.1, -0.2, -0.4],
... [0.3, 1.4, 6.0],
... [0.7, 3.0, np.nan],
... [1.1, np.nan, np.nan],
... [1.5, np.nan, np.nan]])
>>> table = spow(LUT3D.linear_table(domain=domain), 1 / 2.2)
>>> LUT = LUT3D(table, domain=domain)
>>> RGB = np.array([0.18, 0.18, 0.18])
>>> LUT.apply(RGB) # doctest: +ELLIPSIS
array([ 0.2996370..., -0.0901332..., -0.3949770...])
"""
direction = validate_method(
kwargs.get("direction", "Forward"), ["Forward", "Inverse"]
)
interpolator = kwargs.get(
"interpolator", table_interpolation_trilinear
)
interpolator_kwargs = kwargs.get("interpolator_kwargs", {})
R, G, B = tsplit(RGB)
settings = {"interpolator": interpolator}
settings.update(**kwargs)
LUT = self.invert(**settings) if direction == "inverse" else self
if LUT.is_domain_explicit():
domain_min = LUT.domain[0, ...]
domain_max = [
axes[: (~np.isnan(axes)).cumsum().argmax() + 1][-1]
for axes in np.transpose(LUT.domain)
]
usage_warning(
f'"LUT" was defined with an explicit domain but requires an '
f"implicit domain to be applied. The following domain will be "
f"used: {np.vstack([domain_min, domain_max])}"
)
else:
domain_min, domain_max = LUT.domain
RGB_l = [
linear_conversion(j, (domain_min[i], domain_max[i]), (0, 1))
for i, j in enumerate((R, G, B))
]
RGB_i = interpolator(tstack(RGB_l), LUT.table, **interpolator_kwargs)
return RGB_i
def as_LUT(
self,
cls: Type[AbstractLUT],
force_conversion: Boolean = False,
**kwargs: Any,
) -> AbstractLUT:
"""
Converts the *LUT* to given ``cls`` class instance.
Parameters
----------
cls
*LUT* class instance.
force_conversion
Whether to force the conversion as it might be destructive.
Other Parameters
----------------
interpolator
Interpolator class type to use as interpolating function.
interpolator_kwargs
Arguments to use when instantiating the interpolating function.
size
Expected table size in case of a downcast from a :class:`LUT3D`
class instance.
Returns
-------
:class:`colour.LUT1D` or :class:`colour.LUT3x1D` or \
:class:`colour.LUT3D`
Converted *LUT* class instance.
Warnings
--------
Some conversions are destructive and raise a :class:`ValueError`
exception by default.
Raises
------
ValueError
If the conversion is destructive.
Examples
--------
>>> LUT = LUT3D()
>>> print(LUT.as_LUT(LUT1D, force_conversion=True))
LUT1D - Unity 33 - Converted 3D to 1D
-------------------------------------
<BLANKLINE>
Dimensions : 1
Domain : [ 0. 1.]
Size : (10,)
>>> print(LUT.as_LUT(LUT3x1D, force_conversion=True))
LUT3x1D - Unity 33 - Converted 3D to 3x1D
-----------------------------------------
<BLANKLINE>
Dimensions : 2
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (10, 3)
>>> print(LUT.as_LUT(LUT3D))
LUT3D - Unity 33 - Converted 3D to 3D
-------------------------------------
<BLANKLINE>
Dimensions : 3
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (33, 33, 33, 3)
"""
return LUT_to_LUT(self, cls, force_conversion, **kwargs)
def LUT_to_LUT(
LUT,
cls: Type[AbstractLUT],
force_conversion: Boolean = False,
**kwargs: Any,
) -> AbstractLUT:
"""
Converts given *LUT* to given ``cls`` class instance.
Parameters
----------
LUT
*LUT* to convert.
cls
*LUT* class instance.
force_conversion
Whether to force the conversion if destructive.
Other Parameters
----------------
channel_weights
Channel weights in case of a downcast from a :class:`LUT3x1D` or
:class:`LUT3D` class instance.
interpolator
Interpolator class type to use as interpolating function.
interpolator_kwargs
Arguments to use when instantiating the interpolating function.
size
Expected table size in case of an upcast to or a downcast from a
:class:`LUT3D` class instance.
Returns
-------
:class:`colour.LUT1D` or :class:`colour.LUT3x1D` or :class:`colour.LUT3D`
Converted *LUT* class instance.
Warnings
--------
Some conversions are destructive and raise a :class:`ValueError` exception
by default.
Raises
------
ValueError
If the conversion is destructive.
Examples
--------
>>> print(LUT_to_LUT(LUT1D(), LUT3D, force_conversion=True))
LUT3D - Unity 10 - Converted 1D to 3D
-------------------------------------
<BLANKLINE>
Dimensions : 3
Domain : [[ 0. 0. 0.]
[ 1. 1. 1.]]
Size : (33, 33, 33, 3)
>>> print(LUT_to_LUT(LUT3x1D(), LUT1D, force_conversion=True))
LUT1D - Unity 10 - Converted 3x1D to 1D
---------------------------------------
<BLANKLINE>
Dimensions : 1
Domain : [ 0. 1.]
Size : (10,)
>>> print(LUT_to_LUT(LUT3D(), LUT1D, force_conversion=True))
LUT1D - Unity 33 - Converted 3D to 1D
-------------------------------------
<BLANKLINE>
Dimensions : 1
Domain : [ 0. 1.]
Size : (10,)
"""
ranks = {LUT1D: 1, LUT3x1D: 2, LUT3D: 3}
path = (ranks[LUT.__class__], ranks[cls])
path_verbose = [
f"{element}D" if element != 2 else "3x1D" for element in path
]
if path in ((1, 3), (2, 1), (2, 3), (3, 1), (3, 2)):
if not force_conversion:
raise ValueError(
'Conversion of a "LUT" {} to a "LUT" {} is destructive, '
'please use the "force_conversion" argument to proceed.'.format(
*path_verbose
)
)
suffix = " - Converted {} to {}".format(*path_verbose)
name = f"{LUT.name}{suffix}"
# Same dimension conversion, returning a copy.
if len(set(path)) == 1:
LUT = LUT.copy()
LUT.name = name
else:
size = kwargs.get("size", 33 if cls is LUT3D else 10)
if "size" in kwargs:
del kwargs["size"]
channel_weights = as_float_array(
kwargs.get("channel_weights", full(3, 1 / 3))
)
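# Illustrative assumption: passing e.g. channel_weights=np.array([0.2126, 0.7152, 0.0722])
# would downcast to 1D using Rec. 709 luma weights instead of the default plain average.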
if "channel_weights" in kwargs:
del kwargs["channel_weights"]
if isinstance(LUT, LUT1D):
if cls is LUT3x1D:
domain = tstack([LUT.domain, LUT.domain, LUT.domain])
table = tstack([LUT.table, LUT.table, LUT.table])
elif cls is LUT3D:
domain = tstack([LUT.domain, LUT.domain, LUT.domain])
table = LUT3D.linear_table(size, domain)
table = LUT.apply(table, **kwargs)
elif isinstance(LUT, LUT3x1D):
if cls is LUT1D:
domain = np.sum(LUT.domain * channel_weights, axis=-1)
table = np.sum(LUT.table * channel_weights, axis=-1)
elif cls is LUT3D:
domain = LUT.domain
table = LUT3D.linear_table(size, domain)
table = LUT.apply(table, **kwargs)
elif isinstance(LUT, LUT3D):
if cls is LUT1D:
domain = np.sum(LUT.domain * channel_weights, axis=-1)
table = LUT1D.linear_table(size, domain)
table = LUT.apply(tstack([table, table, table]), **kwargs)
table = np.sum(table * channel_weights, axis=-1)
elif cls is LUT3x1D:
domain = LUT.domain
table = LUT3x1D.linear_table(size, domain)
table = LUT.apply(table, **kwargs)
LUT = cls(
table=table,
name=name,
domain=domain,
size=table.shape[0],
comments=LUT.comments,
)
return LUT
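# Illustrative usage sketch, not part of the library above: it round-trips a 1D gamma
# curve through LUT_to_LUT and applies both representations to the same sample. The
# gamma value, table sizes and RGB sample are assumptions chosen for the example.
if __name__ == "__main__":
    example_curve = LUT1D(LUT1D.linear_table(16) ** (1 / 2.2), name="Example Gamma 2.2")
    # The 1D -> 3D path is listed as destructive above, hence force_conversion=True.
    example_cube = LUT_to_LUT(example_curve, LUT3D, force_conversion=True, size=17)
    example_RGB = np.array([0.18, 0.18, 0.18])
    # Both outputs should agree to within the trilinear interpolation error of the cube.
    print(example_curve.apply(example_RGB))
    print(example_cube.apply(example_RGB))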
|
py | 1a39ce5fad86283da39c952ca9f12a4d3d21a64f | # lib.plugcache - Reads plugin information into a re-usable cache
#
# Copyright (c) 2016 ColoradoFourWheeler / ext
#
import indigo
import logging
import ext
import dtutil
import ui
#import actionslib
from os import listdir
import os.path
from os.path import isfile, join
import glob
#from xml.dom import minidom
import xml.dom.minidom
import plistlib
import string
import os
validDeviceTypes = ["dimmer", "relay", "sensor", "speedcontrol", "thermostat", "sprinkler", "custom"]
fieldTypeTemplates = {
# Key is node <Field> type attribute, value is template file name.
u"serialport": u"_configUiField_serialPort.xml"
}
class plugfilter:
#
# Init
#
def __init__(self):
self.getDevices = True
self.getStates = True
self.getFields = True
self.getActions = True
self.showHiddenFields = False
self.pluginFilter = ""
self.excludeFilter = []
class plugcache:
pluginCache = indigo.Dict()
#
# Init
#
def __init__(self, factory, refreshtime = 24, filter = None):
self.logger = logging.getLogger ("Plugin.plugincache")
self.factory = factory
self.refreshtime = refreshtime
self.filter = filter
if filter is None: self.filter = plugfilter()
self.refresh()
################################################################################
# METHODS
################################################################################
#
# Get a list of fields suitable for a list or menu UI field
#
def getFieldUIList (self, obj):
ret = []
try:
data = self._resolveObject (obj)
if len(data[0]) == 0: return ret
plugInfo = data[0]
deviceTypeId = data[1]
if "xml" in plugInfo == False: return ret
if "devices" in plugInfo["xml"] == False: return ret
for id, info in plugInfo["xml"]["devices"].iteritems():
if id == deviceTypeId:
if len(info["ConfigUI"]) > 0:
for idx, configUI in info["ConfigUI"].iteritems():
for field in configUI:
if field["hidden"]: continue
if field["type"] == "separator":
option = ("-line-", self.factory.ui.getSeparator())
ret.append(option)
elif field["type"] == "label":
continue
else:
label = ""
if field["Label"].strip() != "":
label = field["Label"]
else:
label = field["Description"]
if label == "": continue
label = label.strip()
option = (field["id"], label.replace(":", ""))
ret.append (option)
except Exception as e:
self.logger.error (ext.getException(e))
return self._cleanReturnList (ret)
#
# Get a list of states suitable for a list or menu UI field
#
def getStateUIList (self, obj):
ret = []
try:
data = self._resolveObject (obj)
if len(data[0]) == 0: return ret
plugInfo = data[0]
deviceTypeId = data[1]
if "xml" in plugInfo == False: return ret
if "devices" in plugInfo["xml"] == False: return ret
ret = self._getStateUIList (obj, plugInfo, deviceTypeId)
except Exception as e:
self.logger.error (ext.getException(e))
return self._cleanReturnList (ret)
#
# Run the state list builder from getStateUIList
#
def _getStateUIList (self, obj, plugInfo, deviceTypeId):
ret = []
statesfound = []
try:
for id, info in plugInfo["xml"]["devices"].iteritems():
if id == deviceTypeId:
for state in info["States"]:
if state["Type"] == 0:
option = ("-line-", self.factory.ui.getSeparator())
ret.append(option)
else:
option = (state["Key"], state["StateLabel"])
ret.append (option)
# Add Indigo built-in device states
retIndigo = self.factory.ui.getBuiltInStates (obj)
if len(retIndigo) > 0:
option = ("-line-", self.factory.ui.getSeparator())
retIndigo.append(option)
ret = retIndigo + ret
# Compare actual object states to the states found to pick up stragglers
retadded = []
for state, statevalue in obj.states.iteritems():
isFound = False
for opt in ret:
if opt[0] == state:
isFound = True
continue
if isFound: continue
if len(state) > 4:
if state[-3:] == ".ui": continue # don't confuse the poor user, plugins can decide to use the .ui version if needed
option = (state, self.factory.ui.resolveStateNameToString(state))
retadded.append(option)
if len(ret) > 0 and len(retadded) > 0:
option = ("-line-", self.factory.ui.getSeparator())
ret.append(option)
ret += retadded
except Exception as e:
self.logger.error (ext.getException(e))
return ret
#
# Get a list of fields suitable for a list or menu UI field
#
def getActions (self, obj):
ret = {}
try:
data = self._resolveObject (obj)
if len(data[0]) == 0: return ret
plugInfo = data[0]
deviceTypeId = data[1]
if "xml" in plugInfo == False: return ret
# For some reason the line below doesn't return false properly, using IF ELSE instead
#if "actions" in plugInfo["xml"] == False: return ret
if "actions" in plugInfo["xml"]:
pass
else:
return ret
for id, action in plugInfo["xml"]["actions"].iteritems():
isOk = True
if "DeviceFilter" in action:
isOk = self._isForDevice (plugInfo, deviceTypeId, action["DeviceFilter"])
if isOk:
if deviceTypeId[0:7] == "indigo.":
ret["indigo_" + id] = action
else:
ret["plugin_" + id] = action
# Add Indigo actions as long as this was not already done above
if deviceTypeId[0:7] != "indigo.":
data = self._resolveIndigoDevice (obj)
for id, action in data[0]["xml"]["actions"].iteritems():
isOk = True
if "DeviceFilter" in action:
isOk = self._isForDevice (data[0], data[1], action["DeviceFilter"])
if isOk:
ret["indigo_" + id] = action
except Exception as e:
self.logger.error (ext.getException(e))
return ret
#
# Get a list of variable actions suitable for a list or menu UI field
#
def getVariableActionUIList (self, showUIConfig = False):
ret = []
try:
plugInfo = self.pluginCache["Indigo"]
deviceTypeId = "indigo.variable"
if "xml" in plugInfo == False: return ret
if "actions" in plugInfo["xml"] == False: return ret
ret = self._getActionUIList (plugInfo, deviceTypeId, showUIConfig, "indigo_")
except Exception as e:
self.logger.error (ext.getException(e))
return self._cleanReturnList (ret)
#
# Get a list of server actions suitable for a list or menu UI field
#
def getServerActionUIList (self, showUIConfig = False):
ret = []
try:
plugInfo = self.pluginCache["Indigo"]
deviceTypeId = "indigo.server"
if "xml" in plugInfo == False: return ret
if "actions" in plugInfo["xml"] == False: return ret
ret = self._getActionUIList (plugInfo, deviceTypeId, showUIConfig, "indigo_")
except Exception as e:
self.logger.error (ext.getException(e))
return self._cleanReturnList (ret)
#
# Get a list of actions suitable for a list or menu UI field
#
def getActionUIList (self, obj, showUIConfig = False):
ret = []
try:
data = self._resolveObject (obj)
if len(data[0]) == 0: return ret
plugInfo = data[0]
deviceTypeId = data[1]
if "xml" in plugInfo == False: return ret
#if "actions" in plugInfo["xml"] == False: return ret
if "actions" in plugInfo["xml"]:
pass
else:
return ret
if deviceTypeId[0:7] == "indigo.":
ret = self._getActionUIList (plugInfo, deviceTypeId, showUIConfig, "indigo_")
else:
ret = self._getActionUIList (plugInfo, deviceTypeId, showUIConfig)
# Add Indigo actions as long as this was not already done above
if deviceTypeId[0:7] != "indigo.":
data = self._resolveIndigoDevice (obj)
retEx = self._getActionUIList (data[0], data[1], showUIConfig, "indigo_")
retEx.append (("-line-", self.factory.ui.getSeparator()))
ret = retEx + ret
except Exception as e:
self.logger.error (ext.getException(e))
return self._cleanReturnList (ret)
#
# Run the action list builder from getActionUIList
#
def _getActionUIList (self, plugInfo, deviceTypeId, showUIConfig, prefix = "plugin_"):
ret = []
try:
# Run through every device action and add a placeholder, we'll clean up after
for id, action in plugInfo["xml"]["actions"].iteritems():
ret.append ("")
for id, action in plugInfo["xml"]["actions"].iteritems():
isOk = True
if "DeviceFilter" in action:
isOk = self._isForDevice (plugInfo, deviceTypeId, action["DeviceFilter"])
if "ConfigUI" in action:
if showUIConfig == False and len(action["ConfigUI"]) > 0: isOk = False
if isOk:
if action["Name"] == " - ":
option = ("-line-", self.factory.ui.getSeparator())
ret[action["SortOrder"]] = option
else:
option = (prefix + id, action["Name"])
ret[action["SortOrder"]] = option
except Exception as e:
self.logger.error (ext.getException(e))
return ret
#
# Clean up a return list
#
def _cleanReturnList (self, dirtyList):
ret = []
try:
lastRetItem = ""
for i in range (0, len(dirtyList)):
try:
if lastRetItem != "":
if lastRetItem == dirtyList[i]: continue # don't add successive duplicates (mostly lines)
if dirtyList[i] != "": lastRetItem = dirtyList[i]
if dirtyList[i] is not None and dirtyList[i] != "": ret.append(dirtyList[i])
except:
continue
if len(ret) > 0:
# Make sure we don't start on a line
if ret[0] == ("-line-", self.factory.ui.getSeparator()):
del ret[0]
# Make sure we don't end on a line
if len(ret) > 0 and ret[len(ret) - 1] == ("-line-", self.factory.ui.getSeparator()):
del ret[len(ret) - 1]
return ret
except Exception as e:
self.logger.error (ext.getException(e))
return dirtyList
#
# Compare filter string to device type
#
def _isForDevice (self, plugInfo, deviceTypeId, filter):
try:
if self._deviceMatchesFilter (plugInfo, deviceTypeId, filter): return True
filters = filter.split(",")
for f in filters:
if self._deviceMatchesFilter (plugInfo, deviceTypeId, f): return True
except Exception as e:
self.logger.error (ext.getException(e))
return False
#
# Check if a device type matches a filter
#
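# Illustrative example: with plugInfo["id"] == "com.example.plugin" and deviceTypeId == "myDevice"
# (hypothetical values), the filters "", "self", "self.myDevice", "myDevice",
# "com.example.plugin" and "com.example.plugin.myDevice" all match.
#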
def _deviceMatchesFilter (self, plugInfo, deviceTypeId, filter):
try:
#self.logger.threaddebug ("Checking if filter '{0}' matches device type of '{1}'".format(filter, deviceTypeId))
if filter == "": return True # Global
filter = filter.strip()
if filter == "self": return True # Global
if filter == "self." + deviceTypeId: return True # Direct
if filter == deviceTypeId: return True # Direct
if filter == plugInfo["id"]: return True # Global
if filter == plugInfo["id"] + "." + deviceTypeId: return True # Direct
except Exception as e:
self.logger.error (ext.getException(e))
#self.logger.threaddebug ("Filter '{0}' does not match device type of '{1}'".format(filter, deviceTypeId))
return False
#
# Resolve an object to it's local plugin details
#
def _resolveObject (self, obj):
try:
plugInfo = None
deviceTypeId = ""
if type(obj) is str:
self.logger.threaddebug ("Object is typed as '{0}'".format(unicode(type(obj))))
else:
self.logger.threaddebug ("Object '{0}' is typed as '{1}'".format(obj.name, unicode(type(obj))))
if type(obj) is indigo.Variable:
return self._resolveIndigoDevice (obj)
elif type(obj) is indigo.Schedule:
X = 1
elif type(obj) is indigo.Trigger:
X = 1
elif type(obj) is indigo.ActionGroup:
X = 1
elif type(obj) is str:
if obj == "server":
plugInfo = self.pluginCache["Indigo"]
deviceTypeId = "indigo.server"
else:
# It's a device
if obj.pluginId != "" and obj.pluginId in self.pluginCache:
plugInfo = self.pluginCache[obj.pluginId]
deviceTypeId = obj.deviceTypeId
else:
# It's an indigo built in device
return self._resolveIndigoDevice (obj)
return (plugInfo, deviceTypeId)
except Exception as e:
self.logger.error (ext.getException(e))
return ({}, "")
#
# Return Indigo device info
#
def _resolveIndigoDevice (self, obj):
try:
plugInfo = None
deviceTypeId = ""
plugInfo = self.pluginCache["Indigo"] # It's Indigo
if type(obj) is indigo.RelayDevice: deviceTypeId = "indigo.relay"
if type(obj) is indigo.DimmerDevice: deviceTypeId = "indigo.dimmer"
if type(obj) is indigo.MultiIODevice: deviceTypeId = "indigo.iodevice"
if type(obj) is indigo.SensorDevice: deviceTypeId = "indigo.sensor"
if type(obj) is indigo.SpeedControlDevice: deviceTypeId = "indigo.speedcontrol"
if type(obj) is indigo.SprinklerDevice: deviceTypeId = "indigo.sprinkler"
if type(obj) is indigo.ThermostatDevice: deviceTypeId = "indigo.thermostat"
if type(obj) is indigo.Variable: deviceTypeId = "indigo.variable"
return (plugInfo, deviceTypeId)
except Exception as e:
self.logger.error (ext.getException(e))
return ({}, "")
################################################################################
# PARSER
################################################################################
#
# Read in all plugin information and store
#
def refresh (self):
try:
self.lastUpdate = indigo.server.getTime()
#self.plugins = self.pluglist()
#self.addIndigoActions ()
self.logger.debug ("Refreshing plugin information")
base = indigo.server.getInstallFolderPath() + "/Plugins"
plugins = glob.glob(base + "/*.indigoPlugin")
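# Each match is one plugin bundle on disk, e.g. ".../Plugins/Example Plugin.indigoPlugin"
# (hypothetical name); its Info.plist and XML definitions are parsed below.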
for plugin in plugins:
plugInfo = self._parsePlist (plugin)
#if plugInfo["id"] != "com.eps.indigoplugin.dev-template": continue
pluginXML = indigo.Dict()
# If it's this plugin then parse in the Indigo built-in commands
if plugInfo["id"] == self.factory.plugin.pluginId:
plugInfoEx = indigo.Dict()
plugInfoEx["id"] = "Indigo"
plugInfoEx["name"] = "Indigo Built-In Commands"
plugInfoEx["path"] = ""
if os.path.isfile(plugin + "/Contents/Server Plugin/lib/actionslib.xml"):
pluginXML["actions"] = self._parseActionsXML(plugin + "/Contents/Server Plugin/lib/actionslib.xml")
pluginXML["devices"] = indigo.Dict() # Placeholder
plugInfoEx["xml"] = pluginXML
self.pluginCache["Indigo"] = plugInfoEx
#indigo.server.log(unicode(plugInfoEx))
if os.path.isfile(plugin + "/Contents/Server Plugin/Devices.xml"):
pluginXML["devices"] = self._parseDevicesXML(plugin + "/Contents/Server Plugin/Devices.xml")
if os.path.isfile(plugin + "/Contents/Server Plugin/Actions.xml"):
pluginXML["actions"] = self._parseActionsXML(plugin + "/Contents/Server Plugin/Actions.xml")
plugInfo["xml"] = pluginXML
self.pluginCache[plugInfo["id"]] = plugInfo
#self._parseDevicesXML(kDevicesFilename)
#self._parseEventsXML(kEventsFilename)
#self._parseActionsXML(kActionsFilename)
X = 1
except Exception as e:
self.logger.error (ext.getException(e))
raise
#
# Parse plist line data (pretty low brow but since plist breaks standard XML reads it works for now)
#
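# Expected Info.plist fragment (illustrative values only):
#   <key>CFBundleDisplayName</key>
#   <string>Example Plugin</string>
#   <key>CFBundleIdentifier</key>
#   <string>com.example.plugin</string>
#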
def _parsePlist (self, path):
plugDict = indigo.Dict()
plugDict["path"] = path
try:
plist = open(path + "/Contents/Info.plist")
nameIdx = 0
name = ""
idIdx = 0
id = ""
for line in plist:
if nameIdx == 1:
name = line
nameIdx = 0
continue
if idIdx == 1:
id = line
idIdx = 0
continue
x = string.find (line, 'CFBundleDisplayName')
if x > -1: nameIdx = 1
x = string.find (line, 'CFBundleIdentifier')
if x > -1: idIdx = 1
#indigo.server.log (name + "\t" + id)
x = string.find (name, "<string>")
y = string.find (name, "</string>")
name = name[x + 8:y]
x = string.find (id, "<string>")
y = string.find (id, "</string>")
id = id[x + 8:y]
#return self.plugRecord (path, id, name)
plugDict["id"] = id
plugDict["name"] = name
except Exception as e:
self.logger.error (ext.getException(e))
#return self.plugRecord (path, "Unknown", "Unknown")
return plugDict
################################################################################
def _getChildElementsByTagName(self, elem, tagName):
childList = []
for child in elem.childNodes:
if child.nodeType == child.ELEMENT_NODE and (tagName == u"*" or child.tagName == tagName):
childList.append(child)
return childList
def _getXmlFromFile(self, filename):
if not os.path.isfile(filename):
return u""
xml_file = file(filename, 'r')
xml_data = xml_file.read()
xml_file.close()
return xml_data
def _getXmlFromTemplate(self, templateName):
filename = indigo.host.resourcesFolderPath + '/templates/' + templateName
return self._getXmlFromFile(filename)
def _getElementAttribute(self, elem, attrName, required=True, default=None, errorIfNotAscii=True, filename=u"unknown"):
attrStr = elem.getAttribute(attrName)
if attrStr is None or len(attrStr) == 0:
if required:
raise ValueError(u"required XML attribute '%s' is missing or empty in file %s" % (attrName,filename))
return default
elif errorIfNotAscii and attrStr[0] not in string.ascii_letters:
raise ValueError(u"XML attribute '%s' in file %s has a value that starts with invalid characters: '%s' (should begin with A-Z or a-z):\n%s" % (attrName,filename,attrStr,elem.toprettyxml()))
return attrStr
def _getElementValueByTagName(self, elem, tagName, required=True, default=None, filename=u"unknown"):
valueElemList = self._getChildElementsByTagName(elem, tagName)
if len(valueElemList) == 0:
if required:
raise ValueError(u"required XML element <%s> is missing in file %s" % (tagName,filename))
return default
elif len(valueElemList) > 1:
raise ValueError(u"found more than one XML element <%s> (should only be one) in file %s" % (tagName,filename))
valueStr = valueElemList[0].firstChild.data
if valueStr is None or len(valueStr) == 0:
if required:
raise ValueError(u"required XML element <%s> is empty in file %s" % (tagName,filename))
return default
return valueStr
################################################################################
def _parseMenuItemsXML(self, filename):
if not os.path.isfile(filename):
return
try:
dom = xml.dom.minidom.parseString(self._getXmlFromFile(filename))
except:
raise LookupError(u"%s is malformed" % (filename))
menuItemsElem = self._getChildElementsByTagName(dom, u"MenuItems")
if len(menuItemsElem) != 1:
raise LookupError(u"Incorrect number of <MenuItems> elements found in file %s" % (filename))
menuItems = self._getChildElementsByTagName(menuItemsElem[0], u"MenuItem")
for menu in menuItems:
menuDict = indigo.Dict()
menuId = self._getElementAttribute(menu, u"id", filename=filename)
if menuId in self.menuItemsDict:
raise LookupError(u"Duplicate menu id (%s) found in file %s" % (menuId, filename))
menuDict[u"Id"] = menuId
menuDict[u"Name"] = self._getElementValueByTagName(menu, u"Name", False, filename=filename)
if "Name" in menuDict:
menuDict[u"ButtonTitle"] = self._getElementValueByTagName(menu, u"ButtonTitle", False, filename=filename)
# Plugin should specify at least a CallbackMethod or ConfigUIRawXml (possibly both)
menuDict[u"CallbackMethod"] = self._getElementValueByTagName(menu, u"CallbackMethod", False, filename=filename)
configUIList = self._getChildElementsByTagName(menu, u"ConfigUI")
if len(configUIList) > 0:
#menuDict[u"ConfigUIRawXml"] = self._parseConfigUINode(dom, configUIList[0], filename=filename).toxml()
menuDict[u"ConfigUI"] = self._parseConfigUINode (dom, configUIList[0])
else:
if not "CallbackMethod" in menuDict:
raise ValueError(u"<MenuItem> elements must contain either a <CallbackMethod> and/or a <ConfigUI> element")
self.menuItemsList.append(menuDict)
self.menuItemsDict[menuId] = menuDict
###################
def _getDeviceStateDictForType(self, type, stateId, triggerLabel, controlPageLabel, disabled=False):
stateDict = indigo.Dict()
stateDict[u"Type"] = int(type)
stateDict[u"Key"] = stateId
stateDict[u"Disabled"] = disabled
stateDict[u"TriggerLabel"] = triggerLabel
stateDict[u"StateLabel"] = controlPageLabel
return stateDict
def getDeviceStateDictForSeparator(self, stateId):
return self._getDeviceStateDictForType(indigo.kTriggerKeyType.Label, stateId, u"_Separator", u"_Separator", True)
def getDeviceStateDictForSeperator(self, stateId):
return self.getDeviceStateDictForSeparator(stateId)
def getDeviceStateDictForNumberType(self, stateId, triggerLabel, controlPageLabel, disabled=False):
return self._getDeviceStateDictForType(indigo.kTriggerKeyType.Number, stateId, triggerLabel, controlPageLabel, disabled)
def getDeviceStateDictForStringType(self, stateId, triggerLabel, controlPageLabel, disabled=False):
return self._getDeviceStateDictForType(indigo.kTriggerKeyType.String, stateId, triggerLabel, controlPageLabel, disabled)
def getDeviceStateDictForEnumType(self, stateId, triggerLabel, controlPageLabel, disabled=False):
return self._getDeviceStateDictForType(indigo.kTriggerKeyType.Enumeration, stateId, triggerLabel, controlPageLabel, disabled)
def getDeviceStateDictForBoolOnOffType(self, stateId, triggerLabel, controlPageLabel, disabled=False):
stateDict = self._getDeviceStateDictForType(indigo.kTriggerKeyType.BoolOnOff, stateId, triggerLabel, controlPageLabel, disabled)
stateDict[u"StateLabel"] = stateDict[u"StateLabel"] + u" (on or off)"
return stateDict
def getDeviceStateDictForBoolYesNoType(self, stateId, triggerLabel, controlPageLabel, disabled=False):
stateDict = self._getDeviceStateDictForType(indigo.kTriggerKeyType.BoolYesNo, stateId, triggerLabel, controlPageLabel, disabled)
stateDict[u"StateLabel"] = stateDict[u"StateLabel"] + u" (yes or no)"
return stateDict
def getDeviceStateDictForBoolOneZeroType(self, stateId, triggerLabel, controlPageLabel, disabled=False):
stateDict = self._getDeviceStateDictForType(indigo.kTriggerKeyType.BoolOneZero, stateId, triggerLabel, controlPageLabel, disabled)
stateDict[u"StateLabel"] = stateDict[u"StateLabel"] + u" (1 or 0)"
return stateDict
def getDeviceStateDictForBoolTrueFalseType(self, stateId, triggerLabel, controlPageLabel, disabled=False):
stateDict = self._getDeviceStateDictForType(indigo.kTriggerKeyType.BoolTrueFalse, stateId, triggerLabel, controlPageLabel, disabled)
stateDict[u"StateLabel"] = stateDict[u"StateLabel"] + u" (true or false)"
return stateDict
def _parseActionsXML(self, filename):
ret = indigo.Dict()
if not os.path.isfile(filename):
return
try:
dom = xml.dom.minidom.parseString(self._getXmlFromFile(filename))
except:
raise LookupError(u"%s is malformed" % (filename))
actionsElement = self._getChildElementsByTagName(dom, u"Actions")
if len(actionsElement) != 1:
raise LookupError(u"Incorrect number of <Actions> elements found in file %s" % (filename))
sortIndex = 0
actionElemList = self._getChildElementsByTagName(actionsElement[0], u"Action")
for action in actionElemList:
serverVers = self._getElementAttribute(action, u"_minServerVers", required=False, errorIfNotAscii=False, filename=filename)
if serverVers is not None and not PluginBase.serverVersCompatWith(serverVers):
continue # This version of Indigo Server isn't compatible with this object (skip it)
actionDict = indigo.Dict()
actionTypeId = self._getElementAttribute(action, u"id", filename=filename)
try:
actionDict[u"DeviceFilter"] = self._getElementAttribute(action, u"deviceFilter", False, u"", filename=filename)
actionDict[u"Name"] = self._getElementValueByTagName(action, u"Name", filename=filename)
actionDict[u"CallbackMethod"] = self._getElementValueByTagName(action, u"CallbackMethod", filename=filename)
except ValueError:
# It's missing <Name> or <CallbackMethod> so treat it as a separator
actionDict[u"Name"] = u" - "
actionDict[u"CallbackMethod"] = u""
#actionDict[u"DeviceFilter"] = u""
actionDict[u"UiPath"] = self._getElementAttribute(action, u"uiPath", required=False, filename=filename)
actionDict[u"PrivateUiPath"] = self._getElementAttribute(action, u"privateUiPath", required=False, filename=filename)
actionDict[u"SortOrder"] = sortIndex
sortIndex += 1
configUIList = self._getChildElementsByTagName(action, u"ConfigUI")
if len(configUIList) > 0:
#actionDict[u"ConfigUIRawXml"] = self._parseConfigUINode(dom, configUIList[0], filename=filename).toxml()
actionDict[u"ConfigUI"] = self._parseConfigUINode(dom, configUIList[0], filename=filename)
#self.actionsTypeDict[actionTypeId] = actionDict
ret[actionTypeId] = actionDict
return ret
def _parseDevicesXML(self, filename):
ret = indigo.Dict()
if not os.path.isfile(filename):
return
try:
dom = xml.dom.minidom.parseString(self._getXmlFromFile(filename))
except Exception as e:
self.logger.error(u"%s has an error: %s" % (filename, unicode(e)))
raise LookupError(u"%s is malformed" % (filename))
# Now get all devices from the <Devices> element
devicesElement = self._getChildElementsByTagName(dom, u"Devices")
if len(devicesElement) != 1:
raise LookupError(u"Incorrect number of <Devices> elements found in file %s" % (filename))
# Look for a DeviceFactory element - that will be used to create devices
# rather than creating them directly using the <Device> XML. This allows
# a plugin to discover device types rather than forcing the user to select
# the type up-front (like how INSTEON devices are added).
deviceFactoryElements = self._getChildElementsByTagName(devicesElement[0], u"DeviceFactory")
if len(deviceFactoryElements) > 1:
raise LookupError(u"Incorrect number of <DeviceFactory> elements found in file %s" % (filename))
elif len(deviceFactoryElements) == 1:
deviceFactory = deviceFactoryElements[0]
elems = self._getChildElementsByTagName(deviceFactory, u"Name")
if len(elems) != 1:
raise LookupError(u"<DeviceFactory> element must contain exactly one <Name> element in file %s" % (filename))
elems = self._getChildElementsByTagName(deviceFactory, u"ButtonTitle")
if len(elems) != 1:
raise LookupError(u"<DeviceFactory> element must contain exactly one <ButtonTitle> element in file %s" % (filename))
elems = self._getChildElementsByTagName(deviceFactory, u"ConfigUI")
if len(elems) != 1:
raise LookupError(u"<DeviceFactory> element must contain exactly one <ConfigUI> element in file %s" % (filename))
self.deviceFactoryXml = deviceFactory.toxml()
else:
self.deviceFactoryXml = None
sortIndex = 0
deviceElemList = self._getChildElementsByTagName(devicesElement[0], u"Device")
for device in deviceElemList:
deviceDict = indigo.Dict()
deviceTypeId = self._getElementAttribute(device, u"id", filename=filename)
deviceDict[u"Type"] = self._getElementAttribute(device, u"type", filename=filename)
if deviceDict[u"Type"] not in validDeviceTypes:
raise LookupError(u"Unknown device type in file %s" % (filename))
deviceDict[u"Name"] = self._getElementValueByTagName(device, u"Name", filename=filename)
deviceDict[u"DisplayStateId"] = self._getElementValueByTagName(device, u"UiDisplayStateId", required=False, default=u"", filename=filename)
deviceDict[u"SortOrder"] = sortIndex
sortIndex += 1
configUIList = self._getChildElementsByTagName(device, u"ConfigUI")
if len(configUIList) > 0:
#deviceDict[u"ConfigUIRawXml"] = self._parseConfigUINode(dom, configUIList[0], filename=filename).toxml()
deviceDict[u"ConfigUI"] = self._parseConfigUINode(dom, configUIList[0], filename=filename)
deviceStatesElementList = self._getChildElementsByTagName(device, u"States")
statesList = indigo.List()
if len(deviceStatesElementList) > 1:
raise LookupError(u"Incorrect number of <States> elements found in file %s" % (filename))
elif len(deviceStatesElementList) == 1:
deviceStateElements = self._getChildElementsByTagName(deviceStatesElementList[0], u"State")
for state in deviceStateElements:
stateId = self._getElementAttribute(state, u"id", filename=filename)
triggerLabel = self._getElementValueByTagName(state, u"TriggerLabel", required=False, default=u"", filename=filename)
controlPageLabel = self._getElementValueByTagName(state, u"ControlPageLabel", required=False, default=u"", filename=filename)
disabled = False # ToDo: need to read this?
stateValueTypes = self._getChildElementsByTagName(state, u"ValueType")
if len(stateValueTypes) != 1:
raise LookupError(u"<State> elements must have exactly one <ValueType> element in file %s" % (filename))
valueListElements = self._getChildElementsByTagName(stateValueTypes[0], u"List")
if len(valueListElements) > 1:
raise LookupError(u"<ValueType> elements must have zero or one <List> element in file %s" % (filename))
elif len(valueListElements) == 1:
# It must have a TriggerLabel and a ControlPageLabel
if (triggerLabel == "") or (controlPageLabel == ""):
raise LookupError(u"State elements must have both a TriggerLabel and a ControlPageLabel in file %s" % (filename))
# It's an enumeration -- add an enum type for triggering off of any changes
# to this enumeration type:
stateDict = self.getDeviceStateDictForEnumType(stateId, triggerLabel, controlPageLabel, disabled)
statesList.append(stateDict)
# And add individual true/false types for triggering off every enumeration
# value possiblity (as specified by the Option list):
triggerLabelPrefix = self._getElementValueByTagName(state, u"TriggerLabelPrefix", required=False, default=u"", filename=filename)
controlPageLabelPrefix = self._getElementValueByTagName(state, u"ControlPageLabelPrefix", required=False, default=u"", filename=filename)
valueOptions = self._getChildElementsByTagName(valueListElements[0], u"Option")
if len(valueOptions) < 1:
raise LookupError(u"<List> elements must have at least one <Option> element in file %s" % (filename))
for option in valueOptions:
subStateId = stateId + u"." + self._getElementAttribute(option, u"value", filename=filename)
if len(triggerLabelPrefix) > 0:
subTriggerLabel = triggerLabelPrefix + u" " + option.firstChild.data
else:
subTriggerLabel = option.firstChild.data
if len(controlPageLabelPrefix) > 0:
subControlPageLabel = controlPageLabelPrefix + u" " + option.firstChild.data
else:
subControlPageLabel = option.firstChild.data
subDisabled = False # ToDo: need to read this?
subStateDict = self.getDeviceStateDictForBoolTrueFalseType(subStateId, subTriggerLabel, subControlPageLabel, subDisabled)
statesList.append(subStateDict)
else:
# It's not an enumeration
stateDict = None
valueType = stateValueTypes[0].firstChild.data.lower()
# It must have a TriggerLabel and a ControlPageLabel if it's not a separator
if (valueType != u"separator"):
if (triggerLabel == "") or (controlPageLabel == ""):
raise LookupError(u"State elements must have both a TriggerLabel and a ControlPageLabel in file %s" % (filename))
if valueType == u"boolean":
boolType = stateValueTypes[0].getAttribute(u"boolType").lower()
if boolType == u"onoff":
stateDict = self.getDeviceStateDictForBoolOnOffType(stateId, triggerLabel, controlPageLabel, disabled)
elif boolType == u"yesno":
stateDict = self.getDeviceStateDictForBoolYesNoType(stateId, triggerLabel, controlPageLabel, disabled)
elif boolType == u"onezero":
stateDict = self.getDeviceStateDictForBoolOneZeroType(stateId, triggerLabel, controlPageLabel, disabled)
else:
stateDict = self.getDeviceStateDictForBoolTrueFalseType(stateId, triggerLabel, controlPageLabel, disabled)
elif valueType == u"number" or valueType == u"float" or valueType == u"integer":
stateDict = self.getDeviceStateDictForNumberType(stateId, triggerLabel, controlPageLabel, disabled)
elif valueType == u"string":
stateDict = self.getDeviceStateDictForStringType(stateId, triggerLabel, controlPageLabel, disabled)
elif valueType == u"separator":
stateDict = self.getDeviceStateDictForSeparator(stateId)
if stateDict:
statesList.append(stateDict)
deviceDict[u"States"] = statesList
ret[deviceTypeId] = deviceDict
return ret
################################################################################
def _parseConfigUINode(self, mainDom, configUI, filename=u"unknown"):
UIDict = indigo.Dict()
fieldElements = self._getChildElementsByTagName(configUI, u"Field")
if len(fieldElements) > 0:
fieldList = indigo.List()
for field in fieldElements:
fieldDict = indigo.Dict()
fieldDict["separator"] = False
try:
fieldDict["id"] = self._getElementAttribute(field, u"id", required=True, default="", errorIfNotAscii=False, filename=filename)
except:
fieldDict["id"] = ""
try:
fieldDict["ValueType"] = self._getElementAttribute(field, u"valueType", required=False, default="", errorIfNotAscii=False, filename=filename)
except:
fieldDict["ValueType"] = "string"
try:
fieldDict["Default"] = self._getElementAttribute(field, u"defaultValue", required=False, default="", errorIfNotAscii=False, filename=filename)
except:
fieldDict["Default"] = None
fieldDict["type"] = fieldId = self._getElementAttribute(field, u"type", filename=filename)
if fieldDict["type"].lower() == "separator": fieldDict["separator"] = True
isHidden = self._getElementAttribute(field, u"hidden", required=False, default="false", filename=filename)
if isHidden.lower() == "true":
fieldDict["hidden"] = True
else:
fieldDict["hidden"] = False
try:
fieldDict["Label"] = self._getElementValueByTagName(field, u"Label", required=False, default="", filename=filename)
except:
fieldDict["Label"] = ""
try:
fieldDict["Description"] = self._getElementValueByTagName(field, u"Description", required=False, default="", filename=filename)
except:
fieldDict["Description"] = ""
listList = indigo.List()
listElements = self._getChildElementsByTagName(field, u"List")
if len(listElements) > 0:
listDict = indigo.Dict()
listDict["class"] = self._getElementAttribute(listElements[0], u"class", required=False, default="", filename=filename)
optionsList = indigo.List()
optionElements = self._getChildElementsByTagName(listElements[0], u"Option")
if len(optionElements) > 0:
for option in optionElements:
optionDict = indigo.Dict()
optionDict["value"] = self._getElementAttribute(option, u"value", required=False, default="", errorIfNotAscii=False, filename=filename)
optionDict["Label"] = option.childNodes[0].data
optionsList.append(optionDict)
listDict["Options"] = optionsList
listList.append(listDict)
fieldDict["List"] = listList
fieldList.append(fieldDict)
UIDict["Fields"] = fieldList
return UIDict
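# Illustrative usage sketch, not part of the module above. It assumes the surrounding
# plugin framework supplies a factory object exposing .plugin and .ui (as the methods
# above expect) and a running Indigo server; the device id is a hypothetical example.
#
#   plugs = plugcache(self.factory)                  # parses every installed plugin's XML once
#   dev = indigo.devices[123456]
#   action_options = plugs.getActionUIList(dev)      # (value, label) pairs for a UI menu
#   state_options = plugs.getStateUIList(dev)        # device states for a UI menu
#   plugs.refresh()                                  # re-read everything after plugins change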
|
py | 1a39cf5595fbb1aa7898291a352c1524b61e796a | import apsw
import logging
import re
import threading
import traceback
from collections import deque
import ob2.config as config
from ob2.database import DbCursor
from ob2.database.helpers import (
assign_grade_batch,
get_repo_owners,
get_users_by_ids,
)
from ob2.dockergrader.job import JobFailedError
from ob2.dockergrader.queue import dockergrader_queue
from ob2.mailer import send_template
from ob2.util.build_constants import QUEUED, IN_PROGRESS, SUCCESS, FAILED
from ob2.util.config_data import get_assignment_by_name
from ob2.util.hooks import get_job
from ob2.util.time import now, now_str, slip_units
class Worker(object):
def __init__(self):
self.lock = threading.Lock()
self.log = deque(maxlen=100)
self.status = None
self.updated = now()
self.identifier = dockergrader_queue.register_worker(self)
def probe(self, with_log=False):
with self.lock:
if with_log:
return self.identifier, self.status, self.updated, list(self.log)
else:
return self.identifier, self.status, self.updated
def _log(self, message, exc=False):
payload = (now(), message)
if exc:
payload += (traceback.format_exc(),)
else:
payload += (None,)
with self.lock:
self.log.append(payload)
def _dequeue_job(self):
with self.lock:
self.status = None
self.updated = now()
self._log("Waiting for a new job to run")
return dockergrader_queue.dequeue()
def _sanitize_name(self, name):
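# Collapse every run of characters that is not a letter or digit into a single
# underscore, e.g. "Lab 3: Pointers!" -> "Lab_3_Pointers_" (illustrative input).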
return re.sub(r'[^a-zA-Z0-9]+', '_', name)
def _process_job(self, job):
build_name = job.build_name
with self.lock:
self.status = build_name
self.updated = now()
# Mark the job as In Progress
while True:
try:
with DbCursor() as c:
c.execute('''SELECT source, `commit`, message, job, started FROM builds
WHERE build_name = ? AND status = ? LIMIT 1''',
[build_name, QUEUED])
row = c.fetchone()
if row is None:
self._log("Build %s was missing from the database. Skipping." % build_name)
return
source, commit, message, job_name, started = row
owners = get_repo_owners(c, source)
owner_emails = {owner: email for owner, (_, _, _, _, _, email)
in get_users_by_ids(c, owners).items()}
c.execute("UPDATE builds SET status = ?, updated = ? WHERE build_name = ?",
[IN_PROGRESS, now_str(), build_name])
break
except apsw.Error:
self._log("Exception raised while setting status to IN_PROGRESS. Retrying...",
exc=True)
logging.exception("Failed to retrieve next dockergrader job")
self._log("Started building %s" % build_name)
try:
# if the job doesn't exist for some reason, the resulting TypeError will be caught
# and logged
assignment = get_assignment_by_name(job_name)
due_date = assignment.due_date
job_handler = get_job(job_name)
log, score = job_handler(source, commit)
# Ignore any special encoding inside the log, and just treat it as a bytes
log = buffer(log)
min_score, max_score = assignment.min_score, assignment.max_score
full_score = assignment.full_score
if score < min_score or score > max_score:
raise ValueError("A score of %s is not in the acceptable range of %f to %f" %
(str(score), min_score, max_score))
except JobFailedError as e:
self._log("Failed %s with JobFailedError" % build_name, exc=True)
with DbCursor() as c:
c.execute('''UPDATE builds SET status = ?, updated = ?, log = ?
WHERE build_name = ?''', [FAILED, now_str(), str(e), build_name])
if config.mailer_enabled:
try:
for owner in owners:
email = owner_emails.get(owner)
if not email:
continue
subject = "%s failed to complete" % build_name
send_template("build_failed", email, subject, build_name=build_name,
job_name=job_name, source=source, commit=commit,
message=message, error_message=str(e))
except Exception:
self._log("Exception raised while reporting JobFailedError", exc=True)
logging.exception("Exception raised while reporting JobFailedError")
else:
self._log("JobFailedError successfully reported via email")
return
except Exception as e:
self._log("Exception raised while building %s" % build_name, exc=True)
logging.exception("Internal error within build %s" % build_name)
with DbCursor() as c:
c.execute('''UPDATE builds SET status = ?, updated = ?, log = ?
WHERE build_name = ?''',
[FAILED, now_str(), "Build failed due to an internal error.", build_name])
return
self._log("Autograder build %s complete (score: %s)" % (build_name, str(score)))
while True:
try:
with DbCursor() as c:
c.execute('''UPDATE builds SET status = ?, score = ?, updated = ?,
log = ? WHERE build_name = ?''',
[SUCCESS, score, now_str(), log, build_name])
slipunits = slip_units(due_date, started)
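# slip_units() (from ob2.util.time) converts how late the build started, relative to
# the due date, into whole late-policy units (named via config.slip_unit_name_*);
# 0 means the build started on or before the due date.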
affected_users = assign_grade_batch(c, owners, job_name, float(score),
slipunits, build_name, "Automatic build.",
"autograder", dont_lower=True)
break
except apsw.Error:
self._log("Exception raised while assigning grades", exc=True)
logging.exception("Failed to update build %s after build completed" % build_name)
return
if config.mailer_enabled:
try:
for owner in owners:
email = owner_emails.get(owner)
if not email:
continue
subject = "%s complete - score %s / %s" % (build_name, str(score),
str(full_score))
if owner not in affected_users:
subject += " (no effect on grade)"
else:
if slipunits == 1:
subject += " (1 %s used)" % config.slip_unit_name_singular
elif slipunits > 0:
subject += " (%s slip %s used)" % (str(slipunits),
config.slip_unit_name_plural)
send_template("build_finished", email, subject, build_name=build_name,
job_name=job_name, score=score, full_score=str(full_score),
slipunits=slipunits, log=log, source=source, commit=commit,
message=message, affected=(owner in affected_users))
except Exception:
self._log("Exception raised while reporting grade", exc=True)
logging.exception("Exception raised while reporting grade")
else:
self._log("Grade successfully reported via email")
def run(self):
while True:
job = self._dequeue_job()
self._process_job(job)
|
py | 1a39d07c2790b7f5e3e01d356452feb30e2d8dd4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# test_model.py
#
#
from datetime import datetime
import mock
import pytz
from django.conf import settings
from django.test import TestCase
from qa.mixins import DateMixin
TZ = settings.TIME_ZONE
# This is the function that replaces django.utils.timezone.now()
def mocked_now():
return pytz.timezone(TZ).localize(datetime(2000, 6, 1))
class MixinsTestCase(TestCase):
@mock.patch('django.utils.timezone.now', side_effect=mocked_now)
def test_datemixin(self, mocked):
now = DateMixin()
now.pub_date = pytz.timezone(TZ).localize(datetime(2000, 6, 1))
secs = DateMixin()
secs.pub_date = pytz.timezone(TZ).localize(datetime(2000, 5, 31, 23, 59, 57))
minutes = DateMixin()
minutes.pub_date = pytz.timezone(TZ).localize(datetime(2000, 5, 31, 23, 58, 59))
hours = DateMixin()
hours.pub_date = pytz.timezone(TZ).localize(datetime(2000, 5, 31, 22, 59, 59))
days = DateMixin()
days.pub_date = pytz.timezone(TZ).localize(datetime(2000, 5, 30, 23, 59, 59))
date = DateMixin()
date.pub_date = pytz.timezone(TZ).localize(datetime(2000, 3, 1))
self.assertEqual(now.pub_date_verbose(), "just now")
self.assertEqual(secs.pub_date_verbose(), "3 seconds ago")
self.assertEqual(minutes.pub_date_verbose(), "1 minutes ago")
self.assertEqual(hours.pub_date_verbose(), "1 hours ago")
self.assertEqual(days.pub_date_verbose(), "1 days ago")
self.assertEqual(date.pub_date_verbose(), "2000/03/01")
|
py | 1a39d12e429443e7018c7b1ceb2037548c9552dd | import os
import datetime
import argparse
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from progress.spinner import Spinner
import ntpath
def make_safe(unsafe_string):
return "".join([c for c in unsafe_string if c.isalpha() or c.isdigit()]).rstrip()
parser = argparse.ArgumentParser(description='Extract lightning frames from a video.')
parser.add_argument('video_file_name', type=str,
help='The file with the lightning in it')
parser.add_argument('--threshold', dest='threshold', action='store',
                    type=float, default=10,
                    help='Use a non-default (default is 10) threshold for determining what a lightning flash is.')
parser.add_argument('--outfolder', dest='outfolder', action='store',
help='Specify a folder for the frames and data to be saved to.')
args = parser.parse_args()
THRESHOLD = args.threshold
VIDEO_FILE_NAME = args.video_file_name
print(f"Using Threshold: {THRESHOLD}")
print(f"Using Video File Name: {VIDEO_FILE_NAME}")
if __name__ == '__main__':
if not os.path.isfile(VIDEO_FILE_NAME):
print(f"File not found: {VIDEO_FILE_NAME}")
print("Exiting...")
exit(404)
if not args.outfolder:
OUTFOLDER = f"{ntpath.dirname(VIDEO_FILE_NAME)}/{ make_safe(ntpath.basename(VIDEO_FILE_NAME))}__OUTPUT"
else:
OUTFOLDER = args.outfolder
print(f"Output going to folder: {OUTFOLDER}")
print(f"Starting at: {datetime.datetime.now().isoformat()}")
if not os.path.isdir(OUTFOLDER):
os.makedirs(OUTFOLDER, exist_ok=True)
cap = cv2.VideoCapture(VIDEO_FILE_NAME)
frame_num = 0
frame_data = []
spinner = Spinner('Processing ')
while (cap.isOpened()):
ret, frame = cap.read()
if not ret:
print(f"Looks like we are done at Frame number: {frame_num}")
break
mean_brightness = np.mean(frame)
store_frame = mean_brightness > THRESHOLD
if store_frame:
cv2.imwrite(f"{OUTFOLDER}/frame_{str(frame_num)}.jpg", frame)
frame_data.append([frame_num, mean_brightness, store_frame])
frame_num += 1
spinner.next()
cap.release()
cv2.destroyAllWindows()
print(f"Ending at: {datetime.datetime.now().isoformat()}")
if len(frame_data) == 0:
print(f"Looks like no data was found, was this file location ok?:{VIDEO_FILE_NAME}")
exit(400)
df = pd.DataFrame(frame_data, columns=["frame_num", "brightness", "stored_image"] )
print(df)
df.to_csv(f"{OUTFOLDER}/frame_brighness_data.csv", columns=["frame_num", "brightness", "stored_image"], index=False)
df.plot(x="frame_num", y="brightness")
plt.show()
plt.savefig(f"{OUTFOLDER}/frame_brighness_data.pdf")
|
py | 1a39d1d34f61ff6e8a9d64cd6f9dee3023951930 | import tkinter as tk
from predict import load_model, classify_text
class Todo(tk.Tk):
def __init__(self, tasks=None):
super().__init__()
self.title("Text Language Identifier")
self.geometry("600x210")
self.language_note = tk.Label(self, text="Language Identified", bg="lightgrey", fg="black", pady=10)
self.language_note.pack(side=tk.TOP, fill=tk.X)
self.language_identified = tk.StringVar()
self.language_identified.set("")
self.classification_region = tk.Label(self, textvariable=self.language_identified, bg="grey", fg="white", pady=10)
self.classification_region.pack(side=tk.TOP, fill=tk.X)
self.text_region = tk.Text(self, height=10, bg="white", fg="black")
self.text_region.pack(side=tk.BOTTOM, fill=tk.X)
self.text_region.focus_set()
self.bind("<Return>", self.classify_language)
self.text_region_note = tk.Label(self, text="--- Type or Paste Text Here, and Press Enter ---", bg="lightgrey", fg="black", pady=10)
self.text_region_note.pack(side=tk.BOTTOM, fill=tk.X)
self.colour_schemes = [{"bg": "lightgrey", "fg": "black"}, {"bg": "grey", "fg": "white"}]
def classify_language(self, event=None):
text_input = self.text_region.get(1.0,tk.END).strip()
self.language_identified.set(lc[classify_text(text = text_input, model = model, le = le, n_gram_list = n_gram_list)])
self.text_region.delete(1.0, tk.END)
if __name__ == "__main__":
model, le, lc, n_gram_list = load_model()
todo = Todo()
todo.mainloop()
|
py | 1a39d258210de44f40b18ecfe006a2d1f154c425 | graph = {
'0': ['1', '2'],
'1': ['0', '2', '3', '4', '10'],
'2': ['0', '1', '3', '4', '7', '9'],
'3': ['1', '2', '4', '5'],
'4': ['1', '2', '3', '5', '6', '7', '8'],
'5': ['3', '4', '6', '8'],
'6': ['4', '5'],
'7': ['2', '4', '9', '10'],
'8': ['4', '5'],
'9': ['2', '7'],
'10': ['1', '7']
}
def bfs(graph, start, end):
visited = set()
# maintain a queue of paths
queue = []
# push the first path into the queue
queue.append([start])
visited.add(start)
while queue:
# get the first path from the queue
path = queue.pop(0)
# print(path)
# get the last node from the path
node = path[-1]
# print(node)
# path found
if node == end:
return path
# enumerate all adjacent nodes, construct a new path and push it into the queue
for adjacent in graph.get(node, []):
if adjacent not in visited:
new_path = list(path)
new_path.append(adjacent)
queue.append(new_path)
visited.add(adjacent)
print(visited)
print(bfs(graph, '0', '6'))
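# Note (added): the snippet below is unrelated to the BFS above -- it is a small
# experiment showing that PriorityQueue exposes its backing list as q.queue, so
# membership of a tuple such as (1, 1) inside an already-queued item can be tested.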
from queue import PriorityQueue
q = PriorityQueue()
a = ((1, 1), (10,2), (1, 0))
q.put(a)
print(any((1, 1) in item for item in q.queue)) |
py | 1a39d2b7cb8eaca45f1caeed65f7f7d8c96dce00 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListEndpoints
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1beta1_generated_EndpointService_ListEndpoints_sync]
from google.cloud import aiplatform_v1beta1
def sample_list_endpoints():
# Create a client
client = aiplatform_v1beta1.EndpointServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListEndpointsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_endpoints(request=request)
# Handle the response
for response in page_result:
print(response)
# [END aiplatform_v1beta1_generated_EndpointService_ListEndpoints_sync]
|
py | 1a39d2b853c51cb9ca889f6f2dd5941f1e59650b | from __future__ import absolute_import
from tornado import web, testing
from tornado.ioloop import IOLoop
from pyswagger import SwaggerApp
from pyswagger.contrib.client.tornado import TornadoClient
from ...utils import create_pet_db, get_test_data_folder, pet_Mary
import json
import sys
import pytest
import six
sapp = SwaggerApp._create_(get_test_data_folder(version='1.2', which='wordnik'))
received_file = None
received_meta = None
""" refer to pyswagger.tests.data.v1_2.wordnik for details """
class RESTHandler(web.RequestHandler):
""" base implementation of RequestHandler,
accept a db as init paramaeter.
"""
def initialize(self, db):
self.db = db
def prepare(self):
"""
        According to the Tornado FAQ, JSON request bodies are not parsed automatically.
"""
super(RESTHandler, self).prepare()
content_type = self.request.headers.get('Content-Type')
if content_type and content_type.startswith('application/json'):
            # handle media-type: json; a missing charset is treated as UTF-8
            if 'charset' not in content_type or content_type.rfind('charset=UTF-8') != -1:
                self.json_args = json.loads(self.request.body.decode('utf-8'))
            else:
                raise web.HTTPError(415, 'unsupported content type: ' + content_type)
class PetRequestHandler(RESTHandler):
""" refer to /pet """
def put(self):
pet = self.json_args
if not isinstance(pet['id'], int):
self.set_status(400)
if not self.db.update_(**pet):
self.set_status(404)
else:
self.set_status(200)
self.finish()
def post(self):
pet = self.json_args
if self.db.read_(pet['id']) != None:
self.set_status(409)
else:
self.db.create_(**pet)
self.set_status(200)
self.finish()
class PetIdRequestHandler(RESTHandler):
""" refer to /pet/{petId} """
def delete(self, id):
if not self.db.delete_(int(id)):
self.set_status(400)
self.finish()
def get(self, id):
pet = self.db.read_(int(id))
if not pet:
self.set_status(404)
else:
self.write(json.dumps(pet))
self.finish()
class ImageRequestHandler(web.RequestHandler):
""" test for file upload """
def post(self):
""" pass additionalMetadata and file to global
variables.
"""
global received_file
global received_meta
received_file = self.request.files['file'][0].body
received_meta = self.get_argument('additionalMetadata')
""" global variables """
pet_db = create_pet_db()
app = web.Application([
(r'/api/pet', PetRequestHandler, dict(db=pet_db)),
(r'/api/pet/(\d+)', PetIdRequestHandler, dict(db=pet_db)),
(r'/api/pet/uploadImage', ImageRequestHandler)
], debug=True)
@pytest.mark.skipif(sys.version_info[:2] >= (3, 3), reason='httpretty corrupt in python3')
class TornadoTestCase(testing.AsyncHTTPTestCase):
"""
"""
def setUp(self):
global received_file
global received_meta
# reset global
received_file = received_meta = None
super(TornadoTestCase, self).setUp()
self.client = TornadoClient()
def get_new_ioloop(self):
return IOLoop.instance()
def get_app(self):
global app
return app
@testing.gen_test
def test_updatePet(self):
""" updatePet """
global pet_db
resp = yield self.client.request(
sapp.op['updatePet'](body=dict(id=1, name='Tom1')),
opt=dict(
url_netloc='localhost:'+str(self.get_http_port())
))
self.assertEqual(resp.status, 200)
self.assertEqual(pet_db.read_(1)['name'], 'Tom1')
@testing.gen_test
def test_addPet(self):
""" addPet """
global pet_db
resp = yield self.client.request(
sapp.op['addPet'](body=dict(id=5, name='Mission')),
opt=dict(
url_netloc='localhost:'+str(self.get_http_port())
))
self.assertEqual(resp.status, 200)
self.assertEqual(pet_db.read_(5)['name'], 'Mission')
@testing.gen_test
def test_deletePet(self):
""" deletePet """
resp = yield self.client.request(
sapp.op['deletePet'](petId=5),
opt=dict(
url_netloc='localhost:'+str(self.get_http_port())
))
self.assertEqual(resp.status, 200)
self.assertEqual(pet_db.read_(5), None)
@testing.gen_test
def test_getPetById(self):
""" getPetById """
resp = yield self.client.request(
sapp.op['getPetById'](petId=2),
opt=dict(
url_netloc='localhost:'+str(self.get_http_port())
))
self.assertEqual(resp.status, 200)
self.assertEqual(resp.data, pet_Mary)
@testing.gen_test
def test_uploadFile(self):
""" uploadFile """
global received_file
global received_meta
resp = yield self.client.request(
sapp.op['uploadFile'](
additionalMetadata='a test file', file=dict(data=six.StringIO('a test Content'), filename='test.txt')),
opt=dict(
url_netloc='localhost:'+str(self.get_http_port())
))
self.assertEqual(resp.status, 200)
self.assertEqual(received_file.decode(), 'a test Content')
self.assertEqual(received_meta, 'a test file')
|
py | 1a39d30e5dda654ff6dd818b47fc0ebca070df22 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0016_auto_20151222_0052'),
]
operations = [
migrations.AlterModelOptions(
name='photo',
options={'permissions': (('view_photo', 'View photo'),)},
),
]
|
py | 1a39d3856db4ffb96909ef1b18821d44c8d7e4a2 | #!/usr/bin/env python
from time import sleep
from drive import RosAriaDriver
from math import sin, cos
### replace X with the robot number
robot=RosAriaDriver('/PIONIER4')
skan=robot.ReadLaser()
### read and write in json format
import json
with open('data_stereo.json','w') as json_data_file:
json.dump(skan,json_data_file)
## print to stdout
#print(json.dumps(skan))
## read data from file
#json_data = open('skan.json')
#data = json.load(json_data)
import matplotlib.pyplot as plt
import numpy as np
plt.ion()
x = np.arange(0,512)
theta = (np.pi/512 )*(x-256) # angle in rad
#fig2 = plt.figure()
#ax2 = fig2.add_axes([0.1,0.1,0.8,0.8])
#line, = ax2.plot(theta,skan,lw=2.5)
#ax2.set_xlim(-3,3)
#ax2.set_ylim(-3,3) # distance range
#plt.show()
plt.show()
skan=robot.ReadLaser()
a=[]
b=[]
for i in range(0, len(skan)):
xx = cos(theta[i])*skan[i]
a.append(xx)
yy = sin(theta[i])*skan[i]
b.append(yy)
fig3 = plt.figure()
ax3 = fig3.add_axes([0.1,0.1,0.8,0.8])
line, = ax3.plot(a,b)
# distance range
while True:
skan=robot.ReadLaser()
aa=[]
bb=[]
    for i in range(0, len(skan)):
xx = cos(theta[i])*skan[i]
aa.append(xx)
yy = sin(theta[i])*skan[i]
bb.append(yy)
line.set_xdata(aa)
line.set_ydata(bb)
plt.draw()
plt.pause(0.05)
|
py | 1a39d3c248246466840b03045b744d1465c856ae | # Oxi 17/01/22
from HashMap import *
import random
hashmap = HashMap(10, 2)
alpha = "abcdefghijklmnopqrstuwxyz"
def GenKey():
final = ""
for i in range(random.randrange(4, 15)):
final += alpha[random.randrange(0, len(alpha)-1)]
return final
found = False
while not found:
key1 = GenKey()
key2 = GenKey()
key3 = GenKey()
pos1 = hashmap.calcpos(hashmap.hashstr(key1))
pos2 = hashmap.calcpos(hashmap.hashstr(key2))
pos3 = hashmap.calcpos(hashmap.hashstr(key3))
    if pos1[0] == pos2[0] == pos3[0] and pos1[1] == pos2[1] == pos3[1]:
        print(f"COLLISION: '{key1}' and '{key2}' and '{key3}' at X:{pos1[0]} Y:{pos1[1]}")
        found = True  # stop searching once a three-way collision has been found
|
py | 1a39d5c0a361d131572c94f62a4eae1ae108eb32 | import numpy as np
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
""" attention pad mask """
def get_attn_pad_mask(seq_q, seq_k, i_pad):
batch_size, len_q = seq_q.size()
batch_size, len_k = seq_k.size()
pad_attn_mask = seq_k.data.eq(i_pad).unsqueeze(1).expand(batch_size, len_q, len_k) # <pad>
return pad_attn_mask
""" attention decoder mask """
def get_attn_decoder_mask(seq):
subsequent_mask = torch.ones_like(seq).unsqueeze(-1).expand(seq.size(0), seq.size(1), seq.size(1))
subsequent_mask = subsequent_mask.triu(diagonal=1) # upper triangular part of a matrix(2-D)
return subsequent_mask
""" scale dot product attention """
class ScaledDotProductAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.dropout = nn.Dropout(config.dropout)
self.scale = 1 / (self.config.d_head ** 0.5)
def forward(self, Q, K, V, attn_mask):
# (bs, n_head, n_q_seq, n_k_seq)
scores = torch.matmul(Q, K.transpose(-1, -2)).mul_(self.scale)
scores.masked_fill_(attn_mask, -1e9)
# (bs, n_head, n_q_seq, n_k_seq)
attn_prob = nn.Softmax(dim=-1)(scores)
attn_prob = self.dropout(attn_prob)
# (bs, n_head, n_q_seq, d_v)
context = torch.matmul(attn_prob, V)
# (bs, n_head, n_q_seq, d_v), (bs, n_head, n_q_seq, n_v_seq)
return context, attn_prob
""" multi head attention """
class MultiHeadAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.W_Q = nn.Linear(self.config.d_hidn, self.config.n_head * self.config.d_head)
self.W_K = nn.Linear(self.config.d_hidn, self.config.n_head * self.config.d_head)
self.W_V = nn.Linear(self.config.d_hidn, self.config.n_head * self.config.d_head)
self.scaled_dot_attn = ScaledDotProductAttention(self.config)
self.linear = nn.Linear(self.config.n_head * self.config.d_head, self.config.d_hidn)
self.dropout = nn.Dropout(config.dropout)
def forward(self, Q, K, V, attn_mask):
batch_size = Q.size(0)
# (bs, n_head, n_q_seq, d_head)
q_s = self.W_Q(Q).view(batch_size, -1, self.config.n_head, self.config.d_head).transpose(1,2)
# (bs, n_head, n_k_seq, d_head)
k_s = self.W_K(K).view(batch_size, -1, self.config.n_head, self.config.d_head).transpose(1,2)
# (bs, n_head, n_v_seq, d_head)
v_s = self.W_V(V).view(batch_size, -1, self.config.n_head, self.config.d_head).transpose(1,2)
# (bs, n_head, n_q_seq, n_k_seq)
attn_mask = attn_mask.unsqueeze(1).repeat(1, self.config.n_head, 1, 1)
# (bs, n_head, n_q_seq, d_head), (bs, n_head, n_q_seq, n_k_seq)
context, attn_prob = self.scaled_dot_attn(q_s, k_s, v_s, attn_mask)
# (bs, n_head, n_q_seq, h_head * d_head)
context = context.transpose(1, 2).contiguous().view(batch_size, -1, self.config.n_head * self.config.d_head)
        # (bs, n_q_seq, d_hidn)
output = self.linear(context)
output = self.dropout(output)
# (bs, n_q_seq, d_hidn), (bs, n_head, n_q_seq, n_k_seq)
return output, attn_prob
""" feed forward """
class PoswiseFeedForwardNet(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.conv1 = nn.Conv1d(in_channels=self.config.d_hidn, out_channels=self.config.d_ff, kernel_size=1)
self.conv2 = nn.Conv1d(in_channels=self.config.d_ff, out_channels=self.config.d_hidn, kernel_size=1)
self.active = F.gelu
self.dropout = nn.Dropout(config.dropout)
def forward(self, inputs):
# (bs, d_ff, n_seq)
output = self.active(self.conv1(inputs.transpose(1, 2)))
# (bs, n_seq, d_hidn)
output = self.conv2(output).transpose(1, 2)
output = self.dropout(output)
# (bs, n_seq, d_hidn)
return output
""" encoder layer """
class EncoderLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.self_attn = MultiHeadAttention(self.config)
self.layer_norm1 = nn.LayerNorm(self.config.d_hidn, eps=self.config.layer_norm_epsilon)
self.pos_ffn = PoswiseFeedForwardNet(self.config)
self.layer_norm2 = nn.LayerNorm(self.config.d_hidn, eps=self.config.layer_norm_epsilon)
def forward(self, inputs, attn_mask):
# (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)
att_outputs, attn_prob = self.self_attn(inputs, inputs, inputs, attn_mask)
att_outputs = self.layer_norm1(inputs + att_outputs)
# (bs, n_enc_seq, d_hidn)
ffn_outputs = self.pos_ffn(att_outputs)
ffn_outputs = self.layer_norm2(ffn_outputs + att_outputs)
# (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)
return ffn_outputs, attn_prob
""" encoder """
class Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.enc_emb = nn.Embedding(self.config.n_enc_vocab, self.config.d_hidn)
self.pos_emb = nn.Embedding(self.config.n_enc_seq + 1, self.config.d_hidn)
self.seg_emb = nn.Embedding(self.config.n_seg_type, self.config.d_hidn)
self.layers = nn.ModuleList([EncoderLayer(self.config) for _ in range(self.config.n_layer)])
def forward(self, inputs, segments):
positions = torch.arange(inputs.size(1), device=inputs.device, dtype=inputs.dtype).expand(inputs.size(0), inputs.size(1)).contiguous() + 1
pos_mask = inputs.eq(self.config.i_pad)
positions.masked_fill_(pos_mask, 0)
# (bs, n_enc_seq, d_hidn)
outputs = self.enc_emb(inputs) + self.pos_emb(positions) + self.seg_emb(segments)
# (bs, n_enc_seq, n_enc_seq)
attn_mask = get_attn_pad_mask(inputs, inputs, self.config.i_pad)
attn_probs = []
for layer in self.layers:
# (bs, n_enc_seq, d_hidn), (bs, n_head, n_enc_seq, n_enc_seq)
outputs, attn_prob = layer(outputs, attn_mask)
attn_probs.append(attn_prob)
# (bs, n_enc_seq, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
return outputs, attn_probs
""" bert """
class BERT(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.encoder = Encoder(self.config)
self.linear = nn.Linear(config.d_hidn, config.d_hidn)
self.activation = torch.tanh
def forward(self, inputs, segments):
# (bs, n_seq, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
outputs, self_attn_probs = self.encoder(inputs, segments)
# (bs, d_hidn)
outputs_cls = outputs[:, 0].contiguous()
outputs_cls = self.linear(outputs_cls)
outputs_cls = self.activation(outputs_cls)
# (bs, n_enc_seq, n_enc_vocab), (bs, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
return outputs, outputs_cls, self_attn_probs
def save(self, epoch, loss, path):
torch.save({
"epoch": epoch,
"loss": loss,
"state_dict": self.state_dict()
}, path)
def load(self, path, map_location=None):
save = torch.load(path, map_location)
self.load_state_dict(save["state_dict"], )
return save["epoch"], save["loss"]
""" BERT pretrain """
class BERTPretrain(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.bert = BERT(self.config)
# classfier
self.projection_cls = nn.Linear(self.config.d_hidn, 2, bias=False)
# lm
self.projection_lm = nn.Linear(self.config.d_hidn, self.config.n_enc_vocab, bias=False)
self.projection_lm.weight = self.bert.encoder.enc_emb.weight
def forward(self, inputs, segments):
# (bs, n_enc_seq, d_hidn), (bs, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
outputs, outputs_cls, attn_probs = self.bert(inputs, segments)
# (bs, 2)
logits_cls = self.projection_cls(outputs_cls)
# (bs, n_enc_seq, n_enc_vocab)
logits_lm = self.projection_lm(outputs)
        # (bs, 2), (bs, n_enc_seq, n_enc_vocab), [(bs, n_head, n_enc_seq, n_enc_seq)]
return logits_cls, logits_lm, attn_probs
""" naver movie classfication """
class MovieClassification(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.bert = BERT(self.config)
# classfier
self.projection_cls = nn.Linear(self.config.d_hidn, self.config.n_output, bias=False)
def forward(self, inputs, segments):
# (bs, n_enc_seq, d_hidn), (bs, d_hidn), [(bs, n_head, n_enc_seq, n_enc_seq)]
outputs, outputs_cls, attn_probs = self.bert(inputs, segments)
# (bs, n_output)
logits_cls = self.projection_cls(outputs_cls)
# (bs, n_output), [(bs, n_head, n_enc_seq, n_enc_seq)]
return logits_cls, attn_probs
def save(self, epoch, loss, score, path):
torch.save({
"epoch": epoch,
"loss": loss,
"score": score,
"state_dict": self.state_dict()
}, path)
def load(self, path):
save = torch.load(path)
self.load_state_dict(save["state_dict"])
return save["epoch"], save["loss"], save["score"]
|
py | 1a39d6b77a83370d1d1694e764e3e2e99c02041b | from flask import Blueprint, render_template, url_for, request, redirect
from logzero import logger
from base.utils.auth import admin_required, get_jwt
from base.forms import AdminEditToolContainerVersion
from caendr.services.tool_versions import get_all_containers, get_available_version_tags, get_container, get_version, update_version
from werkzeug.exceptions import UnprocessableEntity
admin_tools_bp = Blueprint('admin_tools',
__name__,
template_folder='templates')
@admin_tools_bp.route('/', methods=['GET'])
@admin_required()
def admin_tools():
title = 'Tool Container Versions'
alt_parent_breadcrumb = {"title": "Admin/Tools", "url": url_for('admin_tools.admin_tools')}
containers = get_all_containers()
return render_template('admin/tool/list.html', **locals())
@admin_tools_bp.route('/<id>/edit', methods=["GET", "POST"])
@admin_required()
def edit_tool(id):
if id is None:
raise UnprocessableEntity('Error: No profile id in URL')
title = f'{id}'
alt_parent_breadcrumb = {"title": "Admin/Tools", "url": url_for('admin_tools.admin_tools')}
jwt_csrf_token = (get_jwt() or {}).get("csrf")
tool = get_container(id)
versions = get_available_version_tags(tool)
versions.reverse()
form = AdminEditToolContainerVersion(version=get_version(tool))
form.version.choices = [(ver, ver) for ver in versions]
if request.method == 'POST' and form.validate_on_submit():
update_version(tool, request.form.get('version'))
return redirect(url_for("admin_tools.admin_tools"), code=302)
return render_template('admin/tool/edit.html', **locals())
|
py | 1a39d7cc5348908d2d957433d03409c0e7888fe1 | '''
Created on Sep 6, 2021
@author: mhindle
'''
import numpy as np
import numbers
from typing import Tuple, List, Dict, Union, Set
import itertools
from collections import defaultdict
import pandas as pd
class JointAllellicDistribution(object):
def __init__(self, snp_ordered, chromosome2snp=None, pseudocount = 1, surround_size=1):
self.pseudocount = pseudocount
self.frequency: Dict[Tuple[str,int],Dict[Tuple[str,int],Dict[Tuple[str,int],int]]] = dict()
self.n_observations: Dict[Tuple[str,str,str]] = defaultdict(int)
self.surround_size = surround_size
self.window_size = (surround_size*2)+1
self.snp_ordered = snp_ordered
self.chromosome2snp = chromosome2snp
def getWindow(self, targetSnp):
'''
        targetSnp is the snp around which to extract the symmetric window of +/- surround_size
'''
targetpos = self.snp_ordered.index(targetSnp)
startpos_snp = targetpos-self.surround_size
if startpos_snp < 0:
startpos_snp = 0
endpos_snp = targetpos+self.surround_size+1
        if endpos_snp > len(self.snp_ordered):
            endpos_snp = len(self.snp_ordered)  # clamp to the end of the snp list without dropping the last snp
snpWindow = self.snp_ordered[startpos_snp:endpos_snp]
if self.chromosome2snp is not None:
targetchr = self.chromosome2snp[targetSnp]
return([snpId for snpId in snpWindow if self.chromosome2snp[snpId] == targetchr])
return(snpWindow)
def getCountTable(self, observedstates: dict, targetSnp):
all_obs = [(snpid,observedstates[snpid]) for snpid in self.getWindow(targetSnp)]
def copypastefunc(x):
return([(snpid,state) if snpid != targetSnp else (targetSnp, x) for snpid,state in all_obs])
for state, query in enumerate(list(map(copypastefunc, [0,1,2]))):
#print("%s == %s" % (state, query))
workinghash = self.frequency
for item in query:
workinghash = workinghash[item]
if "obs" in workinghash:
yield workinghash["obs"] #it should be the result
else:
print("query %s" % query)
print("first %s" % self.frequency[query[0]])
print("workinghash %s" % workinghash)
print("item %s" % "_".join(map(str,item)))
raise Exception("incomplete traversal of nested hash: final %s state %s" % (workinghash, state))
def countJointFrq(self, table, mask, column_names: List[str], conditions_index=[0,1,2,9]):
column_names = np.array(column_names)
subset = table[np.all(mask,axis=1),:]
for values in list(itertools.product(conditions_index, repeat=self.window_size)):
conditions = list(zip(column_names, values))
nine_truth = np.ones((subset.shape[0],1), dtype=bool)
rows_that_meet = np.logical_and.reduce([nine_truth if value == 9 else np.equal(subset[:,column_names == snp],value) for snp,value in conditions])
keys = list(zip(column_names, values))
obs = np.count_nonzero(rows_that_meet)
self.recurse_set_dict(self.frequency, keys, obs)
if 9 not in values: # only count complete real value arrays
                self.n_observations[tuple(column_names)] += (obs+self.pseudocount) # keep track of how many observations there have been for this snp window
def recurse_set_dict(self, d, queue, value):
f = queue.pop(0)
if len(queue) > 0:
if f not in d:
d[f] = dict()
self.recurse_set_dict(d[f], queue, value)
else:
if f not in d:
d[f] = dict()
if "obs" not in d[f]:
d[f]["obs"] = value+self.pseudocount # we record the observations for this state combo
elif d[f]["obs"] != value+self.pseudocount:
raise Exception("overwriting value %s with %s " % (d[f]["obs"], value))
def countJointFrqAll(self, table:pd.DataFrame, mask=None):
'''
        table expects a pandas DataFrame with snp ids as columns and observations as rows
        mask expects a numpy bool matrix, but a pandas bool DataFrame is also handled
'''
if mask is None:
mask = np.ones(table.shape,dtype=bool)
        elif isinstance(mask, pd.DataFrame):
mask = mask.to_numpy(dtype=bool)
for targetSnp in self.snp_ordered:
snp_window = self.getWindow(targetSnp)
indexofsnps = [x in snp_window for x in table.columns]
self.countJointFrq(table.loc[:,snp_window].to_numpy(dtype=int), mask[:,indexofsnps], snp_window)
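# --- Usage sketch (added; not part of the original module) ---------------------
# A tiny self-contained example with made-up snp ids and genotype codes (0/1/2),
# only to illustrate the expected input shapes; real data would be a genotype
# matrix with one column per snp.
if __name__ == "__main__":
    snps = ["snp1", "snp2", "snp3"]
    chrom = {"snp1": "1", "snp2": "1", "snp3": "1"}
    dist = JointAllellicDistribution(snps, chromosome2snp=chrom, surround_size=1)
    genotypes = pd.DataFrame([[0, 1, 2], [0, 1, 2], [1, 1, 0], [2, 0, 1]], columns=snps)
    dist.countJointFrqAll(genotypes)
    # pseudocounted counts for snp2 being 0, 1 and 2 given the observed neighbours
    print(list(dist.getCountTable({"snp1": 0, "snp2": 1, "snp3": 2}, "snp2")))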
|
py | 1a39d87e46fd41a6e25cbde32c39cdecf1641e8f | from pathlib import Path
from typing import Dict
def public_ssl_paths(path: Path, config: Dict):
return (
path / config["ssl"]["public_crt"],
path / config["ssl"]["public_key"],
)
def private_ssl_paths(path: Path, config: Dict):
return (
path / config["ssl"]["private_crt"],
path / config["ssl"]["private_key"],
)
def private_ssl_ca_paths(path: Path, config: Dict):
return (
path / config["private_ssl_ca"]["crt"],
path / config["private_ssl_ca"]["key"],
)
def achi_ssl_ca_paths(path: Path, config: Dict):
return (
path / config["achi_ssl_ca"]["crt"],
path / config["achi_ssl_ca"]["key"],
)
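# A minimal usage sketch (added); the config dict below is an assumption about the
# shape of the application's configuration, with placeholder file names.
if __name__ == "__main__":
    config = {
        "ssl": {"public_crt": "public.crt", "public_key": "public.key",
                "private_crt": "private.crt", "private_key": "private.key"},
        "private_ssl_ca": {"crt": "private_ca.crt", "key": "private_ca.key"},
        "achi_ssl_ca": {"crt": "achi_ca.crt", "key": "achi_ca.key"},
    }
    root = Path("/tmp/ssl")
    print(public_ssl_paths(root, config))
    print(private_ssl_ca_paths(root, config))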
|
py | 1a39d8d22a79b4882b23ef0b2e953543eee46cad | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, [email protected]
#
#
from .basehandler import BaseHandler
from ppmessage.api.error import API_ERR
from ppmessage.db.models import OrgGroup
from ppmessage.db.models import OrgGroupUserData
from ppmessage.core.constant import API_LEVEL
from ppmessage.core.redis import redis_hash_to_dict
from ppmessage.api.handlers.ppaddorggroupuserhandler import update_group_icon
import json
import logging
class PPRemoveOrgGroupUserHandler(BaseHandler):
"""
"""
def _remove(self, _group_uuid, _user_uuid):
_redis = self.application.redis
_key = OrgGroupUserData.__tablename__ + ".group_uuid." + _group_uuid
if _redis.sismember(_key, _user_uuid) == False:
self.setErrorCode(API_ERR.NOT_GROUP_USER)
logging.error("user: %s not in group:%s" % (_user_uuid, _group_uuid))
return False
_key = OrgGroupUserData.__tablename__ + ".group_uuid." + _group_uuid + \
".user_uuid." + _user_uuid
_data_uuid = _redis.get(_key)
if _data_uuid == None:
self.setErrorCode(API_ERR.NOT_GROUP_USER)
logging.error("user: %s group:%s not bind." % (_user_uuid, _group_uuid))
return False
_row = OrgGroupUserData(uuid=_data_uuid)
_row.async_delete(_redis)
_row.delete_redis_keys(_redis)
return True
def _get(self, _app_uuid, _group_uuid, _user_list):
_redis = self.application.redis
for _user_uuid in _user_list:
_r = self._remove(_group_uuid, _user_uuid)
update_group_icon(_redis, _group_uuid)
return
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPCONSOLE)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
return
def _Task(self):
super(PPRemoveOrgGroupUserHandler, self)._Task()
_body = json.loads(self.request.body)
_app_uuid = _body.get("app_uuid")
_group_uuid = _body.get("group_uuid")
_user_list = _body.get("user_list")
if _app_uuid == None or _group_uuid == None or _user_list == None:
self.setErrorCode(API_ERR.NO_PARA)
return
if not isinstance(_user_list, list):
self.setErrorCode(API_ERR.NOT_LIST)
return
self._get(_app_uuid, _group_uuid, _user_list)
return
|
py | 1a39d93bdd2b5f45f278ea122265e229a595130b | #Problem
''' '''
#Answer
''' '''
#Code Written & Designed by Kenneth Friedman
|
py | 1a39d94d4cb261a7dcf3ae38f76935d7adab3e90 | #!/usr/bin/env python
from _title_word import ngram_line, ngram, run
from collections import Counter, defaultdict
def parse_word(title, txt):
count = defaultdict(int)
# word_set = set()
total = len(title) + len(txt)
for i in ngram_line(title):
if len(i) > 1:
count[i] = 1
for i in ngram_line(txt):
if i in count:
count[i] += 1
for word, n in sorted(count.items(), key=lambda x: len(x[0])):
if n >= 3:
if len(word) > 2:
for i in ngram(word, len(word)):
count[i] -= n
r = []
for word, n in count.items():
len_word = len(word)
if len_word > 2:
for i in ngram(word, len_word):
if n < count[i]:
n = 0
break
if n > 3 and (n * len(word)) / total > 0.005:
r.append(word)
return r
run(__file__, parse_word)
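# Added note: parse_word() builds candidate keywords from the n-grams of `title`
# (ngram_line/ngram come from the local _title_word helper, not shown here), counts
# how often each candidate reappears in `txt`, appears to discount the counts of
# phrases contained inside longer frequent phrases, and finally keeps a phrase only
# if it occurs more than 3 times and occurrences * length exceed 0.5% of
# len(title) + len(txt).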
|
py | 1a39da9f438c52197ad50cb1a50ad222ccd69c8d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayOpenAppServiceValidpageQueryModel import AlipayOpenAppServiceValidpageQueryModel
class AlipayOpenAppServiceValidpageQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayOpenAppServiceValidpageQueryModel):
self._biz_content = value
else:
self._biz_content = AlipayOpenAppServiceValidpageQueryModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.open.app.service.validpage.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
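# --- Usage sketch (added; not part of the generated SDK file) ------------------
# Shows how the request object assembles its POST parameters; the URL and token
# below are placeholders, and biz_content is left unset because the real model
# fields depend on the Alipay product being called.
if __name__ == "__main__":
    request = AlipayOpenAppServiceValidpageQueryRequest()
    request.notify_url = "https://example.com/notify"             # placeholder
    request.add_other_text_param("app_auth_token", "demo-token")  # placeholder
    print(request.get_params())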
|
py | 1a39db3837791054ad710072a2e44831dd58af53 | """Benchmark for merge0.
Trains a small percentage of autonomous vehicles to dissipate shockwaves caused
by merges in an open network. The autonomous penetration rate in this example
is 10%.
- **Action Dimension**: (5, )
- **Observation Dimension**: (25, )
- **Horizon**: 750 steps
"""
from flow.envs import MergePOEnvAvgVel
from flow.networks import MergeNetwork
from copy import deepcopy
from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \
InFlows, SumoCarFollowingParams
from flow.networks.merge import ADDITIONAL_NET_PARAMS
from flow.core.params import VehicleParams, SumoLaneChangeParams
from flow.controllers import SimCarFollowingController, RLController,IDMController,SimLaneChangeController
# time horizon of a single rollout
HORIZON = 1500
# inflow rate at the highway
FLOW_RATE = 2000
# percent of autonomous vehicles
RL_PENETRATION = 0.1
# num_rl term (see ADDITIONAL_ENV_PARAMs)
NUM_RL = 20
# We consider a highway network with an upstream merging lane producing
# shockwaves
additional_net_params = deepcopy(ADDITIONAL_NET_PARAMS)
additional_net_params["merge_lanes"] = 1
additional_net_params["highway_lanes"] = 2
additional_net_params["pre_merge_length"] = 500
# RL vehicles constitute 5% of the total number of vehicles
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(SimCarFollowingController, {
}
),
lane_change_controller=(SimLaneChangeController,{}),
car_following_params=SumoCarFollowingParams(
speed_mode=9,
),
lane_change_params=SumoLaneChangeParams(
#model="SL2015",
lane_change_mode=1621,
#lc_pushy=0,
#lc_assertive=5,
lc_impatience=1e-8,
lc_time_to_impatience=1e12
),
num_vehicles=0)
vehicles.add(
veh_id="rl",
acceleration_controller=(RLController, {}),
lane_change_controller=(SimLaneChangeController,{}),
car_following_params=SumoCarFollowingParams(
speed_mode=9,
),
lane_change_params=SumoLaneChangeParams(
#model="SL2015",
lane_change_mode=1621,
#lc_pushy=0,
#lc_assertive=5,
lc_impatience=1e-8,
lc_time_to_impatience=1e12
),
num_vehicles=0)
# Vehicles are introduced from both sides of merge, with RL vehicles entering
# from the highway portion as well
inflow = InFlows()
inflow.add(
veh_type="human",
edge="inflow_highway",
vehs_per_hour=(1 - RL_PENETRATION) * FLOW_RATE,
depart_lane=0,#"first",#"free",
depart_speed=10)
inflow.add(
veh_type="rl",
edge="inflow_highway",
vehs_per_hour=RL_PENETRATION * FLOW_RATE,
depart_lane=0,#"free",
depart_speed=10)
inflow.add(
veh_type="human",
edge="inflow_merge",
vehs_per_hour=200,
depart_lane="first",#"free",
depart_speed=7.5)
flow_params = dict(
# name of the experiment
exp_tag="merge_4_Sim_AvgVel_MultiLane",
# name of the flow environment the experiment is running on
env_name=MergePOEnvAvgVel,
# name of the network class the experiment is running on
network=MergeNetwork,
# simulator that is used by the experiment
simulator='traci',
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(
restart_instance=True,
sim_step=0.5,
render=False,
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
horizon=HORIZON,
sims_per_step=2,
warmup_steps=0,
additional_params={
"max_accel": 9,
"max_decel": 9,
"target_velocity": 30,
"num_rl": NUM_RL,
},
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=NetParams(
inflows=inflow,
additional_params=additional_net_params,
),
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=InitialConfig(),
)
|
py | 1a39db5f49747a1ab42db9a01db83fd98e035688 | # coding=utf-8
import logging
import random
import string
import sys
import unittest
from time import time, sleep
import apiritif
import os
import re
from appium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support import expected_conditions as econd
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from bzt.resources.selenium_extras import dialogs_replace, get_locator, wait_for, waiter
class TestLocScAppium(unittest.TestCase):
def setUp(self):
self.vars = {}
timeout = 3.5
self.driver = None
options = None
self.driver = webdriver.Remote(command_executor='http://localhost:4723/wd/hub',
desired_capabilities={'browserName': 'chrome', 'deviceName': '',
'platformName': 'android'},
options=options)
self.driver.implicitly_wait(timeout)
apiritif.put_into_thread_store(timeout=timeout, func_mode=False, driver=self.driver, windows={},
scenario_name='loc_sc_appium')
def _1_(self):
with apiritif.smart_transaction('/'):
self.driver.get('http://blazedemo.com/')
dialogs_replace()
wait_for('present', [{'xpath': "//input[@type='submit']"}], 3.5)
self.assertEqual(self.driver.title, 'BlazeDemo')
body = self.driver.page_source
re_pattern = re.compile('contained_text')
self.assertEqual(0, len(re.findall(re_pattern, body)), "Assertion: 'contained_text' found in BODY")
def _2_empty(self):
with apiritif.smart_transaction('empty'):
pass
def test_locscappium(self):
self._1_()
self._2_empty()
def tearDown(self):
if self.driver:
self.driver.quit()
|
py | 1a39dcae7de52e9c4ec762e9c49230e4adcebb2a | from collections import OrderedDict
from datetime import timedelta as td
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from cachecow.decorators import cached_function
from django.utils.translation import ugettext, ugettext_lazy as _, pgettext, pgettext_lazy
from django.conf import settings
from django.db.models import *
from django.db.models.signals import post_save
from canvas import bgwork, util
from canvas.cache_patterns import CachedCall
from canvas.models import BaseCanvasModel, Comment, Content, get_mapping_id_from_short_id, Visibility
from canvas.redis_models import redis, RealtimeChannel, RedisSortedSet
from canvas.util import UnixTimestampField
from canvas.notifications.actions import Actions
from drawquest import knobs
from drawquest.apps.drawquest_auth.models import User, AnonymousUser
from drawquest.apps.push_notifications.models import push_notification
from drawquest.apps.quest_comments.models import QuestComment
from drawquest.apps.quests import signals
from drawquest.pagination import Paginator
from drawquest.apps.quest_invites.models import InvitedQuests
from drawquest.apps.quests.top import top_quests_buffer, get_quest_score
from services import Services
class ScheduledQuest(BaseCanvasModel):
quest = ForeignKey('Quest', null=False)
curator = ForeignKey(User, blank=True, null=True, default=None, related_name='scheduled_quests')
timestamp = UnixTimestampField(default=0)
appeared_on = UnixTimestampField(null=True, db_index=True)
sort = IntegerField()
class Meta:
ordering = ['-appeared_on']
@classmethod
def get_or_create(cls, quest):
if quest.parent_comment_id:
quest = quest.parent_comment
try:
return cls.objects.get(quest=quest.id)
except cls.DoesNotExist:
return cls.objects.create(quest=Quest.objects.get(pk=quest.id), sort=1)
@classmethod
def archived(cls, select_quests=False):
qs = cls.objects
if select_quests:
qs = qs.select_related('quest')
current_quest_id = redis.get('dq:current_scheduled_quest')
if current_quest_id:
qs = qs.exclude(id=current_quest_id)
return qs.exclude(appeared_on__isnull=True).order_by('-appeared_on')
@classmethod
def unarchived(cls):
return cls.objects.filter(appeared_on__isnull=True).order_by('sort')
def _publish_quest_of_the_day(self):
signals.current_quest_changed.send(ScheduledQuest, instance=self)
RealtimeChannel('qotd', 1).publish({'quest_id': self.quest_id})
push_notification('quest_of_the_day',
_(u"Today's Quest: %(quest_title)s" % {'quest_title': self.quest.title}),
extra_metadata={'quest_id': self.quest.id},
badge=1)
def set_as_current_quest(self):
redis.set('dq:current_scheduled_quest', self.id)
self.appeared_on = Services.time.time()
self.save()
self.quest.details.force()
self._publish_quest_of_the_day()
@classmethod
def rollover_next_quest(cls):
""" Sets the next scheduled quest as the currently active one / quest of the day. """
try:
cls.unarchived().order_by('sort')[0].set_as_current_quest()
except IndexError:
cls.archived().exclude(quest__title='Give him a smile!').order_by('appeared_on')[0].set_as_current_quest()
@classmethod
def current_scheduled_quest(cls):
""" The `ScheduledQuest` instance representing the current quest of the day. """
scheduled_quest_id = redis.get('dq:current_scheduled_quest')
if scheduled_quest_id:
return cls.objects.get(id=scheduled_quest_id)
class QuestManager(Visibility.PublicOnlyManager):
def get_query_set(self):
return super(QuestManager, self).get_query_set().filter(parent_comment__isnull=True)
class QuestAllManager(Manager):
def get_query_set(self):
return super(QuestAllManager, self).get_query_set().filter(parent_comment__isnull=True)
class QuestPublishedManager(Visibility.PublishedOnlyManager):
def get_query_set(self):
return super(QuestPublishedManager, self).get_query_set().filter(parent_comment__isnull=True)
class QuestVisibleOnlyManager(Visibility.PublishedOnlyManager):
def get_query_set(self):
return super(QuestVisibleOnlyManager, self).get_query_set().filter(parent_comment__isnull=True)
class Quest(Comment):
objects = QuestManager()
all_objects = QuestAllManager()
published = QuestPublishedManager()
visible = QuestVisibleOnlyManager()
class Meta:
proxy = True
@property
def comments_url(self):
return settings.API_PREFIX + 'quests/comments'
@property
def comments(self):
return self.replies
@classmethod
def completed_by_user_count(self, user):
""" The number of quests a user has completed. """
return QuestComment.by_author(user).values('parent_comment_id').distinct().count()
def first_appeared_on(self):
if self.ugq:
return self.timestamp
if self.scheduledquest_set.exists():
return self.scheduledquest_set.all()[0].appeared_on
def get_absolute_url(self):
if not slugify(self.title):
return '/q/' + util.base36encode(self.id)
return reverse('quest', args=[util.base36encode(self.id), slugify(self.title)])
def author_count(self):
return self.replies.values_list('author_id', flat=True).distinct().count()
def drawing_count(self):
return self.replies.exclude(reply_content__isnull=True).count()
def schedule(self, ordinal, curator=None):
""" Returns `scheduled_quest` instance. """
scheduled_quest = ScheduledQuest.get_or_create(self)
if not scheduled_quest.curator:
scheduled_quest.curator = curator
scheduled_quest.timestamp = Services.time.time()
scheduled_quest.sort = ordinal
scheduled_quest.save()
return scheduled_quest
def is_currently_scheduled(self):
""" 'currently scheduled' means it's the quest of the day. """
scheduled_quest = ScheduledQuest.objects.get(id=redis.get('dq:current_scheduled_quest'))
return scheduled_quest.quest_id == self.id
def is_onboarding_quest(self):
return str(knobs.ONBOARDING_QUEST_ID) == str(self.id)
def user_has_completed(self, user):
""" Whether `user` has contributed a drawing for this quest. """
return self.replies.filter(author=user).exclude(reply_content__isnull=True).exists()
def attribute_to_user(self, user, attribution_copy):
self.attribution_user = user
self.attribution_copy = attribution_copy
self.save()
self.details.force()
def clear_attribution(self):
self.attribution_user = None
self.attribution_copy = ''
self.save()
self.details.force()
def dismiss(self, dismisser):
dismisser.redis.dismissed_quests.dismiss_quest(self)
def update_score(self):
score = get_quest_score(self)
top_quests_buffer.bump(self.id, score)
return score
@property
def invited_users(self):
from drawquest.apps.quest_invites.models import InvitedUsers
return InvitedUsers(self)
def _details(self):
content_details = self.reply_content.details().to_backend() if self.reply_content else {}
ts = self.timestamp
if self.scheduledquest_set.exists():
ts = self.scheduledquest_set.all().order_by('-appeared_on')[0].appeared_on or ts
ret = {
'id': self.id,
'author_id': self.author_id,
'content': content_details,
'timestamp': ts,
'title': self.title,
'comments_url': self.comments_url,
'author_count': self.author_count(),
'drawing_count': self.drawing_count(),
'visibility': self.visibility,
'attribution_copy': self.attribution_copy,
'ugq': self.ugq,
}
try:
ret['attribution_username'] = self.attribution_user.username
user = User.objects.get(id=self.attribution_user_id)
if user.userinfo.avatar:
ret['attribution_avatar_url'] = user.userinfo.avatar.details().get_absolute_url_for_image_type('archive')
ret['attribution_avatar_urls'] = user.details().avatar_urls['gallery']
except AttributeError:
ret['attribution_username'] = None
return ret
@classmethod
def details_by_id(cls, quest_id, promoter=None):
from drawquest.apps.quests.details_models import QuestDetails
if promoter is None:
promoter = QuestDetails
def inner_call():
return cls.all_objects.get(id=quest_id)._details()
return CachedCall(
'quest:{}:details_v15'.format(quest_id),
inner_call,
24*60*60,
promoter=promoter,
)
@property
def details(self):
return self.details_by_id(self.id)
@classmethod
def _auto_moderation(cls, author):
""" Returns (skip_moderation, curate,) booleans. """
curate = ((author.userinfo.trusted is None and redis.get('dq:auto_curate'))
or author.userinfo.trusted == False)
return False, curate
@classmethod
def create_and_post(cls, request, author, title, content=None, ugq=False):
skip_moderation, curate = cls._auto_moderation(author)
quest = super(Quest, cls).create_and_post(
request,
author,
False,
None,
content,
curate=curate,
skip_moderation=skip_moderation,
ugq=ugq,
title=title,
)
if ugq:
author.redis.ugq_buffer.bump(quest.id)
@bgwork.defer
def followee_created_ugq():
Actions.followee_created_ugq(author, quest)
return quest
def get_share_page_url(self, absolute=False):
slug = slugify(self.title)
if slug:
url = reverse('quest', args=[util.base36encode(self.id), slug])
else:
url = '/q/{}'.format(util.base36encode(self.id))
if absolute:
url = 'http://' + settings.DOMAIN + url
return url
class DismissedQuests(RedisSortedSet):
def __init__(self, user):
self.user_id = getattr(user, 'id', user)
super(DismissedQuests, self).__init__('user:{}:dismissed_quests'.format(self.user_id))
def dismiss_quest(self, quest):
""" `comment` can be a Comment or CommentDetails. """
self.zadd(quest.id, Services.time.time())
def filter_quests(self, quests):
hidden_quest_ids = set(int(id_) for id_ in self.zrange(0, -1))
return [quest for quest in quests if int(quest.id) not in hidden_quest_ids]
def _dedupe_quests(quests):
''' Each quest should be a dict with id and timestamp. Favors recency. '''
quests = sorted(quests, key=lambda quest: quest['timestamp'])
quests = dict((cmt['id'], cmt['timestamp']) for cmt in quests)
quests = [{'id': id_, 'timestamp': timestamp} for id_, timestamp in quests.items()]
return quests
@cached_function(timeout=td(days=30), key=[
'completed_quest_ids_with_timestamps', 'v5',
lambda user: getattr(user, 'id', user),
])
def completed_quest_ids_with_timestamps(user):
from drawquest.apps.quest_comments.models import QuestComment
user_id = getattr(user, 'id', user)
comments = QuestComment.objects.filter(author_id=user_id).exclude(reply_content__isnull=True).values('parent_comment_id', 'timestamp')
quests = [{'id': cmt['parent_comment_id'], 'timestamp': cmt['timestamp']}
for cmt in comments]
quests = _dedupe_quests(quests)
quests = list(sorted(quests, key=lambda cmt: cmt['timestamp']))
return quests
# Cache invalidation for completed_quest_ids.
post_save.connect(
lambda sender, instance, **kwargs: completed_quest_ids_with_timestamps.delete_cache(instance.author_id),
sender=QuestComment, dispatch_uid='post_save_for_completed_quest_ids_with_timestamps_api', weak=False
)
def completed_quest_ids(user):
quests = completed_quest_ids_with_timestamps(user)
quests = sorted(quests, key=lambda quest: quest['timestamp'])
return [quest['id'] for quest in quests]
def archived_quests(offset=None):
""" Returns quest details. """
def get_cached(archived_quests):
return CachedCall.multicall([archived.quest.details for archived in archived_quests])
archived_quests = ScheduledQuest.archived(select_quests=True)
if offset is None:
return get_cached(archived_quests)
pagination = Paginator(archived_quests, knobs.QUESTS_PER_PAGE, offset=offset)
archived_quests = pagination.items
archived_quests = get_cached(archived_quests)
return archived_quests, pagination
def current_quest_details():
try:
quest = ScheduledQuest.current_scheduled_quest().quest
except AttributeError:
return None
return quest.details()
def _followee_quest_ids(user, since_timestamp=None):
buffer_keys = ['ugq_by_user:{}'.format(followee_id)
for followee_id in user.redis.new_following.zrange(0, -1)]
items = redis.zunion(buffer_keys, withscores=True, transaction=False)
if since_timestamp is not None:
items = [item for item in items if item[1] > since_timestamp]
items = sorted(items, key=lambda item: -item[1])
return [int(item[0]) for item in items]
def _current_quest_for_inbox(user):
try:
current_quest = ScheduledQuest.current_scheduled_quest().quest
if current_quest.replies.filter(author=user).exists():
return None
else:
return current_quest.details()
except AttributeError:
return None
def quest_inbox(user):
"""
Returns quest details in a tuple: current_quest, quests.
current_quest may be None.
"""
from drawquest.apps.quests.details_models import QuestDetails
if not user.is_authenticated():
return (current_quest_details(), [])
current_quest = _current_quest_for_inbox(user)
user_completed_quest_ids = completed_quest_ids(user)
followee_quest_ids = _followee_quest_ids(user)
followee_quest_ids = [id_ for id_ in followee_quest_ids
if id_ not in user_completed_quest_ids]
followee_quests = QuestDetails.from_ids(followee_quest_ids[:knobs.QUEST_INBOX_SIZE])
followee_quests = [(quest, quest.timestamp) for quest in followee_quests]
invited_quests = user.redis.quest_invites.uncompleted_invites()
invited_quests = [
(quest, ts)
for quest, ts in invited_quests
if ((current_quest is None or quest.id != current_quest.id)
and quest.id not in followee_quest_ids)
]
quests = followee_quests + invited_quests
quests = [(quest, ts) for quest, ts in quests
if int(quest.id) not in user_completed_quest_ids]
quests = [quest for quest, ts in sorted(quests, key=lambda q: -q[1])]
quests = user.redis.dismissed_quests.filter_quests(quests)
quests = quests[:knobs.QUEST_INBOX_SIZE]
if (current_quest is not None
and (current_quest.id in user_completed_quest_ids
or str(current_quest.id) in user.redis.dismissed_quests)):
current_quest = None
return current_quest, quests
def has_new_inbox_items(user, since_timestamp):
since_timestamp = int(since_timestamp)
if _current_quest_for_inbox(user) is not None:
return True
user_completed_quest_ids = completed_quest_ids(user)
followee_quest_ids = _followee_quest_ids(user, since_timestamp=since_timestamp)
if any(id_ for id_ in followee_quest_ids if id_ not in user_completed_quest_ids):
return True
invited_quests = user.redis.quest_invites.uncompleted_invites()
if any(ts > since_timestamp for quest, ts in invited_quests):
return True
return False
def quest_history(user):
""" Returns quest details. """
from drawquest.apps.quests.details_models import QuestDetails
if not user.is_authenticated():
return []
completed_quests = completed_quest_ids_with_timestamps(user)
completed_quests = sorted(completed_quests, key=lambda q: -q['timestamp'])
completed_quests = completed_quests[:knobs.QUEST_HISTORY_SIZE]
ugq = Quest.objects.filter(author=user).order_by('-id').values('id', 'timestamp')
ugq = list(ugq[:knobs.QUEST_HISTORY_SIZE])
dismissed_quests = user.redis.dismissed_quests.zrevrange(0, knobs.QUEST_HISTORY_SIZE,
withscores=True)
dismissed_quests = [{'id': int(item[0]), 'timestamp': item[1]}
for item in dismissed_quests]
history = completed_quests + ugq + dismissed_quests
history = _dedupe_quests(history)
history = sorted(history, key=lambda quest: -quest['timestamp'])
history = history[:knobs.QUEST_HISTORY_SIZE]
history = [quest['id'] for quest in history]
return QuestDetails.from_ids(history)
|
py | 1a39dd7ad315488a3656c5a2d1a78abdcac67e61 | #!/usr/bin/env python
import os
import re
import sys
from setuptools import setup, find_packages
version = re.compile(r'VERSION\s*=\s*\((.*?)\)')
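# Matches lines like "VERSION = (1, 0, 0)" in flower/__init__.py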
def get_package_version():
"returns package version without importing it"
base = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(base, "flower/__init__.py")) as initf:
for line in initf:
m = version.match(line.strip())
if not m:
continue
return ".".join(m.groups()[0].split(", "))
def get_requirements(filename):
return open('requirements/' + filename).read().splitlines()
classes = """
Development Status :: 4 - Beta
Intended Audience :: Developers
License :: OSI Approved :: BSD License
Topic :: System :: Distributed Computing
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Operating System :: OS Independent
"""
classifiers = [s.strip() for s in classes.split('\n') if s]
install_requires = get_requirements('default.txt')
if sys.version_info < (3, 0):
install_requires.append('futures')
setup(
name='ma-flower',
version=get_package_version(),
description='Celery Flower',
long_description=open('README.rst').read(),
author='Mher Movsisyan',
author_email='[email protected]',
url='https://github.com/mher/flower',
license='BSD',
classifiers=classifiers,
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=install_requires,
test_suite="tests",
tests_require=get_requirements('test.txt'),
package_data={'flower': ['templates/*', 'static/*.*',
'static/**/*.*', 'static/**/**/*.*']},
entry_points={
'console_scripts': [
'flower = flower.__main__:main',
],
'celery.commands': [
'flower = flower.command:FlowerCommand',
],
},
)
|
py | 1a39ddbebb92c1f8092d4f8bda12f4341b3c67a1 | # -*- coding: utf-8 -*-
"""
Trac WebAdmin plugin for administration of custom fields.
License: BSD
(c) 2005-2012 ::: www.CodeResort.com - BV Network AS ([email protected])
(c) 2007-2009 ::: www.Optaros.com (.....)
"""
from pkg_resources import resource_filename
from trac.config import Option
from trac.core import *
from trac.web.chrome import Chrome, ITemplateProvider, add_script, add_warning
from trac.admin.api import IAdminPanelProvider
from customfieldadmin.api import CustomFields, _
class CustomFieldAdminPage(Component):
implements(ITemplateProvider, IAdminPanelProvider)
def __init__(self):
# Init CustomFields so translations work from first request
# FIXME: It actually only works from SECOND request - Trac bug?!
CustomFields(self.env)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'TICKET_ADMIN' in req.perm('admin', 'ticket/customfields'):
yield ('ticket', _("Ticket System"),
'customfields', _("Custom Fields"))
def render_admin_panel(self, req, cat, page, customfield):
req.perm('admin', 'ticket/customfields').require('TICKET_ADMIN')
add_script(req, 'customfieldadmin/js/customfieldadmin.js')
def _customfield_from_req(self, req):
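            # Collect the custom field definition from the submitted form values.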
cfield = {'name': req.args.get('name','').encode('utf-8'),
'label': req.args.get('label','').encode('utf-8'),
'type': req.args.get('type','').encode('utf-8'),
'value': req.args.get('value','').encode('utf-8'),
'options': [x.strip().encode('utf-8') for x in \
req.args.get('options','').split("\n")],
'cols': req.args.get('cols','').encode('utf-8'),
'rows': req.args.get('rows','').encode('utf-8'),
'order': req.args.get('order', '').encode('utf-8'),
'format': req.args.get('format', '').encode('utf-8')}
return cfield
cf_api = CustomFields(self.env)
cf_admin = {} # Return values for template rendering
# Detail view?
if customfield:
cfield = None
for a_cfield in cf_api.get_custom_fields():
if a_cfield['name'] == customfield:
cfield = a_cfield
break
if not cfield:
raise TracError(_("Custom field %(name)s does not exist.",
name=customfield))
if req.method == 'POST':
if req.args.get('save'):
cfield.update(_customfield_from_req(self, req))
cf_api.update_custom_field(cfield)
req.redirect(req.href.admin(cat, page))
elif req.args.get('cancel'):
req.redirect(req.href.admin(cat, page))
if cfield.has_key('options'):
optional_line = ''
if cfield.get('optional', False):
optional_line = "\n\n"
cfield['options'] = optional_line + "\n".join(cfield['options'])
cf_admin['cfield'] = cfield
cf_admin['cf_display'] = 'detail'
else:
if req.method == 'POST':
# Add Custom Field
if req.args.get('add') and req.args.get('name'):
cfield = _customfield_from_req(self, req)
cf_api.update_custom_field(cfield, create=True)
req.redirect(req.href.admin(cat, page))
# Remove Custom Field
elif req.args.get('remove') and req.args.get('sel'):
sel = req.args.get('sel')
sel = isinstance(sel, list) and sel or [sel]
if not sel:
raise TracError(_("No custom field selected"))
for name in sel:
cfield = {'name': name}
cf_api.delete_custom_field(cfield)
req.redirect(req.href.admin(cat, page))
elif req.args.get('apply'):
# Change order
order = dict([(key[6:], req.args.get(key)) for key
in req.args.keys()
if key.startswith('order_')])
cfields = cf_api.get_custom_fields()
for current_cfield in cfields:
new_order = order.get(current_cfield['name'], 0)
if new_order:
current_cfield['order'] = new_order
cf_api.update_custom_field(current_cfield)
req.redirect(req.href.admin(cat, page))
cfields = []
orders_in_use = []
for item in cf_api.get_custom_fields():
item['href'] = req.href.admin(cat, page, item['name'])
item['registry'] = ('ticket-custom',
item['name']) in Option.registry
cfields.append(item)
orders_in_use.append(int(item.get('order')))
cf_admin['cfields'] = cfields
cf_admin['cf_display'] = 'list'
if sorted(orders_in_use) != range(1, len(cfields)+1):
add_warning(req, _("Custom Fields are not correctly sorted. " \
"This may affect appearance when viewing tickets."))
if hasattr(Chrome(self.env), 'jenv'):
return 'customfieldadmin.html', cf_admin, None
else:
return 'customfieldadmin.html', cf_admin
# ITemplateProvider methods
def get_templates_dirs(self):
return [resource_filename(__name__, 'templates')]
def get_htdocs_dirs(self):
return [('customfieldadmin', resource_filename(__name__, 'htdocs'))]
|
py | 1a39de17c4aab08e589d5e16f77b09db4800c6db | from whatsapp_defines import (
WATags,
WASingleByteTokens,
WADoubleByteTokens,
WAWebMessageInfo,
)
class WABinaryReader:
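    """Reads WhatsApp Web's binary node encoding (tag constants come from whatsapp_defines)."""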
def __init__(self, data):
self.data = data
self.index = 0
def checkEOS(self, length):
if self.index + length > len(self.data):
raise EOFError("end of stream reached")
def readByte(self):
self.checkEOS(1)
ret = ord(self.data[self.index])
self.index += 1
return ret
def readIntN(self, n, littleEndian=False):
self.checkEOS(n)
ret = 0
for i in range(n):
currShift = i if littleEndian else n - 1 - i
ret |= ord(self.data[self.index + i]) << (currShift * 8)
self.index += n
return ret
def readInt16(self, littleEndian=False):
return self.readIntN(2, littleEndian)
def readInt20(self):
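        # 20-bit big-endian integer: low 4 bits of the first byte, then two full bytes.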
self.checkEOS(3)
ret = (
((ord(self.data[self.index]) & 15) << 16)
+ (ord(self.data[self.index + 1]) << 8)
+ ord(self.data[self.index + 2])
)
self.index += 3
return ret
def readInt32(self, littleEndian=False):
return self.readIntN(4, littleEndian)
def readInt64(self, littleEndian=False):
return self.readIntN(8, littleEndian)
def readPacked8(self, tag):
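        # String packed as two 4-bit values (nibble or hex digits) per byte; the high bit of the start byte marks an odd length.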
startByte = self.readByte()
ret = ""
for i in range(startByte & 127):
currByte = self.readByte()
ret += self.unpackByte(tag, (currByte & 0xF0) >> 4) + self.unpackByte(
tag, currByte & 0x0F
)
if (startByte >> 7) != 0:
ret = ret[: len(ret) - 1]
return ret
def unpackByte(self, tag, value):
if tag == WATags.NIBBLE_8:
return self.unpackNibble(value)
elif tag == WATags.HEX_8:
return self.unpackHex(value)
def unpackNibble(self, value):
if value >= 0 and value <= 9:
return chr(ord("0") + value)
elif value == 10:
return "-"
elif value == 11:
return "."
elif value == 15:
return "\0"
        raise ValueError("invalid nibble to unpack: " + str(value))
def unpackHex(self, value):
if value < 0 or value > 15:
raise ValueError("invalid hex to unpack: " + str(value))
if value < 10:
return chr(ord("0") + value)
else:
return chr(ord("A") + value - 10)
def readRangedVarInt(self, minVal, maxVal, desc="unknown"):
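        # Reads a varint and checks it falls in [minVal, maxVal); readVarInt is expected to be provided elsewhere.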
ret = self.readVarInt()
if ret < minVal or ret >= maxVal:
raise ValueError("varint for " + desc + " is out of bounds: " + str(ret))
return ret
def isListTag(self, tag):
return tag == WATags.LIST_EMPTY or tag == WATags.LIST_8 or tag == WATags.LIST_16
def readListSize(self, tag):
if tag == WATags.LIST_EMPTY:
return 0
elif tag == WATags.LIST_8:
return self.readByte()
elif tag == WATags.LIST_16:
return self.readInt16()
raise ValueError("invalid tag for list size: " + str(tag))
def readString(self, tag):
if tag >= 3 and tag <= 235:
token = self.getToken(tag)
if token == "s.whatsapp.net":
token = "c.us"
return token
if (
tag == WATags.DICTIONARY_0
or tag == WATags.DICTIONARY_1
or tag == WATags.DICTIONARY_2
or tag == WATags.DICTIONARY_3
):
return self.getTokenDouble(tag - WATags.DICTIONARY_0, self.readByte())
elif tag == WATags.LIST_EMPTY:
return
elif tag == WATags.BINARY_8:
return self.readStringFromChars(self.readByte())
elif tag == WATags.BINARY_20:
return self.readStringFromChars(self.readInt20())
elif tag == WATags.BINARY_32:
return self.readStringFromChars(self.readInt32())
elif tag == WATags.JID_PAIR:
i = self.readString(self.readByte())
j = self.readString(self.readByte())
if i is None or j is None:
raise ValueError("invalid jid pair: " + str(i) + ", " + str(j))
return i + "@" + j
elif tag == WATags.NIBBLE_8 or tag == WATags.HEX_8:
return self.readPacked8(tag)
else:
raise ValueError("invalid string with tag " + str(tag))
def readStringFromChars(self, length):
self.checkEOS(length)
ret = self.data[self.index : self.index + length]
self.index += length
return ret
def readAttributes(self, n):
ret = {}
if n == 0:
return
for i in range(n):
index = self.readString(self.readByte())
ret[index] = self.readString(self.readByte())
return ret
def readList(self, tag):
ret = []
for i in range(self.readListSize(tag)):
ret.append(self.readNode())
return ret
def readNode(self):
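        # A node is [description, attributes, content]; content is a child-node list, bytes, or a string.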
listSize = self.readListSize(self.readByte())
descrTag = self.readByte()
if descrTag == WATags.STREAM_END:
raise ValueError("unexpected stream end")
descr = self.readString(descrTag)
if listSize == 0 or not descr:
raise ValueError("invalid node")
attrs = self.readAttributes((listSize - 1) >> 1)
if listSize % 2 == 1:
return [descr, attrs, None]
tag = self.readByte()
if self.isListTag(tag):
content = self.readList(tag)
elif tag == WATags.BINARY_8:
content = self.readBytes(self.readByte())
elif tag == WATags.BINARY_20:
content = self.readBytes(self.readInt20())
elif tag == WATags.BINARY_32:
content = self.readBytes(self.readInt32())
else:
content = self.readString(tag)
return [descr, attrs, content]
def readBytes(self, n):
ret = ""
for i in range(n):
ret += chr(self.readByte())
return ret
def getToken(self, index):
if index < 3 or index >= len(WASingleByteTokens):
raise ValueError("invalid token index: " + str(index))
return WASingleByteTokens[index]
def getTokenDouble(self, index1, index2):
n = 256 * index1 + index2
if n < 0 or n >= len(WADoubleByteTokens):
raise ValueError("invalid token index: " + str(n))
return WADoubleByteTokens[n]
def whatsappReadMessageArray(msgs):
if not isinstance(msgs, list):
return msgs
ret = []
for x in msgs:
ret.append(
WAWebMessageInfo.decode(x[2])
if isinstance(x, list) and x[0] == "message"
else x
)
return ret
def whatsappReadBinary(data, withMessages=False):
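    # Decodes the top-level node; with withMessages, message entries in its content are protobuf-decoded.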
node = WABinaryReader(data).readNode()
if (
withMessages
and node is not None
and isinstance(node, list)
and node[1] is not None
):
node[2] = whatsappReadMessageArray(node[2])
return node
|
py | 1a39de4da6cf32d0a30450c2862580454c9f6e84 | import fv3gfs.wrapper
import fv3gfs.wrapper.examples
import f90nml
from datetime import timedelta
if __name__ == "__main__":
# load timestep from the namelist
namelist = f90nml.read("input.nml")
timestep = timedelta(seconds=namelist["coupler_nml"]["dt_atmos"])
# initialize the machine learning model
rf_model = fv3gfs.wrapper.examples.get_random_forest()
fv3gfs.wrapper.initialize()
for i in range(fv3gfs.wrapper.get_step_count()):
fv3gfs.wrapper.step_dynamics()
fv3gfs.wrapper.step_physics()
# apply an update from the machine learning model
state = fv3gfs.wrapper.get_state(rf_model.inputs)
rf_model.update(state, timestep=timestep)
fv3gfs.wrapper.set_state(state)
fv3gfs.wrapper.save_intermediate_restart_if_enabled()
fv3gfs.wrapper.cleanup()
|
py | 1a39df325aa6243be522f7767a596f4ab6a26e40 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import featuretools as ft
import pandas as pd
import pytest
from numpy import nan
from cardea.data_loader import EntitySetLoader
from cardea.problem_definition import MissedAppointment
@pytest.fixture()
def missed_appointment():
return MissedAppointment()
@pytest.fixture()
def es_loader():
return EntitySetLoader()
@pytest.fixture()
def cutoff_times():
return pd.DataFrame(
{"instance_id": [10, 11, 12],
"time": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018],
"label": ['noshow', 'noshow', 'fulfilled']
})
@pytest.fixture()
def objects(es_loader):
appointment_df = pd.DataFrame({"identifier": [10, 11, 12],
"status": ['noshow', 'noshow', 'fulfilled'],
"start": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018],
"participant": [120, 121, 122],
"created": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018]})
participant_df = pd.DataFrame({"object_id": [120, 121, 122],
"actor": [0, 1, 2]})
patient_df = pd.DataFrame({"identifier": [0, 1, 2],
"gender": ['female', 'female', 'male'],
"birthDate": ['10/21/2000', '7/2/2000', '1/10/2000'],
"active": ['True', 'True', 'nan']})
appointment = es_loader.create_object(appointment_df, 'Appointment')
participant = es_loader.create_object(participant_df, 'Appointment_Participant')
patient = es_loader.create_object(patient_df, 'Patient')
return [appointment, participant, patient]
@pytest.fixture()
def es_success(objects, es_loader):
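    # Assembles a featuretools EntitySet from the FHIR objects and links their relationships.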
es = ft.EntitySet(id="test")
identifiers = es_loader.get_object_ids(objects)
fhir_dict = es_loader.get_dataframes(objects)
es_loader.create_entity(fhir_dict, identifiers, entity_set=es)
relationships = es_loader.get_relationships(objects, list(fhir_dict.keys()))
es_loader.create_relationships(relationships, entity_set=es)
return es
@pytest.fixture()
def object_error_missing_label(es_loader):
appointment_df = pd.DataFrame({"identifier": [10, 11, 12],
"start": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018],
"participant": [120, 121, 122],
"created": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018]})
appointment = es_loader.create_object(appointment_df, 'Appointment')
return appointment
@pytest.fixture()
def objects_error_missing_cutoff_label(es_loader):
appointment_df = pd.DataFrame({"identifier": [10, 11, 12],
"start": [7 / 22 / 2018, 8 / 21 / 2018, 9 / 16 / 2018],
"status": ['noshow', 'noshow', 'fulfilled'],
"participant": [120, 121, 122]})
appointment = es_loader.create_object(appointment_df, 'Appointment')
return appointment
@pytest.fixture()
def entityset_error_missing_label(objects, object_error_missing_label, es_loader):
es = ft.EntitySet(id="test")
objects.extend([object_error_missing_label])
identifiers = es_loader.get_object_ids(objects)
fhir_dict = es_loader.get_dataframes(objects)
es_loader.create_entity(fhir_dict, identifiers, entity_set=es)
relationships = es_loader.get_relationships(objects, list(fhir_dict.keys()))
es_loader.create_relationships(relationships, entity_set=es)
return es
@pytest.fixture()
def entityset_error_missing_cutoff_label(objects, objects_error_missing_cutoff_label, es_loader):
es = ft.EntitySet(id="test")
for object in objects:
es_loader.create_entity(object, entity_set=es)
for object in objects:
es_loader.create_relationships(object, entity_set=es)
es_loader.create_entity(objects_error_missing_cutoff_label, entity_set=es)
es_loader.create_relationships(objects_error_missing_cutoff_label, entity_set=es)
return es
def test_generate_cutoff_times_success(
es_success, missed_appointment, cutoff_times):
_, _, generated_df = missed_appointment.generate_cutoff_times(es_success)
generated_df.index = cutoff_times.index # both should have the same index
generated_df = generated_df[cutoff_times.columns] # same columns order
assert generated_df.equals(cutoff_times)
def test_generate_cutoff_times_error(
entityset_error_missing_label, missed_appointment):
with pytest.raises(ValueError):
missed_appointment.generate_cutoff_times(
entityset_error_missing_label)
def test_generate_cutoff_times_error_value(es_success, missed_appointment):
es_success['Appointment'].df.loc[len(es_success['Appointment'].df)] = [
nan, nan, nan, nan, nan]
with pytest.raises(ValueError):
missed_appointment.generate_cutoff_times(
es_success)
def test_generate_cutoff_times_missing_cutoff_time(
es_success, missed_appointment):
es_success['Appointment'].delete_variables(['created'])
with pytest.raises(ValueError):
missed_appointment.generate_cutoff_times(
es_success)
|